/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)

typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;

ULONG CcRosTraceLevel = CC_API_DEBUG;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* Counters:
 * - Number of pages flushed to the disk
 * - Number of flush operations
 */
ULONG CcDataPages = 0;
ULONG CcDataFlushes = 0;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* The sum wrapped around: we overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check whether we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, RetryForceCheckPerFile))
            {
                /* We can, so remove it from the list and stop looking for entries */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset the count, as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* No writable entry found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}
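
/* The accumulation above relies on unsigned wrap-around to detect overflow:
 * ULONG arithmetic is modulo 2^32, so the sum ends up smaller than the
 * addend exactly when the addition wrapped. A minimal sketch of the idiom
 * (illustrative only; CcpWouldOverflow is a hypothetical helper, not part
 * of this file):
 */
#if 0
static BOOLEAN
CcpWouldOverflow(ULONG Accumulated, ULONG Increment)
{
    /* A wrapped sum is strictly below the increment that was just added */
    return (Accumulated + Increment) < Increment;
}
#endif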

VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * The PrivateCacheMap can disappear under us if the handle to the file
     * is closed in-between (the private map is attached to the handle, not
     * to the file), so we need to hold the master lock while we deal with
     * it: it cannot go away while we own that lock.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract the read offset and length, and release the private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }

    /* The rest of the algorithm works like CcCopyData, with the slight
     * difference that we don't copy the data back to a user-backed buffer:
     * we just bring the data into Cc
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset, VACB_MAPPING_GRANULARITY),
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        Status = CcRosEnsureVacbResident(Vacb, TRUE, FALSE,
                                         CurrentOffset % VACB_MAPPING_GRANULARITY, PartialLength);
        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
            DPRINT1("Failed to read data: %lx!\n", Status);
            goto Clear;
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        Status = CcRosEnsureVacbResident(Vacb, TRUE, FALSE, 0, PartialLength);
        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
            DPRINT1("Failed to read data: %lx!\n", Status);
            goto Clear;
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See the previous comment about the private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark the read ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If the file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}
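
/* Read ahead, like the copy routines below, walks the file in
 * VACB_MAPPING_GRANULARITY-sized chunks: a possibly partial head chunk
 * first, then whole chunks. A minimal sketch of the chunking arithmetic
 * used above (CcpChunkLength is a hypothetical helper, shown for
 * illustration only):
 */
#if 0
static ULONG
CcpChunkLength(LONGLONG Offset, ULONG Remaining)
{
    /* Bytes from Offset to the end of its VACB, capped by what remains */
    ULONG OffsetInVacb = Offset % VACB_MAPPING_GRANULARITY;
    return min(Remaining, VACB_MAPPING_GRANULARITY - OffsetInVacb);
}
#endif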

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Always allow remote files, unless we come from the posted-retry path */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed the max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    Pages = BYTES_TO_PAGES(Length);

    /* By default, assume the per-file limits won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check the per-file limits? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If the master lock is not held yet, acquire it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have per-file limits set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release the master lock */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

    /* So, now allow the write if:
     * - This is not the first try, or there are no deferred writes yet
     * AND:
     * - We don't exceed the global dirty page threshold
     * - We don't exceed what Mm can allow us to use
     *   + If we're above the top throttle limit, that's fine
     *   + If we're above the bottom limit with few modified pages, that's fine
     *   + Otherwise, throttle!
     * - We don't exceed the per-file limits
     */
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* If the caller cannot wait, give up now; otherwise we'll loop below
     * until the write can go through for real
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first attempt */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    DPRINT1("Actively deferring write for: %p\n", FileObject);
    /* Now, loop until our event is set: when it is, the caller can,
     * and must, perform the write immediately
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}
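
/* Typical caller pattern, sketched under assumptions: a file system checks
 * CcCanIWrite with Wait == FALSE before a cached write and, when throttled,
 * posts the operation through CcDeferWrite so CcPostDeferredWrites retries
 * it later. FsdPostWrite, FsdDeferredWrite and their contexts are
 * hypothetical caller-side names, not part of this file.
 */
#if 0
static NTSTATUS
FsdPostWrite(PFILE_OBJECT FileObject, PVOID WriteContext, ULONG Length, BOOLEAN CanWait)
{
    if (!CcCanIWrite(FileObject, Length, CanWait, FALSE))
    {
        /* Throttled: queue the write for a later retry */
        CcDeferWrite(FileObject, FsdDeferredWrite, WriteContext, NULL, Length, FALSE);
        return STATUS_PENDING;
    }

    /* Not throttled: the caller may proceed with CcCopyWrite */
    return STATUS_SUCCESS;
}
#endif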

static
int
CcpCheckInvalidUserBuffer(PEXCEPTION_POINTERS Except, PVOID Buffer, ULONG Length)
{
    ULONG_PTR ExceptionAddress;
    ULONG_PTR BeginAddress = (ULONG_PTR)Buffer;
    ULONG_PTR EndAddress = (ULONG_PTR)Buffer + Length;

    if (Except->ExceptionRecord->ExceptionCode != STATUS_ACCESS_VIOLATION)
        return EXCEPTION_CONTINUE_SEARCH;
    if (Except->ExceptionRecord->NumberParameters < 2)
        return EXCEPTION_CONTINUE_SEARCH;

    ExceptionAddress = Except->ExceptionRecord->ExceptionInformation[1];
    if ((ExceptionAddress >= BeginAddress) && (ExceptionAddress < EndAddress))
        return EXCEPTION_EXECUTE_HANDLER;

    return EXCEPTION_CONTINUE_SEARCH;
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    LONGLONG ReadEnd = FileOffset->QuadPart + Length;
    ULONG ReadLength = 0;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    if (!SharedCacheMap)
        return FALSE;

    /* Documented to ASSERT, but KMTests test this case... */
    // ASSERT((FileOffset->QuadPart + Length) <= SharedCacheMap->FileSize.QuadPart);

    CurrentOffset = FileOffset->QuadPart;
    while (CurrentOffset < ReadEnd)
    {
        Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
            ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
            SIZE_T CopyLength = VacbLength;

            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
                return FALSE;

            _SEH2_TRY
            {
                RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + VacbOffset, CopyLength);
            }
            _SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
            {
                ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
            }
            _SEH2_END;

            ReadLength += VacbLength;

            Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
            CurrentOffset += VacbLength;
            Length -= VacbLength;
        }
        _SEH2_FINALLY
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
        }
        _SEH2_END;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = ReadLength;

#if 0
    /* If that was a successful sync read operation, let's handle read ahead */
    if (Length == 0 && Wait)
    {
        PPRIVATE_CACHE_MAP PrivateCacheMap = FileObject->PrivateCacheMap;

        /* If the file isn't random access and the next read may cross a
         * VACB boundary, schedule the next read
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + ReadLength - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, FileOffset, ReadLength);
        }

        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset->QuadPart;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset->QuadPart + ReadLength;
    }
#endif

    return TRUE;
}
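
/* Sketch of how a file system's cached read path typically drives
 * CcCopyRead: a FALSE return with Wait == FALSE means the data was not
 * resident and the request should be posted to a worker that can wait.
 * FsdRead and the surrounding details are hypothetical and simplified;
 * caching is assumed to be already set up via CcInitializeCacheMap.
 */
#if 0
static NTSTATUS
FsdRead(PFILE_OBJECT FileObject, PLARGE_INTEGER Offset, ULONG Length,
        PVOID Buffer, BOOLEAN CanWait, PIO_STATUS_BLOCK IoStatus)
{
    if (!CcCopyRead(FileObject, Offset, Length, CanWait, Buffer, IoStatus))
    {
        /* Data not cached yet and we may not block: post the request */
        return STATUS_PENDING;
    }

    return IoStatus->Status;
}
#endif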

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    LONGLONG WriteEnd;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    if (!SharedCacheMap)
        return FALSE;

    Status = RtlLongLongAdd(FileOffset->QuadPart, Length, &WriteEnd);
    if (!NT_SUCCESS(Status))
        ExRaiseStatus(Status);

    ASSERT(WriteEnd <= SharedCacheMap->SectionSize.QuadPart);

    CurrentOffset = FileOffset->QuadPart;
    while (CurrentOffset < WriteEnd)
    {
        ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
        ULONG VacbLength = min(WriteEnd - CurrentOffset, VACB_MAPPING_GRANULARITY - VacbOffset);

        Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
            {
                return FALSE;
            }

            _SEH2_TRY
            {
                RtlCopyMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), Buffer, VacbLength);
            }
            _SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
            {
                ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
            }
            _SEH2_END;

            Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
            CurrentOffset += VacbLength;

            /* Tell Mm */
            Status = MmMakePagesDirty(NULL, Add2Ptr(Vacb->BaseAddress, VacbOffset), VacbLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
        _SEH2_FINALLY
        {
            /* Do not mark the VACB as dirty if an exception was raised */
            CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
        }
        _SEH2_END;
    }

    /* Flush if needed */
    if (FileObject->Flags & FO_WRITE_THROUGH)
        CcFlushCache(FileObject->SectionObjectPointer, FileOffset, Length, NULL);

    return TRUE;
}

/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first attempt */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
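
/* The PostRoutine stored above is invoked by CcPostDeferredWrites once the
 * throttle clears. A minimal sketch of a matching routine (FsdDeferredWrite
 * and FsdReissueWrite are hypothetical; the signature follows
 * PCC_POST_DEFERRED_WRITE):
 */
#if 0
VOID
NTAPI
FsdDeferredWrite(PVOID Context1, PVOID Context2)
{
    /* Context1/Context2 are the values handed to CcDeferWrite; the file
     * system uses them to locate and re-issue the parked write request */
    UNREFERENCED_PARAMETER(Context2);
    FsdReissueWrite(Context1); /* hypothetical */
}
#endif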

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (!SharedCacheMap)
    {
        /* Make this a non-cached write */
        IO_STATUS_BLOCK Iosb;
        KEVENT Event;
        PMDL Mdl;
        ULONG i;
        ULONG CurrentLength;
        PPFN_NUMBER PfnArray;

        /* Set up our MDL: every page of the transfer maps the shared zero page */
        Mdl = IoAllocateMdl(NULL, min(Length, MAX_ZERO_LENGTH), FALSE, FALSE, NULL);
        if (!Mdl)
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);

        PfnArray = MmGetMdlPfnArray(Mdl);
        for (i = 0; i < BYTES_TO_PAGES(Mdl->ByteCount); i++)
            PfnArray[i] = CcZeroPage;
        Mdl->MdlFlags |= MDL_PAGES_LOCKED;

        /* Perform the writes sequentially */
        while (Length > 0)
        {
            CurrentLength = min(Length, MAX_ZERO_LENGTH);

            Mdl->ByteCount = CurrentLength;

            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                IoFreeMdl(Mdl);
                ExRaiseStatus(Status);
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }

        IoFreeMdl(Mdl);

        return TRUE;
    }

    /* See if we should simply truncate the valid data length */
    if ((StartOffset->QuadPart < SharedCacheMap->ValidDataLength.QuadPart) &&
        (EndOffset->QuadPart >= SharedCacheMap->ValidDataLength.QuadPart))
    {
        DPRINT1("Truncating VDL.\n");
        SharedCacheMap->ValidDataLength = *StartOffset;
        return TRUE;
    }

    ASSERT(EndOffset->QuadPart <= SharedCacheMap->SectionSize.QuadPart);

    while (WriteOffset.QuadPart < EndOffset->QuadPart)
    {
        ULONG VacbOffset = WriteOffset.QuadPart % VACB_MAPPING_GRANULARITY;
        ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);

        Status = CcRosGetVacb(SharedCacheMap, WriteOffset.QuadPart, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
            {
                return FALSE;
            }

            RtlZeroMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), VacbLength);

            WriteOffset.QuadPart += VacbLength;
            Length -= VacbLength;

            /* Tell Mm */
            Status = MmMakePagesDirty(NULL, Add2Ptr(Vacb->BaseAddress, VacbOffset), VacbLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
        _SEH2_FINALLY
        {
            /* Do not mark the VACB as dirty if an exception was raised */
            CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
        }
        _SEH2_END;
    }

    /* Flush if needed */
    if (FileObject->Flags & FO_WRITE_THROUGH)
        CcFlushCache(FileObject->SectionObjectPointer, StartOffset, EndOffset->QuadPart - StartOffset->QuadPart, NULL);

    return TRUE;
}
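
/* Sketch of the usual caller-side use of CcZeroData: when a cached write
 * starts beyond the current valid data length, the file system zeroes the
 * gap first. The Fcb/ValidDataLength bookkeeping and names here are
 * hypothetical and simplified.
 */
#if 0
if (WriteOffset.QuadPart > Fcb->Header.ValidDataLength.QuadPart)
{
    LARGE_INTEGER ZeroStart = Fcb->Header.ValidDataLength;

    /* FALSE is only returned when Wait == FALSE and the data is not resident */
    if (!CcZeroData(FileObject, &ZeroStart, &WriteOffset, CanWait))
        return STATUS_PENDING;
}
#endif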