/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)

typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;

ULONG CcRosTraceLevel = CC_API_DEBUG;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* Counters:
 * - Number of pages flushed to the disk
 * - Number of flush operations
 */
ULONG CcDataPages = 0;
ULONG CcDataFlushes = 0;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

VOID
CcPostDeferredWrites(VOID)
{
    LIST_ENTRY ToInsertBack;

    InitializeListHead(&ToInsertBack);

    /* We'll try to write as much as we can */
    while (TRUE)
    {
        PDEFERRED_WRITE DeferredWrite;
        PLIST_ENTRY ListEntry;

        DeferredWrite = NULL;

        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
        if (!ListEntry)
            break;

        DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

        /* Check if we can write */
        if (CcCanIWrite(DeferredWrite->FileObject, DeferredWrite->BytesToWrite, FALSE, RetryForceCheckPerFile))
        {
            /* If we have an event, set it and move on */
            if (DeferredWrite->Event)
            {
                KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
            }
            /* Otherwise, call the write routine and free the context */
            else
            {
                DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
                ExFreePoolWithTag(DeferredWrite, 'CcDw');
            }
            continue;
        }

        /* Keep it for later */
        InsertHeadList(&ToInsertBack, &DeferredWrite->DeferredWriteLinks);

        /* If we don't accept modified pages, stop here */
        if (!DeferredWrite->LimitModifiedPages)
        {
            break;
        }
    }

    /* Insert what we couldn't write back into the list */
    while (!IsListEmpty(&ToInsertBack))
    {
        PLIST_ENTRY ListEntry = RemoveTailList(&ToInsertBack);
        ExInterlockedInsertHeadList(&CcDeferredWrites, ListEntry, &CcDeferredWriteSpinLock);
    }
}
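
/* Worked example (illustrative scenario): assume CcDeferredWrites holds
 * entries A, B, C, queued in that order. If A now passes CcCanIWrite(), it
 * is completed (its event is signaled, or its post routine runs and the
 * context is freed). If B is then refused and does not have
 * LimitModifiedPages set, the scan stops; B lands in ToInsertBack, and the
 * tail-to-head reinsertion puts it back at the head of CcDeferredWrites,
 * so the original ordering B, C is preserved for the next run.
 */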

VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * PrivateCacheMap might disappear in-between if the handle
     * to the file is closed (the private map is attached to the handle,
     * not to the file), so we need to hold the master lock while we deal
     * with it. It cannot go away while we hold that lock.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract read offset and length and release the private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }

    /* The rest of the algorithm works like CcCopyData, with the slight
     * difference that we don't copy the data back into a user-backed buffer:
     * we just bring the data into Cc
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset, VACB_MAPPING_GRANULARITY),
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        Status = CcRosEnsureVacbResident(Vacb, TRUE, FALSE,
                                         CurrentOffset % VACB_MAPPING_GRANULARITY, PartialLength);
        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
            DPRINT1("Failed to read data: %lx!\n", Status);
            goto Clear;
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        Status = CcRosEnsureVacbResident(Vacb, TRUE, FALSE, 0, PartialLength);
        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
            DPRINT1("Failed to read data: %lx!\n", Status);
            goto Clear;
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See the previous comment about the private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If the file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}
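
/* Worked example (illustrative numbers, assuming the usual 256kB / 0x40000
 * VACB_MAPPING_GRANULARITY): for CurrentOffset = 0x42000 and
 * Length = 0x80000, the partial step above maps the VACB at 0x40000 and
 * makes PartialLength = min(0x80000, 0x40000 - 0x2000) = 0x3E000 bytes
 * resident, which aligns CurrentOffset to 0x80000. The main loop then
 * handles one full 256kB VACB at 0x80000 and the remaining 0x2000 bytes
 * at 0xC0000.
 */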

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Always allow remote files, unless this is a retry from the deferred-write path */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed the max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    Pages = BYTES_TO_PAGES(Length);

    /* By default, assume the per-file limits won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check the per-file limits? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If the master lock is not held yet, acquire it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have per-file limits set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release the master lock */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

    /* So, now allow the write if:
     * - This is not the first try, or nothing is deferred yet
     * AND:
     * - We don't exceed the global dirty page threshold
     * - We don't exceed what Mm can allow us to use
     *   + If we're above the top watermark, that's fine
     *   + If we're above the bottom watermark with few modified pages, that's fine
     *   + Otherwise, throttle!
     * - We don't exceed the per-file limits
     */
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* If the caller cannot wait, fail now; otherwise enter the wait loop
     * until we can write for real
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Now make sure that the lazy writer scan will be active */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
        CcScheduleLazyWriteScan(TRUE);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

#if DBG
    DPRINT1("Actively deferring write for: %p\n", FileObject);
    DPRINT1("Because:\n");
    if (CcTotalDirtyPages + Pages >= CcDirtyPageThreshold)
        DPRINT1("    There are too many dirty cache pages: %x + %x >= %x\n", CcTotalDirtyPages, Pages, CcDirtyPageThreshold);
    if (MmAvailablePages <= MmThrottleTop)
        DPRINT1("    Available pages are below the throttle top: %lx <= %lx\n", MmAvailablePages, MmThrottleTop);
    if (MmModifiedPageListHead.Total >= 1000)
        DPRINT1("    There are too many modified pages: %lu >= 1000\n", MmModifiedPageListHead.Total);
    if (MmAvailablePages <= MmThrottleBottom)
        DPRINT1("    Available pages are below the throttle bottom: %lx <= %lx\n", MmAvailablePages, MmThrottleBottom);
#endif
    /* Now, we'll loop until our event is set. When it is set, the caller
     * can, and must, immediately perform the write
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}
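
#if 0
/* Hedged sketch (not compiled): how a filesystem's write path might consult
 * the throttling logic above. MyFsdWrite, MyFsdPostWrite and
 * MyFsdCaptureWrite (a helper that snapshots the request into a
 * caller-allocated context) are hypothetical; see the companion sketch
 * after CcDeferWrite for the post routine side.
 */
static NTSTATUS
MyFsdWrite(PFILE_OBJECT FileObject, PLARGE_INTEGER Offset, PVOID Buffer, ULONG Length, BOOLEAN CanWait)
{
    /* Ask Cc whether the write may proceed without flooding dirty pages */
    if (!CcCanIWrite(FileObject, Length, CanWait, FALSE))
    {
        PMY_WRITE_CONTEXT WriteCtx = MyFsdCaptureWrite(FileObject, Offset, Buffer, Length);

        /* Throttled: queue it; Cc calls MyFsdPostWrite once writing is allowed */
        CcDeferWrite(FileObject, MyFsdPostWrite, WriteCtx, NULL, Length, FALSE);
        return STATUS_PENDING;
    }

    /* Allowed (or we waited in CcCanIWrite): do the cached copy right away */
    return CcCopyWrite(FileObject, Offset, Length, CanWait, Buffer) ? STATUS_SUCCESS : STATUS_PENDING;
}
#endif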

static
int
CcpCheckInvalidUserBuffer(PEXCEPTION_POINTERS Except, PVOID Buffer, ULONG Length)
{
    ULONG_PTR ExceptionAddress;
    ULONG_PTR BeginAddress = (ULONG_PTR)Buffer;
    ULONG_PTR EndAddress = (ULONG_PTR)Buffer + Length;

    if (Except->ExceptionRecord->ExceptionCode != STATUS_ACCESS_VIOLATION)
        return EXCEPTION_CONTINUE_SEARCH;
    if (Except->ExceptionRecord->NumberParameters < 2)
        return EXCEPTION_CONTINUE_SEARCH;

    ExceptionAddress = Except->ExceptionRecord->ExceptionInformation[1];
    if ((ExceptionAddress >= BeginAddress) && (ExceptionAddress < EndAddress))
        return EXCEPTION_EXECUTE_HANDLER;

    return EXCEPTION_CONTINUE_SEARCH;
}
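
/* Example (illustrative addresses): for Buffer = 0x00400000 and
 * Length = 0x1000, an access violation whose faulting address
 * (ExceptionInformation[1]) is 0x00400A00 lies inside
 * [0x00400000, 0x00401000) and is handled as a bad user buffer, whereas a
 * fault at a kernel-mode cache-view address such as 0xC1000000 falls
 * outside the range and keeps unwinding, so paging errors on the shared
 * cache map are not masked as user-buffer errors.
 */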

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    LONGLONG ReadEnd = FileOffset->QuadPart + Length;
    ULONG ReadLength = 0;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    if (!SharedCacheMap)
        return FALSE;

    /* Documented to ASSERT, but KMTests test this case... */
    // ASSERT((FileOffset->QuadPart + Length) <= SharedCacheMap->FileSize.QuadPart);

    CurrentOffset = FileOffset->QuadPart;
    while (CurrentOffset < ReadEnd)
    {
        Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
            ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
            SIZE_T CopyLength = VacbLength;

            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
                return FALSE;

            _SEH2_TRY
            {
                RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + VacbOffset, CopyLength);
            }
            _SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
            {
                ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
            }
            _SEH2_END;

            ReadLength += VacbLength;

            Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
            CurrentOffset += VacbLength;
            Length -= VacbLength;
        }
        _SEH2_FINALLY
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
        }
        _SEH2_END;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = ReadLength;

#if 0
    /* If that was a successful sync read operation, let's handle read ahead */
    if (Length == 0 && Wait)
    {
        PPRIVATE_CACHE_MAP PrivateCacheMap = FileObject->PrivateCacheMap;

        /* If the file isn't random access and the next read may take us
         * across a VACB boundary, schedule the next read
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + ReadLength - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, FileOffset, ReadLength);
        }

        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset->QuadPart;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset->QuadPart + ReadLength;
    }
#endif

    return TRUE;
}
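
#if 0
/* Hedged sketch (not compiled): the cached branch of a read handler as a
 * filesystem might write it. MyFsdCachedRead and the surrounding dispatch
 * plumbing are hypothetical; only the CcCopyRead contract shown here comes
 * from the routine above.
 */
static NTSTATUS
MyFsdCachedRead(PFILE_OBJECT FileObject, PLARGE_INTEGER Offset, ULONG Length, PVOID Buffer, BOOLEAN CanWait)
{
    IO_STATUS_BLOCK Iosb;

    /* With Wait == FALSE, a FALSE return means the data is not resident
     * yet: the request must be retried, typically posted to a worker */
    if (!CcCopyRead(FileObject, Offset, Length, CanWait, Buffer, &Iosb))
        return STATUS_PENDING;

    /* Iosb.Information holds the number of bytes actually copied */
    return Iosb.Status;
}
#endif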

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    LONGLONG WriteEnd;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    if (!SharedCacheMap)
        return FALSE;

    Status = RtlLongLongAdd(FileOffset->QuadPart, Length, &WriteEnd);
    if (!NT_SUCCESS(Status))
        ExRaiseStatus(Status);

    ASSERT(WriteEnd <= SharedCacheMap->SectionSize.QuadPart);

    CurrentOffset = FileOffset->QuadPart;
    while (CurrentOffset < WriteEnd)
    {
        ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
        ULONG VacbLength = min(WriteEnd - CurrentOffset, VACB_MAPPING_GRANULARITY - VacbOffset);

        Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
            {
                return FALSE;
            }

            _SEH2_TRY
            {
                RtlCopyMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), Buffer, VacbLength);
            }
            _SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
            {
                ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
            }
            _SEH2_END;

            Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
            CurrentOffset += VacbLength;

            /* Tell Mm */
            Status = MmMakePagesDirty(NULL, Add2Ptr(Vacb->BaseAddress, VacbOffset), VacbLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
        _SEH2_FINALLY
        {
            /* Do not mark the VACB as dirty if an exception was raised */
            CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
        }
        _SEH2_END;
    }

    /* Flush if needed */
    if (FileObject->Flags & FO_WRITE_THROUGH)
        CcFlushCache(FileObject->SectionObjectPointer, FileOffset, Length, NULL);

    return TRUE;
}
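
/* Worked example (illustrative numbers, assuming a 256kB / 0x40000 VACB
 * granularity): a write of Length = 0x200 at FileOffset = 0x3FF00 crosses
 * a VACB boundary and takes two loop iterations. First,
 * VacbOffset = 0x3FF00 and VacbLength = min(0x200, 0x100) = 0x100 into the
 * VACB at offset 0; then VacbOffset = 0 and VacbLength = 0x100 into the
 * VACB at 0x40000. Each chunk is copied, reported to Mm, and its VACB is
 * released dirty.
 */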

/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
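
#if 0
/* Hedged sketch (not compiled): the shape of a post routine that
 * CcDeferWrite() will invoke once CcPostDeferredWrites() decides the write
 * can go through. MY_WRITE_CONTEXT, MyFsdPostWrite, and the 'xtCW' pool tag
 * are hypothetical; the (Context1, Context2) signature matches
 * PCC_POST_DEFERRED_WRITE.
 */
typedef struct _MY_WRITE_CONTEXT
{
    PFILE_OBJECT FileObject;
    LARGE_INTEGER Offset;
    ULONG Length;
    PVOID Buffer;
} MY_WRITE_CONTEXT, *PMY_WRITE_CONTEXT;

static VOID NTAPI
MyFsdPostWrite(PVOID Context1, PVOID Context2)
{
    PMY_WRITE_CONTEXT Ctx = Context1;

    UNREFERENCED_PARAMETER(Context2);

    /* By the time we are called, throttling has been rechecked with
     * RetryForceCheckPerFile, so the cached write can proceed, waiting */
    CcCopyWrite(Ctx->FileObject, &Ctx->Offset, Ctx->Length, TRUE, Ctx->Buffer);
    ExFreePoolWithTag(Ctx, 'xtCW');
}
#endif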

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}
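
/* Note on the two wrappers above: both take a 32-bit FileOffset, so they
 * can only address the first 4GB of a file; they widen it into a
 * LARGE_INTEGER and always pass Wait = TRUE, which is why the blocking
 * CcCopyRead/CcCopyWrite calls are simply asserted to succeed.
 */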

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (!SharedCacheMap)
    {
        /* Make this a non-cached write */
        IO_STATUS_BLOCK Iosb;
        KEVENT Event;
        PMDL Mdl;
        ULONG i;
        ULONG CurrentLength;
        PPFN_NUMBER PfnArray;

        /* Setup our Mdl */
        Mdl = IoAllocateMdl(NULL, min(Length, MAX_ZERO_LENGTH), FALSE, FALSE, NULL);
        if (!Mdl)
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);

        PfnArray = MmGetMdlPfnArray(Mdl);
        for (i = 0; i < BYTES_TO_PAGES(Mdl->ByteCount); i++)
            PfnArray[i] = CcZeroPage;
        Mdl->MdlFlags |= MDL_PAGES_LOCKED;

        /* Perform the write sequentially */
        while (Length > 0)
        {
            CurrentLength = min(Length, MAX_ZERO_LENGTH);

            Mdl->ByteCount = CurrentLength;

            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                IoFreeMdl(Mdl);
                ExRaiseStatus(Status);
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }

        IoFreeMdl(Mdl);

        return TRUE;
    }

    /* See if we should simply truncate the valid data length */
    if ((StartOffset->QuadPart < SharedCacheMap->ValidDataLength.QuadPart) &&
        (EndOffset->QuadPart >= SharedCacheMap->ValidDataLength.QuadPart))
    {
        DPRINT1("Truncating VDL.\n");
        SharedCacheMap->ValidDataLength = *StartOffset;
        return TRUE;
    }

    ASSERT(EndOffset->QuadPart <= SharedCacheMap->SectionSize.QuadPart);

    while (WriteOffset.QuadPart < EndOffset->QuadPart)
    {
        ULONG VacbOffset = WriteOffset.QuadPart % VACB_MAPPING_GRANULARITY;
        ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);

        Status = CcRosGetVacb(SharedCacheMap, WriteOffset.QuadPart, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
            {
                return FALSE;
            }

            RtlZeroMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), VacbLength);

            WriteOffset.QuadPart += VacbLength;
            Length -= VacbLength;

            /* Tell Mm */
            Status = MmMakePagesDirty(NULL, Add2Ptr(Vacb->BaseAddress, VacbOffset), VacbLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
        _SEH2_FINALLY
        {
            /* Do not mark the VACB as dirty if an exception was raised */
            CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
        }
        _SEH2_END;
    }

    /* Flush if needed */
    if (FileObject->Flags & FO_WRITE_THROUGH)
        CcFlushCache(FileObject->SectionObjectPointer, StartOffset, EndOffset->QuadPart - StartOffset->QuadPart, NULL);

    return TRUE;
}