/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 *
 * A sketch of this five-step sequence follows below.
 */
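
/*
 * Purely illustrative sketch of the five steps above (kept under "#if 0",
 * never compiled). Everything named My* is hypothetical, and since the
 * CcInitializeFileCache/CcRequestCachePage/CcReleaseCachePage interface is
 * only described informally in the notes, the exact signatures used here
 * are assumptions, not real prototypes.
 */
#if 0
NTSTATUS
MyFsdReadOnePage(
    PMY_FCB Fcb,            /* hypothetical FCB type */
    LONGLONG FileOffset,    /* assumed 4k-aligned for brevity */
    PVOID Buffer)
{
    PMY_CACHE_PAGE Page;
    NTSTATUS Status;

    /* (1) Initiate caching for the FCB on first use */
    if (!Fcb->CacheInitialized)
        CcInitializeFileCache(Fcb->FileObject);

    /* (2) Obtain the cache page backing this 4k region */
    Status = CcRequestCachePage(Fcb->FileObject, FileOffset, &Page);
    if (!NT_SUCCESS(Status))
        return Status;

    /* (3) Reading and not up to date: fetch the data from the medium */
    if (!Page->Valid)
    {
        Status = MyReadPageFromDisk(Fcb, FileOffset, Page->BaseAddress);
        if (!NT_SUCCESS(Status))
        {
            /* Read failed: release with VALID as FALSE and return an error */
            CcReleaseCachePage(Page, FALSE);
            return Status;
        }
    }

    /* (4) Copy the data out of the page */
    RtlCopyMemory(Buffer, Page->BaseAddress, PAGE_SIZE);

    /* (5) Release the cache page */
    CcReleaseCachePage(Page, TRUE);
    return STATUS_SUCCESS;
}
#endif
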
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which the lazy writer will start acting
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
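
/*
 * For orientation, a minimal sketch of how these counters are meant to be
 * consumed. The actual policy lives in the deferred-write path (CcCanIWrite
 * and friends, outside this file); this mirrors the "+64 (max charge)"
 * check printed by the kdbg extension at the bottom of this file, and is an
 * assumption-level illustration with a hypothetical name:
 */
#if 0
static
BOOLEAN
CcpWriteWouldCrossThreshold(
    ULONG PagesToWrite)
{
    /* Defer/throttle once the dirty page count would reach the threshold */
    return (CcDirtyPageThreshold != 0) &&
           (CcTotalDirtyPages + PagesToWrite >= CcDirtyPageThreshold);
}
#endif
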
#if DBG
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;
    BOOLEAN VacbDirty = vacb->Dirty;
    BOOLEAN VacbTrace = vacb->SharedCacheMap->Trace;
    BOOLEAN VacbPageOut = vacb->PageOut;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && VacbDirty));
    if (VacbTrace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, VacbDirty, VacbPageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif


/* FUNCTIONS *****************************************************************/

VOID
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
CcRosFlushVacb (
    _In_ PROS_VACB Vacb,
    _Out_opt_ PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
    BOOLEAN HaveLock = FALSE;
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    CcRosUnmarkDirtyVacb(Vacb, TRUE);

    /* Lock for flush, if we are not already the top-level */
    if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
    {
        Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
        if (!NT_SUCCESS(Status))
            goto quit;
        HaveLock = TRUE;
    }

    Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
                            &Vacb->FileOffset,
                            VACB_MAPPING_GRANULARITY,
                            Iosb);

    if (HaveLock)
    {
        FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
    }

quit:
    if (!NT_SUCCESS(Status))
        CcRosMarkDirtyVacb(Vacb);
    else
    {
        /* Update VDL */
        if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
        {
            SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
        }
    }

    return Status;
}

static
NTSTATUS
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;

    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount == 0);

    /* Remove all VACBs from the global lists */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);

        RemoveEntryList(&Vacb->VacbLruListEntry);
        InitializeListHead(&Vacb->VacbLruListEntry);

        if (Vacb->Dirty)
        {
            CcRosUnmarkDirtyVacb(Vacb, FALSE);
            /* Mark it as dirty again so we know that we have to flush before freeing it */
            Vacb->Dirty = TRUE;
        }

        current_entry = current_entry->Flink;
    }

    /* Make sure there is no trace anymore of this map */
    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Now that we're out of the locks, free everything for real */
    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
    {
        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
        ULONG RefCount;

        InitializeListHead(&Vacb->CacheMapVacbListEntry);

        /* Flush to disk, if needed */
        if (Vacb->Dirty)
        {
            IO_STATUS_BLOCK Iosb;
            NTSTATUS Status;

            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
            if (!NT_SUCCESS(Status))
            {
                /* Complain. There's not much we can do */
                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
            }
            Vacb->Dirty = FALSE;
        }

        RefCount = CcRosVacbDecRefCount(Vacb);
#if DBG // CORE-14578
        if (RefCount != 0)
        {
            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
            DPRINT1("There are: %d references left\n", RefCount);
            DPRINT1("Map: %d\n", Vacb->MappedCount);
            DPRINT1("Dirty: %d\n", Vacb->Dirty);
            if (FileObject->FileName.Length != 0)
            {
                DPRINT1("File was: %wZ\n", &FileObject->FileName);
            }
            else
            {
                DPRINT1("No name for the file\n");
            }
        }
#else
        (void)RefCount;
#endif
    }

    /* Release the references we own */
    if (SharedCacheMap->Section)
        ObDereferenceObject(SharedCacheMap->Section);
    ObDereferenceObject(SharedCacheMap->FileObject);

    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

    /* Acquire the lock again for our caller */
    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    return STATUS_SUCCESS;
}
NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL OldIrql;
    BOOLEAN FlushAll = (Target == MAXULONG);

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PROS_VACB current;
        BOOLEAN Locked;

        if (current_entry == &DirtyVacbListHead)
        {
            ASSERT(FlushAll);
            if (IsListEmpty(&DirtyVacbListHead))
                break;
            current_entry = DirtyVacbListHead.Flink;
        }

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        SharedCacheMap = current->SharedCacheMap;

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Do not lazy-write the same file concurrently. Fastfat ASSERTs on that */
        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;

        /* Keep a ref on the shared cache map */
        SharedCacheMap->OpenCount++;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            DPRINT("Not locked!\n");
            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

            if (--SharedCacheMap->OpenCount == 0)
                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

            continue;
        }

        IO_STATUS_BLOCK Iosb;
        Status = CcRosFlushVacb(current, &Iosb);

        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);

        /* We release the VACB before acquiring the lock again, because
         * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped
         * a reference. Freeing must be done outside of the lock.
         * The refcount is decremented atomically, so this is OK. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

        if (--SharedCacheMap->OpenCount == 0)
            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't overflow target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
VOID
CcRosTrimCache(
    _In_ ULONG Target,
    _Out_ PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* This code is never executed. It is left for reference only. */
#if 1
            DPRINT1("MmPageOutPhysicalAddress unexpectedly called\n");
            ASSERT(FALSE);
#else
            ULONG i;
            PFN_NUMBER Page;

            /* We have to break these locks to call MmPageOutPhysicalAddress */
            KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
#endif
        }

        /* Only keep iterating through the loop while the lock is held */
        current_entry = current_entry->Flink;

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));
}

NTSTATUS
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}

/* Returns with VACB Lock Held! */
PROS_VACB
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}
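
/*
 * A worked example of the predicate above, assuming the usual 256kB
 * (0x40000) VACB_MAPPING_GRANULARITY: a VACB whose FileOffset is 0x40000
 * covers file offsets [0x40000, 0x80000), so a lookup for 0x7FFFF matches
 * it while a lookup for 0x80000 matches the next view. Equivalently, the
 * view base owning an offset is ROUND_DOWN(FileOffset,
 * VACB_MAPPING_GRANULARITY), which is exactly how CcRosCreateVacb below
 * aligns the FileOffset it stores.
 */
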
VOID
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}

VOID
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);

    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}
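
/*
 * Note on the accounting above: a VACB is charged at full view granularity,
 * so with the usual 256kB views and 4kB pages each mark/unmark moves
 * CcTotalDirtyPages and SharedCacheMap->DirtyPages by
 * VACB_MAPPING_GRANULARITY / PAGE_SIZE = 64 pages, even if only one byte
 * in the view is actually dirty (hence the FIXME in CcRosMarkDirtyVacb).
 */
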
BOOLEAN
CcRosFreeOneUnusedVacb(
    VOID)
{
    KIRQL oldIrql;
    PLIST_ENTRY current_entry;
    PROS_VACB to_free = NULL;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACBs */
    current_entry = VacbLruListHead.Flink;
    while ((current_entry != &VacbLruListHead) && (to_free == NULL))
    {
        ULONG Refs;
        PROS_VACB current;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACBs, we will free them */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset it, this is the one we want to free */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);

            to_free = current;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);

        current_entry = current_entry->Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free the VACB that we found, if any */
    if (to_free == NULL)
    {
        return FALSE;
    }

    /* This must be its last ref */
    NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);

    return TRUE;
}

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    while (TRUE)
    {
        /* Map VACB in system space */
        Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
        if (NT_SUCCESS(Status))
        {
            break;
        }

        /*
         * If no space is left, try to prune one unused VACB to recover space
         * for our mapping. If that succeeds, retry the mapping; otherwise just fail.
         */
        if (!CcRosFreeOneUnusedVacb())
        {
            ExFreeToNPagedLookasideList(&VacbLookasideList, current);
            return Status;
        }
    }

#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
    }
#endif

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check whether a VACB for this
     * file offset already exists. If there is one, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return Status;
}
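
/*
 * The re-scan inside CcRosCreateVacb is the classic optimistic-creation
 * pattern; reduced to its skeleton it looks like the sketch below (kept
 * under "#if 0", never compiled; the Ccp* helpers are hypothetical
 * stand-ins for the lookup, allocation and insertion steps above):
 */
#if 0
static
PROS_VACB
CcpLookupOrCreateVacb(
    PROS_SHARED_CACHE_MAP Map,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb = CcRosLookupVacb(Map, FileOffset);   /* optimistic lookup */
    if (Vacb == NULL)
    {
        /* Allocate and map with all locks dropped: another thread may
         * create the same view in this window */
        PROS_VACB Fresh = CcpAllocateAndMap(Map, FileOffset);

        CcpAcquireLocks(Map);
        Vacb = CcpScanLocked(Map, FileOffset);           /* re-check under the locks */
        if (Vacb == NULL)
        {
            CcpInsertLocked(Map, Fresh);                 /* we won the race: publish ours */
            Vacb = Fresh;
            Fresh = NULL;
        }
        CcpReleaseLocks(Map);

        if (Fresh != NULL)
            CcpDiscard(Fresh);                           /* we lost: free outside the locks */
    }
    return Vacb;
}
#endif
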
BOOLEAN
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length)
{
    PVOID BaseAddress;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

#if 0
    if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->SectionSize.QuadPart)
    {
        DPRINT1("Vacb read beyond the file size!\n");
        return FALSE;
    }
#endif

    BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);

    /* Check if the pages are resident */
    if (!MmArePagesResident(NULL, BaseAddress, Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
            NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                                        Vacb->FileOffset.QuadPart + Offset,
                                                        Length,
                                                        &SharedCacheMap->ValidDataLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}


NTSTATUS
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return the VACB to the caller.
     */
    *Vacb = current;

    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
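
/*
 * Sketch of the sequence a copy path is expected to follow with the two
 * helpers above (illustrative only and kept under "#if 0": error handling
 * is reduced to the minimum and CcpCopyOutExample is a hypothetical name;
 * the CcRos* signatures are the ones from this file):
 */
#if 0
static
NTSTATUS
CcpCopyOutExample(
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,    /* anywhere inside the wanted view */
    ULONG Length,           /* assumed not to cross the view boundary */
    PVOID Buffer)
{
    PROS_VACB Vacb;
    ULONG OffsetInVacb = (ULONG)(FileOffset % VACB_MAPPING_GRANULARITY);
    NTSTATUS Status;

    Status = CcRosGetVacb(SharedCacheMap, FileOffset, &Vacb);
    if (!NT_SUCCESS(Status))
        return Status;

    /* Fault the backing pages in, reading from the file if necessary
     * (with Wait == TRUE this either succeeds or raises) */
    CcRosEnsureVacbResident(Vacb, TRUE, FALSE, OffsetInVacb, Length);

    RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + OffsetInVacb, Length);

    /* Not dirtied (read path), not newly mapped */
    return CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
}
#endif
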
NTSTATUS
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x\n",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        Vacb);
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from system address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LONGLONG FlushStart, FlushEnd;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
            SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (!SectionObjectPointers)
    {
        Status = STATUS_INVALID_PARAMETER;
        goto quit;
    }

    if (!SectionObjectPointers->SharedCacheMap)
    {
        /* Forward this to Mm */
        MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
        return;
    }

    SharedCacheMap = SectionObjectPointers->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (FileOffset)
    {
        FlushStart = FileOffset->QuadPart;
        Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
        if (!NT_SUCCESS(Status))
            goto quit;
    }
    else
    {
        FlushStart = 0;
        FlushEnd = SharedCacheMap->FileSize.QuadPart;
    }

    Status = STATUS_SUCCESS;
    if (IoStatus)
    {
        IoStatus->Information = 0;
    }

    /*
     * We flush the VACBs that we find here.
     * If there is no (dirty) VACB, it doesn't mean that there is no data to
     * flush, so we call Mm to be sure. This is suboptimal, but it is due to
     * the lack of granularity with which we track dirty cache data.
     */
    while (FlushStart < FlushEnd)
    {
        BOOLEAN DirtyVacb = FALSE;
        PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);

        if (vacb != NULL)
        {
            if (vacb->Dirty)
            {
                IO_STATUS_BLOCK VacbIosb = { 0 };
                Status = CcRosFlushVacb(vacb, &VacbIosb);
                if (!NT_SUCCESS(Status))
                {
                    goto quit;
                }
                DirtyVacb = TRUE;

                if (IoStatus)
                    IoStatus->Information += VacbIosb.Information;
            }

            CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
        }

        if (!DirtyVacb)
        {
            IO_STATUS_BLOCK MmIosb;
            LARGE_INTEGER MmOffset;

            MmOffset.QuadPart = FlushStart;

            if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
            {
                /* The whole range fits within a VACB chunk. */
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
            }
            else
            {
                ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
            }

            if (!NT_SUCCESS(Status))
                goto quit;

            if (IoStatus)
                IoStatus->Information += MmIosb.Information;

            /* Update VDL */
            if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
                SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
        }

        if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
        {
            /* We're at the end of the file! */
            break;
        }

        /* Round down to the next VACB start now */
        FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
    }

quit:
    if (IoStatus)
    {
        IoStatus->Status = Status;
    }
}

NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the FO, always
         * lock the master lock, to be sure not to race
         * with a potential ongoing read-ahead!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it */
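
/*
 * Worked example of the loop above, assuming the usual 256kB (0x40000)
 * VACB_MAPPING_GRANULARITY: flushing FileOffset 0x3F000 with Length 0x3000
 * gives FlushStart = 0x3F000 and FlushEnd = 0x42000. The first iteration
 * covers 0x3F000..0x3FFFF (the tail of the first view), then FlushStart is
 * advanced and rounded down to 0x40000; the second iteration takes the
 * "whole range fits within a VACB chunk" branch and covers
 * 0x40000..0x41FFF, after which the loop exits.
 */
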
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}

NTSTATUS
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    Allocated = FALSE;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            /* Drop the master lock before bailing out */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        InitializeListHead(&SharedCacheMap->BcbList);

        SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;

        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);

        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        //CcRosTraceCacheMap(SharedCacheMap, TRUE);
    }
    else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
    {
        /* The shared cache map is being created somewhere else. Wait for that to happen */
        KEVENT Waiter;
        PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;

        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        SharedCacheMap->CreateEvent = &Waiter;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);

        if (PreviousWaiter)
            KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }

    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Create the section */
    if (Allocated)
    {
        NTSTATUS Status;

        ASSERT(SharedCacheMap->Section == NULL);

        Status = MmCreateSection(
            &SharedCacheMap->Section,
            SECTION_ALL_ACCESS,
            NULL,
            &SharedCacheMap->SectionSize,
            PAGE_READWRITE,
            SEC_RESERVE,
            NULL,
            FileObject);

        ASSERT(NT_SUCCESS(Status));

        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseFileCache(FileObject);
            return Status;
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;

        if (SharedCacheMap->CreateEvent)
        {
            KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
            SharedCacheMap->CreateEvent = NULL;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    return STATUS_SUCCESS;
}
/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

CODE_SEG("INIT")
VOID
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)

#include <kdbg/kdb.h>

BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint("  Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Mapped = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count all the associated VACBs */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            Mapped += VACB_MAPPING_GRANULARITY / 1024;
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
    }

    return TRUE;
}
BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}

#endif // DBG && defined(KDBG)

/* EOF */