/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 */

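/* A minimal sketch of the procedure above, as it maps onto the routines in
 * this file. The historical names in the NOTES block correspond roughly to
 * CcRosInitializeFileCache, CcRosRequestVacb and CcRosReleaseVacb; the
 * callback, offset and length variables below are illustrative only:
 *
 *     if (FileObject->PrivateCacheMap == NULL)
 *         CcRosInitializeFileCache(FileObject, &FileSizes, FALSE,
 *                                  &Callbacks, LazyWriteContext);
 *
 *     // RoundedOffset must be a multiple of VACB_MAPPING_GRANULARITY,
 *     // or CcRosRequestVacb bugchecks
 *     Status = CcRosRequestVacb(SharedCacheMap, RoundedOffset, &Vacb);
 *     if (NT_SUCCESS(Status))
 *     {
 *         // Fault the data in (reading from disk if needed), then copy
 *         CcRosEnsureVacbResident(Vacb, TRUE, FALSE, OffsetInVacb, Length);
 *         RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + OffsetInVacb, Length);
 *         CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
 *     }
 */
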
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which the lazy writer will start flushing
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock protecting the deferred write list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

#if DBG
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}

ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;
    BOOLEAN VacbDirty = vacb->Dirty;
    BOOLEAN VacbTrace = vacb->SharedCacheMap->Trace;
    BOOLEAN VacbPageOut = vacb->PageOut;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && VacbDirty));
    if (VacbTrace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, VacbDirty, VacbPageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}

ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif


/* FUNCTIONS *****************************************************************/

VOID
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

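/* Usage note (illustrative): tracing is a debug-build aid; a developer can
 * flip it on around a suspect code path and watch the per-VACB refcount
 * DbgPrints from the helpers above, e.g.:
 *
 *     CcRosTraceCacheMap(SharedCacheMap, TRUE);
 *     ... exercise the cache ...
 *     CcRosTraceCacheMap(SharedCacheMap, FALSE);
 *
 * A commented-out call of this kind is left in CcRosInitializeFileCache.
 */
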
NTSTATUS
CcRosFlushVacb (
    _In_ PROS_VACB Vacb,
    _Out_opt_ PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
    BOOLEAN HaveLock = FALSE;
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    CcRosUnmarkDirtyVacb(Vacb, TRUE);

    /* Lock for flush, if we are not already the top-level */
    if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
    {
        Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
        if (!NT_SUCCESS(Status))
            goto quit;
        HaveLock = TRUE;
    }

    Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
                            &Vacb->FileOffset,
                            VACB_MAPPING_GRANULARITY,
                            Iosb);

    if (HaveLock)
    {
        FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
    }

quit:
    if (!NT_SUCCESS(Status))
        CcRosMarkDirtyVacb(Vacb);
    else
    {
        /* Update VDL */
        if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
        {
            SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
        }
    }

    return Status;
}

static
NTSTATUS
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;

    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount == 0);

    /* Remove all VACBs from the global lists */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);

        RemoveEntryList(&Vacb->VacbLruListEntry);
        InitializeListHead(&Vacb->VacbLruListEntry);

        if (Vacb->Dirty)
        {
            CcRosUnmarkDirtyVacb(Vacb, FALSE);
            /* Mark it as dirty again so we know that we have to flush before freeing it */
            Vacb->Dirty = TRUE;
        }

        current_entry = current_entry->Flink;
    }

    /* Make sure no trace of this map remains */
    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Now that we're out of the locks, free everything for real */
    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
    {
        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
        ULONG RefCount;

        InitializeListHead(&Vacb->CacheMapVacbListEntry);

        /* Flush to disk, if needed */
        if (Vacb->Dirty)
        {
            IO_STATUS_BLOCK Iosb;
            NTSTATUS Status;

            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
            if (!NT_SUCCESS(Status))
            {
                /* Complain. There's not much we can do */
                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
            }
            Vacb->Dirty = FALSE;
        }

        RefCount = CcRosVacbDecRefCount(Vacb);
#if DBG // CORE-14578
        if (RefCount != 0)
        {
            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
            DPRINT1("There are: %d references left\n", RefCount);
            DPRINT1("Map: %d\n", Vacb->MappedCount);
            DPRINT1("Dirty: %d\n", Vacb->Dirty);
            if (FileObject->FileName.Length != 0)
            {
                DPRINT1("File was: %wZ\n", &FileObject->FileName);
            }
            else
            {
                DPRINT1("No name for the file\n");
            }
        }
#else
        (void)RefCount;
#endif
    }

    /* Release the references we own */
    if (SharedCacheMap->Section)
        ObDereferenceObject(SharedCacheMap->Section);
    ObDereferenceObject(SharedCacheMap->FileObject);

    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

    /* Acquire the lock again for our caller */
    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    return STATUS_SUCCESS;
}

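/* Locking note: CcRosDeleteFileCache is called with the master lock held and
 * returns with it held, but it drops the lock internally so the VACBs can be
 * flushed and freed outside of it. Callers therefore must not assume that
 * state protected by the master lock is unchanged across the call.
 */
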
NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL OldIrql;
    BOOLEAN FlushAll = (Target == MAXULONG);

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PROS_VACB current;
        BOOLEAN Locked;

        if (current_entry == &DirtyVacbListHead)
        {
            ASSERT(FlushAll);
            if (IsListEmpty(&DirtyVacbListHead))
                break;
            current_entry = DirtyVacbListHead.Flink;
        }

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        SharedCacheMap = current->SharedCacheMap;

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Do not lazy write the same file concurrently. Fastfat ASSERTs on that */
        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;

        /* Keep a ref on the shared cache map */
        SharedCacheMap->OpenCount++;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            DPRINT("Not locked!");
            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

            if (--SharedCacheMap->OpenCount == 0)
                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

            continue;
        }

        IO_STATUS_BLOCK Iosb;
        Status = CcRosFlushVacb(current, &Iosb);

        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);

        /* We release the VACB before reacquiring the lock: CcRosFlushVacb
         * dropped a reference, so our CcRosVacbDecRefCount may be the last
         * one and free the VACB, and freeing must happen outside the lock.
         * The refcount itself is decremented atomically, so this is safe. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

        if (--SharedCacheMap->OpenCount == 0)
            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't underflow Target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

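/* Illustrative calls (sketch): Target is a page budget, and MAXULONG turns
 * the run into a full flush of the dirty list. A lazy-writer pass would look
 * like the first call, a synchronous flush-everything like the second:
 *
 *     CcRosFlushDirtyPages(PageBudget, &Flushed, FALSE, TRUE);
 *     CcRosFlushDirtyPages(MAXULONG, &Flushed, TRUE, FALSE);
 *
 * CcRosTrimCache below uses the non-waiting form to make eviction progress.
 */
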
VOID
CcRosTrimCache(
    _In_ ULONG Target,
    _Out_ PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* This code is never executed. It is left for reference only. */
#if 1
            DPRINT1("MmPageOutPhysicalAddress unexpectedly called\n");
            ASSERT(FALSE);
#else
            ULONG i;
            PFN_NUMBER Page;

            /* We have to break these locks to call MmPageOutPhysicalAddress */
            KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
#endif
        }

        /* Fetch the next entry while we still hold the lock */
        current_entry = current_entry->Flink;

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));
}

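/* Usage note (sketch): the memory manager's balancer is the expected caller
 * here; trimming is best-effort, so a caller compares what it asked for
 * with what actually came back:
 *
 *     ULONG Freed;
 *     CcRosTrimCache(PagesWanted, &Freed);
 *     // Freed <= PagesWanted; one internal flush-and-retry pass has
 *     // already been attempted if the first sweep fell short.
 */
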
NTSTATUS
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}

/* Returns with a reference held on the VACB! */
PROS_VACB
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}

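/* The per-map VACB list is kept sorted by FileOffset (see the insertion in
 * CcRosCreateVacb), which is what allows the early 'break' above once we
 * walk past the requested offset. A VACB covers a fixed-size, aligned chunk,
 * so the view that maps a given offset starts at (a sketch):
 *
 *     LONGLONG ViewStart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
 */
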
VOID
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}

VOID
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);

    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}

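/* Note how the two routines above pair up: marking dirty takes an extra
 * reference on the VACB (so it cannot be freed while on the dirty list) and
 * unmarking drops it. This is also why CcRosVacbDecRefCount_ asserts that a
 * VACB never reaches a refcount of zero while still dirty.
 */
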
BOOLEAN
CcRosFreeOneUnusedVacb(
    VOID)
{
    KIRQL oldIrql;
    PLIST_ENTRY current_entry;
    PROS_VACB to_free = NULL;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACBs */
    current_entry = VacbLruListHead.Flink;
    while ((current_entry != &VacbLruListHead) && (to_free == NULL))
    {
        ULONG Refs;
        PROS_VACB current;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACBs: those are the ones we will free */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Unlink it, this is the one we want to free */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);

            to_free = current;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);

        current_entry = current_entry->Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free the VACB that we found, if any */
    if (to_free == NULL)
    {
        return FALSE;
    }

    /* This must be its last ref */
    NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);

    return TRUE;
}

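/* This routine exists to recover system address space: when the mapping in
 * CcRosCreateVacb below fails for lack of space, one unused view is evicted
 * and the mapping is retried. Scanning from the head of the LRU list means
 * the least recently used candidate goes first, keeping hot views mapped.
 */
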
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    if (!current)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    current->BaseAddress = NULL;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    while (TRUE)
    {
        /* Map VACB in system space */
        Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
        if (NT_SUCCESS(Status))
        {
            break;
        }

        /*
         * If no space is left, try to prune one unused VACB to recover space
         * to map our VACB. If that succeeds, retry the mapping; otherwise just fail.
         */
        if (!CcRosFreeOneUnusedVacb())
        {
            ExFreeToNPagedLookasideList(&VacbLookasideList, current);
            return Status;
        }
    }

#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
    }
#endif

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check whether a VACB for the
     * file offset already exists. If one does, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p (found existing one 0x%p)\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return Status;
}

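/* The lookup/create/recheck dance above is the classic optimistic-creation
 * pattern: CcRosGetVacb below first calls CcRosLookupVacb without creating,
 * then calls CcRosCreateVacb on a miss; since the master lock is dropped
 * while the view is being mapped, the list must be rechecked under the lock
 * before the new VACB is inserted.
 */
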
BOOLEAN
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length
)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

#if 0
    if ((Vacb->FileOffset.QuadPart + Offset) > SharedCacheMap->SectionSize.QuadPart)
    {
        DPRINT1("Vacb read beyond the file size!\n");
        return FALSE;
    }
#endif

    /* Check if the pages are resident */
    if (!MmIsDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                 Vacb->FileOffset.QuadPart + Offset,
                                 Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                                        Vacb->FileOffset.QuadPart + Offset,
                                                        Length,
                                                        &SharedCacheMap->ValidDataLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}


NTSTATUS
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return the VACB to the caller.
     */
    *Vacb = current;

    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be a multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        Vacb);
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

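/* The 0xfd fill above deliberately poisons the freed VACB, so a stale
 * pointer dereference shows up as a recognizable 0xfdfdfdfd pattern when
 * chasing use-after-free bugs, instead of silently reading valid-looking
 * data recycled through the lookaside list.
 */
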
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LONGLONG FlushStart, FlushEnd;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
            SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (!SectionObjectPointers)
    {
        Status = STATUS_INVALID_PARAMETER;
        goto quit;
    }

    if (!SectionObjectPointers->SharedCacheMap)
    {
        /* Forward this to Mm */
        MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
        return;
    }

    SharedCacheMap = SectionObjectPointers->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (FileOffset)
    {
        FlushStart = FileOffset->QuadPart;
        Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
        if (!NT_SUCCESS(Status))
            goto quit;
    }
    else
    {
        FlushStart = 0;
        FlushEnd = SharedCacheMap->FileSize.QuadPart;
    }

    Status = STATUS_SUCCESS;
    if (IoStatus)
    {
        IoStatus->Information = 0;
    }

    KeAcquireGuardedMutex(&SharedCacheMap->FlushCacheLock);

    /*
     * We flush the VACBs that we find here.
     * If there is no (dirty) VACB, that doesn't mean there is no data to
     * flush, so we call Mm to be sure. This is suboptimal, but it is a
     * consequence of how coarsely we track dirty cache data.
     */
    while (FlushStart < FlushEnd)
    {
        BOOLEAN DirtyVacb = FALSE;
        PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);

        if (vacb != NULL)
        {
            if (vacb->Dirty)
            {
                IO_STATUS_BLOCK VacbIosb = { 0 };
                Status = CcRosFlushVacb(vacb, &VacbIosb);
                if (!NT_SUCCESS(Status))
                {
                    CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
                    break;
                }
                DirtyVacb = TRUE;

                if (IoStatus)
                    IoStatus->Information += VacbIosb.Information;
            }

            CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
        }

        if (!DirtyVacb)
        {
            IO_STATUS_BLOCK MmIosb;
            LARGE_INTEGER MmOffset;

            MmOffset.QuadPart = FlushStart;

            if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
            {
                /* The whole range fits within a VACB chunk. */
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
            }
            else
            {
                ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
            }

            if (!NT_SUCCESS(Status))
                break;

            if (IoStatus)
                IoStatus->Information += MmIosb.Information;

            /* Update VDL */
            if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
                SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
        }

        if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
        {
            /* We're at the end of the file! */
            break;
        }

        /* Round down to the next VACB start now */
        FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
    }

    KeReleaseGuardedMutex(&SharedCacheMap->FlushCacheLock);

quit:
    if (IoStatus)
    {
        IoStatus->Status = Status;
    }
}

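/* Caller's view (sketch): a filesystem typically flushes a whole file by
 * passing a NULL offset, which the code above expands to [0, FileSize):
 *
 *     IO_STATUS_BLOCK Iosb;
 *     CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, &Iosb);
 *     // Iosb.Status is the flush result,
 *     // Iosb.Information accumulates the bytes flushed
 */
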
NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the file object, always
         * take the master lock, to be sure not to race
         * with any read-ahead that may be in progress!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}

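/* Lifetime note: OpenCount counts private cache maps (one per cached handle,
 * plus transient references such as the one CcRosFlushDirtyPages takes). The
 * first handle reuses the PRIVATE_CACHE_MAP embedded in the shared map,
 * later ones are pool-allocated; when the last reference goes away the whole
 * cache map is torn down via CcRosDeleteFileCache.
 */
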
NTSTATUS
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    Allocated = FALSE;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        InitializeListHead(&SharedCacheMap->BcbList);
        KeInitializeGuardedMutex(&SharedCacheMap->FlushCacheLock);

        SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;

        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);

        Allocated = TRUE;
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        //CcRosTraceCacheMap(SharedCacheMap, TRUE);
    }
    else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
    {
        /* The shared cache map is being created somewhere else. Wait for that to happen */
        KEVENT Waiter;
        PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;

        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        SharedCacheMap->CreateEvent = &Waiter;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);

        if (PreviousWaiter)
            KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }

    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Create the section */
    if (Allocated)
    {
        NTSTATUS Status;

        ASSERT(SharedCacheMap->Section == NULL);

        Status = MmCreateSection(
            &SharedCacheMap->Section,
            SECTION_ALL_ACCESS,
            NULL,
            &SharedCacheMap->SectionSize,
            PAGE_READWRITE,
            SEC_RESERVE,
            NULL,
            FileObject);

        ASSERT(NT_SUCCESS(Status));

        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseFileCache(FileObject);
            return Status;
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;

        if (SharedCacheMap->CreateEvent)
        {
            KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
            SharedCacheMap->CreateEvent = NULL;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    return STATUS_SUCCESS;
}

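/* Note on the SHARED_CACHE_MAP_IN_CREATION handshake above: waiters chain
 * their stack-allocated events through CreateEvent, so the creator only has
 * to signal the most recent waiter; each woken waiter then signals the one
 * it displaced. This keeps the wait machinery off the heap while the section
 * is being created outside the master lock.
 */
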
/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

CODE_SEG("INIT")
VOID
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)

#include <kdbg/kdb.h>

BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint(" Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Mapped = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count all the associated VACBs */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            Mapped += VACB_MAPPING_GRANULARITY / 1024;
        }

        /* Set up the name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
    }

    return TRUE;
}

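/* The magic numbers in the FsContext probe above are assumptions about
 * FastFAT's FCB layout: a node type of 0x0502 with a node size of 0x1F8 is
 * taken to be a FastFAT FCB, whose long file name UNICODE_STRING happens to
 * live at offset 0x100. This is a debugger-only heuristic for recovering a
 * display name when the file object itself carries none.
 */
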
BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}

#endif // DBG && defined(KDBG)

/* EOF */