/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/cc/view.c
 * PURPOSE:     Cache manager
 *
 * PROGRAMMERS: David Welch (welch@mcmail.com)
 *              Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache, nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated yet, do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4 KB region which is being read or written, obtain a cache
 * page by calling CcRequestCachePage.
 *
 * (3) If the page is being read, or is not being completely written, and it
 * is not up to date, then read its data from the underlying medium. If the
 * read fails, call CcReleaseCachePage with VALID set to FALSE and return an
 * error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 *
 * (An illustrative sketch of this sequence follows below.)
 */
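/* A minimal sketch of the five-step protocol above, shown as illustrative
 * pseudo-C inside this comment (it is not compiled). The FCB layout, the
 * MyReadSector helper and the exact Cc prototypes are assumptions made for
 * the sake of the example; only the step sequence is prescribed by the note.
 *
 *   NTSTATUS
 *   MyFsdRead(PFCB Fcb, LONGLONG Offset, ULONG Length, PVOID Buffer)
 *   {
 *       PVOID Page;
 *       BOOLEAN Valid;
 *
 *       // (1) Initiate caching on first use
 *       if (!Fcb->CacheInitialized)
 *           CcInitializeFileCache(Fcb->FileObject, ...);
 *
 *       // (2) Obtain the cache page covering this 4 KB region
 *       CcRequestCachePage(Fcb, Offset, &Page, &Valid);
 *
 *       // (3) Reading and not up to date: fetch from the medium first
 *       if (!Valid && !NT_SUCCESS(MyReadSector(Fcb, Offset, Page)))
 *       {
 *           CcReleaseCachePage(Fcb, Page, FALSE);   // VALID == FALSE
 *           return STATUS_UNSUCCESSFUL;
 *       }
 *
 *       // (4) Copy the data out of the page...
 *       RtlCopyMemory(Buffer, (PUCHAR)Page + (Offset % PAGE_SIZE), Length);
 *
 *       // (5) ...and release the cache page
 *       CcReleaseCachePage(Fcb, Page, TRUE);
 *       return STATUS_SUCCESS;
 *   }
 */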
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

#if DBG
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif


/* FUNCTIONS *****************************************************************/

VOID
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
CcRosFlushVacb (
    _In_ PROS_VACB Vacb,
    _Out_opt_ PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
    BOOLEAN HaveLock = FALSE;
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    CcRosUnmarkDirtyVacb(Vacb, TRUE);

    /* Lock for flush, if we are not already the top-level */
    if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
    {
        Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
        if (!NT_SUCCESS(Status))
            goto quit;
        HaveLock = TRUE;
    }

    Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
                            &Vacb->FileOffset,
                            VACB_MAPPING_GRANULARITY,
                            Iosb);

    if (HaveLock)
    {
        FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
    }

quit:
    if (!NT_SUCCESS(Status))
        CcRosMarkDirtyVacb(Vacb);
    else
    {
        /* Update VDL */
        if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
        {
            SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
        }
    }

    return Status;
}

static
NTSTATUS
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;

    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount == 0);

    /* Remove all VACBs from the global lists */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);

        RemoveEntryList(&Vacb->VacbLruListEntry);
        InitializeListHead(&Vacb->VacbLruListEntry);

        if (Vacb->Dirty)
        {
            CcRosUnmarkDirtyVacb(Vacb, FALSE);
            /* Mark it as dirty again so we know that we have to flush before freeing it */
            Vacb->Dirty = TRUE;
        }

        current_entry = current_entry->Flink;
    }

    /* Make sure there is no trace anymore of this map */
    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Now that we're out of the locks, free everything for real */
    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
    {
        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
        ULONG RefCount;

        InitializeListHead(&Vacb->CacheMapVacbListEntry);

        /* Flush to disk, if needed */
        if (Vacb->Dirty)
        {
            IO_STATUS_BLOCK Iosb;
            NTSTATUS Status;

            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
            if (!NT_SUCCESS(Status))
            {
                /* Complain. There's not much we can do */
                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
            }
            Vacb->Dirty = FALSE;
        }

        RefCount = CcRosVacbDecRefCount(Vacb);
#if DBG // CORE-14578
        if (RefCount != 0)
        {
            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
            DPRINT1("There are: %lu references left\n", RefCount);
            DPRINT1("Map: %lu\n", Vacb->MappedCount);
            DPRINT1("Dirty: %u\n", Vacb->Dirty);
            if (FileObject->FileName.Length != 0)
            {
                DPRINT1("File was: %wZ\n", &FileObject->FileName);
            }
            else
            {
                DPRINT1("No name for the file\n");
            }
        }
#else
        (void)RefCount;
#endif
    }

    /* Release the references we own */
    if (SharedCacheMap->Section)
        ObDereferenceObject(SharedCacheMap->Section);
    ObDereferenceObject(SharedCacheMap->FileObject);

    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

    /* Acquire the lock again for our caller */
    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL OldIrql;
    BOOLEAN FlushAll = (Target == MAXULONG);

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PROS_VACB current;
        BOOLEAN Locked;

        if (current_entry == &DirtyVacbListHead)
        {
            ASSERT(FlushAll);
            if (IsListEmpty(&DirtyVacbListHead))
                break;
            current_entry = DirtyVacbListHead.Flink;
        }

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        SharedCacheMap = current->SharedCacheMap;

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy-write files that asked not to */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Do not lazy-write the same file concurrently. Fastfat ASSERTs on that */
        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;

        /* Keep a ref on the shared cache map */
        SharedCacheMap->OpenCount++;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            DPRINT("Not locked!\n");
            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

            if (--SharedCacheMap->OpenCount == 0)
                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

            continue;
        }

        IO_STATUS_BLOCK Iosb;
        Status = CcRosFlushVacb(current, &Iosb);

        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);

        /* Release the VACB before reacquiring the lock: CcRosVacbDecRefCount
         * might free the VACB, since CcRosFlushVacb dropped a reference, and
         * freeing must be done outside of the lock. The refcount is
         * decremented atomically, so this is safe. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

        if (--SharedCacheMap->OpenCount == 0)
            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't overflow target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

VOID
CcRosTrimCache(
    _In_ ULONG Target,
    _Out_ PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));
}

NTSTATUS
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}
/* Returns a referenced VACB mapping the given offset, or NULL if none exists. */
PROS_VACB
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}

VOID
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}

VOID
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);

    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}

BOOLEAN
CcRosFreeOneUnusedVacb(
    VOID)
{
    KIRQL oldIrql;
    PLIST_ENTRY current_entry;
    PROS_VACB to_free = NULL;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACBs */
    current_entry = VacbLruListHead.Flink;
    while ((current_entry != &VacbLruListHead) && (to_free == NULL))
    {
        ULONG Refs;
        PROS_VACB current;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACBs; we will free them */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset it, this is the one we want to free */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);

            to_free = current;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);

        current_entry = current_entry->Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free the VACB that we found, if any. */
    if (to_free == NULL)
    {
        return FALSE;
    }

    /* This must be its last ref */
    NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);

    return TRUE;
}

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    while (TRUE)
    {
        /* Map the VACB in system space */
        Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
        if (NT_SUCCESS(Status))
        {
            break;
        }

        /*
         * If no space is left, try to prune one unused VACB to recover space
         * for our mapping. If that succeeds, retry the mapping; otherwise
         * just fail.
         */
        if (!CcRosFreeOneUnusedVacb())
        {
            ExFreeToNPagedLookasideList(&VacbLookasideList, current);
            return Status;
        }
    }

#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
    }
#endif

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb and
     * CcRosCreateVacb. We must check whether a VACB for this file offset
     * already exists. If one does, we release our newly created VACB and
     * return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return Status;
}

BOOLEAN
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length
)
{
    PVOID BaseAddress;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

#if 0
    if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->SectionSize.QuadPart)
    {
        DPRINT1("Vacb read beyond the file size!\n");
        return FALSE;
    }
#endif

    BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);

    /* Check if the pages are resident */
    if (!MmArePagesResident(NULL, BaseAddress, Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
            NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                                        Vacb->FileOffset.QuadPart + Offset,
                                                        Length,
                                                        &SharedCacheMap->ValidDataLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}


NTSTATUS
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return the VACB to the caller.
     */
    *Vacb = current;

    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x\n",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        Vacb);
}
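/* For reference, a sketch of how the helpers above are meant to be driven by
 * a Cc copy path (illustrative only, inside this comment; the local names
 * are invented here and error handling is trimmed):
 *
 *   PROS_VACB Vacb;
 *   LONGLONG Rounded = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
 *   ULONG VacbOffset = (ULONG)(FileOffset % VACB_MAPPING_GRANULARITY);
 *
 *   Status = CcRosRequestVacb(SharedCacheMap, Rounded, &Vacb);
 *   if (!NT_SUCCESS(Status))
 *       return Status;
 *
 *   // Fault the backing pages in (reads from the file unless NoRead)
 *   if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, Length))
 *   {
 *       // Caller must come back with Wait == TRUE
 *       CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
 *       return STATUS_CANT_WAIT;
 *   }
 *
 *   RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + VacbOffset, Length);
 *
 *   // Dirty == TRUE after a write; Mapped == TRUE when keeping a mapping
 *   CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
 */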
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %lu\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LONGLONG FlushStart, FlushEnd;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
            SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (!SectionObjectPointers)
    {
        Status = STATUS_INVALID_PARAMETER;
        goto quit;
    }

    if (!SectionObjectPointers->SharedCacheMap)
    {
        /* Forward this to Mm */
        MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
        return;
    }

    SharedCacheMap = SectionObjectPointers->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (FileOffset)
    {
        FlushStart = FileOffset->QuadPart;
        Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
        if (!NT_SUCCESS(Status))
            goto quit;
    }
    else
    {
        FlushStart = 0;
        FlushEnd = SharedCacheMap->FileSize.QuadPart;
    }

    Status = STATUS_SUCCESS;
    if (IoStatus)
    {
        IoStatus->Information = 0;
    }

    /*
     * We flush the VACBs that we find here.
     * If there is no (dirty) VACB, it doesn't mean that there is no data to
     * flush, so we call Mm to be sure. This is suboptimal, but it is due to
     * the lack of granularity in how we track dirty cache data.
     */
    while (FlushStart < FlushEnd)
    {
        BOOLEAN DirtyVacb = FALSE;
        PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);

        if (vacb != NULL)
        {
            if (vacb->Dirty)
            {
                IO_STATUS_BLOCK VacbIosb = { 0 };
                Status = CcRosFlushVacb(vacb, &VacbIosb);
                if (!NT_SUCCESS(Status))
                {
                    goto quit;
                }
                DirtyVacb = TRUE;

                if (IoStatus)
                    IoStatus->Information += VacbIosb.Information;
            }

            CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
        }

        if (!DirtyVacb)
        {
            IO_STATUS_BLOCK MmIosb;
            LARGE_INTEGER MmOffset;

            MmOffset.QuadPart = FlushStart;

            if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
            {
                /* The whole range fits within a VACB chunk. */
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
            }
            else
            {
                ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
            }

            if (!NT_SUCCESS(Status))
                goto quit;

            if (IoStatus)
                IoStatus->Information += MmIosb.Information;

            /* Update VDL */
            if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
                SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
        }

        if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
        {
            /* We're at the end of the file! */
            break;
        }

        /* Round down to the next VACB start now */
        FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
    }

quit:
    if (IoStatus)
    {
        IoStatus->Status = Status;
    }
}

NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the file object, always
         * hold the master lock, to be sure not to race with an ongoing
         * read-ahead!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}

NTSTATUS
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    Allocated = FALSE;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            /* Drop the master lock before bailing out */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        InitializeListHead(&SharedCacheMap->BcbList);

        SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;

        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);

        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        //CcRosTraceCacheMap(SharedCacheMap, TRUE);
    }
    else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
    {
        /* The shared cache map is being created somewhere else.
         * Wait for that to happen */
        KEVENT Waiter;
        PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;

        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        SharedCacheMap->CreateEvent = &Waiter;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);

        if (PreviousWaiter)
            KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }

    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Create the section */
    if (Allocated)
    {
        NTSTATUS Status;

        ASSERT(SharedCacheMap->Section == NULL);

        Status = MmCreateSection(
            &SharedCacheMap->Section,
            SECTION_ALL_ACCESS,
            NULL,
            &SharedCacheMap->SectionSize,
            PAGE_READWRITE,
            SEC_RESERVE,
            NULL,
            FileObject);

        ASSERT(NT_SUCCESS(Status));

        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseFileCache(FileObject);
            return Status;
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;

        if (SharedCacheMap->CreateEvent)
        {
            KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
            SharedCacheMap->CreateEvent = NULL;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    return STATUS_SUCCESS;
}
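/* A filesystem is not expected to call CcRosInitializeFileCache directly:
 * the public CcInitializeCacheMap entry point (implemented elsewhere in Cc)
 * forwards here. An FSD-side sketch for illustration; the FCB layout and
 * the callback table name are assumptions of this example:
 *
 *   CC_FILE_SIZES Sizes;
 *
 *   Sizes.AllocationSize  = Fcb->Header.AllocationSize;
 *   Sizes.FileSize        = Fcb->Header.FileSize;
 *   Sizes.ValidDataLength = Fcb->Header.ValidDataLength;
 *
 *   CcInitializeCacheMap(FileObject,
 *                        &Sizes,
 *                        FALSE,                        // no pin access
 *                        &MyFsdCacheManagerCallbacks,  // Acquire/ReleaseForLazyWrite etc.
 *                        Fcb);                         // lazy-write context
 */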
/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

CODE_SEG("INIT")
VOID
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)

#include <kdbg/kdb.h>

BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint(" Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Mapped = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count all the associated VACBs */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            Mapped += VACB_MAPPING_GRANULARITY / 1024;
        }

        /* Set up the name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%lu\t%lu\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
    }

    return TRUE;
}

BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}

#endif // DBG && defined(KDBG)

/* EOF */