/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated, do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written, obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If the page is being read, or is only partially written, and it is not
 * up to date, then read its data from the underlying medium. If the read
 * fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 */
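
/* The sketch below illustrates the five steps above in the form of a
 * hypothetical filesystem read dispatch routine. It is not compiled, and it
 * follows the routine names used in the NOTES comment; MY_FCB,
 * Fcb->CacheInitialized and MyReadFromDisk are made-up placeholders for
 * filesystem-specific pieces.
 */
#if 0
NTSTATUS
MyReadDispatch(
    PMY_FCB Fcb,
    LONGLONG FileOffset,    /* assumed 4k-aligned for brevity */
    ULONG Length,           /* assumed a multiple of 4k */
    PVOID Buffer)
{
    LONGLONG Offset;

    /* (1) Initiate caching on first use */
    if (!Fcb->CacheInitialized)
    {
        CcInitializeFileCache(Fcb->FileObject /* , ... */);
        Fcb->CacheInitialized = TRUE;
    }

    for (Offset = FileOffset; Offset < FileOffset + Length; Offset += PAGE_SIZE)
    {
        PVOID CachePage;
        BOOLEAN UpToDate;
        NTSTATUS Status;

        /* (2) Obtain a cache page for this 4k region */
        CcRequestCachePage(Fcb, Offset, &CachePage, &UpToDate);

        /* (3) We are reading, so bring the page up to date if necessary */
        if (!UpToDate)
        {
            Status = MyReadFromDisk(Fcb, Offset, PAGE_SIZE, CachePage);
            if (!NT_SUCCESS(Status))
            {
                CcReleaseCachePage(Fcb, CachePage, FALSE); /* VALID == FALSE */
                return Status;
            }
        }

        /* (4) Copy the data out of the page */
        RtlCopyMemory((PUCHAR)Buffer + (Offset - FileOffset), CachePage, PAGE_SIZE);

        /* (5) Release the cache page */
        CcReleaseCachePage(Fcb, CachePage, TRUE);
    }

    return STATUS_SUCCESS;
}
#endif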

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which the lazy writer will take action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock protecting the deferred write list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

#if DBG
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}

ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}

ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif


/* FUNCTIONS *****************************************************************/

VOID
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
CcRosFlushVacb (
    _In_ PROS_VACB Vacb,
    _Out_opt_ PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
    BOOLEAN HaveLock = FALSE;
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    CcRosUnmarkDirtyVacb(Vacb, TRUE);

    /* Lock for flush, if we are not already the top-level */
    if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
    {
        Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
        if (!NT_SUCCESS(Status))
            goto quit;
        HaveLock = TRUE;
    }

    Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
                            &Vacb->FileOffset,
                            VACB_MAPPING_GRANULARITY,
                            Iosb);

    if (HaveLock)
    {
        FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
    }

quit:
    if (!NT_SUCCESS(Status))
        CcRosMarkDirtyVacb(Vacb);
    else
    {
        /* Update VDL */
        if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
        {
            SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
        }
    }

    return Status;
}
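
/* Illustrative sketch (not compiled): how a caller that already owns the file
 * resources for flushing can declare itself top-level, so that CcRosFlushVacb
 * above skips FsRtlAcquireFileForCcFlushEx. This mirrors the standard
 * IoGetTopLevelIrp()/IoSetTopLevelIrp() convention; the wrapper function
 * itself is hypothetical, and Vacb is assumed to be dirty, as
 * CcRosUnmarkDirtyVacb asserts on that.
 */
#if 0
NTSTATUS
ExampleFlushAsTopLevel(PROS_VACB Vacb, PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
    PIRP PreviousTopLevel = IoGetTopLevelIrp();

    /* Tell Cc that the FS locks are already held by this thread */
    IoSetTopLevelIrp((PIRP)FSRTL_CACHE_TOP_LEVEL_IRP);

    Status = CcRosFlushVacb(Vacb, Iosb);

    /* Restore whatever was there before */
    IoSetTopLevelIrp(PreviousTopLevel);

    return Status;
}
#endif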

static
NTSTATUS
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;

    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount == 0);

    /* Remove all VACBs from the global lists */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);

        RemoveEntryList(&Vacb->VacbLruListEntry);
        InitializeListHead(&Vacb->VacbLruListEntry);

        if (Vacb->Dirty)
        {
            CcRosUnmarkDirtyVacb(Vacb, FALSE);
            /* Mark it as dirty again so we know that we have to flush before freeing it */
            Vacb->Dirty = TRUE;
        }

        current_entry = current_entry->Flink;
    }

    /* Make sure there is no trace anymore of this map */
    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Now that we're out of the locks, free everything for real */
    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
    {
        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
        ULONG RefCount;

        InitializeListHead(&Vacb->CacheMapVacbListEntry);

        /* Flush to disk, if needed */
        if (Vacb->Dirty)
        {
            IO_STATUS_BLOCK Iosb;
            NTSTATUS Status;

            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
            if (!NT_SUCCESS(Status))
            {
                /* Complain. There's not much we can do */
                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
            }
            Vacb->Dirty = FALSE;
        }

        RefCount = CcRosVacbDecRefCount(Vacb);
#if DBG // CORE-14578
        if (RefCount != 0)
        {
            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
            DPRINT1("There are: %d references left\n", RefCount);
            DPRINT1("Map: %d\n", Vacb->MappedCount);
            DPRINT1("Dirty: %d\n", Vacb->Dirty);
            if (FileObject->FileName.Length != 0)
            {
                DPRINT1("File was: %wZ\n", &FileObject->FileName);
            }
            else
            {
                DPRINT1("No name for the file\n");
            }
        }
#else
        (void)RefCount;
#endif
    }

    /* Release the references we own */
    if (SharedCacheMap->Section)
        ObDereferenceObject(SharedCacheMap->Section);
    ObDereferenceObject(SharedCacheMap->FileObject);

    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

    /* Acquire the lock again for our caller */
    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    return STATUS_SUCCESS;
}
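
/* Note on the teardown above: it is deliberately two-phased. VACBs are first
 * unlinked from the global lists while both locks are held, so no other
 * thread can find them anymore; the actual flushing and freeing then happens
 * outside the locks, because flushing does paging I/O and freeing unmaps a
 * system view, neither of which can be done while holding a spinlock at
 * DISPATCH_LEVEL.
 */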

NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL OldIrql;
    BOOLEAN FlushAll = (Target == MAXULONG);

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PROS_VACB current;
        BOOLEAN Locked;

        if (current_entry == &DirtyVacbListHead)
        {
            ASSERT(FlushAll);
            if (IsListEmpty(&DirtyVacbListHead))
                break;
            current_entry = DirtyVacbListHead.Flink;
        }

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        SharedCacheMap = current->SharedCacheMap;

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Do not lazy-write the same file concurrently. Fastfat ASSERTs on that */
        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;

        /* Keep a ref on the shared cache map */
        SharedCacheMap->OpenCount++;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            DPRINT("Not locked!\n");
            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

            if (--SharedCacheMap->OpenCount == 0)
                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

            continue;
        }

        IO_STATUS_BLOCK Iosb;
        Status = CcRosFlushVacb(current, &Iosb);

        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);

        /* We release the VACB before acquiring the lock again, because
         * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a
         * refcount. Freeing must be done outside of the lock.
         * The refcount is decremented atomically, so this is OK. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

        if (--SharedCacheMap->OpenCount == 0)
            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't underflow the target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

NTSTATUS
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}
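
/* Illustrative sketch (not compiled): the typical pairing of CcRosGetVacb
 * (defined below) with CcRosReleaseVacb when a caller modifies cached data.
 * Passing Dirty == TRUE queues the VACB for the lazy writer via
 * CcRosMarkDirtyVacb. "ExampleDirtyOneVacb" is a hypothetical helper.
 */
#if 0
NTSTATUS
ExampleDirtyOneVacb(
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;
    NTSTATUS Status;

    /* Unlike CcRosRequestVacb, CcRosGetVacb accepts an offset anywhere
     * inside the view */
    Status = CcRosGetVacb(SharedCacheMap, FileOffset, &Vacb);
    if (!NT_SUCCESS(Status))
        return Status;

    /* ... modify the cached data through Vacb->BaseAddress ... */

    /* Drop our reference and mark the view dirty in one go */
    return CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE);
}
#endif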

/* Returns with a reference held on the VACB */
PROS_VACB
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64d)\n",
           SharedCacheMap, FileOffset);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}

VOID
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}

VOID
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);

    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}
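
/* Accounting note for the two routines above: with the usual 256kB
 * VACB_MAPPING_GRANULARITY and 4kB pages, each mark/unmark moves
 * CcTotalDirtyPages by VACB_MAPPING_GRANULARITY / PAGE_SIZE == 64 pages,
 * even if only a single byte of the view was actually written. This
 * coarseness is what the FIXME in CcRosMarkDirtyVacb refers to.
 */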

BOOLEAN
CcRosFreeOneUnusedVacb(
    VOID)
{
    KIRQL oldIrql;
    PLIST_ENTRY current_entry;
    PROS_VACB to_free = NULL;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACBs */
    current_entry = VacbLruListHead.Flink;
    while ((current_entry != &VacbLruListHead) && (to_free == NULL))
    {
        ULONG Refs;
        PROS_VACB current;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACBs, we will free them */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset it, this is the one we want to free */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);

            to_free = current;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);

        current_entry = current_entry->Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free the VACB that we found, if any */
    if (to_free == NULL)
    {
        return FALSE;
    }

    /* This must be its last ref */
    NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);

    return TRUE;
}

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    if (current == NULL)
    {
        /* Lookaside allocation can fail; bail out instead of dereferencing NULL */
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    current->BaseAddress = NULL;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    while (TRUE)
    {
        /* Map the VACB in system space */
        Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
        if (NT_SUCCESS(Status))
        {
            break;
        }

        /*
         * If no space is left, try to prune one unused VACB to recover space for our mapping.
         * If that succeeds, retry the mapping, otherwise just fail.
         */
        if (!CcRosFreeOneUnusedVacb())
        {
            ExFreeToNPagedLookasideList(&VacbLookasideList, current);
            return Status;
        }
    }

#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
    }
#endif

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check whether a VACB for this
     * file offset already exists. If so, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    return Status;
}

BOOLEAN
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length
    )
{
    PVOID BaseAddress;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

#if 0
    if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->SectionSize.QuadPart)
    {
        DPRINT1("Vacb read beyond the file size!\n");
        return FALSE;
    }
#endif

    BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);

    /* Check if the pages are resident */
    if (!MmArePagesResident(NULL, BaseAddress, Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
            NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                                        Vacb->FileOffset.QuadPart + Offset,
                                                        Length,
                                                        &SharedCacheMap->ValidDataLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}
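
/* Illustrative sketch (not compiled): a no-wait fast path on top of
 * CcRosEnsureVacbResident above. A caller running with Wait == FALSE gets
 * FALSE back when the backing pages are absent, and is expected to retry
 * later with Wait == TRUE instead of blocking. "ExampleCopyFromVacb" is a
 * hypothetical helper.
 */
#if 0
BOOLEAN
ExampleCopyFromVacb(
    PROS_VACB Vacb,
    ULONG Offset,       /* offset inside the view; Offset + Length must not
                         * exceed VACB_MAPPING_GRANULARITY */
    ULONG Length,
    PVOID Buffer,
    BOOLEAN CanWait)
{
    /* Fault the range in (or fail politely when CanWait == FALSE) */
    if (!CcRosEnsureVacbResident(Vacb, CanWait, FALSE, Offset, Length))
        return FALSE;

    RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + Offset, Length);
    return TRUE;
}
#endif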

NTSTATUS
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return the VACB to the caller.
     */
    *Vacb = current;

    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad file offset %I64x, should be a multiple of %x\n",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        Vacb);
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from system address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    /* Poison the VACB before returning it to the lookaside list */
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
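
/* Alignment note: CcRosRequestVacb above bugchecks on file offsets that are
 * not VACB-aligned. With the usual 256kB granularity, an offset of 0x48000
 * (288kB) is rejected; the caller has to request 0x40000 (256kB) and address
 * the remaining 0x8000 bytes through the view, e.g. at
 * (PUCHAR)Vacb->BaseAddress + (FileOffset - Vacb->FileOffset.QuadPart).
 */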

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LONGLONG FlushStart, FlushEnd;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
            SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (!SectionObjectPointers)
    {
        Status = STATUS_INVALID_PARAMETER;
        goto quit;
    }

    if (!SectionObjectPointers->SharedCacheMap)
    {
        /* Forward this to Mm */
        MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
        return;
    }

    SharedCacheMap = SectionObjectPointers->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (FileOffset)
    {
        FlushStart = FileOffset->QuadPart;
        Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
        if (!NT_SUCCESS(Status))
            goto quit;
    }
    else
    {
        FlushStart = 0;
        FlushEnd = SharedCacheMap->FileSize.QuadPart;
    }

    Status = STATUS_SUCCESS;
    if (IoStatus)
    {
        IoStatus->Information = 0;
    }

    /*
     * We flush the VACBs that we find here.
     * If there is no (dirty) VACB, it doesn't mean that there is no data to
     * flush, so we call Mm to be sure. This is suboptimal, but it is due to
     * the lack of granularity with which we track dirty cache data.
     */
    while (FlushStart < FlushEnd)
    {
        BOOLEAN DirtyVacb = FALSE;
        PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);

        if (vacb != NULL)
        {
            if (vacb->Dirty)
            {
                IO_STATUS_BLOCK VacbIosb = { 0 };
                Status = CcRosFlushVacb(vacb, &VacbIosb);
                if (!NT_SUCCESS(Status))
                {
                    goto quit;
                }
                DirtyVacb = TRUE;

                if (IoStatus)
                    IoStatus->Information += VacbIosb.Information;
            }

            CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
        }

        if (!DirtyVacb)
        {
            IO_STATUS_BLOCK MmIosb;
            LARGE_INTEGER MmOffset;

            MmOffset.QuadPart = FlushStart;

            if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
            {
                /* The whole range fits within a VACB chunk. */
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
            }
            else
            {
                ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
            }

            if (!NT_SUCCESS(Status))
                goto quit;

            if (IoStatus)
                IoStatus->Information += MmIosb.Information;

            /* Update VDL */
            if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
                SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
        }

        if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
        {
            /* We're at the end of the file! */
            break;
        }

        /* Round down to the next VACB start now */
        FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
    }

quit:
    if (IoStatus)
    {
        IoStatus->Status = Status;
    }
}
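
/* Worked example for the loop above, assuming the usual 256kB granularity:
 * flushing FileOffset == 300kB with Length == 400kB gives FlushStart == 300kB
 * and FlushEnd == 700kB. The first iteration covers 300kB..512kB
 * (MmLength == 256kB - 300kB % 256kB == 212kB), after which FlushStart is
 * advanced and rounded down to 512kB. The whole remainder then fits within
 * one VACB chunk, so the second iteration flushes 512kB..700kB and the loop
 * terminates.
 */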

NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the FO, always
         * lock the master lock, to be sure not to race
         * with a potential read-ahead ongoing!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}
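
/* Lifetime note: OpenCount counts one reference per private cache map (i.e.
 * per cached handle), plus the transient references the lazy writer takes in
 * CcRosFlushDirtyPages. Whoever performs the decrement to zero, here or in
 * the lazy writer, is responsible for calling CcRosDeleteFileCache.
 */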

NTSTATUS
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    Allocated = FALSE;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            /* Don't return while still holding the master lock */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        InitializeListHead(&SharedCacheMap->BcbList);

        SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;

        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);

        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        //CcRosTraceCacheMap(SharedCacheMap, TRUE);
    }
    else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
    {
        /* The shared cache map is being created somewhere else. Wait for that to happen */
        KEVENT Waiter;
        PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;

        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        SharedCacheMap->CreateEvent = &Waiter;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);

        if (PreviousWaiter)
            KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }

    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Create the section */
    if (Allocated)
    {
        NTSTATUS Status;

        ASSERT(SharedCacheMap->Section == NULL);

        Status = MmCreateSection(
            &SharedCacheMap->Section,
            SECTION_ALL_ACCESS,
            NULL,
            &SharedCacheMap->SectionSize,
            PAGE_READWRITE,
            SEC_RESERVE,
            NULL,
            FileObject);

        ASSERT(NT_SUCCESS(Status));

        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseFileCache(FileObject);
            return Status;
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;

        if (SharedCacheMap->CreateEvent)
        {
            KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
            SharedCacheMap->CreateEvent = NULL;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    return STATUS_SUCCESS;
}
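
/* Illustrative sketch (not compiled): what the filesystem side of the call
 * above might look like. CC_FILE_SIZES is filled from the common FCB header;
 * "MyFcb" and "MyCacheManagerCallbacks" (providing AcquireForLazyWrite /
 * ReleaseFromLazyWrite and the read-ahead pair) are hypothetical.
 */
#if 0
CC_FILE_SIZES FileSizes;

FileSizes.AllocationSize = MyFcb->Header.AllocationSize;
FileSizes.FileSize = MyFcb->Header.FileSize;
FileSizes.ValidDataLength = MyFcb->Header.ValidDataLength;

/* One call per file; subsequent handles just get a private cache map */
Status = CcRosInitializeFileCache(FileObject,
                                  &FileSizes,
                                  FALSE,                      /* PinAccess */
                                  &MyCacheManagerCallbacks,
                                  MyFcb);                     /* LazyWriteContext */
#endif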

/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

CODE_SEG("INIT")
VOID
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)

#include <kdbg/kdb.h>

BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint("  Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Mapped = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count all the associated VACBs */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            Mapped += VACB_MAPPING_GRANULARITY / 1024;
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%lu\t%lu\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
    }

    return TRUE;
}

BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}

#endif // DBG && defined(KDBG)

/* EOF */