/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page (a schematic example of the whole procedure
 * follows below).
 */

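/* A minimal sketch of steps (1)-(5) above. It uses the historical names from
 * the note (CcInitializeFileCache, CcRequestCachePage, CcReleaseCachePage) and
 * an assumed helper SampleFsdReadFromDisk; the routines actually implemented
 * in this file are the CcRos* functions below, so treat this purely as an
 * illustration of the intended call sequence. */
#if 0
NTSTATUS
SampleFsdRead(
    PFILE_OBJECT FileObject,
    LONGLONG Offset,
    PVOID Buffer)
{
    PVOID Page;
    BOOLEAN Valid;
    NTSTATUS Status;

    /* (1) Initiate caching for the FCB once */
    if (FileObject->PrivateCacheMap == NULL)
        CcInitializeFileCache(FileObject);

    /* (2) Obtain the cache page covering this 4k region */
    Status = CcRequestCachePage(FileObject, Offset, &Page, &Valid);
    if (!NT_SUCCESS(Status))
        return Status;

    /* (3) Page is being read and is not up to date: fetch it from the medium */
    if (!Valid)
    {
        Status = SampleFsdReadFromDisk(FileObject, Offset, Page);
        if (!NT_SUCCESS(Status))
        {
            /* Read failed: release with VALID == FALSE and bail out */
            CcReleaseCachePage(FileObject, Page, FALSE);
            return Status;
        }
    }

    /* (4) Copy the data out of the page */
    RtlCopyMemory(Buffer, Page, PAGE_SIZE);

    /* (5) Release the cache page */
    CcReleaseCachePage(FileObject, Page, TRUE);
    return STATUS_SUCCESS;
}
#endif
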
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which the lazy writer will start flushing
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock protecting the deferred write list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

#if DBG
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}

ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}

ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif


/* FUNCTIONS *****************************************************************/

VOID
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
CcRosFlushVacb (
    _In_ PROS_VACB Vacb,
    _Out_opt_ PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
    BOOLEAN HaveLock = FALSE;
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    CcRosUnmarkDirtyVacb(Vacb, TRUE);

    /* Lock for flush, if we are not already the top-level */
    if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
    {
        Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
        if (!NT_SUCCESS(Status))
            goto quit;
        HaveLock = TRUE;
    }

    Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
                            &Vacb->FileOffset,
                            VACB_MAPPING_GRANULARITY,
                            Iosb);

    if (HaveLock)
    {
        FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
    }

quit:
    if (!NT_SUCCESS(Status))
        CcRosMarkDirtyVacb(Vacb);
    else
    {
        /* Update VDL */
        if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
        {
            SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
        }
    }

    return Status;
}

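/* CcRosFlushVacb skips FsRtlAcquireFileForCcFlushEx when the current thread
 * already declared itself the cache manager's top level component. A hedged
 * sketch of that declaration, using the documented IoSetTopLevelIrp API
 * (illustrative; not a call sequence taken from this file): */
#if 0
PIRP PreviousTopLevel = IoGetTopLevelIrp();
IoSetTopLevelIrp((PIRP)FSRTL_CACHE_TOP_LEVEL_IRP);

Status = CcRosFlushVacb(Vacb, &Iosb); /* No file lock taken inside */

IoSetTopLevelIrp(PreviousTopLevel);
#endif
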
static
NTSTATUS
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;

    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount == 0);

    /* Remove all VACBs from the global lists */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);

        RemoveEntryList(&Vacb->VacbLruListEntry);
        InitializeListHead(&Vacb->VacbLruListEntry);

        if (Vacb->Dirty)
        {
            CcRosUnmarkDirtyVacb(Vacb, FALSE);
            /* Mark it as dirty again so we know that we have to flush before freeing it */
            Vacb->Dirty = TRUE;
        }

        current_entry = current_entry->Flink;
    }

    /* Make sure there is no trace anymore of this map */
    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Now that we're out of the locks, free everything for real */
    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
    {
        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
        ULONG RefCount;

        InitializeListHead(&Vacb->CacheMapVacbListEntry);

        /* Flush to disk, if needed */
        if (Vacb->Dirty)
        {
            IO_STATUS_BLOCK Iosb;
            NTSTATUS Status;

            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
            if (!NT_SUCCESS(Status))
            {
                /* Complain. There's not much we can do */
                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
            }
            Vacb->Dirty = FALSE;
        }

        RefCount = CcRosVacbDecRefCount(Vacb);
#if DBG // CORE-14578
        if (RefCount != 0)
        {
            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
            DPRINT1("There are: %d references left\n", RefCount);
            DPRINT1("Map: %d\n", Vacb->MappedCount);
            DPRINT1("Dirty: %d\n", Vacb->Dirty);
            if (FileObject->FileName.Length != 0)
            {
                DPRINT1("File was: %wZ\n", &FileObject->FileName);
            }
            else
            {
                DPRINT1("No name for the file\n");
            }
        }
#else
        (void)RefCount;
#endif
    }

    /* Release the references we own */
    if (SharedCacheMap->Section)
        ObDereferenceObject(SharedCacheMap->Section);
    ObDereferenceObject(SharedCacheMap->FileObject);

    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

    /* Acquire the lock again for our caller */
    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL OldIrql;
    BOOLEAN FlushAll = (Target == MAXULONG);

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PROS_VACB current;
        BOOLEAN Locked;

        if (current_entry == &DirtyVacbListHead)
        {
            ASSERT(FlushAll);
            if (IsListEmpty(&DirtyVacbListHead))
                break;
            current_entry = DirtyVacbListHead.Flink;
        }

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        SharedCacheMap = current->SharedCacheMap;

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Do not lazy-write the same file concurrently. Fastfat ASSERTs on that */
        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;

        /* Keep a ref on the shared cache map */
        SharedCacheMap->OpenCount++;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            DPRINT("Not locked!");
            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

            if (--SharedCacheMap->OpenCount == 0)
                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

            continue;
        }

        IO_STATUS_BLOCK Iosb;
        Status = CcRosFlushVacb(current, &Iosb);

        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);

        /* We release the VACB before acquiring the lock again, because
         * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a
         * refcount. Freeing must be done outside of the lock.
         * The refcount is decremented atomically, so this is OK. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

        if (--SharedCacheMap->OpenCount == 0)
            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't underflow Target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

NTSTATUS
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}

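/* How a byte offset in the file relates to a VACB: each VACB maps a
 * VACB_MAPPING_GRANULARITY-sized, granularity-aligned view of the file
 * (256 kB at the time of writing). A small illustration of the arithmetic
 * used by the lookup and copy paths; the variable names are ad hoc: */
#if 0
LONGLONG VacbStart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
ULONG OffsetInVacb = (ULONG)(FileOffset % VACB_MAPPING_GRANULARITY);
/* With a 256 kB granularity, FileOffset 0x42000 falls in the VACB whose
 * FileOffset is 0x40000, at offset 0x2000 inside its mapping: */
PVOID DataAddress = (PUCHAR)Vacb->BaseAddress + OffsetInVacb;
#endif
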
/* Returns with a reference held on the VACB! */
PROS_VACB
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}

VOID
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}

VOID
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);

    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}

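/* The accounting above charges a whole view at a time: with the 256 kB
 * granularity and 4 kB x86 pages that is 64 pages per dirty VACB, even if
 * only a single byte of the view was written (hence the FIXME). A
 * compile-time statement of that arithmetic, valid under those size
 * assumptions: */
#if 0
C_ASSERT((VACB_MAPPING_GRANULARITY / PAGE_SIZE) == 64); /* 256k / 4k */
#endif
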
static
BOOLEAN
CcRosFreeUnusedVacb (
    PULONG Count)
{
    ULONG cFreed;
    BOOLEAN Freed;
    KIRQL oldIrql;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    PLIST_ENTRY current_entry;

    cFreed = 0;
    Freed = FALSE;
    InitializeListHead(&FreeList);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACBs */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACBs, we will free them */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset and move to free list */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free any of the found VACBs; that'll free memory! */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
        ++cFreed;
    }

    /* If we freed at least one VACB, return success */
    if (cFreed != 0)
    {
        Freed = TRUE;
    }

    /* If the caller asked for the free count, return it */
    if (Count != NULL)
    {
        *Count = cFreed;
    }

    return Freed;
}

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    BOOLEAN Retried;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    Retried = FALSE;
Retry:
    /* Map VACB in system space */
    Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
    if (!NT_SUCCESS(Status))
    {
        ULONG Freed;
        /* If no space is left, try to prune unused VACBs to recover space for
         * mapping ours. If that succeeds, retry the mapping; otherwise just
         * fail. */
        if (!Retried && CcRosFreeUnusedVacb(&Freed))
        {
            DPRINT("Pruned %d VACBs, trying again\n", Freed);
            Retried = TRUE;
            goto Retry;
        }

        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
        return Status;
    }

#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
    }
#endif

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check whether a VACB for the
     * file offset already exists. If there is one, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    return Status;
}

BOOLEAN
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length
)
{
    PVOID BaseAddress;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

#if 0
    if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->SectionSize.QuadPart)
    {
        DPRINT1("Vacb read beyond the file size!\n");
        return FALSE;
    }
#endif

    BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);

    /* Check if the pages are resident */
    if (!MmArePagesResident(NULL, BaseAddress, Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
            NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                                        Vacb->FileOffset.QuadPart + Offset,
                                                        Length,
                                                        &SharedCacheMap->ValidDataLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}

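/* A hedged sketch of how a Cc copy path can drive the routines around this
 * point (CcRosGetVacb, CcRosEnsureVacbResident, CcRosReleaseVacb); the
 * surrounding variables (Buffer, Length, FileOffset) are assumptions, not
 * taken from an actual caller: */
#if 0
PROS_VACB Vacb;
NTSTATUS Status;
ULONG OffsetInVacb = (ULONG)(FileOffset % VACB_MAPPING_GRANULARITY);

Status = CcRosGetVacb(SharedCacheMap,
                      ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY),
                      &Vacb);
if (NT_SUCCESS(Status))
{
    /* Fault the backing pages in, reading from the medium as needed */
    if (CcRosEnsureVacbResident(Vacb, TRUE, FALSE, OffsetInVacb, Length))
    {
        RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + OffsetInVacb, Length);
    }
    CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
}
#endif
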
NTSTATUS
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return the VACB to the caller.
     */
    *Vacb = current;

    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad file offset %I64x: should be a multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        Vacb);
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from system address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LONGLONG FlushStart, FlushEnd;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
            SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (!SectionObjectPointers)
    {
        Status = STATUS_INVALID_PARAMETER;
        goto quit;
    }

    if (!SectionObjectPointers->SharedCacheMap)
    {
        /* Forward this to Mm */
        MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
        return;
    }

    SharedCacheMap = SectionObjectPointers->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (FileOffset)
    {
        FlushStart = FileOffset->QuadPart;
        Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
        if (!NT_SUCCESS(Status))
            goto quit;
    }
    else
    {
        FlushStart = 0;
        FlushEnd = SharedCacheMap->FileSize.QuadPart;
    }

    Status = STATUS_SUCCESS;
    if (IoStatus)
    {
        IoStatus->Information = 0;
    }

    /*
     * We flush the VACBs that we find here.
     * If there is no (dirty) VACB, it doesn't mean that there is no data to
     * flush, so we call Mm to be sure. This is suboptimal, but it is due to
     * the lack of granularity with which we track dirty cache data.
     */
    while (FlushStart < FlushEnd)
    {
        BOOLEAN DirtyVacb = FALSE;
        PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);

        if (vacb != NULL)
        {
            if (vacb->Dirty)
            {
                IO_STATUS_BLOCK VacbIosb = { 0 };
                Status = CcRosFlushVacb(vacb, &VacbIosb);
                if (!NT_SUCCESS(Status))
                {
                    goto quit;
                }
                DirtyVacb = TRUE;

                if (IoStatus)
                    IoStatus->Information += VacbIosb.Information;
            }

            CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
        }

        if (!DirtyVacb)
        {
            IO_STATUS_BLOCK MmIosb;
            LARGE_INTEGER MmOffset;

            MmOffset.QuadPart = FlushStart;

            if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
            {
                /* The whole range fits within a VACB chunk. */
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
            }
            else
            {
                ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
            }

            if (!NT_SUCCESS(Status))
                goto quit;

            if (IoStatus)
                IoStatus->Information += MmIosb.Information;

            /* Update VDL */
            if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
                SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
        }

        if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
        {
            /* We're at the end of the file! */
            break;
        }

        /* Round down to the next VACB start now */
        FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
    }

quit:
    if (IoStatus)
    {
        IoStatus->Status = Status;
    }
}

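/* Worked example of the chunking above, assuming the 256 kB (0x40000) VACB
 * granularity: flushing [0x41000, 0x83000) proceeds as
 *   pass 1: FlushStart = 0x41000, partial chunk of length
 *           0x40000 - 0x1000 = 0x3F000, covering [0x41000, 0x80000);
 *           FlushStart then advances and is rounded down to 0x80000.
 *   pass 2: the remainder [0x80000, 0x83000) now fits within one VACB
 *           chunk, so the whole-range branch flushes the final 0x3000 bytes.
 */
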
NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the file object, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}

NTSTATUS
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    Allocated = FALSE;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            /* Drop the master lock before bailing out */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        InitializeListHead(&SharedCacheMap->BcbList);

        SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;

        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);

        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        //CcRosTraceCacheMap(SharedCacheMap, TRUE);
    }
    else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
    {
        /* The shared cache map is being created somewhere else. Wait for that to happen */
        KEVENT Waiter;
        PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;

        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        SharedCacheMap->CreateEvent = &Waiter;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);

        if (PreviousWaiter)
            KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }

    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Create the section */
    if (Allocated)
    {
        NTSTATUS Status;

        ASSERT(SharedCacheMap->Section == NULL);

        Status = MmCreateSection(
            &SharedCacheMap->Section,
            SECTION_ALL_ACCESS,
            NULL,
            &SharedCacheMap->SectionSize,
            PAGE_READWRITE,
            SEC_RESERVE,
            NULL,
            FileObject);

        ASSERT(NT_SUCCESS(Status));

        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseFileCache(FileObject);
            return Status;
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;

        if (SharedCacheMap->CreateEvent)
        {
            KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
            SharedCacheMap->CreateEvent = NULL;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    return STATUS_SUCCESS;
}

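/* How the SHARED_CACHE_MAP_IN_CREATION handshake above releases several
 * concurrent waiters even though CreateEvent only holds one event at a time
 * (illustrative timeline; T1 is the creating thread):
 *   T2 arrives: installs its event E2 in CreateEvent, waits on E2
 *   T3 arrives: installs E3, saving E2 as PreviousWaiter, waits on E3
 *   T1 finishes: signals CreateEvent (== E3) and clears the field
 *   T3 wakes and signals its PreviousWaiter E2, waking T2 in turn
 * so the waiters form a chain that unwinds from the most recent one. */
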
/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

CODE_SEG("INIT")
VOID
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint(" Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Mapped = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACBs */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            Mapped += VACB_MAPPING_GRANULARITY / 1024;
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
    }

    return TRUE;
}

BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}
#endif

/* EOF */