/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
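
/* Schematic example of the procedure above. This is an illustrative sketch
 * only, not part of the driver: the FCB layout, MyReadSectors and the Page
 * fields are hypothetical, and the Cc* calls are shown without their real
 * argument lists.
 *
 *   NTSTATUS MyFsdRead(PMY_FCB Fcb, LONGLONG Offset, ULONG Length, PVOID Buffer)
 *   {
 *       if (!Fcb->CacheInitialized)
 *           CcInitializeFileCache(...);                        // step (1)
 *
 *       for (each 4k region R in [Offset, Offset + Length))
 *       {
 *           CcRequestCachePage(..., &Page);                    // step (2)
 *           if (!Page->Valid)                                  // step (3)
 *           {
 *               if (!NT_SUCCESS(MyReadSectors(Fcb, R, Page)))
 *               {
 *                   CcReleaseCachePage(..., FALSE);            // VALID == FALSE
 *                   return STATUS_UNEXPECTED_IO_ERROR;
 *               }
 *           }
 *           RtlCopyMemory(Buffer, Page->Data, ...);            // step (4)
 *           CcReleaseCachePage(..., TRUE);                     // step (5)
 *       }
 *       return STATUS_SUCCESS;
 *   }
 */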
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

#if DBG
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;
    BOOLEAN VacbDirty = vacb->Dirty;
    BOOLEAN VacbTrace = vacb->SharedCacheMap->Trace;
    BOOLEAN VacbPageOut = vacb->PageOut;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && VacbDirty));
    if (VacbTrace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, VacbDirty, VacbPageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif
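
/* Note: callers use the CcRosVacbIncRefCount/CcRosVacbDecRefCount/
 * CcRosVacbGetRefCount names; on DBG builds these presumably expand, via
 * macros in the internal Cc header, to the functions above with
 * __FILE__/__LINE__ appended, while release builds skip the tracing. */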


/* FUNCTIONS *****************************************************************/

VOID
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
CcRosFlushVacb (
    _In_ PROS_VACB Vacb,
    _Out_opt_ PIO_STATUS_BLOCK Iosb)
{
    NTSTATUS Status;
    BOOLEAN HaveLock = FALSE;
    PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;

    CcRosUnmarkDirtyVacb(Vacb, TRUE);

    /* Lock for flush, if we are not already the top-level */
    if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
    {
        Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
        if (!NT_SUCCESS(Status))
            goto quit;
        HaveLock = TRUE;
    }

    Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
                            &Vacb->FileOffset,
                            VACB_MAPPING_GRANULARITY,
                            Iosb);

    if (HaveLock)
    {
        FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
    }

quit:
    if (!NT_SUCCESS(Status))
        CcRosMarkDirtyVacb(Vacb);
    else
    {
        /* Update VDL */
        if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
        {
            SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
        }
    }

    return Status;
}

static
NTSTATUS
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;

    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount == 0);

    /* Remove all VACBs from the global lists */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);

        RemoveEntryList(&Vacb->VacbLruListEntry);
        InitializeListHead(&Vacb->VacbLruListEntry);

        if (Vacb->Dirty)
        {
            CcRosUnmarkDirtyVacb(Vacb, FALSE);
            /* Mark it as dirty again so we know that we have to flush before freeing it */
            Vacb->Dirty = TRUE;
        }

        current_entry = current_entry->Flink;
    }

    /* Make sure there is no trace anymore of this map */
    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Now that we're out of the locks, free everything for real */
    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
    {
        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
        ULONG RefCount;

        InitializeListHead(&Vacb->CacheMapVacbListEntry);

        /* Flush to disk, if needed */
        if (Vacb->Dirty)
        {
            IO_STATUS_BLOCK Iosb;
            NTSTATUS Status;

            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
            if (!NT_SUCCESS(Status))
            {
                /* Complain. There's not much we can do */
                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
            }
            Vacb->Dirty = FALSE;
        }

        RefCount = CcRosVacbDecRefCount(Vacb);
#if DBG // CORE-14578
        if (RefCount != 0)
        {
            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
            DPRINT1("There are: %d references left\n", RefCount);
            DPRINT1("Map: %d\n", Vacb->MappedCount);
            DPRINT1("Dirty: %d\n", Vacb->Dirty);
            if (FileObject->FileName.Length != 0)
            {
                DPRINT1("File was: %wZ\n", &FileObject->FileName);
            }
            else
            {
                DPRINT1("No name for the file\n");
            }
        }
#else
        (void)RefCount;
#endif
    }

    /* Release the references we own */
    if (SharedCacheMap->Section)
        ObDereferenceObject(SharedCacheMap->Section);
    ObDereferenceObject(SharedCacheMap->FileObject);

    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

    /* Acquire the lock again for our caller */
    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL OldIrql;
    BOOLEAN FlushAll = (Target == MAXULONG);

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PROS_VACB current;
        BOOLEAN Locked;

        if (current_entry == &DirtyVacbListHead)
        {
            ASSERT(FlushAll);
            if (IsListEmpty(&DirtyVacbListHead))
                break;
            current_entry = DirtyVacbListHead.Flink;
        }

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        SharedCacheMap = current->SharedCacheMap;

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Do not lazy-write the same file concurrently. Fastfat ASSERTs on that */
        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;

        /* Keep a ref on the shared cache map */
        SharedCacheMap->OpenCount++;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            DPRINT("Not locked!");
            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

            if (--SharedCacheMap->OpenCount == 0)
                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

            continue;
        }

        IO_STATUS_BLOCK Iosb;
        Status = CcRosFlushVacb(current, &Iosb);

        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);

        /* We release the VACB before acquiring the lock again, because
         * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a
         * refcount. Freeing must be done outside of the lock.
         * The refcount is decremented atomically, so this is OK. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

        if (--SharedCacheMap->OpenCount == 0)
            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't overflow target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

VOID
CcRosTrimCache(
    _In_ ULONG Target,
    _Out_ PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* This code is never executed. It is left for reference only. */
#if 1
            DPRINT1("MmPageOutPhysicalAddress unexpectedly called\n");
            ASSERT(FALSE);
#else
            ULONG i;
            PFN_NUMBER Page;

            /* We have to break these locks to call MmPageOutPhysicalAddress */
            KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
#endif
        }

        /* Only keep iterating through the loop while the lock is held */
        current_entry = current_entry->Flink;

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));
}

NTSTATUS
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}

/* Returns with VACB Lock Held! */
PROS_VACB
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
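        /* The per-map VACB list is kept sorted by file offset (see the
         * insertion logic in CcRosCreateVacb), so once we have walked past
         * the requested offset there can be no match further on. */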
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}

VOID
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
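    /* The dirty list owns a reference to the VACB, so it cannot be freed
     * while dirty; CcRosUnmarkDirtyVacb drops that reference again. */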
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}

VOID
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);

    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}

BOOLEAN
CcRosFreeOneUnusedVacb(
    VOID)
{
    KIRQL oldIrql;
    PLIST_ENTRY current_entry;
    PROS_VACB to_free = NULL;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACB */
    current_entry = VacbLruListHead.Flink;
    while ((current_entry != &VacbLruListHead) && (to_free == NULL))
    {
        ULONG Refs;
        PROS_VACB current;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACB, we will free them */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset it, this is the one we want to free */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);

            to_free = current;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);

        current_entry = current_entry->Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free the VACB that we found, if any. */
    if (to_free == NULL)
    {
        return FALSE;
    }

    /* This must be its last ref */
    NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);

    return TRUE;
}

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    if (!current)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    current->BaseAddress = NULL;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    while (TRUE)
    {
        /* Map VACB in system space */
        Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
        if (NT_SUCCESS(Status))
        {
            break;
        }

        /*
         * If no space left, try to prune one unused VACB to recover space to map our VACB.
         * If it succeeds, retry the mapping, otherwise just fail.
         */
        if (!CcRosFreeOneUnusedVacb())
        {
            ExFreeToNPagedLookasideList(&VacbLookasideList, current);
            return Status;
        }
    }

#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
    }
#endif

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check whether a VACB for the
     * file offset already exists. If there is one, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. Insert ours, keeping the list sorted by file offset. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return Status;
}

BOOLEAN
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length
)
{
    PVOID BaseAddress;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

#if 0
    if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->SectionSize.QuadPart)
    {
        DPRINT1("Vacb read beyond the file size!\n");
        return FALSE;
    }
#endif

    BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);

    /* Check if the pages are resident */
    if (!MmArePagesResident(NULL, BaseAddress, Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
            NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                                        Vacb->FileOffset.QuadPart + Offset,
                                                        Length,
                                                        &SharedCacheMap->ValidDataLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}


NTSTATUS
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return the VACB to the caller.
     */
    *Vacb = current;

    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}

NTSTATUS
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        Vacb);
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LONGLONG FlushStart, FlushEnd;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
            SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (!SectionObjectPointers)
    {
        Status = STATUS_INVALID_PARAMETER;
        goto quit;
    }

    if (!SectionObjectPointers->SharedCacheMap)
    {
        /* Forward this to Mm */
        MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
        return;
    }

    SharedCacheMap = SectionObjectPointers->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (FileOffset)
    {
        FlushStart = FileOffset->QuadPart;
        Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
        if (!NT_SUCCESS(Status))
            goto quit;
    }
    else
    {
        FlushStart = 0;
        FlushEnd = SharedCacheMap->FileSize.QuadPart;
    }

    Status = STATUS_SUCCESS;
    if (IoStatus)
    {
        IoStatus->Information = 0;
    }

    KeAcquireGuardedMutex(&SharedCacheMap->FlushCacheLock);

    /*
     * We flush the VACBs that we find here.
     * If there is no (dirty) VACB, it doesn't mean that there is no data to flush, so we call Mm to be sure.
     * This is suboptimal, but this is due to the lack of granularity of how we track dirty cache data.
     */
    while (FlushStart < FlushEnd)
    {
        BOOLEAN DirtyVacb = FALSE;
        PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);

        if (vacb != NULL)
        {
            if (vacb->Dirty)
            {
                IO_STATUS_BLOCK VacbIosb = { 0 };
                Status = CcRosFlushVacb(vacb, &VacbIosb);
                if (!NT_SUCCESS(Status))
                {
                    CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
                    break;
                }
                DirtyVacb = TRUE;

                if (IoStatus)
                    IoStatus->Information += VacbIosb.Information;
            }

            CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
        }

        if (!DirtyVacb)
        {
            IO_STATUS_BLOCK MmIosb;
            LARGE_INTEGER MmOffset;

            MmOffset.QuadPart = FlushStart;

            if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
            {
                /* The whole range fits within a VACB chunk. */
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
            }
            else
            {
                ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
            }
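
            /* Worked example, assuming the usual 256 kB VACB granularity:
             * flushing [300 kB, 700 kB) first takes the else branch and
             * flushes 212 kB up to the 512 kB boundary; after rounding down,
             * the remaining [512 kB, 700 kB) fits within one chunk and the
             * last 188 kB goes through the branch above. */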

            if (!NT_SUCCESS(Status))
                break;

            if (IoStatus)
                IoStatus->Information += MmIosb.Information;

            /* Update VDL */
            if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
                SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
        }

        if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
        {
            /* We're at the end of the file! */
            break;
        }

        /* Round down to the next VACB start now */
        FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
    }

    KeReleaseGuardedMutex(&SharedCacheMap->FlushCacheLock);

quit:
    if (IoStatus)
    {
        IoStatus->Status = Status;
    }
}

NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}

NTSTATUS
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    Allocated = FALSE;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            /* Don't return with the master lock still held */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        InitializeListHead(&SharedCacheMap->BcbList);
        KeInitializeGuardedMutex(&SharedCacheMap->FlushCacheLock);

        SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;

        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);

        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        //CcRosTraceCacheMap(SharedCacheMap, TRUE);
    }
    else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
    {
        /* The shared cache map is being created somewhere else. Wait for that to happen */
        KEVENT Waiter;
        PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;

        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        SharedCacheMap->CreateEvent = &Waiter;
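
        /* Waiters chain up: the creator only wakes the most recent waiter
         * (see the KeSetEvent at the end of this function), and each woken
         * waiter then wakes the waiter it displaced, until all are released. */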

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);

        if (PreviousWaiter)
            KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }

    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Create the section */
    if (Allocated)
    {
        NTSTATUS Status;

        ASSERT(SharedCacheMap->Section == NULL);

        Status = MmCreateSection(
            &SharedCacheMap->Section,
            SECTION_ALL_ACCESS,
            NULL,
            &SharedCacheMap->SectionSize,
            PAGE_READWRITE,
            SEC_RESERVE,
            NULL,
            FileObject);

        ASSERT(NT_SUCCESS(Status));

        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseFileCache(FileObject);
            return Status;
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;

        if (SharedCacheMap->CreateEvent)
        {
            KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
            SharedCacheMap->CreateEvent = NULL;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

CODE_SEG("INIT")
VOID
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)

#include <kdbg/kdb.h>

BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint(" Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Mapped = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACB */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            Mapped += VACB_MAPPING_GRANULARITY / 1024;
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
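        /* Otherwise try to match FastFAT's FCB layout by hand: NodeTypeCode
         * 0x0502 and NodeByteSize 0x1F8 presumably identify a fastfat FCB,
         * with its long name stored as a UNICODE_STRING at offset 0x100. */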
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
    }

    return TRUE;
}

BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}

#endif // DBG && defined(KDBG)

/* EOF */