xref: /reactos/ntoskrnl/cc/view.c (revision 9cfd8dd9)
1 /*
2  * COPYRIGHT:       See COPYING in the top level directory
3  * PROJECT:         ReactOS kernel
4  * FILE:            ntoskrnl/cc/view.c
5  * PURPOSE:         Cache manager
6  *
7  * PROGRAMMERS:     David Welch (welch@mcmail.com)
8  *                  Pierre Schweitzer (pierre@reactos.org)
9  */
10 
11 /* NOTES **********************************************************************
12  *
13  * This is not the NT implementation of a file cache nor anything much like
14  * it.
15  *
16  * The general procedure for a filesystem to implement a read or write
17  * dispatch routine is as follows
18  *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
21  *
22  * (2) For each 4k region which is being read or written obtain a cache page
23  * by calling CcRequestCachePage.
24  *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28  *
29  * (4) Copy the data into or out of the page as necessary.
30  *
31  * (5) Release the cache page
32  */
33 /* INCLUDES ******************************************************************/
34 
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38 
39 /* GLOBALS *******************************************************************/
40 
41 LIST_ENTRY DirtyVacbListHead;
42 static LIST_ENTRY VacbLruListHead;
43 
44 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
45 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
46 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
47 
48 /* Internal vars (MS):
49  * - Threshold above which lazy writer will start action
50  * - Amount of dirty pages
51  * - List for deferred writes
52  * - Spinlock when dealing with the deferred list
53  * - List for "clean" shared cache maps
54  */
55 ULONG CcDirtyPageThreshold = 0;
56 ULONG CcTotalDirtyPages = 0;
57 LIST_ENTRY CcDeferredWrites;
58 KSPIN_LOCK CcDeferredWriteSpinLock;
59 LIST_ENTRY CcCleanSharedCacheMapList;
60 
61 #if DBG
62 ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
63 {
64     ULONG Refs;
65 
66     Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
67     if (vacb->SharedCacheMap->Trace)
68     {
69         DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
70                  file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
71     }
72 
73     return Refs;
74 }
75 ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
76 {
77     ULONG Refs;
78     BOOLEAN VacbDirty = vacb->Dirty;
79     BOOLEAN VacbTrace = vacb->SharedCacheMap->Trace;
80     BOOLEAN VacbPageOut = vacb->PageOut;
81 
82     Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
83     ASSERT(!(Refs == 0 && VacbDirty));
84     if (VacbTrace)
85     {
86         DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
87                  file, line, vacb, Refs, VacbDirty, VacbPageOut);
88     }
89 
90     if (Refs == 0)
91     {
92         CcRosInternalFreeVacb(vacb);
93     }
94 
95     return Refs;
96 }
97 ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
98 {
99     ULONG Refs;
100 
101     Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
102     if (vacb->SharedCacheMap->Trace)
103     {
104         DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
105                  file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
106     }
107 
108     return Refs;
109 }
110 #endif
111 
112 
113 /* FUNCTIONS *****************************************************************/
114 
115 VOID
116 CcRosTraceCacheMap (
117     PROS_SHARED_CACHE_MAP SharedCacheMap,
118     BOOLEAN Trace )
119 {
120 #if DBG
121     KIRQL oldirql;
122     PLIST_ENTRY current_entry;
123     PROS_VACB current;
124 
125     if (!SharedCacheMap)
126         return;
127 
128     SharedCacheMap->Trace = Trace;
129 
130     if (Trace)
131     {
132         DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
133 
134         oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
135         KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
136 
137         current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
138         while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
139         {
140             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
141             current_entry = current_entry->Flink;
142 
143             DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
144                     current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
145         }
146 
147         KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
148         KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
149     }
150     else
151     {
152         DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
153     }
154 
155 #else
156     UNREFERENCED_PARAMETER(SharedCacheMap);
157     UNREFERENCED_PARAMETER(Trace);
158 #endif
159 }
160 
161 NTSTATUS
162 CcRosFlushVacb (
163     _In_ PROS_VACB Vacb,
164     _Out_opt_ PIO_STATUS_BLOCK Iosb)
165 {
166     NTSTATUS Status;
167     BOOLEAN HaveLock = FALSE;
168     PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
169 
170     CcRosUnmarkDirtyVacb(Vacb, TRUE);
171 
172     /* Lock for flush, if we are not already the top-level */
173     if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
174     {
175         Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
176         if (!NT_SUCCESS(Status))
177             goto quit;
178         HaveLock = TRUE;
179     }
180 
181     Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
182                             &Vacb->FileOffset,
183                             VACB_MAPPING_GRANULARITY,
184                             Iosb);
185 
186     if (HaveLock)
187     {
188         FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
189     }
190 
191 quit:
192     if (!NT_SUCCESS(Status))
193         CcRosMarkDirtyVacb(Vacb);
194     else
195     {
196         /* Update VDL */
197         if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
198         {
199             SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
200         }
201     }
202 
203     return Status;
204 }
205 
206 static
207 NTSTATUS
208 CcRosDeleteFileCache (
209     PFILE_OBJECT FileObject,
210     PROS_SHARED_CACHE_MAP SharedCacheMap,
211     PKIRQL OldIrql)
212 /*
213  * FUNCTION: Releases the shared cache map associated with a file object
214  */
215 {
216     PLIST_ENTRY current_entry;
217 
218     ASSERT(SharedCacheMap);
219     ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
220     ASSERT(SharedCacheMap->OpenCount == 0);
221 
222     /* Remove all VACBs from the global lists */
223     KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
224     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
225     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
226     {
227         PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
228 
229         RemoveEntryList(&Vacb->VacbLruListEntry);
230         InitializeListHead(&Vacb->VacbLruListEntry);
231 
232         if (Vacb->Dirty)
233         {
234             CcRosUnmarkDirtyVacb(Vacb, FALSE);
235             /* Mark it as dirty again so we know that we have to flush before freeing it */
236             Vacb->Dirty = TRUE;
237         }
238 
239         current_entry = current_entry->Flink;
240     }
241 
242     /* Make sure there is no trace anymore of this map */
243     FileObject->SectionObjectPointer->SharedCacheMap = NULL;
244     RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
245 
246     KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
247     KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
248 
249     /* Now that we're out of the locks, free everything for real */
250     while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
251     {
252         PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
253         ULONG RefCount;
254 
255         InitializeListHead(&Vacb->CacheMapVacbListEntry);
256 
257         /* Flush to disk, if needed */
258         if (Vacb->Dirty)
259         {
260             IO_STATUS_BLOCK Iosb;
261             NTSTATUS Status;
262 
263             Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
264             if (!NT_SUCCESS(Status))
265             {
266                 /* Complain. There's not much we can do */
267                 DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
268             }
269             Vacb->Dirty = FALSE;
270         }
271 
272         RefCount = CcRosVacbDecRefCount(Vacb);
273 #if DBG // CORE-14578
274         if (RefCount != 0)
275         {
276             DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
277             DPRINT1("There are: %d references left\n", RefCount);
278             DPRINT1("Map: %d\n", Vacb->MappedCount);
279             DPRINT1("Dirty: %d\n", Vacb->Dirty);
280             if (FileObject->FileName.Length != 0)
281             {
282                 DPRINT1("File was: %wZ\n", &FileObject->FileName);
283             }
284             else
285             {
286                 DPRINT1("No name for the file\n");
287             }
288         }
289 #else
290         (void)RefCount;
291 #endif
292     }
293 
294     /* Release the references we own */
295     if(SharedCacheMap->Section)
296         ObDereferenceObject(SharedCacheMap->Section);
297     ObDereferenceObject(SharedCacheMap->FileObject);
298 
299     ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
300 
301     /* Acquire the lock again for our caller */
302     *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
303 
304     return STATUS_SUCCESS;
305 }
306 
307 NTSTATUS
308 CcRosFlushDirtyPages (
309     ULONG Target,
310     PULONG Count,
311     BOOLEAN Wait,
312     BOOLEAN CalledFromLazy)
313 {
314     PLIST_ENTRY current_entry;
315     NTSTATUS Status;
316     KIRQL OldIrql;
317     BOOLEAN FlushAll = (Target == MAXULONG);
318 
319     DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
320 
321     (*Count) = 0;
322 
323     KeEnterCriticalRegion();
324     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
325 
326     current_entry = DirtyVacbListHead.Flink;
327     if (current_entry == &DirtyVacbListHead)
328     {
329         DPRINT("No Dirty pages\n");
330     }
331 
332     while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
333     {
334         PROS_SHARED_CACHE_MAP SharedCacheMap;
335         PROS_VACB current;
336         BOOLEAN Locked;
337 
338         if (current_entry == &DirtyVacbListHead)
339         {
340             ASSERT(FlushAll);
341             if (IsListEmpty(&DirtyVacbListHead))
342                 break;
343             current_entry = DirtyVacbListHead.Flink;
344         }
345 
346         current = CONTAINING_RECORD(current_entry,
347                                     ROS_VACB,
348                                     DirtyVacbListEntry);
349         current_entry = current_entry->Flink;
350 
351         CcRosVacbIncRefCount(current);
352 
353         SharedCacheMap = current->SharedCacheMap;
354 
355         /* When performing lazy write, don't handle temporary files */
356         if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
357         {
358             CcRosVacbDecRefCount(current);
359             continue;
360         }
361 
362         /* Don't attempt to lazy write the files that asked not to */
363         if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
364         {
365             CcRosVacbDecRefCount(current);
366             continue;
367         }
368 
369         ASSERT(current->Dirty);
370 
371         /* Do not lazy-write the same file concurrently. Fastfat ASSERTS on that */
372         if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
373         {
374             CcRosVacbDecRefCount(current);
375             continue;
376         }
377 
378         SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;
379 
380         /* Keep a ref on the shared cache map */
381         SharedCacheMap->OpenCount++;
382 
383         KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
384 
385         Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
386         if (!Locked)
387         {
388             DPRINT("Not locked!");
389             ASSERT(!Wait);
390             CcRosVacbDecRefCount(current);
391             OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
392             SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;
393 
394             if (--SharedCacheMap->OpenCount == 0)
395                 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
396 
397             continue;
398         }
399 
400         IO_STATUS_BLOCK Iosb;
401         Status = CcRosFlushVacb(current, &Iosb);
402 
403         SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);
404 
405         /* We release the VACB before acquiring the lock again, because
406          * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a
407          * Refcount. Freeing must be done outside of the lock.
408          * The refcount is decremented atomically. So this is OK. */
409         CcRosVacbDecRefCount(current);
410         OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
411 
412         SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;
413 
414         if (--SharedCacheMap->OpenCount == 0)
415             CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
416 
417         if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
418             (Status != STATUS_MEDIA_WRITE_PROTECTED))
419         {
420             DPRINT1("CC: Failed to flush VACB.\n");
421         }
422         else
423         {
424             ULONG PagesFreed;
425 
426             /* How many pages did we free? */
427             PagesFreed = Iosb.Information / PAGE_SIZE;
428             (*Count) += PagesFreed;
429 
430             if (!Wait)
431             {
432                 /* Make sure we don't overflow target! */
433                 if (Target < PagesFreed)
434                 {
435                     /* If we would have, jump to zero directly */
436                     Target = 0;
437                 }
438                 else
439                 {
440                     Target -= PagesFreed;
441                 }
442             }
443         }
444 
445         current_entry = DirtyVacbListHead.Flink;
446     }
447 
448     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
449     KeLeaveCriticalRegion();
450 
451     DPRINT("CcRosFlushDirtyPages() finished\n");
452     return STATUS_SUCCESS;
453 }
454 
455 VOID
456 CcRosTrimCache(
457     _In_ ULONG Target,
458     _Out_ PULONG NrFreed)
459 /*
460  * FUNCTION: Try to free some memory from the file cache.
461  * ARGUMENTS:
462  *       Target - The number of pages to be freed.
463  *       NrFreed - Points to a variable where the number of pages
464  *                 actually freed is returned.
465  */
466 {
467     PLIST_ENTRY current_entry;
468     PROS_VACB current;
469     ULONG PagesFreed;
470     KIRQL oldIrql;
471     LIST_ENTRY FreeList;
472     PFN_NUMBER Page;
473     ULONG i;
474     BOOLEAN FlushedPages = FALSE;
475 
476     DPRINT("CcRosTrimCache(Target %lu)\n", Target);
477 
478     InitializeListHead(&FreeList);
479 
480     *NrFreed = 0;
481 
482 retry:
483     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
484 
485     current_entry = VacbLruListHead.Flink;
486     while (current_entry != &VacbLruListHead)
487     {
488         ULONG Refs;
489 
490         current = CONTAINING_RECORD(current_entry,
491                                     ROS_VACB,
492                                     VacbLruListEntry);
493         current_entry = current_entry->Flink;
494 
495         KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
496 
497         /* Reference the VACB */
498         CcRosVacbIncRefCount(current);
499 
500         /* Check if it's mapped and not dirty */
501         if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
502         {
503             /* Page out the VACB */
504             for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
505             {
506                 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
507 
508                 MmPageOutPhysicalAddress(Page);
509             }
510         }
511 
512         /* Dereference the VACB */
513         Refs = CcRosVacbDecRefCount(current);
514 
515         /* Check if we can free this entry now */
516         if (Refs < 2)
517         {
518             ASSERT(!current->Dirty);
519             ASSERT(!current->MappedCount);
520             ASSERT(Refs == 1);
521 
522             RemoveEntryList(&current->CacheMapVacbListEntry);
523             RemoveEntryList(&current->VacbLruListEntry);
524             InitializeListHead(&current->VacbLruListEntry);
525             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
526 
527             /* Calculate how many pages we freed for Mm */
528             PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
529             Target -= PagesFreed;
530             (*NrFreed) += PagesFreed;
531         }
532 
533         KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
534     }
535 
536     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
537 
538     /* Try flushing pages if we haven't met our target */
539     if ((Target > 0) && !FlushedPages)
540     {
541         /* Flush dirty pages to disk */
542         CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
543         FlushedPages = TRUE;
544 
545         /* We can only swap as many pages as we flushed */
546         if (PagesFreed < Target) Target = PagesFreed;
547 
548         /* Check if we flushed anything */
549         if (PagesFreed != 0)
550         {
551             /* Try again after flushing dirty pages */
552             DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
553             goto retry;
554         }
555     }
556 
557     while (!IsListEmpty(&FreeList))
558     {
559         ULONG Refs;
560 
561         current_entry = RemoveHeadList(&FreeList);
562         current = CONTAINING_RECORD(current_entry,
563                                     ROS_VACB,
564                                     CacheMapVacbListEntry);
565         InitializeListHead(&current->CacheMapVacbListEntry);
566         Refs = CcRosVacbDecRefCount(current);
567         ASSERT(Refs == 0);
568     }
569 
570     DPRINT("Evicted %lu cache pages\n", (*NrFreed));
571 }
572 
573 NTSTATUS
574 CcRosReleaseVacb (
575     PROS_SHARED_CACHE_MAP SharedCacheMap,
576     PROS_VACB Vacb,
577     BOOLEAN Dirty,
578     BOOLEAN Mapped)
579 {
580     ULONG Refs;
581     ASSERT(SharedCacheMap);
582 
583     DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);
584 
585     if (Dirty && !Vacb->Dirty)
586     {
587         CcRosMarkDirtyVacb(Vacb);
588     }
589 
590     if (Mapped)
591     {
592         if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
593         {
594             CcRosVacbIncRefCount(Vacb);
595         }
596     }
597 
598     Refs = CcRosVacbDecRefCount(Vacb);
599     ASSERT(Refs > 0);
600 
601     return STATUS_SUCCESS;
602 }
603 
604 /* Returns with VACB Lock Held! */
605 PROS_VACB
606 CcRosLookupVacb (
607     PROS_SHARED_CACHE_MAP SharedCacheMap,
608     LONGLONG FileOffset)
609 {
610     PLIST_ENTRY current_entry;
611     PROS_VACB current;
612     KIRQL oldIrql;
613 
614     ASSERT(SharedCacheMap);
615 
616     DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
617            SharedCacheMap, FileOffset);
618 
619     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
620     KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
621 
622     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
623     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
624     {
625         current = CONTAINING_RECORD(current_entry,
626                                     ROS_VACB,
627                                     CacheMapVacbListEntry);
628         if (IsPointInRange(current->FileOffset.QuadPart,
629                            VACB_MAPPING_GRANULARITY,
630                            FileOffset))
631         {
632             CcRosVacbIncRefCount(current);
633             KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
634             KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
635             return current;
636         }
637         if (current->FileOffset.QuadPart > FileOffset)
638             break;
639         current_entry = current_entry->Flink;
640     }
641 
642     KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
643     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
644 
645     return NULL;
646 }
647 
648 VOID
649 CcRosMarkDirtyVacb (
650     PROS_VACB Vacb)
651 {
652     KIRQL oldIrql;
653     PROS_SHARED_CACHE_MAP SharedCacheMap;
654 
655     SharedCacheMap = Vacb->SharedCacheMap;
656 
657     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
658     KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
659 
660     ASSERT(!Vacb->Dirty);
661 
662     InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
663     /* FIXME: There is no reason to account for the whole VACB. */
664     CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
665     Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
666     CcRosVacbIncRefCount(Vacb);
667 
668     /* Move to the tail of the LRU list */
669     RemoveEntryList(&Vacb->VacbLruListEntry);
670     InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
671 
672     Vacb->Dirty = TRUE;
673 
674     KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
675 
676     /* Schedule a lazy writer run to now that we have dirty VACB */
677     if (!LazyWriter.ScanActive)
678     {
679         CcScheduleLazyWriteScan(FALSE);
680     }
681     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
682 }
683 
684 VOID
685 CcRosUnmarkDirtyVacb (
686     PROS_VACB Vacb,
687     BOOLEAN LockViews)
688 {
689     KIRQL oldIrql;
690     PROS_SHARED_CACHE_MAP SharedCacheMap;
691 
692     SharedCacheMap = Vacb->SharedCacheMap;
693 
694     if (LockViews)
695     {
696         oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
697         KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
698     }
699 
700     ASSERT(Vacb->Dirty);
701 
702     Vacb->Dirty = FALSE;
703 
704     RemoveEntryList(&Vacb->DirtyVacbListEntry);
705     InitializeListHead(&Vacb->DirtyVacbListEntry);
706 
707     CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
708     Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
709 
710     CcRosVacbDecRefCount(Vacb);
711 
712     if (LockViews)
713     {
714         KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
715         KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
716     }
717 }
718 
719 BOOLEAN
720 CcRosFreeOneUnusedVacb(
721     VOID)
722 {
723     KIRQL oldIrql;
724     PLIST_ENTRY current_entry;
725     PROS_VACB to_free = NULL;
726 
727     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
728 
729     /* Browse all the available VACB */
730     current_entry = VacbLruListHead.Flink;
731     while ((current_entry != &VacbLruListHead) && (to_free == NULL))
732     {
733         ULONG Refs;
734         PROS_VACB current;
735 
736         current = CONTAINING_RECORD(current_entry,
737                                     ROS_VACB,
738                                     VacbLruListEntry);
739 
740         KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
741 
742         /* Only deal with unused VACB, we will free them */
743         Refs = CcRosVacbGetRefCount(current);
744         if (Refs < 2)
745         {
746             ASSERT(!current->Dirty);
747             ASSERT(!current->MappedCount);
748             ASSERT(Refs == 1);
749 
750             /* Reset it, this is the one we want to free */
751             RemoveEntryList(&current->CacheMapVacbListEntry);
752             InitializeListHead(&current->CacheMapVacbListEntry);
753             RemoveEntryList(&current->VacbLruListEntry);
754             InitializeListHead(&current->VacbLruListEntry);
755 
756             to_free = current;
757         }
758 
759         KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
760 
761         current_entry = current_entry->Flink;
762     }
763 
764     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
765 
766     /* And now, free the VACB that we found, if any. */
767     if (to_free == NULL)
768     {
769         return FALSE;
770     }
771 
772     /* This must be its last ref */
773     NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);
774 
775     return TRUE;
776 }
777 
778 static
779 NTSTATUS
780 CcRosCreateVacb (
781     PROS_SHARED_CACHE_MAP SharedCacheMap,
782     LONGLONG FileOffset,
783     PROS_VACB *Vacb)
784 {
785     PROS_VACB current;
786     PROS_VACB previous;
787     PLIST_ENTRY current_entry;
788     NTSTATUS Status;
789     KIRQL oldIrql;
790     ULONG Refs;
791     SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;
792 
793     ASSERT(SharedCacheMap);
794 
795     DPRINT("CcRosCreateVacb()\n");
796 
797     current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
798     current->BaseAddress = NULL;
799     current->Dirty = FALSE;
800     current->PageOut = FALSE;
801     current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
802     current->SharedCacheMap = SharedCacheMap;
803     current->MappedCount = 0;
804     current->ReferenceCount = 0;
805     InitializeListHead(&current->CacheMapVacbListEntry);
806     InitializeListHead(&current->DirtyVacbListEntry);
807     InitializeListHead(&current->VacbLruListEntry);
808 
809     CcRosVacbIncRefCount(current);
810 
811     while (TRUE)
812     {
813         /* Map VACB in system space */
814         Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
815         if (NT_SUCCESS(Status))
816         {
817             break;
818         }
819 
820         /*
821          * If no space left, try to prune one unused VACB to recover space to map our VACB.
822          * If it succeeds, retry to map, otherwise just fail.
823          */
824         if (!CcRosFreeOneUnusedVacb())
825         {
826             ExFreeToNPagedLookasideList(&VacbLookasideList, current);
827             return Status;
828         }
829     }
830 
831 #if DBG
832     if (SharedCacheMap->Trace)
833     {
834         DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
835                 SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
836     }
837 #endif
838 
839     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
840 
841     *Vacb = current;
842     /* There is window between the call to CcRosLookupVacb
843      * and CcRosCreateVacb. We must check if a VACB for the
844      * file offset exist. If there is a VACB, we release
845      * our newly created VACB and return the existing one.
846      */
847     KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
848     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
849     previous = NULL;
850     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
851     {
852         current = CONTAINING_RECORD(current_entry,
853                                     ROS_VACB,
854                                     CacheMapVacbListEntry);
855         if (IsPointInRange(current->FileOffset.QuadPart,
856                            VACB_MAPPING_GRANULARITY,
857                            FileOffset))
858         {
859             CcRosVacbIncRefCount(current);
860             KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
861 #if DBG
862             if (SharedCacheMap->Trace)
863             {
864                 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
865                         SharedCacheMap,
866                         (*Vacb),
867                         current);
868             }
869 #endif
870             KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
871 
872             Refs = CcRosVacbDecRefCount(*Vacb);
873             ASSERT(Refs == 0);
874 
875             *Vacb = current;
876             return STATUS_SUCCESS;
877         }
878         if (current->FileOffset.QuadPart < FileOffset)
879         {
880             ASSERT(previous == NULL ||
881                    previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
882             previous = current;
883         }
884         if (current->FileOffset.QuadPart > FileOffset)
885             break;
886         current_entry = current_entry->Flink;
887     }
888     /* There was no existing VACB. */
889     current = *Vacb;
890     if (previous)
891     {
892         InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
893     }
894     else
895     {
896         InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
897     }
898     KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
899     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
900 
901     /* Reference it to allow release */
902     CcRosVacbIncRefCount(current);
903 
904     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
905 
906     return Status;
907 }
908 
BOOLEAN
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length
)
/*
 * FUNCTION: Ensures the pages backing [Offset, Offset + Length) of the given
 * VACB mapping are resident before the caller accesses them.
 * ARGUMENTS:
 *   Vacb   - VACB whose system-space mapping is to be accessed.
 *   Wait   - If FALSE, return FALSE instead of blocking when the pages are
 *            not already resident.
 *   NoRead - If TRUE, skip faulting the data in from the backing file
 *            (the range is left as-is and TRUE is still returned).
 *   Offset - Byte offset inside the VACB mapping; Offset + Length must not
 *            exceed VACB_MAPPING_GRANULARITY.
 *   Length - Number of bytes the caller intends to access.
 * RETURNS: TRUE on success, FALSE when the pages are not resident and
 *          Wait is FALSE.
 * NOTE: If reading the data in fails, the failure status is raised
 *       (ExRaiseStatus) rather than returned.
 */
{
    PVOID BaseAddress;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

#if 0
    if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->SectionSize.QuadPart)
    {
        DPRINT1("Vacb read beyond the file size!\n");
        return FALSE;
    }
#endif

    /* Address of the requested range inside the system-space mapping */
    BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);

    /* Check if the pages are resident */
    if (!MmArePagesResident(NULL, BaseAddress, Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            /* Ask Mm to fault the file data in; ValidDataLength is passed so
             * reads beyond it can be zero-filled instead of hitting the disk */
            PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
            NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
                                                        Vacb->FileOffset.QuadPart + Offset,
                                                        Length,
                                                        &SharedCacheMap->ValidDataLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}
954 
955 
956 NTSTATUS
957 CcRosGetVacb (
958     PROS_SHARED_CACHE_MAP SharedCacheMap,
959     LONGLONG FileOffset,
960     PROS_VACB *Vacb)
961 {
962     PROS_VACB current;
963     NTSTATUS Status;
964     ULONG Refs;
965     KIRQL OldIrql;
966 
967     ASSERT(SharedCacheMap);
968 
969     DPRINT("CcRosGetVacb()\n");
970 
971     /*
972      * Look for a VACB already mapping the same data.
973      */
974     current = CcRosLookupVacb(SharedCacheMap, FileOffset);
975     if (current == NULL)
976     {
977         /*
978          * Otherwise create a new VACB.
979          */
980         Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
981         if (!NT_SUCCESS(Status))
982         {
983             return Status;
984         }
985     }
986 
987     Refs = CcRosVacbGetRefCount(current);
988 
989     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
990 
991     /* Move to the tail of the LRU list */
992     RemoveEntryList(&current->VacbLruListEntry);
993     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
994 
995     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
996 
997     /*
998      * Return the VACB to the caller.
999      */
1000     *Vacb = current;
1001 
1002     ASSERT(Refs > 1);
1003 
1004     return STATUS_SUCCESS;
1005 }
1006 
1007 NTSTATUS
1008 CcRosRequestVacb (
1009     PROS_SHARED_CACHE_MAP SharedCacheMap,
1010     LONGLONG FileOffset,
1011     PROS_VACB *Vacb)
1012 /*
1013  * FUNCTION: Request a page mapping for a shared cache map
1014  */
1015 {
1016 
1017     ASSERT(SharedCacheMap);
1018 
1019     if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
1020     {
1021         DPRINT1("Bad fileoffset %I64x should be multiple of %x",
1022                 FileOffset, VACB_MAPPING_GRANULARITY);
1023         KeBugCheck(CACHE_MANAGER);
1024     }
1025 
1026     return CcRosGetVacb(SharedCacheMap,
1027                         FileOffset,
1028                         Vacb);
1029 }
1030 
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * ARGUMENTS:
 *   Vacb - VACB to destroy; its reference count must be zero and it must
 *          already be unlinked from the cache-map, dirty and LRU lists.
 * RETURNS: STATUS_SUCCESS (always).
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* A non-zero reference count here means a caller leaked a reference;
     * print diagnostics before the assert below fires */
    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    /* Poison the structure to make use-after-free visible, then return it
     * to the lookaside list */
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
1075 
1076 /*
1077  * @implemented
1078  */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
/*
 * FUNCTION: Flushes cached data for a file (or a sub-range of it) back to
 * the backing medium.
 * ARGUMENTS:
 *   SectionObjectPointers - Identifies the file's section/cache state.
 *   FileOffset            - Optional start of the range to flush; when NULL
 *                           the whole file (up to FileSize) is flushed.
 *   Length                - Length of the range, ignored when FileOffset is NULL.
 *   IoStatus              - Optional; receives the final status and the total
 *                           number of bytes written.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LONGLONG FlushStart, FlushEnd;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
        SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (!SectionObjectPointers)
    {
        Status = STATUS_INVALID_PARAMETER;
        goto quit;
    }

    if (!SectionObjectPointers->SharedCacheMap)
    {
        /* Forward this to Mm */
        MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
        return;
    }

    SharedCacheMap = SectionObjectPointers->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (FileOffset)
    {
        /* RtlLongLongAdd catches the overflowing FlushStart + Length case */
        FlushStart = FileOffset->QuadPart;
        Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
        if (!NT_SUCCESS(Status))
            goto quit;
    }
    else
    {
        /* No offset given: flush the entire file */
        FlushStart = 0;
        FlushEnd = SharedCacheMap->FileSize.QuadPart;
    }

    Status = STATUS_SUCCESS;
    if (IoStatus)
    {
        IoStatus->Information = 0;
    }

    /*
     * We flush the VACBs that we find here.
     * If there is no (dirty) VACB, it doesn't mean that there is no data to flush, so we call Mm to be sure.
     * This is suboptimal, but this is due to the lack of granularity of how we track dirty cache data
     */
    while (FlushStart < FlushEnd)
    {
        BOOLEAN DirtyVacb = FALSE;
        PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);

        if (vacb != NULL)
        {
            if (vacb->Dirty)
            {
                /* Accumulate per-VACB byte counts into the caller's IOSB */
                IO_STATUS_BLOCK VacbIosb = { 0 };
                Status = CcRosFlushVacb(vacb, &VacbIosb);
                if (!NT_SUCCESS(Status))
                {
                    goto quit;
                }
                DirtyVacb = TRUE;

                if (IoStatus)
                    IoStatus->Information += VacbIosb.Information;
            }

            /* Drop the reference CcRosLookupVacb took */
            CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
        }

        if (!DirtyVacb)
        {
            /* No dirty VACB for this chunk: let Mm flush whatever dirty
             * pages it may still have for the range */
            IO_STATUS_BLOCK MmIosb;
            LARGE_INTEGER MmOffset;

            MmOffset.QuadPart = FlushStart;

            if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
            {
                /* The whole range fits within a VACB chunk. */
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
            }
            else
            {
                /* Flush only up to the end of the current VACB chunk */
                ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
                Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
            }

            if (!NT_SUCCESS(Status))
                goto quit;

            if (IoStatus)
                IoStatus->Information += MmIosb.Information;

            /* Update VDL */
            if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
                SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
        }

        /* Advance to the next chunk, guarding against 64-bit overflow */
        if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
        {
            /* We're at the end of file ! */
            break;
        }

        /* Round down to next VACB start now */
        FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
    }

quit:
    if (IoStatus)
    {
        IoStatus->Status = Status;
    }
}
1202 
NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * ARGUMENTS:
 *   FileObject - File whose private cache map is to be torn down.
 * RETURNS: STATUS_SUCCESS (always).
 * NOTE: When the last private map is released (OpenCount reaches zero) the
 *       shared cache map itself is destroyed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. The map embedded in the shared cache map is not
             * pool-allocated: just mark it unused instead of freeing. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                /* Last handle gone: destroy the shared map. OldIrql is passed
                 * by pointer — presumably the callee may drop/reacquire the
                 * master lock; verify against CcRosDeleteFileCache. */
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}
1258 
1259 NTSTATUS
1260 CcRosInitializeFileCache (
1261     PFILE_OBJECT FileObject,
1262     PCC_FILE_SIZES FileSizes,
1263     BOOLEAN PinAccess,
1264     PCACHE_MANAGER_CALLBACKS CallBacks,
1265     PVOID LazyWriterContext)
1266 /*
1267  * FUNCTION: Initializes a shared cache map for a file object
1268  */
1269 {
1270     KIRQL OldIrql;
1271     BOOLEAN Allocated;
1272     PROS_SHARED_CACHE_MAP SharedCacheMap;
1273 
1274     DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
1275 
1276     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1277 
1278     Allocated = FALSE;
1279     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1280     if (SharedCacheMap == NULL)
1281     {
1282         Allocated = TRUE;
1283         SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1284         if (SharedCacheMap == NULL)
1285         {
1286             return STATUS_INSUFFICIENT_RESOURCES;
1287         }
1288         RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1289         SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
1290         SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
1291         SharedCacheMap->FileObject = FileObject;
1292         SharedCacheMap->Callbacks = CallBacks;
1293         SharedCacheMap->LazyWriteContext = LazyWriterContext;
1294         SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1295         SharedCacheMap->FileSize = FileSizes->FileSize;
1296         SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
1297         SharedCacheMap->PinAccess = PinAccess;
1298         SharedCacheMap->DirtyPageThreshold = 0;
1299         SharedCacheMap->DirtyPages = 0;
1300         InitializeListHead(&SharedCacheMap->PrivateList);
1301         KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1302         InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1303         InitializeListHead(&SharedCacheMap->BcbList);
1304 
1305         SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;
1306 
1307         ObReferenceObjectByPointer(FileObject,
1308                                    FILE_ALL_ACCESS,
1309                                    NULL,
1310                                    KernelMode);
1311 
1312         FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1313 
1314         //CcRosTraceCacheMap(SharedCacheMap, TRUE);
1315     }
1316     else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
1317     {
1318         /* The shared cache map is being created somewhere else. Wait for that to happen */
1319         KEVENT Waiter;
1320         PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;
1321 
1322         KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
1323         SharedCacheMap->CreateEvent = &Waiter;
1324 
1325         KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1326 
1327         KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);
1328 
1329         if (PreviousWaiter)
1330             KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);
1331 
1332         OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1333     }
1334 
1335     if (FileObject->PrivateCacheMap == NULL)
1336     {
1337         PPRIVATE_CACHE_MAP PrivateMap;
1338 
1339         /* Allocate the private cache map for this handle */
1340         if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
1341         {
1342             PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
1343         }
1344         else
1345         {
1346             PrivateMap = &SharedCacheMap->PrivateCacheMap;
1347         }
1348 
1349         if (PrivateMap == NULL)
1350         {
1351             /* If we also allocated the shared cache map for this file, kill it */
1352             if (Allocated)
1353             {
1354                 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1355 
1356                 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1357                 ObDereferenceObject(FileObject);
1358                 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1359             }
1360 
1361             KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1362             return STATUS_INSUFFICIENT_RESOURCES;
1363         }
1364 
1365         /* Initialize it */
1366         RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
1367         PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
1368         PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
1369         PrivateMap->FileObject = FileObject;
1370         KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
1371 
1372         /* Link it to the file */
1373         KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1374         InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
1375         KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1376 
1377         FileObject->PrivateCacheMap = PrivateMap;
1378         SharedCacheMap->OpenCount++;
1379     }
1380 
1381     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1382 
1383     /* Create the section */
1384     if (Allocated)
1385     {
1386         NTSTATUS Status;
1387 
1388         ASSERT(SharedCacheMap->Section == NULL);
1389 
1390         Status = MmCreateSection(
1391             &SharedCacheMap->Section,
1392             SECTION_ALL_ACCESS,
1393             NULL,
1394             &SharedCacheMap->SectionSize,
1395             PAGE_READWRITE,
1396             SEC_RESERVE,
1397             NULL,
1398             FileObject);
1399 
1400         ASSERT(NT_SUCCESS(Status));
1401 
1402         if (!NT_SUCCESS(Status))
1403         {
1404             CcRosReleaseFileCache(FileObject);
1405             return Status;
1406         }
1407 
1408         OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1409 
1410         InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1411         SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;
1412 
1413         if (SharedCacheMap->CreateEvent)
1414         {
1415             KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
1416             SharedCacheMap->CreateEvent = NULL;
1417         }
1418 
1419         KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1420     }
1421 
1422     return STATUS_SUCCESS;
1423 }
1424 
1425 /*
1426  * @implemented
1427  */
1428 PFILE_OBJECT
1429 NTAPI
1430 CcGetFileObjectFromSectionPtrs (
1431     IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1432 {
1433     PROS_SHARED_CACHE_MAP SharedCacheMap;
1434 
1435     CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1436 
1437     if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1438     {
1439         SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1440         ASSERT(SharedCacheMap);
1441         return SharedCacheMap->FileObject;
1442     }
1443     return NULL;
1444 }
1445 
1446 CODE_SEG("INIT")
1447 VOID
1448 NTAPI
1449 CcInitView (
1450     VOID)
1451 {
1452     DPRINT("CcInitView()\n");
1453 
1454     InitializeListHead(&DirtyVacbListHead);
1455     InitializeListHead(&VacbLruListHead);
1456     InitializeListHead(&CcDeferredWrites);
1457     InitializeListHead(&CcCleanSharedCacheMapList);
1458     KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1459     ExInitializeNPagedLookasideList(&iBcbLookasideList,
1460                                     NULL,
1461                                     NULL,
1462                                     0,
1463                                     sizeof(INTERNAL_BCB),
1464                                     TAG_BCB,
1465                                     20);
1466     ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1467                                     NULL,
1468                                     NULL,
1469                                     0,
1470                                     sizeof(ROS_SHARED_CACHE_MAP),
1471                                     TAG_SHARED_CACHE_MAP,
1472                                     20);
1473     ExInitializeNPagedLookasideList(&VacbLookasideList,
1474                                     NULL,
1475                                     NULL,
1476                                     0,
1477                                     sizeof(ROS_VACB),
1478                                     TAG_VACB,
1479                                     20);
1480 
1481     CcInitCacheZeroPage();
1482 }
1483 
1484 #if DBG && defined(KDBG)
1485 
1486 #include <kdbg/kdb.h>
1487 
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
/*
 * FUNCTION: KDBG extension command: prints, for every clean shared cache
 * map, the mapped and dirty cache usage (in kb) along with the file name.
 * RETURNS: TRUE (always).
 */
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint("  Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Mapped = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACB */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            /* Each VACB maps a full granularity-sized chunk */
            Mapped += VACB_MAPPING_GRANULARITY / 1024;
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            /* Heuristic: the node type/size magic values above are matched
             * against FastFAT's FCB layout, with the file name presumably at
             * offset 0x100 inside the FCB — debugger-only best effort */
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
    }

    return TRUE;
}
1546 
BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
/*
 * FUNCTION: KDBG extension command: dumps the dirty-page counters and
 * thresholds relevant to deferred (throttled) writes, then states whether
 * writes are expected to be throttled right now.
 * RETURNS: TRUE (always).
 */
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    /* Classify the current state against the threshold; the 64-page margin
     * mirrors the maximum charge a single write can add */
    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}
1578 
1579 #endif // DBG && defined(KDBG)
1580 
1581 /* EOF */
1582