xref: /reactos/ntoskrnl/cc/view.c (revision 2ea03b5b)
1 /*
2  * COPYRIGHT:       See COPYING in the top level directory
3  * PROJECT:         ReactOS kernel
4  * FILE:            ntoskrnl/cc/view.c
5  * PURPOSE:         Cache manager
6  *
7  * PROGRAMMERS:     David Welch (welch@mcmail.com)
8  *                  Pierre Schweitzer (pierre@reactos.org)
9  */
10 
11 /* NOTES **********************************************************************
12  *
13  * This is not the NT implementation of a file cache nor anything much like
14  * it.
15  *
16  * The general procedure for a filesystem to implement a read or write
17  * dispatch routine is as follows
18  *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
20  * CcInitializeFileCache.
21  *
22  * (2) For each 4k region which is being read or written obtain a cache page
23  * by calling CcRequestCachePage.
24  *
25  * (3) If either the page is being read or not completely written, and it is
26  * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28  *
29  * (4) Copy the data into or out of the page as necessary.
30  *
31  * (5) Release the cache page
32  */
33 /* INCLUDES ******************************************************************/
34 
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38 
39 /* GLOBALS *******************************************************************/
40 
/* All VACBs that hold unwritten data; drained by CcRosFlushDirtyPages */
LIST_ENTRY DirtyVacbListHead;
/* All VACBs in least-recently-used order; reclaim candidates come from the head */
static LIST_ENTRY VacbLruListHead;

/* Non-paged lookaside list for BCB allocations (consumed elsewhere in Cc) */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
/* Non-paged lookaside list backing ROS_SHARED_CACHE_MAP allocations */
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
/* Non-paged lookaside list backing ROS_VACB allocations */
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
60 
#if DBG
/*
 * Debug wrapper: atomically increments the VACB reference count and, when
 * tracing is enabled on the owning shared cache map, logs the transition
 * together with the call site (file/line come from the wrapping macro).
 */
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
/*
 * Debug wrapper: atomically decrements the VACB reference count, logs the
 * transition when tracing is enabled, and frees the VACB once the last
 * reference is gone. A dirty VACB must never drop to zero references (the
 * dirty list itself holds one), hence the assertion.
 */
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}
/*
 * Debug wrapper: reads the current VACB reference count without modifying
 * it (a compare-exchange with 0/0 is an atomic read) and logs it when
 * tracing is enabled.
 */
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif
108 
109 
110 /* FUNCTIONS *****************************************************************/
111 
VOID
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
/*
 * FUNCTION: Enables or disables debug tracing for a shared cache map
 * (DBG builds only; a no-op on free builds). When enabling, every VACB
 * currently attached to the map is dumped.
 */
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Master lock + per-map CacheMapLock are both taken to walk the
         * per-map VACB list safely */
        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu, BaseAddress %p, FileOffset %I64d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut, current->BaseAddress, current->FileOffset.QuadPart);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
157 
158 NTSTATUS
159 CcRosFlushVacb (
160     _In_ PROS_VACB Vacb,
161     _Out_opt_ PIO_STATUS_BLOCK Iosb)
162 {
163     NTSTATUS Status;
164     BOOLEAN HaveLock = FALSE;
165     PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
166 
167     CcRosUnmarkDirtyVacb(Vacb, TRUE);
168 
169     /* Lock for flush, if we are not already the top-level */
170     if (IoGetTopLevelIrp() != (PIRP)FSRTL_CACHE_TOP_LEVEL_IRP)
171     {
172         Status = FsRtlAcquireFileForCcFlushEx(Vacb->SharedCacheMap->FileObject);
173         if (!NT_SUCCESS(Status))
174             goto quit;
175         HaveLock = TRUE;
176     }
177 
178     Status = MmFlushSegment(SharedCacheMap->FileObject->SectionObjectPointer,
179                             &Vacb->FileOffset,
180                             VACB_MAPPING_GRANULARITY,
181                             Iosb);
182 
183     if (HaveLock)
184     {
185         FsRtlReleaseFileForCcFlush(Vacb->SharedCacheMap->FileObject);
186     }
187 
188 quit:
189     if (!NT_SUCCESS(Status))
190         CcRosMarkDirtyVacb(Vacb);
191     else
192     {
193         /* Update VDL */
194         if (SharedCacheMap->ValidDataLength.QuadPart < (Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY))
195         {
196             SharedCacheMap->ValidDataLength.QuadPart = Vacb->FileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
197         }
198     }
199 
200     return Status;
201 }
202 
static
NTSTATUS
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 *
 * Entered with the queued master lock held at *OldIrql. The lock is
 * dropped while VACBs are flushed and freed, then re-acquired before
 * returning, so *OldIrql remains valid for the caller.
 */
{
    PLIST_ENTRY current_entry;

    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap == FileObject->SectionObjectPointer->SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount == 0);

    /* Remove all VACBs from the global lists */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        PROS_VACB Vacb = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);

        /* Detach from the LRU; re-point the entry at itself so later list
         * operations on it stay well-defined */
        RemoveEntryList(&Vacb->VacbLruListEntry);
        InitializeListHead(&Vacb->VacbLruListEntry);

        if (Vacb->Dirty)
        {
            /* FALSE: we already hold the locks CcRosUnmarkDirtyVacb needs */
            CcRosUnmarkDirtyVacb(Vacb, FALSE);
            /* Mark it as dirty again so we know that we have to flush before freeing it */
            Vacb->Dirty = TRUE;
        }

        current_entry = current_entry->Flink;
    }

    /* Make sure there is no trace anymore of this map */
    FileObject->SectionObjectPointer->SharedCacheMap = NULL;
    RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Now that we're out of the locks, free everything for real */
    while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
    {
        PROS_VACB Vacb = CONTAINING_RECORD(RemoveHeadList(&SharedCacheMap->CacheMapVacbListHead), ROS_VACB, CacheMapVacbListEntry);
        ULONG RefCount;

        InitializeListHead(&Vacb->CacheMapVacbListEntry);

        /* Flush to disk, if needed */
        if (Vacb->Dirty)
        {
            IO_STATUS_BLOCK Iosb;
            NTSTATUS Status;

            Status = MmFlushSegment(FileObject->SectionObjectPointer, &Vacb->FileOffset, VACB_MAPPING_GRANULARITY, &Iosb);
            if (!NT_SUCCESS(Status))
            {
                /* Complain. There's not much we can do */
                DPRINT1("Failed to flush VACB to disk while deleting the cache entry. Status: 0x%08x\n", Status);
            }
            Vacb->Dirty = FALSE;
        }

        /* Drop what should be the last reference; the DBG block below
         * reports any leak */
        RefCount = CcRosVacbDecRefCount(Vacb);
#if DBG // CORE-14578
        if (RefCount != 0)
        {
            DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", Vacb, FileObject, Vacb->FileOffset.QuadPart);
            DPRINT1("There are: %d references left\n", RefCount);
            DPRINT1("Map: %d\n", Vacb->MappedCount);
            DPRINT1("Dirty: %d\n", Vacb->Dirty);
            if (FileObject->FileName.Length != 0)
            {
                DPRINT1("File was: %wZ\n", &FileObject->FileName);
            }
            else
            {
                DPRINT1("No name for the file\n");
            }
        }
#else
        (void)RefCount;
#endif
    }

    /* Release the references we own */
    if(SharedCacheMap->Section)
        ObDereferenceObject(SharedCacheMap->Section);
    ObDereferenceObject(SharedCacheMap->FileObject);

    ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

    /* Acquire the lock again for our caller */
    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    return STATUS_SUCCESS;
}
303 
NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
/*
 * FUNCTION: Writes dirty VACBs back to disk.
 * ARGUMENTS:
 *   Target         - Number of pages to try to flush; MAXULONG flushes
 *                    everything on the dirty list.
 *   Count          - Receives the number of pages actually written.
 *   Wait           - TRUE to block on each file's lazy-write lock.
 *   CalledFromLazy - TRUE when invoked by the lazy writer; temporary files
 *                    and files with write-behind disabled are then skipped.
 * RETURNS: STATUS_SUCCESS (individual flush failures are only logged).
 */
{
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL OldIrql;
    BOOLEAN FlushAll = (Target == MAXULONG);

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (((current_entry != &DirtyVacbListHead) && (Target > 0)) || FlushAll)
    {
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PROS_VACB current;
        BOOLEAN Locked;

        /* In flush-all mode, wrap back to the head when the end is reached */
        if (current_entry == &DirtyVacbListHead)
        {
            ASSERT(FlushAll);
            if (IsListEmpty(&DirtyVacbListHead))
                break;
            current_entry = DirtyVacbListHead.Flink;
        }

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Keep the VACB alive while we work on it with the lock dropped */
        CcRosVacbIncRefCount(current);

        SharedCacheMap = current->SharedCacheMap;

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Do not lazy-write the same file concurrently. Fastfat ASSERTS on that */
        if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE;

        /* Keep a ref on the shared cache map */
        SharedCacheMap->OpenCount++;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = SharedCacheMap->Callbacks->AcquireForLazyWrite(SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            /* Couldn't get the per-file lock without blocking: undo our
             * bookkeeping and move on to the next VACB */
            DPRINT("Not locked!");
            ASSERT(!Wait);
            CcRosVacbDecRefCount(current);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

            if (--SharedCacheMap->OpenCount == 0)
                CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

            continue;
        }

        IO_STATUS_BLOCK Iosb;
        Status = CcRosFlushVacb(current, &Iosb);

        SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext);

        /* We release the VACB before acquiring the lock again, because
         * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a
         * Refcount. Freeing must be done outside of the lock.
         * The refcount is decremented atomically. So this is OK. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE;

        if (--SharedCacheMap->OpenCount == 0)
            CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = Iosb.Information / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't overflow target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        /* The dirty list may have changed while unlocked: restart from the head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
451 
452 NTSTATUS
453 CcRosReleaseVacb (
454     PROS_SHARED_CACHE_MAP SharedCacheMap,
455     PROS_VACB Vacb,
456     BOOLEAN Dirty,
457     BOOLEAN Mapped)
458 {
459     ULONG Refs;
460     ASSERT(SharedCacheMap);
461 
462     DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p)\n", SharedCacheMap, Vacb);
463 
464     if (Dirty && !Vacb->Dirty)
465     {
466         CcRosMarkDirtyVacb(Vacb);
467     }
468 
469     if (Mapped)
470     {
471         if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
472         {
473             CcRosVacbIncRefCount(Vacb);
474         }
475     }
476 
477     Refs = CcRosVacbDecRefCount(Vacb);
478     ASSERT(Refs > 0);
479 
480     return STATUS_SUCCESS;
481 }
482 
483 /* Returns with VACB Lock Held! */
484 PROS_VACB
485 CcRosLookupVacb (
486     PROS_SHARED_CACHE_MAP SharedCacheMap,
487     LONGLONG FileOffset)
488 {
489     PLIST_ENTRY current_entry;
490     PROS_VACB current;
491     KIRQL oldIrql;
492 
493     ASSERT(SharedCacheMap);
494 
495     DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
496            SharedCacheMap, FileOffset);
497 
498     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
499     KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
500 
501     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
502     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
503     {
504         current = CONTAINING_RECORD(current_entry,
505                                     ROS_VACB,
506                                     CacheMapVacbListEntry);
507         if (IsPointInRange(current->FileOffset.QuadPart,
508                            VACB_MAPPING_GRANULARITY,
509                            FileOffset))
510         {
511             CcRosVacbIncRefCount(current);
512             KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
513             KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
514             return current;
515         }
516         if (current->FileOffset.QuadPart > FileOffset)
517             break;
518         current_entry = current_entry->Flink;
519     }
520 
521     KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
522     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
523 
524     return NULL;
525 }
526 
VOID
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Puts a (previously clean) VACB on the global dirty list,
 * updates the global and per-map dirty page accounting, and takes an
 * extra reference that CcRosUnmarkDirtyVacb will drop again.
 */
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* FIXME: There is no reason to account for the whole VACB. */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* The dirty list holds a reference to keep the VACB alive */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
562 
VOID
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
/*
 * FUNCTION: Removes a VACB from the dirty list, reverts the accounting
 * done by CcRosMarkDirtyVacb and drops the reference the dirty list held.
 * ARGUMENTS:
 *   LockViews - TRUE to take the master and cache-map locks here; FALSE
 *               when the caller already holds them.
 */
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    /* Re-point the entry at itself so later list operations stay safe */
    InitializeListHead(&Vacb->DirtyVacbListEntry);

    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    /* Drop the reference taken when the VACB was marked dirty */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}
597 
BOOLEAN
CcRosFreeOneUnusedVacb(
    VOID)
/*
 * FUNCTION: Scans the LRU list for one VACB nobody is using, unlinks it
 * from all lists and frees it, recovering its system address space.
 * RETURNS: TRUE if a VACB was freed, FALSE if every VACB is in use.
 */
{
    KIRQL oldIrql;
    PLIST_ENTRY current_entry;
    PROS_VACB to_free = NULL;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACB */
    current_entry = VacbLruListHead.Flink;
    while ((current_entry != &VacbLruListHead) && (to_free == NULL))
    {
        ULONG Refs;
        PROS_VACB current;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACB, we will free them */
        /* Refs < 2 means only the cache map's own reference remains */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset it, this is the one we want to free */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);

            to_free = current;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* NOTE: once to_free is set, this entry's Flink points at itself
         * (it was re-initialized above), but the (to_free == NULL) part of
         * the loop condition exits before the self-link is followed */
        current_entry = current_entry->Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free the VACB that we found, if any. */
    if (to_free == NULL)
    {
        return FALSE;
    }

    /* This must be its last ref */
    NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0);

    return TRUE;
}
656 
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Allocates a new VACB for the view containing FileOffset, maps
 * it into system space and inserts it into the cache map's sorted VACB
 * list and the global LRU list.
 *
 * If another thread created a VACB for the same range between the caller's
 * CcRosLookupVacb and this call, the newly created VACB is discarded and
 * the existing one is returned instead.
 *
 * RETURNS: STATUS_SUCCESS with a referenced VACB in *Vacb, or the mapping
 * failure status when no system address space could be recovered.
 */
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    /* Views are aligned to VACB_MAPPING_GRANULARITY boundaries */
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    while (TRUE)
    {
        /* Map VACB in system space */
        Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0);
        if (NT_SUCCESS(Status))
        {
            break;
        }

        /*
         * If no space left, try to prune one unused VACB to recover space to map our VACB.
         * If it succeeds, retry to map, otherwise just fail.
         */
        if (!CcRosFreeOneUnusedVacb())
        {
            ExFreeToNPagedLookasideList(&VacbLookasideList, current);
            return Status;
        }
    }

#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n",
                SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress);
    }
#endif

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exist. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Another thread beat us to it: keep the existing VACB */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            /* Drop the initial reference: this frees the new VACB */
            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        /* Remember the last VACB with a smaller offset so the new one is
         * inserted after it, keeping the list sorted */
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return Status;
}
787 
788 BOOLEAN
789 CcRosEnsureVacbResident(
790     _In_ PROS_VACB Vacb,
791     _In_ BOOLEAN Wait,
792     _In_ BOOLEAN NoRead,
793     _In_ ULONG Offset,
794     _In_ ULONG Length
795 )
796 {
797     PVOID BaseAddress;
798 
799     ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);
800 
801 #if 0
802     if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->SectionSize.QuadPart)
803     {
804         DPRINT1("Vacb read beyond the file size!\n");
805         return FALSE;
806     }
807 #endif
808 
809     BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);
810 
811     /* Check if the pages are resident */
812     if (!MmArePagesResident(NULL, BaseAddress, Length))
813     {
814         if (!Wait)
815         {
816             return FALSE;
817         }
818 
819         if (!NoRead)
820         {
821             PROS_SHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap;
822             NTSTATUS Status = MmMakeDataSectionResident(SharedCacheMap->FileObject->SectionObjectPointer,
823                                                         Vacb->FileOffset.QuadPart + Offset,
824                                                         Length,
825                                                         &SharedCacheMap->ValidDataLength);
826             if (!NT_SUCCESS(Status))
827                 ExRaiseStatus(Status);
828         }
829     }
830 
831     return TRUE;
832 }
833 
834 
835 NTSTATUS
836 CcRosGetVacb (
837     PROS_SHARED_CACHE_MAP SharedCacheMap,
838     LONGLONG FileOffset,
839     PROS_VACB *Vacb)
840 {
841     PROS_VACB current;
842     NTSTATUS Status;
843     ULONG Refs;
844     KIRQL OldIrql;
845 
846     ASSERT(SharedCacheMap);
847 
848     DPRINT("CcRosGetVacb()\n");
849 
850     /*
851      * Look for a VACB already mapping the same data.
852      */
853     current = CcRosLookupVacb(SharedCacheMap, FileOffset);
854     if (current == NULL)
855     {
856         /*
857          * Otherwise create a new VACB.
858          */
859         Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
860         if (!NT_SUCCESS(Status))
861         {
862             return Status;
863         }
864     }
865 
866     Refs = CcRosVacbGetRefCount(current);
867 
868     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
869 
870     /* Move to the tail of the LRU list */
871     RemoveEntryList(&current->VacbLruListEntry);
872     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
873 
874     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
875 
876     /*
877      * Return the VACB to the caller.
878      */
879     *Vacb = current;
880 
881     ASSERT(Refs > 1);
882 
883     return STATUS_SUCCESS;
884 }
885 
886 NTSTATUS
887 CcRosRequestVacb (
888     PROS_SHARED_CACHE_MAP SharedCacheMap,
889     LONGLONG FileOffset,
890     PROS_VACB *Vacb)
891 /*
892  * FUNCTION: Request a page mapping for a shared cache map
893  */
894 {
895 
896     ASSERT(SharedCacheMap);
897 
898     if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
899     {
900         DPRINT1("Bad fileoffset %I64x should be multiple of %x",
901                 FileOffset, VACB_MAPPING_GRANULARITY);
902         KeBugCheck(CACHE_MANAGER);
903     }
904 
905     return CcRosGetVacb(SharedCacheMap,
906                         FileOffset,
907                         Vacb);
908 }
909 
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 *
 * The VACB must already be unlinked from all lists and hold no
 * references; its system-space mapping is torn down and the structure
 * is returned to the lookaside list.
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    /* Poison the structure to catch use-after-free before handing it back */
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
954 
955 /*
956  * @implemented
957  */
958 VOID
959 NTAPI
960 CcFlushCache (
961     IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
962     IN PLARGE_INTEGER FileOffset OPTIONAL,
963     IN ULONG Length,
964     OUT PIO_STATUS_BLOCK IoStatus)
965 {
966     PROS_SHARED_CACHE_MAP SharedCacheMap;
967     LONGLONG FlushStart, FlushEnd;
968     NTSTATUS Status;
969 
970     CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
971         SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);
972 
973     if (!SectionObjectPointers)
974     {
975         Status = STATUS_INVALID_PARAMETER;
976         goto quit;
977     }
978 
979     if (!SectionObjectPointers->SharedCacheMap)
980     {
981         /* Forward this to Mm */
982         MmFlushSegment(SectionObjectPointers, FileOffset, Length, IoStatus);
983         return;
984     }
985 
986     SharedCacheMap = SectionObjectPointers->SharedCacheMap;
987     ASSERT(SharedCacheMap);
988     if (FileOffset)
989     {
990         FlushStart = FileOffset->QuadPart;
991         Status = RtlLongLongAdd(FlushStart, Length, &FlushEnd);
992         if (!NT_SUCCESS(Status))
993             goto quit;
994     }
995     else
996     {
997         FlushStart = 0;
998         FlushEnd = SharedCacheMap->FileSize.QuadPart;
999     }
1000 
1001     Status = STATUS_SUCCESS;
1002     if (IoStatus)
1003     {
1004         IoStatus->Information = 0;
1005     }
1006 
1007     /*
1008      * We flush the VACBs that we find here.
1009      * If there is no (dirty) VACB, it doesn't mean that there is no data to flush, so we call Mm to be sure.
1010      * This is suboptimal, but this is due to the lack of granularity of how we track dirty cache data
1011      */
1012     while (FlushStart < FlushEnd)
1013     {
1014         BOOLEAN DirtyVacb = FALSE;
1015         PROS_VACB vacb = CcRosLookupVacb(SharedCacheMap, FlushStart);
1016 
1017         if (vacb != NULL)
1018         {
1019             if (vacb->Dirty)
1020             {
1021                 IO_STATUS_BLOCK VacbIosb = { 0 };
1022                 Status = CcRosFlushVacb(vacb, &VacbIosb);
1023                 if (!NT_SUCCESS(Status))
1024                 {
1025                     goto quit;
1026                 }
1027                 DirtyVacb = TRUE;
1028 
1029                 if (IoStatus)
1030                     IoStatus->Information += VacbIosb.Information;
1031             }
1032 
1033             CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE);
1034         }
1035 
1036         if (!DirtyVacb)
1037         {
1038             IO_STATUS_BLOCK MmIosb;
1039             LARGE_INTEGER MmOffset;
1040 
1041             MmOffset.QuadPart = FlushStart;
1042 
1043             if (FlushEnd - (FlushEnd % VACB_MAPPING_GRANULARITY) <= FlushStart)
1044             {
1045                 /* The whole range fits within a VACB chunk. */
1046                 Status = MmFlushSegment(SectionObjectPointers, &MmOffset, FlushEnd - FlushStart, &MmIosb);
1047             }
1048             else
1049             {
1050                 ULONG MmLength = VACB_MAPPING_GRANULARITY - (FlushStart % VACB_MAPPING_GRANULARITY);
1051                 Status = MmFlushSegment(SectionObjectPointers, &MmOffset, MmLength, &MmIosb);
1052             }
1053 
1054             if (!NT_SUCCESS(Status))
1055                 goto quit;
1056 
1057             if (IoStatus)
1058                 IoStatus->Information += MmIosb.Information;
1059 
1060             /* Update VDL */
1061             if (SharedCacheMap->ValidDataLength.QuadPart < FlushEnd)
1062                 SharedCacheMap->ValidDataLength.QuadPart = FlushEnd;
1063         }
1064 
1065         if (!NT_SUCCESS(RtlLongLongAdd(FlushStart, VACB_MAPPING_GRANULARITY, &FlushStart)))
1066         {
1067             /* We're at the end of file ! */
1068             break;
1069         }
1070 
1071         /* Round down to next VACB start now */
1072         FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY;
1073     }
1074 
1075 quit:
1076     if (IoStatus)
1077     {
1078         IoStatus->Status = Status;
1079     }
1080 }
1081 
NTSTATUS
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * ARGUMENTS:
 *     FileObject = File object whose handle was closed
 * RETURNS: STATUS_SUCCESS (always)
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file's private map list.
             * CacheMapLock nests under the master lock, which already raised
             * us to DISPATCH_LEVEL, hence the AtDpcLevel variants */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. The first private map is embedded in the shared
             * cache map and is only marked unused; extra ones (additional
             * handles) were pool-allocated and are returned to the pool */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            /* Drop this handle's reference; tear down the whole cache map
             * on the last close.
             * NOTE(review): OldIrql is passed by pointer, presumably so the
             * callee can drop and reacquire the master lock - confirm
             * against CcRosDeleteFileCache */
            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}
1137 
1138 NTSTATUS
1139 CcRosInitializeFileCache (
1140     PFILE_OBJECT FileObject,
1141     PCC_FILE_SIZES FileSizes,
1142     BOOLEAN PinAccess,
1143     PCACHE_MANAGER_CALLBACKS CallBacks,
1144     PVOID LazyWriterContext)
1145 /*
1146  * FUNCTION: Initializes a shared cache map for a file object
1147  */
1148 {
1149     KIRQL OldIrql;
1150     BOOLEAN Allocated;
1151     PROS_SHARED_CACHE_MAP SharedCacheMap;
1152 
1153     DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
1154 
1155     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1156 
1157     Allocated = FALSE;
1158     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1159     if (SharedCacheMap == NULL)
1160     {
1161         Allocated = TRUE;
1162         SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1163         if (SharedCacheMap == NULL)
1164         {
1165             return STATUS_INSUFFICIENT_RESOURCES;
1166         }
1167         RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1168         SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
1169         SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
1170         SharedCacheMap->FileObject = FileObject;
1171         SharedCacheMap->Callbacks = CallBacks;
1172         SharedCacheMap->LazyWriteContext = LazyWriterContext;
1173         SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1174         SharedCacheMap->FileSize = FileSizes->FileSize;
1175         SharedCacheMap->ValidDataLength = FileSizes->ValidDataLength;
1176         SharedCacheMap->PinAccess = PinAccess;
1177         SharedCacheMap->DirtyPageThreshold = 0;
1178         SharedCacheMap->DirtyPages = 0;
1179         InitializeListHead(&SharedCacheMap->PrivateList);
1180         KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1181         InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1182         InitializeListHead(&SharedCacheMap->BcbList);
1183 
1184         SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;
1185 
1186         ObReferenceObjectByPointer(FileObject,
1187                                    FILE_ALL_ACCESS,
1188                                    NULL,
1189                                    KernelMode);
1190 
1191         FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1192 
1193         //CcRosTraceCacheMap(SharedCacheMap, TRUE);
1194     }
1195     else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
1196     {
1197         /* The shared cache map is being created somewhere else. Wait for that to happen */
1198         KEVENT Waiter;
1199         PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;
1200 
1201         KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
1202         SharedCacheMap->CreateEvent = &Waiter;
1203 
1204         KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1205 
1206         KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);
1207 
1208         if (PreviousWaiter)
1209             KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);
1210 
1211         OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1212     }
1213 
1214     if (FileObject->PrivateCacheMap == NULL)
1215     {
1216         PPRIVATE_CACHE_MAP PrivateMap;
1217 
1218         /* Allocate the private cache map for this handle */
1219         if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
1220         {
1221             PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
1222         }
1223         else
1224         {
1225             PrivateMap = &SharedCacheMap->PrivateCacheMap;
1226         }
1227 
1228         if (PrivateMap == NULL)
1229         {
1230             /* If we also allocated the shared cache map for this file, kill it */
1231             if (Allocated)
1232             {
1233                 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1234 
1235                 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1236                 ObDereferenceObject(FileObject);
1237                 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1238             }
1239 
1240             KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1241             return STATUS_INSUFFICIENT_RESOURCES;
1242         }
1243 
1244         /* Initialize it */
1245         RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
1246         PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
1247         PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
1248         PrivateMap->FileObject = FileObject;
1249         KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
1250 
1251         /* Link it to the file */
1252         KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1253         InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
1254         KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1255 
1256         FileObject->PrivateCacheMap = PrivateMap;
1257         SharedCacheMap->OpenCount++;
1258     }
1259 
1260     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1261 
1262     /* Create the section */
1263     if (Allocated)
1264     {
1265         NTSTATUS Status;
1266 
1267         ASSERT(SharedCacheMap->Section == NULL);
1268 
1269         Status = MmCreateSection(
1270             &SharedCacheMap->Section,
1271             SECTION_ALL_ACCESS,
1272             NULL,
1273             &SharedCacheMap->SectionSize,
1274             PAGE_READWRITE,
1275             SEC_RESERVE,
1276             NULL,
1277             FileObject);
1278 
1279         ASSERT(NT_SUCCESS(Status));
1280 
1281         if (!NT_SUCCESS(Status))
1282         {
1283             CcRosReleaseFileCache(FileObject);
1284             return Status;
1285         }
1286 
1287         OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1288 
1289         InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1290         SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;
1291 
1292         if (SharedCacheMap->CreateEvent)
1293         {
1294             KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
1295             SharedCacheMap->CreateEvent = NULL;
1296         }
1297 
1298         KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1299     }
1300 
1301     return STATUS_SUCCESS;
1302 }
1303 
1304 /*
1305  * @implemented
1306  */
1307 PFILE_OBJECT
1308 NTAPI
1309 CcGetFileObjectFromSectionPtrs (
1310     IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1311 {
1312     PROS_SHARED_CACHE_MAP SharedCacheMap;
1313 
1314     CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1315 
1316     if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1317     {
1318         SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1319         ASSERT(SharedCacheMap);
1320         return SharedCacheMap->FileObject;
1321     }
1322     return NULL;
1323 }
1324 
1325 CODE_SEG("INIT")
1326 VOID
1327 NTAPI
1328 CcInitView (
1329     VOID)
1330 {
1331     DPRINT("CcInitView()\n");
1332 
1333     InitializeListHead(&DirtyVacbListHead);
1334     InitializeListHead(&VacbLruListHead);
1335     InitializeListHead(&CcDeferredWrites);
1336     InitializeListHead(&CcCleanSharedCacheMapList);
1337     KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1338     ExInitializeNPagedLookasideList(&iBcbLookasideList,
1339                                     NULL,
1340                                     NULL,
1341                                     0,
1342                                     sizeof(INTERNAL_BCB),
1343                                     TAG_BCB,
1344                                     20);
1345     ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1346                                     NULL,
1347                                     NULL,
1348                                     0,
1349                                     sizeof(ROS_SHARED_CACHE_MAP),
1350                                     TAG_SHARED_CACHE_MAP,
1351                                     20);
1352     ExInitializeNPagedLookasideList(&VacbLookasideList,
1353                                     NULL,
1354                                     NULL,
1355                                     0,
1356                                     sizeof(ROS_VACB),
1357                                     TAG_VACB,
1358                                     20);
1359 
1360     CcInitCacheZeroPage();
1361 }
1362 
1363 #if DBG && defined(KDBG)
1364 
1365 #include <kdbg/kdb.h>
1366 
1367 BOOLEAN
1368 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1369 {
1370     PLIST_ENTRY ListEntry;
1371     UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1372 
1373     KdbpPrint("  Usage Summary (in kb)\n");
1374     KdbpPrint("Shared\t\tMapped\tDirty\tName\n");
1375     /* No need to lock the spin lock here, we're in DBG */
1376     for (ListEntry = CcCleanSharedCacheMapList.Flink;
1377          ListEntry != &CcCleanSharedCacheMapList;
1378          ListEntry = ListEntry->Flink)
1379     {
1380         PLIST_ENTRY Vacbs;
1381         ULONG Mapped = 0, Dirty = 0;
1382         PROS_SHARED_CACHE_MAP SharedCacheMap;
1383         PUNICODE_STRING FileName;
1384         PWSTR Extra = L"";
1385 
1386         SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1387 
1388         /* Dirty size */
1389         Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1390 
1391         /* First, count for all the associated VACB */
1392         for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1393              Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1394              Vacbs = Vacbs->Flink)
1395         {
1396             Mapped += VACB_MAPPING_GRANULARITY / 1024;
1397         }
1398 
1399         /* Setup name */
1400         if (SharedCacheMap->FileObject != NULL &&
1401             SharedCacheMap->FileObject->FileName.Length != 0)
1402         {
1403             FileName = &SharedCacheMap->FileObject->FileName;
1404         }
1405         else if (SharedCacheMap->FileObject != NULL &&
1406                  SharedCacheMap->FileObject->FsContext != NULL &&
1407                  ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1408                  ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1409                  ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
1410         {
1411             FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
1412             Extra = L" (FastFAT)";
1413         }
1414         else
1415         {
1416             FileName = &NoName;
1417         }
1418 
1419         /* And print */
1420         KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Mapped, Dirty, FileName, Extra);
1421     }
1422 
1423     return TRUE;
1424 }
1425 
1426 BOOLEAN
1427 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1428 {
1429     KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1430               (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1431     KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1432               (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1433     KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1434               (MmAvailablePages * PAGE_SIZE) / 1024);
1435     KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1436               (MmThrottleTop * PAGE_SIZE) / 1024);
1437     KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1438               (MmThrottleBottom * PAGE_SIZE) / 1024);
1439     KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1440               (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1441 
1442     if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1443     {
1444         KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1445     }
1446     else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1447     {
1448         KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1449     }
1450     else
1451     {
1452         KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1453     }
1454 
1455     return TRUE;
1456 }
1457 
1458 #endif // DBG && defined(KDBG)
1459 
1460 /* EOF */
1461