xref: /reactos/ntoskrnl/cc/view.c (revision 40462c92)
1 /*
2  * COPYRIGHT:       See COPYING in the top level directory
3  * PROJECT:         ReactOS kernel
4  * FILE:            ntoskrnl/cc/view.c
5  * PURPOSE:         Cache manager
6  *
7  * PROGRAMMERS:     David Welch (welch@mcmail.com)
8  *                  Pierre Schweitzer (pierre@reactos.org)
9  */
10 
11 /* NOTES **********************************************************************
12  *
13  * This is not the NT implementation of a file cache nor anything much like
14  * it.
15  *
16  * The general procedure for a filesystem to implement a read or write
17  * dispatch routine is as follows
18  *
19  * (1) If caching for the FCB hasn't been initiated, then do so by calling
20  * CcInitializeFileCache.
21  *
22  * (2) For each 4k region which is being read or written, obtain a cache page
23  * by calling CcRequestCachePage.
24  *
25  * (3) If the page is being read, or is not being completely written, and it
26  * is not up to date, then read its data from the underlying medium. If the
27  * read fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
28  *
29  * (4) Copy the data into or out of the page as necessary.
30  *
31  * (5) Release the cache page
32  */
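
/* The helpers actually implemented in this file are the ROS-specific
 * CcRosRequestVacb()/CcRosReleaseVacb() pair, which roughly correspond to the
 * CcRequestCachePage()/CcReleaseCachePage() names used in the steps above.
 * Below is a minimal, non-compiled sketch (hence the #if 0) of how a read
 * dispatch routine could walk those steps for one view-aligned chunk.
 * MyReadChunkFromDisk() is a hypothetical placeholder for the FSD's real
 * non-cached read path, and most error handling is elided.
 */
#if 0
static
NTSTATUS
ExampleCachedRead(
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,    /* must be a multiple of VACB_MAPPING_GRANULARITY */
    ULONG Length,           /* at most VACB_MAPPING_GRANULARITY */
    PVOID Buffer)
{
    PVOID BaseAddress;
    BOOLEAN UptoDate;
    PROS_VACB Vacb;
    NTSTATUS Status;

    /* (2) Obtain the view backing this chunk of the file */
    Status = CcRosRequestVacb(SharedCacheMap, FileOffset, &BaseAddress, &UptoDate, &Vacb);
    if (!NT_SUCCESS(Status))
        return Status;

    /* (3) If the view isn't up to date yet, read it in from the medium */
    if (!UptoDate)
    {
        Status = MyReadChunkFromDisk(SharedCacheMap->FileObject, FileOffset, BaseAddress);
        if (!NT_SUCCESS(Status))
        {
            /* Release with Valid == FALSE and return the error */
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
            return Status;
        }
    }

    /* (4) Copy the data out of the view */
    RtlCopyMemory(Buffer, BaseAddress, Length);

    /* (5) Release the view: now valid, not dirtied, not kept mapped */
    return CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
}
#endif
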
33 /* INCLUDES ******************************************************************/
34 
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38 
39 /* GLOBALS *******************************************************************/
40 
41 LIST_ENTRY DirtyVacbListHead;
42 static LIST_ENTRY VacbLruListHead;
43 
44 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
45 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
46 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
47 
48 /* Internal vars (MS):
49  * - Threshold above which lazy writer will start action
50  * - Amount of dirty pages
51  * - List for deferred writes
52  * - Spinlock when dealing with the deferred list
53  * - List for "clean" shared cache maps
54  */
55 ULONG CcDirtyPageThreshold = 0;
56 ULONG CcTotalDirtyPages = 0;
57 LIST_ENTRY CcDeferredWrites;
58 KSPIN_LOCK CcDeferredWriteSpinLock;
59 LIST_ENTRY CcCleanSharedCacheMapList;
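
/* The write throttling decision taken elsewhere in Cc (e.g. by CcCanIWrite)
 * essentially compares these counters. Roughly, and ignoring the per-file
 * threshold and the Mm available-page checks, a write of PagesToWrite pages
 * is queued on CcDeferredWrites whenever
 *
 *   CcTotalDirtyPages + PagesToWrite >= CcDirtyPageThreshold
 *
 * and is retried once the lazy writer has drained enough dirty pages.
 */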
60 
61 #if DBG
62 ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
63 {
64     ULONG Refs;
65 
66     Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
67     if (vacb->SharedCacheMap->Trace)
68     {
69         DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
70                  file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
71     }
72 
73     return Refs;
74 }
75 ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
76 {
77     ULONG Refs;
78 
79     Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
80     ASSERT(!(Refs == 0 && vacb->Dirty));
81     if (vacb->SharedCacheMap->Trace)
82     {
83         DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
84                  file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
85     }
86 
87     if (Refs == 0)
88     {
89         CcRosInternalFreeVacb(vacb);
90     }
91 
92     return Refs;
93 }
94 ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
95 {
96     ULONG Refs;
97 
98     Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
99     if (vacb->SharedCacheMap->Trace)
100     {
101         DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
102                  file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
103     }
104 
105     return Refs;
106 }
107 #endif
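
/* The callers below use the bare CcRosVacbIncRefCount()/CcRosVacbDecRefCount()
 * names; in DBG builds these are presumably macros (defined in the Cc internal
 * header) that forward the call site, along the lines of:
 *
 *   #define CcRosVacbIncRefCount(v) CcRosVacbIncRefCount_((v), __FILE__, __LINE__)
 *   #define CcRosVacbDecRefCount(v) CcRosVacbDecRefCount_((v), __FILE__, __LINE__)
 *   #define CcRosVacbGetRefCount(v) CcRosVacbGetRefCount_((v), __FILE__, __LINE__)
 *
 * so that per-VACB traces can attribute every reference change to a file and line.
 */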
108 
109 
110 /* FUNCTIONS *****************************************************************/
111 
112 VOID
113 NTAPI
114 CcRosTraceCacheMap (
115     PROS_SHARED_CACHE_MAP SharedCacheMap,
116     BOOLEAN Trace )
117 {
118 #if DBG
119     KIRQL oldirql;
120     PLIST_ENTRY current_entry;
121     PROS_VACB current;
122 
123     if (!SharedCacheMap)
124         return;
125 
126     SharedCacheMap->Trace = Trace;
127 
128     if (Trace)
129     {
130         DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
131 
132         oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
133         KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
134 
135         current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
136         while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
137         {
138             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
139             current_entry = current_entry->Flink;
140 
141             DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
142                     current, current->ReferenceCount, current->Dirty, current->PageOut );
143         }
144 
145         KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
146         KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
147     }
148     else
149     {
150         DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
151     }
152 
153 #else
154     UNREFERENCED_PARAMETER(SharedCacheMap);
155     UNREFERENCED_PARAMETER(Trace);
156 #endif
157 }
158 
159 NTSTATUS
160 NTAPI
161 CcRosFlushVacb (
162     PROS_VACB Vacb)
163 {
164     NTSTATUS Status;
165 
166     CcRosUnmarkDirtyVacb(Vacb, TRUE);
167 
168     Status = CcWriteVirtualAddress(Vacb);
169     if (!NT_SUCCESS(Status))
170     {
171         CcRosMarkDirtyVacb(Vacb);
172     }
173 
174     return Status;
175 }
176 
177 NTSTATUS
178 NTAPI
179 CcRosFlushDirtyPages (
180     ULONG Target,
181     PULONG Count,
182     BOOLEAN Wait,
183     BOOLEAN CalledFromLazy)
184 {
185     PLIST_ENTRY current_entry;
186     PROS_VACB current;
187     BOOLEAN Locked;
188     NTSTATUS Status;
189     KIRQL OldIrql;
190 
191     DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
192 
193     (*Count) = 0;
194 
195     KeEnterCriticalRegion();
196     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
197 
198     current_entry = DirtyVacbListHead.Flink;
199     if (current_entry == &DirtyVacbListHead)
200     {
201         DPRINT("No Dirty pages\n");
202     }
203 
204     while ((current_entry != &DirtyVacbListHead) && (Target > 0))
205     {
206         current = CONTAINING_RECORD(current_entry,
207                                     ROS_VACB,
208                                     DirtyVacbListEntry);
209         current_entry = current_entry->Flink;
210 
211         CcRosVacbIncRefCount(current);
212 
213         /* When performing lazy write, don't handle temporary files */
214         if (CalledFromLazy &&
215             BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
216         {
217             CcRosVacbDecRefCount(current);
218             continue;
219         }
220 
221         /* Don't attempt to lazy write the files that asked not to */
222         if (CalledFromLazy &&
223             BooleanFlagOn(current->SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
224         {
225             CcRosVacbDecRefCount(current);
226             continue;
227         }
228 
229         ASSERT(current->Dirty);
230 
231         KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
232 
233         Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
234                      current->SharedCacheMap->LazyWriteContext, Wait);
235         if (!Locked)
236         {
237             OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
238             CcRosVacbDecRefCount(current);
239             continue;
240         }
241 
242         Status = CcRosFlushVacb(current);
243 
244         current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
245             current->SharedCacheMap->LazyWriteContext);
246 
247         OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
248         CcRosVacbDecRefCount(current);
249 
250         if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
251             (Status != STATUS_MEDIA_WRITE_PROTECTED))
252         {
253             DPRINT1("CC: Failed to flush VACB.\n");
254         }
255         else
256         {
257             ULONG PagesFreed;
258 
259             /* How many pages did we free? */
260             PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
261             (*Count) += PagesFreed;
262 
263             /* Make sure we don't underflow Target! */
264             if (Target < PagesFreed)
265             {
266                 /* If we would have, jump to zero directly */
267                 Target = 0;
268             }
269             else
270             {
271                 Target -= PagesFreed;
272             }
273         }
274 
275         current_entry = DirtyVacbListHead.Flink;
276     }
277 
278     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
279     KeLeaveCriticalRegion();
280 
281     DPRINT("CcRosFlushDirtyPages() finished\n");
282     return STATUS_SUCCESS;
283 }
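
/* Two callers drive CcRosFlushDirtyPages(): CcRosTrimCache() below, which
 * flushes on behalf of the memory balancer with CalledFromLazy == FALSE, and
 * (presumably) the lazy writer scan in lazywrite.c, which passes
 * CalledFromLazy == TRUE so that temporary files and WRITEBEHIND_DISABLED
 * cache maps are skipped, e.g.:
 *
 *   ULONG Flushed;
 *   CcRosFlushDirtyPages(PagesToFlush, &Flushed, FALSE, TRUE);
 *
 * where PagesToFlush is a hypothetical target the caller derives from
 * CcTotalDirtyPages.
 */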
284 
285 NTSTATUS
286 CcRosTrimCache (
287     ULONG Target,
288     ULONG Priority,
289     PULONG NrFreed)
290 /*
291  * FUNCTION: Try to free some memory from the file cache.
292  * ARGUMENTS:
293  *       Target - The number of pages to be freed.
294  *       Priority - The priority of the free operation (currently unused).
295  *       NrFreed - Points to a variable where the number of pages
296  *                 actually freed is returned.
297  */
298 {
299     PLIST_ENTRY current_entry;
300     PROS_VACB current;
301     ULONG PagesFreed;
302     KIRQL oldIrql;
303     LIST_ENTRY FreeList;
304     PFN_NUMBER Page;
305     ULONG i;
306     BOOLEAN FlushedPages = FALSE;
307 
308     DPRINT("CcRosTrimCache(Target %lu)\n", Target);
309 
310     InitializeListHead(&FreeList);
311 
312     *NrFreed = 0;
313 
314 retry:
315     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
316 
317     current_entry = VacbLruListHead.Flink;
318     while (current_entry != &VacbLruListHead)
319     {
320         ULONG Refs;
321 
322         current = CONTAINING_RECORD(current_entry,
323                                     ROS_VACB,
324                                     VacbLruListEntry);
325         current_entry = current_entry->Flink;
326 
327         KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
328 
329         /* Reference the VACB */
330         CcRosVacbIncRefCount(current);
331 
332         /* Check if it's mapped and not dirty */
333         if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
334         {
335             /* We have to break these locks because Cc sucks */
336             KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
337             KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
338 
339             /* Page out the VACB */
340             for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
341             {
342                 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
343 
344                 MmPageOutPhysicalAddress(Page);
345             }
346 
347             /* Reacquire the locks */
348             oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
349             KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
350         }
351 
352         /* Dereference the VACB */
353         Refs = CcRosVacbDecRefCount(current);
354 
355         /* Check if we can free this entry now */
356         if (Refs < 2)
357         {
358             ASSERT(!current->Dirty);
359             ASSERT(!current->MappedCount);
360             ASSERT(Refs == 1);
361 
362             RemoveEntryList(&current->CacheMapVacbListEntry);
363             RemoveEntryList(&current->VacbLruListEntry);
364             InitializeListHead(&current->VacbLruListEntry);
365             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
366 
367             /* Calculate how many pages we freed for Mm */
368             PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
369             Target -= PagesFreed;
370             (*NrFreed) += PagesFreed;
371         }
372 
373         KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
374     }
375 
376     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
377 
378     /* Try flushing pages if we haven't met our target */
379     if ((Target > 0) && !FlushedPages)
380     {
381         /* Flush dirty pages to disk */
382         CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
383         FlushedPages = TRUE;
384 
385         /* We can only swap as many pages as we flushed */
386         if (PagesFreed < Target) Target = PagesFreed;
387 
388         /* Check if we flushed anything */
389         if (PagesFreed != 0)
390         {
391             /* Try again after flushing dirty pages */
392             DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
393             goto retry;
394         }
395     }
396 
397     while (!IsListEmpty(&FreeList))
398     {
399         ULONG Refs;
400 
401         current_entry = RemoveHeadList(&FreeList);
402         current = CONTAINING_RECORD(current_entry,
403                                     ROS_VACB,
404                                     CacheMapVacbListEntry);
405         InitializeListHead(&current->CacheMapVacbListEntry);
406         Refs = CcRosVacbDecRefCount(current);
407         ASSERT(Refs == 0);
408     }
409 
410     DPRINT("Evicted %lu cache pages\n", (*NrFreed));
411 
412     return STATUS_SUCCESS;
413 }
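
/* CcRosTrimCache() is not called from within Cc itself; CcInitView() below
 * registers it as the trim callback for the cache memory consumer:
 *
 *   MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
 *
 * so that the memory balancer can ask the cache to give pages back when the
 * system runs low.
 */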
414 
415 NTSTATUS
416 NTAPI
417 CcRosReleaseVacb (
418     PROS_SHARED_CACHE_MAP SharedCacheMap,
419     PROS_VACB Vacb,
420     BOOLEAN Valid,
421     BOOLEAN Dirty,
422     BOOLEAN Mapped)
423 {
424     ULONG Refs;
425     ASSERT(SharedCacheMap);
426 
427     DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
428            SharedCacheMap, Vacb, Valid);
429 
430     Vacb->Valid = Valid;
431 
432     if (Dirty && !Vacb->Dirty)
433     {
434         CcRosMarkDirtyVacb(Vacb);
435     }
436 
437     if (Mapped)
438     {
439         if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
440         {
441             CcRosVacbIncRefCount(Vacb);
442         }
443     }
444 
445     Refs = CcRosVacbDecRefCount(Vacb);
446     ASSERT(Refs > 0);
447 
448     return STATUS_SUCCESS;
449 }
450 
451 /* Returns with a reference held on the returned VACB! */
452 PROS_VACB
453 NTAPI
454 CcRosLookupVacb (
455     PROS_SHARED_CACHE_MAP SharedCacheMap,
456     LONGLONG FileOffset)
457 {
458     PLIST_ENTRY current_entry;
459     PROS_VACB current;
460     KIRQL oldIrql;
461 
462     ASSERT(SharedCacheMap);
463 
464     DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
465            SharedCacheMap, FileOffset);
466 
467     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
468     KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
469 
470     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
471     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
472     {
473         current = CONTAINING_RECORD(current_entry,
474                                     ROS_VACB,
475                                     CacheMapVacbListEntry);
476         if (IsPointInRange(current->FileOffset.QuadPart,
477                            VACB_MAPPING_GRANULARITY,
478                            FileOffset))
479         {
480             CcRosVacbIncRefCount(current);
481             KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
482             KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
483             return current;
484         }
485         if (current->FileOffset.QuadPart > FileOffset)
486             break;
487         current_entry = current_entry->Flink;
488     }
489 
490     KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
491     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
492 
493     return NULL;
494 }
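
/* A successful lookup returns the VACB with an extra reference taken; callers
 * drop it again through CcRosReleaseVacb() (or a bare CcRosVacbDecRefCount()),
 * as in the pattern used by CcFlushCache() below:
 *
 *   Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
 *   if (Vacb != NULL)
 *   {
 *       ... work with the view ...
 *       CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, FALSE, FALSE);
 *   }
 */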
495 
496 VOID
497 NTAPI
498 CcRosMarkDirtyVacb (
499     PROS_VACB Vacb)
500 {
501     KIRQL oldIrql;
502     PROS_SHARED_CACHE_MAP SharedCacheMap;
503 
504     SharedCacheMap = Vacb->SharedCacheMap;
505 
506     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
507     KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
508 
509     ASSERT(!Vacb->Dirty);
510 
511     InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
512     CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
513     Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
514     CcRosVacbIncRefCount(Vacb);
515 
516     /* Move to the tail of the LRU list */
517     RemoveEntryList(&Vacb->VacbLruListEntry);
518     InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
519 
520     Vacb->Dirty = TRUE;
521 
522     KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
523 
524     /* Schedule a lazy writer run now that we have a dirty VACB */
525     if (!LazyWriter.ScanActive)
526     {
527         CcScheduleLazyWriteScan(FALSE);
528     }
529     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
530 }
531 
532 VOID
533 NTAPI
534 CcRosUnmarkDirtyVacb (
535     PROS_VACB Vacb,
536     BOOLEAN LockViews)
537 {
538     KIRQL oldIrql;
539     PROS_SHARED_CACHE_MAP SharedCacheMap;
540 
541     SharedCacheMap = Vacb->SharedCacheMap;
542 
543     if (LockViews)
544     {
545         oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
546         KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
547     }
548 
549     ASSERT(Vacb->Dirty);
550 
551     Vacb->Dirty = FALSE;
552 
553     RemoveEntryList(&Vacb->DirtyVacbListEntry);
554     InitializeListHead(&Vacb->DirtyVacbListEntry);
555     CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
556     Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
557     CcRosVacbDecRefCount(Vacb);
558 
559     if (LockViews)
560     {
561         KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
562         KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
563     }
564 }
565 
566 NTSTATUS
567 NTAPI
568 CcRosMarkDirtyFile (
569     PROS_SHARED_CACHE_MAP SharedCacheMap,
570     LONGLONG FileOffset)
571 {
572     PROS_VACB Vacb;
573 
574     ASSERT(SharedCacheMap);
575 
576     DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
577            SharedCacheMap, FileOffset);
578 
579     Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
580     if (Vacb == NULL)
581     {
582         KeBugCheck(CACHE_MANAGER);
583     }
584 
585     CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
586 
587     return STATUS_SUCCESS;
588 }
589 
590 /*
591  * Note: despite its name, this is not the inverse of
592  * CcRosMapVacbInKernelSpace()
593  */
594 NTSTATUS
595 NTAPI
596 CcRosUnmapVacb (
597     PROS_SHARED_CACHE_MAP SharedCacheMap,
598     LONGLONG FileOffset,
599     BOOLEAN NowDirty)
600 {
601     PROS_VACB Vacb;
602 
603     ASSERT(SharedCacheMap);
604 
605     DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
606            SharedCacheMap, FileOffset, NowDirty);
607 
608     Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
609     if (Vacb == NULL)
610     {
611         return STATUS_UNSUCCESSFUL;
612     }
613 
614     ASSERT(Vacb->MappedCount != 0);
615     if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
616     {
617         CcRosVacbDecRefCount(Vacb);
618     }
619 
620     CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
621 
622     return STATUS_SUCCESS;
623 }
624 
625 static
626 NTSTATUS
627 CcRosMapVacbInKernelSpace(
628     PROS_VACB Vacb)
629 {
630     ULONG i;
631     NTSTATUS Status;
632     ULONG_PTR NumberOfPages;
633     PVOID BaseAddress = NULL;
634 
635     /* Create a memory area. */
636     MmLockAddressSpace(MmGetKernelAddressSpace());
637     Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
638                                 0, // nothing checks for VACB mareas, so set to 0
639                                 &BaseAddress,
640                                 VACB_MAPPING_GRANULARITY,
641                                 PAGE_READWRITE,
642                                 (PMEMORY_AREA*)&Vacb->MemoryArea,
643                                 0,
644                                 PAGE_SIZE);
645     ASSERT(Vacb->BaseAddress == NULL);
646     Vacb->BaseAddress = BaseAddress;
647     MmUnlockAddressSpace(MmGetKernelAddressSpace());
648     if (!NT_SUCCESS(Status))
649     {
650         DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
651         return Status;
652     }
653 
654     ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
655     ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
656     ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
657 
658     /* Create a virtual mapping for this memory area */
659     NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
660     for (i = 0; i < NumberOfPages; i++)
661     {
662         PFN_NUMBER PageFrameNumber;
663 
664         MI_SET_USAGE(MI_USAGE_CACHE);
665         Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
666         if (PageFrameNumber == 0)
667         {
668             DPRINT1("Unable to allocate page\n");
669             KeBugCheck(MEMORY_MANAGEMENT);
670         }
671 
672         ASSERT(BaseAddress == Vacb->BaseAddress);
673         ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
674         ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
675         ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
676 
677         Status = MmCreateVirtualMapping(NULL,
678                                         (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
679                                         PAGE_READWRITE,
680                                         &PageFrameNumber,
681                                         1);
682         if (!NT_SUCCESS(Status))
683         {
684             DPRINT1("Unable to create virtual mapping\n");
685             KeBugCheck(MEMORY_MANAGEMENT);
686         }
687     }
688 
689     return STATUS_SUCCESS;
690 }
691 
692 static
693 BOOLEAN
694 CcRosFreeUnusedVacb (
695     PULONG Count)
696 {
697     ULONG cFreed;
698     BOOLEAN Freed;
699     KIRQL oldIrql;
700     PROS_VACB current;
701     LIST_ENTRY FreeList;
702     PLIST_ENTRY current_entry;
703 
704     cFreed = 0;
705     Freed = FALSE;
706     InitializeListHead(&FreeList);
707 
708     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
709 
710     /* Browse all the available VACB */
711     current_entry = VacbLruListHead.Flink;
712     while (current_entry != &VacbLruListHead)
713     {
714         ULONG Refs;
715 
716         current = CONTAINING_RECORD(current_entry,
717                                     ROS_VACB,
718                                     VacbLruListEntry);
719         current_entry = current_entry->Flink;
720 
721         KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
722 
723         /* Only deal with unused VACB, we will free them */
724         Refs = CcRosVacbGetRefCount(current);
725         if (Refs < 2)
726         {
727             ASSERT(!current->Dirty);
728             ASSERT(!current->MappedCount);
729             ASSERT(Refs == 1);
730 
731             /* Reset and move to free list */
732             RemoveEntryList(&current->CacheMapVacbListEntry);
733             RemoveEntryList(&current->VacbLruListEntry);
734             InitializeListHead(&current->VacbLruListEntry);
735             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
736         }
737 
738         KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
739 
740     }
741 
742     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
743 
744     /* And now, free any of the found VACB, that'll free memory! */
745     while (!IsListEmpty(&FreeList))
746     {
747         ULONG Refs;
748 
749         current_entry = RemoveHeadList(&FreeList);
750         current = CONTAINING_RECORD(current_entry,
751                                     ROS_VACB,
752                                     CacheMapVacbListEntry);
753         InitializeListHead(&current->CacheMapVacbListEntry);
754         Refs = CcRosVacbDecRefCount(current);
755         ASSERT(Refs == 0);
756         ++cFreed;
757     }
758 
759     /* If we freed at least one VACB, return success */
760     if (cFreed != 0)
761     {
762         Freed = TRUE;
763     }
764 
765     /* If caller asked for free count, return it */
766     if (Count != NULL)
767     {
768         *Count = cFreed;
769     }
770 
771     return Freed;
772 }
773 
774 static
775 NTSTATUS
776 CcRosCreateVacb (
777     PROS_SHARED_CACHE_MAP SharedCacheMap,
778     LONGLONG FileOffset,
779     PROS_VACB *Vacb)
780 {
781     PROS_VACB current;
782     PROS_VACB previous;
783     PLIST_ENTRY current_entry;
784     NTSTATUS Status;
785     KIRQL oldIrql;
786     ULONG Refs;
787     BOOLEAN Retried;
788 
789     ASSERT(SharedCacheMap);
790 
791     DPRINT("CcRosCreateVacb()\n");
792 
793     if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
794     {
795         *Vacb = NULL;
796         return STATUS_INVALID_PARAMETER;
797     }
798 
799     current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
800     current->BaseAddress = NULL;
801     current->Valid = FALSE;
802     current->Dirty = FALSE;
803     current->PageOut = FALSE;
804     current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
805     current->SharedCacheMap = SharedCacheMap;
806 #if DBG
807     if (SharedCacheMap->Trace)
808     {
809         DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
810     }
811 #endif
812     current->MappedCount = 0;
813     current->ReferenceCount = 0;
814     InitializeListHead(&current->CacheMapVacbListEntry);
815     InitializeListHead(&current->DirtyVacbListEntry);
816     InitializeListHead(&current->VacbLruListEntry);
817 
818     CcRosVacbIncRefCount(current);
819 
820     Retried = FALSE;
821 Retry:
822     /* Map VACB in kernel space */
823     Status = CcRosMapVacbInKernelSpace(current);
824     if (!NT_SUCCESS(Status))
825     {
826         ULONG Freed;
827         /* If no space is left, try to prune unused VACBs
828          * to recover space to map our VACB.
829          * If that succeeds, retry the mapping; otherwise
830          * just fail.
831          */
832         if (!Retried && CcRosFreeUnusedVacb(&Freed))
833         {
834             DPRINT("Pruned %lu VACBs, trying again\n", Freed);
835             Retried = TRUE;
836             goto Retry;
837         }
838 
839         ExFreeToNPagedLookasideList(&VacbLookasideList, current);
840         return Status;
841     }
842 
843     oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
844 
845     *Vacb = current;
846     /* There is a window between the call to CcRosLookupVacb
847      * and CcRosCreateVacb. We must check whether a VACB for this
848      * file offset already exists. If one does, we release
849      * our newly created VACB and return the existing one.
850      */
851     KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
852     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
853     previous = NULL;
854     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
855     {
856         current = CONTAINING_RECORD(current_entry,
857                                     ROS_VACB,
858                                     CacheMapVacbListEntry);
859         if (IsPointInRange(current->FileOffset.QuadPart,
860                            VACB_MAPPING_GRANULARITY,
861                            FileOffset))
862         {
863             CcRosVacbIncRefCount(current);
864             KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
865 #if DBG
866             if (SharedCacheMap->Trace)
867             {
868                 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
869                         SharedCacheMap,
870                         (*Vacb),
871                         current);
872             }
873 #endif
874             KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
875 
876             Refs = CcRosVacbDecRefCount(*Vacb);
877             ASSERT(Refs == 0);
878 
879             *Vacb = current;
880             return STATUS_SUCCESS;
881         }
882         if (current->FileOffset.QuadPart < FileOffset)
883         {
884             ASSERT(previous == NULL ||
885                    previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
886             previous = current;
887         }
888         if (current->FileOffset.QuadPart > FileOffset)
889             break;
890         current_entry = current_entry->Flink;
891     }
892     /* There was no existing VACB. */
893     current = *Vacb;
894     if (previous)
895     {
896         InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
897     }
898     else
899     {
900         InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
901     }
902     KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
903     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
904     KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
905 
906     MI_SET_USAGE(MI_USAGE_CACHE);
907 #if MI_TRACE_PFNS
908     if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
909     {
910         PWCHAR pos;
911         ULONG len = 0;
912         pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
913         if (pos)
914         {
915             len = wcslen(pos) * sizeof(WCHAR);
916             snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
917         }
918         else
919         {
920             snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
921         }
922     }
923 #endif
924 
925     /* Reference it to allow release */
926     CcRosVacbIncRefCount(current);
927 
928     return Status;
929 }
930 
931 NTSTATUS
932 NTAPI
933 CcRosGetVacb (
934     PROS_SHARED_CACHE_MAP SharedCacheMap,
935     LONGLONG FileOffset,
936     PLONGLONG BaseOffset,
937     PVOID* BaseAddress,
938     PBOOLEAN UptoDate,
939     PROS_VACB *Vacb)
940 {
941     PROS_VACB current;
942     NTSTATUS Status;
943     ULONG Refs;
944     KIRQL OldIrql;
945 
946     ASSERT(SharedCacheMap);
947 
948     DPRINT("CcRosGetVacb()\n");
949 
950     /*
951      * Look for a VACB already mapping the same data.
952      */
953     current = CcRosLookupVacb(SharedCacheMap, FileOffset);
954     if (current == NULL)
955     {
956         /*
957          * Otherwise create a new VACB.
958          */
959         Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
960         if (!NT_SUCCESS(Status))
961         {
962             return Status;
963         }
964     }
965 
966     Refs = CcRosVacbGetRefCount(current);
967 
968     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
969 
970     /* Move to the tail of the LRU list */
971     RemoveEntryList(&current->VacbLruListEntry);
972     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
973 
974     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
975 
976     /*
977      * Return information about the VACB to the caller.
978      */
979     *UptoDate = current->Valid;
980     *BaseAddress = current->BaseAddress;
981     DPRINT("*BaseAddress %p\n", *BaseAddress);
982     *Vacb = current;
983     *BaseOffset = current->FileOffset.QuadPart;
984 
985     ASSERT(Refs > 1);
986 
987     return STATUS_SUCCESS;
988 }
989 
990 NTSTATUS
991 NTAPI
992 CcRosRequestVacb (
993     PROS_SHARED_CACHE_MAP SharedCacheMap,
994     LONGLONG FileOffset,
995     PVOID* BaseAddress,
996     PBOOLEAN UptoDate,
997     PROS_VACB *Vacb)
998 /*
999  * FUNCTION: Request a page mapping for a shared cache map
1000  */
1001 {
1002     LONGLONG BaseOffset;
1003 
1004     ASSERT(SharedCacheMap);
1005 
1006     if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
1007     {
1008         DPRINT1("Bad fileoffset %I64x, should be a multiple of %x\n",
1009                 FileOffset, VACB_MAPPING_GRANULARITY);
1010         KeBugCheck(CACHE_MANAGER);
1011     }
1012 
1013     return CcRosGetVacb(SharedCacheMap,
1014                         FileOffset,
1015                         &BaseOffset,
1016                         BaseAddress,
1017                         UptoDate,
1018                         Vacb);
1019 }
1020 
1021 static
1022 VOID
1023 CcFreeCachePage (
1024     PVOID Context,
1025     MEMORY_AREA* MemoryArea,
1026     PVOID Address,
1027     PFN_NUMBER Page,
1028     SWAPENTRY SwapEntry,
1029     BOOLEAN Dirty)
1030 {
1031     ASSERT(SwapEntry == 0);
1032     if (Page != 0)
1033     {
1034         ASSERT(MmGetReferenceCountPage(Page) == 1);
1035         MmReleasePageMemoryConsumer(MC_CACHE, Page);
1036     }
1037 }
1038 
1039 NTSTATUS
1040 CcRosInternalFreeVacb (
1041     PROS_VACB Vacb)
1042 /*
1043  * FUNCTION: Releases a VACB associated with a shared cache map
1044  */
1045 {
1046     DPRINT("Freeing VACB 0x%p\n", Vacb);
1047 #if DBG
1048     if (Vacb->SharedCacheMap->Trace)
1049     {
1050         DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1051     }
1052 #endif
1053 
1054     MmLockAddressSpace(MmGetKernelAddressSpace());
1055     MmFreeMemoryArea(MmGetKernelAddressSpace(),
1056                      Vacb->MemoryArea,
1057                      CcFreeCachePage,
1058                      NULL);
1059     MmUnlockAddressSpace(MmGetKernelAddressSpace());
1060 
1061     if (Vacb->ReferenceCount != 0)
1062     {
1063         DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
1064         if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
1065         {
1066             DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
1067         }
1068     }
1069 
1070     ASSERT(Vacb->ReferenceCount == 0);
1071     ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
1072     ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
1073     ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
1074     RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
1075     ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1076     return STATUS_SUCCESS;
1077 }
1078 
1079 /*
1080  * @implemented
1081  */
1082 VOID
1083 NTAPI
1084 CcFlushCache (
1085     IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1086     IN PLARGE_INTEGER FileOffset OPTIONAL,
1087     IN ULONG Length,
1088     OUT PIO_STATUS_BLOCK IoStatus)
1089 {
1090     PROS_SHARED_CACHE_MAP SharedCacheMap;
1091     LARGE_INTEGER Offset;
1092     LONGLONG RemainingLength;
1093     PROS_VACB current;
1094     NTSTATUS Status;
1095 
1096     CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1097         SectionObjectPointers, FileOffset, Length);
1098 
1099     DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1100            SectionObjectPointers, FileOffset, Length, IoStatus);
1101 
1102     if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1103     {
1104         SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1105         ASSERT(SharedCacheMap);
1106         if (FileOffset)
1107         {
1108             Offset = *FileOffset;
1109             RemainingLength = Length;
1110         }
1111         else
1112         {
1113             Offset.QuadPart = 0;
1114             RemainingLength = SharedCacheMap->FileSize.QuadPart;
1115         }
1116 
1117         if (IoStatus)
1118         {
1119             IoStatus->Status = STATUS_SUCCESS;
1120             IoStatus->Information = 0;
1121         }
1122 
1123         while (RemainingLength > 0)
1124         {
1125             current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1126             if (current != NULL)
1127             {
1128                 if (current->Dirty)
1129                 {
1130                     Status = CcRosFlushVacb(current);
1131                     if (!NT_SUCCESS(Status) && IoStatus != NULL)
1132                     {
1133                         IoStatus->Status = Status;
1134                     }
1135                 }
1136 
1137                 CcRosReleaseVacb(SharedCacheMap, current, current->Valid, FALSE, FALSE);
1138             }
1139 
1140             Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1141             RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1142         }
1143     }
1144     else
1145     {
1146         if (IoStatus)
1147         {
1148             IoStatus->Status = STATUS_INVALID_PARAMETER;
1149         }
1150     }
1151 }
1152 
1153 NTSTATUS
1154 NTAPI
1155 CcRosDeleteFileCache (
1156     PFILE_OBJECT FileObject,
1157     PROS_SHARED_CACHE_MAP SharedCacheMap,
1158     PKIRQL OldIrql)
1159 /*
1160  * FUNCTION: Releases the shared cache map associated with a file object
1161  */
1162 {
1163     PLIST_ENTRY current_entry;
1164     PROS_VACB current;
1165     LIST_ENTRY FreeList;
1166 
1167     ASSERT(SharedCacheMap);
1168 
1169     SharedCacheMap->OpenCount++;
1170     KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
1171 
1172     CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1173 
1174     *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1175     SharedCacheMap->OpenCount--;
1176     if (SharedCacheMap->OpenCount == 0)
1177     {
1178         FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1179 
1180         /*
1181          * Release all VACBs
1182          */
1183         InitializeListHead(&FreeList);
1184         KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1185         while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1186         {
1187             current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1188             KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1189 
1190             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1191             RemoveEntryList(&current->VacbLruListEntry);
1192             InitializeListHead(&current->VacbLruListEntry);
1193             if (current->Dirty)
1194             {
1195                 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1196                 CcRosUnmarkDirtyVacb(current, FALSE);
1197                 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1198                 DPRINT1("Freeing dirty VACB\n");
1199             }
1200             if (current->MappedCount != 0)
1201             {
1202                 current->MappedCount = 0;
1203                 NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
1204                 DPRINT1("Freeing mapped VACB\n");
1205             }
1206             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1207 
1208             KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1209         }
1210 #if DBG
1211         SharedCacheMap->Trace = FALSE;
1212 #endif
1213         KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1214 
1215         KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
1216         ObDereferenceObject(SharedCacheMap->FileObject);
1217 
1218         while (!IsListEmpty(&FreeList))
1219         {
1220             ULONG Refs;
1221 
1222             current_entry = RemoveTailList(&FreeList);
1223             current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1224             InitializeListHead(&current->CacheMapVacbListEntry);
1225             Refs = CcRosVacbDecRefCount(current);
1226 #if DBG // CORE-14578
1227             if (Refs != 0)
1228             {
1229                 DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
1230                 DPRINT1("There are: %d references left\n", Refs);
1231                 DPRINT1("Map: %d\n", current->MappedCount);
1232                 DPRINT1("Dirty: %d\n", current->Dirty);
1233                 if (FileObject->FileName.Length != 0)
1234                 {
1235                     DPRINT1("File was: %wZ\n", &FileObject->FileName);
1236                 }
1237                 else if (FileObject->FsContext != NULL &&
1238                          ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1239                          ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1240                          ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
1241                 {
1242                     DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
1243                 }
1244                 else
1245                 {
1246                     DPRINT1("No name for the file\n");
1247                 }
1248             }
1249 #else
1250             ASSERT(Refs == 0);
1251 #endif
1252         }
1253 
1254         *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1255         RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1256         KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
1257 
1258         ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1259         *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1260     }
1261     return STATUS_SUCCESS;
1262 }
1263 
1264 VOID
1265 NTAPI
1266 CcRosReferenceCache (
1267     PFILE_OBJECT FileObject)
1268 {
1269     PROS_SHARED_CACHE_MAP SharedCacheMap;
1270     KIRQL OldIrql;
1271 
1272     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1273     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1274     ASSERT(SharedCacheMap);
1275     ASSERT(SharedCacheMap->OpenCount != 0);
1276     SharedCacheMap->OpenCount++;
1277     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1278 }
1279 
1280 VOID
1281 NTAPI
1282 CcRosRemoveIfClosed (
1283     PSECTION_OBJECT_POINTERS SectionObjectPointer)
1284 {
1285     PROS_SHARED_CACHE_MAP SharedCacheMap;
1286     KIRQL OldIrql;
1287 
1288     DPRINT("CcRosRemoveIfClosed()\n");
1289     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1290     SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1291     if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1292     {
1293         CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
1294     }
1295     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1296 }
1297 
1298 
1299 VOID
1300 NTAPI
1301 CcRosDereferenceCache (
1302     PFILE_OBJECT FileObject)
1303 {
1304     PROS_SHARED_CACHE_MAP SharedCacheMap;
1305     KIRQL OldIrql;
1306 
1307     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1308     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1309     ASSERT(SharedCacheMap);
1310     if (SharedCacheMap->OpenCount > 0)
1311     {
1312         SharedCacheMap->OpenCount--;
1313         if (SharedCacheMap->OpenCount == 0)
1314         {
1315             KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1316             MmFreeSectionSegments(SharedCacheMap->FileObject);
1317 
1318             OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1319             CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
1320             KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1321 
1322             return;
1323         }
1324     }
1325     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1326 }
1327 
1328 NTSTATUS
1329 NTAPI
1330 CcRosReleaseFileCache (
1331     PFILE_OBJECT FileObject)
1332 /*
1333  * FUNCTION: Called by the file system when a handle to a file object
1334  * has been closed.
1335  */
1336 {
1337     KIRQL OldIrql;
1338     PPRIVATE_CACHE_MAP PrivateMap;
1339     PROS_SHARED_CACHE_MAP SharedCacheMap;
1340 
1341     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1342 
1343     if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1344     {
1345         SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1346 
1347         /* Closing the handle, so kill the private cache map.
1348          * Before you even try to remove it from the FO, always
1349          * lock the master lock, to be sure not to race
1350          * with a potentially ongoing read ahead!
1351          */
1352         PrivateMap = FileObject->PrivateCacheMap;
1353         FileObject->PrivateCacheMap = NULL;
1354 
1355         if (PrivateMap != NULL)
1356         {
1357             /* Remove it from the file */
1358             KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1359             RemoveEntryList(&PrivateMap->PrivateLinks);
1360             KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1361 
1362             /* And free it. */
1363             if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
1364             {
1365                 ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
1366             }
1367             else
1368             {
1369                 PrivateMap->NodeTypeCode = 0;
1370             }
1371 
1372             if (SharedCacheMap->OpenCount > 0)
1373             {
1374                 SharedCacheMap->OpenCount--;
1375                 if (SharedCacheMap->OpenCount == 0)
1376                 {
1377                     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1378                     MmFreeSectionSegments(SharedCacheMap->FileObject);
1379 
1380                     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1381                     CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
1382                     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1383 
1384                     return STATUS_SUCCESS;
1385                 }
1386             }
1387         }
1388     }
1389     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1390     return STATUS_SUCCESS;
1391 }
1392 
1393 NTSTATUS
1394 NTAPI
1395 CcRosInitializeFileCache (
1396     PFILE_OBJECT FileObject,
1397     PCC_FILE_SIZES FileSizes,
1398     BOOLEAN PinAccess,
1399     PCACHE_MANAGER_CALLBACKS CallBacks,
1400     PVOID LazyWriterContext)
1401 /*
1402  * FUNCTION: Initializes a shared cache map for a file object
1403  */
1404 {
1405     KIRQL OldIrql;
1406     BOOLEAN Allocated;
1407     PROS_SHARED_CACHE_MAP SharedCacheMap;
1408 
1409     DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
1410 
1411     Allocated = FALSE;
1412     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1413     if (SharedCacheMap == NULL)
1414     {
1415         Allocated = TRUE;
1416         SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1417         if (SharedCacheMap == NULL)
1418         {
1419             return STATUS_INSUFFICIENT_RESOURCES;
1420         }
1421         RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1422         SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
1423         SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
1424         SharedCacheMap->FileObject = FileObject;
1425         SharedCacheMap->Callbacks = CallBacks;
1426         SharedCacheMap->LazyWriteContext = LazyWriterContext;
1427         SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1428         SharedCacheMap->FileSize = FileSizes->FileSize;
1429         SharedCacheMap->PinAccess = PinAccess;
1430         SharedCacheMap->DirtyPageThreshold = 0;
1431         SharedCacheMap->DirtyPages = 0;
1432         InitializeListHead(&SharedCacheMap->PrivateList);
1433         KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1434         InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1435         InitializeListHead(&SharedCacheMap->BcbList);
1436     }
1437 
1438     OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1439     if (Allocated)
1440     {
1441         if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
1442         {
1443             ObReferenceObjectByPointer(FileObject,
1444                                        FILE_ALL_ACCESS,
1445                                        NULL,
1446                                        KernelMode);
1447             FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1448 
1449             InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1450         }
1451         else
1452         {
1453             ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1454             SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1455         }
1456     }
1457     if (FileObject->PrivateCacheMap == NULL)
1458     {
1459         PPRIVATE_CACHE_MAP PrivateMap;
1460 
1461         /* Allocate the private cache map for this handle */
1462         if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
1463         {
1464             PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
1465         }
1466         else
1467         {
1468             PrivateMap = &SharedCacheMap->PrivateCacheMap;
1469         }
1470 
1471         if (PrivateMap == NULL)
1472         {
1473             /* If we also allocated the shared cache map for this file, kill it */
1474             if (Allocated)
1475             {
1476                 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1477 
1478                 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1479                 ObDereferenceObject(FileObject);
1480                 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1481             }
1482 
1483             KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1484             return STATUS_INSUFFICIENT_RESOURCES;
1485         }
1486 
1487         /* Initialize it */
1488         RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
1489         PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
1490         PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
1491         PrivateMap->FileObject = FileObject;
1492         KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
1493 
1494         /* Link it to the file */
1495         KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1496         InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
1497         KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1498 
1499         FileObject->PrivateCacheMap = PrivateMap;
1500         SharedCacheMap->OpenCount++;
1501     }
1502     KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1503 
1504     return STATUS_SUCCESS;
1505 }
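
/* Filesystems normally reach CcRosInitializeFileCache() through the public
 * CcInitializeCacheMap() entry point (implemented elsewhere in Cc, presumably
 * fssup.c). A minimal sketch of the FSD side, with hypothetical Fcb fields:
 *
 *   CC_FILE_SIZES Sizes;
 *
 *   Sizes.AllocationSize  = Fcb->AllocationSize;
 *   Sizes.FileSize        = Fcb->FileSize;
 *   Sizes.ValidDataLength = Fcb->ValidDataLength;
 *
 *   CcInitializeCacheMap(FileObject, &Sizes, FALSE, &FsdCacheManagerCallbacks, Fcb);
 *
 * After this, each handle that touches the cache gets a private cache map,
 * while the shared cache map stays around until its last reference is gone.
 */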
1506 
1507 /*
1508  * @implemented
1509  */
1510 PFILE_OBJECT
1511 NTAPI
1512 CcGetFileObjectFromSectionPtrs (
1513     IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1514 {
1515     PROS_SHARED_CACHE_MAP SharedCacheMap;
1516 
1517     CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1518 
1519     if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1520     {
1521         SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1522         ASSERT(SharedCacheMap);
1523         return SharedCacheMap->FileObject;
1524     }
1525     return NULL;
1526 }
1527 
1528 CODE_SEG("INIT")
1529 VOID
1530 NTAPI
1531 CcInitView (
1532     VOID)
1533 {
1534     DPRINT("CcInitView()\n");
1535 
1536     InitializeListHead(&DirtyVacbListHead);
1537     InitializeListHead(&VacbLruListHead);
1538     InitializeListHead(&CcDeferredWrites);
1539     InitializeListHead(&CcCleanSharedCacheMapList);
1540     KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1541     ExInitializeNPagedLookasideList(&iBcbLookasideList,
1542                                     NULL,
1543                                     NULL,
1544                                     0,
1545                                     sizeof(INTERNAL_BCB),
1546                                     TAG_BCB,
1547                                     20);
1548     ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1549                                     NULL,
1550                                     NULL,
1551                                     0,
1552                                     sizeof(ROS_SHARED_CACHE_MAP),
1553                                     TAG_SHARED_CACHE_MAP,
1554                                     20);
1555     ExInitializeNPagedLookasideList(&VacbLookasideList,
1556                                     NULL,
1557                                     NULL,
1558                                     0,
1559                                     sizeof(ROS_VACB),
1560                                     TAG_VACB,
1561                                     20);
1562 
1563     MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1564 
1565     CcInitCacheZeroPage();
1566 }
1567 
1568 #if DBG && defined(KDBG)
1569 BOOLEAN
1570 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1571 {
1572     PLIST_ENTRY ListEntry;
1573     UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1574 
1575     KdbpPrint("  Usage Summary (in kb)\n");
1576     KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1577     /* No need to lock the spin lock here, we're running in the kernel debugger */
1578     for (ListEntry = CcCleanSharedCacheMapList.Flink;
1579          ListEntry != &CcCleanSharedCacheMapList;
1580          ListEntry = ListEntry->Flink)
1581     {
1582         PLIST_ENTRY Vacbs;
1583         ULONG Valid = 0, Dirty = 0;
1584         PROS_SHARED_CACHE_MAP SharedCacheMap;
1585         PUNICODE_STRING FileName;
1586         PWSTR Extra = L"";
1587 
1588         SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1589 
1590         /* Dirty size */
1591         Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1592 
1593         /* First, walk all the associated VACBs and add up the valid size */
1594         for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1595              Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1596              Vacbs = Vacbs->Flink)
1597         {
1598             PROS_VACB Vacb;
1599 
1600             Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1601             if (Vacb->Valid)
1602             {
1603                 Valid += VACB_MAPPING_GRANULARITY / 1024;
1604             }
1605         }
1606 
1607         /* Setup name */
1608         if (SharedCacheMap->FileObject != NULL &&
1609             SharedCacheMap->FileObject->FileName.Length != 0)
1610         {
1611             FileName = &SharedCacheMap->FileObject->FileName;
1612         }
1613         else if (SharedCacheMap->FileObject != NULL &&
1614                  SharedCacheMap->FileObject->FsContext != NULL &&
1615                  ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1616                  ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1617                  ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
1618         {
1619             FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
1620             Extra = L" (FastFAT)";
1621         }
1622         else
1623         {
1624             FileName = &NoName;
1625         }
1626 
1627         /* And print */
1628         KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
1629     }
1630 
1631     return TRUE;
1632 }
1633 
1634 BOOLEAN
1635 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1636 {
1637     KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1638               (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1639     KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1640               (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1641     KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1642               (MmAvailablePages * PAGE_SIZE) / 1024);
1643     KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1644               (MmThrottleTop * PAGE_SIZE) / 1024);
1645     KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1646               (MmThrottleBottom * PAGE_SIZE) / 1024);
1647     KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1648               (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1649 
1650     if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1651     {
1652         KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1653     }
1654     else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1655     {
1656         KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1657     }
1658     else
1659     {
1660         KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1661     }
1662 
1663     return TRUE;
1664 }
1665 #endif
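
/* These two helpers are kdb extensions; they are presumably registered in the
 * kdb command table (kdb_cli.c) under names such as "filecache" and
 * "defwrites", so the summaries above can be dumped from the built-in kernel
 * debugger prompt.
 */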
1666 
1667 /* EOF */
1668