/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated, then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written, obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If the page is being read, or is not being completely written, and it
 * is not up to date, then read its data from the underlying medium. If the
 * read fails, call CcReleaseCachePage with VALID as FALSE and return an
 * error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 */
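
/*
 * The sketch below illustrates the read-dispatch procedure described in the
 * note above. It is only an illustration and is excluded from the build:
 * the FCB layout, the ExampleRead/ExampleReadFromDisk routines and the exact
 * parameter lists of CcInitializeFileCache, CcRequestCachePage and
 * CcReleaseCachePage are hypothetical and follow the legacy naming used in
 * the note, not the interfaces implemented in this file.
 */
#if 0
static NTSTATUS
ExampleRead(PFCB Fcb, LONGLONG FileOffset, ULONG Length, PVOID Buffer)
{
    NTSTATUS Status;
    PVOID CacheAddress;
    BOOLEAN UptoDate;
    PCACHE_PAGE CachePage;
    ULONG Done;

    /* (1) Initiate caching for the FCB on first use (hypothetical flag) */
    if (!Fcb->CacheInitialized)
    {
        CcInitializeFileCache(Fcb->FileObject);
        Fcb->CacheInitialized = TRUE;
    }

    /* (2) Handle the request one 4k cache page at a time */
    for (Done = 0; Done < Length; Done += PAGE_SIZE)
    {
        CcRequestCachePage(Fcb, FileOffset + Done, &CacheAddress, &UptoDate, &CachePage);

        /* (3) A page that is not up to date must be read from the medium first */
        if (!UptoDate)
        {
            Status = ExampleReadFromDisk(Fcb, FileOffset + Done, CacheAddress);
            if (!NT_SUCCESS(Status))
            {
                /* Read failed: release with VALID == FALSE and return the error */
                CcReleaseCachePage(Fcb, CachePage, FALSE);
                return Status;
            }
        }

        /* (4) Copy the cached data out to the caller's buffer */
        RtlCopyMemory((PUCHAR)Buffer + Done, CacheAddress,
                      min(PAGE_SIZE, Length - Done));

        /* (5) Release the cache page, now valid */
        CcReleaseCachePage(Fcb, CachePage, TRUE);
    }

    return STATUS_SUCCESS;
}
#endif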
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, CcInitView)
#endif

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

KGUARDED_MUTEX ViewLock;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

#if DBG
VOID CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
VOID CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ASSERT(vacb->ReferenceCount != 0);
    --vacb->ReferenceCount;
    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#endif

NTSTATUS
CcRosInternalFreeVacb(PROS_VACB Vacb);


/* FUNCTIONS *****************************************************************/

VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;

    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        CcRosUnmarkDirtyVacb(Vacb, TRUE);
    }

    return Status;
}

NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (current->ReferenceCount > 2)
        {
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't underflow Target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(current->ReferenceCount == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosVacbDecRefCount(current);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    CcRosVacbDecRefCount(Vacb);
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }

    ASSERT(Vacb->ReferenceCount != 0);

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}

/* Returns with VACB Lock Held! */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}

VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}

VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}

NTSTATUS
NTAPI
CcRosMarkDirtyFile (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);

    return STATUS_SUCCESS;
}

/*
 * Note: this is not the inverse of
 * CcRosMapVacbInKernelSpace()
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    ASSERT(Vacb->MappedCount != 0);
    Vacb->MappedCount--;

    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);

    return STATUS_SUCCESS;
}

static
NTSTATUS
CcRosMapVacbInKernelSpace(
    PROS_VACB Vacb)
{
    ULONG i;
    NTSTATUS Status;
    ULONG_PTR NumberOfPages;

    /* Create a memory area. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                0, // nothing checks for VACB mareas, so set to 0
                                &Vacb->BaseAddress,
                                VACB_MAPPING_GRANULARITY,
                                PAGE_READWRITE,
                                (PMEMORY_AREA*)&Vacb->MemoryArea,
                                0,
                                PAGE_SIZE);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
        return Status;
    }

    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);

    /* Create a virtual mapping for this memory area */
    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
    for (i = 0; i < NumberOfPages; i++)
    {
        PFN_NUMBER PageFrameNumber;

        MI_SET_USAGE(MI_USAGE_CACHE);
        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
        if (PageFrameNumber == 0)
        {
            DPRINT1("Unable to allocate page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                        PAGE_READWRITE,
                                        &PageFrameNumber,
                                        1);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Unable to create virtual mapping\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return STATUS_SUCCESS;
}

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->DirtyVacbListEntry.Flink = NULL;
    current->DirtyVacbListEntry.Blink = NULL;
    current->ReferenceCount = 0;
    current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
    CcRosAcquireVacbLock(current, NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check whether a VACB for this
     * file offset already exists. If there is one, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            CcRosReleaseVacbLock(*Vacb);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    CcRosVacbIncRefCount(current);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    Status = CcRosMapVacbInKernelSpace(current);
    if (!NT_SUCCESS(Status))
    {
        RemoveEntryList(&current->CacheMapVacbListEntry);
        RemoveEntryList(&current->VacbLruListEntry);
        CcRosReleaseVacbLock(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
    }

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    return Status;
}

NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    LONGLONG BaseOffset;

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        &BaseOffset,
                        BaseAddress,
                        UptoDate,
                        Vacb);
}

static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->PinCount == 0);
    ASSERT(Vacb->ReferenceCount == 0);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}

NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosVacbDecRefCount(current);
            CcRosInternalFreeVacb(current);
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}

VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount != 0);
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);
}

VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
    {
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}


VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->OpenCount > 0)
    {
        SharedCacheMap->OpenCount--;
        if (SharedCacheMap->OpenCount == 0)
        {
            MmFreeSectionSegments(SharedCacheMap->FileObject);
            CcRosDeleteFileCache(FileObject, SharedCacheMap);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}

NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the FO, always
         * hold the master lock, to be sure not to race with a
         * potential ongoing read-ahead!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint("  Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Valid = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count all the associated VACBs */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            PROS_VACB Vacb;

            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
            if (Vacb->Valid)
            {
                Valid += VACB_MAPPING_GRANULARITY / 1024;
            }
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
    }

    return TRUE;
}

BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}
#endif

/* EOF */