/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/cacheman.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES *****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

BOOLEAN CcPfEnablePrefetcher;
PFSN_PREFETCHER_GLOBALS CcPfGlobals;
MM_SYSTEMSIZE CcCapturedSystemSize;

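/* File identifier used by CcBugCheck below; presumably combined with the source line
 * into the first CACHE_MANAGER bugcheck parameter (file id in the high word) -- an
 * interpretation of the 0x4 << 16 encoding, not something stated in this file.
 */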
static ULONG BugCheckFileId = 0x4 << 16;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
INIT_FUNCTION
CcPfInitializePrefetcher(VOID)
{
    /* Notify debugger */
    DbgPrintEx(DPFLTR_PREFETCHER_ID,
               DPFLTR_TRACE_LEVEL,
               "CCPF: InitializePrefetcher()\n");

    /* Setup the Prefetcher Data */
    InitializeListHead(&CcPfGlobals.ActiveTraces);
    InitializeListHead(&CcPfGlobals.CompletedTraces);
    ExInitializeFastMutex(&CcPfGlobals.CompletedTracesLock);

    /* FIXME: Setup the rest of the prefetcher */
}

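/* Cache manager initialization: sets up the view system (CcInitView), the lazy writer
 * work queues, a per-system-size worker thread count and dirty page threshold, the lazy
 * writer scan timer/DPC, and the lookaside list used for cache manager work queue entries.
 */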
BOOLEAN
NTAPI
INIT_FUNCTION
CcInitializeCacheManager(VOID)
{
    ULONG Thread;

    CcInitView();

    /* Initialize lazy-writer lists */
    InitializeListHead(&CcIdleWorkerThreadList);
    InitializeListHead(&CcExpressWorkQueue);
    InitializeListHead(&CcRegularWorkQueue);
    InitializeListHead(&CcPostTickWorkQueue);

    /* Define the lazy writer dirty page threshold and the number of worker threads,
     * depending on the system size
     */
    CcCapturedSystemSize = MmQuerySystemSize();
    switch (CcCapturedSystemSize)
    {
        case MmSmallSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 2;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;

        default:
            CcNumberWorkerThreads = 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;
    }
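    /* Worked example (illustrative, assuming 4 KB pages): on a medium-sized system with
     * 256 MB of RAM (65536 physical pages), CcDirtyPageThreshold ends up at
     * 65536 / 4 = 16384 pages, i.e. 64 MB worth of dirty pages.
     */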

    /* Allocate a work item for each of our worker threads */
    for (Thread = 0; Thread < CcNumberWorkerThreads; ++Thread)
    {
        PWORK_QUEUE_ITEM Item;

        Item = ExAllocatePoolWithTag(NonPagedPool, sizeof(WORK_QUEUE_ITEM), 'qWcC');
        if (Item == NULL)
        {
            CcBugCheck(0, 0, 0);
        }

        /* A freshly created work item is, by definition, idle */
        ExInitializeWorkItem(Item, CcWorkerThread, Item);
        InsertTailList(&CcIdleWorkerThreadList, &Item->List);
    }

    /* Initialize our lazy writer */
    RtlZeroMemory(&LazyWriter, sizeof(LazyWriter));
    InitializeListHead(&LazyWriter.WorkQueue);
    /* Delay activation of the lazy writer */
    KeInitializeDpc(&LazyWriter.ScanDpc, CcScanDpc, NULL);
    KeInitializeTimer(&LazyWriter.ScanTimer);

    /* Lookaside list for our work items */
    ExInitializeNPagedLookasideList(&CcTwilightLookasideList, NULL, NULL, 0, sizeof(WORK_QUEUE_ENTRY), 'KWcC', 0);

    return TRUE;
}

VOID
NTAPI
CcShutdownSystem(VOID)
{
    /* NOTHING TO DO */
}

/*
 * @unimplemented
 */
LARGE_INTEGER
NTAPI
CcGetFlushedValidData (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
    IN BOOLEAN BcbListHeld
    )
{
    LARGE_INTEGER i;

    UNIMPLEMENTED;

    i.QuadPart = 0;
    return i;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
CcRemapBcb (
    IN PVOID Bcb
    )
{
    UNIMPLEMENTED;

    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcScheduleReadAhead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length
    )
{
    KIRQL OldIrql;
    LARGE_INTEGER NewOffset;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;

    /* If the file isn't cached, or if read-ahead is disabled, this is a no-op */
    if (SharedCacheMap == NULL || PrivateCacheMap == NULL ||
        BooleanFlagOn(SharedCacheMap->Flags, READAHEAD_DISABLED))
    {
        return;
    }

    /* Round the read length up to the read-ahead granularity (mask + 1) */
    Length = ROUND_UP(Length, PrivateCacheMap->ReadAheadMask + 1);
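    /* Illustrative example (values assumed, not taken from this file): if the file system
     * set a 64 KB read-ahead granularity via CcSetReadAheadGranularity, ReadAheadMask is
     * 0xFFFF, so a 9 KB read is rounded up to 64 KB before computing the target offset below.
     */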
    /* Compute the offset we'll reach */
    NewOffset.QuadPart = FileOffset->QuadPart + Length;

    /* Lock the read-ahead spin lock */
    KeAcquireSpinLock(&PrivateCacheMap->ReadAheadSpinLock, &OldIrql);
    /* Easy case: the file is read sequentially */
    if (BooleanFlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY))
    {
        /* If the new offset is behind the current read-ahead offset, don't bother */
        if (NewOffset.QuadPart < PrivateCacheMap->ReadAheadOffset[1].QuadPart)
        {
            KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
            return;
        }

        /* FIXME: hackish, but will do the job for now */
        PrivateCacheMap->ReadAheadOffset[1].QuadPart = NewOffset.QuadPart;
        PrivateCacheMap->ReadAheadLength[1] = Length;
    }
    /* Other cases: try to find some logic in the access pattern */
    else
    {
        /* Check whether the reads keep moving forward through the file
         * and pretend that's enough for now
         */
        if (PrivateCacheMap->FileOffset2.QuadPart >= PrivateCacheMap->FileOffset1.QuadPart &&
            FileOffset->QuadPart >= PrivateCacheMap->FileOffset2.QuadPart)
        {
            /* FIXME: hackish, but will do the job for now */
            PrivateCacheMap->ReadAheadOffset[1].QuadPart = NewOffset.QuadPart;
            PrivateCacheMap->ReadAheadLength[1] = Length;
        }
        else
        {
            /* FIXME: handle the other cases */
            KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
            UNIMPLEMENTED_ONCE;
            return;
        }
    }

    /* If read-ahead isn't active yet */
    if (!PrivateCacheMap->Flags.ReadAheadActive)
    {
        PWORK_QUEUE_ENTRY WorkItem;

        /* Mark it active now.
         * Be careful with the mask: the node type code shares this ULONG, so touch only our bit
         */
        InterlockedOr((volatile long *)&PrivateCacheMap->UlongFlags, PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);

        /* Get a work item */
        WorkItem = ExAllocateFromNPagedLookasideList(&CcTwilightLookasideList);
        if (WorkItem != NULL)
        {
            /* Reference the file object so that it doesn't go away while the work item is pending */
            ObReferenceObject(FileObject);

            /* We want to do read-ahead! */
            WorkItem->Function = ReadAhead;
            WorkItem->Parameters.Read.FileObject = FileObject;

            /* Post it to the express work queue, which serves read-ahead requests */
            CcPostWorkQueue(WorkItem, &CcExpressWorkQueue);

            return;
        }

        /* Failure path: take the lock again and clear the read-ahead active flag */
        KeAcquireSpinLock(&PrivateCacheMap->ReadAheadSpinLock, &OldIrql);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
    }

    /* Done (failure path) */
    KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetAdditionalCacheAttributes (
    IN PFILE_OBJECT FileObject,
    IN BOOLEAN DisableReadAhead,
    IN BOOLEAN DisableWriteBehind
    )
{
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p DisableReadAhead=%d DisableWriteBehind=%d\n",
        FileObject, DisableReadAhead, DisableWriteBehind);

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (DisableReadAhead)
    {
        SetFlag(SharedCacheMap->Flags, READAHEAD_DISABLED);
    }
    else
    {
        ClearFlag(SharedCacheMap->Flags, READAHEAD_DISABLED);
    }

    if (DisableWriteBehind)
    {
        /* FIXME: also set flag 0x200 */
        SetFlag(SharedCacheMap->Flags, WRITEBEHIND_DISABLED);
    }
    else
    {
        ClearFlag(SharedCacheMap->Flags, WRITEBEHIND_DISABLED);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcSetBcbOwnerPointer (
    IN PVOID Bcb,
    IN PVOID Owner
    )
{
    PINTERNAL_BCB iBcb = Bcb;

    CCTRACE(CC_API_DEBUG, "Bcb=%p Owner=%p\n",
        Bcb, Owner);

    if (!ExIsResourceAcquiredExclusiveLite(&iBcb->Lock) && !ExIsResourceAcquiredSharedLite(&iBcb->Lock))
    {
        DPRINT1("Current thread doesn't own resource!\n");
        return;
    }

    ExSetResourceOwnerPointer(&iBcb->Lock, Owner);
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetDirtyPageThreshold (
    IN PFILE_OBJECT FileObject,
    IN ULONG DirtyPageThreshold
    )
{
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p DirtyPageThreshold=%lu\n",
        FileObject, DirtyPageThreshold);

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap != NULL)
    {
        SharedCacheMap->DirtyPageThreshold = DirtyPageThreshold;
    }

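    /* Mark the FCB so other components can tell that a per-file limit on modified (dirty)
     * pages is in effect. (Interpretation based on the FSRTL_FLAG_LIMIT_MODIFIED_PAGES name;
     * its consumers live outside this file.)
     */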
    Fcb = FileObject->FsContext;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        SetFlag(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    }
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetReadAheadGranularity (
    IN PFILE_OBJECT FileObject,
    IN ULONG Granularity
    )
{
    PPRIVATE_CACHE_MAP PrivateMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p Granularity=%lu\n",
        FileObject, Granularity);

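    /* Store Granularity - 1 as a mask: CcScheduleReadAhead rounds read lengths up to
     * ReadAheadMask + 1, which presumably expects callers to pass a power-of-two
     * granularity (an assumption, not stated in this file).
     */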
    PrivateMap = FileObject->PrivateCacheMap;
    PrivateMap->ReadAheadMask = Granularity - 1;
}