/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/cacheman.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES *****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

BOOLEAN CcPfEnablePrefetcher;
PFSN_PREFETCHER_GLOBALS CcPfGlobals;
MM_SYSTEMSIZE CcCapturedSystemSize;

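/* File identifier reported by CcBugCheck() so that CACHE_MANAGER bugchecks
 * raised from this file can be attributed to it */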
static ULONG BugCheckFileId = 0x4 << 16;

/* FUNCTIONS *****************************************************************/

CODE_SEG("INIT")
VOID
NTAPI
CcPfInitializePrefetcher(VOID)
{
    /* Notify debugger */
    DbgPrintEx(DPFLTR_PREFETCHER_ID,
               DPFLTR_TRACE_LEVEL,
               "CCPF: InitializePrefetcher()\n");

    /* Setup the Prefetcher Data */
    InitializeListHead(&CcPfGlobals.ActiveTraces);
    InitializeListHead(&CcPfGlobals.CompletedTraces);
    ExInitializeFastMutex(&CcPfGlobals.CompletedTracesLock);

    /* FIXME: Setup the rest of the prefetcher */
}

CODE_SEG("INIT")
BOOLEAN
CcInitializeCacheManager(VOID)
{
    ULONG Thread;

    CcInitView();

    /* Initialize lazy-writer lists */
    InitializeListHead(&CcIdleWorkerThreadList);
    InitializeListHead(&CcExpressWorkQueue);
    InitializeListHead(&CcRegularWorkQueue);
    InitializeListHead(&CcPostTickWorkQueue);

    /* Define the lazy writer threshold and the number of worker threads,
     * depending on the system size
     */
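    /* That is: small systems allow up to 1/8 of physical memory to be dirty,
     * medium systems 1/4, and large systems 3/8 */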
    CcCapturedSystemSize = MmQuerySystemSize();
    switch (CcCapturedSystemSize)
    {
        case MmSmallSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 2;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;

        default:
            CcNumberWorkerThreads = 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;
    }

    /* Allocate a work item for all our threads */
    for (Thread = 0; Thread < CcNumberWorkerThreads; ++Thread)
    {
        PWORK_QUEUE_ITEM Item;

        Item = ExAllocatePoolWithTag(NonPagedPool, sizeof(WORK_QUEUE_ITEM), 'qWcC');
        if (Item == NULL)
        {
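            /* The cache manager cannot run its worker threads without these
             * items, so treat an allocation failure during init as fatal */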
            CcBugCheck(0, 0, 0);
        }

        /* The work item starts out idle, so park it on the idle list */
        ExInitializeWorkItem(Item, CcWorkerThread, Item);
        InsertTailList(&CcIdleWorkerThreadList, &Item->List);
    }

    /* Initialize our lazy writer */
    RtlZeroMemory(&LazyWriter, sizeof(LazyWriter));
    InitializeListHead(&LazyWriter.WorkQueue);
    /* Delay activation of the lazy writer */
    KeInitializeDpc(&LazyWriter.ScanDpc, CcScanDpc, NULL);
    KeInitializeTimer(&LazyWriter.ScanTimer);
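    /* Note: the scan timer is only initialized here; it is armed later, once
     * there is actually work for the lazy writer (see CcScheduleLazyWriteScan) */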

    /* Lookaside list for our work items */
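    /* Deferred work entries, such as the read ahead requests queued by
     * CcScheduleReadAhead below, are allocated from it */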
    ExInitializeNPagedLookasideList(&CcTwilightLookasideList, NULL, NULL, 0, sizeof(WORK_QUEUE_ENTRY), 'KWcC', 0);

    return TRUE;
}

VOID
NTAPI
CcShutdownSystem(VOID)
{
    /* NOTHING TO DO */
}

/*
 * @unimplemented
 */
LARGE_INTEGER
NTAPI
CcGetFlushedValidData (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
    IN BOOLEAN BcbListHeld
    )
{
    LARGE_INTEGER i;

    UNIMPLEMENTED;

    i.QuadPart = 0;
    return i;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
CcRemapBcb (
    IN PVOID Bcb
    )
{
    UNIMPLEMENTED;

    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcScheduleReadAhead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length
    )
{
    KIRQL OldIrql;
    LARGE_INTEGER NewOffset;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;

    /* If the file isn't cached or read ahead is disabled, this is a no-op */
    if (SharedCacheMap == NULL || PrivateCacheMap == NULL ||
        BooleanFlagOn(SharedCacheMap->Flags, READAHEAD_DISABLED))
    {
        return;
    }

    /* Round the read length up to the read ahead granularity (mask + 1) */
    Length = ROUND_UP(Length, PrivateCacheMap->ReadAheadMask + 1);
    /* Compute the offset we'll reach */
    NewOffset.QuadPart = FileOffset->QuadPart + Length;

    /* Lock read ahead spin lock */
    KeAcquireSpinLock(&PrivateCacheMap->ReadAheadSpinLock, &OldIrql);
    /* Easy case: the file is sequentially read */
    if (BooleanFlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY))
    {
        /* If we went backward, this is a no-go! */
        if (NewOffset.QuadPart < PrivateCacheMap->ReadAheadOffset[1].QuadPart)
        {
            KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
            return;
        }

        /* FIXME: hackish, but will do the job for now */
        PrivateCacheMap->ReadAheadOffset[1].QuadPart = NewOffset.QuadPart;
        PrivateCacheMap->ReadAheadLength[1] = Length;
    }
    /* Other cases: try to find some logic in that mess... */
    else
    {
        /* Check whether the reads keep moving forward through the file
         * (offsets only ever increase) and pretend that's enough for now
         */
        if (PrivateCacheMap->FileOffset2.QuadPart >= PrivateCacheMap->FileOffset1.QuadPart &&
            FileOffset->QuadPart >= PrivateCacheMap->FileOffset2.QuadPart)
        {
            /* FIXME: hackish, but will do the job for now */
            PrivateCacheMap->ReadAheadOffset[1].QuadPart = NewOffset.QuadPart;
            PrivateCacheMap->ReadAheadLength[1] = Length;
        }
        else
        {
            /* FIXME: handle the other cases */
            KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
            UNIMPLEMENTED_ONCE;
            return;
        }
    }

    /* If read ahead isn't active yet */
    if (!PrivateCacheMap->Flags.ReadAheadActive)
    {
        PWORK_QUEUE_ENTRY WorkItem;

        /* It's active now!
         * Use an interlocked OR on the flag mask: UlongFlags shares its
         * storage with the node type code, which must not be clobbered.
         */
        InterlockedOr((volatile long *)&PrivateCacheMap->UlongFlags, PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);

        /* Get a work item */
        WorkItem = ExAllocateFromNPagedLookasideList(&CcTwilightLookasideList);
        if (WorkItem != NULL)
        {
            /* Reference the file object so it cannot go away while the
             * work item is pending */
            ObReferenceObject(FileObject);

            /* We want to do read ahead! */
            WorkItem->Function = ReadAhead;
            WorkItem->Parameters.Read.FileObject = FileObject;

            /* Post it to the express work queue */
            CcPostWorkQueue(WorkItem, &CcExpressWorkQueue);

            return;
        }

        /* Fail path: lock again, and revert read ahead active */
        KeAcquireSpinLock(&PrivateCacheMap->ReadAheadSpinLock, &OldIrql);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
    }

    /* Release the lock: we get here if read ahead was already active, or if
     * allocating the work item failed */
    KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetAdditionalCacheAttributes (
    IN PFILE_OBJECT FileObject,
    IN BOOLEAN DisableReadAhead,
    IN BOOLEAN DisableWriteBehind
    )
{
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p DisableReadAhead=%d DisableWriteBehind=%d\n",
        FileObject, DisableReadAhead, DisableWriteBehind);

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

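    /* Both attributes live in the shared cache map, so they affect every
     * file object that maps this file, not just the caller's handle */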
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (DisableReadAhead)
    {
        SetFlag(SharedCacheMap->Flags, READAHEAD_DISABLED);
    }
    else
    {
        ClearFlag(SharedCacheMap->Flags, READAHEAD_DISABLED);
    }

    if (DisableWriteBehind)
    {
        /* FIXME: also set flag 0x200 */
        SetFlag(SharedCacheMap->Flags, WRITEBEHIND_DISABLED);
    }
    else
    {
        ClearFlag(SharedCacheMap->Flags, WRITEBEHIND_DISABLED);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcSetBcbOwnerPointer (
    IN PVOID Bcb,
    IN PVOID Owner
    )
{
    PINTERNAL_BCB iBcb = CONTAINING_RECORD(Bcb, INTERNAL_BCB, PFCB);

    CCTRACE(CC_API_DEBUG, "Bcb=%p Owner=%p\n",
        Bcb, Owner);

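    /* ExSetResourceOwnerPointer() may only be called by a thread that owns
     * the resource, so verify ownership before handing over the pointer */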
    if (!ExIsResourceAcquiredExclusiveLite(&iBcb->Lock) && !ExIsResourceAcquiredSharedLite(&iBcb->Lock))
    {
        DPRINT1("Current thread doesn't own resource!\n");
        return;
    }

    ExSetResourceOwnerPointer(&iBcb->Lock, Owner);
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetDirtyPageThreshold (
    IN PFILE_OBJECT FileObject,
    IN ULONG DirtyPageThreshold
    )
{
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p DirtyPageThreshold=%lu\n",
        FileObject, DirtyPageThreshold);

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap != NULL)
    {
        SharedCacheMap->DirtyPageThreshold = DirtyPageThreshold;
    }

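    /* Also mark the FCB, so callers throttling writes know that a per-file
     * dirty page limit is in effect for this file */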
    Fcb = FileObject->FsContext;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        SetFlag(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    }
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetReadAheadGranularity (
    IN PFILE_OBJECT FileObject,
    IN ULONG Granularity
    )
{
    PPRIVATE_CACHE_MAP PrivateMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p Granularity=%lu\n",
        FileObject, Granularity);

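    /* The granularity is expected to be a power of two (PAGE_SIZE by
     * default); only the resulting mask is stored, and CcScheduleReadAhead
     * uses it to round read ahead lengths up to this granularity */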
    PrivateMap = FileObject->PrivateCacheMap;
    PrivateMap->ReadAheadMask = Granularity - 1;
}