/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

/* Largest chunk zeroed or accounted for in one go (see CcZeroData and CcCanIWrite) */
#define MAX_ZERO_LENGTH    (256 * 1024)

typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;
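
/* Note: these values also travel through the BOOLEAN Retrying parameter of
 * CcCanIWrite (see the TryContext assignment there): FALSE maps to FirstTry,
 * TRUE to a plain retry, while the internal Cc paths pass the larger
 * sentinel values above directly.
 */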

ULONG CcRosTraceLevel = CC_API_DEBUG;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* Counters:
 * - Number of pages flushed to disk
 * - Number of flush operations
 */
ULONG CcDataPages = 0;
ULONG CcDataFlushes = 0;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

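/* Allocate the single physical page backing CcZeroPage and zero-fill it.
 * CcZeroData reuses this page for every PFN of its zeroing MDL, so failing
 * to get the page is fatal for the cache manager.
 */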
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}

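/* Drain the CcDeferredWrites queue: pop each entry and re-check it with
 * CcCanIWrite. Entries that can now proceed are signaled (if the waiter
 * supplied an event) or executed through their post routine and freed;
 * the others are pushed back onto the head of the queue.
 */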
VOID
CcPostDeferredWrites(VOID)
{
    LIST_ENTRY ToInsertBack;

    InitializeListHead(&ToInsertBack);

    /* We'll try to write as much as we can */
    while (TRUE)
    {
        PDEFERRED_WRITE DeferredWrite;
        PLIST_ENTRY ListEntry;

        DeferredWrite = NULL;

        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);

        if (!ListEntry)
            break;

        DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

        /* Check if we can write */
        if (CcCanIWrite(DeferredWrite->FileObject, DeferredWrite->BytesToWrite, FALSE, RetryForceCheckPerFile))
        {
            /* If we have an event, set it and go along */
            if (DeferredWrite->Event)
            {
                KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
            }
            /* Otherwise, call the write routine and free the context */
            else
            {
                DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
                ExFreePoolWithTag(DeferredWrite, 'CcDw');
            }
            continue;
        }

        /* Keep it for later */
        InsertHeadList(&ToInsertBack, &DeferredWrite->DeferredWriteLinks);

        /* If we don't accept modified pages, stop here */
        if (!DeferredWrite->LimitModifiedPages)
        {
            break;
        }
    }

    /* Insert what we couldn't write back in the list */
    while (!IsListEmpty(&ToInsertBack))
    {
        PLIST_ENTRY ListEntry = RemoveTailList(&ToInsertBack);
        ExInterlockedInsertHeadList(&CcDeferredWrites, ListEntry, &CcDeferredWriteSpinLock);
    }
}

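/* Worker for a previously scheduled read-ahead: fetch the offset and length
 * stashed in the file object's private cache map, then fault the covered
 * VACBs in so that a later copy read is served straight from the cache.
 */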
VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;
    BOOLEAN Success;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * PrivateCacheMap might disappear in-between if the handle to the file
     * is closed (the private map is attached to the handle, not to the
     * file), so we must hold the master lock while we deal with it:
     * teardown has to acquire that lock too, so the map cannot vanish
     * under us.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract read offset and length and release private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Time to go! */
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }

    /* The rest of the algorithm looks like CcCopyData, with the slight
     * difference that we don't copy data back to a user-backed buffer:
     * we just bring the data into Cc.
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset, VACB_MAPPING_GRANULARITY),
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        _SEH2_TRY
        {
            Success = CcRosEnsureVacbResident(Vacb, TRUE, FALSE,
                                              CurrentOffset % VACB_MAPPING_GRANULARITY, PartialLength);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Success = FALSE;
        }
        _SEH2_END

        if (!Success)
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
            /* Note: Status still holds the CcRosRequestVacb result here,
             * so don't log it as the failure reason */
            DPRINT1("Failed to read data!\n");
            goto Clear;
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        _SEH2_TRY
        {
            Success = CcRosEnsureVacbResident(Vacb, TRUE, FALSE, 0, PartialLength);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Success = FALSE;
        }
        _SEH2_END

        if (!Success)
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
            DPRINT1("Failed to read data!\n");
            goto Clear;
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See previous comment about private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);

    return;
}

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Allow remote files, unless the retry context explicitly requests the check */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed the max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    Pages = BYTES_TO_PAGES(Length);

    /* By default, assume limits per file won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check for limits per file? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If master is not locked, lock it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have limits per file set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release master */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }

    /* So, now allow the write if:
     * - This is not the first try, or there are no deferred writes yet
     * AND:
     * - We don't exceed the global dirty page threshold
     * - We don't exceed what Mm can allow us to use
     *   + If we're above the top throttle, that's fine
     *   + If we're above the bottom throttle with few modified pages, that's fine
     *   + Otherwise, throttle!
     * - We don't exceed the per-file limits
     */
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* We cannot write now; if the caller cannot wait either, deny the write */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

#if DBG
    DPRINT1("Actively deferring write for: %p\n", FileObject);
    DPRINT1("Because:\n");
    if (CcTotalDirtyPages + Pages >= CcDirtyPageThreshold)
        DPRINT1("    There are too many cache dirty pages: %x + %x >= %x\n", CcTotalDirtyPages, Pages, CcDirtyPageThreshold);
    if (MmAvailablePages <= MmThrottleTop)
        DPRINT1("    Available pages are below throttle top: %lx <= %lx\n", MmAvailablePages, MmThrottleTop);
    if (MmModifiedPageListHead.Total >= 1000)
        DPRINT1("    There are too many modified pages: %lu >= 1000\n", MmModifiedPageListHead.Total);
    if (MmAvailablePages <= MmThrottleBottom)
        DPRINT1("    Available pages are below throttle bottom: %lx <= %lx\n", MmAvailablePages, MmThrottleBottom);
#endif
    /* Now loop until our event is set. Once it is, the caller can, and must,
     * perform the write immediately.
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}

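/* SEH filter for the copy loops below: treat an access violation as handled
 * only when the faulting address lies inside the caller-supplied buffer, so
 * the caller can report STATUS_INVALID_USER_BUFFER; any other exception
 * keeps unwinding.
 */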
static
int
CcpCheckInvalidUserBuffer(PEXCEPTION_POINTERS Except, PVOID Buffer, ULONG Length)
{
    ULONG_PTR ExceptionAddress;
    ULONG_PTR BeginAddress = (ULONG_PTR)Buffer;
    ULONG_PTR EndAddress = (ULONG_PTR)Buffer + Length;

    if (Except->ExceptionRecord->ExceptionCode != STATUS_ACCESS_VIOLATION)
        return EXCEPTION_CONTINUE_SEARCH;
    if (Except->ExceptionRecord->NumberParameters < 2)
        return EXCEPTION_CONTINUE_SEARCH;

    ExceptionAddress = Except->ExceptionRecord->ExceptionInformation[1];
    if ((ExceptionAddress >= BeginAddress) && (ExceptionAddress < EndAddress))
        return EXCEPTION_EXECUTE_HANDLER;

    return EXCEPTION_CONTINUE_SEARCH;
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    LONGLONG ReadEnd = FileOffset->QuadPart + Length;
    ULONG ReadLength = 0;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    if (!SharedCacheMap)
        return FALSE;

    /* Documented to ASSERT, but KMTests test this case... */
    // ASSERT((FileOffset->QuadPart + Length) <= SharedCacheMap->FileSize.QuadPart);

    CurrentOffset = FileOffset->QuadPart;
    while (CurrentOffset < ReadEnd)
    {
        Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
            ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);
            SIZE_T CopyLength = VacbLength;

            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
                return FALSE;

            _SEH2_TRY
            {
                RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + VacbOffset, CopyLength);
            }
            _SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
            {
                ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
            }
            _SEH2_END;

            ReadLength += VacbLength;

            Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
            CurrentOffset += VacbLength;
            Length -= VacbLength;
        }
        _SEH2_FINALLY
        {
            CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE);
        }
        _SEH2_END;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = ReadLength;

#if 0
    /* If that was a successful sync read operation, let's handle read ahead */
    if (Length == 0 && Wait)
    {
        PPRIVATE_CACHE_MAP PrivateCacheMap = FileObject->PrivateCacheMap;

        /* If the file isn't random access and the next read may take us
         * across a VACB boundary, schedule the next read
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + ReadLength - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, FileOffset, ReadLength);
        }

        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset->QuadPart;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset->QuadPart + ReadLength;
    }
#endif

    return TRUE;
}
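
/* A minimal sketch of how a file system's cached read path typically drives
 * CcCopyRead (Fcb, Callbacks and CanWait are illustrative names, not
 * definitions from this file):
 *
 *     IO_STATUS_BLOCK Iosb;
 *
 *     if (FileObject->PrivateCacheMap == NULL)
 *     {
 *         CcInitializeCacheMap(FileObject, (PCC_FILE_SIZES)&Fcb->AllocationSize,
 *                              FALSE, &Callbacks, Fcb);
 *     }
 *
 *     if (!CcCopyRead(FileObject, &ByteOffset, ReadLength, CanWait,
 *                     SystemBuffer, &Iosb))
 *     {
 *         // Data not resident and CanWait was FALSE:
 *         // post the IRP to a worker thread and retry with Wait = TRUE.
 *     }
 */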

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    LONGLONG WriteEnd;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    if (!SharedCacheMap)
        return FALSE;

    Status = RtlLongLongAdd(FileOffset->QuadPart, Length, &WriteEnd);
    if (!NT_SUCCESS(Status))
        ExRaiseStatus(Status);

    ASSERT(WriteEnd <= SharedCacheMap->SectionSize.QuadPart);

    CurrentOffset = FileOffset->QuadPart;
    while (CurrentOffset < WriteEnd)
    {
        ULONG VacbOffset = CurrentOffset % VACB_MAPPING_GRANULARITY;
        ULONG VacbLength = min(WriteEnd - CurrentOffset, VACB_MAPPING_GRANULARITY - VacbOffset);

        Status = CcRosGetVacb(SharedCacheMap, CurrentOffset, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
            {
                return FALSE;
            }

            _SEH2_TRY
            {
                RtlCopyMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), Buffer, VacbLength);
            }
            _SEH2_EXCEPT(CcpCheckInvalidUserBuffer(_SEH2_GetExceptionInformation(), Buffer, VacbLength))
            {
                ExRaiseStatus(STATUS_INVALID_USER_BUFFER);
            }
            _SEH2_END;

            Buffer = (PVOID)((ULONG_PTR)Buffer + VacbLength);
            CurrentOffset += VacbLength;

            /* Tell Mm */
            Status = MmMakeSegmentDirty(FileObject->SectionObjectPointer,
                                        Vacb->FileOffset.QuadPart + VacbOffset,
                                        VacbLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
        _SEH2_FINALLY
        {
            /* Do not mark the VACB as dirty if an exception was raised */
            CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
        }
        _SEH2_END;
    }

    /* Flush if needed */
    if (FileObject->Flags & FO_WRITE_THROUGH)
        CcFlushCache(FileObject->SectionObjectPointer, FileOffset, Length, NULL);

    return TRUE;
}

/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
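
/* CcCanIWrite and CcDeferWrite together form the write throttling protocol.
 * A file system's write dispatch might use it like this (PostWriteRoutine,
 * Context1/Context2 and CanWait are illustrative names, not from this file):
 *
 *     if (!CcCanIWrite(FileObject, WriteLength, CanWait, FALSE))
 *     {
 *         // Writing now would exceed the dirty page limits: queue it.
 *         // Cc calls PostWriteRoutine(Context1, Context2) once the
 *         // pressure drops; that routine retries the write and, if it
 *         // gets throttled again, passes Retrying = TRUE so the request
 *         // is re-queued at the head of the list.
 *         CcDeferWrite(FileObject, PostWriteRoutine, Context1, Context2,
 *                      WriteLength, FALSE);
 *         return STATUS_PENDING;
 *     }
 */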

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}

/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    PROS_VACB Vacb;
    PROS_SHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (!SharedCacheMap)
    {
        /* Make this a non-cached write */
        IO_STATUS_BLOCK Iosb;
        KEVENT Event;
        PMDL Mdl;
        ULONG i;
        ULONG CurrentLength;
        PPFN_NUMBER PfnArray;

        /* Setup our Mdl: every PFN in it points at the shared zero page */
        Mdl = IoAllocateMdl(NULL, min(Length, MAX_ZERO_LENGTH), FALSE, FALSE, NULL);
        if (!Mdl)
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);

        PfnArray = MmGetMdlPfnArray(Mdl);
        for (i = 0; i < BYTES_TO_PAGES(Mdl->ByteCount); i++)
            PfnArray[i] = CcZeroPage;
        Mdl->MdlFlags |= MDL_PAGES_LOCKED;

        /* Perform the write sequentially */
        while (Length > 0)
        {
            CurrentLength = min(Length, MAX_ZERO_LENGTH);

            Mdl->ByteCount = CurrentLength;

            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                IoFreeMdl(Mdl);
                ExRaiseStatus(Status);
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }

        IoFreeMdl(Mdl);

        return TRUE;
    }

    /* See if we can simply truncate the valid data length instead of zeroing */
    if ((StartOffset->QuadPart < SharedCacheMap->ValidDataLength.QuadPart) &&
        (EndOffset->QuadPart >= SharedCacheMap->ValidDataLength.QuadPart))
    {
        DPRINT1("Truncating VDL.\n");
        SharedCacheMap->ValidDataLength = *StartOffset;
        return TRUE;
    }

    ASSERT(EndOffset->QuadPart <= SharedCacheMap->SectionSize.QuadPart);

    while (WriteOffset.QuadPart < EndOffset->QuadPart)
    {
        ULONG VacbOffset = WriteOffset.QuadPart % VACB_MAPPING_GRANULARITY;
        ULONG VacbLength = min(Length, VACB_MAPPING_GRANULARITY - VacbOffset);

        Status = CcRosGetVacb(SharedCacheMap, WriteOffset.QuadPart, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            ExRaiseStatus(Status);
            return FALSE;
        }

        _SEH2_TRY
        {
            if (!CcRosEnsureVacbResident(Vacb, Wait, FALSE, VacbOffset, VacbLength))
            {
                return FALSE;
            }

            RtlZeroMemory((PVOID)((ULONG_PTR)Vacb->BaseAddress + VacbOffset), VacbLength);

            WriteOffset.QuadPart += VacbLength;
            Length -= VacbLength;

            /* Tell Mm */
            Status = MmMakeSegmentDirty(FileObject->SectionObjectPointer,
                                        Vacb->FileOffset.QuadPart + VacbOffset,
                                        VacbLength);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
        _SEH2_FINALLY
        {
            /* Do not mark the VACB as dirty if an exception was raised */
            CcRosReleaseVacb(SharedCacheMap, Vacb, !_SEH2_AbnormalTermination(), FALSE);
        }
        _SEH2_END;
    }

    /* Flush if needed */
    if (FileObject->Flags & FO_WRITE_THROUGH)
        CcFlushCache(FileObject->SectionObjectPointer, StartOffset, EndOffset->QuadPart - StartOffset->QuadPart, NULL);

    return TRUE;
}
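
/* A typical caller of CcZeroData is a file system extending a file: when a
 * write starts beyond the current valid data length, the gap must be zeroed
 * first, e.g. (illustrative names, not from this file):
 *
 *     if (WriteOffset.QuadPart > Fcb->ValidDataLength.QuadPart &&
 *         !CcZeroData(FileObject, &Fcb->ValidDataLength, &WriteOffset, CanWait))
 *     {
 *         // Couldn't zero without blocking: post and retry with Wait = TRUE
 *         return STATUS_PENDING;
 *     }
 */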