1 /*
2 * Copyright (C) 1998-2005 ReactOS Team (and the authors from the programmers section)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 *
19 * PROJECT: ReactOS kernel
20 * FILE: ntoskrnl/cache/section/fault.c
21 * PURPOSE: Consolidate fault handlers for sections
22 *
23 * PROGRAMMERS: Arty
24 * Rex Jolliff
25 * David Welch
26 * Eric Kohl
27 * Emanuele Aliberti
28 * Eugene Ingerman
29 * Casper Hornstrup
30 * KJK::Hyperion
31 * Guido de Jong
32 * Ge van Geldorp
33 * Royce Mitchell III
34 * Filip Navara
35 * Aleksey Bragin
36 * Jason Filby
37 * Thomas Weidenmueller
38 * Gunnar Andre' Dalsnes
39 * Mike Nordell
40 * Alex Ionescu
41 * Gregor Anich
42 * Steven Edwards
43 * Herve Poussineau
44 */
45
46 /*
47
48 I've generally organized fault handling code in newmm as handlers that run
49 under a single lock acquisition, check the state, and either take necessary
50 action atomically, or place a wait entry and return a continuation to the
51 caller. This lends itself to code that has a simple, structured form,
52 doesn't make assumptions about lock taking and breaking, and provides an
53 obvious, graphic separation between code that may block and code that isn't
54 allowed to. This file contains the non-blocking half.
55
56 In order to request a blocking operation to happen outside locks, place a
57 function pointer in the provided MM_REQUIRED_RESOURCES struct and return
58 STATUS_MORE_PROCESSING_REQUIRED. The function indicated will receive the
59 provided struct and take action outside of any mm related locks and at
60 PASSIVE_LEVEL. The same fault handler will be called again after the
61 blocking operation succeeds. In this way, the fault handler can accumulate
62 state, but will freely work while competing with other threads.
63
64 Fault handlers in this file should check for an MM_WAIT_ENTRY in a page
65 table they're using and return STATUS_SUCCESS + 1 if it's found. In that
66 case, the caller will wait on the wait entry event until the competing thread
67 is finished, and recall this handler in the current thread.
68
69 Another thing to note here is that we require mappings to exactly mirror
70 rmaps, so each mapping should be immediately followed by an rmap addition.
71
72 */
73
74 /* INCLUDES *****************************************************************/
75
76 #include <ntoskrnl.h>
77 #include "newmm.h"
78 #define NDEBUG
79 #include <debug.h>
80 #include <mm/ARM3/miarm.h>
81
82 #define DPRINTC DPRINT
83
84 extern KEVENT MmWaitPageEvent;
85
86 #ifdef NEWCC
87 extern PMMWSL MmWorkingSetList;
88
89 /*
90
91 Multiple stage handling of a not-present fault in a data section.
92
93 Required->State is used to accumulate flags that indicate the next action
94 the handler should take.
95
96 State & 2 is currently used to indicate that the page acquired by a previous
97 callout is a global page to the section and should be placed in the section
98 page table.
99
100 Note that the primitive tail recursion done here reaches the base case when
101 the page is present.
102
103 */
104
105 NTSTATUS
106 NTAPI
MmNotPresentFaultCachePage(_In_ PMMSUPPORT AddressSpace,_In_ MEMORY_AREA * MemoryArea,_In_ PVOID Address,_In_ BOOLEAN Locked,_Inout_ PMM_REQUIRED_RESOURCES Required)107 MmNotPresentFaultCachePage (
108 _In_ PMMSUPPORT AddressSpace,
109 _In_ MEMORY_AREA* MemoryArea,
110 _In_ PVOID Address,
111 _In_ BOOLEAN Locked,
112 _Inout_ PMM_REQUIRED_RESOURCES Required)
113 {
114 NTSTATUS Status;
115 PVOID PAddress;
116 ULONG Consumer;
117 PMM_SECTION_SEGMENT Segment;
118 LARGE_INTEGER FileOffset, TotalOffset;
119 ULONG_PTR Entry;
120 ULONG Attributes;
121 PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
122 KIRQL OldIrql;
123
124 DPRINT("Not Present: %p %p (%p-%p)\n",
125 AddressSpace,
126 Address,
127 MA_GetStartingAddress(MemoryArea),
128 MA_GetEndingAddress(MemoryArea));
129
130 /*
131 * There is a window between taking the page fault and locking the
132 * address space when another thread could load the page so we check
133 * that.
134 */
135 if (MmIsPagePresent(Process, Address))
136 {
137 DPRINT("Done\n");
138 return STATUS_SUCCESS;
139 }
140
141 PAddress = MM_ROUND_DOWN(Address, PAGE_SIZE);
142 TotalOffset.QuadPart = (ULONG_PTR)PAddress -
143 MA_GetStartingAddress(MemoryArea);
144
145 Segment = MemoryArea->Data.SectionData.Segment;
146
147 TotalOffset.QuadPart += MemoryArea->Data.SectionData.ViewOffset.QuadPart;
148 FileOffset = TotalOffset;
149
150 //Consumer = (Segment->Flags & MM_DATAFILE_SEGMENT) ? MC_CACHE : MC_USER;
151 Consumer = MC_CACHE;
152
153 if (Segment->FileObject)
154 {
155 __debugbreak();
156 DPRINT("FileName %wZ\n", &Segment->FileObject->FileName);
157 }
158
159 DPRINT("Total Offset %08x%08x\n", TotalOffset.HighPart, TotalOffset.LowPart);
160
161 /* Lock the segment */
162 MmLockSectionSegment(Segment);
163
164 /* Get the entry corresponding to the offset within the section */
165 Entry = MmGetPageEntrySectionSegment(Segment, &TotalOffset);
166
167 Attributes = PAGE_READONLY;
168
169 if (Required->State && Required->Page[0])
170 {
171 DPRINT("Have file and page, set page %x in section @ %x #\n",
172 Required->Page[0],
173 TotalOffset.LowPart);
174
175 if (Required->SwapEntry)
176 MmSetSavedSwapEntryPage(Required->Page[0], Required->SwapEntry);
177
178 if (Required->State & 2)
179 {
180 DPRINT("Set in section @ %x\n", TotalOffset.LowPart);
181 Status = MmSetPageEntrySectionSegment(Segment,
182 &TotalOffset,
183 Entry = MAKE_PFN_SSE(Required->Page[0]));
184 if (!NT_SUCCESS(Status))
185 {
186 MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
187 }
188 MmUnlockSectionSegment(Segment);
189 MiSetPageEvent(Process, Address);
190 DPRINT("Status %x\n", Status);
191 return STATUS_MM_RESTART_OPERATION;
192 }
193 else
194 {
195 DPRINT("Set %x in address space @ %p\n", Required->Page[0], Address);
196 Status = MmCreateVirtualMapping(Process,
197 Address,
198 Attributes,
199 Required->Page,
200 1);
201 if (NT_SUCCESS(Status))
202 {
203 MmInsertRmap(Required->Page[0], Process, Address);
204 }
205 else
206 {
207 /* Drop the reference for our address space ... */
208 MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
209 }
210 MmUnlockSectionSegment(Segment);
211 DPRINTC("XXX Set Event %x\n", Status);
212 MiSetPageEvent(Process, Address);
213 DPRINT("Status %x\n", Status);
214 return Status;
215 }
216 }
217 else if (MM_IS_WAIT_PTE(Entry))
218 {
219 // Whenever MM_WAIT_ENTRY is required as a swap entry, we need to
220 // ask the fault handler to wait until we should continue. Rathern
221 // than recopy this boilerplate code everywhere, we just ask them
222 // to wait.
223 MmUnlockSectionSegment(Segment);
224 return STATUS_SUCCESS + 1;
225 }
226 else if (Entry)
227 {
228 PFN_NUMBER Page = PFN_FROM_SSE(Entry);
229 DPRINT("Take reference to page %x #\n", Page);
230
231 if (MiGetPfnEntry(Page) == NULL)
232 {
233 DPRINT1("Found no PFN entry for page 0x%x in page entry 0x%x (segment: 0x%p, offset: %08x%08x)\n",
234 Page,
235 Entry,
236 Segment,
237 TotalOffset.HighPart,
238 TotalOffset.LowPart);
239 KeBugCheck(CACHE_MANAGER);
240 }
241
242 OldIrql = MiAcquirePfnLock();
243 MmReferencePage(Page);
244 MiReleasePfnLock(OldIrql);
245
246 Status = MmCreateVirtualMapping(Process, Address, Attributes, &Page, 1);
247 if (NT_SUCCESS(Status))
248 {
249 MmInsertRmap(Page, Process, Address);
250 }
251 DPRINT("XXX Set Event %x\n", Status);
252 MiSetPageEvent(Process, Address);
253 MmUnlockSectionSegment(Segment);
254 DPRINT("Status %x\n", Status);
255 return Status;
256 }
257 else
258 {
259 DPRINT("Get page into section\n");
260 /*
261 * If the entry is zero (and it can't change because we have
262 * locked the segment) then we need to load the page.
263 */
264 //DPRINT1("Read from file %08x %wZ\n", FileOffset.LowPart, &Section->FileObject->FileName);
265 Required->State = 2;
266 Required->Context = Segment->FileObject;
267 Required->Consumer = Consumer;
268 Required->FileOffset = FileOffset;
269 Required->Amount = PAGE_SIZE;
270 Required->DoAcquisition = MiReadFilePage;
271
272 MmSetPageEntrySectionSegment(Segment,
273 &TotalOffset,
274 MAKE_SWAP_SSE(MM_WAIT_ENTRY));
275
276 MmUnlockSectionSegment(Segment);
277 return STATUS_MORE_PROCESSING_REQUIRED;
278 }
279 ASSERT(FALSE);
280 return STATUS_ACCESS_VIOLATION;
281 }
282
283 NTSTATUS
284 NTAPI
MiCopyPageToPage(PFN_NUMBER DestPage,PFN_NUMBER SrcPage)285 MiCopyPageToPage(PFN_NUMBER DestPage, PFN_NUMBER SrcPage)
286 {
287 PEPROCESS Process;
288 KIRQL Irql, Irql2;
289 PVOID TempAddress, TempSource;
290
291 Process = PsGetCurrentProcess();
292 TempAddress = MiMapPageInHyperSpace(Process, DestPage, &Irql);
293 if (TempAddress == NULL)
294 {
295 return STATUS_NO_MEMORY;
296 }
297 TempSource = MiMapPageInHyperSpace(Process, SrcPage, &Irql2);
298 if (!TempSource) {
299 MiUnmapPageInHyperSpace(Process, TempAddress, Irql);
300 return STATUS_NO_MEMORY;
301 }
302
303 memcpy(TempAddress, TempSource, PAGE_SIZE);
304
305 MiUnmapPageInHyperSpace(Process, TempSource, Irql2);
306 MiUnmapPageInHyperSpace(Process, TempAddress, Irql);
307 return STATUS_SUCCESS;
308 }
309
310 /*
311
312 This function is deceptively named, in that it does the actual work of handling
313 access faults on data sections. In the case of the code that's present here,
314 we don't allow cow sections, but we do need this to unset the initial
315 PAGE_READONLY condition of pages faulted into the cache so that we can add
316 a dirty bit in the section page table on the first modification.
317
318 In the ultimate form of this code, CoW is reenabled.
319
320 */
321
322 NTSTATUS
323 NTAPI
MiCowCacheSectionPage(_In_ PMMSUPPORT AddressSpace,_In_ PMEMORY_AREA MemoryArea,_In_ PVOID Address,_In_ BOOLEAN Locked,_Inout_ PMM_REQUIRED_RESOURCES Required)324 MiCowCacheSectionPage (
325 _In_ PMMSUPPORT AddressSpace,
326 _In_ PMEMORY_AREA MemoryArea,
327 _In_ PVOID Address,
328 _In_ BOOLEAN Locked,
329 _Inout_ PMM_REQUIRED_RESOURCES Required)
330 {
331 PMM_SECTION_SEGMENT Segment;
332 PFN_NUMBER NewPage, OldPage;
333 NTSTATUS Status;
334 PVOID PAddress;
335 LARGE_INTEGER Offset;
336 PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
337
338 DPRINT("MmAccessFaultSectionView(%p, %p, %p, %u)\n",
339 AddressSpace,
340 MemoryArea,
341 Address,
342 Locked);
343
344 Segment = MemoryArea->Data.SectionData.Segment;
345
346 /* Lock the segment */
347 MmLockSectionSegment(Segment);
348
349 /* Find the offset of the page */
350 PAddress = MM_ROUND_DOWN(Address, PAGE_SIZE);
351 Offset.QuadPart = (ULONG_PTR)PAddress - MA_GetStartingAddress(MemoryArea) +
352 MemoryArea->Data.SectionData.ViewOffset.QuadPart;
353
354 if (!Segment->WriteCopy /*&&
355 !MemoryArea->Data.SectionData.WriteCopyView*/ ||
356 Segment->Image.Characteristics & IMAGE_SCN_MEM_SHARED)
357 {
358 #if 0
359 if (Region->Protect == PAGE_READWRITE ||
360 Region->Protect == PAGE_EXECUTE_READWRITE)
361 #endif
362 {
363 ULONG_PTR Entry;
364 DPRINTC("setting non-cow page %p %p:%p offset %I64x (%Ix) to writable\n",
365 Segment,
366 Process,
367 PAddress,
368 Offset.QuadPart,
369 MmGetPfnForProcess(Process, Address));
370 if (Segment->FileObject)
371 {
372 DPRINTC("file %wZ\n", &Segment->FileObject->FileName);
373 }
374 Entry = MmGetPageEntrySectionSegment(Segment, &Offset);
375 DPRINT("Entry %x\n", Entry);
376 if (Entry &&
377 !IS_SWAP_FROM_SSE(Entry) &&
378 PFN_FROM_SSE(Entry) == MmGetPfnForProcess(Process, Address)) {
379
380 MmSetPageEntrySectionSegment(Segment,
381 &Offset,
382 DIRTY_SSE(Entry));
383 }
384 MmSetPageProtect(Process, PAddress, PAGE_READWRITE);
385 MmSetDirtyPage(Process, PAddress);
386 MmUnlockSectionSegment(Segment);
387 DPRINT("Done\n");
388 return STATUS_SUCCESS;
389 }
390 #if 0
391 else
392 {
393 DPRINT("Not supposed to be writable\n");
394 MmUnlockSectionSegment(Segment);
395 return STATUS_ACCESS_VIOLATION;
396 }
397 #endif
398 }
399
400 if (!Required->Page[0])
401 {
402 SWAPENTRY SwapEntry;
403 if (MmIsPageSwapEntry(Process, Address))
404 {
405 MmGetPageFileMapping(Process, Address, &SwapEntry);
406 MmUnlockSectionSegment(Segment);
407 if (SwapEntry == MM_WAIT_ENTRY)
408 return STATUS_SUCCESS + 1; // Wait ... somebody else is getting it right now
409 else
410 return STATUS_SUCCESS; // Nonwait swap entry ... handle elsewhere
411 }
412 /* Call out to acquire a page to copy to. We'll be re-called when
413 * the page has been allocated. */
414 Required->Page[1] = MmGetPfnForProcess(Process, Address);
415 Required->Consumer = MC_CACHE;
416 Required->Amount = 1;
417 Required->File = __FILE__;
418 Required->Line = __LINE__;
419 Required->DoAcquisition = MiGetOnePage;
420 MmCreatePageFileMapping(Process, Address, MM_WAIT_ENTRY);
421 MmUnlockSectionSegment(Segment);
422 return STATUS_MORE_PROCESSING_REQUIRED;
423 }
424
425 NewPage = Required->Page[0];
426 OldPage = Required->Page[1];
427
428 DPRINT("Allocated page %x\n", NewPage);
429
430 /* Unshare the old page */
431 MmDeleteRmap(OldPage, Process, PAddress);
432
433 /* Copy the old page */
434 DPRINT("Copying\n");
435 MiCopyPageToPage(NewPage, OldPage);
436
437 /* Set the PTE to point to the new page */
438 Status = MmCreateVirtualMapping(Process,
439 Address,
440 PAGE_READWRITE,
441 &NewPage,
442 1);
443
444 if (!NT_SUCCESS(Status))
445 {
446 DPRINT1("MmCreateVirtualMapping failed, not out of memory\n");
447 ASSERT(FALSE);
448 MmUnlockSectionSegment(Segment);
449 return Status;
450 }
451
452 MmInsertRmap(NewPage, Process, PAddress);
453 MmReleasePageMemoryConsumer(MC_CACHE, OldPage);
454 MmUnlockSectionSegment(Segment);
455
456 DPRINT("Address 0x%p\n", Address);
457 return STATUS_SUCCESS;
458 }
459 #endif
460
461 KEVENT MmWaitPageEvent;
462
463 #ifdef NEWCC
464 typedef struct _WORK_QUEUE_WITH_CONTEXT
465 {
466 WORK_QUEUE_ITEM WorkItem;
467 PMMSUPPORT AddressSpace;
468 PMEMORY_AREA MemoryArea;
469 PMM_REQUIRED_RESOURCES Required;
470 NTSTATUS Status;
471 KEVENT Wait;
472 AcquireResource DoAcquisition;
473 } WORK_QUEUE_WITH_CONTEXT, *PWORK_QUEUE_WITH_CONTEXT;
474
475 /*
476
477 This is the work item used do blocking resource acquisition when a fault
478 handler returns STATUS_MORE_PROCESSING_REQUIRED. It's used to allow resource
479 acquisition to take place on a different stack, and outside of any locks used
480 by fault handling, making recursive fault handling possible when required.
481
482 */
483
_Function_class_(WORKER_THREAD_ROUTINE)484 _Function_class_(WORKER_THREAD_ROUTINE)
485 VOID
486 NTAPI
487 MmpFaultWorker(PVOID Parameter)
488 {
489 PWORK_QUEUE_WITH_CONTEXT WorkItem = Parameter;
490
491 DPRINT("Calling work\n");
492 WorkItem->Status = WorkItem->Required->DoAcquisition(WorkItem->AddressSpace,
493 WorkItem->MemoryArea,
494 WorkItem->Required);
495 DPRINT("Status %x\n", WorkItem->Status);
496 KeSetEvent(&WorkItem->Wait, IO_NO_INCREMENT, FALSE);
497 }
498
499 /*
500
501 This code separates the action of fault handling into an upper and lower
502 handler to allow the inner handler to optionally be called in work item
503 if the stack is getting too deep. My experiments show that the third
504 recursive page fault taken at PASSIVE_LEVEL must be shunted away to a
505 worker thread. In the ultimate form of this code, the primary fault handler
506 makes this decision by using a thread-local counter to detect a too-deep
507 fault stack and call the inner fault handler in a worker thread if required.
508
509 Note that faults are taken at passive level and have access to ordinary
510 driver entry points such as those that read and write files, and filesystems
511 should use paged structures whenever possible. This makes recursive faults
512 both a perfectly normal occurrence, and a worthwhile case to handle.
513
514 The code below will repeatedly call MiCowSectionPage as long as it returns
515 either STATUS_SUCCESS + 1 or STATUS_MORE_PROCESSING_REQUIRED. In the more
516 processing required case, we call out to a blocking resource acquisition
517 function and then recall the fault handler with the shared state represented
518 by the MM_REQUIRED_RESOURCES struct.
519
520 In the other case, we wait on the wait entry event and recall the handler.
521 Each time the wait entry event is signalled, one thread has removed an
522 MM_WAIT_ENTRY from a page table.
523
524 In the ultimate form of this code, there is a single system wide fault handler
525 for each of access fault and not present and each memory area contains a
526 function pointer that indicates the active fault handler. Since the mm code
527 in reactos is currently fragmented, I didn't bring this change to trunk.
528
529 */
530
531 NTSTATUS
532 NTAPI
MmpSectionAccessFaultInner(KPROCESSOR_MODE Mode,PMMSUPPORT AddressSpace,ULONG_PTR Address,BOOLEAN FromMdl,PETHREAD Thread)533 MmpSectionAccessFaultInner(KPROCESSOR_MODE Mode,
534 PMMSUPPORT AddressSpace,
535 ULONG_PTR Address,
536 BOOLEAN FromMdl,
537 PETHREAD Thread)
538 {
539 MEMORY_AREA* MemoryArea;
540 NTSTATUS Status;
541 BOOLEAN Locked = FromMdl;
542 MM_REQUIRED_RESOURCES Resources = { 0 };
543 WORK_QUEUE_WITH_CONTEXT Context;
544
545 RtlZeroMemory(&Context, sizeof(WORK_QUEUE_WITH_CONTEXT));
546
547 DPRINT("MmAccessFault(Mode %d, Address %Ix)\n", Mode, Address);
548
549 if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
550 {
551 DPRINT1("Page fault at high IRQL was %u\n", KeGetCurrentIrql());
552 return STATUS_UNSUCCESSFUL;
553 }
554
555 /* Find the memory area for the faulting address */
556 if (Address >= (ULONG_PTR)MmSystemRangeStart)
557 {
558 /* Check permissions */
559 if (Mode != KernelMode)
560 {
561 DPRINT("MmAccessFault(Mode %d, Address %Ix)\n", Mode, Address);
562 return STATUS_ACCESS_VIOLATION;
563 }
564 AddressSpace = MmGetKernelAddressSpace();
565 }
566 else
567 {
568 AddressSpace = &PsGetCurrentProcess()->Vm;
569 }
570
571 if (!FromMdl)
572 {
573 MmLockAddressSpace(AddressSpace);
574 }
575
576 do
577 {
578 MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)Address);
579 if (MemoryArea == NULL ||
580 MemoryArea->DeleteInProgress)
581 {
582 if (!FromMdl)
583 {
584 MmUnlockAddressSpace(AddressSpace);
585 }
586 DPRINT("Address: %Ix\n", Address);
587 return STATUS_ACCESS_VIOLATION;
588 }
589
590 DPRINT("Type %x (%p -> %p)\n",
591 MemoryArea->Type,
592 MA_GetStartingAddress(MemoryArea),
593 MA_GetEndingAddress(MemoryArea));
594
595 Resources.DoAcquisition = NULL;
596
597 // Note: fault handlers are called with address space locked
598 // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed
599 Status = MiCowCacheSectionPage(AddressSpace,
600 MemoryArea,
601 (PVOID)Address,
602 Locked,
603 &Resources);
604
605 if (!FromMdl)
606 {
607 MmUnlockAddressSpace(AddressSpace);
608 }
609
610 if (Status == STATUS_SUCCESS + 1)
611 {
612 /* Wait page ... */
613 DPRINT("Waiting for %Ix\n", Address);
614 MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
615 DPRINT("Restarting fault %Ix\n", Address);
616 Status = STATUS_MM_RESTART_OPERATION;
617 }
618 else if (Status == STATUS_MM_RESTART_OPERATION)
619 {
620 /* Clean slate */
621 RtlZeroMemory(&Resources, sizeof(Resources));
622 }
623 else if (Status == STATUS_MORE_PROCESSING_REQUIRED)
624 {
625 if (Thread->ActiveFaultCount > 0)
626 {
627 DPRINT("Already fault handling ... going to work item (%Ix)\n",
628 Address);
629 Context.AddressSpace = AddressSpace;
630 Context.MemoryArea = MemoryArea;
631 Context.Required = &Resources;
632 KeInitializeEvent(&Context.Wait, NotificationEvent, FALSE);
633
634 ExInitializeWorkItem(&Context.WorkItem,
635 MmpFaultWorker,
636 &Context);
637
638 DPRINT("Queue work item\n");
639 ExQueueWorkItem(&Context.WorkItem, DelayedWorkQueue);
640 DPRINT("Wait\n");
641 KeWaitForSingleObject(&Context.Wait, 0, KernelMode, FALSE, NULL);
642 Status = Context.Status;
643 DPRINT("Status %x\n", Status);
644 }
645 else
646 {
647 Status = Resources.DoAcquisition(AddressSpace, MemoryArea, &Resources);
648 }
649
650 if (NT_SUCCESS(Status))
651 {
652 Status = STATUS_MM_RESTART_OPERATION;
653 }
654 }
655
656 if (!FromMdl)
657 {
658 MmLockAddressSpace(AddressSpace);
659 }
660 }
661 while (Status == STATUS_MM_RESTART_OPERATION);
662
663 if (!NT_SUCCESS(Status) && MemoryArea->Type == 1)
664 {
665 DPRINT1("Completed page fault handling %Ix %x\n", Address, Status);
666 DPRINT1("Type %x (%p -> %p)\n",
667 MemoryArea->Type,
668 MA_GetStartingAddress(MemoryArea),
669 MA_GetEndingAddress(MemoryArea));
670 }
671
672 if (!FromMdl)
673 {
674 MmUnlockAddressSpace(AddressSpace);
675 }
676
677 return Status;
678 }
679
680 /*
681
682 This is the outer fault handler mentioned in the description of
683 MmpSectionAccessFaultInner. It increments a fault depth count in the current
684 thread.
685
686 In the ultimate form of this code, the lower fault handler will optionally
687 use the count to keep the kernel stack from overflowing.
688
689 */
690
691 NTSTATUS
692 NTAPI
MmAccessFaultCacheSection(KPROCESSOR_MODE Mode,ULONG_PTR Address,BOOLEAN FromMdl)693 MmAccessFaultCacheSection(KPROCESSOR_MODE Mode,
694 ULONG_PTR Address,
695 BOOLEAN FromMdl)
696 {
697 PETHREAD Thread;
698 PMMSUPPORT AddressSpace;
699 NTSTATUS Status;
700
701 DPRINT("MmpAccessFault(Mode %d, Address %Ix)\n", Mode, Address);
702
703 Thread = PsGetCurrentThread();
704
705 if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
706 {
707 DPRINT1("Page fault at high IRQL %u, address %Ix\n",
708 KeGetCurrentIrql(),
709 Address);
710 return STATUS_UNSUCCESSFUL;
711 }
712
713 /* Find the memory area for the faulting address */
714 if (Address >= (ULONG_PTR)MmSystemRangeStart)
715 {
716 /* Check permissions */
717 if (Mode != KernelMode)
718 {
719 DPRINT1("Address: %p:%Ix\n", PsGetCurrentProcess(), Address);
720 return STATUS_ACCESS_VIOLATION;
721 }
722 AddressSpace = MmGetKernelAddressSpace();
723 }
724 else
725 {
726 AddressSpace = &PsGetCurrentProcess()->Vm;
727 }
728
729 Thread->ActiveFaultCount++;
730 Status = MmpSectionAccessFaultInner(Mode,
731 AddressSpace,
732 Address,
733 FromMdl,
734 Thread);
735 Thread->ActiveFaultCount--;
736
737 return Status;
738 }
739
740 /*
741
742 As above, this code separates the active part of fault handling from a carrier
743 that can use the thread's active fault count to determine whether a work item
744 is required. Also as above, this function repeatedly calls the active not
745 present fault handler until a clear success or failure is received, using a
746 return of STATUS_MORE_PROCESSING_REQUIRED or STATUS_SUCCESS + 1.
747
748 */
749
750 NTSTATUS
751 NTAPI
MmNotPresentFaultCacheSectionInner(KPROCESSOR_MODE Mode,PMMSUPPORT AddressSpace,ULONG_PTR Address,BOOLEAN FromMdl,PETHREAD Thread)752 MmNotPresentFaultCacheSectionInner(KPROCESSOR_MODE Mode,
753 PMMSUPPORT AddressSpace,
754 ULONG_PTR Address,
755 BOOLEAN FromMdl,
756 PETHREAD Thread)
757 {
758 BOOLEAN Locked = FromMdl;
759 PMEMORY_AREA MemoryArea;
760 MM_REQUIRED_RESOURCES Resources = { 0 };
761 WORK_QUEUE_WITH_CONTEXT Context;
762 NTSTATUS Status = STATUS_SUCCESS;
763
764 RtlZeroMemory(&Context, sizeof(WORK_QUEUE_WITH_CONTEXT));
765
766 if (!FromMdl)
767 {
768 MmLockAddressSpace(AddressSpace);
769 }
770
771 /* Call the memory area specific fault handler */
772 do
773 {
774 MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)Address);
775 if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
776 {
777 Status = STATUS_ACCESS_VIOLATION;
778 if (MemoryArea)
779 {
780 DPRINT1("Type %x DIP %x\n",
781 MemoryArea->Type,
782 MemoryArea->DeleteInProgress);
783 }
784 else
785 {
786 DPRINT1("No memory area\n");
787 }
788 DPRINT1("Process %p, Address %Ix\n",
789 MmGetAddressSpaceOwner(AddressSpace),
790 Address);
791 break;
792 }
793
794 DPRINTC("Type %x (%p -> %08Ix -> %p) in %p\n",
795 MemoryArea->Type,
796 MA_GetStartingAddress(MemoryArea),
797 Address,
798 MA_GetEndingAddress(MemoryArea),
799 PsGetCurrentThread());
800
801 Resources.DoAcquisition = NULL;
802
803 // Note: fault handlers are called with address space locked
804 // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed
805
806 Status = MmNotPresentFaultCachePage(AddressSpace,
807 MemoryArea,
808 (PVOID)Address,
809 Locked,
810 &Resources);
811
812 if (!FromMdl)
813 {
814 MmUnlockAddressSpace(AddressSpace);
815 }
816
817 if (Status == STATUS_SUCCESS)
818 {
819 ; // Nothing
820 }
821 else if (Status == STATUS_SUCCESS + 1)
822 {
823 /* Wait page ... */
824 DPRINT("Waiting for %Ix\n", Address);
825 MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
826 DPRINT("Done waiting for %Ix\n", Address);
827 Status = STATUS_MM_RESTART_OPERATION;
828 }
829 else if (Status == STATUS_MM_RESTART_OPERATION)
830 {
831 /* Clean slate */
832 DPRINT("Clear resource\n");
833 RtlZeroMemory(&Resources, sizeof(Resources));
834 }
835 else if (Status == STATUS_MORE_PROCESSING_REQUIRED)
836 {
837 if (Thread->ActiveFaultCount > 2)
838 {
839 DPRINTC("Already fault handling ... going to work item (%Ix)\n", Address);
840 Context.AddressSpace = AddressSpace;
841 Context.MemoryArea = MemoryArea;
842 Context.Required = &Resources;
843 KeInitializeEvent(&Context.Wait, NotificationEvent, FALSE);
844
845 ExInitializeWorkItem(&Context.WorkItem,
846 (PWORKER_THREAD_ROUTINE)MmpFaultWorker,
847 &Context);
848
849 DPRINT("Queue work item\n");
850 ExQueueWorkItem(&Context.WorkItem, DelayedWorkQueue);
851 DPRINT("Wait\n");
852 KeWaitForSingleObject(&Context.Wait, 0, KernelMode, FALSE, NULL);
853 Status = Context.Status;
854 DPRINTC("Status %x\n", Status);
855 }
856 else
857 {
858 DPRINT("DoAcquisition %p\n", Resources.DoAcquisition);
859
860 Status = Resources.DoAcquisition(AddressSpace,
861 MemoryArea,
862 &Resources);
863
864 DPRINT("DoAcquisition %p -> %x\n",
865 Resources.DoAcquisition,
866 Status);
867 }
868
869 if (NT_SUCCESS(Status))
870 {
871 Status = STATUS_MM_RESTART_OPERATION;
872 }
873 }
874 else if (NT_SUCCESS(Status))
875 {
876 ASSERT(FALSE);
877 }
878
879 if (!FromMdl)
880 {
881 MmLockAddressSpace(AddressSpace);
882 }
883 }
884 while (Status == STATUS_MM_RESTART_OPERATION);
885
886 DPRINTC("Completed page fault handling: %p:%Ix %x\n",
887 MmGetAddressSpaceOwner(AddressSpace),
888 Address,
889 Status);
890
891 if (!FromMdl)
892 {
893 MmUnlockAddressSpace(AddressSpace);
894 }
895
896 MiSetPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
897 DPRINT("Done %x\n", Status);
898
899 return Status;
900 }
901
902 /*
903
904 Call the inner not present fault handler, keeping track of the fault count.
905 In the ultimate form of this code, optionally use a worker thread the handle
906 the fault in order to sidestep stack overflow in the multiple fault case.
907
908 */
909
910 NTSTATUS
911 NTAPI
MmNotPresentFaultCacheSection(KPROCESSOR_MODE Mode,ULONG_PTR Address,BOOLEAN FromMdl)912 MmNotPresentFaultCacheSection(KPROCESSOR_MODE Mode,
913 ULONG_PTR Address,
914 BOOLEAN FromMdl)
915 {
916 PETHREAD Thread;
917 PMMSUPPORT AddressSpace;
918 NTSTATUS Status;
919
920 Address &= ~(PAGE_SIZE - 1);
921 DPRINT("MmNotPresentFault(Mode %d, Address %Ix)\n", Mode, Address);
922
923 Thread = PsGetCurrentThread();
924
925 if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
926 {
927 DPRINT1("Page fault at high IRQL %u, address %Ix\n",
928 KeGetCurrentIrql(),
929 Address);
930
931 ASSERT(FALSE);
932 return STATUS_UNSUCCESSFUL;
933 }
934
935 /* Find the memory area for the faulting address */
936 if (Address >= (ULONG_PTR)MmSystemRangeStart)
937 {
938 /* Check permissions */
939 if (Mode != KernelMode)
940 {
941 DPRINTC("Address: %x\n", Address);
942 return STATUS_ACCESS_VIOLATION;
943 }
944 AddressSpace = MmGetKernelAddressSpace();
945 }
946 else
947 {
948 AddressSpace = &PsGetCurrentProcess()->Vm;
949 }
950
951 Thread->ActiveFaultCount++;
952 Status = MmNotPresentFaultCacheSectionInner(Mode,
953 AddressSpace,
954 Address,
955 FromMdl,
956 Thread);
957 Thread->ActiveFaultCount--;
958
959 ASSERT(Status != STATUS_UNSUCCESSFUL);
960 ASSERT(Status != STATUS_INVALID_PARAMETER);
961 DPRINT("MmAccessFault %p:%Ix -> %x\n",
962 MmGetAddressSpaceOwner(AddressSpace),
963 Address,
964 Status);
965
966 return Status;
967 }
968 #endif
969