xref: /reactos/ntoskrnl/mm/ARM3/mdlsup.c (revision 5d96ba92)
1 /*
2  * PROJECT:         ReactOS Kernel
3  * LICENSE:         BSD - See COPYING.ARM in the top level directory
4  * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
5  * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
6  * PROGRAMMERS:     ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 /* GLOBALS ********************************************************************/
19 
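/* Tracking switches for system PTEs and locked pages, plus the system-wide
   count of pages locked for system use; they are only defined here and are
   presumably consumed by other ARM3 and verifier code. */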
20 BOOLEAN MmTrackPtes;
21 BOOLEAN MmTrackLockedPages;
22 SIZE_T MmSystemLockPagesCount;
23 
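/* Per-attribute counters of user-space MDL mappings in which the cache
   attribute recorded in the PFN entry overrode the caller-requested one,
   indexed by MI_PFN_CACHE_ATTRIBUTE (see MiMapLockedPagesInUserSpace below). */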
24 ULONG MiCacheOverride[MiNotMapped + 1];
25 
26 /* INTERNAL FUNCTIONS *********************************************************/
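/* Maps the pages of an already-locked MDL into the current process's address
   space: charges nonpaged pool quota, inserts a VadDevicePhysicalMemory long
   VAD for the chosen (or caller-supplied) range, then builds a user PTE for
   every MDL page while honoring the cache attribute recorded in each PFN
   entry. Returns the user-mode address of the mapping; raises on failure. */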
27 static
28 PVOID
29 NTAPI
30 MiMapLockedPagesInUserSpace(
31     _In_ PMDL Mdl,
32     _In_ PVOID StartVa,
33     _In_ MEMORY_CACHING_TYPE CacheType,
34     _In_opt_ PVOID BaseAddress)
35 {
36     NTSTATUS Status;
37     PEPROCESS Process = PsGetCurrentProcess();
38     PETHREAD Thread = PsGetCurrentThread();
39     TABLE_SEARCH_RESULT Result;
40     MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
41     MI_PFN_CACHE_ATTRIBUTE EffectiveCacheAttribute;
42     BOOLEAN IsIoMapping;
43     KIRQL OldIrql;
44     ULONG_PTR StartingVa;
45     ULONG_PTR EndingVa;
46     PMMADDRESS_NODE Parent;
47     PMMVAD_LONG Vad;
48     ULONG NumberOfPages;
49     PMMPTE PointerPte;
50     PMMPDE PointerPde;
51     MMPTE TempPte;
52     PPFN_NUMBER MdlPages;
53     PMMPFN Pfn1;
54     PMMPFN Pfn2;
55     BOOLEAN AddressSpaceLocked = FALSE;
56 
57     PAGED_CODE();
58 
59     DPRINT("MiMapLockedPagesInUserSpace(%p, %p, 0x%x, %p)\n",
60            Mdl, StartVa, CacheType, BaseAddress);
61 
62     NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa,
63                                                    MmGetMdlByteCount(Mdl));
64     MdlPages = MmGetMdlPfnArray(Mdl);
65 
66     ASSERT(CacheType <= MmWriteCombined);
67 
68     IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
69     CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];
70 
71     /* Large pages are always cached, make sure we're not asking for those */
72     if (CacheAttribute != MiCached)
73     {
74         DPRINT1("FIXME: Need to check for large pages\n");
75     }
76 
77     Status = PsChargeProcessNonPagedPoolQuota(Process, sizeof(MMVAD_LONG));
78     if (!NT_SUCCESS(Status))
79     {
80         Vad = NULL;
81         goto Error;
82     }
83 
84     /* Allocate a VAD for our mapped region */
85     Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
86     if (Vad == NULL)
87     {
88         PsReturnProcessNonPagedPoolQuota(Process, sizeof(MMVAD_LONG));
89         Status = STATUS_INSUFFICIENT_RESOURCES;
90         goto Error;
91     }
92 
93     /* Initialize PhysicalMemory VAD */
94     RtlZeroMemory(Vad, sizeof(*Vad));
95     Vad->u2.VadFlags2.LongVad = 1;
96     Vad->u.VadFlags.VadType = VadDevicePhysicalMemory;
97     Vad->u.VadFlags.Protection = MM_READWRITE;
98     Vad->u.VadFlags.PrivateMemory = 1;
99 
100     /* Did the caller specify an address? */
101     if (BaseAddress == NULL)
102     {
103         /* We get to pick the address */
104         MmLockAddressSpace(&Process->Vm);
105         AddressSpaceLocked = TRUE;
106         if (Process->VmDeleted)
107         {
108             Status = STATUS_PROCESS_IS_TERMINATING;
109             goto Error;
110         }
111 
112         Result = MiFindEmptyAddressRangeInTree(NumberOfPages << PAGE_SHIFT,
113                                                MM_VIRTMEM_GRANULARITY,
114                                                &Process->VadRoot,
115                                                &Parent,
116                                                &StartingVa);
117         if (Result == TableFoundNode)
118         {
119             Status = STATUS_NO_MEMORY;
120             goto Error;
121         }
122         EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1;
123         BaseAddress = (PVOID)StartingVa;
124     }
125     else
126     {
127         /* Caller specified a base address */
128         StartingVa = (ULONG_PTR)BaseAddress;
129         EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1;
130 
131         /* Make sure it's valid */
132         if (BYTE_OFFSET(StartingVa) != 0 ||
133             EndingVa <= StartingVa ||
134             EndingVa > (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS)
135         {
136             Status = STATUS_INVALID_ADDRESS;
137             goto Error;
138         }
139 
140         MmLockAddressSpace(&Process->Vm);
141         AddressSpaceLocked = TRUE;
142         if (Process->VmDeleted)
143         {
144             Status = STATUS_PROCESS_IS_TERMINATING;
145             goto Error;
146         }
147 
148         /* Check if it's already in use */
149         Result = MiCheckForConflictingNode(StartingVa >> PAGE_SHIFT,
150                                            EndingVa >> PAGE_SHIFT,
151                                            &Process->VadRoot,
152                                            &Parent);
153         if (Result == TableFoundNode)
154         {
155             Status = STATUS_CONFLICTING_ADDRESSES;
156             goto Error;
157         }
158     }
159 
160     Vad->StartingVpn = StartingVa >> PAGE_SHIFT;
161     Vad->EndingVpn = EndingVa >> PAGE_SHIFT;
162 
163     MiLockProcessWorkingSetUnsafe(Process, Thread);
164 
165     ASSERT(Vad->EndingVpn >= Vad->StartingVpn);
166     MiInsertVad((PMMVAD)Vad, &Process->VadRoot);
167 
168     /* Check if this is uncached */
169     if (CacheAttribute != MiCached)
170     {
171         /* Flush all caches */
172         KeFlushEntireTb(TRUE, TRUE);
173         KeInvalidateAllCaches();
174     }
175 
176     PointerPte = MiAddressToPte(BaseAddress);
177     while (NumberOfPages != 0 &&
178            *MdlPages != LIST_HEAD)
179     {
180         PointerPde = MiPteToPde(PointerPte);
181         MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
182         ASSERT(PointerPte->u.Hard.Valid == 0);
183 
184         /* Add a PDE reference for each page */
185         MiIncrementPageTableReferences(BaseAddress);
186 
187         /* Set up our basic user PTE */
188         MI_MAKE_HARDWARE_PTE_USER(&TempPte,
189                                   PointerPte,
190                                   MM_READWRITE,
191                                   *MdlPages);
192 
193         EffectiveCacheAttribute = CacheAttribute;
194 
195         /* We need to respect the PFN's caching information in some cases */
196         Pfn2 = MiGetPfnEntry(*MdlPages);
197         if (Pfn2 != NULL)
198         {
199             ASSERT(Pfn2->u3.e2.ReferenceCount != 0);
200 
201             switch (Pfn2->u3.e1.CacheAttribute)
202             {
203                 case MiNonCached:
204                     if (CacheAttribute != MiNonCached)
205                     {
206                         MiCacheOverride[1]++;
207                         EffectiveCacheAttribute = MiNonCached;
208                     }
209                     break;
210 
211                 case MiCached:
212                     if (CacheAttribute != MiCached)
213                     {
214                         MiCacheOverride[0]++;
215                         EffectiveCacheAttribute = MiCached;
216                     }
217                     break;
218 
219                 case MiWriteCombined:
220                     if (CacheAttribute != MiWriteCombined)
221                     {
222                         MiCacheOverride[2]++;
223                         EffectiveCacheAttribute = MiWriteCombined;
224                     }
225                     break;
226 
227                 default:
228                     /* We don't support AWE magic (MiNotMapped) */
229                     DPRINT1("FIXME: MiNotMapped is not supported\n");
230                     ASSERT(FALSE);
231                     break;
232             }
233         }
234 
235         /* Configure caching */
236         switch (EffectiveCacheAttribute)
237         {
238             case MiNonCached:
239                 MI_PAGE_DISABLE_CACHE(&TempPte);
240                 MI_PAGE_WRITE_THROUGH(&TempPte);
241                 break;
242             case MiCached:
243                 break;
244             case MiWriteCombined:
245                 MI_PAGE_DISABLE_CACHE(&TempPte);
246                 MI_PAGE_WRITE_COMBINED(&TempPte);
247                 break;
248             default:
249                 ASSERT(FALSE);
250                 break;
251         }
252 
253         /* Make the page valid */
254         MI_WRITE_VALID_PTE(PointerPte, TempPte);
255 
256         /* Acquire a share count */
257         Pfn1 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
258         DPRINT("Incrementing %p from %p\n", Pfn1, _ReturnAddress());
259         OldIrql = MiAcquirePfnLock();
260         Pfn1->u2.ShareCount++;
261         MiReleasePfnLock(OldIrql);
262 
263         /* Next page */
264         MdlPages++;
265         PointerPte++;
266         NumberOfPages--;
267         BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
268     }
269 
270     MiUnlockProcessWorkingSetUnsafe(Process, Thread);
271     ASSERT(AddressSpaceLocked);
272     MmUnlockAddressSpace(&Process->Vm);
273 
274     ASSERT(StartingVa != 0);
275     return (PVOID)((ULONG_PTR)StartingVa + MmGetMdlByteOffset(Mdl));
276 
277 Error:
278     if (AddressSpaceLocked)
279     {
280         MmUnlockAddressSpace(&Process->Vm);
281     }
282     if (Vad != NULL)
283     {
284         ExFreePoolWithTag(Vad, 'ldaV');
285         PsReturnProcessNonPagedPoolQuota(Process, sizeof(MMVAD_LONG));
286     }
287     ExRaiseStatus(Status);
288 }
289 
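/* Reverses MiMapLockedPagesInUserSpace: locates the VadDevicePhysicalMemory
   VAD covering BaseAddress, removes it from the VAD tree, erases the user
   PTEs, drops the page-table share counts and references, flushes the TB and
   frees the VAD. */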
290 static
291 VOID
292 NTAPI
293 MiUnmapLockedPagesInUserSpace(
294     _In_ PVOID BaseAddress,
295     _In_ PMDL Mdl)
296 {
297     PEPROCESS Process = PsGetCurrentProcess();
298     PETHREAD Thread = PsGetCurrentThread();
299     PMMVAD Vad;
300     PMMPTE PointerPte;
301     PMMPDE PointerPde;
302     KIRQL OldIrql;
303     ULONG NumberOfPages;
304     PPFN_NUMBER MdlPages;
305     PFN_NUMBER PageTablePage;
306 
307     DPRINT("MiUnmapLockedPagesInUserSpace(%p, %p)\n", BaseAddress, Mdl);
308 
309     NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
310                                                    MmGetMdlByteCount(Mdl));
311     ASSERT(NumberOfPages != 0);
312     MdlPages = MmGetMdlPfnArray(Mdl);
313 
314     /* Find the VAD */
315     MmLockAddressSpace(&Process->Vm);
316     Vad = MiLocateAddress(BaseAddress);
317     if (!Vad ||
318         Vad->u.VadFlags.VadType != VadDevicePhysicalMemory)
319     {
320         DPRINT1("MiUnmapLockedPagesInUserSpace invalid for %p\n", BaseAddress);
321         MmUnlockAddressSpace(&Process->Vm);
322         return;
323     }
324 
325     MiLockProcessWorkingSetUnsafe(Process, Thread);
326 
327     /* Remove it from the process VAD tree */
328     ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
329     MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);
330     PsReturnProcessNonPagedPoolQuota(Process, sizeof(MMVAD_LONG));
331 
332     /* MiRemoveNode should have removed us if we were the hint */
333     ASSERT(Process->VadRoot.NodeHint != Vad);
334 
335     PointerPte = MiAddressToPte(BaseAddress);
336     OldIrql = MiAcquirePfnLock();
337     while (NumberOfPages != 0 &&
338            *MdlPages != LIST_HEAD)
339     {
340         ASSERT(MiAddressToPte(PointerPte)->u.Hard.Valid == 1);
341         ASSERT(PointerPte->u.Hard.Valid == 1);
342 
343         /* Invalidate it */
344         MI_ERASE_PTE(PointerPte);
345 
346         /* We invalidated this PTE, so dereference the PDE */
347         PointerPde = MiAddressToPde(BaseAddress);
348         PageTablePage = PointerPde->u.Hard.PageFrameNumber;
349         MiDecrementShareCount(MiGetPfnEntry(PageTablePage), PageTablePage);
350 
351         if (MiDecrementPageTableReferences(BaseAddress) == 0)
352         {
353             ASSERT(MiIsPteOnPdeBoundary(PointerPte + 1) || (NumberOfPages == 1));
354             MiDeletePde(PointerPde, Process);
355         }
356 
357         /* Next page */
358         PointerPte++;
359         NumberOfPages--;
360         BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
361         MdlPages++;
362     }
363 
364     KeFlushProcessTb();
365     MiReleasePfnLock(OldIrql);
366     MiUnlockProcessWorkingSetUnsafe(Process, Thread);
367     MmUnlockAddressSpace(&Process->Vm);
368     ExFreePoolWithTag(Vad, 'ldaV');
369 }
370 
371 /* PUBLIC FUNCTIONS ***********************************************************/
372 
373 /*
374  * @implemented
375  */
376 PMDL
377 NTAPI
378 MmCreateMdl(IN PMDL Mdl,
379             IN PVOID Base,
380             IN SIZE_T Length)
381 {
382     SIZE_T Size;
383 
384     //
385     // Check if we don't have an MDL built
386     //
387     if (!Mdl)
388     {
389         //
390         // Calculate the size we'll need  and allocate the MDL
391     // Calculate the size we'll need and allocate the MDL
392         Size = MmSizeOfMdl(Base, Length);
393         Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
394         if (!Mdl) return NULL;
395     }
396 
397     //
398     // Initialize it
399     //
400     MmInitializeMdl(Mdl, Base, Length);
401     return Mdl;
402 }
403 
404 /*
405  * @implemented
406  */
407 SIZE_T
408 NTAPI
409 MmSizeOfMdl(IN PVOID Base,
410             IN SIZE_T Length)
411 {
412     //
413     // Return the MDL size
414     //
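    // For example, with 4 KB pages a Length of 0x4000 starting at page offset
    // 0x200 spans five pages, so this returns sizeof(MDL) + 5 * sizeof(PFN_NUMBER)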
415     return sizeof(MDL) +
416            (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
417 }
418 
419 /*
420  * @implemented
421  */
422 VOID
423 NTAPI
424 MmBuildMdlForNonPagedPool(IN PMDL Mdl)
425 {
426     PPFN_NUMBER MdlPages, EndPage;
427     PFN_NUMBER Pfn, PageCount;
428     PVOID Base;
429     PMMPTE PointerPte;
430 
431     //
432     // Sanity checks
433     //
434     ASSERT(Mdl->ByteCount != 0);
435     ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
436                              MDL_MAPPED_TO_SYSTEM_VA |
437                              MDL_SOURCE_IS_NONPAGED_POOL |
438                              MDL_PARTIAL)) == 0);
439 
440     //
441     // We know the MDL isn't associated to a process now
442     //
443     Mdl->Process = NULL;
444 
445     //
446     // Get page and VA information
447     //
448     MdlPages = (PPFN_NUMBER)(Mdl + 1);
449     Base = Mdl->StartVa;
450 
451     //
452     // Set the system address and now get the page count
453     //
454     Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
455     PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
456                                                Mdl->ByteCount);
457     ASSERT(PageCount != 0);
458     EndPage = MdlPages + PageCount;
459 
460     //
461     // Loop the PTEs
462     //
463     PointerPte = MiAddressToPte(Base);
464     do
465     {
466         //
467         // Write the PFN
468         //
469         Pfn = PFN_FROM_PTE(PointerPte++);
470         *MdlPages++ = Pfn;
471     } while (MdlPages < EndPage);
472 
473     //
474     // Set the nonpaged pool flag
475     //
476     Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
477 
478     //
479     // Check if this is an I/O mapping
480     //
481     if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
482 }
483 
484 /*
485  * @implemented
486  */
487 PMDL
488 NTAPI
489 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
490                       IN PHYSICAL_ADDRESS HighAddress,
491                       IN PHYSICAL_ADDRESS SkipBytes,
492                       IN SIZE_T TotalBytes)
493 {
494     //
495     // Call the internal routine
496     //
497     return MiAllocatePagesForMdl(LowAddress,
498                                  HighAddress,
499                                  SkipBytes,
500                                  TotalBytes,
501                                  MiNotMapped,
502                                  0);
503 }
504 
505 /*
506  * @implemented
507  */
508 PMDL
509 NTAPI
510 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
511                         IN PHYSICAL_ADDRESS HighAddress,
512                         IN PHYSICAL_ADDRESS SkipBytes,
513                         IN SIZE_T TotalBytes,
514                         IN MEMORY_CACHING_TYPE CacheType,
515                         IN ULONG Flags)
516 {
517     MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
518 
519     //
520     // Check for invalid cache type
521     //
522     if (CacheType > MmWriteCombined)
523     {
524         //
525         // Normalize to default
526         //
527         CacheAttribute = MiNotMapped;
528     }
529     else
530     {
531         //
532         // Conver to internal caching attribute
533         // Convert to internal caching attribute
534         CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
535     }
536 
537     //
538     // Only these flags are allowed
539     //
540     if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
541     {
542         //
543         // Silently fail
544         //
545         return NULL;
546     }
547 
548     //
549     // Call the internal routine
550     //
551     return MiAllocatePagesForMdl(LowAddress,
552                                  HighAddress,
553                                  SkipBytes,
554                                  TotalBytes,
555                                  CacheAttribute,
556                                  Flags);
557 }
558 
559 /*
560  * @implemented
561  */
562 VOID
563 NTAPI
564 MmFreePagesFromMdl(IN PMDL Mdl)
565 {
566     PVOID Base;
567     PPFN_NUMBER Pages;
568     LONG NumberOfPages;
569     PMMPFN Pfn1;
570     KIRQL OldIrql;
571     DPRINT("Freeing MDL: %p\n", Mdl);
572 
573     //
574     // Sanity checks
575     //
576     ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
577     ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
578     ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
579 
580     //
581     // Get address and page information
582     //
583     Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
584     NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
585 
586     //
587     // Acquire PFN lock
588     //
589     OldIrql = MiAcquirePfnLock();
590 
591     //
592     // Loop all the MDL pages
593     //
594     Pages = (PPFN_NUMBER)(Mdl + 1);
595     do
596     {
597         //
598         // Reached the last page
599         //
600         if (*Pages == LIST_HEAD) break;
601 
602         //
603         // Get the page entry
604         //
605         Pfn1 = MiGetPfnEntry(*Pages);
606         ASSERT(Pfn1);
607         ASSERT(Pfn1->u2.ShareCount == 1);
608         ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
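        // 0x1FFEDCB is (presumably) the sentinel PteFrame value stamped by
        // MiAllocatePagesForMdl; anything else means this page was not
        // allocated through MmAllocatePagesForMdl(Ex)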
609         if (Pfn1->u4.PteFrame != 0x1FFEDCB)
610         {
611             /* Corrupted PFN entry or invalid free */
612             KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
613         }
614 
615         //
616         // Clear it
617         //
618         Pfn1->u3.e1.StartOfAllocation = 0;
619         Pfn1->u3.e1.EndOfAllocation = 0;
620         Pfn1->u3.e1.PageLocation = StandbyPageList;
621         Pfn1->u2.ShareCount = 0;
622 
623         //
624         // Dereference it
625         //
626         ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
627         if (Pfn1->u3.e2.ReferenceCount != 1)
628         {
629             /* Just take off one reference */
630             InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
631         }
632         else
633         {
634             /* We'll be nuking the whole page */
635             MiDecrementReferenceCount(Pfn1, *Pages);
636         }
637 
638         //
639         // Clear this page and move on
640         //
641         *Pages++ = LIST_HEAD;
642     } while (--NumberOfPages != 0);
643 
644     //
645     // Release the lock
646     //
647     MiReleasePfnLock(OldIrql);
648 
649     //
650     // Remove the pages locked flag
651     //
652     Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
653 }
654 
655 /*
656  * @implemented
657  */
658 PVOID
659 NTAPI
660 MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
661                              IN KPROCESSOR_MODE AccessMode,
662                              IN MEMORY_CACHING_TYPE CacheType,
663                              IN PVOID BaseAddress,
664                              IN ULONG BugCheckOnFailure,
665                              IN ULONG Priority) // MM_PAGE_PRIORITY
666 {
667     PVOID Base;
668     PPFN_NUMBER MdlPages, LastPage;
669     PFN_COUNT PageCount;
670     BOOLEAN IsIoMapping;
671     MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
672     PMMPTE PointerPte;
673     MMPTE TempPte;
674 
675     //
676     // Sanity check
677     //
678     ASSERT(Mdl->ByteCount != 0);
679 
680     //
681     // Get the base
682     //
683     Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
684 
685     //
686     // Handle kernel case first
687     //
688     if (AccessMode == KernelMode)
689     {
690         //
691         // Get the list of pages and count
692         //
693         MdlPages = (PPFN_NUMBER)(Mdl + 1);
694         PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
695         LastPage = MdlPages + PageCount;
696 
697         //
698         // Sanity checks
699         //
700         ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
701                                  MDL_SOURCE_IS_NONPAGED_POOL |
702                                  MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
703         ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);
704 
705         //
706         // Get the correct cache type
707         //
708         IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
709         CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];
710 
711         //
712         // Reserve the PTEs
713         //
714         PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
715         if (!PointerPte)
716         {
717             //
718             // If it can fail, return NULL
719             //
720             if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;
721 
722             //
723             // Should we bugcheck?
724             //
725             if (!BugCheckOnFailure) return NULL;
726 
727             //
728             // Yes, crash the system
729             //
730             KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
731         }
732 
733         //
734         // Get the mapped address
735         //
736         Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);
737 
738         //
739         // Get the template
740         //
741         TempPte = ValidKernelPte;
742         switch (CacheAttribute)
743         {
744             case MiNonCached:
745 
746                 //
747                 // Disable caching
748                 //
749                 MI_PAGE_DISABLE_CACHE(&TempPte);
750                 MI_PAGE_WRITE_THROUGH(&TempPte);
751                 break;
752 
753             case MiWriteCombined:
754 
755                 //
756                 // Enable write combining
757                 //
758                 MI_PAGE_DISABLE_CACHE(&TempPte);
759                 MI_PAGE_WRITE_COMBINED(&TempPte);
760                 break;
761 
762             default:
763                 //
764                 // Nothing to do
765                 //
766                 break;
767         }
768 
769         //
770         // Loop all PTEs
771         //
772         do
773         {
774             //
775             // We're done here
776             //
777             if (*MdlPages == LIST_HEAD) break;
778 
779             //
780             // Write the PTE
781             //
782             TempPte.u.Hard.PageFrameNumber = *MdlPages;
783             MI_WRITE_VALID_PTE(PointerPte++, TempPte);
784         } while (++MdlPages < LastPage);
785 
786         //
787         // Mark it as mapped
788         //
789         ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
790         Mdl->MappedSystemVa = Base;
791         Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
792 
793         //
794         // Check if it was partial
795         //
796         if (Mdl->MdlFlags & MDL_PARTIAL)
797         {
798             //
799             // Write the appropriate flag here too
800             //
801             Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
802         }
803 
804         //
805         // Return the mapped address
806         //
807         return Base;
808     }
809 
810     return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
811 }
812 
813 /*
814  * @implemented
815  */
816 PVOID
817 NTAPI
818 MmMapLockedPages(IN PMDL Mdl,
819                  IN KPROCESSOR_MODE AccessMode)
820 {
821     //
822     // Call the extended version
823     //
824     return MmMapLockedPagesSpecifyCache(Mdl,
825                                         AccessMode,
826                                         MmCached,
827                                         NULL,
828                                         TRUE,
829                                         HighPagePriority);
830 }
831 
832 /*
833  * @implemented
834  */
835 VOID
836 NTAPI
837 MmUnmapLockedPages(IN PVOID BaseAddress,
838                    IN PMDL Mdl)
839 {
840     PVOID Base;
841     PFN_COUNT PageCount, ExtraPageCount;
842     PPFN_NUMBER MdlPages;
843     PMMPTE PointerPte;
844 
845     //
846     // Sanity check
847     //
848     ASSERT(Mdl->ByteCount != 0);
849 
850     //
851     // Check if this is a kernel request
852     //
853     if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
854     {
855         //
856         // Get base and count information
857         //
858         Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
859         PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
860 
861         //
862         // Sanity checks
863         //
864         ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
865         ASSERT(PageCount != 0);
866         ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
867 
868         //
869         // Get the PTE
870         //
871         PointerPte = MiAddressToPte(BaseAddress);
872 
873         //
874         // This should be a resident system PTE
875         //
876         ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
877         ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
878         ASSERT(PointerPte->u.Hard.Valid == 1);
879 
880         //
881         // Check if the caller wants us to free advanced pages
882         //
883         if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
884         {
885             //
886             // Get the MDL page array
887             //
888             MdlPages = MmGetMdlPfnArray(Mdl);
889 
890             /* Number of extra pages stored after the PFN array */
891             ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);
892 
893             //
894             // Do the math
895             //
896             PageCount += ExtraPageCount;
897             PointerPte -= ExtraPageCount;
898             ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
899             ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
900 
901             //
902             // Get the new base address
903             //
904             BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
905                                   (ExtraPageCount << PAGE_SHIFT));
906         }
907 
908         //
909         // Remove flags
910         //
911         Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
912                            MDL_PARTIAL_HAS_BEEN_MAPPED |
913                            MDL_FREE_EXTRA_PTES);
914 
915         //
916         // Release the system PTEs
917         //
918         MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
919     }
920     else
921     {
922         MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
923     }
924 }
925 
926 /*
927  * @implemented
928  */
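/* Illustrative driver-side usage (a sketch, not code from this file; buffer
 * and length names are placeholders):
 *
 *     PMDL Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *     if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
 *     __try
 *     {
 *         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *     }
 *     __except (EXCEPTION_EXECUTE_HANDLER)
 *     {
 *         IoFreeMdl(Mdl);
 *         return GetExceptionCode();
 *     }
 *     ... access the buffer via MmGetSystemAddressForMdlSafe ...
 *     MmUnlockPages(Mdl);
 *     IoFreeMdl(Mdl);
 */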
929 VOID
930 NTAPI
931 MmProbeAndLockPages(IN PMDL Mdl,
932                     IN KPROCESSOR_MODE AccessMode,
933                     IN LOCK_OPERATION Operation)
934 {
935     PPFN_NUMBER MdlPages;
936     PVOID Base, Address, LastAddress, StartAddress;
937     ULONG LockPages, TotalPages;
938     NTSTATUS Status = STATUS_SUCCESS;
939     PEPROCESS CurrentProcess;
940     NTSTATUS ProbeStatus;
941     PMMPTE PointerPte, LastPte;
942     PMMPDE PointerPde;
943 #if (_MI_PAGING_LEVELS >= 3)
944     PMMPDE PointerPpe;
945 #endif
946 #if (_MI_PAGING_LEVELS == 4)
947     PMMPDE PointerPxe;
948 #endif
949     PFN_NUMBER PageFrameIndex;
950     BOOLEAN UsePfnLock;
951     KIRQL OldIrql;
952     PMMPFN Pfn1;
953     DPRINT("Probing MDL: %p\n", Mdl);
954 
955     //
956     // Sanity checks
957     //
958     ASSERT(Mdl->ByteCount != 0);
959     ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
960     ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
961     ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
962                              MDL_MAPPED_TO_SYSTEM_VA |
963                              MDL_SOURCE_IS_NONPAGED_POOL |
964                              MDL_PARTIAL |
965                              MDL_IO_SPACE)) == 0);
966 
967     //
968     // Get page and base information
969     //
970     MdlPages = (PPFN_NUMBER)(Mdl + 1);
971     Base = Mdl->StartVa;
972 
973     //
974     // Get the addresses and how many pages we span (and need to lock)
975     //
976     Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
977     LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
978     LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
979     ASSERT(LockPages != 0);
980 
981     /* Block invalid access */
982     if ((AccessMode != KernelMode) &&
983         ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
984     {
985         /* Caller should be in SEH, raise the error */
986         *MdlPages = LIST_HEAD;
987         ExRaiseStatus(STATUS_ACCESS_VIOLATION);
988     }
989 
990     //
991     // Get the process
992     //
993     if (Address <= MM_HIGHEST_USER_ADDRESS)
994     {
995         //
996         // Get the process
997         //
998         CurrentProcess = PsGetCurrentProcess();
999     }
1000     else
1001     {
1002         //
1003         // No process
1004         //
1005         CurrentProcess = NULL;
1006     }
1007 
1008     //
1009     // Save the number of pages we'll have to lock, and the start address
1010     //
1011     TotalPages = LockPages;
1012     StartAddress = Address;
1013 
1014     /* Large pages not supported */
1015     ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));
1016 
1017     //
1018     // Now probe them
1019     //
1020     ProbeStatus = STATUS_SUCCESS;
1021     _SEH2_TRY
1022     {
1023         //
1024         // Enter probe loop
1025         //
1026         do
1027         {
1028             //
1029             // Assume failure
1030             //
1031             *MdlPages = LIST_HEAD;
1032 
1033             //
1034             // Read
1035             //
1036             *(volatile CHAR*)Address;
1037 
1038             //
1039             // Check if this is write access (only probe for user-mode)
1040             //
1041             if ((Operation != IoReadAccess) &&
1042                 (Address <= MM_HIGHEST_USER_ADDRESS))
1043             {
1044                 //
1045                 // Probe for write too
1046                 //
1047                 ProbeForWriteChar(Address);
1048             }
1049 
1050             //
1051             // Next address...
1052             //
1053             Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);
1054 
1055             //
1056             // Next page...
1057             //
1058             LockPages--;
1059             MdlPages++;
1060         } while (Address < LastAddress);
1061 
1062         //
1063         // Reset back to the original page
1064         //
1065         ASSERT(LockPages == 0);
1066         MdlPages = (PPFN_NUMBER)(Mdl + 1);
1067     }
1068     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
1069     {
1070         //
1071         // Oops :(
1072         //
1073         ProbeStatus = _SEH2_GetExceptionCode();
1074     }
1075     _SEH2_END;
1076 
1077     //
1078     // So how did that go?
1079     //
1080     if (ProbeStatus != STATUS_SUCCESS)
1081     {
1082         //
1083         // Fail
1084         //
1085         DPRINT1("MDL PROBE FAILED!\n");
1086         Mdl->Process = NULL;
1087         ExRaiseStatus(ProbeStatus);
1088     }
1089 
1090     //
1091     // Get the PTE and PDE
1092     //
1093     PointerPte = MiAddressToPte(StartAddress);
1094     PointerPde = MiAddressToPde(StartAddress);
1095 #if (_MI_PAGING_LEVELS >= 3)
1096     PointerPpe = MiAddressToPpe(StartAddress);
1097 #endif
1098 #if (_MI_PAGING_LEVELS == 4)
1099     PointerPxe = MiAddressToPxe(StartAddress);
1100 #endif
1101 
1102     //
1103     // Sanity check
1104     //
1105     ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));
1106 
1107     //
1108     // Check what kind of operation this is
1109     //
1110     if (Operation != IoReadAccess)
1111     {
1112         //
1113         // Set the write flag
1114         //
1115         Mdl->MdlFlags |= MDL_WRITE_OPERATION;
1116     }
1117     else
1118     {
1119         //
1120         // Remove the write flag
1121         //
1122         Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
1123     }
1124 
1125     //
1126     // Mark the MDL as locked *now*
1127     //
1128     Mdl->MdlFlags |= MDL_PAGES_LOCKED;
1129 
1130     //
1131     // Check if this came from kernel mode
1132     //
1133     if (Base > MM_HIGHEST_USER_ADDRESS)
1134     {
1135         //
1136         // We should not have a process
1137         //
1138         ASSERT(CurrentProcess == NULL);
1139         Mdl->Process = NULL;
1140 
1141         //
1142         // In kernel mode, we don't need to check for write access
1143         //
1144         Operation = IoReadAccess;
1145 
1146         //
1147         // Use the PFN lock
1148         //
1149         UsePfnLock = TRUE;
1150         OldIrql = MiAcquirePfnLock();
1151     }
1152     else
1153     {
1154         //
1155         // Sanity checks
1156         //
1157         ASSERT(TotalPages != 0);
1158         ASSERT(CurrentProcess == PsGetCurrentProcess());
1159 
1160         //
1161         // Track locked pages
1162         //
1163         InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
1164                                     TotalPages);
1165 
1166         //
1167         // Save the process
1168         //
1169         Mdl->Process = CurrentProcess;
1170 
1171         /* Lock the process working set */
1172         MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1173         UsePfnLock = FALSE;
1174         OldIrql = MM_NOIRQL;
1175     }
1176 
1177     //
1178     // Get the last PTE
1179     //
1180     LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));
1181 
1182     //
1183     // Loop the pages
1184     //
1185     do
1186     {
1187         //
1188         // Assume failure and check for non-mapped pages
1189         //
1190         *MdlPages = LIST_HEAD;
1191         while (
1192 #if (_MI_PAGING_LEVELS == 4)
1193                (PointerPxe->u.Hard.Valid == 0) ||
1194 #endif
1195 #if (_MI_PAGING_LEVELS >= 3)
1196                (PointerPpe->u.Hard.Valid == 0) ||
1197 #endif
1198                (PointerPde->u.Hard.Valid == 0) ||
1199                (PointerPte->u.Hard.Valid == 0))
1200         {
1201             //
1202             // What kind of lock were we using?
1203             //
1204             if (UsePfnLock)
1205             {
1206                 //
1207                 // Release PFN lock
1208                 //
1209                 MiReleasePfnLock(OldIrql);
1210             }
1211             else
1212             {
1213                 /* Release process working set */
1214                 MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1215             }
1216 
1217             //
1218             // Access the page
1219             //
1220             Address = MiPteToAddress(PointerPte);
1221 
1222             //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1223             Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL);
1224             if (!NT_SUCCESS(Status))
1225             {
1226                 //
1227                 // Fail
1228                 //
1229                 DPRINT1("Access fault failed\n");
1230                 goto Cleanup;
1231             }
1232 
1233             //
1234             // What lock should we use?
1235             //
1236             if (UsePfnLock)
1237             {
1238                 //
1239                 // Grab the PFN lock
1240                 //
1241                 OldIrql = MiAcquirePfnLock();
1242             }
1243             else
1244             {
1245                 /* Lock the process working set */
1246                 MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1247             }
1248         }
1249 
1250         //
1251         // Check if this was a write or modify
1252         //
1253         if (Operation != IoReadAccess)
1254         {
1255             //
1256             // Check if the PTE is not writable
1257             //
1258             if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
1259             {
1260                 //
1261                 // Check if it's copy on write
1262                 //
1263                 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
1264                 {
1265                     //
1266                     // Get the base address and allow a change for user-mode
1267                     //
1268                     Address = MiPteToAddress(PointerPte);
1269                     if (Address <= MM_HIGHEST_USER_ADDRESS)
1270                     {
1271                         //
1272                         // What kind of lock were we using?
1273                         //
1274                         if (UsePfnLock)
1275                         {
1276                             //
1277                             // Release PFN lock
1278                             //
1279                             MiReleasePfnLock(OldIrql);
1280                         }
1281                         else
1282                         {
1283                             /* Release process working set */
1284                             MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1285                         }
1286 
1287                         //
1288                         // Access the page
1289                         //
1290 
1291                         //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1292                         Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL);
1293                         if (!NT_SUCCESS(Status))
1294                         {
1295                             //
1296                             // Fail
1297                             //
1298                             DPRINT1("Access fault failed\n");
1299                             goto Cleanup;
1300                         }
1301 
1302                         //
1303                         // Re-acquire the lock
1304                         //
1305                         if (UsePfnLock)
1306                         {
1307                             //
1308                             // Grab the PFN lock
1309                             //
1310                             OldIrql = MiAcquirePfnLock();
1311                         }
1312                         else
1313                         {
1314                             /* Lock the process working set */
1315                             MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1316                         }
1317 
1318                         //
1319                         // Start over
1320                         //
1321                         continue;
1322                     }
1323                 }
1324 
1325                 //
1326                 // Fail, since we won't allow this
1327                 //
1328                 Status = STATUS_ACCESS_VIOLATION;
1329                 goto CleanupWithLock;
1330             }
1331         }
1332 
1333         //
1334         // Grab the PFN
1335         //
1336         PageFrameIndex = PFN_FROM_PTE(PointerPte);
1337         Pfn1 = MiGetPfnEntry(PageFrameIndex);
1338         if (Pfn1)
1339         {
1340             /* Either this is for kernel-mode, or the working set is held */
1341             ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));
1342 
1343             /* No Physical VADs supported yet */
1344             if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);
1345 
1346             /* This address should already exist and be fully valid */
1347             MiReferenceProbedPageAndBumpLockCount(Pfn1);
1348         }
1349         else
1350         {
1351             //
1352             // For I/O addresses, just remember this
1353             //
1354             Mdl->MdlFlags |= MDL_IO_SPACE;
1355         }
1356 
1357         //
1358         // Write the page and move on
1359         //
1360         *MdlPages++ = PageFrameIndex;
1361         PointerPte++;
1362 
1363         /* Check if we're on a PDE boundary */
1364         if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
1365 #if (_MI_PAGING_LEVELS >= 3)
1366         if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
1367 #endif
1368 #if (_MI_PAGING_LEVELS == 4)
1369         if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
1370 #endif
1371 
1372     } while (PointerPte <= LastPte);
1373 
1374     //
1375     // What kind of lock were we using?
1376     //
1377     if (UsePfnLock)
1378     {
1379         //
1380         // Release PFN lock
1381         //
1382         MiReleasePfnLock(OldIrql);
1383     }
1384     else
1385     {
1386         /* Release process working set */
1387         MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1388     }
1389 
1390     //
1391     // Sanity check
1392     //
1393     ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
1394     return;
1395 
1396 CleanupWithLock:
1397     //
1398     // This is the failure path
1399     //
1400     ASSERT(!NT_SUCCESS(Status));
1401 
1402     //
1403     // What kind of lock were we using?
1404     //
1405     if (UsePfnLock)
1406     {
1407         //
1408         // Release PFN lock
1409         //
1410         MiReleasePfnLock(OldIrql);
1411     }
1412     else
1413     {
1414         /* Release process working set */
1415         MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1416     }
1417 Cleanup:
1418     //
1419     // Pages must be locked so MmUnlockPages can work
1420     //
1421     ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
1422     MmUnlockPages(Mdl);
1423 
1424     //
1425     // Raise the error
1426     //
1427     ExRaiseStatus(Status);
1428 }
1429 
1430 /*
1431  * @implemented
1432  */
1433 VOID
1434 NTAPI
1435 MmUnlockPages(IN PMDL Mdl)
1436 {
1437     PPFN_NUMBER MdlPages, LastPage;
1438     PEPROCESS Process;
1439     PVOID Base;
1440     ULONG Flags, PageCount;
1441     KIRQL OldIrql;
1442     PMMPFN Pfn1;
1443     DPRINT("Unlocking MDL: %p\n", Mdl);
1444 
1445     //
1446     // Sanity checks
1447     //
1448     ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
1449     ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
1450     ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
1451     ASSERT(Mdl->ByteCount != 0);
1452 
1453     //
1454     // Get the associated process and capture the flags, which are volatile
1455     //
1456     Process = Mdl->Process;
1457     Flags = Mdl->MdlFlags;
1458 
1459     //
1460     // Automagically undo any MmGetSystemAddressForMdl mappings made for this MDL
1461     //
1462     if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
1463     {
1464         //
1465         // Unmap the pages from system space
1466         //
1467         MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
1468     }
1469 
1470     //
1471     // Get the page count
1472     //
1473     MdlPages = (PPFN_NUMBER)(Mdl + 1);
1474     Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
1475     PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
1476     ASSERT(PageCount != 0);
1477 
1478     //
1479     // We don't support AWE
1480     //
1481     if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);
1482 
1483     //
1484     // Check if the buffer is mapped I/O space
1485     //
1486     if (Flags & MDL_IO_SPACE)
1487     {
1488         //
1489         // Acquire PFN lock
1490         //
1491         OldIrql = MiAcquirePfnLock();
1492 
1493         //
1494         // Loop every page
1495         //
1496         LastPage = MdlPages + PageCount;
1497         do
1498         {
1499             //
1500             // Last page, break out
1501             //
1502             if (*MdlPages == LIST_HEAD) break;
1503 
1504             //
1505             // Check if this page is in the PFN database
1506             //
1507             Pfn1 = MiGetPfnEntry(*MdlPages);
1508             if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
1509         } while (++MdlPages < LastPage);
1510 
1511         //
1512         // Release the lock
1513         //
1514         MiReleasePfnLock(OldIrql);
1515 
1516         //
1517         // Check if we have a process
1518         //
1519         if (Process)
1520         {
1521             //
1522             // Handle the accounting of locked pages
1523             //
1524             ASSERT(Process->NumberOfLockedPages > 0);
1525             InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
1526                                         -(LONG_PTR)PageCount);
1527         }
1528 
1529         //
1530         // We're done
1531         //
1532         Mdl->MdlFlags &= ~MDL_IO_SPACE;
1533         Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
1534         return;
1535     }
1536 
1537     //
1538     // Check if we have a process
1539     //
1540     if (Process)
1541     {
1542         //
1543         // Handle the accounting of locked pages
1544         //
1545         ASSERT(Process->NumberOfLockedPages > 0);
1546         InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
1547                                     -(LONG_PTR)PageCount);
1548     }
1549 
1550     //
1551     // Loop every page
1552     //
1553     LastPage = MdlPages + PageCount;
1554     do
1555     {
1556         //
1557         // Last page reached
1558         //
1559         if (*MdlPages == LIST_HEAD)
1560         {
1561             //
1562             // Were there no pages at all?
1563             //
1564             if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
1565             {
1566                 //
1567                 // We're already done
1568                 //
1569                 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
1570                 return;
1571             }
1572 
1573             //
1574             // Otherwise, stop here
1575             //
1576             LastPage = MdlPages;
1577             break;
1578         }
1579 
1580         /* Save the PFN entry instead for the secondary loop */
1581         *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
1582         ASSERT(*MdlPages != 0);
1583     } while (++MdlPages < LastPage);
1584 
1585     //
1586     // Reset pointer
1587     //
1588     MdlPages = (PPFN_NUMBER)(Mdl + 1);
1589 
1590     //
1591     // Now grab the PFN lock for the actual unlock and dereference
1592     //
1593     OldIrql = MiAcquirePfnLock();
1594     do
1595     {
1596         /* Get the current entry and reference count */
1597         Pfn1 = (PMMPFN)*MdlPages;
1598         MiDereferencePfnAndDropLockCount(Pfn1);
1599     } while (++MdlPages < LastPage);
1600 
1601     //
1602     // Release the lock
1603     //
1604     MiReleasePfnLock(OldIrql);
1605 
1606     //
1607     // We're done
1608     //
1609     Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
1610 }
1611 
1612 /*
1613  * @unimplemented
1614  */
1615 NTSTATUS
1616 NTAPI
1617 MmAdvanceMdl(IN PMDL Mdl,
1618              IN ULONG NumberOfBytes)
1619 {
1620     UNIMPLEMENTED;
1621     return STATUS_NOT_IMPLEMENTED;
1622 }
1623 
1624 /*
1625  * @unimplemented
1626  */
1627 PVOID
1628 NTAPI
1629 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
1630                                     IN ULONG PoolTag,
1631                                     IN PMDL MemoryDescriptorList,
1632                                     IN MEMORY_CACHING_TYPE CacheType)
1633 {
1634     UNIMPLEMENTED;
1635     return 0;
1636 }
1637 
1638 /*
1639  * @unimplemented
1640  */
1641 VOID
1642 NTAPI
1643 MmUnmapReservedMapping(IN PVOID BaseAddress,
1644                        IN ULONG PoolTag,
1645                        IN PMDL MemoryDescriptorList)
1646 {
1647     UNIMPLEMENTED;
1648 }
1649 
1650 /*
1651  * @unimplemented
1652  */
1653 NTSTATUS
1654 NTAPI
1655 MmPrefetchPages(IN ULONG NumberOfLists,
1656                 IN PREAD_LIST *ReadLists)
1657 {
1658     UNIMPLEMENTED;
1659     return STATUS_NOT_IMPLEMENTED;
1660 }
1661 
1662 /*
1663  * @unimplemented
1664  */
1665 NTSTATUS
1666 NTAPI
1667 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
1668                           IN ULONG NewProtect)
1669 {
1670     UNIMPLEMENTED;
1671     return STATUS_NOT_IMPLEMENTED;
1672 }
1673 
1674 /**
1675  * @brief
1676  * Probes and locks virtual pages in memory for the specified process.
1677  *
1678  * @param[in,out] MemoryDescriptorList
1679  * Memory Descriptor List (MDL) containing the buffer to be probed and locked.
1680  *
1681  * @param[in] Process
1682  * The process for which the buffer should be probed and locked.
1683  *
1684  * @param[in] AccessMode
1685  * Access mode for probing the pages. Can be KernelMode or UserMode.
1686  *
1687  * @param[in] LockOperation
1688  * The type of the probing and locking operation. Can be IoReadAccess, IoWriteAccess or IoModifyAccess.
1689  *
1690  * @return
1691  * Nothing.
1692  *
1693  * @see MmProbeAndLockPages
1694  *
1695  * @remarks Must be called at IRQL <= APC_LEVEL
1696  */
1697 _IRQL_requires_max_(APC_LEVEL)
1698 VOID
1699 NTAPI
1700 MmProbeAndLockProcessPages(
1701     _Inout_ PMDL MemoryDescriptorList,
1702     _In_ PEPROCESS Process,
1703     _In_ KPROCESSOR_MODE AccessMode,
1704     _In_ LOCK_OPERATION Operation)
1705 {
1706     KAPC_STATE ApcState;
1707     BOOLEAN IsAttached = FALSE;
1708 
1709     if (Process != PsGetCurrentProcess())
1710     {
1711         KeStackAttachProcess(&Process->Pcb, &ApcState);
1712         IsAttached = TRUE;
1713     }
1714 
1715     /* Protect in try/finally to ensure we detach even if MmProbeAndLockPages() throws an exception */
1716     _SEH2_TRY
1717     {
1718         MmProbeAndLockPages(MemoryDescriptorList, AccessMode, Operation);
1719     }
1720     _SEH2_FINALLY
1721     {
1722         if (IsAttached)
1723             KeUnstackDetachProcess(&ApcState);
1724     }
1725     _SEH2_END;
1726 }
1727 
1728 /*
1729  * @unimplemented
1730  */
1731 VOID
1732 NTAPI
1733 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
1734                             IN LARGE_INTEGER PageList[],
1735                             IN KPROCESSOR_MODE AccessMode,
1736                             IN LOCK_OPERATION Operation)
1737 {
1738     UNIMPLEMENTED;
1739 }
1740 
1741 /*
1742  * @unimplemented
1743  */
1744 VOID
1745 NTAPI
1746 MmMapMemoryDumpMdl(IN PMDL Mdl)
1747 {
1748     UNIMPLEMENTED;
1749 }
1750 
1751 /* EOF */
1752