xref: /reactos/ntoskrnl/mm/ARM3/mdlsup.c (revision b3a5eeb1)
1 /*
2  * PROJECT:         ReactOS Kernel
3  * LICENSE:         BSD - See COPYING.ARM in the top level directory
4  * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
5  * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
6  * PROGRAMMERS:     ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 /* GLOBALS ********************************************************************/
19 
20 BOOLEAN MmTrackPtes;
21 BOOLEAN MmTrackLockedPages;
22 SIZE_T MmSystemLockPagesCount;
23 
24 ULONG MiCacheOverride[MiNotMapped + 1];
25 
26 /* INTERNAL FUNCTIONS *********************************************************/
/**
 * Maps the locked pages described by an MDL into the current process'
 * user address space by inserting a VadDevicePhysicalMemory VAD and
 * constructing a valid user PTE for each PFN in the MDL's array.
 *
 * @param Mdl         MDL whose PFN array is already populated.
 * @param StartVa     Virtual address the MDL describes; used only with the
 *                    MDL byte count to compute how many pages to map.
 * @param CacheType   Requested caching type, must be <= MmWriteCombined.
 * @param BaseAddress Optional fixed user address for the mapping; if NULL a
 *                    free range is chosen from the process VAD tree.
 *
 * @return User-mode address of the mapping, adjusted by the MDL byte offset.
 *         On failure this routine does not return normally - it frees the
 *         VAD (if allocated), drops the address space lock and raises the
 *         failure status via ExRaiseStatus.
 */
static
PVOID
NTAPI
MiMapLockedPagesInUserSpace(
    _In_ PMDL Mdl,
    _In_ PVOID StartVa,
    _In_ MEMORY_CACHING_TYPE CacheType,
    _In_opt_ PVOID BaseAddress)
{
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();
    PETHREAD Thread = PsGetCurrentThread();
    TABLE_SEARCH_RESULT Result;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    MI_PFN_CACHE_ATTRIBUTE EffectiveCacheAttribute;
    BOOLEAN IsIoMapping;
    KIRQL OldIrql;
    ULONG_PTR StartingVa;
    ULONG_PTR EndingVa;
    PMMADDRESS_NODE Parent;
    PMMVAD_LONG Vad;
    ULONG NumberOfPages;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    MMPTE TempPte;
    PPFN_NUMBER MdlPages;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    BOOLEAN AddressSpaceLocked = FALSE;

    PAGED_CODE();

    DPRINT("MiMapLockedPagesInUserSpace(%p, %p, 0x%x, %p)\n",
           Mdl, StartVa, CacheType, BaseAddress);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa,
                                                   MmGetMdlByteCount(Mdl));
    MdlPages = MmGetMdlPfnArray(Mdl);

    ASSERT(CacheType <= MmWriteCombined);

    /* I/O space mappings use the second row of the platform attribute table */
    IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    /* Large pages are always cached, make sure we're not asking for those */
    if (CacheAttribute != MiCached)
    {
        DPRINT1("FIXME: Need to check for large pages\n");
    }

    /* Allocate a VAD for our mapped region */
    Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
    if (Vad == NULL)
    {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto Error;
    }

    /* Initialize PhysicalMemory VAD */
    RtlZeroMemory(Vad, sizeof(*Vad));
    Vad->u2.VadFlags2.LongVad = 1;
    Vad->u.VadFlags.VadType = VadDevicePhysicalMemory;
    Vad->u.VadFlags.Protection = MM_READWRITE;
    Vad->u.VadFlags.PrivateMemory = 1;

    /* Did the caller specify an address? */
    if (BaseAddress == NULL)
    {
        /* We get to pick the address */
        MmLockAddressSpace(&Process->Vm);
        AddressSpaceLocked = TRUE;
        if (Process->VmDeleted)
        {
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto Error;
        }

        Result = MiFindEmptyAddressRangeInTree(NumberOfPages << PAGE_SHIFT,
                                               MM_VIRTMEM_GRANULARITY,
                                               &Process->VadRoot,
                                               &Parent,
                                               &StartingVa);
        if (Result == TableFoundNode)
        {
            /* Tree search found no gap large enough */
            Status = STATUS_NO_MEMORY;
            goto Error;
        }
        EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1;
        BaseAddress = (PVOID)StartingVa;
    }
    else
    {
        /* Caller specified a base address */
        StartingVa = (ULONG_PTR)BaseAddress;
        EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1;

        /* Make sure it's valid: page aligned, non-wrapping, below VAD limit */
        if (BYTE_OFFSET(StartingVa) != 0 ||
            EndingVa <= StartingVa ||
            EndingVa > (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS)
        {
            Status = STATUS_INVALID_ADDRESS;
            goto Error;
        }

        MmLockAddressSpace(&Process->Vm);
        AddressSpaceLocked = TRUE;
        if (Process->VmDeleted)
        {
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto Error;
        }

        /* Check if it's already in use */
        Result = MiCheckForConflictingNode(StartingVa >> PAGE_SHIFT,
                                           EndingVa >> PAGE_SHIFT,
                                           &Process->VadRoot,
                                           &Parent);
        if (Result == TableFoundNode)
        {
            Status = STATUS_CONFLICTING_ADDRESSES;
            goto Error;
        }
    }

    Vad->StartingVpn = StartingVa >> PAGE_SHIFT;
    Vad->EndingVpn = EndingVa >> PAGE_SHIFT;

    /* Working set lock guards the VAD insert and the PTE construction below */
    MiLockProcessWorkingSetUnsafe(Process, Thread);

    ASSERT(Vad->EndingVpn >= Vad->StartingVpn);

    MiInsertVad((PMMVAD)Vad, &Process->VadRoot);

    /* Check if this is uncached */
    if (CacheAttribute != MiCached)
    {
        /* Flush all caches */
        KeFlushEntireTb(TRUE, TRUE);
        KeInvalidateAllCaches();
    }

    /* Walk the PFN array; LIST_HEAD terminates a partially-filled MDL */
    PointerPte = MiAddressToPte(BaseAddress);
    while (NumberOfPages != 0 &&
           *MdlPages != LIST_HEAD)
    {
        /* Make sure the page table for this PTE is present and valid */
        PointerPde = MiPteToPde(PointerPte);
        MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
        ASSERT(PointerPte->u.Hard.Valid == 0);

        /* Add a PDE reference for each page */
        MiIncrementPageTableReferences(BaseAddress);

        /* Set up our basic user PTE */
        MI_MAKE_HARDWARE_PTE_USER(&TempPte,
                                  PointerPte,
                                  MM_READWRITE,
                                  *MdlPages);

        EffectiveCacheAttribute = CacheAttribute;

        /* We need to respect the PFN's caching information in some cases */
        Pfn2 = MiGetPfnEntry(*MdlPages);
        if (Pfn2 != NULL)
        {
            ASSERT(Pfn2->u3.e2.ReferenceCount != 0);

            /* The PFN's recorded attribute wins; count each override type */
            switch (Pfn2->u3.e1.CacheAttribute)
            {
                case MiNonCached:
                    if (CacheAttribute != MiNonCached)
                    {
                        MiCacheOverride[1]++;
                        EffectiveCacheAttribute = MiNonCached;
                    }
                    break;

                case MiCached:
                    if (CacheAttribute != MiCached)
                    {
                        MiCacheOverride[0]++;
                        EffectiveCacheAttribute = MiCached;
                    }
                    break;

                case MiWriteCombined:
                    if (CacheAttribute != MiWriteCombined)
                    {
                        MiCacheOverride[2]++;
                        EffectiveCacheAttribute = MiWriteCombined;
                    }
                    break;

                default:
                    /* We don't support AWE magic (MiNotMapped) */
                    DPRINT1("FIXME: MiNotMapped is not supported\n");
                    ASSERT(FALSE);
                    break;
            }
        }

        /* Configure caching */
        switch (EffectiveCacheAttribute)
        {
            case MiNonCached:
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;
            case MiCached:
                break;
            case MiWriteCombined:
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;
            default:
                ASSERT(FALSE);
                break;
        }

        /* Make the page valid */
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* Acquire a share count on the page-table page backing this PTE */
        Pfn1 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
        DPRINT("Incrementing %p from %p\n", Pfn1, _ReturnAddress());
        OldIrql = MiAcquirePfnLock();
        Pfn1->u2.ShareCount++;
        MiReleasePfnLock(OldIrql);

        /* Next page */
        MdlPages++;
        PointerPte++;
        NumberOfPages--;
        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
    }

    MiUnlockProcessWorkingSetUnsafe(Process, Thread);
    ASSERT(AddressSpaceLocked);
    MmUnlockAddressSpace(&Process->Vm);

    /* Hand back the address at the original byte offset within the page */
    ASSERT(StartingVa != 0);
    return (PVOID)((ULONG_PTR)StartingVa + MmGetMdlByteOffset(Mdl));

Error:
    /* Error is only reachable before the VAD was inserted, so freeing the
       VAD here cannot leave a dangling node in the tree */
    if (AddressSpaceLocked)
    {
        MmUnlockAddressSpace(&Process->Vm);
    }
    if (Vad != NULL)
    {
        ExFreePoolWithTag(Vad, 'ldaV');
    }
    ExRaiseStatus(Status);
}
281 
/**
 * Tears down a user-space mapping previously created by
 * MiMapLockedPagesInUserSpace: removes the VadDevicePhysicalMemory VAD,
 * erases the user PTEs, releases the page-table page references and frees
 * the VAD allocation.
 *
 * @param BaseAddress User-mode base address of the mapping to remove.
 * @param Mdl         The MDL that was mapped; supplies the page count and
 *                    the LIST_HEAD-terminated PFN array.
 *
 * Silently returns (with a DPRINT1) if no matching physical-memory VAD is
 * found at BaseAddress.
 */
static
VOID
NTAPI
MiUnmapLockedPagesInUserSpace(
    _In_ PVOID BaseAddress,
    _In_ PMDL Mdl)
{
    PEPROCESS Process = PsGetCurrentProcess();
    PETHREAD Thread = PsGetCurrentThread();
    PMMVAD Vad;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;
    ULONG NumberOfPages;
    PPFN_NUMBER MdlPages;
    PFN_NUMBER PageTablePage;

    DPRINT("MiUnmapLockedPagesInUserSpace(%p, %p)\n", BaseAddress, Mdl);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
                                                   MmGetMdlByteCount(Mdl));
    ASSERT(NumberOfPages != 0);
    MdlPages = MmGetMdlPfnArray(Mdl);

    /* Find the VAD */
    MmLockAddressSpace(&Process->Vm);
    Vad = MiLocateAddress(BaseAddress);
    if (!Vad ||
        Vad->u.VadFlags.VadType != VadDevicePhysicalMemory)
    {
        /* Not one of our mappings - bail out without touching anything */
        DPRINT1("MiUnmapLockedPagesInUserSpace invalid for %p\n", BaseAddress);
        MmUnlockAddressSpace(&Process->Vm);
        return;
    }

    MiLockProcessWorkingSetUnsafe(Process, Thread);

    /* Remove it from the process VAD tree */
    ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
    MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);

    /* MiRemoveNode should have removed us if we were the hint */
    ASSERT(Process->VadRoot.NodeHint != Vad);

    /* Hold the PFN lock across the whole PTE/PDE teardown loop */
    PointerPte = MiAddressToPte(BaseAddress);
    OldIrql = MiAcquirePfnLock();
    while (NumberOfPages != 0 &&
           *MdlPages != LIST_HEAD)
    {
        /* Both the page-table page mapping and the PTE itself must be valid */
        ASSERT(MiAddressToPte(PointerPte)->u.Hard.Valid == 1);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        /* Invalidate it */
        MI_ERASE_PTE(PointerPte);

        /* We invalidated this PTE, so dereference the PDE */
        PointerPde = MiAddressToPde(BaseAddress);
        PageTablePage = PointerPde->u.Hard.PageFrameNumber;
        MiDecrementShareCount(MiGetPfnEntry(PageTablePage), PageTablePage);

        /* Last reference on this page table: the PDE itself can go away */
        if (MiDecrementPageTableReferences(BaseAddress) == 0)
        {
            ASSERT(MiIsPteOnPdeBoundary(PointerPte + 1) || (NumberOfPages == 1));
            MiDeletePde(PointerPde, Process);
        }

        /* Next page */
        PointerPte++;
        NumberOfPages--;
        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
        MdlPages++;
    }

    /* Flush stale translations before releasing the locks */
    KeFlushProcessTb();
    MiReleasePfnLock(OldIrql);
    MiUnlockProcessWorkingSetUnsafe(Process, Thread);
    MmUnlockAddressSpace(&Process->Vm);
    ExFreePoolWithTag(Vad, 'ldaV');
}
361 
362 /* PUBLIC FUNCTIONS ***********************************************************/
363 
364 /*
365  * @implemented
366  */
367 PMDL
368 NTAPI
369 MmCreateMdl(IN PMDL Mdl,
370             IN PVOID Base,
371             IN SIZE_T Length)
372 {
373     SIZE_T Size;
374 
375     //
376     // Check if we don't have an MDL built
377     //
378     if (!Mdl)
379     {
380         //
381         // Calculate the size we'll need  and allocate the MDL
382         //
383         Size = MmSizeOfMdl(Base, Length);
384         Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
385         if (!Mdl) return NULL;
386     }
387 
388     //
389     // Initialize it
390     //
391     MmInitializeMdl(Mdl, Base, Length);
392     return Mdl;
393 }
394 
395 /*
396  * @implemented
397  */
398 SIZE_T
399 NTAPI
400 MmSizeOfMdl(IN PVOID Base,
401             IN SIZE_T Length)
402 {
403     //
404     // Return the MDL size
405     //
406     return sizeof(MDL) +
407            (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
408 }
409 
/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks: the MDL must describe something and must not already
    // be locked, mapped, partial, or marked as nonpaged pool
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated to a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information; the PFN array starts right after the header
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs backing the nonpaged buffer and record each PFN
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping: if the last PFN has no database entry
    // it is device memory, not RAM (NOTE(review): only the final page is
    // checked here - presumably a buffer cannot mix RAM and I/O space)
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
474 
475 /*
476  * @implemented
477  */
478 PMDL
479 NTAPI
480 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
481                       IN PHYSICAL_ADDRESS HighAddress,
482                       IN PHYSICAL_ADDRESS SkipBytes,
483                       IN SIZE_T TotalBytes)
484 {
485     //
486     // Call the internal routine
487     //
488     return MiAllocatePagesForMdl(LowAddress,
489                                  HighAddress,
490                                  SkipBytes,
491                                  TotalBytes,
492                                  MiNotMapped,
493                                  0);
494 }
495 
496 /*
497  * @implemented
498  */
499 PMDL
500 NTAPI
501 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
502                         IN PHYSICAL_ADDRESS HighAddress,
503                         IN PHYSICAL_ADDRESS SkipBytes,
504                         IN SIZE_T TotalBytes,
505                         IN MEMORY_CACHING_TYPE CacheType,
506                         IN ULONG Flags)
507 {
508     MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
509 
510     //
511     // Check for invalid cache type
512     //
513     if (CacheType > MmWriteCombined)
514     {
515         //
516         // Normalize to default
517         //
518         CacheAttribute = MiNotMapped;
519     }
520     else
521     {
522         //
523         // Conver to internal caching attribute
524         //
525         CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
526     }
527 
528     //
529     // Only these flags are allowed
530     //
531     if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
532     {
533         //
534         // Silently fail
535         //
536         return NULL;
537     }
538 
539     //
540     // Call the internal routine
541     //
542     return MiAllocatePagesForMdl(LowAddress,
543                                  HighAddress,
544                                  SkipBytes,
545                                  TotalBytes,
546                                  CacheAttribute,
547                                  Flags);
548 }
549 
/*
 * @implemented
 *
 * Releases the physical pages backing an MDL that was allocated with
 * MmAllocatePagesForMdl(Ex): walks the PFN array under the PFN lock,
 * resets each PFN database entry and dereferences the page. The MDL
 * itself is NOT freed; each consumed array slot is overwritten with
 * LIST_HEAD so the MDL cannot be freed twice.
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = MiAcquirePfnLock();

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page (LIST_HEAD terminates a short array)
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        // NOTE(review): 0x1FFEDCB appears to be the sentinel PteFrame value
        // stamped on MDL-allocated pages by MiAllocatePagesForMdl - confirm
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear the allocation markers and share count
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u3.e1.PageLocation = StandbyPageList;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    MiReleasePfnLock(OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
645 
/*
 * @implemented
 *
 * Maps the pages of a locked (or nonpaged-partial) MDL into virtual
 * address space. Kernel-mode requests are satisfied from the system PTE
 * pool with the requested cache attribute; user-mode requests are
 * delegated to MiMapLockedPagesInUserSpace (which may raise on failure).
 *
 * BugCheckOnFailure only applies to the kernel path and only when the MDL
 * does not carry MDL_MAPPING_CAN_FAIL; Priority is currently unused here.
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN ULONG Priority) // MM_PAGE_PRIORITY
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks: not already mapped, but locked or partial
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type (I/O space uses a separate table row)
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address, preserving the original byte offset
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template PTE and apply the cache attribute to it
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do (fully cached)
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here (LIST_HEAD terminates a partial MDL)
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    /* User-mode mapping: build a VAD and user PTEs; may raise on failure */
    return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
}
803 
804 /*
805  * @implemented
806  */
807 PVOID
808 NTAPI
809 MmMapLockedPages(IN PMDL Mdl,
810                  IN KPROCESSOR_MODE AccessMode)
811 {
812     //
813     // Call the extended version
814     //
815     return MmMapLockedPagesSpecifyCache(Mdl,
816                                         AccessMode,
817                                         MmCached,
818                                         NULL,
819                                         TRUE,
820                                         HighPagePriority);
821 }
822 
/*
 * @implemented
 *
 * Undoes MmMapLockedPages(SpecifyCache): system-space mappings release
 * their reserved system PTEs (including any extra PTEs recorded after the
 * PFN array when MDL_FREE_EXTRA_PTES is set); user-space mappings are
 * delegated to MiUnmapLockedPagesInUserSpace.
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free advanced pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Widen the release to cover the extra PTEs, which precede
            // the mapped range
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        /* User-space mapping: tear down the VAD and user PTEs */
        MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
    }
}
916 
917 /*
918  * @implemented
919  */
920 VOID
921 NTAPI
922 MmProbeAndLockPages(IN PMDL Mdl,
923                     IN KPROCESSOR_MODE AccessMode,
924                     IN LOCK_OPERATION Operation)
925 {
926     PPFN_NUMBER MdlPages;
927     PVOID Base, Address, LastAddress, StartAddress;
928     ULONG LockPages, TotalPages;
929     NTSTATUS Status = STATUS_SUCCESS;
930     PEPROCESS CurrentProcess;
931     NTSTATUS ProbeStatus;
932     PMMPTE PointerPte, LastPte;
933     PMMPDE PointerPde;
934 #if (_MI_PAGING_LEVELS >= 3)
935     PMMPDE PointerPpe;
936 #endif
937 #if (_MI_PAGING_LEVELS == 4)
938     PMMPDE PointerPxe;
939 #endif
940     PFN_NUMBER PageFrameIndex;
941     BOOLEAN UsePfnLock;
942     KIRQL OldIrql;
943     PMMPFN Pfn1;
944     DPRINT("Probing MDL: %p\n", Mdl);
945 
946     //
947     // Sanity checks
948     //
949     ASSERT(Mdl->ByteCount != 0);
950     ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
951     ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
952     ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
953                              MDL_MAPPED_TO_SYSTEM_VA |
954                              MDL_SOURCE_IS_NONPAGED_POOL |
955                              MDL_PARTIAL |
956                              MDL_IO_SPACE)) == 0);
957 
958     //
959     // Get page and base information
960     //
961     MdlPages = (PPFN_NUMBER)(Mdl + 1);
962     Base = Mdl->StartVa;
963 
964     //
965     // Get the addresses and how many pages we span (and need to lock)
966     //
967     Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
968     LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
969     LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
970     ASSERT(LockPages != 0);
971 
972     /* Block invalid access */
973     if ((AccessMode != KernelMode) &&
974         ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
975     {
976         /* Caller should be in SEH, raise the error */
977         *MdlPages = LIST_HEAD;
978         ExRaiseStatus(STATUS_ACCESS_VIOLATION);
979     }
980 
981     //
982     // Get the process
983     //
984     if (Address <= MM_HIGHEST_USER_ADDRESS)
985     {
986         //
987         // Get the process
988         //
989         CurrentProcess = PsGetCurrentProcess();
990     }
991     else
992     {
993         //
994         // No process
995         //
996         CurrentProcess = NULL;
997     }
998 
999     //
1000     // Save the number of pages we'll have to lock, and the start address
1001     //
1002     TotalPages = LockPages;
1003     StartAddress = Address;
1004 
1005     /* Large pages not supported */
1006     ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));
1007 
1008     //
1009     // Now probe them
1010     //
1011     ProbeStatus = STATUS_SUCCESS;
1012     _SEH2_TRY
1013     {
1014         //
1015         // Enter probe loop
1016         //
1017         do
1018         {
1019             //
1020             // Assume failure
1021             //
1022             *MdlPages = LIST_HEAD;
1023 
1024             //
1025             // Read
1026             //
1027             *(volatile CHAR*)Address;
1028 
1029             //
1030             // Check if this is write access (only probe for user-mode)
1031             //
1032             if ((Operation != IoReadAccess) &&
1033                 (Address <= MM_HIGHEST_USER_ADDRESS))
1034             {
1035                 //
1036                 // Probe for write too
1037                 //
1038                 ProbeForWriteChar(Address);
1039             }
1040 
1041             //
1042             // Next address...
1043             //
1044             Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);
1045 
1046             //
1047             // Next page...
1048             //
1049             LockPages--;
1050             MdlPages++;
1051         } while (Address < LastAddress);
1052 
1053         //
1054         // Reset back to the original page
1055         //
1056         ASSERT(LockPages == 0);
1057         MdlPages = (PPFN_NUMBER)(Mdl + 1);
1058     }
1059     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
1060     {
1061         //
1062         // Oops :(
1063         //
1064         ProbeStatus = _SEH2_GetExceptionCode();
1065     }
1066     _SEH2_END;
1067 
1068     //
1069     // So how did that go?
1070     //
1071     if (ProbeStatus != STATUS_SUCCESS)
1072     {
1073         //
1074         // Fail
1075         //
1076         DPRINT1("MDL PROBE FAILED!\n");
1077         Mdl->Process = NULL;
1078         ExRaiseStatus(ProbeStatus);
1079     }
1080 
1081     //
1082     // Get the PTE and PDE
1083     //
1084     PointerPte = MiAddressToPte(StartAddress);
1085     PointerPde = MiAddressToPde(StartAddress);
1086 #if (_MI_PAGING_LEVELS >= 3)
1087     PointerPpe = MiAddressToPpe(StartAddress);
1088 #endif
1089 #if (_MI_PAGING_LEVELS == 4)
1090     PointerPxe = MiAddressToPxe(StartAddress);
1091 #endif
1092 
1093     //
1094     // Sanity check
1095     //
1096     ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));
1097 
1098     //
1099     // Check what kind of operation this is
1100     //
1101     if (Operation != IoReadAccess)
1102     {
1103         //
1104         // Set the write flag
1105         //
1106         Mdl->MdlFlags |= MDL_WRITE_OPERATION;
1107     }
1108     else
1109     {
1110         //
1111         // Remove the write flag
1112         //
1113         Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
1114     }
1115 
1116     //
1117     // Mark the MDL as locked *now*
1118     //
1119     Mdl->MdlFlags |= MDL_PAGES_LOCKED;
1120 
1121     //
1122     // Check if this came from kernel mode
1123     //
1124     if (Base > MM_HIGHEST_USER_ADDRESS)
1125     {
1126         //
1127         // We should not have a process
1128         //
1129         ASSERT(CurrentProcess == NULL);
1130         Mdl->Process = NULL;
1131 
1132         //
1133         // In kernel mode, we don't need to check for write access
1134         //
1135         Operation = IoReadAccess;
1136 
1137         //
1138         // Use the PFN lock
1139         //
1140         UsePfnLock = TRUE;
1141         OldIrql = MiAcquirePfnLock();
1142     }
1143     else
1144     {
1145         //
1146         // Sanity checks
1147         //
1148         ASSERT(TotalPages != 0);
1149         ASSERT(CurrentProcess == PsGetCurrentProcess());
1150 
1151         //
1152         // Track locked pages
1153         //
1154         InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
1155                                     TotalPages);
1156 
1157         //
1158         // Save the process
1159         //
1160         Mdl->Process = CurrentProcess;
1161 
1162         /* Lock the process working set */
1163         MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1164         UsePfnLock = FALSE;
1165         OldIrql = MM_NOIRQL;
1166     }
1167 
1168     //
1169     // Get the last PTE
1170     //
1171     LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));
1172 
1173     //
1174     // Loop the pages
1175     //
1176     do
1177     {
1178         //
1179         // Assume failure and check for non-mapped pages
1180         //
1181         *MdlPages = LIST_HEAD;
1182         while (
1183 #if (_MI_PAGING_LEVELS == 4)
1184                (PointerPxe->u.Hard.Valid == 0) ||
1185 #endif
1186 #if (_MI_PAGING_LEVELS >= 3)
1187                (PointerPpe->u.Hard.Valid == 0) ||
1188 #endif
1189                (PointerPde->u.Hard.Valid == 0) ||
1190                (PointerPte->u.Hard.Valid == 0))
1191         {
1192             //
1193             // What kind of lock were we using?
1194             //
1195             if (UsePfnLock)
1196             {
1197                 //
1198                 // Release PFN lock
1199                 //
1200                 MiReleasePfnLock(OldIrql);
1201             }
1202             else
1203             {
1204                 /* Release process working set */
1205                 MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1206             }
1207 
1208             //
1209             // Access the page
1210             //
1211             Address = MiPteToAddress(PointerPte);
1212 
1213             //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1214             Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL);
1215             if (!NT_SUCCESS(Status))
1216             {
1217                 //
1218                 // Fail
1219                 //
1220                 DPRINT1("Access fault failed\n");
1221                 goto Cleanup;
1222             }
1223 
1224             //
1225             // What lock should we use?
1226             //
1227             if (UsePfnLock)
1228             {
1229                 //
1230                 // Grab the PFN lock
1231                 //
1232                 OldIrql = MiAcquirePfnLock();
1233             }
1234             else
1235             {
1236                 /* Lock the process working set */
1237                 MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1238             }
1239         }
1240 
1241         //
1242         // Check if this was a write or modify
1243         //
1244         if (Operation != IoReadAccess)
1245         {
1246             //
1247             // Check if the PTE is not writable
1248             //
1249             if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
1250             {
1251                 //
1252                 // Check if it's copy on write
1253                 //
1254                 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
1255                 {
1256                     //
1257                     // Get the base address and allow a change for user-mode
1258                     //
1259                     Address = MiPteToAddress(PointerPte);
1260                     if (Address <= MM_HIGHEST_USER_ADDRESS)
1261                     {
1262                         //
1263                         // What kind of lock were we using?
1264                         //
1265                         if (UsePfnLock)
1266                         {
1267                             //
1268                             // Release PFN lock
1269                             //
1270                             MiReleasePfnLock(OldIrql);
1271                         }
1272                         else
1273                         {
1274                             /* Release process working set */
1275                             MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1276                         }
1277 
1278                         //
1279                         // Access the page
1280                         //
1281 
1282                         //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
1283                         Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL);
1284                         if (!NT_SUCCESS(Status))
1285                         {
1286                             //
1287                             // Fail
1288                             //
1289                             DPRINT1("Access fault failed\n");
1290                             goto Cleanup;
1291                         }
1292 
1293                         //
1294                         // Re-acquire the lock
1295                         //
1296                         if (UsePfnLock)
1297                         {
1298                             //
1299                             // Grab the PFN lock
1300                             //
1301                             OldIrql = MiAcquirePfnLock();
1302                         }
1303                         else
1304                         {
1305                             /* Lock the process working set */
1306                             MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1307                         }
1308 
1309                         //
1310                         // Start over
1311                         //
1312                         continue;
1313                     }
1314                 }
1315 
1316                 //
1317                 // Fail, since we won't allow this
1318                 //
1319                 Status = STATUS_ACCESS_VIOLATION;
1320                 goto CleanupWithLock;
1321             }
1322         }
1323 
1324         //
1325         // Grab the PFN
1326         //
1327         PageFrameIndex = PFN_FROM_PTE(PointerPte);
1328         Pfn1 = MiGetPfnEntry(PageFrameIndex);
1329         if (Pfn1)
1330         {
1331             /* Either this is for kernel-mode, or the working set is held */
1332             ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));
1333 
1334             /* No Physical VADs supported yet */
1335             if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);
1336 
1337             /* This address should already exist and be fully valid */
1338             MiReferenceProbedPageAndBumpLockCount(Pfn1);
1339         }
1340         else
1341         {
1342             //
1343             // For I/O addresses, just remember this
1344             //
1345             Mdl->MdlFlags |= MDL_IO_SPACE;
1346         }
1347 
1348         //
1349         // Write the page and move on
1350         //
1351         *MdlPages++ = PageFrameIndex;
1352         PointerPte++;
1353 
1354         /* Check if we're on a PDE boundary */
1355         if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
1356 #if (_MI_PAGING_LEVELS >= 3)
1357         if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
1358 #endif
1359 #if (_MI_PAGING_LEVELS == 4)
1360         if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
1361 #endif
1362 
1363     } while (PointerPte <= LastPte);
1364 
1365     //
1366     // What kind of lock were we using?
1367     //
1368     if (UsePfnLock)
1369     {
1370         //
1371         // Release PFN lock
1372         //
1373         MiReleasePfnLock(OldIrql);
1374     }
1375     else
1376     {
1377         /* Release process working set */
1378         MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1379     }
1380 
1381     //
1382     // Sanity check
1383     //
1384     ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
1385     return;
1386 
1387 CleanupWithLock:
1388     //
1389     // This is the failure path
1390     //
1391     ASSERT(!NT_SUCCESS(Status));
1392 
1393     //
1394     // What kind of lock were we using?
1395     //
1396     if (UsePfnLock)
1397     {
1398         //
1399         // Release PFN lock
1400         //
1401         MiReleasePfnLock(OldIrql);
1402     }
1403     else
1404     {
1405         /* Release process working set */
1406         MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
1407     }
1408 Cleanup:
1409     //
1410     // Pages must be locked so MmUnlock can work
1411     //
1412     ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
1413     MmUnlockPages(Mdl);
1414 
1415     //
1416     // Raise the error
1417     //
1418     ExRaiseStatus(Status);
1419 }
1420 
1421 /*
1422  * @implemented
1423  */
1424 VOID
1425 NTAPI
1426 MmUnlockPages(IN PMDL Mdl)
1427 {
1428     PPFN_NUMBER MdlPages, LastPage;
1429     PEPROCESS Process;
1430     PVOID Base;
1431     ULONG Flags, PageCount;
1432     KIRQL OldIrql;
1433     PMMPFN Pfn1;
1434     DPRINT("Unlocking MDL: %p\n", Mdl);
1435 
1436     //
1437     // Sanity checks
1438     //
1439     ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
1440     ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
1441     ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
1442     ASSERT(Mdl->ByteCount != 0);
1443 
1444     //
1445     // Get the process associated and capture the flags which are volatile
1446     //
1447     Process = Mdl->Process;
1448     Flags = Mdl->MdlFlags;
1449 
1450     //
1451     // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
1452     //
1453     if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
1454     {
1455         //
1456         // Unmap the pages from system space
1457         //
1458         MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
1459     }
1460 
1461     //
1462     // Get the page count
1463     //
1464     MdlPages = (PPFN_NUMBER)(Mdl + 1);
1465     Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
1466     PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
1467     ASSERT(PageCount != 0);
1468 
1469     //
1470     // We don't support AWE
1471     //
1472     if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);
1473 
1474     //
1475     // Check if the buffer is mapped I/O space
1476     //
1477     if (Flags & MDL_IO_SPACE)
1478     {
1479         //
1480         // Acquire PFN lock
1481         //
1482         OldIrql = MiAcquirePfnLock();
1483 
1484         //
1485         // Loop every page
1486         //
1487         LastPage = MdlPages + PageCount;
1488         do
1489         {
1490             //
1491             // Last page, break out
1492             //
1493             if (*MdlPages == LIST_HEAD) break;
1494 
1495             //
1496             // Check if this page is in the PFN database
1497             //
1498             Pfn1 = MiGetPfnEntry(*MdlPages);
1499             if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
1500         } while (++MdlPages < LastPage);
1501 
1502         //
1503         // Release the lock
1504         //
1505         MiReleasePfnLock(OldIrql);
1506 
1507         //
1508         // Check if we have a process
1509         //
1510         if (Process)
1511         {
1512             //
1513             // Handle the accounting of locked pages
1514             //
1515             ASSERT(Process->NumberOfLockedPages > 0);
1516             InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
1517                                         -(LONG_PTR)PageCount);
1518         }
1519 
1520         //
1521         // We're done
1522         //
1523         Mdl->MdlFlags &= ~MDL_IO_SPACE;
1524         Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
1525         return;
1526     }
1527 
1528     //
1529     // Check if we have a process
1530     //
1531     if (Process)
1532     {
1533         //
1534         // Handle the accounting of locked pages
1535         //
1536         ASSERT(Process->NumberOfLockedPages > 0);
1537         InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
1538                                     -(LONG_PTR)PageCount);
1539     }
1540 
1541     //
1542     // Loop every page
1543     //
1544     LastPage = MdlPages + PageCount;
1545     do
1546     {
1547         //
1548         // Last page reached
1549         //
1550         if (*MdlPages == LIST_HEAD)
1551         {
1552             //
1553             // Were there no pages at all?
1554             //
1555             if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
1556             {
1557                 //
1558                 // We're already done
1559                 //
1560                 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
1561                 return;
1562             }
1563 
1564             //
1565             // Otherwise, stop here
1566             //
1567             LastPage = MdlPages;
1568             break;
1569         }
1570 
1571         /* Save the PFN entry instead for the secondary loop */
1572         *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
1573         ASSERT(*MdlPages != 0);
1574     } while (++MdlPages < LastPage);
1575 
1576     //
1577     // Reset pointer
1578     //
1579     MdlPages = (PPFN_NUMBER)(Mdl + 1);
1580 
1581     //
1582     // Now grab the PFN lock for the actual unlock and dereference
1583     //
1584     OldIrql = MiAcquirePfnLock();
1585     do
1586     {
1587         /* Get the current entry and reference count */
1588         Pfn1 = (PMMPFN)*MdlPages;
1589         MiDereferencePfnAndDropLockCount(Pfn1);
1590     } while (++MdlPages < LastPage);
1591 
1592     //
1593     // Release the lock
1594     //
1595     MiReleasePfnLock(OldIrql);
1596 
1597     //
1598     // We're done
1599     //
1600     Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
1601 }
1602 
1603 /*
1604  * @unimplemented
1605  */
1606 NTSTATUS
1607 NTAPI
1608 MmAdvanceMdl(IN PMDL Mdl,
1609              IN ULONG NumberOfBytes)
1610 {
1611     UNIMPLEMENTED;
1612     return STATUS_NOT_IMPLEMENTED;
1613 }
1614 
1615 /*
1616  * @unimplemented
1617  */
1618 PVOID
1619 NTAPI
1620 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
1621                                     IN ULONG PoolTag,
1622                                     IN PMDL MemoryDescriptorList,
1623                                     IN MEMORY_CACHING_TYPE CacheType)
1624 {
1625     UNIMPLEMENTED;
1626     return 0;
1627 }
1628 
1629 /*
1630  * @unimplemented
1631  */
1632 VOID
1633 NTAPI
1634 MmUnmapReservedMapping(IN PVOID BaseAddress,
1635                        IN ULONG PoolTag,
1636                        IN PMDL MemoryDescriptorList)
1637 {
1638     UNIMPLEMENTED;
1639 }
1640 
1641 /*
1642  * @unimplemented
1643  */
1644 NTSTATUS
1645 NTAPI
1646 MmPrefetchPages(IN ULONG NumberOfLists,
1647                 IN PREAD_LIST *ReadLists)
1648 {
1649     UNIMPLEMENTED;
1650     return STATUS_NOT_IMPLEMENTED;
1651 }
1652 
1653 /*
1654  * @unimplemented
1655  */
1656 NTSTATUS
1657 NTAPI
1658 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
1659                           IN ULONG NewProtect)
1660 {
1661     UNIMPLEMENTED;
1662     return STATUS_NOT_IMPLEMENTED;
1663 }
1664 
1665 /*
1666  * @unimplemented
1667  */
1668 VOID
1669 NTAPI
1670 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
1671                            IN PEPROCESS Process,
1672                            IN KPROCESSOR_MODE AccessMode,
1673                            IN LOCK_OPERATION Operation)
1674 {
1675     UNIMPLEMENTED;
1676 }
1677 
1678 
1679 /*
1680  * @unimplemented
1681  */
1682 VOID
1683 NTAPI
1684 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
1685                             IN LARGE_INTEGER PageList[],
1686                             IN KPROCESSOR_MODE AccessMode,
1687                             IN LOCK_OPERATION Operation)
1688 {
1689     UNIMPLEMENTED;
1690 }
1691 
1692 /*
1693  * @unimplemented
1694  */
1695 VOID
1696 NTAPI
1697 MmMapMemoryDumpMdl(IN PMDL Mdl)
1698 {
1699     UNIMPLEMENTED;
1700 }
1701 
1702 /* EOF */
1703