/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     BSD - See COPYING.ARM in the top level directory
 * FILE:        ntoskrnl/mm/ARM3/special.c
 * PURPOSE:     ARM Memory Manager Special Pool implementation
 * PROGRAMMERS: ReactOS Portable Systems Group
 */

/*
    References:
    http://msdn.microsoft.com/en-us/library/ff551832(v=VS.85).aspx
*/

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

extern ULONG ExpPoolFlags;
extern PMMPTE MmSystemPteBase;

PMMPTE
NTAPI
MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
                           IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
                           IN ULONG Alignment);

/* GLOBALS ********************************************************************/

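/*
 * Each special pool allocation consumes a pair of PTEs: the first maps the
 * single data page, the second is kept invalid and acts as the guard page.
 * While an allocation is live, the guard PTE's PageFileHigh field carries
 * one of the marker values below, so the pool type can be recovered and
 * verified on free.
 */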
#define SPECIAL_POOL_PAGED_PTE    0x2000
#define SPECIAL_POOL_NONPAGED_PTE 0x4000
#define SPECIAL_POOL_PAGED        0x8000

PVOID MmSpecialPoolStart;
PVOID MmSpecialPoolEnd;
PVOID MiSpecialPoolExtra;
ULONG MiSpecialPoolExtraCount;

PMMPTE MiSpecialPoolFirstPte;
PMMPTE MiSpecialPoolLastPte;

PFN_COUNT MmSpecialPagesInUse;
PFN_COUNT MmSpecialPagesInUsePeak;
PFN_COUNT MiSpecialPagesPagable;
PFN_COUNT MiSpecialPagesPagablePeak;
PFN_COUNT MiSpecialPagesNonPaged;
PFN_COUNT MiSpecialPagesNonPagedPeak;
PFN_COUNT MiSpecialPagesNonPagedMaximum;

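/*
 * When catching overruns (the default), the user buffer is placed at the end
 * of the page so a write past it faults on the guard page; when catching
 * underruns, the buffer is placed at the start of the page instead. In
 * MmAllocateSpecialPool, SpecialType 0 forces the overrun layout and
 * SpecialType 1 the underrun layout; any other value defers to this flag.
 */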
BOOLEAN MmSpecialPoolCatchOverruns = TRUE;

typedef struct _MI_FREED_SPECIAL_POOL
{
    POOL_HEADER OverlaidPoolHeader;
    /* TODO: Add overlaid verifier pool header */
    ULONG Signature;
    ULONG TickCount;
    ULONG NumberOfBytesRequested;
    BOOLEAN Pagable;
    PVOID VirtualAddress;
    PVOID StackPointer;
    ULONG StackBytes;
    PETHREAD Thread;
    UCHAR StackData[0x400];
} MI_FREED_SPECIAL_POOL, *PMI_FREED_SPECIAL_POOL;

/* PRIVATE FUNCTIONS **********************************************************/

VOID NTAPI MiTestSpecialPool(VOID);

BOOLEAN
NTAPI
MmUseSpecialPool(SIZE_T NumberOfBytes, ULONG Tag)
{
    /* Special pool is not suitable for allocations bigger than 1 page */
    if (NumberOfBytes > (PAGE_SIZE - sizeof(POOL_HEADER)))
    {
        return FALSE;
    }

    if (MmSpecialPoolTag == '*')
    {
        return TRUE;
    }

    return Tag == MmSpecialPoolTag;
}
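
/*
 * A minimal sketch, not taken from this file, of how a pool allocator might
 * consult the helper above (everything except the Mm* routines is
 * illustrative):
 *
 *     if (MmUseSpecialPool(NumberOfBytes, Tag))
 *     {
 *         Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 0);
 *         if (Entry) return Entry;
 *     }
 *     ... otherwise, or if special pool is exhausted, fall back to the
 *     regular pool allocator.
 */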

BOOLEAN
NTAPI
MmIsSpecialPoolAddress(PVOID P)
{
    return ((P >= MmSpecialPoolStart) &&
            (P <= MmSpecialPoolEnd));
}

BOOLEAN
NTAPI
MmIsSpecialPoolAddressFree(PVOID P)
{
    PMMPTE PointerPte;

    ASSERT(MmIsSpecialPoolAddress(P));
    PointerPte = MiAddressToPte(P);

    if (PointerPte->u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE ||
        PointerPte->u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE)
    {
        /* Guard page PTE */
        return FALSE;
    }

    /* Free PTE */
    return TRUE;
}

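/*
 * Initialization carves the special pool out of system PTE space: every
 * other PTE in the first page-table's worth is linked into a singly-linked
 * free list (each link covers a data/guard PTE pair), and the remaining PTEs
 * are set aside as an "extra" reserve for MmExpandSpecialPool. On x86, for
 * example, PTE_PER_PAGE is 1024, so the initial list covers 512 allocations.
 */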
VOID
NTAPI
MiInitializeSpecialPool(VOID)
{
    ULONG SpecialPoolPtes, i;
    PMMPTE PointerPte;

    /* Check if there is a special pool tag */
    if ((MmSpecialPoolTag == 0) ||
        (MmSpecialPoolTag == -1)) return;

    /* Calculate number of system PTEs for the special pool */
    if (MmNumberOfSystemPtes >= 0x3000)
        SpecialPoolPtes = MmNumberOfSystemPtes / 3;
    else
        SpecialPoolPtes = MmNumberOfSystemPtes / 6;

    /* Don't let the number go too high */
    if (SpecialPoolPtes > 0x6000) SpecialPoolPtes = 0x6000;

    /* Round up to the page size */
    SpecialPoolPtes = PAGE_ROUND_UP(SpecialPoolPtes);

    ASSERT((SpecialPoolPtes & (PTE_PER_PAGE - 1)) == 0);

    /* Reserve those PTEs */
    do
    {
        PointerPte = MiReserveAlignedSystemPtes(SpecialPoolPtes,
                                                SystemPteSpace,
                                                /*0x400000*/0); // FIXME:
        if (PointerPte) break;

        /* Reserving didn't work, so try to reduce the requested size */
        ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);
        SpecialPoolPtes -= PTE_PER_PAGE;
    } while (SpecialPoolPtes);

    /* Fail if we couldn't reserve them at all */
    if (!SpecialPoolPtes) return;

    /* Make sure we got enough */
    ASSERT(SpecialPoolPtes >= PTE_PER_PAGE);

    /* Save first PTE and its address */
    MiSpecialPoolFirstPte = PointerPte;
    MmSpecialPoolStart = MiPteToAddress(PointerPte);

    for (i = 0; i < PTE_PER_PAGE / 2; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }

    /* Save extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount = SpecialPoolPtes - PTE_PER_PAGE;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    /* Calculate the maximum non-paged part of the special pool */
    MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 4;

    /* Allow a larger share on machines with plenty of physical memory */
    if (MmNumberOfPhysicalPages > 0x3FFF)
        MiSpecialPagesNonPagedMaximum = MmResidentAvailablePages >> 3;

    DPRINT1("Special pool start %p - end %p\n", MmSpecialPoolStart, MmSpecialPoolEnd);
    ExpPoolFlags |= POOL_FLAG_SPECIAL_POOL;

    //MiTestSpecialPool();
}

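/*
 * Called with the PFN lock held when the free list has run dry: links one
 * more page-table's worth of PTE pairs from the "extra" reserve onto the
 * list, exactly as MiInitializeSpecialPool did for the first chunk.
 */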
NTSTATUS
NTAPI
MmExpandSpecialPool(VOID)
{
    ULONG i;
    PMMPTE PointerPte;

    MI_ASSERT_PFN_LOCK_HELD();

    if (MiSpecialPoolExtraCount == 0)
        return STATUS_INSUFFICIENT_RESOURCES;

    PointerPte = MiSpecialPoolExtra;
    ASSERT(MiSpecialPoolFirstPte == MiSpecialPoolLastPte);
    ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
    MiSpecialPoolFirstPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    ASSERT(MiSpecialPoolExtraCount >= PTE_PER_PAGE);
    for (i = 0; i < PTE_PER_PAGE / 2; i++)
    {
        /* Point it to the next entry */
        PointerPte->u.List.NextEntry = &PointerPte[2] - MmSystemPteBase;

        /* Move to the next pair */
        PointerPte += 2;
    }

    /* Save remaining extra values */
    MiSpecialPoolExtra = PointerPte;
    MiSpecialPoolExtraCount -= PTE_PER_PAGE;

    /* Mark the previous PTE as the last one */
    MiSpecialPoolLastPte = PointerPte - 2;
    MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;

    /* Save new end address of the special pool */
    MmSpecialPoolEnd = MiPteToAddress(MiSpecialPoolLastPte + 1);

    return STATUS_SUCCESS;
}

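/*
 * Allocation in brief: pop a PTE pair off the free list, back the first PTE
 * with a fresh physical page, fill the page with a pattern byte (the low
 * byte of the tick count), then carve the POOL_HEADER and the user buffer
 * out of it according to the overrun/underrun layout. The pattern byte is
 * stashed in Header->BlockSize so the free path can verify the slack bytes.
 */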
PVOID
NTAPI
MmAllocateSpecialPool(SIZE_T NumberOfBytes, ULONG Tag, POOL_TYPE PoolType, ULONG SpecialType)
{
    KIRQL Irql;
    MMPTE TempPte = ValidKernelPte;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameNumber;
    LARGE_INTEGER TickCount;
    PVOID Entry;
    PPOOL_HEADER Header;
    PFN_COUNT PagesInUse;

    DPRINT("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes, Tag, PoolType, SpecialType);

    /* Check if the pool is initialized and quit if it's not */
    if (!MiSpecialPoolFirstPte) return NULL;

    /* Get the pool type */
    PoolType &= BASE_POOL_TYPE_MASK;

    /* Check whether current IRQL matches the pool type */
    Irql = KeGetCurrentIrql();

    if (((PoolType == PagedPool) && (Irql > APC_LEVEL)) ||
        ((PoolType != PagedPool) && (Irql > DISPATCH_LEVEL)))
    {
        /* Bad caller */
        KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                     Irql,
                     PoolType,
                     NumberOfBytes,
                     0x30);
    }

    /* Some allocations from Mm must never use special pool */
    if (Tag == 'tSmM')
    {
        /* Reject and let normal pool handle it */
        return NULL;
    }

    /* TODO: Take into account various limitations */

    /* Heed the maximum limit of nonpaged pages */
    if ((PoolType == NonPagedPool) &&
        (MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum))
    {
        return NULL;
    }

    /* Lock PFN database */
    Irql = MiAcquirePfnLock();

    /* Reject allocation in case amount of available pages is too small */
    if (MmAvailablePages < 0x100)
    {
        /* Release the PFN database lock */
        MiReleasePfnLock(Irql);
        DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages);
        return NULL;
    }

    /* Check if special pool PTE list is exhausted */
    if (MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST)
    {
        /* Try to expand it */
        if (!NT_SUCCESS(MmExpandSpecialPool()))
        {
            /* No reserves left, reject this allocation */
            static int once;
            MiReleasePfnLock(Irql);
            if (!once++) DPRINT1("Special pool: No PTEs left!\n");
            return NULL;
        }
        ASSERT(MiSpecialPoolFirstPte->u.List.NextEntry != MM_EMPTY_PTE_LIST);
    }

    /* Save allocation time */
    KeQueryTickCount(&TickCount);

    /* Get a pointer to the first PTE */
    PointerPte = MiSpecialPoolFirstPte;

    /* Set the first PTE pointer to the next one in the list */
    MiSpecialPoolFirstPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    /* Allocate a physical page */
    if (PoolType == PagedPool)
    {
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
    }
    else
    {
        MI_SET_USAGE(MI_USAGE_NONPAGED_POOL);
    }
    MI_SET_PROCESS2("Kernel-Special");
    PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

    /* Initialize PFN and make it valid */
    TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
    MiInitializePfnAndMakePteValid(PageFrameNumber, PointerPte, TempPte);

    /* Release the PFN database lock */
    MiReleasePfnLock(Irql);

    /* Increase page counter */
    PagesInUse = InterlockedIncrementUL(&MmSpecialPagesInUse);
    if (PagesInUse > MmSpecialPagesInUsePeak)
        MmSpecialPagesInUsePeak = PagesInUse;

    /* Fill the page with a recognizable pattern; the low byte of the tick count will do */
    Entry = MiPteToAddress(PointerPte);
    RtlFillMemory(Entry, PAGE_SIZE, TickCount.LowPart);
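    /* The same pattern byte is saved in Header->BlockSize below; the slack
       bytes around the user buffer are later compared against it by
       MiSpecialPoolCheckPattern and MmFreeSpecialPool */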

    /* Calculate header and entry addresses */
    if ((SpecialType != 0) &&
        ((SpecialType == 1) || (!MmSpecialPoolCatchOverruns)))
    {
        /* We catch underruns. Data is at the beginning of the page */
        Header = (PPOOL_HEADER)((PUCHAR)Entry + PAGE_SIZE - sizeof(POOL_HEADER));
    }
    else
    {
        /* We catch overruns. Data is at the end of the page */
        Header = (PPOOL_HEADER)Entry;
        Entry = (PVOID)((ULONG_PTR)((PUCHAR)Entry - NumberOfBytes + PAGE_SIZE) & ~((LONG_PTR)sizeof(POOL_HEADER) - 1));
    }

    /* Initialize the header */
    RtlZeroMemory(Header, sizeof(POOL_HEADER));

    /* Save allocation size there */
    Header->Ulong1 = (ULONG)NumberOfBytes;

    /* Make sure it's all good */
    ASSERT((NumberOfBytes <= PAGE_SIZE - sizeof(POOL_HEADER)) &&
           (PAGE_SIZE <= 32 * 1024));

    /* Mark it as paged or nonpaged */
    if (PoolType == PagedPool)
    {
        /* Add pagedpool flag into the pool header too */
        Header->Ulong1 |= SPECIAL_POOL_PAGED;

        /* Also mark the next PTE as special-pool-paged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_PAGED_PTE;

        /* Increase pagable counter */
        PagesInUse = InterlockedIncrementUL(&MiSpecialPagesPagable);
        if (PagesInUse > MiSpecialPagesPagablePeak)
            MiSpecialPagesPagablePeak = PagesInUse;
    }
    else
    {
        /* Mark the next PTE as special-pool-nonpaged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_NONPAGED_PTE;

        /* Increase nonpaged counter */
        PagesInUse = InterlockedIncrementUL(&MiSpecialPagesNonPaged);
        if (PagesInUse > MiSpecialPagesNonPagedPeak)
            MiSpecialPagesNonPagedPeak = PagesInUse;
    }

    /* Finally save tag and put allocation time into the header's blocksize.
       That time will be used to check memory consistency within the allocated
       page. */
    Header->PoolTag = Tag;
    Header->BlockSize = (UCHAR)TickCount.LowPart;
    DPRINT("%p\n", Entry);
    return Entry;
}

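/*
 * For reference, the two page layouts produced above and verified below
 * (widths not to scale; a few alignment slack bytes may follow the buffer
 * in the overrun layout):
 *
 *   Catching overruns (default):
 *   +-------------+----------------------+-------------+  +------------+
 *   | POOL_HEADER | fill pattern         | user buffer |  | guard page |
 *   +-------------+----------------------+-------------+  +------------+
 *
 *   Catching underruns:
 *   +-------------+----------------------+-------------+  +------------+
 *   | user buffer | fill pattern         | POOL_HEADER |  | guard page |
 *   +-------------+----------------------+-------------+  +------------+
 */
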
VOID
NTAPI
MiSpecialPoolCheckPattern(PUCHAR P, PPOOL_HEADER Header)
{
    ULONG BytesToCheck, BytesRequested, Index;
    PUCHAR Ptr;

    /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* Get a pointer to the end of user's area */
    Ptr = P + BytesRequested;

    /* Calculate how many bytes to check */
    BytesToCheck = (ULONG)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - Ptr);

    /* Remove pool header size if we're catching underruns */
    if (((ULONG_PTR)P & (PAGE_SIZE - 1)) == 0)
    {
        /* User buffer is located in the beginning of the page */
        BytesToCheck -= sizeof(POOL_HEADER);
    }

    /* Check the pattern after user buffer */
    for (Index = 0; Index < BytesToCheck; Index++)
    {
        /* Bugcheck if bytes don't match */
        if (Ptr[Index] != Header->BlockSize)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         (ULONG_PTR)&Ptr[Index],
                         Header->BlockSize,
                         0x24);
        }
    }
}

VOID
NTAPI
MmFreeSpecialPool(PVOID P)
{
    PMMPTE PointerPte;
    PPOOL_HEADER Header;
    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    POOL_TYPE PoolType;
    ULONG BytesRequested, BytesReal = 0;
    ULONG PtrOffset;
    PUCHAR b;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;
    PMMPFN Pfn;

    DPRINT("MmFreeSpecialPool(%p)\n", P);

    /* Get the PTE */
    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)
    {
        /* Bugcheck if it has NOACCESS or 0 set as protection */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         (ULONG_PTR)PointerPte,
                         0,
                         0x20);
        }
    }

    /* Determine whether it's an overrun- or underrun-catching pool pointer */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));
    if (PtrOffset)
    {
        /* Pool catches overruns */
        Header = PAGE_ALIGN(P);
        Overruns = TRUE;
    }
    else
    {
        /* Pool catches underruns */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
    }

    /* Check if it's nonpaged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)
    {
        /* Non-paged allocation, ensure that IRQL is not higher than DISPATCH_LEVEL */
        PoolType = NonPagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         Irql,
                         PoolType,
                         (ULONG_PTR)P,
                         0x31);
        }
    }
    else
    {
        /* Paged allocation, ensure that IRQL is not higher than APC_LEVEL */
        PoolType = PagedPool;
        ASSERT(PointerPte[1].u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > APC_LEVEL)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         Irql,
                         PoolType,
                         (ULONG_PTR)P,
                         0x31);
        }
    }

    /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;
    ASSERT(BytesRequested <= PAGE_SIZE - sizeof(POOL_HEADER));

    /* When catching overruns, check the memory in front of the user buffer */
    if (Overruns)
    {
        /* Calculate the real placement of the buffer */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable */
        if (BytesRequested > BytesReal)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         BytesRequested,
                         BytesReal,
                         0x21);
        }

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)
        {
            KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                         (ULONG_PTR)P,
                         BytesRequested,
                         BytesReal,
                         0x22);
        }

        /* Actually check the memory pattern */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)
        {
            if (*b != Header->BlockSize)
            {
                /* Bytes mismatch */
                KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION,
                             (ULONG_PTR)P,
                             (ULONG_PTR)b,
                             Header->BlockSize,
                             0x23);
            }
        }
    }

    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill the freed header */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;

    if (PoolType == NonPagedPool)
    {
        /* Non-pagable: get the PFN element corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Count the page as free */
        InterlockedDecrementUL(&MiSpecialPagesNonPaged);

        /* Lock PFN database */
        Irql = MiAcquirePfnLock();

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        MI_ERASE_PTE(PointerPte);

        /* Flush the TLB */
        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);
    }
    else
    {
        /* Pagable. Delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Count the page as free */
        InterlockedDecrementUL(&MiSpecialPagesPagable);

        /* Lock PFN database */
        Irql = MiAcquirePfnLock();
    }

    /* Mark next PTE as invalid */
    MI_ERASE_PTE(PointerPte + 1);

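    /* Append the pair to the tail of the free list rather than the head:
       FIFO reuse keeps this virtual address unmapped for as long as
       possible, so a late use-after-free still faults on the erased PTE */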
    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE's next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    MiReleasePfnLock(Irql);

    /* Update page counter */
    InterlockedDecrementUL(&MmSpecialPagesInUse);
}

VOID
NTAPI
MiTestSpecialPool(VOID)
{
    ULONG i;
    PVOID p1, p2[100];
    //PUCHAR p3;
    ULONG ByteSize;
    POOL_TYPE PoolType = PagedPool;

    // First allocate/free
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p1 = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p1 %p size %lu\n", p1, ByteSize);
        MmFreeSpecialPool(p1);
    }

    // Now allocate all at once, then free all at once
    for (i = 0; i < 100; i++)
    {
        ByteSize = (100 * (i + 1)) % (PAGE_SIZE - sizeof(POOL_HEADER));
        p2[i] = MmAllocateSpecialPool(ByteSize, 'TEST', PoolType, 0);
        DPRINT1("p2[%lu] %p size %lu\n", i, p2[i], ByteSize);
    }
    for (i = 0; i < 100; i++)
    {
        DPRINT1("Freeing %p\n", p2[i]);
        MmFreeSpecialPool(p2[i]);
    }

    // Overrun the buffer to test
    //ByteSize = 16;
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 0);
    //p3[ByteSize] = 0xF1; // This should cause an exception

    // Underrun the buffer to test
    //p3 = MmAllocateSpecialPool(ByteSize, 'TEST', NonPagedPool, 1);
    //p3--;
    //*p3 = 0xF1; // This should cause an exception
}

/* EOF */