/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

/*
 * This defines when we shrink or expand the table.
 * 3 --> keep the number of used entries within 33%-66% of the table capacity.
 * 4 --> 25% - 75%
 * etc.
 */
#define POOL_BIG_TABLE_USE_RATE 4
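
//
// Worked example (illustrative arithmetic only): with a use rate of 4 and a
// 4096-entry big page table, the expansion check in ExpAddTagForBigPages
// fires once more than 4096 * 3 / 4 == 3072 entries are in use, while the
// matching shrink logic (not shown in this excerpt) aims to keep usage above
// 4096 / 4 == 1024 entries, staying roughly inside the 25%-75% band
// described above.
//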

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
ULONG ExpBigTableExpansionFailed;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;
ULONGLONG MiLastPoolDumpTime;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x)  + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))
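
//
// Worked example (illustrative): on 32-bit builds both POOL_BLOCK_SIZE and
// sizeof(POOL_HEADER) are 8 bytes (16 on 64-bit builds), so for a header at
// 0x81402000 with BlockSize == 4:
//
//     POOL_FREE_BLOCK(header) == 0x81402008   (the caller-visible payload)
//     POOL_NEXT_BLOCK(header) == 0x81402020   (4 block units of 8 bytes each)
//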

/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
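
//
// Worked example (illustrative): LIST_ENTRY pointers are always at least
// 4-byte aligned, so bit 0 is free to act as a tag bit. A pointer value of
// 0x8153F008 round-trips as:
//
//     ExpEncodePoolLink((PLIST_ENTRY)0x8153F008) == (PLIST_ENTRY)0x8153F009
//     ExpDecodePoolLink((PLIST_ENTRY)0x8153F009) == (PLIST_ENTRY)0x8153F008
//
// Code that follows an encoded Flink/Blink without decoding it ends up at an
// odd address, so stale or tampered links fail the ExpCheckPoolLinks
// validation below instead of being silently followed.
//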

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}

VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}

VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}
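
//
// Illustrative layout (assuming the x86 values PAGE_SIZE == 4096 and
// POOL_BLOCK_SIZE == 8): a pool page holds 4096 / 8 == 512 block units, so a
// page carved into three allocations could have BlockSizes 4 + 12 + 496 ==
// 512. The walk above just follows POOL_NEXT_BLOCK until those units are
// consumed and the pointer is page-aligned again.
//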

FORCEINLINE
VOID
ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN PVOID Entry)
{
    //
    // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
    // be DISPATCH_LEVEL or lower for Non Paged Pool
    //
    if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
        (KeGetCurrentIrql() > APC_LEVEL) :
        (KeGetCurrentIrql() > DISPATCH_LEVEL))
    {
        //
        // Take the system down
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
                     KeGetCurrentIrql(),
                     PoolType,
                     !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
    }
}
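
//
// Concrete consequence (illustrative): ExAllocatePoolWithTag(PagedPool, ...)
// called from a DPC routine runs at DISPATCH_LEVEL, fails the check above and
// bugchecks with BAD_POOL_CALLER / POOL_ALLOC_IRQL_INVALID, while the same
// call with NonPagedPool is legal up to DISPATCH_LEVEL.
//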

FORCEINLINE
ULONG
ExpComputeHashForTag(IN ULONG Tag,
                     IN SIZE_T BucketMask)
{
    //
    // Compute the hash by multiplying with a large prime number and then XORing
    // with the HIDWORD of the result.
    //
    // Finally, AND with the bucket mask to generate a valid index/bucket into
    // the table
    //
    ULONGLONG Result = (ULONGLONG)40543 * Tag;
    return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
}
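
//
// Worked example (illustrative arithmetic only): for Tag == 'looP'
// (0x6C6F6F50), Result == 40543 * 0x6C6F6F50 == 0x00004315040DAEB0; XORing
// the low dword 0x040DAEB0 with the high dword 0x00004315 gives 0x040DEDA5,
// and with the default 2048-entry tracker table (BucketMask == 0x7FF) the
// tag lands in bucket 0x5A5.
//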

FORCEINLINE
ULONG
ExpComputePartialHashForAddress(IN PVOID BaseAddress)
{
    ULONG Result;
    //
    // Compute the hash by converting the address into a page number, and then
    // XORing each byte with the next one.
    //
    // We do *NOT* AND with the bucket mask at this point because big table expansion
    // might happen. Therefore, the final step of the hash must be performed
    // while holding the expansion pushlock, and this is why we call this a
    // "partial" hash only.
    //
    Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
    return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
}
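
//
// Worked example (illustrative arithmetic only): for a 32-bit address such as
// 0x81230000 with PAGE_SHIFT == 12, the page number is 0x81230 and the byte
// fold gives 0x81230 ^ 0x812 ^ 0x8 == 0x81A2A; a later
// "Hash &= PoolBigPageTableHash" with a 4096-entry table (mask 0xFFF) then
// selects bucket 0xA2A.
//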

#if DBG
/*
 * FORCEINLINE
 * BOOLEAN
 * ExpTagAllowPrint(CHAR Tag);
 */
#define ExpTagAllowPrint(Tag)   \
    ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */)

#ifdef KDBG
#define MiDumperPrint(dbg, fmt, ...)        \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)
#else
#define MiDumperPrint(dbg, fmt, ...)        \
    DPRINT1(fmt, ##__VA_ARGS__)
#endif

VOID
MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
{
    SIZE_T i;
    BOOLEAN Verbose;

    //
    // Only print header if called from OOM situation
    //
    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
        DPRINT1("Out of memory dumper!\n");
    }
#ifdef KDBG
    else
    {
        KdbpPrint("Pool Used:\n");
    }
#endif

    //
    // Remember whether we'll have to be verbose
    // This is the only supported flag!
    //
    Verbose = BooleanFlagOn(Flags, 1);

    //
    // Print table header
    //
    if (Verbose)
    {
        MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
    }
    else
    {
        MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
    }

    //
    // We'll extract allocations for all the tracked pools
    //
    for (i = 0; i < PoolTrackTableSize; ++i)
    {
        PPOOL_TRACKER_TABLE TableEntry;

        TableEntry = &PoolTrackTable[i];

        //
        // We only care about tags which have allocated memory
        //
        if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
        {
            //
            // If there's a tag, attempt to do a pretty print
            // only if it matches the caller's tag, or if
            // any tag is allowed
            // For checking whether it matches caller's tag,
            // use the mask to make sure not to mess with the wildcards
            //
            if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
                (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
            {
                CHAR Tag[4];

                //
                // Extract each 'component' and check whether they are printable
                //
                Tag[0] = TableEntry->Key & 0xFF;
                Tag[1] = TableEntry->Key >> 8 & 0xFF;
                Tag[2] = TableEntry->Key >> 16 & 0xFF;
                Tag[3] = TableEntry->Key >> 24 & 0xFF;

                if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
                {
                    //
                    // Print in direct order to make !poolused TAG usage easier
                    //
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
                else
                {
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
            }
            else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
            {
                if (Verbose)
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                }
                else
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedBytes);
                }
            }
        }
    }

    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
    }
}
#endif

/* PRIVATE FUNCTIONS **********************************************************/

CODE_SEG("INIT")
VOID
NTAPI
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        '  oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        '  eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        '  pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}
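
//
// Byte-order note (illustrative): multi-character constants are stored
// little-endian on the architectures ReactOS targets, so the seeded tag
// 'looP' sits in memory as the bytes 'P','o','o','l' -- exactly what poolmon
// and !poolused display as "Pool". Likewise ' prI' reads as "Irp " and
// 'eliF' as "File".
//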

VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

CODE_SEG("INIT")
VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support Session Pool Yet
    //
    ASSERT(PoolType != PagedPoolSession);
}

CODE_SEG("INIT")
VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;

        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
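
        //
        // Worked example (illustrative): with the default of 2048 entries,
        // 2048 + 1 slots were allocated above, so PoolTrackTableSize is now
        // 2049 and PoolTrackTableMask is 2047 (0x7FF). The final slot is the
        // one that ExpSeedHotTags and ExpInsertPoolTracker deliberately never
        // fill (their "Hash != PoolTrackTableSize - 1" checks).
        //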

        //
        // Finally, add the most used tags to speed up those allocations
        //
        ExpSeedHotTags();

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            PoolBigPageTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
        }

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
                PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
                PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));

        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
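
//
// Typical usage pattern (illustrative sketch only):
//
//     KIRQL OldIrql = ExLockPool(PoolDesc);
//     /* ... touch PoolDesc->ListHeads and accounting fields ... */
//     ExUnlockPool(PoolDesc, OldIrql);
//
// For nonpaged pool the returned IRQL is whatever level the queued spinlock
// was acquired from; for paged pool it is reported as APC_LEVEL, and
// ExUnlockPool can ignore it because the guarded mutex restores the special
// APC state itself on release.
//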

VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}

NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}
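
//
// Hedged user-mode sketch (illustrative, not part of this file): callers
// reach this function through NtQuerySystemInformation with the
// SystemPoolTagInformation class, typically growing the buffer until the
// STATUS_INFO_LENGTH_MISMATCH set above goes away:
//
//     ULONG Len = 0x1000;
//     PSYSTEM_POOLTAG_INFORMATION Info;
//     NTSTATUS Status;
//     do
//     {
//         Info = malloc(Len);
//         Status = NtQuerySystemInformation(SystemPoolTagInformation,
//                                           Info, Len, &Len);
//         if (!NT_SUCCESS(Status)) free(Info);
//     } while (Status == STATUS_INFO_LENGTH_MISMATCH);
//     // On success, Info->TagInfo[0 .. Info->Count - 1] holds the per-tag
//     // totals captured by the "Generic DPC" above.
//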

_IRQL_requires_(DISPATCH_LEVEL)
static
BOOLEAN
ExpReallocateBigPageTable(
    _In_ _IRQL_restores_ KIRQL OldIrql,
    _In_ BOOLEAN Shrink)
{
    SIZE_T OldSize = PoolBigPageTableSize;
    SIZE_T NewSize, NewSizeInBytes;
    PPOOL_TRACKER_BIG_PAGES NewTable;
    PPOOL_TRACKER_BIG_PAGES OldTable;
    ULONG i;
    ULONG PagesFreed;
    ULONG Hash;
    ULONG HashMask;

    /* Must be holding ExpLargePoolTableLock */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    /* Make sure we don't overflow */
    if (Shrink)
    {
        NewSize = OldSize / 2;

        /* Make sure we don't shrink too much. */
        ASSERT(NewSize >= ExpPoolBigEntriesInUse);

        NewSize = ALIGN_UP_BY(NewSize, PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES));
        ASSERT(NewSize <= OldSize);

        /* If there is only one page left, then keep it around. Not a failure either. */
        if (NewSize == OldSize)
        {
            ASSERT(NewSize == (PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES)));
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }
    }
    else
    {
        if (!NT_SUCCESS(RtlSIZETMult(2, OldSize, &NewSize)))
        {
            DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return FALSE;
        }

        /* Make sure we don't stupidly waste pages */
        NewSize = ALIGN_DOWN_BY(NewSize, PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES));
        ASSERT(NewSize > OldSize);
    }

    if (!NT_SUCCESS(RtlSIZETMult(sizeof(POOL_TRACKER_BIG_PAGES), NewSize, &NewSizeInBytes)))
    {
        DPRINT1("Overflow while calculating big page table size. Size=%lu\n", OldSize);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
    if (NewTable == NULL)
    {
        DPRINT("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    DPRINT("%s big pool tracker table to %lu entries\n", Shrink ? "Shrinking" : "Expanding", NewSize);

    /* Initialize the new table */
    RtlZeroMemory(NewTable, NewSizeInBytes);
    for (i = 0; i < NewSize; i++)
    {
        NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
    }

    /* Copy over all items */
    OldTable = PoolBigPageTable;
    HashMask = NewSize - 1;
    for (i = 0; i < OldSize; i++)
    {
        /* Skip over empty items */
        if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
        {
            continue;
        }

        /* Recalculate the hash due to the new table size */
        Hash = ExpComputePartialHashForAddress(OldTable[i].Va) % HashMask;

        /* Find the location in the new table */
        while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
        {
            if (++Hash == NewSize)
                Hash = 0;
        }

        /* We must have space */
        ASSERT((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE);

        /* Finally, copy the item */
        NewTable[Hash] = OldTable[i];
    }

    /* Activate the new table */
    PoolBigPageTable = NewTable;
    PoolBigPageTableSize = NewSize;
    PoolBigPageTableHash = PoolBigPageTableSize - 1;

    /* Release the lock, we're done changing global state */
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

    /* Free the old table and update our tracker */
    PagesFreed = MiFreePoolPages(OldTable);
    ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
    ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);

    return TRUE;
}

BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    // NOTE: Windows uses a special reader/writer SpinLock to improve
    // performance in the common case (add/remove a tracker entry)
    //
Retry:
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // rollover to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 75% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
1632             // Note that we only do this if this is at least the 16th time
1633             // that we lose the race or fail to find a free entry,
1634             // which implies a massive number of concurrent big pool allocations.
1635             //
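            //
            // For example, with POOL_BIG_TABLE_USE_RATE == 4 and a 512-entry
            // table, the check below fires once we exceed 512 * 3 / 4 == 384
            // entries in use, i.e. once the table is more than 75% full.
            //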
1636             ExpPoolBigEntriesInUse++;
1637             if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize * (POOL_BIG_TABLE_USE_RATE - 1) / POOL_BIG_TABLE_USE_RATE)))
1638             {
1639                 DPRINT("Attempting expansion since we now have %lu entries\n",
1640                         ExpPoolBigEntriesInUse);
1641                 ASSERT(TableSize == PoolBigPageTableSize);
1642                 ExpReallocateBigPageTable(OldIrql, FALSE);
1643                 return TRUE;
1644             }
1645 
1646             //
1647             // We have our entry, return
1648             //
1649             KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1650             return TRUE;
1651         }
1652 
1653         //
1654         // We don't have our entry yet, so keep trying, making the entry list
1655         // circular if we reach the last entry. We'll eventually break out of
1656         // the loop once we've rolled over and returned back to our original
1657         // hash bucket
1658         //
1659         i++;
1660         if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1661     } while (Entry != EntryStart);
1662 
1663     //
1664     // This means there are no free hash buckets whatsoever, so we now have
1665     // to attempt expanding the table
1666     //
1667     ASSERT(TableSize == PoolBigPageTableSize);
1668     if (ExpReallocateBigPageTable(OldIrql, FALSE))
1669     {
1670         goto Retry;
1671     }
1672     ExpBigTableExpansionFailed++;
1673     DPRINT1("Big pool table expansion failed\n");
1674     return FALSE;
1675 }
1676 
1677 ULONG
1678 NTAPI
1679 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1680                             OUT PULONG_PTR BigPages,
1681                             IN POOL_TYPE PoolType)
1682 {
1683     BOOLEAN FirstTry = TRUE;
1684     SIZE_T TableSize;
1685     KIRQL OldIrql;
1686     ULONG PoolTag, Hash;
1687     PPOOL_TRACKER_BIG_PAGES Entry;
1688     ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1689     ASSERT(!(PoolType & SESSION_POOL_MASK));
1690 
1691     //
1692     // As the table is expandable, these values must only be read after acquiring
1693     // the lock to avoid a torn access during an expansion
1694     //
1695     Hash = ExpComputePartialHashForAddress(Va);
1696     KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1697     Hash &= PoolBigPageTableHash;
1698     TableSize = PoolBigPageTableSize;
1699 
1700     //
1701     // Loop while trying to find this big page allocation
1702     //
1703     while (PoolBigPageTable[Hash].Va != Va)
1704     {
1705         //
1706         // Increment the size until we go past the end of the table
1707         // Increment the hash index until we go past the end of the table
1708         if (++Hash >= TableSize)
1709         {
1710             //
1711             // Is this the second time we've tried?
1712             //
1713             if (!FirstTry)
1714             {
1715                 //
1716                 // This means it was never inserted into the pool table and it
1717                 // received the special "BIG" tag -- return that tag and set
1718                 // the page count to 0 so the caller can ask Mm for it instead
1719                 //
1720                 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1721                 *BigPages = 0;
1722                 return ' GIB';
1723             }
1724 
1725             //
1726             // The first time this happens, reset the hash index and try again
1727             //
1728             Hash = 0;
1729             FirstTry = FALSE;
1730         }
1731     }
1732 
1733     //
1734     // Now capture all the information we need from the entry, since after we
1735     // release the lock, the data can change
1736     //
1737     Entry = &PoolBigPageTable[Hash];
1738     *BigPages = Entry->NumberOfPages;
1739     PoolTag = Entry->Key;
1740 
1741     //
1742     // Set the free bit, and decrement the number of allocations. Finally, release
1743     // the lock and return the tag that was located
1744     //
1745     Entry->Va = (PVOID)((ULONG_PTR)Entry->Va | POOL_BIG_TABLE_ENTRY_FREE);
1746 
1747     ExpPoolBigEntriesInUse--;
1748 
1749     /* If usage drops below 12.5% of the table capacity (or whatever integer
1750      * rounding gets us to), halve the table size, which puts us at 25% used. */
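    /* For example, with POOL_BIG_TABLE_USE_RATE == 4 and a 512-entry table,
     * we shrink once fewer than 512 / 8 == 64 entries are in use; the halved
     * 256-entry table then starts out at most 25% used. */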
1751     if (ExpPoolBigEntriesInUse < (PoolBigPageTableSize / (POOL_BIG_TABLE_USE_RATE * 2)))
1752     {
1753         /* Shrink the table. */
1754         ExpReallocateBigPageTable(OldIrql, TRUE);
1755     }
1756     else
1757     {
1758         KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1759     }
1760     return PoolTag;
1761 }
1762 
1763 VOID
1764 NTAPI
1765 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1766                  OUT PULONG NonPagedPoolPages,
1767                  OUT PULONG PagedPoolAllocs,
1768                  OUT PULONG PagedPoolFrees,
1769                  OUT PULONG PagedPoolLookasideHits,
1770                  OUT PULONG NonPagedPoolAllocs,
1771                  OUT PULONG NonPagedPoolFrees,
1772                  OUT PULONG NonPagedPoolLookasideHits)
1773 {
1774     ULONG i;
1775     PPOOL_DESCRIPTOR PoolDesc;
1776 
1777     //
1778     // Assume all failures
1779     //
1780     *PagedPoolPages = 0;
1781     *PagedPoolAllocs = 0;
1782     *PagedPoolFrees = 0;
    *PagedPoolLookasideHits = 0;
    *NonPagedPoolLookasideHits = 0;
1783 
1784     //
1785     // Tally up the totals for all the paged pools
1786     //
1787     for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1788     {
1789         PoolDesc = ExpPagedPoolDescriptor[i];
1790         *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1791         *PagedPoolAllocs += PoolDesc->RunningAllocs;
1792         *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1793     }
1794 
1795     //
1796     // The first non-paged pool has a hardcoded well-known descriptor name
1797     //
1798     PoolDesc = &NonPagedPoolDescriptor;
1799     *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1800     *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1801     *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1802 
1803     //
1804     // If the system has more than one non-paged pool, copy the other descriptor
1805     // totals as well
1806     //
1807 #if 0
1808     if (ExpNumberOfNonPagedPools > 1)
1809     {
1810         for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1811         {
1812             PoolDesc = ExpNonPagedPoolDescriptor[i];
1813             *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1814             *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1815             *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1816         }
1817     }
1818 #endif
1819 
1820     //
1821     // Get the number of hits in the system lookaside lists
1822     //
1823     if (!IsListEmpty(&ExPoolLookasideListHead))
1824     {
1825         PLIST_ENTRY ListEntry;
1826 
1827         for (ListEntry = ExPoolLookasideListHead.Flink;
1828              ListEntry != &ExPoolLookasideListHead;
1829              ListEntry = ListEntry->Flink)
1830         {
1831             PGENERAL_LOOKASIDE Lookaside;
1832 
1833             Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1834 
1835             if (Lookaside->Type == NonPagedPool)
1836             {
1837                 *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1838             }
1839             else
1840             {
1841                 *PagedPoolLookasideHits += Lookaside->AllocateHits;
1842             }
1843         }
1844     }
1845 }
1846 
1847 VOID
1848 NTAPI
1849 ExReturnPoolQuota(IN PVOID P)
1850 {
1851     PPOOL_HEADER Entry;
1852     POOL_TYPE PoolType;
1853     USHORT BlockSize;
1854     PEPROCESS Process;
1855 
1856     if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1857         (MmIsSpecialPoolAddress(P)))
1858     {
1859         return;
1860     }
1861 
1862     Entry = P;
1863     Entry--;
1864     ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1865 
1866     PoolType = Entry->PoolType - 1;
1867     BlockSize = Entry->BlockSize;
1868 
1869     if (PoolType & QUOTA_POOL_MASK)
1870     {
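        //
        // Quota-billed blocks store the owning process in the last PVOID of
        // the block, i.e. [POOL_HEADER][caller data][PEPROCESS]; that tail
        // slot is what the POOL_NEXT_BLOCK(Entry)[-1] accesses below read
        // and clear
        //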
1871         Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1872         ASSERT(Process != NULL);
1873         if (Process)
1874         {
1875             if (Process->Pcb.Header.Type != ProcessObject)
1876             {
1877                 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1878                         Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1879                 KeBugCheckEx(BAD_POOL_CALLER,
1880                              POOL_BILLED_PROCESS_INVALID,
1881                              (ULONG_PTR)P,
1882                              Entry->PoolTag,
1883                              (ULONG_PTR)Process);
1884             }
1885             ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1886             PsReturnPoolQuota(Process,
1887                               PoolType & BASE_POOL_TYPE_MASK,
1888                               BlockSize * POOL_BLOCK_SIZE);
1889             ObDereferenceObject(Process);
1890         }
1891     }
1892 }
1893 
1894 /* PUBLIC FUNCTIONS ***********************************************************/
1895 
1896 /*
1897  * @implemented
1898  */
1899 PVOID
1900 NTAPI
1901 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1902                       IN SIZE_T NumberOfBytes,
1903                       IN ULONG Tag)
1904 {
1905     PPOOL_DESCRIPTOR PoolDesc;
1906     PLIST_ENTRY ListHead;
1907     PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1908     KIRQL OldIrql;
1909     USHORT BlockSize, i;
1910     ULONG OriginalType;
1911     PKPRCB Prcb = KeGetCurrentPrcb();
1912     PGENERAL_LOOKASIDE LookasideList;
1913 
1914     //
1915     // Some sanity checks
1916     //
1917     ASSERT(Tag != 0);
1918     ASSERT(Tag != ' GIB');
1919     ASSERT(NumberOfBytes != 0);
1920     ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1921 
1922     //
1923     // Not supported in ReactOS
1924     //
1925     ASSERT(!(PoolType & SESSION_POOL_MASK));
1926 
1927     //
1928     // Check if verifier or special pool is enabled
1929     //
1930     if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1931     {
1932         //
1933         // For verifier, we should call the verification routine
1934         //
1935         if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1936         {
1937             DPRINT1("Driver Verifier is not yet supported\n");
1938         }
1939 
1940         //
1941         // For special pool, we check if this is a suitable allocation and do
1942         // the special allocation if needed
1943         //
1944         if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1945         {
1946             //
1947             // Check if this is a special pool allocation
1948             //
1949             if (MmUseSpecialPool(NumberOfBytes, Tag))
1950             {
1951                 //
1952                 // Try to allocate using special pool
1953                 //
1954                 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1955                 if (Entry) return Entry;
1956             }
1957         }
1958     }
1959 
1960     //
1961     // Get the pool type and its corresponding vector for this request
1962     //
1963     OriginalType = PoolType;
1964     PoolType = PoolType & BASE_POOL_TYPE_MASK;
1965     PoolDesc = PoolVector[PoolType];
1966     ASSERT(PoolDesc != NULL);
1967 
1968     //
1969     // Check if this is a big page allocation
1970     //
1971     if (NumberOfBytes > POOL_MAX_ALLOC)
1972     {
1973         //
1974         // Allocate pages for it
1975         //
1976         Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1977         if (!Entry)
1978         {
1979 #if DBG
1980             //
1981             // Out of memory: display the current consumption.
1982             // If the caller wanted more than a hundred pages,
1983             // treat it as a bogus caller rather than a genuine
1984             // out-of-memory condition. Dump at most once a
1985             // second to avoid spamming the log.
1986             //
1987             if (NumberOfBytes < 100 * PAGE_SIZE &&
1988                 KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
1989             {
1990                 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1991                 MiLastPoolDumpTime = KeQueryInterruptTime();
1992             }
1993 #endif
1994 
1995             //
1996             // Must succeed pool is deprecated, but still supported. These allocation
1997             // failures must cause an immediate bugcheck
1998             //
1999             if (OriginalType & MUST_SUCCEED_POOL_MASK)
2000             {
2001                 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2002                              NumberOfBytes,
2003                              NonPagedPoolDescriptor.TotalPages,
2004                              NonPagedPoolDescriptor.TotalBigPages,
2005                              0);
2006             }
2007 
2008             //
2009             // Internal debugging
2010             //
2011             ExPoolFailures++;
2012 
2013             //
2014             // This flag requests printing failures, and can also further specify
2015             // breaking on failures
2016             //
2017             if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2018             {
2019                 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2020                         NumberOfBytes,
2021                         OriginalType);
2022                 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2023             }
2024 
2025             //
2026             // Finally, this flag requests an exception, which we are more than
2027             // happy to raise!
2028             //
2029             if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2030             {
2031                 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2032             }
2033 
2034             return NULL;
2035         }
2036 
2037         //
2038         // Increment required counters
2039         //
2040         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2041                                (LONG)BYTES_TO_PAGES(NumberOfBytes));
2042         InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
2043         InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2044 
2045         //
2046         // Add a tag for the big page allocation and switch to the generic "BIG"
2047     // tag if we failed to do so, then insert a tracker for this allocation.
2048         //
2049         if (!ExpAddTagForBigPages(Entry,
2050                                   Tag,
2051                                   (ULONG)BYTES_TO_PAGES(NumberOfBytes),
2052                                   OriginalType))
2053         {
2054             Tag = ' GIB';
2055         }
2056         ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
2057         return Entry;
2058     }
2059 
2060     //
2061     // Should never request 0 bytes from the pool, but since so many drivers do
2062     // it, we'll just assume they want 1 byte, based on NT's similar behavior
2063     //
2064     if (!NumberOfBytes) NumberOfBytes = 1;
2065 
2066     //
2067     // A pool allocation is defined by its data, a linked list to connect it to
2068     // the free list (if necessary), and a pool header to store accounting info.
2069     // Calculate this size, then convert it into a block size (units of pool
2070     // headers)
2071     //
2072     // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
2073     // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
2074     // the direct allocation of pages.
2075     //
2076     i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
2077                  / POOL_BLOCK_SIZE);
2078     ASSERT(i < POOL_LISTS_PER_PAGE);
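    //
    // For example, on a 32-bit build, where POOL_BLOCK_SIZE and
    // sizeof(POOL_HEADER) are both 8, a 24-byte request yields
    // i == (24 + 8 + 7) / 8 == 4 blocks, i.e. a 32-byte chunk:
    // one block of header plus the 24 bytes of caller data
    //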
2079 
2080     //
2081     // Handle lookaside list optimization for both paged and nonpaged pool
2082     //
2083     if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
2084     {
2085         //
2086         // Try popping it from the per-CPU lookaside list
2087         //
2088         LookasideList = (PoolType == PagedPool) ?
2089                          Prcb->PPPagedLookasideList[i - 1].P :
2090                          Prcb->PPNPagedLookasideList[i - 1].P;
2091         LookasideList->TotalAllocates++;
2092         Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
2093         if (!Entry)
2094         {
2095             //
2096             // We failed, try popping it from the global list
2097             //
2098             LookasideList = (PoolType == PagedPool) ?
2099                              Prcb->PPPagedLookasideList[i - 1].L :
2100                              Prcb->PPNPagedLookasideList[i - 1].L;
2101             LookasideList->TotalAllocates++;
2102             Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
2103         }
2104 
2105         //
2106         // If we were able to pop it, update the accounting and return the block
2107         //
2108         if (Entry)
2109         {
2110             LookasideList->AllocateHits++;
2111 
2112             //
2113             // Get the real entry, write down its pool type, and track it
2114             //
2115             Entry--;
2116             Entry->PoolType = OriginalType + 1;
2117             ExpInsertPoolTracker(Tag,
2118                                  Entry->BlockSize * POOL_BLOCK_SIZE,
2119                                  OriginalType);
2120 
2121             //
2122             // Return the pool allocation
2123             //
2124             Entry->PoolTag = Tag;
2125             (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2126             (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2127             return POOL_FREE_BLOCK(Entry);
2128         }
2129     }
2130 
2131     //
2132     // Loop in the free lists looking for a block of this size. Start with the
2133     // list optimized for this kind of size lookup
2134     //
2135     ListHead = &PoolDesc->ListHeads[i];
2136     do
2137     {
2138         //
2139         // Are there any free entries available on this list?
2140         //
2141         if (!ExpIsPoolListEmpty(ListHead))
2142         {
2143             //
2144             // Acquire the pool lock now
2145             //
2146             OldIrql = ExLockPool(PoolDesc);
2147 
2148             //
2149             // And make sure the list still has entries
2150             //
2151             if (ExpIsPoolListEmpty(ListHead))
2152             {
2153                 //
2154                 // Someone raced us (and won) before we had a chance to acquire
2155                 // the lock.
2156                 //
2157                 // Try again!
2158                 //
2159                 ExUnlockPool(PoolDesc, OldIrql);
2160                 continue;
2161             }
2162 
2163             //
2164             // Remove a free entry from the list
2165             // Note that due to the way we insert free blocks into multiple lists
2166             // there is a guarantee that any block on this list will be
2167             // of the correct size or larger.
2168             //
2169             ExpCheckPoolLinks(ListHead);
2170             Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
2171             ExpCheckPoolLinks(ListHead);
2172             ExpCheckPoolBlocks(Entry);
2173             ASSERT(Entry->BlockSize >= i);
2174             ASSERT(Entry->PoolType == 0);
2175 
2176             //
2177             // Check if this block is larger than what we need. The block could
2178             // not possibly be smaller, due to the reason explained above (and
2179             // we would've asserted on a checked build if this was the case).
2180             //
2181             if (Entry->BlockSize != i)
2182             {
2183                 //
2184                 // Is there an entry before this one?
2185                 //
2186                 if (Entry->PreviousSize == 0)
2187                 {
2188                     //
2189                     // There isn't anyone before us, so take the next block and
2190                     // turn it into a fragment that contains the leftover data
2191                     // that we don't need to satisfy the caller's request
2192                     //
2193                     FragmentEntry = POOL_BLOCK(Entry, i);
2194                     FragmentEntry->BlockSize = Entry->BlockSize - i;
2195 
2196                     //
2197                     // And make it point back to us
2198                     //
2199                     FragmentEntry->PreviousSize = i;
2200 
2201                     //
2202                     // Now get the block that follows the new fragment and check
2203                     // if it's still on the same page as us (and not at the end)
2204                     //
2205                     NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2206                     if (PAGE_ALIGN(NextEntry) != NextEntry)
2207                     {
2208                         //
2209                         // Adjust this next block to point to our newly created
2210                         // fragment block
2211                         //
2212                         NextEntry->PreviousSize = FragmentEntry->BlockSize;
2213                     }
2214                 }
2215                 else
2216                 {
2217                     //
2218                     // There is a free entry before us, which we know is smaller
2219                     // so we'll make this entry the fragment instead
2220                     //
2221                     FragmentEntry = Entry;
2222 
2223                     //
2224                     // And then we'll remove from it the actual size required.
2225                     // Now the entry is a leftover free fragment
2226                     //
2227                     Entry->BlockSize -= i;
2228 
2229                     //
2230                     // Now let's go to the next entry after the fragment (which
2231                     // used to point to our original free entry) and make it
2232                     // reference the new fragment entry instead.
2233                     //
2234                     // This is the entry that will actually end up holding the
2235                     // allocation!
2236                     //
2237                     Entry = POOL_NEXT_BLOCK(Entry);
2238                     Entry->PreviousSize = FragmentEntry->BlockSize;
2239 
2240                     //
2241                     // And now let's go to the entry after that one and check if
2242                     // it's still on the same page, and not at the end
2243                     //
2244                     NextEntry = POOL_BLOCK(Entry, i);
2245                     if (PAGE_ALIGN(NextEntry) != NextEntry)
2246                     {
2247                         //
2248                         // Make it reference the allocation entry
2249                         //
2250                         NextEntry->PreviousSize = i;
2251                     }
2252                 }
2253 
2254                 //
2255                 // Now our (allocation) entry is the right size
2256                 //
2257                 Entry->BlockSize = i;
2258 
2259                 //
2260                 // And the next entry is now the free fragment which contains
2261                 // the remaining difference between how big the original entry
2262                 // was, and the actual size the caller needs/requested.
2263                 //
2264                 FragmentEntry->PoolType = 0;
2265                 BlockSize = FragmentEntry->BlockSize;
2266 
2267                 //
2268                 // Now check if enough free bytes remained for us to have a
2269                 // "full" entry, which contains enough bytes for a linked list
2270                 // and thus can be used for allocations (up to 8 bytes...)
2271                 //
2272                 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2273                 if (BlockSize != 1)
2274                 {
2275                     //
2276                     // Insert the free entry into the free list for this size
2277                     //
2278                     ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2279                                           POOL_FREE_BLOCK(FragmentEntry));
2280                     ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2281                 }
2282             }
2283 
2284             //
2285             // We have found an entry for this allocation, so set the pool type
2286             // and release the lock since we're done
2287             //
2288             Entry->PoolType = OriginalType + 1;
2289             ExpCheckPoolBlocks(Entry);
2290             ExUnlockPool(PoolDesc, OldIrql);
2291 
2292             //
2293             // Increment required counters
2294             //
2295             InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2296             InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2297 
2298             //
2299             // Track this allocation
2300             //
2301             ExpInsertPoolTracker(Tag,
2302                                  Entry->BlockSize * POOL_BLOCK_SIZE,
2303                                  OriginalType);
2304 
2305             //
2306             // Return the pool allocation
2307             //
2308             Entry->PoolTag = Tag;
2309             (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2310             (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2311             return POOL_FREE_BLOCK(Entry);
2312         }
2313     } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2314 
2315     //
2316     // There were no free entries left, so we have to allocate a fresh page
2317     //
2318     Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2319     if (!Entry)
2320     {
2321 #if DBG
2322         //
2323         // Out of memory: display the current consumption.
2324         // If the caller wanted more than a hundred pages,
2325         // treat it as a bogus caller rather than a genuine
2326         // out-of-memory condition. Dump at most once a
2327         // second to avoid spamming the log.
2328         //
2329         if (NumberOfBytes < 100 * PAGE_SIZE &&
2330             KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
2331         {
2332             MiDumpPoolConsumers(FALSE, 0, 0, 0);
2333             MiLastPoolDumpTime = KeQueryInterruptTime();
2334         }
2335 #endif
2336 
2337         //
2338         // Must succeed pool is deprecated, but still supported. These allocation
2339         // failures must cause an immediate bugcheck
2340         //
2341         if (OriginalType & MUST_SUCCEED_POOL_MASK)
2342         {
2343             KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2344                          PAGE_SIZE,
2345                          NonPagedPoolDescriptor.TotalPages,
2346                          NonPagedPoolDescriptor.TotalBigPages,
2347                          0);
2348         }
2349 
2350         //
2351         // Internal debugging
2352         //
2353         ExPoolFailures++;
2354 
2355         //
2356         // This flag requests printing failures, and can also further specify
2357         // breaking on failures
2358         //
2359         if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2360         {
2361             DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2362                     NumberOfBytes,
2363                     OriginalType);
2364             if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2365         }
2366 
2367         //
2368         // Finally, this flag requests an exception, which we are more than
2369         // happy to raise!
2370         //
2371         if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2372         {
2373             ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2374         }
2375 
2376         //
2377         // Return NULL to the caller in all other cases
2378         //
2379         return NULL;
2380     }
2381 
2382     //
2383     // Setup the entry data
2384     //
2385     Entry->Ulong1 = 0;
2386     Entry->BlockSize = i;
2387     Entry->PoolType = OriginalType + 1;
2388 
2389     //
2390     // This page will have two entries -- one for the allocation (which we just
2391     // created above), and one for the remaining free bytes, which we're about
2392     // to create now. The free bytes are the whole page minus what was allocated
2393     // and then converted into units of block headers.
2394     //
2395     BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2396     FragmentEntry = POOL_BLOCK(Entry, i);
2397     FragmentEntry->Ulong1 = 0;
2398     FragmentEntry->BlockSize = BlockSize;
2399     FragmentEntry->PreviousSize = i;
2400 
2401     //
2402     // Increment required counters
2403     //
2404     InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2405     InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2406 
2407     //
2408     // Now check if enough free bytes remained for us to have a "full" entry,
2409     // which contains enough bytes for a linked list and thus can be used for
2410     // allocations (up to 8 bytes...)
2411     //
2412     if (FragmentEntry->BlockSize != 1)
2413     {
2414         //
2415         // Excellent -- acquire the pool lock
2416         //
2417         OldIrql = ExLockPool(PoolDesc);
2418 
2419         //
2420         // And insert the free entry into the free list for this block size
2421         //
2422         ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2423         ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2424                               POOL_FREE_BLOCK(FragmentEntry));
2425         ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2426 
2427         //
2428         // Release the pool lock
2429         //
2430         ExpCheckPoolBlocks(Entry);
2431         ExUnlockPool(PoolDesc, OldIrql);
2432     }
2433     else
2434     {
2435         //
2436         // Simply do a sanity check
2437         //
2438         ExpCheckPoolBlocks(Entry);
2439     }
2440 
2441     //
2442     // Increment performance counters and track this allocation
2443     //
2444     InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2445     ExpInsertPoolTracker(Tag,
2446                          Entry->BlockSize * POOL_BLOCK_SIZE,
2447                          OriginalType);
2448 
2449     //
2450     // And return the pool allocation
2451     //
2452     ExpCheckPoolBlocks(Entry);
2453     Entry->PoolTag = Tag;
2454     return POOL_FREE_BLOCK(Entry);
2455 }
2456 
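/*
 * Illustration (not compiled): the typical caller-side pairing of
 * ExAllocatePoolWithTag with ExFreePoolWithTag (defined further below).
 * The 'Xmpl' tag and the 128-byte size are arbitrary example values.
 */
#if 0
static VOID ExamplePoolCaller(VOID)
{
    PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 128, 'Xmpl');
    if (Buffer != NULL)
    {
        /* ... use the 128-byte buffer ... */
        ExFreePoolWithTag(Buffer, 'Xmpl');
    }
}
#endif
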
2457 /*
2458  * @implemented
2459  */
2460 PVOID
2461 NTAPI
2462 ExAllocatePool(POOL_TYPE PoolType,
2463                SIZE_T NumberOfBytes)
2464 {
2465     ULONG Tag = TAG_NONE;
2466 #if 0 && DBG
2467     PLDR_DATA_TABLE_ENTRY LdrEntry;
2468 
2469     /* Use the first four letters of the driver name, or "None" if unavailable */
2470     LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2471                 ? MiLookupDataTableEntry(_ReturnAddress())
2472                 : NULL;
2473     if (LdrEntry)
2474     {
2475         ULONG i;
2476         Tag = 0;
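        /* Pack up to four name characters into the tag from low byte to high,
         * so that they read in string order in a pool dump ("ntoskrnl.exe",
         * for instance, yields a tag whose bytes spell "ntos"); shorter
         * names are space-padded by the second loop */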
2477         for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2478             Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2479         for (; i < 4; i++)
2480             Tag = Tag >> 8 | ' ' << 24;
2481     }
2482 #endif
2483     return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2484 }
2485 
2486 /*
2487  * @implemented
2488  */
2489 VOID
2490 NTAPI
2491 ExFreePoolWithTag(IN PVOID P,
2492                   IN ULONG TagToFree)
2493 {
2494     PPOOL_HEADER Entry, NextEntry;
2495     USHORT BlockSize;
2496     KIRQL OldIrql;
2497     POOL_TYPE PoolType;
2498     PPOOL_DESCRIPTOR PoolDesc;
2499     ULONG Tag;
2500     BOOLEAN Combined = FALSE;
2501     PFN_NUMBER PageCount, RealPageCount;
2502     PKPRCB Prcb = KeGetCurrentPrcb();
2503     PGENERAL_LOOKASIDE LookasideList;
2504     PEPROCESS Process;
2505 
2506     //
2507     // Check if any of the debug flags are enabled
2508     //
2509     if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2510                         POOL_FLAG_CHECK_WORKERS |
2511                         POOL_FLAG_CHECK_RESOURCES |
2512                         POOL_FLAG_VERIFIER |
2513                         POOL_FLAG_CHECK_DEADLOCK |
2514                         POOL_FLAG_SPECIAL_POOL))
2515     {
2516         //
2517         // Check if special pool is enabled
2518         //
2519         if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2520         {
2521             //
2522             // Check if it was allocated from a special pool
2523             //
2524             if (MmIsSpecialPoolAddress(P))
2525             {
2526                 //
2527                 // Was deadlock verification also enabled? We can do some extra
2528                 // checks at this point
2529                 //
2530                 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2531                 {
2532                     DPRINT1("Verifier not yet supported\n");
2533                 }
2534 
2535                 //
2536                 // It is, so handle it via special pool free routine
2537                 //
2538                 MmFreeSpecialPool(P);
2539                 return;
2540             }
2541         }
2542 
2543         //
2544         // For non-big page allocations, we'll do a bunch of checks in here
2545         //
2546         if (PAGE_ALIGN(P) != P)
2547         {
2548             //
2549             // Get the entry for this pool allocation
2550             // The pointer math here may look wrong or confusing, but it is quite right
2551             //
2552             Entry = P;
2553             Entry--;
2554 
2555             //
2556             // Get the pool type
2557             //
2558             PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2559 
2560             //
2561             // FIXME: Many other debugging checks go here
2562             //
2563             ExpCheckPoolIrqlLevel(PoolType, 0, P);
2564         }
2565     }
2566 
2567     //
2568     // Check if this is a big page allocation
2569     //
2570     if (PAGE_ALIGN(P) == P)
2571     {
2572         //
2573         // We need to find the tag for it, so first we need to find out what
2574         // kind of allocation this was (paged or nonpaged), then we can go
2575         // ahead and try finding the tag for it. Remember to get rid of the
2576         // PROTECTED_POOL tag if it's found.
2577         //
2578         // Note that if at insertion time, we failed to add the tag for a big
2579         // pool allocation, we used a special tag called 'BIG' to identify the
2580         // allocation, and we may get this tag back. In this scenario, we must
2581         // manually get the size of the allocation by actually counting through
2582         // the PFN database.
2583         //
2584         PoolType = MmDeterminePoolType(P);
2585         ExpCheckPoolIrqlLevel(PoolType, 0, P);
2586         Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2587         if (!Tag)
2588         {
2589             DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2590             ASSERT(Tag == ' GIB');
2591             PageCount = 1; // We are going to lie! This might screw up accounting?
2592         }
2593         else if (Tag & PROTECTED_POOL)
2594         {
2595             Tag &= ~PROTECTED_POOL;
2596         }
2597 
2598         //
2599         // Check block tag
2600         //
2601         if (TagToFree && TagToFree != Tag)
2602         {
2603             DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2604 #if DBG
2605             /* Do not bugcheck in case this is a big allocation for which we didn't manage to insert the tag */
2606             if (Tag != ' GIB')
2607                 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2608 #endif
2609         }
2610 
2611         //
2612         // We have our tag and our page count, so we can go ahead and remove this
2613         // tracker now
2614         //
2615         ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2616 
2617         //
2618         // Check if any of the debug flags are enabled
2619         //
2620         if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2621                             POOL_FLAG_CHECK_WORKERS |
2622                             POOL_FLAG_CHECK_RESOURCES |
2623                             POOL_FLAG_CHECK_DEADLOCK))
2624         {
2625             //
2626             // Was deadlock verification also enabled? We can do some extra
2627             // checks at this point
2628             //
2629             if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2630             {
2631                 DPRINT1("Verifier not yet supported\n");
2632             }
2633 
2634             //
2635             // FIXME: Many debugging checks go here
2636             //
2637         }
2638 
2639         //
2640         // Update counters
2641         //
2642         PoolDesc = PoolVector[PoolType];
2643         InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2644         InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2645                                     -(LONG_PTR)(PageCount << PAGE_SHIFT));
2646 
2647         //
2648         // Do the real free now and update the last counter with the big page count
2649         //
2650         RealPageCount = MiFreePoolPages(P);
2651         ASSERT(RealPageCount == PageCount);
2652         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2653                                -(LONG)RealPageCount);
2654         return;
2655     }
2656 
2657     //
2658     // Get the entry for this pool allocation
2659     // The pointer math here may look wrong or confusing, but it is quite right
2660     //
2661     Entry = P;
2662     Entry--;
2663     ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2664 
2665     //
2666     // Get the size of the entry and its pool type, then load the descriptor
2667     // for this pool type
2668     //
2669     BlockSize = Entry->BlockSize;
2670     PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2671     PoolDesc = PoolVector[PoolType];
2672 
2673     //
2674     // Make sure that the IRQL makes sense
2675     //
2676     ExpCheckPoolIrqlLevel(PoolType, 0, P);
2677 
2678     //
2679     // Get the pool tag and get rid of the PROTECTED_POOL flag
2680     //
2681     Tag = Entry->PoolTag;
2682     if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2683 
2684     //
2685     // Check block tag
2686     //
2687     if (TagToFree && TagToFree != Tag)
2688     {
2689         DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2690 #if DBG
2691         KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2692 #endif
2693     }
2694 
2695     //
2696     // Track the removal of this allocation
2697     //
2698     ExpRemovePoolTracker(Tag,
2699                          BlockSize * POOL_BLOCK_SIZE,
2700                          Entry->PoolType - 1);
2701 
2702     //
2703     // Release pool quota, if any
2704     //
2705     if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2706     {
2707         Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2708         if (Process)
2709         {
2710             if (Process->Pcb.Header.Type != ProcessObject)
2711             {
2712                 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2713                         Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2714                 KeBugCheckEx(BAD_POOL_CALLER,
2715                              POOL_BILLED_PROCESS_INVALID,
2716                              (ULONG_PTR)P,
2717                              Tag,
2718                              (ULONG_PTR)Process);
2719             }
2720             PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2721             ObDereferenceObject(Process);
2722         }
2723     }
2724 
2725     //
2726     // Is this allocation small enough to have come from a lookaside list?
2727     //
2728     if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2729     {
2730         //
2731         // Try pushing it into the per-CPU lookaside list
2732         //
2733         LookasideList = (PoolType == PagedPool) ?
2734                          Prcb->PPPagedLookasideList[BlockSize - 1].P :
2735                          Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2736         LookasideList->TotalFrees++;
2737         if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2738         {
2739             LookasideList->FreeHits++;
2740             InterlockedPushEntrySList(&LookasideList->ListHead, P);
2741             return;
2742         }
2743 
2744         //
2745         // We failed, try to push it into the global lookaside list
2746         //
2747         LookasideList = (PoolType == PagedPool) ?
2748                          Prcb->PPPagedLookasideList[BlockSize - 1].L :
2749                          Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2750         LookasideList->TotalFrees++;
2751         if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2752         {
2753             LookasideList->FreeHits++;
2754             InterlockedPushEntrySList(&LookasideList->ListHead, P);
2755             return;
2756         }
2757     }
2758 
2759     //
2760     // Get the pointer to the next entry
2761     //
2762     NextEntry = POOL_BLOCK(Entry, BlockSize);
2763 
2764     //
2765     // Update performance counters
2766     //
2767     InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2768     InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2769 
2770     //
2771     // Acquire the pool lock
2772     //
2773     OldIrql = ExLockPool(PoolDesc);
2774 
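    //
    // Sketch of the coalescing attempted below, for a page holding three
    // blocks where A is being freed and its neighbor B is already free:
    //
    //     [ A (busy) ][ B (free) ][ C (busy) ] -> [ A+B (free) ][ C (busy) ]
    //
    // C's PreviousSize is then fixed up so that a later free of C can merge
    // the rest of the page as well
    //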
2775     //
2776     // Check if the next allocation is at the end of the page
2777     //
2778     ExpCheckPoolBlocks(Entry);
2779     if (PAGE_ALIGN(NextEntry) != NextEntry)
2780     {
2781         //
2782         // We may be able to combine the block if it's free
2783         //
2784         if (NextEntry->PoolType == 0)
2785         {
2786             //
2787             // The next block is free, so we'll do a combine
2788             //
2789             Combined = TRUE;
2790 
2791             //
2792             // Make sure there's actual data in the block -- anything smaller
2793             // than this means we only have the header, so there's no linked list
2794             // for us to remove
2795             //
2796             if ((NextEntry->BlockSize != 1))
2797             {
2798                 //
2799                 // The block is at least big enough to have a linked list, so go
2800                 // ahead and remove it
2801                 //
2802                 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2803                 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2804                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2805                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2806             }
2807 
2808             //
2809             // Our entry is now combined with the next entry
2810             //
2811             Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2812         }
2813     }
2814 
2815     //
2816     // Now check if there was a previous entry on the same page as us
2817     //
2818     if (Entry->PreviousSize)
2819     {
2820         //
2821         // Great, grab that entry and check if it's free
2822         //
2823         NextEntry = POOL_PREV_BLOCK(Entry);
2824         if (NextEntry->PoolType == 0)
2825         {
2826             //
2827             // It is, so we can do a combine
2828             //
2829             Combined = TRUE;
2830 
2831             //
2832             // Make sure there's actual data in the block -- anything smaller
2833             // than this means we only have the header so there's no linked list
2834             // for us to remove
2835             //
2836             if ((NextEntry->BlockSize != 1))
2837             {
2838                 //
2839                 // The block is at least big enough to have a linked list, so go
2840                 // ahead and remove it
2841                 //
2842                 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2843                 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2844                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2845                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2846             }
2847 
2848             //
2849             // Combine our original block (which might've already been combined
2850             // with the next block), into the previous block
2851             //
2852             NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2853 
2854             //
2855             // And now we'll work with the previous block instead
2856             //
2857             Entry = NextEntry;
2858         }
2859     }
2860 
2861     //
2862     // By now, it may have been possible for our combined blocks to actually
2863     // have made up a full page (if there were only 2-3 allocations on the
2864     // page, they could've all been combined).
2865     //
2866     if ((PAGE_ALIGN(Entry) == Entry) &&
2867         (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2868     {
2869         //
2870         // In this case, release the pool lock, update the performance counter,
2871         // and free the page
2872         //
2873         ExUnlockPool(PoolDesc, OldIrql);
2874         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2875         MiFreePoolPages(Entry);
2876         return;
2877     }
2878 
2879     //
2880     // Otherwise, we now have a free block (or a combination of 2 or 3)
2881     //
2882     Entry->PoolType = 0;
2883     BlockSize = Entry->BlockSize;
2884     ASSERT(BlockSize != 1);
2885 
2886     //
2887     // Check if we actually did combine it with anyone
2888     //
2889     if (Combined)
2890     {
2891         //
2892         // Get the first combined block (either our original to begin with, or
2893         // the one after the original, depending if we combined with the previous)
2894         //
2895         NextEntry = POOL_NEXT_BLOCK(Entry);
2896 
2897         //
2898         // As long as the next block isn't on a page boundary, have it point
2899         // back to us
2900         //
2901         if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2902     }
2903 
2904     //
2905     // Insert this new free block, and release the pool lock
2906     //
2907     ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2908     ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2909     ExUnlockPool(PoolDesc, OldIrql);
2910 }
2911 
2912 /*
2913  * @implemented
2914  */
2915 VOID
2916 NTAPI
2917 ExFreePool(PVOID P)
2918 {
2919     //
2920     // Just free without checking for the tag
2921     //
2922     ExFreePoolWithTag(P, 0);
2923 }
2924 
2925 /*
2926  * @unimplemented
2927  */
2928 SIZE_T
2929 NTAPI
2930 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2931                      OUT PBOOLEAN QuotaCharged)
2932 {
2933     //
2934     // Not implemented
2935     //
2936     UNIMPLEMENTED;
2937     return FALSE;
2938 }
2939 
2940 /*
2941  * @implemented
2942  */
2944 PVOID
2945 NTAPI
2946 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2947                         IN SIZE_T NumberOfBytes)
2948 {
2949     //
2950     // Allocate the pool
2951     //
2952     return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2953 }
2954 
2955 /*
2956  * @implemented
2957  */
2958 PVOID
2959 NTAPI
2960 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2961                               IN SIZE_T NumberOfBytes,
2962                               IN ULONG Tag,
2963                               IN EX_POOL_PRIORITY Priority)
2964 {
2965     PVOID Buffer;
2966 
2967     //
2968     // Allocate the pool
2969     //
2970     Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2971     if (Buffer == NULL)
2972     {
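        //
        // NB: the Priority hint (e.g. failing low-priority requests early
        // under memory pressure) is not honored yet; this behaves exactly
        // like plain ExAllocatePoolWithTag
        //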
2973         UNIMPLEMENTED;
2974     }
2975 
2976     return Buffer;
2977 }
2978 
2979 /*
2980  * @implemented
2981  */
2982 PVOID
2983 NTAPI
2984 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2985                            IN SIZE_T NumberOfBytes,
2986                            IN ULONG Tag)
2987 {
2988     BOOLEAN Raise = TRUE;
2989     PVOID Buffer;
2990     PPOOL_HEADER Entry;
2991     NTSTATUS Status;
2992     PEPROCESS Process = PsGetCurrentProcess();
2993 
2994     //
2995     // Check if we should fail instead of raising an exception
2996     //
2997     if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2998     {
2999         Raise = FALSE;
3000         PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
3001     }
3002 
3003     //
3004     // Inject the pool quota mask
3005     //
3006     PoolType += QUOTA_POOL_MASK;
3007 
3008     //
3009     // Check if we have enough space to add the quota owner process, as long as
3010     // this isn't the system process, which never gets charged quota
3011     //
3012     ASSERT(NumberOfBytes != 0);
3013     if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
3014         (Process != PsInitialSystemProcess))
3015     {
3016         //
3017         // Add space for our EPROCESS pointer
3018         //
3019         NumberOfBytes += sizeof(PEPROCESS);
3020     }
3021     else
3022     {
3023         //
3024         // We won't be able to store the pointer, so don't use quota for this
3025         //
3026         PoolType -= QUOTA_POOL_MASK;
3027     }
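    //
    // When quota is charged, the extra PVOID reserved above becomes the last
    // pointer-sized slot of the block; the owning process is written there
    // once the charge succeeds below
    //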
3028 
3029     //
3030     // Allocate the pool buffer now
3031     //
3032     Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
3033 
3034     //
3035     // If the buffer is page-aligned, this is a big page allocation and we
3036     // won't touch it
3037     //
3038     if (PAGE_ALIGN(Buffer) != Buffer)
3039     {
3040         //
3041         // Also if special pool is enabled, and this was allocated from there,
3042         // we won't touch it either
3043         //
3044         if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
3045             (MmIsSpecialPoolAddress(Buffer)))
3046         {
3047             return Buffer;
3048         }
3049 
3050         //
3051         // If it wasn't actually allocated with quota charges, ignore it too
3052         //
3053         if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
3054 
3055         //
3056         // If this is the system process, we don't charge quota, so ignore
3057         //
3058         if (Process == PsInitialSystemProcess) return Buffer;
3059 
3060         //
3061         // Actually go and charge quota for the process now
3062         //
3063         Entry = POOL_ENTRY(Buffer);
3064         Status = PsChargeProcessPoolQuota(Process,
3065                                           PoolType & BASE_POOL_TYPE_MASK,
3066                                           Entry->BlockSize * POOL_BLOCK_SIZE);
3067         if (!NT_SUCCESS(Status))
3068         {
3069             //
3070             // Quota failed, back out the allocation, clear the owner, and fail
3071             //
3072             ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
3073             ExFreePoolWithTag(Buffer, Tag);
3074             if (Raise) RtlRaiseStatus(Status);
3075             return NULL;
3076         }
3077 
3078         //
3079         // Quota worked, write the owner and then reference it before returning
3080         //
3081         ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
3082         ObReferenceObject(Process);
3083     }
3084     else if (!(Buffer) && (Raise))
3085     {
3086         //
3087         // The allocation failed, raise an error if we are in raise mode
3088         //
3089         RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
3090     }
3091 
3092     //
3093     // Return the allocated buffer
3094     //
3095     return Buffer;
3096 }
3097 
3098 /* EOF */
3099