/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
ULONG ExpBigTableExpansionFailed;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x)  + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))
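
/*
 * Illustrative sketch of the macro arithmetic (hypothetical addresses,
 * assuming a 32-bit build where both sizeof(POOL_HEADER) and POOL_BLOCK_SIZE
 * are 8 bytes): for a header Entry at 0x80001000 with Entry->BlockSize == 4,
 *
 *   POOL_FREE_BLOCK(Entry) == (PLIST_ENTRY)0x80001008  // data right after header
 *   POOL_NEXT_BLOCK(Entry) == (PPOOL_HEADER)0x80001020 // 4 blocks * 8 bytes
 *
 * BlockSize and PreviousSize are stored in POOL_BLOCK_SIZE units, not bytes.
 */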

/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
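
/*
 * Illustrative sketch of the encoding trick (hypothetical address): pool list
 * entries are always at least 4-byte aligned, so bit 0 of a valid pointer is
 * free to serve as a tag bit:
 *
 *   Link                     == (PLIST_ENTRY)0x80001008
 *   ExpEncodePoolLink(Link)  == (PLIST_ENTRY)0x80001009
 *   ExpDecodePoolLink(...)   == (PLIST_ENTRY)0x80001008
 *
 * Code that manipulates these lists with the generic LIST_ENTRY macros,
 * without decoding first, ends up with misaligned pointers that the
 * consistency checks in ExpCheckPoolLinks below can flag, rather than
 * silently corrupting the list.
 */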

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}
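
/*
 * Example of the invariants verified above (hypothetical layout, sizes in
 * POOL_BLOCK_SIZE units): a page carved into three blocks looks like
 *
 *   page start -> [A: PreviousSize=0, BlockSize=8]
 *                 [B: PreviousSize=8, BlockSize=4]
 *                 [C: PreviousSize=4, BlockSize=...]   ... up to the page end
 *
 * Each neighboring pair must agree on the size of the block between them, so
 * a corrupted B->PreviousSize, for instance, breaks the A/B handshake and
 * triggers the BAD_POOL_HEADER bugcheck with code 5 above.
 */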

VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}

VOID
NTAPI
ExpCheckPoolBlocks(IN PVOID Block)
{
    BOOLEAN FoundBlock = FALSE;
    SIZE_T Size = 0;
    PPOOL_HEADER Entry;

    /* Get the first entry for this page, make sure it really is the first */
    Entry = PAGE_ALIGN(Block);
    ASSERT(Entry->PreviousSize == 0);

    /* Now scan each entry */
    while (TRUE)
    {
        /* When we actually found our block, remember this */
        if (Entry == Block) FoundBlock = TRUE;

        /* Now validate this block header */
        ExpCheckPoolHeader(Entry);

        /* And go to the next one, keeping track of our size */
        Size += Entry->BlockSize;
        Entry = POOL_NEXT_BLOCK(Entry);

        /* If we hit the last block, stop */
        if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;

        /* If we hit the end of the page, stop */
        if (PAGE_ALIGN(Entry) == Entry) break;
    }

    /* We must've found our block, and we must have hit the end of the page */
    if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
    {
        /* Otherwise, the blocks are messed up */
        KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
    }
}

FORCEINLINE
VOID
ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN PVOID Entry)
{
    //
    // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
    // be DISPATCH_LEVEL or lower for Non Paged Pool
    //
    if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
        (KeGetCurrentIrql() > APC_LEVEL) :
        (KeGetCurrentIrql() > DISPATCH_LEVEL))
    {
        //
        // Take the system down
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
                     KeGetCurrentIrql(),
                     PoolType,
                     !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
    }
}
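
/*
 * Usage sketch (illustrative, hypothetical tag): the rule enforced above is
 * the classic NT pool IRQL contract, and callers must arrange their own IRQL
 * accordingly:
 *
 *   // OK: nonpaged pool may be touched at up to DISPATCH_LEVEL
 *   P = ExAllocatePoolWithTag(NonPagedPool, 64, 'tseT');
 *
 *   // BUG: the same call with PagedPool at DISPATCH_LEVEL would land here
 *   // and bugcheck with BAD_POOL_CALLER / POOL_ALLOC_IRQL_INVALID
 */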

FORCEINLINE
ULONG
ExpComputeHashForTag(IN ULONG Tag,
                     IN SIZE_T BucketMask)
{
    //
    // Compute the hash by multiplying with a large prime number and then XORing
    // with the HIDWORD of the result.
    //
    // Finally, AND with the bucket mask to generate a valid index/bucket into
    // the table
    //
    ULONGLONG Result = (ULONGLONG)40543 * Tag;
    return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
}
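
/*
 * Worked sketch (hypothetical tag and table size): with a 2048-bucket table
 * the mask is 0x7FF, so for some tag value T the bucket is computed as
 *
 *   ULONGLONG Result = 40543ULL * T;                         // 64-bit product
 *   ULONG Hash = 0x7FF & ((ULONG)Result ^ (ULONG)(Result >> 32));
 *
 * Folding the high dword back into the low one keeps the distribution
 * reasonable even for tags that differ only in their first character.
 */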

FORCEINLINE
ULONG
ExpComputePartialHashForAddress(IN PVOID BaseAddress)
{
    ULONG Result;
    //
    // Compute the hash by converting the address into a page number, and then
    // XORing each byte with the next one.
    //
    // We do *NOT* AND with the bucket mask at this point because big table expansion
    // might happen. Therefore, the final step of the hash must be performed
    // while holding the expansion pushlock, and this is why we call this a
    // "partial" hash only.
    //
    Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
    return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
}
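
/*
 * Illustrative example (hypothetical address, 4KB pages): for
 * BaseAddress == (PVOID)0xFFB4D000,
 *
 *   Result       = 0xFFB4D000 >> 12  ==  0x000FFB4D
 *   partial hash = 0x00000000 ^ 0x0000000F ^ 0x00000FFB ^ 0x000FFB4D
 *                ==  0x000FF4B9
 *
 * The caller later ANDs this with PoolBigPageTableHash under the lock
 * (e.g. 0xFFF for a 4096-entry table), giving bucket 0x4B9.
 */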

#if DBG
FORCEINLINE
BOOLEAN
ExpTagAllowPrint(CHAR Tag)
{
    if ((Tag >= 'a' && Tag <= 'z') ||
        (Tag >= 'A' && Tag <= 'Z') ||
        (Tag >= '0' && Tag <= '9') ||
        Tag == ' ' || Tag == '=' ||
        Tag == '?' || Tag == '@')
    {
        return TRUE;
    }

    return FALSE;
}

#ifdef KDBG
#define MiDumperPrint(dbg, fmt, ...)        \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)
#else
#define MiDumperPrint(dbg, fmt, ...)        \
    DPRINT1(fmt, ##__VA_ARGS__)
#endif

VOID
MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
{
    SIZE_T i;
    BOOLEAN Verbose;

    //
    // Only print the header if we were called from an OOM situation
    //
    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
        DPRINT1("Out of memory dumper!\n");
    }
#ifdef KDBG
    else
    {
        KdbpPrint("Pool Used:\n");
    }
#endif

    //
    // Remember whether we'll have to be verbose.
    // This is the only supported flag!
    //
    Verbose = BooleanFlagOn(Flags, 1);

    //
    // Print table header
    //
    if (Verbose)
    {
        MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
    }
    else
    {
        MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
    }

    //
    // We'll extract allocations for all the tracked pools
    //
    for (i = 0; i < PoolTrackTableSize; ++i)
    {
        PPOOL_TRACKER_TABLE TableEntry;

        TableEntry = &PoolTrackTable[i];

        //
        // We only care about tags which have allocated memory
        //
        if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
        {
            //
            // If there's a tag, attempt to do a pretty print, but only if it
            // matches the caller's tag, or if any tag is allowed.
            // For checking whether it matches the caller's tag, use the mask
            // to make sure not to mess with the wildcards
            //
            if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
                (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
            {
                CHAR Tag[4];

                //
                // Extract each 'component' and check whether they are printable
                //
                Tag[0] = TableEntry->Key & 0xFF;
                Tag[1] = TableEntry->Key >> 8 & 0xFF;
                Tag[2] = TableEntry->Key >> 16 & 0xFF;
                Tag[3] = TableEntry->Key >> 24 & 0xFF;

                if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
                {
                    //
                    // Print in direct order to make !poolused TAG usage easier
                    //
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
                else
                {
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
            }
            else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
            {
                if (Verbose)
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                }
                else
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedBytes);
                }
            }
        }
    }

    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
    }
}
#endif

/* PRIVATE FUNCTIONS **********************************************************/

INIT_FUNCTION
VOID
NTAPI
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        '  oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        '  eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        '  pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}

VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

INIT_FUNCTION
VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support Session Pool Yet
    //
    ASSERT(PoolType != PagedPoolSession);
}

INIT_FUNCTION
VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;
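
        //
        // Example with the default: 2048 entries become 2049 allocated slots,
        // and the mask becomes 2047 (0x7FF). The extra, last slot is the one
        // that ExpSeedHotTags and ExpInsertPoolTracker deliberately never
        // claim (Hash != PoolTrackTableSize - 1).
        //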

        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // Finally, add the most used tags to speed up those allocations
        //
        ExpSeedHotTags();

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            PoolBigPageTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
        }

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
                PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
                PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));

        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
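
/*
 * Usage sketch (illustrative): descriptor accesses bracket the lock like so,
 * threading the returned IRQL back into the release:
 *
 *   KIRQL OldIrql = ExLockPool(PoolDesc);
 *   ... manipulate PoolDesc->ListHeads ...
 *   ExUnlockPool(PoolDesc, OldIrql);
 *
 * For nonpaged pool the returned value is the pre-acquire IRQL; for paged
 * pool it is simply APC_LEVEL, since the guarded mutex handles the rest.
 */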

VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}

NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}

_IRQL_requires_(DISPATCH_LEVEL)
BOOLEAN
NTAPI
ExpExpandBigPageTable(
    _In_ _IRQL_restores_ KIRQL OldIrql)
{
    ULONG OldSize = PoolBigPageTableSize;
    ULONG NewSize = 2 * OldSize;
    ULONG NewSizeInBytes;
    PPOOL_TRACKER_BIG_PAGES NewTable;
    PPOOL_TRACKER_BIG_PAGES OldTable;
    ULONG i;
    ULONG PagesFreed;
    ULONG Hash;
    ULONG HashMask;

    /* Must be holding ExpLargePoolTableLock */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    /* Make sure we don't overflow */
    if (!NT_SUCCESS(RtlULongMult(2,
                                 OldSize * sizeof(POOL_TRACKER_BIG_PAGES),
                                 &NewSizeInBytes)))
    {
        DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
    if (NewTable == NULL)
    {
        DPRINT1("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    DPRINT("Expanding big pool tracker table to %lu entries\n", NewSize);

    /* Initialize the new table */
    RtlZeroMemory(NewTable, NewSizeInBytes);
    for (i = 0; i < NewSize; i++)
    {
        NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
    }

    /* Copy over all items */
    OldTable = PoolBigPageTable;
    HashMask = NewSize - 1;
    for (i = 0; i < OldSize; i++)
    {
        /* Skip over empty items */
        if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
        {
            continue;
        }

        /* Recalculate the hash due to the new table size */
        Hash = ExpComputePartialHashForAddress(OldTable[i].Va) & HashMask;

        /* Find the location in the new table */
        while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
        {
            Hash = (Hash + 1) & HashMask;
        }

        /* We just enlarged the table, so we must have space */
        ASSERT((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE);

        /* Finally, copy the item */
        NewTable[Hash] = OldTable[i];
    }

    /* Activate the new table */
    PoolBigPageTable = NewTable;
    PoolBigPageTableSize = NewSize;
    PoolBigPageTableHash = PoolBigPageTableSize - 1;

    /* Release the lock, we're done changing global state */
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

    /* Free the old table and update our tracker */
    PagesFreed = MiFreePoolPages(OldTable);
    ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
    ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);

    return TRUE;
}
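
/*
 * Design note: because the table is open-addressed, the copy loop above must
 * rehash every live Va with the new, larger mask rather than copying entries
 * to their old indices; an entry's bucket is a function of the table size, so
 * a plain memory copy would strand entries where lookups can no longer find
 * them.
 */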
1543 
1544 BOOLEAN
1545 NTAPI
1546 ExpAddTagForBigPages(IN PVOID Va,
1547                      IN ULONG Key,
1548                      IN ULONG NumberOfPages,
1549                      IN POOL_TYPE PoolType)
1550 {
1551     ULONG Hash, i = 0;
1552     PVOID OldVa;
1553     KIRQL OldIrql;
1554     SIZE_T TableSize;
1555     PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1556     ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1557     ASSERT(!(PoolType & SESSION_POOL_MASK));
1558 
1559     //
1560     // As the table is expandable, these values must only be read after acquiring
1561     // the lock to avoid a teared access during an expansion
1562     // NOTE: Windows uses a special reader/writer SpinLock to improve
1563     // performance in the common case (add/remove a tracker entry)
1564     //
1565 Retry:
1566     Hash = ExpComputePartialHashForAddress(Va);
1567     KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1568     Hash &= PoolBigPageTableHash;
1569     TableSize = PoolBigPageTableSize;
1570 
1571     //
1572     // We loop from the current hash bucket to the end of the table, and then
1573     // rollover to hash bucket 0 and keep going from there. If we return back
1574     // to the beginning, then we attempt expansion at the bottom of the loop
1575     //
1576     EntryStart = Entry = &PoolBigPageTable[Hash];
1577     EntryEnd = &PoolBigPageTable[TableSize];
1578     do
1579     {
1580         //
1581         // Make sure that this is a free entry and attempt to atomically make the
1582         // entry busy now
1583         // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
1584         //
1585         OldVa = Entry->Va;
1586         if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1587             (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
1588         {
1589             //
1590             // We now own this entry, write down the size and the pool tag
1591             //
1592             Entry->Key = Key;
1593             Entry->NumberOfPages = NumberOfPages;
1594 
1595             //
1596             // Add one more entry to the count, and see if we're getting within
1597             // 25% of the table size, at which point we'll do an expansion now
1598             // to avoid blocking too hard later on.
1599             //
1600             // Note that we only do this if it's also been the 16th time that we
1601             // keep losing the race or that we are not finding a free entry anymore,
1602             // which implies a massive number of concurrent big pool allocations.
1603             //
1604             InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1605             if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1606             {
1607                 DPRINT("Attempting expansion since we now have %lu entries\n",
1608                         ExpPoolBigEntriesInUse);
1609                 ASSERT(TableSize == PoolBigPageTableSize);
1610                 ExpExpandBigPageTable(OldIrql);
1611                 return TRUE;
1612             }
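
                 //
                 // NOTE: there is deliberately no KeReleaseSpinLock on the path
                 // above -- ExpExpandBigPageTable takes ownership of OldIrql
                 // and drops ExpLargePoolTableLock itself before returning
                 //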
1613 
1614             //
1615             // We have our entry, return
1616             //
1617             KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1618             return TRUE;
1619         }
1620 
1621         //
1622         // We don't have our entry yet, so keep trying, making the entry list
1623         // circular if we reach the last entry. We'll eventually break out of
1624         // the loop once we've rolled over and returned back to our original
1625         // hash bucket
1626         //
1627         i++;
1628         if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1629     } while (Entry != EntryStart);
1630 
1631     //
1632     // This means there are no free hash buckets whatsoever, so we now have
1633     // to attempt expanding the table
1634     //
1635     ASSERT(TableSize == PoolBigPageTableSize);
1636     if (ExpExpandBigPageTable(OldIrql))
1637     {
1638         goto Retry;
1639     }
1640     ExpBigTableExpansionFailed++;
1641     DPRINT1("Big pool table expansion failed\n");
1642     return FALSE;
1643 }
1644 
1645 ULONG
1646 NTAPI
1647 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1648                             OUT PULONG_PTR BigPages,
1649                             IN POOL_TYPE PoolType)
1650 {
1651     BOOLEAN FirstTry = TRUE;
1652     SIZE_T TableSize;
1653     KIRQL OldIrql;
1654     ULONG PoolTag, Hash;
1655     PPOOL_TRACKER_BIG_PAGES Entry;
1656     ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1657     ASSERT(!(PoolType & SESSION_POOL_MASK));
1658 
1659     //
1660     // As the table is expandable, these values must only be read after acquiring
1661     // the lock to avoid a torn access during an expansion
1662     //
1663     Hash = ExpComputePartialHashForAddress(Va);
1664     KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1665     Hash &= PoolBigPageTableHash;
1666     TableSize = PoolBigPageTableSize;
1667 
1668     //
1669     // Loop while trying to find this big page allocation
1670     //
1671     while (PoolBigPageTable[Hash].Va != Va)
1672     {
1673         //
1674         // Increment the hash index until we go past the end of the table
1675         //
1676         if (++Hash >= TableSize)
1677         {
1678             //
1679             // Is this the second time we've tried?
1680             //
1681             if (!FirstTry)
1682             {
1683                 //
1684                 // This means it was never inserted into the pool table and it
1685                 // received the special "BIG" tag -- return that tag with a page
1686                 // count of 0, so the caller can ask Mm for the size instead
1687                 //
1688                 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1689                 *BigPages = 0;
1690                 return ' GIB';
1691             }
1692 
1693             //
1694             // The first time this happens, reset the hash index and try again
1695             //
1696             Hash = 0;
1697             FirstTry = FALSE;
1698         }
1699     }
1700 
1701     //
1702     // Now capture all the information we need from the entry, since after we
1703     // release the lock, the data can change
1704     //
1705     Entry = &PoolBigPageTable[Hash];
1706     *BigPages = Entry->NumberOfPages;
1707     PoolTag = Entry->Key;
1708 
1709     //
1710     // Set the free bit, and decrement the number of allocations. Finally, release
1711     // the lock and return the tag that was located
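     //
     // NOTE: since Va here is page-aligned, its low bit is clear, so the
     // increment below is equivalent to setting POOL_BIG_TABLE_ENTRY_FREE:
     // e.g. 0xF0000000 becomes 0xF0000001, and the original address stays
     // recoverable by masking the bit back off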
1712     //
1713     InterlockedIncrement((PLONG)&Entry->Va);
1714     InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1715     KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1716     return PoolTag;
1717 }
1718 
1719 VOID
1720 NTAPI
1721 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1722                  OUT PULONG NonPagedPoolPages,
1723                  OUT PULONG PagedPoolAllocs,
1724                  OUT PULONG PagedPoolFrees,
1725                  OUT PULONG PagedPoolLookasideHits,
1726                  OUT PULONG NonPagedPoolAllocs,
1727                  OUT PULONG NonPagedPoolFrees,
1728                  OUT PULONG NonPagedPoolLookasideHits)
1729 {
1730     ULONG i;
1731     PPOOL_DESCRIPTOR PoolDesc;
1732 
1733     //
1734     // Zero the totals (the lookaside hit counters below are accumulated
1735     // with += and must be cleared here as well)
1736     //
1737     *PagedPoolPages = 0;
1738     *PagedPoolAllocs = 0;
     *PagedPoolFrees = 0;
     *PagedPoolLookasideHits = 0;
     *NonPagedPoolLookasideHits = 0;
1739 
1740     //
1741     // Tally up the totals for all the paged pools
1742     //
1743     for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1744     {
1745         PoolDesc = ExpPagedPoolDescriptor[i];
1746         *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1747         *PagedPoolAllocs += PoolDesc->RunningAllocs;
1748         *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1749     }
1750 
1751     //
1752     // The first non-paged pool has a hardcoded well-known descriptor name
1753     //
1754     PoolDesc = &NonPagedPoolDescriptor;
1755     *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1756     *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1757     *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1758 
1759     //
1760     // If the system has more than one non-paged pool, copy the other descriptor
1761     // totals as well
1762     //
1763 #if 0
1764     if (ExpNumberOfNonPagedPools > 1)
1765     {
1766         for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1767         {
1768             PoolDesc = ExpNonPagedPoolDescriptor[i];
1769             *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1770             *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1771             *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1772         }
1773     }
1774 #endif
1775 
1776     //
1777     // Get the amount of hits in the system lookaside lists
1778     //
1779     if (!IsListEmpty(&ExPoolLookasideListHead))
1780     {
1781         PLIST_ENTRY ListEntry;
1782 
1783         for (ListEntry = ExPoolLookasideListHead.Flink;
1784              ListEntry != &ExPoolLookasideListHead;
1785              ListEntry = ListEntry->Flink)
1786         {
1787             PGENERAL_LOOKASIDE Lookaside;
1788 
1789             Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1790 
1791             if (Lookaside->Type == NonPagedPool)
1792             {
1793                 *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1794             }
1795             else
1796             {
1797                 *PagedPoolLookasideHits += Lookaside->AllocateHits;
1798             }
1799         }
1800     }
1801 }
1802 
1803 VOID
1804 NTAPI
1805 ExReturnPoolQuota(IN PVOID P)
1806 {
1807     PPOOL_HEADER Entry;
1808     POOL_TYPE PoolType;
1809     USHORT BlockSize;
1810     PEPROCESS Process;
1811 
1812     if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1813         (MmIsSpecialPoolAddress(P)))
1814     {
1815         return;
1816     }
1817 
1818     Entry = P;
1819     Entry--;
1820     ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1821 
1822     PoolType = Entry->PoolType - 1;
1823     BlockSize = Entry->BlockSize;
1824 
1825     if (PoolType & QUOTA_POOL_MASK)
1826     {
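         //
         // (POOL_NEXT_BLOCK points one block past this allocation, so the
         // [-1] below reads the last pointer-sized slot of the block --
         // where the quota owner's EPROCESS pointer was stashed at
         // allocation time)
         //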
1827         Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1828         ASSERT(Process != NULL);
1829         if (Process)
1830         {
1831             if (Process->Pcb.Header.Type != ProcessObject)
1832             {
1833                 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1834                         Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1835                 KeBugCheckEx(BAD_POOL_CALLER,
1836                              0x0D,
1837                              (ULONG_PTR)P,
1838                              Entry->PoolTag,
1839                              (ULONG_PTR)Process);
1840             }
1841             ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1842             PsReturnPoolQuota(Process,
1843                               PoolType & BASE_POOL_TYPE_MASK,
1844                               BlockSize * POOL_BLOCK_SIZE);
1845             ObDereferenceObject(Process);
1846         }
1847     }
1848 }
1849 
1850 /* PUBLIC FUNCTIONS ***********************************************************/
1851 
1852 /*
1853  * @implemented
1854  */
1855 PVOID
1856 NTAPI
1857 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1858                       IN SIZE_T NumberOfBytes,
1859                       IN ULONG Tag)
1860 {
1861     PPOOL_DESCRIPTOR PoolDesc;
1862     PLIST_ENTRY ListHead;
1863     PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1864     KIRQL OldIrql;
1865     USHORT BlockSize, i;
1866     ULONG OriginalType;
1867     PKPRCB Prcb = KeGetCurrentPrcb();
1868     PGENERAL_LOOKASIDE LookasideList;
1869 
1870     //
1871     // Some sanity checks
1872     //
1873     ASSERT(Tag != 0);
1874     ASSERT(Tag != ' GIB');
1875     ASSERT(NumberOfBytes != 0);
1876     ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1877 
1878     //
1879     // Not supported in ReactOS
1880     //
1881     ASSERT(!(PoolType & SESSION_POOL_MASK));
1882 
1883     //
1884     // Check if verifier or special pool is enabled
1885     //
1886     if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1887     {
1888         //
1889         // For verifier, we should call the verification routine
1890         //
1891         if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1892         {
1893             DPRINT1("Driver Verifier is not yet supported\n");
1894         }
1895 
1896         //
1897         // For special pool, we check if this is a suitable allocation and do
1898         // the special allocation if needed
1899         //
1900         if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1901         {
1902             //
1903             // Check if this is a special pool allocation
1904             //
1905             if (MmUseSpecialPool(NumberOfBytes, Tag))
1906             {
1907                 //
1908                 // Try to allocate using special pool
1909                 //
1910                 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1911                 if (Entry) return Entry;
1912             }
1913         }
1914     }
1915 
1916     //
1917     // Get the pool type and its corresponding vector for this request
1918     //
1919     OriginalType = PoolType;
1920     PoolType = PoolType & BASE_POOL_TYPE_MASK;
1921     PoolDesc = PoolVector[PoolType];
1922     ASSERT(PoolDesc != NULL);
1923 
1924     //
1925     // Check if this is a big page allocation
1926     //
1927     if (NumberOfBytes > POOL_MAX_ALLOC)
1928     {
1929         //
1930         // Allocate pages for it
1931         //
1932         Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1933         if (!Entry)
1934         {
1935 #if DBG
1936             //
1937             // Out of memory -- display current consumption.
1938             // If the caller wanted more than a hundred pages,
1939             // treat it as a bogus caller rather than genuine
1940             // memory pressure, and skip the dump
1941             //
1942             if (NumberOfBytes < 100 * PAGE_SIZE)
1943             {
1944                 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1945             }
1946 #endif
1947 
1948             //
1949             // Must succeed pool is deprecated, but still supported. These allocation
1950             // failures must cause an immediate bugcheck
1951             //
1952             if (OriginalType & MUST_SUCCEED_POOL_MASK)
1953             {
1954                 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1955                              NumberOfBytes,
1956                              NonPagedPoolDescriptor.TotalPages,
1957                              NonPagedPoolDescriptor.TotalBigPages,
1958                              0);
1959             }
1960 
1961             //
1962             // Internal debugging
1963             //
1964             ExPoolFailures++;
1965 
1966             //
1967             // This flag requests printing failures, and can also further specify
1968             // breaking on failures
1969             //
1970             if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1971             {
1972                 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1973                         NumberOfBytes,
1974                         OriginalType);
1975                 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1976             }
1977 
1978             //
1979             // Finally, this flag requests an exception, which we are more than
1980             // happy to raise!
1981             //
1982             if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1983             {
1984                 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1985             }
1986 
1987             return NULL;
1988         }
1989 
1990         //
1991         // Increment required counters
1992         //
1993         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1994                                (LONG)BYTES_TO_PAGES(NumberOfBytes));
1995         InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1996         InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1997 
1998         //
1999         // Add a tag for the big page allocation and switch to the generic "BIG"
2000         // tag if we failed to do so, then insert a tracker for this allocation.
2001         //
2002         if (!ExpAddTagForBigPages(Entry,
2003                                   Tag,
2004                                   (ULONG)BYTES_TO_PAGES(NumberOfBytes),
2005                                   OriginalType))
2006         {
2007             Tag = ' GIB';
2008         }
2009         ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
2010         return Entry;
2011     }
2012 
2013     //
2014     // Should never request 0 bytes from the pool, but since so many drivers do
2015     // it, we'll just assume they want 1 byte, based on NT's similar behavior
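     // (the zero-size ASSERT near the top of this function only fires on
     // checked builds, so free builds do reach this point)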
2016     //
2017     if (!NumberOfBytes) NumberOfBytes = 1;
2018 
2019     //
2020     // A pool allocation is defined by its data, a linked list to connect it to
2021     // the free list (if necessary), and a pool header to store accounting info.
2022     // Calculate this size, then convert it into a block size (units of pool
2023     // headers)
2024     //
2025     // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
2026     // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
2027     // the direct allocation of pages.
2028     //
2029     i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
2030                  / POOL_BLOCK_SIZE);
2031     ASSERT(i < POOL_LISTS_PER_PAGE);
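
     //
     // Worked example, assuming a 32-bit build where POOL_BLOCK_SIZE and
     // sizeof(POOL_HEADER) are both 8: a 100-byte request yields
     // i = (100 + 8 + 7) / 8 = 14 blocks, i.e. 112 bytes including the header
     //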
2032 
2033     //
2034     // Handle lookaside list optimization for both paged and nonpaged pool
2035     //
2036     if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
2037     {
2038         //
2039         // Try popping it from the per-CPU lookaside list
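         // (the ".P" member is the processor-local list; the shared ".L"
         // list is only consulted below if this one comes up empty)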
2040         //
2041         LookasideList = (PoolType == PagedPool) ?
2042                          Prcb->PPPagedLookasideList[i - 1].P :
2043                          Prcb->PPNPagedLookasideList[i - 1].P;
2044         LookasideList->TotalAllocates++;
2045         Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
2046         if (!Entry)
2047         {
2048             //
2049             // We failed, try popping it from the global list
2050             //
2051             LookasideList = (PoolType == PagedPool) ?
2052                              Prcb->PPPagedLookasideList[i - 1].L :
2053                              Prcb->PPNPagedLookasideList[i - 1].L;
2054             LookasideList->TotalAllocates++;
2055             Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
2056         }
2057 
2058         //
2059         // If we were able to pop it, update the accounting and return the block
2060         //
2061         if (Entry)
2062         {
2063             LookasideList->AllocateHits++;
2064 
2065             //
2066             // Get the real entry, write down its pool type, and track it
2067             //
2068             Entry--;
2069             Entry->PoolType = OriginalType + 1;
2070             ExpInsertPoolTracker(Tag,
2071                                  Entry->BlockSize * POOL_BLOCK_SIZE,
2072                                  OriginalType);
2073 
2074             //
2075             // Return the pool allocation
2076             //
2077             Entry->PoolTag = Tag;
2078             (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2079             (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2080             return POOL_FREE_BLOCK(Entry);
2081         }
2082     }
2083 
2084     //
2085     // Loop through the free lists looking for a block of this size. Start with the
2086     // list optimized for this kind of size lookup
2087     //
2088     ListHead = &PoolDesc->ListHeads[i];
2089     do
2090     {
2091         //
2092         // Are there any free entries available on this list?
2093         //
2094         if (!ExpIsPoolListEmpty(ListHead))
2095         {
2096             //
2097             // Acquire the pool lock now
2098             //
2099             OldIrql = ExLockPool(PoolDesc);
2100 
2101             //
2102             // And make sure the list still has entries
2103             //
2104             if (ExpIsPoolListEmpty(ListHead))
2105             {
2106                 //
2107                 // Someone raced us (and won) before we had a chance to acquire
2108                 // the lock.
2109                 //
2110                 // Try again!
2111                 //
2112                 ExUnlockPool(PoolDesc, OldIrql);
2113                 continue;
2114             }
2115 
2116             //
2117             // Remove a free entry from the list
2118             // Note that due to the way we insert free blocks into multiple lists
2119             // there is a guarantee that any block on this list will be of the
2120             // correct size or larger.
2121             //
2122             ExpCheckPoolLinks(ListHead);
2123             Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
2124             ExpCheckPoolLinks(ListHead);
2125             ExpCheckPoolBlocks(Entry);
2126             ASSERT(Entry->BlockSize >= i);
2127             ASSERT(Entry->PoolType == 0);
2128 
2129             //
2130             // Check if this block is larger than what we need. The block could
2131             // not possibly be smaller, due to the reason explained above (and
2132             // we would've asserted on a checked build if this was the case).
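             //
             // Worked example: if we need i == 4 blocks and the list hands us
             // a 10-block entry, the split below yields a 4-block allocation
             // plus a 6-block fragment that is re-queued on ListHeads[5]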
2133             //
2134             if (Entry->BlockSize != i)
2135             {
2136                 //
2137                 // Is there an entry before this one?
2138                 //
2139                 if (Entry->PreviousSize == 0)
2140                 {
2141                     //
2142                     // There isn't anyone before us, so take the next block and
2143                     // turn it into a fragment that contains the leftover data
2144                     // that we don't need to satisfy the caller's request
2145                     //
2146                     FragmentEntry = POOL_BLOCK(Entry, i);
2147                     FragmentEntry->BlockSize = Entry->BlockSize - i;
2148 
2149                     //
2150                     // And make it point back to us
2151                     //
2152                     FragmentEntry->PreviousSize = i;
2153 
2154                     //
2155                     // Now get the block that follows the new fragment and check
2156                     // if it's still on the same page as us (and not at the end)
2157                     //
2158                     NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2159                     if (PAGE_ALIGN(NextEntry) != NextEntry)
2160                     {
2161                         //
2162                         // Adjust this next block to point to our newly created
2163                         // fragment block
2164                         //
2165                         NextEntry->PreviousSize = FragmentEntry->BlockSize;
2166                     }
2167                 }
2168                 else
2169                 {
2170                     //
2171                     // There is a free entry before us, which we know is smaller
2172                     // so we'll make this entry the fragment instead
2173                     //
2174                     FragmentEntry = Entry;
2175 
2176                     //
2177                     // And then we'll remove from it the actual size required.
2178                     // Now the entry is a leftover free fragment
2179                     //
2180                     Entry->BlockSize -= i;
2181 
2182                     //
2183                     // Now let's go to the next entry after the fragment (which
2184                     // used to point to our original free entry) and make it
2185                     // reference the new fragment entry instead.
2186                     //
2187                     // This is the entry that will actually end up holding the
2188                     // allocation!
2189                     //
2190                     Entry = POOL_NEXT_BLOCK(Entry);
2191                     Entry->PreviousSize = FragmentEntry->BlockSize;
2192 
2193                     //
2194                     // And now let's go to the entry after that one and check if
2195                     // it's still on the same page, and not at the end
2196                     //
2197                     NextEntry = POOL_BLOCK(Entry, i);
2198                     if (PAGE_ALIGN(NextEntry) != NextEntry)
2199                     {
2200                         //
2201                         // Make it reference the allocation entry
2202                         //
2203                         NextEntry->PreviousSize = i;
2204                     }
2205                 }
2206 
2207                 //
2208                 // Now our (allocation) entry is the right size
2209                 //
2210                 Entry->BlockSize = i;
2211 
2212                 //
2213                 // And the next entry is now the free fragment which contains
2214                 // the remaining difference between how big the original entry
2215                 // was, and the actual size the caller needs/requested.
2216                 //
2217                 FragmentEntry->PoolType = 0;
2218                 BlockSize = FragmentEntry->BlockSize;
2219 
2220                 //
2221                 // Now check if enough free bytes remained for us to have a
2222                 // "full" entry, which contains enough bytes for a linked list
2223                 // and thus can be used for allocations (up to 8 bytes...)
2224                 //
2225                 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2226                 if (BlockSize != 1)
2227                 {
2228                     //
2229                     // Insert the free entry into the free list for this size
2230                     //
2231                     ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2232                                           POOL_FREE_BLOCK(FragmentEntry));
2233                     ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2234                 }
2235             }
2236 
2237             //
2238             // We have found an entry for this allocation, so set the pool type
2239             // and release the lock since we're done
2240             //
2241             Entry->PoolType = OriginalType + 1;
2242             ExpCheckPoolBlocks(Entry);
2243             ExUnlockPool(PoolDesc, OldIrql);
2244 
2245             //
2246             // Increment required counters
2247             //
2248             InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2249             InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2250 
2251             //
2252             // Track this allocation
2253             //
2254             ExpInsertPoolTracker(Tag,
2255                                  Entry->BlockSize * POOL_BLOCK_SIZE,
2256                                  OriginalType);
2257 
2258             //
2259             // Return the pool allocation
2260             //
2261             Entry->PoolTag = Tag;
2262             (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2263             (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2264             return POOL_FREE_BLOCK(Entry);
2265         }
2266     } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2267 
2268     //
2269     // There were no free entries left, so we have to allocate a new fresh page
2270     //
2271     Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2272     if (!Entry)
2273     {
2274 #if DBG
2275         //
2276         // Out of memory -- display current consumption.
2277         // If the caller wanted more than a hundred pages,
2278         // treat it as a bogus caller rather than genuine
2279         // memory pressure, and skip the dump
2280         //
2281         if (NumberOfBytes < 100 * PAGE_SIZE)
2282         {
2283             MiDumpPoolConsumers(FALSE, 0, 0, 0);
2284         }
2285 #endif
2286 
2287         //
2288         // Must succeed pool is deprecated, but still supported. These allocation
2289         // failures must cause an immediate bugcheck
2290         //
2291         if (OriginalType & MUST_SUCCEED_POOL_MASK)
2292         {
2293             KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2294                          PAGE_SIZE,
2295                          NonPagedPoolDescriptor.TotalPages,
2296                          NonPagedPoolDescriptor.TotalBigPages,
2297                          0);
2298         }
2299 
2300         //
2301         // Internal debugging
2302         //
2303         ExPoolFailures++;
2304 
2305         //
2306         // This flag requests printing failures, and can also further specify
2307         // breaking on failures
2308         //
2309         if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2310         {
2311             DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2312                     NumberOfBytes,
2313                     OriginalType);
2314             if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2315         }
2316 
2317         //
2318         // Finally, this flag requests an exception, which we are more than
2319         // happy to raise!
2320         //
2321         if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2322         {
2323             ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2324         }
2325 
2326         //
2327         // Return NULL to the caller in all other cases
2328         //
2329         return NULL;
2330     }
2331 
2332     //
2333     // Setup the entry data
2334     //
2335     Entry->Ulong1 = 0;
2336     Entry->BlockSize = i;
2337     Entry->PoolType = OriginalType + 1;
2338 
2339     //
2340     // This page will have two entries -- one for the allocation (which we just
2341     // created above), and one for the remaining free bytes, which we're about
2342     // to create now. The free bytes are the whole page minus what was allocated
2343     // and then converted into units of block headers.
2344     //
2345     BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2346     FragmentEntry = POOL_BLOCK(Entry, i);
2347     FragmentEntry->Ulong1 = 0;
2348     FragmentEntry->BlockSize = BlockSize;
2349     FragmentEntry->PreviousSize = i;
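
     //
     // Worked example, assuming 4KB pages and 8-byte blocks (512 blocks per
     // page): a 14-block allocation leaves a 498-block fragment, which is
     // queued on ListHeads[497] below if it survives the size check
     //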
2350 
2351     //
2352     // Increment required counters
2353     //
2354     InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2355     InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2356 
2357     //
2358     // Now check if enough free bytes remained for us to have a "full" entry,
2359     // which contains enough bytes for a linked list and thus can be used for
2360     // allocations (up to 8 bytes...)
2361     //
2362     if (FragmentEntry->BlockSize != 1)
2363     {
2364         //
2365         // Excellent -- acquire the pool lock
2366         //
2367         OldIrql = ExLockPool(PoolDesc);
2368 
2369         //
2370         // And insert the free entry into the free list for this block size
2371         //
2372         ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2373         ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2374                               POOL_FREE_BLOCK(FragmentEntry));
2375         ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2376 
2377         //
2378         // Release the pool lock
2379         //
2380         ExpCheckPoolBlocks(Entry);
2381         ExUnlockPool(PoolDesc, OldIrql);
2382     }
2383     else
2384     {
2385         //
2386         // Simply do a sanity check
2387         //
2388         ExpCheckPoolBlocks(Entry);
2389     }
2390 
2391     //
2392     // Increment performance counters and track this allocation
2393     //
2394     InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2395     ExpInsertPoolTracker(Tag,
2396                          Entry->BlockSize * POOL_BLOCK_SIZE,
2397                          OriginalType);
2398 
2399     //
2400     // And return the pool allocation
2401     //
2402     ExpCheckPoolBlocks(Entry);
2403     Entry->PoolTag = Tag;
2404     return POOL_FREE_BLOCK(Entry);
2405 }
2406 
2407 /*
2408  * @implemented
2409  */
2410 PVOID
2411 NTAPI
2412 ExAllocatePool(POOL_TYPE PoolType,
2413                SIZE_T NumberOfBytes)
2414 {
2415     ULONG Tag = TAG_NONE;
2416 #if 0 && DBG
2417     PLDR_DATA_TABLE_ENTRY LdrEntry;
2418 
2419     /* Use the first four letters of the driver name, or "None" if unavailable */
2420     LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2421                 ? MiLookupDataTableEntry(_ReturnAddress())
2422                 : NULL;
2423     if (LdrEntry)
2424     {
2425         ULONG i;
2426         Tag = 0;
2427         for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2428             Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2429         for (; i < 4; i++)
2430             Tag = Tag >> 8 | ' ' << 24;
2431     }
2432 #endif
2433     return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2434 }
2435 
2436 /*
2437  * @implemented
2438  */
2439 VOID
2440 NTAPI
2441 ExFreePoolWithTag(IN PVOID P,
2442                   IN ULONG TagToFree)
2443 {
2444     PPOOL_HEADER Entry, NextEntry;
2445     USHORT BlockSize;
2446     KIRQL OldIrql;
2447     POOL_TYPE PoolType;
2448     PPOOL_DESCRIPTOR PoolDesc;
2449     ULONG Tag;
2450     BOOLEAN Combined = FALSE;
2451     PFN_NUMBER PageCount, RealPageCount;
2452     PKPRCB Prcb = KeGetCurrentPrcb();
2453     PGENERAL_LOOKASIDE LookasideList;
2454     PEPROCESS Process;
2455 
2456     //
2457     // Check if any of the debug flags are enabled
2458     //
2459     if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2460                         POOL_FLAG_CHECK_WORKERS |
2461                         POOL_FLAG_CHECK_RESOURCES |
2462                         POOL_FLAG_VERIFIER |
2463                         POOL_FLAG_CHECK_DEADLOCK |
2464                         POOL_FLAG_SPECIAL_POOL))
2465     {
2466         //
2467         // Check if special pool is enabled
2468         //
2469         if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2470         {
2471             //
2472             // Check if it was allocated from a special pool
2473             //
2474             if (MmIsSpecialPoolAddress(P))
2475             {
2476                 //
2477                 // Was deadlock verification also enabled? We can do some extra
2478                 // checks at this point
2479                 //
2480                 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2481                 {
2482                     DPRINT1("Verifier not yet supported\n");
2483                 }
2484 
2485                 //
2486                 // It is, so handle it via special pool free routine
2487                 //
2488                 MmFreeSpecialPool(P);
2489                 return;
2490             }
2491         }
2492 
2493         //
2494         // For non-big page allocations, we'll do a bunch of checks in here
2495         //
2496         if (PAGE_ALIGN(P) != P)
2497         {
2498             //
2499             // Get the entry for this pool allocation
2500             // The pointer math here may look wrong or confusing, but it is quite right
2501             //
2502             Entry = P;
2503             Entry--;
2504 
2505             //
2506             // Get the pool type
2507             //
2508             PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2509 
2510             //
2511             // FIXME: Many other debugging checks go here
2512             //
2513             ExpCheckPoolIrqlLevel(PoolType, 0, P);
2514         }
2515     }
2516 
2517     //
2518     // Check if this is a big page allocation
2519     //
2520     if (PAGE_ALIGN(P) == P)
2521     {
2522         //
2523         // We need to find the tag for it, so first we need to find out what
2524         // kind of allocation this was (paged or nonpaged), then we can go
2525         // ahead and try finding the tag for it. Remember to get rid of the
2526         // PROTECTED_POOL tag if it's found.
2527         //
2528         // Note that if at insertion time, we failed to add the tag for a big
2529         // pool allocation, we used a special tag called 'BIG' to identify the
2530         // allocation, and we may get this tag back. In this scenario, we must
2531         // manually get the size of the allocation by actually counting through
2532         // the PFN database.
2533         //
2534         PoolType = MmDeterminePoolType(P);
2535         ExpCheckPoolIrqlLevel(PoolType, 0, P);
2536         Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2537         if (!Tag)
2538         {
2539             DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2540             ASSERT(Tag == ' GIB');
2541             PageCount = 1; // We are going to lie! This might screw up accounting?
2542         }
2543         else if (Tag & PROTECTED_POOL)
2544         {
2545             Tag &= ~PROTECTED_POOL;
2546         }
2547 
2548         //
2549         // Check block tag
2550         //
2551         if (TagToFree && TagToFree != Tag)
2552         {
2553             DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2554             KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2555         }
2556 
2557         //
2558         // We have our tag and our page count, so we can go ahead and remove this
2559         // tracker now
2560         //
2561         ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2562 
2563         //
2564         // Check if any of the debug flags are enabled
2565         //
2566         if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2567                             POOL_FLAG_CHECK_WORKERS |
2568                             POOL_FLAG_CHECK_RESOURCES |
2569                             POOL_FLAG_CHECK_DEADLOCK))
2570         {
2571             //
2572             // Was deadlock verification also enabled? We can do some extra
2573             // checks at this point
2574             //
2575             if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2576             {
2577                 DPRINT1("Verifier not yet supported\n");
2578             }
2579 
2580             //
2581             // FIXME: Many debugging checks go here
2582             //
2583         }
2584 
2585         //
2586         // Update counters
2587         //
2588         PoolDesc = PoolVector[PoolType];
2589         InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2590         InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2591                                     -(LONG_PTR)(PageCount << PAGE_SHIFT));
2592 
2593         //
2594         // Do the real free now and update the last counter with the big page count
2595         //
2596         RealPageCount = MiFreePoolPages(P);
2597         ASSERT(RealPageCount == PageCount);
2598         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2599                                -(LONG)RealPageCount);
2600         return;
2601     }
2602 
2603     //
2604     // Get the entry for this pool allocation
2605     // The pointer math here may look wrong or confusing, but it is quite right
2606     //
2607     Entry = P;
2608     Entry--;
2609     ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2610 
2611     //
2612     // Get the size of the entry, and its pool type, then load the descriptor
2613     // for this pool type
2614     //
2615     BlockSize = Entry->BlockSize;
2616     PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2617     PoolDesc = PoolVector[PoolType];
2618 
2619     //
2620     // Make sure that the IRQL makes sense
2621     //
2622     ExpCheckPoolIrqlLevel(PoolType, 0, P);
2623 
2624     //
2625     // Get the pool tag and get rid of the PROTECTED_POOL flag
2626     //
2627     Tag = Entry->PoolTag;
2628     if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2629 
2630     //
2631     // Check block tag
2632     //
2633     if (TagToFree && TagToFree != Tag)
2634     {
2635         DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2636         KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2637     }
2638 
2639     //
2640     // Track the removal of this allocation
2641     //
2642     ExpRemovePoolTracker(Tag,
2643                          BlockSize * POOL_BLOCK_SIZE,
2644                          Entry->PoolType - 1);
2645 
2646     //
2647     // Release pool quota, if any
2648     //
2649     if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2650     {
2651         Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2652         if (Process)
2653         {
2654             if (Process->Pcb.Header.Type != ProcessObject)
2655             {
2656                 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2657                         Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2658                 KeBugCheckEx(BAD_POOL_CALLER,
2659                              0x0D,
2660                              (ULONG_PTR)P,
2661                              Tag,
2662                              (ULONG_PTR)Process);
2663             }
2664             PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2665             ObDereferenceObject(Process);
2666         }
2667     }
2668 
2669     //
2670     // Is this allocation small enough to have come from a lookaside list?
2671     //
2672     if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2673     {
2674         //
2675         // Try pushing it into the per-CPU lookaside list
2676         //
2677         LookasideList = (PoolType == PagedPool) ?
2678                          Prcb->PPPagedLookasideList[BlockSize - 1].P :
2679                          Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2680         LookasideList->TotalFrees++;
2681         if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2682         {
2683             LookasideList->FreeHits++;
2684             InterlockedPushEntrySList(&LookasideList->ListHead, P);
2685             return;
2686         }
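
         //
         // (The Depth check bounds how many blocks each lookaside list may
         // cache: once a list already holds Depth entries, the block falls
         // through -- first to the shared list below, then to the regular
         // free lists)
         //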
2687 
2688         //
2689         // We failed, try to push it into the global lookaside list
2690         //
2691         LookasideList = (PoolType == PagedPool) ?
2692                          Prcb->PPPagedLookasideList[BlockSize - 1].L :
2693                          Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2694         LookasideList->TotalFrees++;
2695         if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2696         {
2697             LookasideList->FreeHits++;
2698             InterlockedPushEntrySList(&LookasideList->ListHead, P);
2699             return;
2700         }
2701     }
2702 
2703     //
2704     // Get the pointer to the next entry
2705     //
2706     NextEntry = POOL_BLOCK(Entry, BlockSize);
2707 
2708     //
2709     // Update performance counters
2710     //
2711     InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2712     InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2713 
2714     //
2715     // Acquire the pool lock
2716     //
2717     OldIrql = ExLockPool(PoolDesc);
2718 
2719     //
2720     // Check if the next allocation is at the end of the page
2721     //
2722     ExpCheckPoolBlocks(Entry);
2723     if (PAGE_ALIGN(NextEntry) != NextEntry)
2724     {
2725         //
2726         // We may be able to combine the block if it's free
2727         //
2728         if (NextEntry->PoolType == 0)
2729         {
2730             //
2731             // The next block is free, so we'll do a combine
2732             //
2733             Combined = TRUE;
2734 
2735             //
2736             // Make sure there's actual data in the block -- anything smaller
2737             // than this means we only have the header, so there's no linked list
2738             // for us to remove
2739             //
2740             if (NextEntry->BlockSize != 1)
2741             {
2742                 //
2743                 // The block is at least big enough to have a linked list, so go
2744                 // ahead and remove it
2745                 //
2746                 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2747                 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2748                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2749                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2750             }
2751 
2752             //
2753             // Our entry is now combined with the next entry
2754             //
2755             Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2756         }
2757     }
2758 
2759     //
2760     // Now check if there was a previous entry on the same page as us
2761     //
2762     if (Entry->PreviousSize)
2763     {
2764         //
2765         // Great, grab that entry and check if it's free
2766         //
2767         NextEntry = POOL_PREV_BLOCK(Entry);
2768         if (NextEntry->PoolType == 0)
2769         {
2770             //
2771             // It is, so we can do a combine
2772             //
2773             Combined = TRUE;
2774 
2775             //
2776             // Make sure there's actual data in the block -- anything smaller
2777             // than this means we only have the header so there's no linked list
2778             // for us to remove
2779             //
2780             if (NextEntry->BlockSize != 1)
2781             {
2782                 //
2783                 // The block is at least big enough to have a linked list, so go
2784                 // ahead and remove it
2785                 //
2786                 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2787                 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2788                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2789                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2790             }
2791 
2792             //
2793             // Combine our original block (which might've already been combined
2794             // with the next block), into the previous block
2795             //
2796             NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2797 
2798             //
2799             // And now we'll work with the previous block instead
2800             //
2801             Entry = NextEntry;
2802         }
2803     }
2804 
2805     //
2806     // By now, it may have been possible for our combined blocks to actually
2807     // have made up a full page (if there were only 2-3 allocations on the
2808     // page, they could've all been combined).
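     //
     // (The check below verifies exactly that: the entry starts on a page
     // boundary and POOL_NEXT_BLOCK lands on the next page boundary, which
     // can only happen when the block now spans the entire page)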
2809     //
2810     if ((PAGE_ALIGN(Entry) == Entry) &&
2811         (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2812     {
2813         //
2814         // In this case, release the pool lock, update the performance counter,
2815         // and free the page
2816         //
2817         ExUnlockPool(PoolDesc, OldIrql);
2818         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2819         MiFreePoolPages(Entry);
2820         return;
2821     }
2822 
2823     //
2824     // Otherwise, we now have a free block (or a combination of 2 or 3)
2825     //
2826     Entry->PoolType = 0;
2827     BlockSize = Entry->BlockSize;
2828     ASSERT(BlockSize != 1);
2829 
2830     //
2831     // Check if we actually did combine it with anyone
2832     //
2833     if (Combined)
2834     {
2835         //
2836         // Get the first combined block (either our original to begin with, or
2837         // the one after the original, depending if we combined with the previous)
2838         //
2839         NextEntry = POOL_NEXT_BLOCK(Entry);
2840 
2841         //
2842         // As long as the next block isn't on a page boundary, have it point
2843         // back to us
2844         //
2845         if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2846     }
2847 
2848     //
2849     // Insert this new free block, and release the pool lock
2850     //
2851     ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2852     ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2853     ExUnlockPool(PoolDesc, OldIrql);
2854 }
2855 
2856 /*
2857  * @implemented
2858  */
2859 VOID
2860 NTAPI
2861 ExFreePool(PVOID P)
2862 {
2863     //
2864     // Just free without checking for the tag
2865     //
2866     ExFreePoolWithTag(P, 0);
2867 }
2868 
2869 /*
2870  * @unimplemented
2871  */
2872 SIZE_T
2873 NTAPI
2874 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2875                      OUT PBOOLEAN QuotaCharged)
2876 {
2877     //
2878     // Not implemented
2879     //
2880     UNIMPLEMENTED;
2881     return 0;
2882 }
2883 
2884 /*
2885  * @implemented
2886  */
2888 PVOID
2889 NTAPI
2890 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2891                         IN SIZE_T NumberOfBytes)
2892 {
2893     //
2894     // Allocate the pool
2895     //
2896     return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2897 }
2898 
2899 /*
2900  * @implemented
2901  */
2902 PVOID
2903 NTAPI
2904 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2905                               IN SIZE_T NumberOfBytes,
2906                               IN ULONG Tag,
2907                               IN EX_POOL_PRIORITY Priority)
2908 {
2909     PVOID Buffer;
2910 
2911     //
2912     // Allocate the pool
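    // NOTE: the Priority hint is not honored on this path -- the request is
    // forwarded as a plain tagged allocation, and only the failure case
    // below acknowledges the missing support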
2913     //
2914     Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2915     if (Buffer == NULL)
2916     {
2917         UNIMPLEMENTED;
2918     }
2919 
2920     return Buffer;
2921 }
2922 
2923 /*
2924  * @implemented
2925  */
2926 PVOID
2927 NTAPI
2928 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2929                            IN SIZE_T NumberOfBytes,
2930                            IN ULONG Tag)
2931 {
2932     BOOLEAN Raise = TRUE;
2933     PVOID Buffer;
2934     PPOOL_HEADER Entry;
2935     NTSTATUS Status;
2936     PEPROCESS Process = PsGetCurrentProcess();
2937 
2938     //
2939     // Check if we should fail instead of raising an exception
2940     //
2941     if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2942     {
2943         Raise = FALSE;
2944         PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2945     }
2946 
2947     //
2948     // Inject the pool quota mask
2949     //
2950     PoolType += QUOTA_POOL_MASK;
2951 
2952     //
2953     // Check if we have enough space to add the quota owner process, as long as
2954     // this isn't the system process, which never gets charged quota
2955     //
2956     ASSERT(NumberOfBytes != 0);
2957     if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2958         (Process != PsInitialSystemProcess))
2959     {
2960         //
2961         // Add space for our EPROCESS pointer
2962         //
2963         NumberOfBytes += sizeof(PEPROCESS);
2964     }
2965     else
2966     {
2967         //
2968         // We won't be able to store the pointer, so don't use quota for this
2969         //
2970         PoolType -= QUOTA_POOL_MASK;
2971     }
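
     //
     // (Illustrative math, assuming a 32-bit build: a 4096-byte page minus
     // an 8-byte pool block minus a 4-byte pointer allows requests up to
     // 4084 bytes to carry the hidden EPROCESS pointer; anything larger
     // simply forgoes the quota charge)
     //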
2972 
2973     //
2974     // Allocate the pool buffer now
2975     //
2976     Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2977 
2978     //
2979     // If the buffer is page-aligned, this is a large page allocation and we
2980     // won't touch it
2981     //
2982     if (PAGE_ALIGN(Buffer) != Buffer)
2983     {
2984         //
2985         // Also if special pool is enabled, and this was allocated from there,
2986         // we won't touch it either
2987         //
2988         if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2989             (MmIsSpecialPoolAddress(Buffer)))
2990         {
2991             return Buffer;
2992         }
2993 
2994         //
2995         // If it wasn't actually allocated with quota charges, ignore it too
2996         //
2997         if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2998 
2999         //
3000         // If this is the system process, we don't charge quota, so ignore
3001         //
3002         if (Process == PsInitialSystemProcess) return Buffer;
3003 
3004         //
3005         // Actually go and charge quota for the process now
3006         //
3007         Entry = POOL_ENTRY(Buffer);
3008         Status = PsChargeProcessPoolQuota(Process,
3009                                           PoolType & BASE_POOL_TYPE_MASK,
3010                                           Entry->BlockSize * POOL_BLOCK_SIZE);
3011         if (!NT_SUCCESS(Status))
3012         {
3013             //
3014             // Quota failed, back out the allocation, clear the owner, and fail
3015             //
3016             ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
3017             ExFreePoolWithTag(Buffer, Tag);
3018             if (Raise) RtlRaiseStatus(Status);
3019             return NULL;
3020         }
3021 
3022         //
3023         // Quota worked, write the owner and then reference it before returning
3024         //
3025         ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
3026         ObReferenceObject(Process);
3027     }
3028     else if (!(Buffer) && (Raise))
3029     {
3030         //
3031         // The allocation failed, raise an error if we are in raise mode
3032         //
3033         RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
3034     }
3035 
3036     //
3037     // Return the allocated buffer
3038     //
3039     return Buffer;
3040 }
3041 
3042 /* EOF */
3043