xref: /reactos/ntoskrnl/mm/ARM3/expool.c (revision e9d8fa57)
1 /*
2  * PROJECT:         ReactOS Kernel
3  * LICENSE:         BSD - See COPYING.ARM in the top level directory
4  * FILE:            ntoskrnl/mm/ARM3/expool.c
5  * PURPOSE:         ARM Memory Manager Executive Pool Manager
6  * PROGRAMMERS:     ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20 
21 /* GLOBALS ********************************************************************/
22 
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24 
25 typedef struct _POOL_DPC_CONTEXT
26 {
27     PPOOL_TRACKER_TABLE PoolTrackTable;
28     SIZE_T PoolTrackTableSize;
29     PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
30     SIZE_T PoolTrackTableSizeExpansion;
31 } POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;
32 
33 ULONG ExpNumberOfPagedPools;
34 POOL_DESCRIPTOR NonPagedPoolDescriptor;
35 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
36 PPOOL_DESCRIPTOR PoolVector[2];
37 PKGUARDED_MUTEX ExpPagedPoolMutex;
38 SIZE_T PoolTrackTableSize, PoolTrackTableMask;
39 SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
40 PPOOL_TRACKER_TABLE PoolTrackTable;
41 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
42 KSPIN_LOCK ExpTaggedPoolLock;
43 ULONG PoolHitTag;
44 BOOLEAN ExStopBadTags;
45 KSPIN_LOCK ExpLargePoolTableLock;
46 ULONG ExpPoolBigEntriesInUse;
47 ULONG ExpPoolFlags;
48 ULONG ExPoolFailures;
49 
50 /* Pool block/header/list access macros */
51 #define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
52 #define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x)  + sizeof(POOL_HEADER))
53 #define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
54 #define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
55 #define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))
56 
57 /*
58  * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59  * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60  * pool code, but only for checked builds.
61  *
62  * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63  * that these checks are done even on retail builds, due to the increasing
64  * number of kernel-mode attacks which depend on dangling list pointers and other
65  * kinds of list-based attacks.
66  *
67  * For now, I will leave these checks on all the time, but later they are likely
68  * to be DBG-only, at least until there are enough kernel-mode security attacks
69  * against ReactOS to warrant the performance hit.
70  *
71  * For now, these are not made inline, so we can get good stack traces.
72  */
73 PLIST_ENTRY
74 NTAPI
75 ExpDecodePoolLink(IN PLIST_ENTRY Link)
76 {
77     return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79 
80 PLIST_ENTRY
81 NTAPI
82 ExpEncodePoolLink(IN PLIST_ENTRY Link)
83 {
84     return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
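//
// Illustrative note (not part of the original source): the encode/decode
// pair above simply tags bit 0 of the pointer, which is always clear for
// the naturally aligned LIST_ENTRY structures linked here, so the round
// trip is lossless:
//
//   PLIST_ENTRY Encoded = ExpEncodePoolLink(Entry);  // Entry | 1
//   ASSERT(ExpDecodePoolLink(Encoded) == Entry);     // (Entry | 1) & ~1
//
// A link that was overwritten with a raw, non-encoded pointer will fail
// the round-trip consistency checks in ExpCheckPoolLinks below.
//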
86 
87 VOID
88 NTAPI
89 ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
90 {
91     if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92         (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93     {
94         KeBugCheckEx(BAD_POOL_HEADER,
95                      3,
96                      (ULONG_PTR)ListHead,
97                      (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
98                      (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
99     }
100 }
101 
102 VOID
103 NTAPI
104 ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
105 {
106     ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108 
109 BOOLEAN
110 NTAPI
111 ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
112 {
113     return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115 
116 VOID
117 NTAPI
118 ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
119 {
120     PLIST_ENTRY Blink, Flink;
121     Flink = ExpDecodePoolLink(Entry->Flink);
122     Blink = ExpDecodePoolLink(Entry->Blink);
123     Flink->Blink = ExpEncodePoolLink(Blink);
124     Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126 
127 PLIST_ENTRY
128 NTAPI
129 ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
130 {
131     PLIST_ENTRY Entry, Flink;
132     Entry = ExpDecodePoolLink(ListHead->Flink);
133     Flink = ExpDecodePoolLink(Entry->Flink);
134     ListHead->Flink = ExpEncodePoolLink(Flink);
135     Flink->Blink = ExpEncodePoolLink(ListHead);
136     return Entry;
137 }
138 
139 PLIST_ENTRY
140 NTAPI
141 ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
142 {
143     PLIST_ENTRY Entry, Blink;
144     Entry = ExpDecodePoolLink(ListHead->Blink);
145     Blink = ExpDecodePoolLink(Entry->Blink);
146     ListHead->Blink = ExpEncodePoolLink(Blink);
147     Blink->Flink = ExpEncodePoolLink(ListHead);
148     return Entry;
149 }
150 
151 VOID
152 NTAPI
153 ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
154                       IN PLIST_ENTRY Entry)
155 {
156     PLIST_ENTRY Blink;
157     ExpCheckPoolLinks(ListHead);
158     Blink = ExpDecodePoolLink(ListHead->Blink);
159     Entry->Flink = ExpEncodePoolLink(ListHead);
160     Entry->Blink = ExpEncodePoolLink(Blink);
161     Blink->Flink = ExpEncodePoolLink(Entry);
162     ListHead->Blink = ExpEncodePoolLink(Entry);
163     ExpCheckPoolLinks(ListHead);
164 }
165 
166 VOID
167 NTAPI
168 ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
169                       IN PLIST_ENTRY Entry)
170 {
171     PLIST_ENTRY Flink;
172     ExpCheckPoolLinks(ListHead);
173     Flink = ExpDecodePoolLink(ListHead->Flink);
174     Entry->Flink = ExpEncodePoolLink(Flink);
175     Entry->Blink = ExpEncodePoolLink(ListHead);
176     Flink->Blink = ExpEncodePoolLink(Entry);
177     ListHead->Flink = ExpEncodePoolLink(Entry);
178     ExpCheckPoolLinks(ListHead);
179 }
180 
181 VOID
182 NTAPI
183 ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
184 {
185     PPOOL_HEADER PreviousEntry, NextEntry;
186 
187     /* Is there a block before this one? */
188     if (Entry->PreviousSize)
189     {
190         /* Get it */
191         PreviousEntry = POOL_PREV_BLOCK(Entry);
192 
193         /* The two blocks must be on the same page! */
194         if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195         {
196             /* Something is awry */
197             KeBugCheckEx(BAD_POOL_HEADER,
198                          6,
199                          (ULONG_PTR)PreviousEntry,
200                          __LINE__,
201                          (ULONG_PTR)Entry);
202         }
203 
204         /* This block should also indicate that it's as large as we think it is */
205         if (PreviousEntry->BlockSize != Entry->PreviousSize)
206         {
207             /* Otherwise, someone corrupted one of the sizes */
208             DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209                     PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210                     Entry->PreviousSize, (char *)&Entry->PoolTag);
211             KeBugCheckEx(BAD_POOL_HEADER,
212                          5,
213                          (ULONG_PTR)PreviousEntry,
214                          __LINE__,
215                          (ULONG_PTR)Entry);
216         }
217     }
218     else if (PAGE_ALIGN(Entry) != Entry)
219     {
220         /* If there's no block before us, we are the first block, so we should be on a page boundary */
221         KeBugCheckEx(BAD_POOL_HEADER,
222                      7,
223                      0,
224                      __LINE__,
225                      (ULONG_PTR)Entry);
226     }
227 
228     /* This block must have a size */
229     if (!Entry->BlockSize)
230     {
231         /* Someone must've corrupted this field */
232         if (Entry->PreviousSize)
233         {
234             PreviousEntry = POOL_PREV_BLOCK(Entry);
235             DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236                     (char *)&PreviousEntry->PoolTag,
237                     (char *)&Entry->PoolTag);
238         }
239         else
240         {
241             DPRINT1("Entry tag %.4s\n",
242                     (char *)&Entry->PoolTag);
243         }
244         KeBugCheckEx(BAD_POOL_HEADER,
245                      8,
246                      0,
247                      __LINE__,
248                      (ULONG_PTR)Entry);
249     }
250 
251     /* Okay, now get the next block */
252     NextEntry = POOL_NEXT_BLOCK(Entry);
253 
254     /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255     if (PAGE_ALIGN(NextEntry) != NextEntry)
256     {
257         /* The two blocks must be on the same page! */
258         if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259         {
260             /* Something is messed up */
261             KeBugCheckEx(BAD_POOL_HEADER,
262                          9,
263                          (ULONG_PTR)NextEntry,
264                          __LINE__,
265                          (ULONG_PTR)Entry);
266         }
267 
268         /* And this block should think we are as large as we truly are */
269         if (NextEntry->PreviousSize != Entry->BlockSize)
270         {
271             /* Otherwise, someone corrupted the field */
272             DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273                     Entry->BlockSize, (char *)&Entry->PoolTag,
274                     NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275             KeBugCheckEx(BAD_POOL_HEADER,
276                          5,
277                          (ULONG_PTR)NextEntry,
278                          __LINE__,
279                          (ULONG_PTR)Entry);
280         }
281     }
282 }
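//
// Recap of the invariants enforced above (informational, restating the
// checks themselves):
//
//   PreviousSize != 0  ==>  POOL_PREV_BLOCK(Entry)->BlockSize == Entry->PreviousSize
//   PreviousSize == 0  ==>  PAGE_ALIGN(Entry) == Entry
//   BlockSize != 0, and when POOL_NEXT_BLOCK(Entry) is not page-aligned,
//   POOL_NEXT_BLOCK(Entry)->PreviousSize == Entry->BlockSize
//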
283 
284 VOID
285 NTAPI
286 ExpCheckPoolAllocation(
287     PVOID P,
288     POOL_TYPE PoolType,
289     ULONG Tag)
290 {
291     PPOOL_HEADER Entry;
292     ULONG i;
293     KIRQL OldIrql;
294     POOL_TYPE RealPoolType;
295 
296     /* Get the pool header */
297     Entry = ((PPOOL_HEADER)P) - 1;
298 
299     /* Check if this is a large allocation */
300     if (PAGE_ALIGN(P) == P)
301     {
302         /* Lock the pool table */
303         KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
304 
305         /* Find the pool tag */
306         for (i = 0; i < PoolBigPageTableSize; i++)
307         {
308             /* Check if this is our allocation */
309             if (PoolBigPageTable[i].Va == P)
310             {
311                 /* Make sure the tag is ok */
312                 if (PoolBigPageTable[i].Key != Tag)
313                 {
314                     KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315                 }
316 
317                 break;
318             }
319         }
320 
321         /* Release the lock */
322         KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
323 
324         if (i == PoolBigPageTableSize)
325         {
326             /* Did not find the allocation */
327             //ASSERT(FALSE);
328         }
329 
330         /* Get Pool type by address */
331         RealPoolType = MmDeterminePoolType(P);
332     }
333     else
334     {
335         /* Verify the tag */
336         if (Entry->PoolTag != Tag)
337         {
338             DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339                     &Tag, &Entry->PoolTag, Entry->PoolTag);
340             KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341         }
342 
343         /* Check the rest of the header */
344         ExpCheckPoolHeader(Entry);
345 
346         /* Get Pool type from entry */
347         RealPoolType = (Entry->PoolType - 1);
348     }
349 
350     /* Should we check the pool type? */
351     if (PoolType != -1)
352     {
353         /* Verify the pool type */
354         if (RealPoolType != PoolType)
355         {
356             DPRINT1("Wrong pool type! Expected %s, got %s\n",
357                     PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358                     (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359             KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360         }
361     }
362 }
363 
364 VOID
365 NTAPI
366 ExpCheckPoolBlocks(IN PVOID Block)
367 {
368     BOOLEAN FoundBlock = FALSE;
369     SIZE_T Size = 0;
370     PPOOL_HEADER Entry;
371 
372     /* Get the first entry for this page, make sure it really is the first */
373     Entry = PAGE_ALIGN(Block);
374     ASSERT(Entry->PreviousSize == 0);
375 
376     /* Now scan each entry */
377     while (TRUE)
378     {
379         /* When we actually found our block, remember this */
380         if (Entry == Block) FoundBlock = TRUE;
381 
382         /* Now validate this block header */
383         ExpCheckPoolHeader(Entry);
384 
385         /* And go to the next one, keeping track of our size */
386         Size += Entry->BlockSize;
387         Entry = POOL_NEXT_BLOCK(Entry);
388 
389         /* If we hit the last block, stop */
390         if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391 
392         /* If we hit the end of the page, stop */
393         if (PAGE_ALIGN(Entry) == Entry) break;
394     }
395 
396     /* We must've found our block, and we must have hit the end of the page */
397     if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398     {
399         /* Otherwise, the blocks are messed up */
400         KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401     }
402 }
403 
404 FORCEINLINE
405 VOID
406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType,
407                       IN SIZE_T NumberOfBytes,
408                       IN PVOID Entry)
409 {
410     //
411     // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412     // be DISPATCH_LEVEL or lower for Non Paged Pool
413     //
414     if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
415         (KeGetCurrentIrql() > APC_LEVEL) :
416         (KeGetCurrentIrql() > DISPATCH_LEVEL))
417     {
418         //
419         // Take the system down
420         //
421         KeBugCheckEx(BAD_POOL_CALLER,
422                      !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID,
423                      KeGetCurrentIrql(),
424                      PoolType,
425                      !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426     }
427 }
428 
429 FORCEINLINE
430 ULONG
431 ExpComputeHashForTag(IN ULONG Tag,
432                      IN SIZE_T BucketMask)
433 {
434     //
435     // Compute the hash by multiplying with a large prime number and then XORing
436     // with the HIDWORD of the result.
437     //
438     // Finally, AND with the bucket mask to generate a valid index/bucket into
439     // the table
440     //
441     ULONGLONG Result = (ULONGLONG)40543 * Tag;
442     return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
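//
// Minimal sketch of the computation above (illustrative only; 'TagX' is a
// hypothetical tag, and 2047 is the mask that the default 2048-entry
// tracker table ends up with):
//
//   ULONGLONG Result = (ULONGLONG)40543 * 'TagX';
//   ULONG Bucket = 2047 & ((ULONG)Result ^ (ULONG)(Result >> 32));
//
// The multiply spreads the tag bits across 64 bits, the XOR folds the high
// dword back into the low one, and the mask clamps the bucket to the table.
//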
444 
445 FORCEINLINE
446 ULONG
447 ExpComputePartialHashForAddress(IN PVOID BaseAddress)
448 {
449     ULONG Result;
450     //
451     // Compute the hash by converting the address into a page number, and then
452     // XORing each nibble with the next one.
453     //
454     // We do *NOT* AND with the bucket mask at this point because big table expansion
455     // might happen. Therefore, the final step of the hash must be performed
456     // while holding the expansion pushlock, and this is why we call this a
457     // "partial" hash only.
458     //
459     Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460     return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
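//
// Sketch of the intended usage (this mirrors ExpAddTagForBigPages and
// ExpFindAndRemoveTagBigPages below): the final AND happens under the lock,
// since PoolBigPageTableHash could change across a table expansion:
//
//   Hash = ExpComputePartialHashForAddress(Va);
//   KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
//   Hash &= PoolBigPageTableHash;
//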
462 
463 #if DBG
464 FORCEINLINE
465 BOOLEAN
466 ExpTagAllowPrint(CHAR Tag)
467 {
468     if ((Tag >= 'a' && Tag <= 'z') ||
469         (Tag >= 'A' && Tag <= 'Z') ||
470         Tag == ' ')
471     {
472         return TRUE;
473     }
474 
475     return FALSE;
476 }
477 
478 VOID
479 MiDumpNonPagedPoolConsumers(VOID)
480 {
481     SIZE_T i;
482 
483     DPRINT1("---------------------\n");
484     DPRINT1("Out of memory dumper!\n");
485 
486     //
487     // We'll extract allocations for all the tracked pools
488     //
489     for (i = 0; i < PoolTrackTableSize; ++i)
490     {
491         PPOOL_TRACKER_TABLE TableEntry;
492 
493         TableEntry = &PoolTrackTable[i];
494 
495         //
496         // We only care about non paged
497         //
498         if (TableEntry->NonPagedBytes != 0)
499         {
500             //
501             // If there's a tag, attempt to do a pretty print
502             //
503             if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE)
504             {
505                 CHAR Tag[4];
506 
507                 //
508                 // Extract each 'component' and check whether they are printable
509                 //
510                 Tag[0] = TableEntry->Key & 0xFF;
511                 Tag[1] = TableEntry->Key >> 8 & 0xFF;
512                 Tag[2] = TableEntry->Key >> 16 & 0xFF;
513                 Tag[3] = TableEntry->Key >> 24 & 0xFF;
514 
515                 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
516                 {
517                     //
518                     // Print in reversed order to match what is in source code
519                     //
520                     DPRINT1("Tag: '%c%c%c%c', Size: %ld\n", Tag[3], Tag[2], Tag[1], Tag[0], TableEntry->NonPagedBytes);
521                 }
522                 else
523                 {
524                     DPRINT1("Tag: %x, Size: %ld\n", TableEntry->Key, TableEntry->NonPagedBytes);
525                 }
526             }
527             else
528             {
529                 DPRINT1("Anon, Size: %ld\n", TableEntry->NonPagedBytes);
530             }
531         }
532     }
533 
534     DPRINT1("---------------------\n");
535 }
536 #endif
537 
538 /* PRIVATE FUNCTIONS **********************************************************/
539 
540 VOID
541 NTAPI
542 INIT_SECTION
543 ExpSeedHotTags(VOID)
544 {
545     ULONG i, Key, Hash, Index;
546     PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
547     ULONG TagList[] =
548     {
549         '  oI',
550         ' laH',
551         'PldM',
552         'LooP',
553         'tSbO',
554         ' prI',
555         'bdDN',
556         'LprI',
557         'pOoI',
558         ' ldM',
559         'eliF',
560         'aVMC',
561         'dSeS',
562         'CFtN',
563         'looP',
564         'rPCT',
565         'bNMC',
566         'dTeS',
567         'sFtN',
568         'TPCT',
569         'CPCT',
570         ' yeK',
571         'qSbO',
572         'mNoI',
573         'aEoI',
574         'cPCT',
575         'aFtN',
576         '0ftN',
577         'tceS',
578         'SprI',
579         'ekoT',
580         '  eS',
581         'lCbO',
582         'cScC',
583         'lFtN',
584         'cAeS',
585         'mfSF',
586         'kWcC',
587         'miSF',
588         'CdfA',
589         'EdfA',
590         'orSF',
591         'nftN',
592         'PRIU',
593         'rFpN',
594         'RFpN',
595         'aPeS',
596         'sUeS',
597         'FpcA',
598         'MpcA',
599         'cSeS',
600         'mNbO',
601         'sFpN',
602         'uLeS',
603         'DPcS',
604         'nevE',
605         'vrqR',
606         'ldaV',
607         '  pP',
608         'SdaV',
609         ' daV',
610         'LdaV',
611         'FdaV',
612         ' GIB',
613     };
614 
615     //
616     // Loop all 64 hot tags
617     //
618     ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
619     for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
620     {
621         //
622         // Get the current tag, and compute its hash in the tracker table
623         //
624         Key = TagList[i];
625         Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);
626 
627         //
628         // Loop all the hashes in this index/bucket
629         //
630         Index = Hash;
631         while (TRUE)
632         {
633             //
634             // Find an empty entry, and make sure this isn't the last hash that
635             // can fit.
636             //
637             // On checked builds, also make sure this is the first time we are
638             // seeding this tag.
639             //
640             ASSERT(TrackTable[Hash].Key != Key);
641             if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
642             {
643                 //
644                 // It has been seeded, move on to the next tag
645                 //
646                 TrackTable[Hash].Key = Key;
647                 break;
648             }
649 
650             //
651             // This entry was already taken, compute the next possible hash while
652             // making sure we're not back at our initial index.
653             //
654             ASSERT(TrackTable[Hash].Key != Key);
655             Hash = (Hash + 1) & PoolTrackTableMask;
656             if (Hash == Index) break;
657         }
658     }
659 }
660 
661 VOID
662 NTAPI
663 ExpRemovePoolTracker(IN ULONG Key,
664                      IN SIZE_T NumberOfBytes,
665                      IN POOL_TYPE PoolType)
666 {
667     ULONG Hash, Index;
668     PPOOL_TRACKER_TABLE Table, TableEntry;
669     SIZE_T TableMask, TableSize;
670 
671     //
672     // Remove the PROTECTED_POOL flag which is not part of the tag
673     //
674     Key &= ~PROTECTED_POOL;
675 
676     //
677     // With WinDBG you can set a tag you want to break on when an allocation is
678     // attempted
679     //
680     if (Key == PoolHitTag) DbgBreakPoint();
681 
682     //
683     // Why the double indirection? Because normally this function is also used
684     // when doing session pool allocations, which has another set of tables,
685     // sizes, and masks that live in session pool. Now we don't support session
686     // pool so we only ever use the regular tables, but I'm keeping the code this
687     // way so that the day we DO support session pool, it won't require that
688     // many changes
689     //
690     Table = PoolTrackTable;
691     TableMask = PoolTrackTableMask;
692     TableSize = PoolTrackTableSize;
693     DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
694 
695     //
696     // Compute the hash for this key, and loop all the possible buckets
697     //
698     Hash = ExpComputeHashForTag(Key, TableMask);
699     Index = Hash;
700     while (TRUE)
701     {
702         //
703         // Have we found the entry for this tag?
704         //
705         TableEntry = &Table[Hash];
706         if (TableEntry->Key == Key)
707         {
708             //
709             // Decrement the counters depending on if this was paged or nonpaged
710             // pool
711             //
712             if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
713             {
714                 InterlockedIncrement(&TableEntry->NonPagedFrees);
715                 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
716                                             -(SSIZE_T)NumberOfBytes);
717                 return;
718             }
719             InterlockedIncrement(&TableEntry->PagedFrees);
720             InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
721                                         -(SSIZE_T)NumberOfBytes);
722             return;
723         }
724 
725         //
726         // We should have only ended up with an empty entry if we've reached
727         // the last bucket
728         //
729         if (!TableEntry->Key)
730         {
731             DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
732                     Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
733             ASSERT(Hash == TableMask);
734         }
735 
736         //
737         // This path is hit when we don't have an entry, and the current bucket
738         // is full, so we simply try the next one
739         //
740         Hash = (Hash + 1) & TableMask;
741         if (Hash == Index) break;
742     }
743 
744     //
745     // And finally this path is hit when all the buckets are full, and we need
746     // some expansion. This path is not yet supported in ReactOS and so we'll
747     // ignore the tag
748     //
749     DPRINT1("Out of pool tag space, ignoring...\n");
750 }
751 
752 VOID
753 NTAPI
754 ExpInsertPoolTracker(IN ULONG Key,
755                      IN SIZE_T NumberOfBytes,
756                      IN POOL_TYPE PoolType)
757 {
758     ULONG Hash, Index;
759     KIRQL OldIrql;
760     PPOOL_TRACKER_TABLE Table, TableEntry;
761     SIZE_T TableMask, TableSize;
762 
763     //
764     // Remove the PROTECTED_POOL flag which is not part of the tag
765     //
766     Key &= ~PROTECTED_POOL;
767 
768     //
769     // With WinDBG you can set a tag you want to break on when an allocation is
770     // attempted
771     //
772     if (Key == PoolHitTag) DbgBreakPoint();
773 
774     //
775     // There is also an internal flag you can set to break on malformed tags
776     //
777     if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
778 
779     //
780     // ASSERT on ReactOS features not yet supported
781     //
782     ASSERT(!(PoolType & SESSION_POOL_MASK));
783     ASSERT(KeGetCurrentProcessorNumber() == 0);
784 
785     //
786     // Why the double indirection? Because normally this function is also used
787     // when doing session pool allocations, which has another set of tables,
788     // sizes, and masks that live in session pool. Now we don't support session
789     // pool so we only ever use the regular tables, but I'm keeping the code this
790     // way so that the day we DO support session pool, it won't require that
791     // many changes
792     //
793     Table = PoolTrackTable;
794     TableMask = PoolTrackTableMask;
795     TableSize = PoolTrackTableSize;
796     DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);
797 
798     //
799     // Compute the hash for this key, and loop all the possible buckets
800     //
801     Hash = ExpComputeHashForTag(Key, TableMask);
802     Index = Hash;
803     while (TRUE)
804     {
805         //
806         // Do we already have an entry for this tag?
807         //
808         TableEntry = &Table[Hash];
809         if (TableEntry->Key == Key)
810         {
811             //
812             // Increment the counters depending on if this was paged or nonpaged
813             // pool
814             //
815             if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
816             {
817                 InterlockedIncrement(&TableEntry->NonPagedAllocs);
818                 InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
819                 return;
820             }
821             InterlockedIncrement(&TableEntry->PagedAllocs);
822             InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
823             return;
824         }
825 
826         //
827         // We don't have an entry yet, but we've found a free bucket for it
828         //
829         if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
830         {
831             //
832             // We need to hold the lock while creating a new entry, since other
833             // processors might be in this code path as well
834             //
835             ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
836             if (!PoolTrackTable[Hash].Key)
837             {
838                 //
839                 // We've won the race, so now create this entry in the bucket
840                 //
841                 ASSERT(Table[Hash].Key == 0);
842                 PoolTrackTable[Hash].Key = Key;
843                 TableEntry->Key = Key;
844             }
845             ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
846 
847             //
848             // Now we force the loop to run again, and we should now end up in
849             // the code path above which does the interlocked increments...
850             //
851             continue;
852         }
853 
854         //
855         // This path is hit when we don't have an entry, and the current bucket
856         // is full, so we simply try the next one
857         //
858         Hash = (Hash + 1) & TableMask;
859         if (Hash == Index) break;
860     }
861 
862     //
863     // And finally this path is hit when all the buckets are full, and we need
864     // some expansion. This path is not yet supported in ReactOS and so we'll
865     // ignore the tag
866     //
867     DPRINT1("Out of pool tag space, ignoring...\n");
868 }
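//
// Illustrative pairing (a sketch, not a new code path): the allocation and
// free paths call these two routines with the same tag, byte count and pool
// type, which is what keeps the per-tag counters balanced:
//
//   ExpInsertPoolTracker(Tag, BlockSize * POOL_BLOCK_SIZE, PoolType);
//   ...
//   ExpRemovePoolTracker(Tag, BlockSize * POOL_BLOCK_SIZE, PoolType);
//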
869 
870 VOID
871 NTAPI
872 INIT_SECTION
873 ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
874                            IN POOL_TYPE PoolType,
875                            IN ULONG PoolIndex,
876                            IN ULONG Threshold,
877                            IN PVOID PoolLock)
878 {
879     PLIST_ENTRY NextEntry, LastEntry;
880 
881     //
882     // Setup the descriptor based on the caller's request
883     //
884     PoolDescriptor->PoolType = PoolType;
885     PoolDescriptor->PoolIndex = PoolIndex;
886     PoolDescriptor->Threshold = Threshold;
887     PoolDescriptor->LockAddress = PoolLock;
888 
889     //
890     // Initialize accounting data
891     //
892     PoolDescriptor->RunningAllocs = 0;
893     PoolDescriptor->RunningDeAllocs = 0;
894     PoolDescriptor->TotalPages = 0;
895     PoolDescriptor->TotalBytes = 0;
896     PoolDescriptor->TotalBigPages = 0;
897 
898     //
899     // Nothing pending for now
900     //
901     PoolDescriptor->PendingFrees = NULL;
902     PoolDescriptor->PendingFreeDepth = 0;
903 
904     //
905     // Loop all the descriptor's allocation lists and initialize them
906     //
907     NextEntry = PoolDescriptor->ListHeads;
908     LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
909     while (NextEntry < LastEntry)
910     {
911         ExpInitializePoolListHead(NextEntry);
912         NextEntry++;
913     }
914 
915     //
916     // Note that ReactOS does not yet support session pool
917     //
918     ASSERT(PoolType != PagedPoolSession);
919 }
920 
921 VOID
922 NTAPI
923 INIT_SECTION
924 InitializePool(IN POOL_TYPE PoolType,
925                IN ULONG Threshold)
926 {
927     PPOOL_DESCRIPTOR Descriptor;
928     SIZE_T TableSize;
929     ULONG i;
930 
931     //
932     // Check what kind of pool this is
933     //
934     if (PoolType == NonPagedPool)
935     {
936         //
937         // Compute the track table size and convert it from a power of two to an
938         // actual byte size
939         //
940         // NOTE: On checked builds, we'll assert if the registry table size was
941         // invalid, while on retail builds we'll just break out of the loop at
942         // that point.
943         //
944         TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
945         for (i = 0; i < 32; i++)
946         {
947             if (TableSize & 1)
948             {
949                 ASSERT((TableSize & ~1) == 0);
950                 if (!(TableSize & ~1)) break;
951             }
952             TableSize >>= 1;
953         }
954 
955         //
956         // If we hit bit 32, then no size was defined in the registry, so
957         // we'll use the default size of 2048 entries.
958         //
959         // Otherwise, use the size from the registry, as long as it's not
960         // smaller than 64 entries.
961         //
962         if (i == 32)
963         {
964             PoolTrackTableSize = 2048;
965         }
966         else
967         {
968             PoolTrackTableSize = max(1 << i, 64);
969         }
970 
971         //
972         // Loop trying with the biggest specified size first, and cut it in
973         // half each iteration in case not enough memory exists
974         //
975         while (TRUE)
976         {
977             //
978             // Do not allow overflow
979             //
980             if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
981             {
982                 PoolTrackTableSize >>= 1;
983                 continue;
984             }
985 
986             //
987             // Allocate the tracker table and exit the loop if this worked
988             //
989             PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
990                                                  (PoolTrackTableSize + 1) *
991                                                  sizeof(POOL_TRACKER_TABLE));
992             if (PoolTrackTable) break;
993 
994             //
995             // Otherwise, as long as we're not down to the last bit, keep
996             // iterating
997             //
998             if (PoolTrackTableSize == 1)
999             {
1000                 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1001                              TableSize,
1002                              0xFFFFFFFF,
1003                              0xFFFFFFFF,
1004                              0xFFFFFFFF);
1005             }
1006             PoolTrackTableSize >>= 1;
1007         }
1008 
1009         //
1010         // Add one entry, compute the hash, and zero the table
1011         //
1012         PoolTrackTableSize++;
1013         PoolTrackTableMask = PoolTrackTableSize - 2;
1014 
1015         RtlZeroMemory(PoolTrackTable,
1016                       PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1017 
1018         //
1019         // Finally, add the most used tags to speed up those allocations
1020         //
1021         ExpSeedHotTags();
1022 
1023         //
1024         // We now do the exact same thing with the tracker table for big pages
1025         //
1026         TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
1027         for (i = 0; i < 32; i++)
1028         {
1029             if (TableSize & 1)
1030             {
1031                 ASSERT((TableSize & ~1) == 0);
1032                 if (!(TableSize & ~1)) break;
1033             }
1034             TableSize >>= 1;
1035         }
1036 
1037         //
1038         // For big pages, the default tracker table is 4096 entries, while the
1039         // minimum is still 64
1040         //
1041         if (i == 32)
1042         {
1043             PoolBigPageTableSize = 4096;
1044         }
1045         else
1046         {
1047             PoolBigPageTableSize = max(1 << i, 64);
1048         }
1049 
1050         //
1051         // Again, run the exact same loop we ran earlier, but this time for the
1052         // big pool tracker instead
1053         //
1054         while (TRUE)
1055         {
1056             if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
1057             {
1058                 PoolBigPageTableSize >>= 1;
1059                 continue;
1060             }
1061 
1062             PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
1063                                                    PoolBigPageTableSize *
1064                                                    sizeof(POOL_TRACKER_BIG_PAGES));
1065             if (PoolBigPageTable) break;
1066 
1067             if (PoolBigPageTableSize == 1)
1068             {
1069                 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1070                              TableSize,
1071                              0xFFFFFFFF,
1072                              0xFFFFFFFF,
1073                              0xFFFFFFFF);
1074             }
1075 
1076             PoolBigPageTableSize >>= 1;
1077         }
1078 
1079         //
1080         // An extra entry is not needed for the big pool tracker, so just
1081         // compute the hash and zero it
1082         //
1083         PoolBigPageTableHash = PoolBigPageTableSize - 1;
1084         RtlZeroMemory(PoolBigPageTable,
1085                       PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1086         for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
1087 
1088         //
1089         // During development, print this out so we can see what's happening
1090         //
1091         DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1092                 PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1093         DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1094                 PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1095 
1096         //
1097         // Insert the generic tracker for all of big pool
1098         //
1099         ExpInsertPoolTracker('looP',
1100                              ROUND_TO_PAGES(PoolBigPageTableSize *
1101                                             sizeof(POOL_TRACKER_BIG_PAGES)),
1102                              NonPagedPool);
1103 
1104         //
1105         // No support for NUMA systems at this time
1106         //
1107         ASSERT(KeNumberNodes == 1);
1108 
1109         //
1110         // Initialize the tag spinlock
1111         //
1112         KeInitializeSpinLock(&ExpTaggedPoolLock);
1113 
1114         //
1115         // Initialize the nonpaged pool descriptor
1116         //
1117         PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1118         ExInitializePoolDescriptor(PoolVector[NonPagedPool],
1119                                    NonPagedPool,
1120                                    0,
1121                                    Threshold,
1122                                    NULL);
1123     }
1124     else
1125     {
1126         //
1127         // No support for NUMA systems at this time
1128         //
1129         ASSERT(KeNumberNodes == 1);
1130 
1131         //
1132         // Allocate the pool descriptor
1133         //
1134         Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1135                                            sizeof(KGUARDED_MUTEX) +
1136                                            sizeof(POOL_DESCRIPTOR),
1137                                            'looP');
1138         if (!Descriptor)
1139         {
1140             //
1141             // This is really bad...
1142             //
1143             KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1144                          0,
1145                          -1,
1146                          -1,
1147                          -1);
1148         }
1149 
1150         //
1151         // Setup the vector and guarded mutex for paged pool
1152         //
1153         PoolVector[PagedPool] = Descriptor;
1154         ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1155         ExpPagedPoolDescriptor[0] = Descriptor;
1156         KeInitializeGuardedMutex(ExpPagedPoolMutex);
1157         ExInitializePoolDescriptor(Descriptor,
1158                                    PagedPool,
1159                                    0,
1160                                    Threshold,
1161                                    ExpPagedPoolMutex);
1162 
1163         //
1164         // Insert the generic tracker for all of nonpaged pool
1165         //
1166         ExpInsertPoolTracker('looP',
1167                              ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
1168                              NonPagedPool);
1169     }
1170 }
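//
// Expected call order (an assumption based on the branches above; the call
// sites live elsewhere in ARM3): nonpaged pool must come first, because the
// paged-pool branch allocates its descriptor and mutex from nonpaged pool:
//
//   InitializePool(NonPagedPool, 0);  // tracker tables + nonpaged descriptor
//   InitializePool(PagedPool, 0);     // descriptor allocated from nonpaged
//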
1171 
1172 FORCEINLINE
1173 KIRQL
1174 ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
1175 {
1176     //
1177     // Check if this is nonpaged pool
1178     //
1179     if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1180     {
1181         //
1182         // Use the queued spin lock
1183         //
1184         return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
1185     }
1186     else
1187     {
1188         //
1189         // Use the guarded mutex
1190         //
1191         KeAcquireGuardedMutex(Descriptor->LockAddress);
1192         return APC_LEVEL;
1193     }
1194 }
1195 
1196 FORCEINLINE
1197 VOID
1198 ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
1199              IN KIRQL OldIrql)
1200 {
1201     //
1202     // Check if this is nonpaged pool
1203     //
1204     if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1205     {
1206         //
1207         // Use the queued spin lock
1208         //
1209         KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
1210     }
1211     else
1212     {
1213         //
1214         // Use the guarded mutex
1215         //
1216         KeReleaseGuardedMutex(Descriptor->LockAddress);
1217     }
1218 }
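//
// Typical bracket around descriptor manipulation (sketch):
//
//   OldIrql = ExLockPool(PoolDesc);
//   /* ... manipulate PoolDesc->ListHeads ... */
//   ExUnlockPool(PoolDesc, OldIrql);
//
// Nonpaged pool is thus protected at DISPATCH_LEVEL by a queued spinlock,
// while paged pool stays at APC_LEVEL under its guarded mutex.
//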
1219 
1220 VOID
1221 NTAPI
1222 ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
1223                         IN PVOID DeferredContext,
1224                         IN PVOID SystemArgument1,
1225                         IN PVOID SystemArgument2)
1226 {
1227     PPOOL_DPC_CONTEXT Context = DeferredContext;
1228     UNREFERENCED_PARAMETER(Dpc);
1229     ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
1230 
1231     //
1232     // Make sure we win the race, and if we did, copy the data atomically
1233     //
1234     if (KeSignalCallDpcSynchronize(SystemArgument2))
1235     {
1236         RtlCopyMemory(Context->PoolTrackTable,
1237                       PoolTrackTable,
1238                       Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1239 
1240         //
1241         // This is here because ReactOS does not yet support expansion
1242         //
1243         ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1244     }
1245 
1246     //
1247     // Regardless of whether we won or not, we must now synchronize and then
1248     // decrement the barrier since this is one more processor that has completed
1249     // the callback.
1250     //
1251     KeSignalCallDpcSynchronize(SystemArgument2);
1252     KeSignalCallDpcDone(SystemArgument1);
1253 }
1254 
1255 NTSTATUS
1256 NTAPI
1257 ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
1258                  IN ULONG SystemInformationLength,
1259                  IN OUT PULONG ReturnLength OPTIONAL)
1260 {
1261     ULONG TableSize, CurrentLength;
1262     ULONG EntryCount;
1263     NTSTATUS Status = STATUS_SUCCESS;
1264     PSYSTEM_POOLTAG TagEntry;
1265     PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1266     POOL_DPC_CONTEXT Context;
1267     ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
1268 
1269     //
1270     // Keep track of how much data the caller's buffer must hold
1271     //
1272     CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1273 
1274     //
1275     // Initialize the caller's buffer
1276     //
1277     TagEntry = &SystemInformation->TagInfo[0];
1278     SystemInformation->Count = 0;
1279 
1280     //
1281     // Capture the number of entries, and the total size needed to make a copy
1282     // of the table
1283     //
1284     EntryCount = (ULONG)PoolTrackTableSize;
1285     TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1286 
1287     //
1288     // Allocate the "Generic DPC" temporary buffer
1289     //
1290     Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1291     if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1292 
1293     //
1294     // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1295     //
1296     Context.PoolTrackTable = Buffer;
1297     Context.PoolTrackTableSize = PoolTrackTableSize;
1298     Context.PoolTrackTableExpansion = NULL;
1299     Context.PoolTrackTableSizeExpansion = 0;
1300     KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);
1301 
1302     //
1303     // Now parse the results
1304     //
1305     for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1306     {
1307         //
1308         // If the entry is empty, skip it
1309         //
1310         if (!TrackerEntry->Key) continue;
1311 
1312         //
1313         // Otherwise, add one more entry to the caller's buffer, and ensure that
1314         // enough space has been allocated in it
1315         //
1316         SystemInformation->Count++;
1317         CurrentLength += sizeof(*TagEntry);
1318         if (SystemInformationLength < CurrentLength)
1319         {
1320             //
1321             // The caller's buffer is too small, so set a failure code. The
1322             // caller will know the count, as well as how much space is needed.
1323             //
1324             // We do NOT break out of the loop, because we want to keep incrementing
1325             // the Count as well as CurrentLength so that the caller can know the
1326             // final numbers
1327             //
1328             Status = STATUS_INFO_LENGTH_MISMATCH;
1329         }
1330         else
1331         {
1332             //
1333             // Small sanity check that our accounting is working correctly
1334             //
1335             ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1336             ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1337 
1338             //
1339             // Return the data into the caller's buffer
1340             //
1341             TagEntry->TagUlong = TrackerEntry->Key;
1342             TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1343             TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1344             TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1345             TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1346             TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1347             TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1348             TagEntry++;
1349         }
1350     }
1351 
1352     //
1353     // Free the "Generic DPC" temporary buffer, return the buffer length and status
1354     //
1355     ExFreePoolWithTag(Buffer, 'ofnI');
1356     if (ReturnLength) *ReturnLength = CurrentLength;
1357     return Status;
1358 }
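//
// Caller's view (an assumption, since the caller is not in this file: in
// ReactOS this routine backs the SystemPoolTagInformation class of
// NtQuerySystemInformation). A typical caller probes and then retries:
//
//   Status = ExGetPoolTagInfo(Info, Length, &ReturnLength);
//   if (Status == STATUS_INFO_LENGTH_MISMATCH)
//   {
//       /* reallocate with ReturnLength bytes and call again */
//   }
//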
1359 
1360 BOOLEAN
1361 NTAPI
1362 ExpAddTagForBigPages(IN PVOID Va,
1363                      IN ULONG Key,
1364                      IN ULONG NumberOfPages,
1365                      IN POOL_TYPE PoolType)
1366 {
1367     ULONG Hash, i = 0;
1368     PVOID OldVa;
1369     KIRQL OldIrql;
1370     SIZE_T TableSize;
1371     PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1372     ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1373     ASSERT(!(PoolType & SESSION_POOL_MASK));
1374 
1375     //
1376     // As the table is expandable, these values must only be read after acquiring
1377     // the lock to avoid a teared access during an expansion
1378     // the lock to avoid a torn access during an expansion
1379     Hash = ExpComputePartialHashForAddress(Va);
1380     KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1381     Hash &= PoolBigPageTableHash;
1382     TableSize = PoolBigPageTableSize;
1383 
1384     //
1385     // We loop from the current hash bucket to the end of the table, and then
1386     // rollover to hash bucket 0 and keep going from there. If we return back
1387     // to the beginning, then we attempt expansion at the bottom of the loop
1388     //
1389     EntryStart = Entry = &PoolBigPageTable[Hash];
1390     EntryEnd = &PoolBigPageTable[TableSize];
1391     do
1392     {
1393         //
1394         // Make sure that this is a free entry and attempt to atomically make the
1395         // entry busy now
1396         //
1397         OldVa = Entry->Va;
1398         if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1399             (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
1400         {
1401             //
1402             // We now own this entry, write down the size and the pool tag
1403             //
1404             Entry->Key = Key;
1405             Entry->NumberOfPages = NumberOfPages;
1406 
1407             //
1408             // Add one more entry to the count, and see if we're now using
1409             // more than 25% of the table, at which point we'd want to expand
1410             // now to avoid blocking too hard later on.
1411             //
1412             // Note that we only do this if we've also looped at least 16 times
1413             // while losing the race or failing to find a free entry, which
1414             // implies a massive number of concurrent big pool allocations.
1415             //
1416             InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
1417             if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1418             {
1419                 DPRINT("Should attempt expansion since we now have %lu entries\n",
1420                         ExpPoolBigEntriesInUse);
1421             }
1422 
1423             //
1424             // We have our entry, return
1425             //
1426             KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1427             return TRUE;
1428         }
1429 
1430         //
1431         // We don't have our entry yet, so keep trying, making the entry list
1432         // circular if we reach the last entry. We'll eventually break out of
1433         // the loop once we've rolled over and returned back to our original
1434         // hash bucket
1435         //
1436         i++;
1437         if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1438     } while (Entry != EntryStart);
1439 
1440     //
1441     // This means there are no free hash buckets whatsoever, so we would now have
1442     // to attempt expanding the table
1443     //
1444     DPRINT1("Big pool expansion needed, not implemented!\n");
1445     KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1446     return FALSE;
1447 }
1448 
1449 ULONG
1450 NTAPI
1451 ExpFindAndRemoveTagBigPages(IN PVOID Va,
1452                             OUT PULONG_PTR BigPages,
1453                             IN POOL_TYPE PoolType)
1454 {
1455     BOOLEAN FirstTry = TRUE;
1456     SIZE_T TableSize;
1457     KIRQL OldIrql;
1458     ULONG PoolTag, Hash;
1459     PPOOL_TRACKER_BIG_PAGES Entry;
1460     ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
1461     ASSERT(!(PoolType & SESSION_POOL_MASK));
1462 
1463     //
1464     // As the table is expandable, these values must only be read after acquiring
1465     // the lock to avoid a torn access during an expansion
1466     //
1467     Hash = ExpComputePartialHashForAddress(Va);
1468     KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
1469     Hash &= PoolBigPageTableHash;
1470     TableSize = PoolBigPageTableSize;
1471 
1472     //
1473     // Loop while trying to find this big page allocation
1474     //
1475     while (PoolBigPageTable[Hash].Va != Va)
1476     {
1477         //
1478         // Increment the hash index until we go past the end of the table
1479         //
1480         if (++Hash >= TableSize)
1481         {
1482             //
1483             // Is this the second time we've tried?
1484             //
1485             if (!FirstTry)
1486             {
1487                 //
1488                 // This means it was never inserted into the pool table and it
1489                 // received the special "BIG" tag -- return that and return 0
1490                 // so that the code can ask Mm for the page count instead
1491                 //
1492                 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1493                 *BigPages = 0;
1494                 return ' GIB';
1495             }
1496 
1497             //
1498             // The first time this happens, reset the hash index and try again
1499             //
1500             Hash = 0;
1501             FirstTry = FALSE;
1502         }
1503     }
1504 
1505     //
1506     // Now capture all the information we need from the entry, since after we
1507     // release the lock, the data can change
1508     //
1509     Entry = &PoolBigPageTable[Hash];
1510     *BigPages = Entry->NumberOfPages;
1511     PoolTag = Entry->Key;
1512 
1513     //
1514     // Set the free bit, and decrement the number of allocations. Finally, release
1515     // the lock and return the tag that was located
1516     //
1517     InterlockedIncrement((PLONG)&Entry->Va);
1518     InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
1519     KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
1520     return PoolTag;
1521 }
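//
// Note on the free bit (informational): the InterlockedIncrement above works
// because a live Va is page-aligned, so adding one sets exactly
// POOL_BIG_TABLE_ENTRY_FREE (bit 0). This matches InitializePool, which
// pre-marks every slot with Va = (PVOID)1, and ExpAddTagForBigPages, which
// only claims a slot whose free bit is set.
//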
1522 
1523 VOID
1524 NTAPI
1525 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1526                  OUT PULONG NonPagedPoolPages,
1527                  OUT PULONG PagedPoolAllocs,
1528                  OUT PULONG PagedPoolFrees,
1529                  OUT PULONG PagedPoolLookasideHits,
1530                  OUT PULONG NonPagedPoolAllocs,
1531                  OUT PULONG NonPagedPoolFrees,
1532                  OUT PULONG NonPagedPoolLookasideHits)
1533 {
1534     ULONG i;
1535     PPOOL_DESCRIPTOR PoolDesc;
1536 
1537     //
1538     // Assume all failures
1539     //
1540     *PagedPoolPages = 0;
1541     *PagedPoolAllocs = 0;
1542     *PagedPoolFrees = 0;
1543 
1544     //
1545     // Tally up the totals for all the paged pools
1546     //
1547     for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1548     {
1549         PoolDesc = ExpPagedPoolDescriptor[i];
1550         *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1551         *PagedPoolAllocs += PoolDesc->RunningAllocs;
1552         *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1553     }
1554 
1555     //
1556     // The first non-paged pool has a hardcoded well-known descriptor name
1557     //
1558     PoolDesc = &NonPagedPoolDescriptor;
1559     *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1560     *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1561     *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1562 
1563     //
1564     // If the system has more than one non-paged pool, copy the other descriptor
1565     // totals as well
1566     //
1567 #if 0
1568     if (ExpNumberOfNonPagedPools > 1)
1569     {
1570         for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1571         {
1572             PoolDesc = ExpNonPagedPoolDescriptor[i];
1573             *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1574             *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1575             *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1576         }
1577     }
1578 #endif
1579 
1580     //
1581     // FIXME: Not yet supported
1582     //
1583     *NonPagedPoolLookasideHits += 0;
1584     *PagedPoolLookasideHits += 0;
1585 }
1586 
1587 VOID
1588 NTAPI
1589 ExReturnPoolQuota(IN PVOID P)
1590 {
1591     PPOOL_HEADER Entry;
1592     POOL_TYPE PoolType;
1593     USHORT BlockSize;
1594     PEPROCESS Process;
1595 
1596     if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
1597         (MmIsSpecialPoolAddress(P)))
1598     {
1599         return;
1600     }
1601 
1602     Entry = P;
1603     Entry--;
1604     ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
1605 
1606     PoolType = Entry->PoolType - 1;
1607     BlockSize = Entry->BlockSize;
1608 
1609     if (PoolType & QUOTA_POOL_MASK)
1610     {
1611         Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1612         ASSERT(Process != NULL);
1613         if (Process)
1614         {
1615             if (Process->Pcb.Header.Type != ProcessObject)
1616             {
1617                 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1618                         Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1619                 KeBugCheckEx(BAD_POOL_CALLER,
1620                              0x0D,
1621                              (ULONG_PTR)P,
1622                              Entry->PoolTag,
1623                              (ULONG_PTR)Process);
1624             }
1625             ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1626             PsReturnPoolQuota(Process,
1627                               PoolType & BASE_POOL_TYPE_MASK,
1628                               BlockSize * POOL_BLOCK_SIZE);
1629             ObDereferenceObject(Process);
1630         }
1631     }
1632 }
1633 
1634 /* PUBLIC FUNCTIONS ***********************************************************/
1635 
1636 /*
1637  * @implemented
1638  */
1639 PVOID
1640 NTAPI
1641 ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
1642                       IN SIZE_T NumberOfBytes,
1643                       IN ULONG Tag)
1644 {
1645     PPOOL_DESCRIPTOR PoolDesc;
1646     PLIST_ENTRY ListHead;
1647     PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1648     KIRQL OldIrql;
1649     USHORT BlockSize, i;
1650     ULONG OriginalType;
1651     PKPRCB Prcb = KeGetCurrentPrcb();
1652     PGENERAL_LOOKASIDE LookasideList;
1653 
1654     //
1655     // Some sanity checks
1656     //
1657     ASSERT(Tag != 0);
1658     ASSERT(Tag != ' GIB');
1659     ASSERT(NumberOfBytes != 0);
1660     ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);
1661 
1662     //
1663     // Not supported in ReactOS
1664     //
1665     ASSERT(!(PoolType & SESSION_POOL_MASK));
1666 
1667     //
1668     // Check if verifier or special pool is enabled
1669     //
1670     if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
1671     {
1672         //
1673         // For verifier, we should call the verification routine
1674         //
1675         if (ExpPoolFlags & POOL_FLAG_VERIFIER)
1676         {
1677             DPRINT1("Driver Verifier is not yet supported\n");
1678         }
1679 
1680         //
1681         // For special pool, we check if this is a suitable allocation and do
1682         // the special allocation if needed
1683         //
1684         if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
1685         {
1686             //
1687             // Check if this is a special pool allocation
1688             //
1689             if (MmUseSpecialPool(NumberOfBytes, Tag))
1690             {
1691                 //
1692                 // Try to allocate using special pool
1693                 //
1694                 Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
1695                 if (Entry) return Entry;
1696             }
1697         }
1698     }
1699 
1700     //
1701     // Get the pool type and its corresponding vector for this request
1702     //
1703     OriginalType = PoolType;
1704     PoolType = PoolType & BASE_POOL_TYPE_MASK;
1705     PoolDesc = PoolVector[PoolType];
1706     ASSERT(PoolDesc != NULL);
1707 
1708     //
1709     // Check if this is a big page allocation
1710     //
1711     if (NumberOfBytes > POOL_MAX_ALLOC)
1712     {
1713         //
1714         // Allocate pages for it
1715         //
1716         Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1717         if (!Entry)
1718         {
1719 #if DBG
1720             //
1721             // If the failed request was for nonpaged pool, display current consumption
1722             //
1723             if ((OriginalType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1724             {
1725                 MiDumpNonPagedPoolConsumers();
1726             }
1727 #endif
1728 
1729             //
1730             // Must succeed pool is deprecated, but still supported. These allocation
1731             // failures must cause an immediate bugcheck
1732             //
1733             if (OriginalType & MUST_SUCCEED_POOL_MASK)
1734             {
1735                 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1736                              NumberOfBytes,
1737                              NonPagedPoolDescriptor.TotalPages,
1738                              NonPagedPoolDescriptor.TotalBigPages,
1739                              0);
1740             }
1741 
1742             //
1743             // Internal debugging
1744             //
1745             ExPoolFailures++;
1746 
1747             //
1748             // This flag requests printing failures, and can also further specify
1749             // breaking on failures
1750             //
1751             if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
1752             {
1753                 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1754                         NumberOfBytes,
1755                         OriginalType);
1756                 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
1757             }
1758 
1759             //
1760             // Finally, this flag requests an exception, which we are more than
1761             // happy to raise!
1762             //
1763             if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1764             {
1765                 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
1766             }
1767 
1768             return NULL;
1769         }
1770 
1771         //
1772         // Increment required counters
1773         //
1774         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
1775                                (LONG)BYTES_TO_PAGES(NumberOfBytes));
1776         InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
1777         InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
1778 
1779         //
1780         // Add a tag for the big page allocation, switching to the generic "BIG"
1781         // tag if that failed, then insert a tracker for this allocation.
1782         //
1783         if (!ExpAddTagForBigPages(Entry,
1784                                   Tag,
1785                                   (ULONG)BYTES_TO_PAGES(NumberOfBytes),
1786                                   OriginalType))
1787         {
1788             Tag = ' GIB';
1789         }
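
        //
        // Illustrative arithmetic (assuming 4KB pages): the tracker below is
        // charged the page-rounded size, so a 5000-byte request is tracked
        // as ROUND_TO_PAGES(5000) == 8192 bytes, matching the two pages that
        // BYTES_TO_PAGES(5000) yielded above.
        //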
1790         ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
1791         return Entry;
1792     }
1793 
1794     //
1795     // Should never request 0 bytes from the pool, but since so many drivers do
1796     // it, we'll just assume they want 1 byte, based on NT's similar behavior
1797     //
1798     if (!NumberOfBytes) NumberOfBytes = 1;
1799 
1800     //
1801     // A pool allocation is defined by its data, a linked list to connect it to
1802     // the free list (if necessary), and a pool header to store accounting info.
1803     // Calculate this size, then convert it into a block size (units of pool
1804     // headers)
1805     //
1806     // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any
1807     // larger request would've exceeded POOL_MAX_ALLOC earlier and been
1808     // satisfied by a direct page allocation instead.
1809     //
1810     i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
1811                  / POOL_BLOCK_SIZE);
1812     ASSERT(i < POOL_LISTS_PER_PAGE);
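
    //
    // Worked example (illustrative, assuming the 8-byte POOL_HEADER and
    // POOL_BLOCK_SIZE of x86 builds): a 100-byte request yields
    // i = (100 + 8 + 7) / 8 = 14 block units, i.e. 112 bytes including
    // the header.
    //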
1813 
1814     //
1815     // Handle lookaside list optimization for both paged and nonpaged pool
1816     //
1817     if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
1818     {
1819         //
1820         // Try popping it from the per-CPU lookaside list
1821         //
1822         LookasideList = (PoolType == PagedPool) ?
1823                          Prcb->PPPagedLookasideList[i - 1].P :
1824                          Prcb->PPNPagedLookasideList[i - 1].P;
1825         LookasideList->TotalAllocates++;
1826         Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1827         if (!Entry)
1828         {
1829             //
1830             // We failed, try popping it from the global list
1831             //
1832             LookasideList = (PoolType == PagedPool) ?
1833                              Prcb->PPPagedLookasideList[i - 1].L :
1834                              Prcb->PPNPagedLookasideList[i - 1].L;
1835             LookasideList->TotalAllocates++;
1836             Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
1837         }
1838 
1839         //
1840         // If we were able to pop it, update the accounting and return the block
1841         //
1842         if (Entry)
1843         {
1844             LookasideList->AllocateHits++;
1845 
1846             //
1847             // Get the real entry, write down its pool type, and track it
1848             //
1849             Entry--;
1850             Entry->PoolType = OriginalType + 1;
1851             ExpInsertPoolTracker(Tag,
1852                                  Entry->BlockSize * POOL_BLOCK_SIZE,
1853                                  OriginalType);
1854 
1855             //
1856             // Return the pool allocation
1857             //
1858             Entry->PoolTag = Tag;
1859             (POOL_FREE_BLOCK(Entry))->Flink = NULL;
1860             (POOL_FREE_BLOCK(Entry))->Blink = NULL;
1861             return POOL_FREE_BLOCK(Entry);
1862         }
1863     }
1864 
1865     //
1866     // Loop over the free lists looking for a block of this size. Start with
1867     // the list optimized for this size of lookup
1868     //
1869     ListHead = &PoolDesc->ListHeads[i];
1870     do
1871     {
1872         //
1873         // Are there any free entries available on this list?
1874         //
1875         if (!ExpIsPoolListEmpty(ListHead))
1876         {
1877             //
1878             // Acquire the pool lock now
1879             //
1880             OldIrql = ExLockPool(PoolDesc);
1881 
1882             //
1883             // And make sure the list still has entries
1884             //
1885             if (ExpIsPoolListEmpty(ListHead))
1886             {
1887                 //
1888                 // Someone raced us (and won) before we had a chance to acquire
1889                 // the lock.
1890                 //
1891                 // Try again!
1892                 //
1893                 ExUnlockPool(PoolDesc, OldIrql);
1894                 continue;
1895             }
1896 
1897             //
1898             // Remove a free entry from the list
1899             // Note that due to the way we insert free blocks into multiple
1900             // lists, any block on this list is guaranteed to be of the
1901             // requested size or larger.
1902             //
1903             ExpCheckPoolLinks(ListHead);
1904             Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
1905             ExpCheckPoolLinks(ListHead);
1906             ExpCheckPoolBlocks(Entry);
1907             ASSERT(Entry->BlockSize >= i);
1908             ASSERT(Entry->PoolType == 0);
1909 
1910             //
1911             // Check if this block is larger than what we need. The block could
1912             // not possibly be smaller, due to the reason explained above (and
1913             // we would've asserted on a checked build if that were the case).
1914             //
1915             if (Entry->BlockSize != i)
1916             {
1917                 //
1918                 // Is there an entry before this one?
1919                 //
1920                 if (Entry->PreviousSize == 0)
1921                 {
1922                     //
1923                     // There isn't anyone before us, so take the next block and
1924                     // turn it into a fragment that contains the leftover data
1925                     // that we don't need to satisfy the caller's request
1926                     //
1927                     FragmentEntry = POOL_BLOCK(Entry, i);
1928                     FragmentEntry->BlockSize = Entry->BlockSize - i;
1929 
1930                     //
1931                     // And make it point back to us
1932                     //
1933                     FragmentEntry->PreviousSize = i;
1934 
1935                     //
1936                     // Now get the block that follows the new fragment and check
1937                     // if it's still on the same page as us (and not at the end)
1938                     //
1939                     NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
1940                     if (PAGE_ALIGN(NextEntry) != NextEntry)
1941                     {
1942                         //
1943                         // Adjust this next block to point to our newly created
1944                         // fragment block
1945                         //
1946                         NextEntry->PreviousSize = FragmentEntry->BlockSize;
1947                     }
1948                 }
1949                 else
1950                 {
1951                     //
1952                     // There is a free entry before us, which we know is smaller
1953                     // so we'll make this entry the fragment instead
1954                     //
1955                     FragmentEntry = Entry;
1956 
1957                     //
1958                     // And then we'll remove from it the actual size required.
1959                     // Now the entry is a leftover free fragment
1960                     //
1961                     Entry->BlockSize -= i;
1962 
1963                     //
1964                     // Now let's go to the next entry after the fragment (which
1965                     // used to point to our original free entry) and make it
1966                     // reference the new fragment entry instead.
1967                     //
1968                     // This is the entry that will actually end up holding the
1969                     // allocation!
1970                     //
1971                     Entry = POOL_NEXT_BLOCK(Entry);
1972                     Entry->PreviousSize = FragmentEntry->BlockSize;
1973 
1974                     //
1975                     // And now let's go to the entry after that one and check if
1976                     // it's still on the same page, and not at the end
1977                     //
1978                     NextEntry = POOL_BLOCK(Entry, i);
1979                     if (PAGE_ALIGN(NextEntry) != NextEntry)
1980                     {
1981                         //
1982                         // Make it reference the allocation entry
1983                         //
1984                         NextEntry->PreviousSize = i;
1985                     }
1986                 }
1987 
1988                 //
1989                 // Now our (allocation) entry is the right size
1990                 //
1991                 Entry->BlockSize = i;
1992 
1993                 //
1994                 // And the next entry is now the free fragment which contains
1995                 // the remaining difference between how big the original entry
1996                 // was, and the actual size the caller needs/requested.
1997                 //
1998                 FragmentEntry->PoolType = 0;
1999                 BlockSize = FragmentEntry->BlockSize;
2000 
2001                 //
2002                 // Now check if enough free bytes remained for us to have a
2003                 // "full" entry, which contains enough bytes for a linked list
2004                 // and thus can be used for allocations (up to 8 bytes...)
2005                 //
2006                 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2007                 if (BlockSize != 1)
2008                 {
2009                     //
2010                     // Insert the free entry into the free list for this size
2011                     //
2012                     ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2013                                           POOL_FREE_BLOCK(FragmentEntry));
2014                     ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2015                 }
2016             }
2017 
2018             //
2019             // We have found an entry for this allocation, so set the pool type
2020             // and release the lock since we're done
2021             //
2022             Entry->PoolType = OriginalType + 1;
2023             ExpCheckPoolBlocks(Entry);
2024             ExUnlockPool(PoolDesc, OldIrql);
2025 
2026             //
2027             // Increment required counters
2028             //
2029             InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2030             InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2031 
2032             //
2033             // Track this allocation
2034             //
2035             ExpInsertPoolTracker(Tag,
2036                                  Entry->BlockSize * POOL_BLOCK_SIZE,
2037                                  OriginalType);
2038 
2039             //
2040             // Return the pool allocation
2041             //
2042             Entry->PoolTag = Tag;
2043             (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2044             (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2045             return POOL_FREE_BLOCK(Entry);
2046         }
2047     } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2048 
2049     //
2050     // There were no free entries left, so we have to allocate a fresh page
2051     //
2052     Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2053     if (!Entry)
2054     {
2055 #if DBG
2056         //
2057         // If the failed request was for nonpaged pool, display current consumption
2058         //
2059         if ((OriginalType & BASE_POOL_TYPE_MASK) == NonPagedPool)
2060         {
2061             MiDumpNonPagedPoolConsumers();
2062         }
2063 #endif
2064 
2065         //
2066         // Must succeed pool is deprecated, but still supported. These allocation
2067         // failures must cause an immediate bugcheck
2068         //
2069         if (OriginalType & MUST_SUCCEED_POOL_MASK)
2070         {
2071             KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2072                          PAGE_SIZE,
2073                          NonPagedPoolDescriptor.TotalPages,
2074                          NonPagedPoolDescriptor.TotalBigPages,
2075                          0);
2076         }
2077 
2078         //
2079         // Internal debugging
2080         //
2081         ExPoolFailures++;
2082 
2083         //
2084         // This flag requests printing failures, and can also further specify
2085         // breaking on failures
2086         //
2087         if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
2088         {
2089             DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2090                     NumberOfBytes,
2091                     OriginalType);
2092             if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
2093         }
2094 
2095         //
2096         // Finally, this flag requests an exception, which we are more than
2097         // happy to raise!
2098         //
2099         if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2100         {
2101             ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2102         }
2103 
2104         //
2105         // Return NULL to the caller in all other cases
2106         //
2107         return NULL;
2108     }
2109 
2110     //
2111     // Setup the entry data
2112     //
2113     Entry->Ulong1 = 0;
2114     Entry->BlockSize = i;
2115     Entry->PoolType = OriginalType + 1;
2116 
2117     //
2118     // This page will have two entries -- one for the allocation (which we just
2119     // created above), and one for the remaining free bytes, which we're about
2120     // to create now. The free bytes are the whole page minus what was allocated
2121     // and then converted into units of block headers.
2122     //
2123     BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2124     FragmentEntry = POOL_BLOCK(Entry, i);
2125     FragmentEntry->Ulong1 = 0;
2126     FragmentEntry->BlockSize = BlockSize;
2127     FragmentEntry->PreviousSize = i;
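
    //
    // Worked example (illustrative, assuming 4KB pages and 8-byte blocks):
    // a fresh page holds 4096 / 8 = 512 block units, so the 14-block
    // allocation from the earlier example leaves a 498-block free fragment.
    //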
2128 
2129     //
2130     // Increment required counters
2131     //
2132     InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2133     InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2134 
2135     //
2136     // Now check if enough free bytes remained for us to have a "full" entry,
2137     // which contains enough bytes for a linked list and thus can be used for
2138     // allocations (up to 8 bytes...)
2139     //
2140     if (FragmentEntry->BlockSize != 1)
2141     {
2142         //
2143         // Excellent -- acquire the pool lock
2144         //
2145         OldIrql = ExLockPool(PoolDesc);
2146 
2147         //
2148         // And insert the free entry into the free list for this block size
2149         //
2150         ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2151         ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2152                               POOL_FREE_BLOCK(FragmentEntry));
2153         ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2154 
2155         //
2156         // Release the pool lock
2157         //
2158         ExpCheckPoolBlocks(Entry);
2159         ExUnlockPool(PoolDesc, OldIrql);
2160     }
2161     else
2162     {
2163         //
2164         // Simply do a sanity check
2165         //
2166         ExpCheckPoolBlocks(Entry);
2167     }
2168 
2169     //
2170     // Increment performance counters and track this allocation
2171     //
2172     InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
2173     ExpInsertPoolTracker(Tag,
2174                          Entry->BlockSize * POOL_BLOCK_SIZE,
2175                          OriginalType);
2176 
2177     //
2178     // And return the pool allocation
2179     //
2180     ExpCheckPoolBlocks(Entry);
2181     Entry->PoolTag = Tag;
2182     return POOL_FREE_BLOCK(Entry);
2183 }
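
/*
 * Illustrative usage (a hypothetical driver-side snippet, not part of this
 * module): allocations are normally paired with a free using the same tag.
 * On little-endian builds the four-character tag literal is stored reversed,
 * so 'tseT' displays as "Test" in pool dumps:
 *
 *     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'tseT');
 *     if (Buffer)
 *     {
 *         // ... use the buffer ...
 *         ExFreePoolWithTag(Buffer, 'tseT');
 *     }
 */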
2184 
2185 /*
2186  * @implemented
2187  */
2188 PVOID
2189 NTAPI
2190 ExAllocatePool(POOL_TYPE PoolType,
2191                SIZE_T NumberOfBytes)
2192 {
2193     ULONG Tag = TAG_NONE;
2194 #if 0 && DBG
2195     PLDR_DATA_TABLE_ENTRY LdrEntry;
2196 
2197     /* Use the first four letters of the driver name, or "None" if unavailable */
2198     LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2199                 ? MiLookupDataTableEntry(_ReturnAddress())
2200                 : NULL;
2201     if (LdrEntry)
2202     {
2203         ULONG i;
2204         Tag = 0;
2205         for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2206             Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2207         for (; i < 4; i++)
2208             Tag = Tag >> 8 | ' ' << 24;
2209     }
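
    /*
     * Illustrative example (hypothetical driver name): for "ntfs.sys" the
     * loops above pack 'n', 't', 'f', 's' from low byte to high byte, so
     * the tag displays as "ntfs" in pool dumps; names shorter than four
     * characters are padded with trailing spaces.
     */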
2210 #endif
2211     return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2212 }
2213 
2214 /*
2215  * @implemented
2216  */
2217 VOID
2218 NTAPI
2219 ExFreePoolWithTag(IN PVOID P,
2220                   IN ULONG TagToFree)
2221 {
2222     PPOOL_HEADER Entry, NextEntry;
2223     USHORT BlockSize;
2224     KIRQL OldIrql;
2225     POOL_TYPE PoolType;
2226     PPOOL_DESCRIPTOR PoolDesc;
2227     ULONG Tag;
2228     BOOLEAN Combined = FALSE;
2229     PFN_NUMBER PageCount, RealPageCount;
2230     PKPRCB Prcb = KeGetCurrentPrcb();
2231     PGENERAL_LOOKASIDE LookasideList;
2232     PEPROCESS Process;
2233 
2234     //
2235     // Check if any of the debug flags are enabled
2236     //
2237     if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2238                         POOL_FLAG_CHECK_WORKERS |
2239                         POOL_FLAG_CHECK_RESOURCES |
2240                         POOL_FLAG_VERIFIER |
2241                         POOL_FLAG_CHECK_DEADLOCK |
2242                         POOL_FLAG_SPECIAL_POOL))
2243     {
2244         //
2245         // Check if special pool is enabled
2246         //
2247         if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
2248         {
2249             //
2250             // Check if it was allocated from a special pool
2251             //
2252             if (MmIsSpecialPoolAddress(P))
2253             {
2254                 //
2255                 // Was deadlock verification also enabled? We can do some extra
2256                 // checks at this point
2257                 //
2258                 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2259                 {
2260                     DPRINT1("Verifier not yet supported\n");
2261                 }
2262 
2263                 //
2264                 // It is, so handle it via special pool free routine
2265                 //
2266                 MmFreeSpecialPool(P);
2267                 return;
2268             }
2269         }
2270 
2271         //
2272         // For non-big page allocations, we'll do a bunch of checks in here
2273         //
2274         if (PAGE_ALIGN(P) != P)
2275         {
2276             //
2277             // Get the entry for this pool allocation
2278             // The pointer math here may look wrong or confusing, but it is quite right
2279             //
2280             Entry = P;
2281             Entry--;
2282 
2283             //
2284             // Get the pool type
2285             //
2286             PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2287 
2288             //
2289             // FIXME: Many other debugging checks go here
2290             //
2291             ExpCheckPoolIrqlLevel(PoolType, 0, P);
2292         }
2293     }
2294 
2295     //
2296     // Check if this is a big page allocation
2297     //
2298     if (PAGE_ALIGN(P) == P)
2299     {
2300         //
2301         // We need to find the tag for this allocation, so first determine what
2302         // kind of allocation this was (paged or nonpaged), then look up its
2303         // tag. Remember to strip the PROTECTED_POOL bit from the tag if it
2304         // is set.
2305         //
2306         // Note that if at insertion time, we failed to add the tag for a big
2307         // pool allocation, we used a special tag called 'BIG' to identify the
2308         // allocation, and we may get this tag back. In this scenario, we must
2309         // manually get the size of the allocation by actually counting through
2310         // the PFN database.
2311         //
2312         PoolType = MmDeterminePoolType(P);
2313         ExpCheckPoolIrqlLevel(PoolType, 0, P);
2314         Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2315         if (!Tag)
2316         {
2317             DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2318             ASSERT(Tag == ' GIB');
2319             PageCount = 1; // We are going to lie! This might screw up accounting?
2320         }
2321         else if (Tag & PROTECTED_POOL)
2322         {
2323             Tag &= ~PROTECTED_POOL;
2324         }
2325 
2326         //
2327         // Check block tag
2328         //
2329         if (TagToFree && TagToFree != Tag)
2330         {
2331             DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2332             KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2333         }
2334 
2335         //
2336         // We have our tag and our page count, so we can go ahead and remove this
2337         // tracker now
2338         //
2339         ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
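
        //
        // Illustrative arithmetic for the call above (assuming 4KB pages):
        // PageCount << PAGE_SHIFT converts pages back to bytes, e.g.
        // 3 << 12 == 12288 bytes for a three-page allocation.
        //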
2340 
2341         //
2342         // Check if any of the debug flags are enabled
2343         //
2344         if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
2345                             POOL_FLAG_CHECK_WORKERS |
2346                             POOL_FLAG_CHECK_RESOURCES |
2347                             POOL_FLAG_CHECK_DEADLOCK))
2348         {
2349             //
2350             // Was deadlock verification also enabled? We can do some extra
2351             // checks at this point
2352             //
2353             if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
2354             {
2355                 DPRINT1("Verifier not yet supported\n");
2356             }
2357 
2358             //
2359             // FIXME: Many debugging checks go here
2360             //
2361         }
2362 
2363         //
2364         // Update counters
2365         //
2366         PoolDesc = PoolVector[PoolType];
2367         InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2368         InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
2369                                     -(LONG_PTR)(PageCount << PAGE_SHIFT));
2370 
2371         //
2372         // Do the real free now and update the last counter with the big page count
2373         //
2374         RealPageCount = MiFreePoolPages(P);
2375         ASSERT(RealPageCount == PageCount);
2376         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
2377                                -(LONG)RealPageCount);
2378         return;
2379     }
2380 
2381     //
2382     // Get the entry for this pool allocation
2383     // The pointer math here may look wrong or confusing, but it is quite right
2384     //
2385     Entry = P;
2386     Entry--;
2387     ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);
2388 
2389     //
2390     // Get the size of the entry and its pool type, then load the descriptor
2391     // for this pool type
2392     //
2393     BlockSize = Entry->BlockSize;
2394     PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2395     PoolDesc = PoolVector[PoolType];
2396 
2397     //
2398     // Make sure that the IRQL makes sense
2399     //
2400     ExpCheckPoolIrqlLevel(PoolType, 0, P);
2401 
2402     //
2403     // Get the pool tag and get rid of the PROTECTED_POOL flag
2404     //
2405     Tag = Entry->PoolTag;
2406     if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2407 
2408     //
2409     // Check block tag
2410     //
2411     if (TagToFree && TagToFree != Tag)
2412     {
2413         DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2414         KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2415     }
2416 
2417     //
2418     // Track the removal of this allocation
2419     //
2420     ExpRemovePoolTracker(Tag,
2421                          BlockSize * POOL_BLOCK_SIZE,
2422                          Entry->PoolType - 1);
2423 
2424     //
2425     // Release pool quota, if any
2426     //
2427     if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2428     {
2429         Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2430         if (Process)
2431         {
2432             if (Process->Pcb.Header.Type != ProcessObject)
2433             {
2434                 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2435                         Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2436                 KeBugCheckEx(BAD_POOL_CALLER,
2437                              0x0D,
2438                              (ULONG_PTR)P,
2439                              Tag,
2440                              (ULONG_PTR)Process);
2441             }
2442             PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
2443             ObDereferenceObject(Process);
2444         }
2445     }
2446 
2447     //
2448     // Is this allocation small enough to have come from a lookaside list?
2449     //
2450     if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2451     {
2452         //
2453         // Try pushing it into the per-CPU lookaside list
2454         //
2455         LookasideList = (PoolType == PagedPool) ?
2456                          Prcb->PPPagedLookasideList[BlockSize - 1].P :
2457                          Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2458         LookasideList->TotalFrees++;
2459         if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2460         {
2461             LookasideList->FreeHits++;
2462             InterlockedPushEntrySList(&LookasideList->ListHead, P);
2463             return;
2464         }
2465 
2466         //
2467         // We failed, try to push it into the global lookaside list
2468         //
2469         LookasideList = (PoolType == PagedPool) ?
2470                          Prcb->PPPagedLookasideList[BlockSize - 1].L :
2471                          Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2472         LookasideList->TotalFrees++;
2473         if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2474         {
2475             LookasideList->FreeHits++;
2476             InterlockedPushEntrySList(&LookasideList->ListHead, P);
2477             return;
2478         }
2479     }
2480 
2481     //
2482     // Get the pointer to the next entry
2483     //
2484     NextEntry = POOL_BLOCK(Entry, BlockSize);
2485 
2486     //
2487     // Update performance counters
2488     //
2489     InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
2490     InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -(LONG_PTR)(BlockSize * POOL_BLOCK_SIZE));
2491 
2492     //
2493     // Acquire the pool lock
2494     //
2495     OldIrql = ExLockPool(PoolDesc);
2496 
2497     //
2498     // Check if the next allocation is at the end of the page
2499     //
2500     ExpCheckPoolBlocks(Entry);
2501     if (PAGE_ALIGN(NextEntry) != NextEntry)
2502     {
2503         //
2504         // We may be able to combine the block if it's free
2505         //
2506         if (NextEntry->PoolType == 0)
2507         {
2508             //
2509             // The next block is free, so we'll do a combine
2510             //
2511             Combined = TRUE;
2512 
2513             //
2514             // Make sure there's actual data in the block -- anything smaller
2515             // than this means we only have the header, so there's no linked list
2516             // for us to remove
2517             //
2518             if (NextEntry->BlockSize != 1)
2519             {
2520                 //
2521                 // The block is at least big enough to have a linked list, so go
2522                 // ahead and remove it
2523                 //
2524                 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2525                 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2526                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2527                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2528             }
2529 
2530             //
2531             // Our entry is now combined with the next entry
2532             //
2533             Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2534         }
2535     }
2536 
2537     //
2538     // Now check if there was a previous entry on the same page as us
2539     //
2540     if (Entry->PreviousSize)
2541     {
2542         //
2543         // Great, grab that entry and check if it's free
2544         //
2545         NextEntry = POOL_PREV_BLOCK(Entry);
2546         if (NextEntry->PoolType == 0)
2547         {
2548             //
2549             // It is, so we can do a combine
2550             //
2551             Combined = TRUE;
2552 
2553             //
2554             // Make sure there's actual data in the block -- anything smaller
2555             // than this means we only have the header so there's no linked list
2556             // for us to remove
2557             //
2558             if (NextEntry->BlockSize != 1)
2559             {
2560                 //
2561                 // The block is at least big enough to have a linked list, so go
2562                 // ahead and remove it
2563                 //
2564                 ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2565                 ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
2566                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2567                 ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2568             }
2569 
2570             //
2571             // Combine our original block (which might've already been combined
2572             // with the next block), into the previous block
2573             //
2574             NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2575 
2576             //
2577             // And now we'll work with the previous block instead
2578             //
2579             Entry = NextEntry;
2580         }
2581     }
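
    //
    // Coalescing sketch (illustrative): when both neighbors were free,
    //
    //   [previous free][block being freed][next free]
    //
    // has by now collapsed into a single free block headed by the previous
    // entry, whose BlockSize is the sum of all three.
    //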
2582 
2583     //
2584     // By now, our combined blocks may make up an entire free page (if there
2585     // were only 2-3 allocations on the page, they could've all been merged
2586     // above).
2587     //
2588     if ((PAGE_ALIGN(Entry) == Entry) &&
2589         (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
2590     {
2591         //
2592         // In this case, release the pool lock, update the performance counter,
2593         // and free the page
2594         //
2595         ExUnlockPool(PoolDesc, OldIrql);
2596         InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2597         MiFreePoolPages(Entry);
2598         return;
2599     }
2600 
2601     //
2602     // Otherwise, we now have a free block (or a combination of 2 or 3)
2603     //
2604     Entry->PoolType = 0;
2605     BlockSize = Entry->BlockSize;
2606     ASSERT(BlockSize != 1);
2607 
2608     //
2609     // Check if we actually did combine it with anyone
2610     //
2611     if (Combined)
2612     {
2613         //
2614         // Get the first combined block (either our original to begin with, or
2615         // the one after the original, depending if we combined with the previous)
2616         //
2617         NextEntry = POOL_NEXT_BLOCK(Entry);
2618 
2619         //
2620         // As long as the next block isn't on a page boundary, have it point
2621         // back to us
2622         //
2623         if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2624     }
2625 
2626     //
2627     // Insert this new free block, and release the pool lock
2628     //
2629     ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2630     ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
2631     ExUnlockPool(PoolDesc, OldIrql);
2632 }
2633 
2634 /*
2635  * @implemented
2636  */
2637 VOID
2638 NTAPI
2639 ExFreePool(PVOID P)
2640 {
2641     //
2642     // Just free without checking for the tag
2643     //
2644     ExFreePoolWithTag(P, 0);
2645 }
2646 
2647 /*
2648  * @unimplemented
2649  */
2650 SIZE_T
2651 NTAPI
2652 ExQueryPoolBlockSize(IN PVOID PoolBlock,
2653                      OUT PBOOLEAN QuotaCharged)
2654 {
2655     //
2656     // Not implemented
2657     //
2658     UNIMPLEMENTED;
2659     return 0;
2660 }
2661 
2662 /*
2663  * @implemented
2664  */
2665 
2666 PVOID
2667 NTAPI
2668 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
2669                         IN SIZE_T NumberOfBytes)
2670 {
2671     //
2672     // Allocate the pool
2673     //
2674     return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2675 }
2676 
2677 /*
2678  * @implemented
2679  */
2680 PVOID
2681 NTAPI
2682 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
2683                               IN SIZE_T NumberOfBytes,
2684                               IN ULONG Tag,
2685                               IN EX_POOL_PRIORITY Priority)
2686 {
2687     PVOID Buffer;
2688 
2689     //
2690     // Allocate the pool
2691     //
2692     Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2693     if (Buffer == NULL)
2694     {
2695         UNIMPLEMENTED;
2696     }
2697 
2698     return Buffer;
2699 }
2700 
2701 /*
2702  * @implemented
2703  */
2704 PVOID
2705 NTAPI
2706 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
2707                            IN SIZE_T NumberOfBytes,
2708                            IN ULONG Tag)
2709 {
2710     BOOLEAN Raise = TRUE;
2711     PVOID Buffer;
2712     PPOOL_HEADER Entry;
2713     NTSTATUS Status;
2714     PEPROCESS Process = PsGetCurrentProcess();
2715 
2716     //
2717     // Check if we should fail instead of raising an exception
2718     //
2719     if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2720     {
2721         Raise = FALSE;
2722         PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2723     }
2724 
2725     //
2726     // Inject the pool quota mask
2727     //
2728     PoolType += QUOTA_POOL_MASK;
2729 
2730     //
2731     // Check if we have enough space to add the quota owner process, as long as
2732     // this isn't the system process, which never gets charged quota
2733     //
2734     ASSERT(NumberOfBytes != 0);
2735     if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2736         (Process != PsInitialSystemProcess))
2737     {
2738         //
2739         // Add space for our EPROCESS pointer
2740         //
2741         NumberOfBytes += sizeof(PEPROCESS);
2742     }
2743     else
2744     {
2745         //
2746         // We won't be able to store the pointer, so don't use quota for this
2747         //
2748         PoolType -= QUOTA_POOL_MASK;
2749     }
2750 
2751     //
2752     // Allocate the pool buffer now
2753     //
2754     Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2755 
2756     //
2757     // If the buffer is page-aligned, this is a large page allocation and we
2758     // won't touch it
2759     //
2760     if (PAGE_ALIGN(Buffer) != Buffer)
2761     {
2762         //
2763         // Also if special pool is enabled, and this was allocated from there,
2764         // we won't touch it either
2765         //
2766         if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
2767             (MmIsSpecialPoolAddress(Buffer)))
2768         {
2769             return Buffer;
2770         }
2771 
2772         //
2773         // If it wasn't actually allocated with quota charges, ignore it too
2774         //
2775         if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2776 
2777         //
2778         // If this is the system process, we don't charge quota, so ignore
2779         //
2780         if (Process == PsInitialSystemProcess) return Buffer;
2781 
2782         //
2783         // Actually go and charge quota for the process now
2784         //
2785         Entry = POOL_ENTRY(Buffer);
2786         Status = PsChargeProcessPoolQuota(Process,
2787                                           PoolType & BASE_POOL_TYPE_MASK,
2788                                           Entry->BlockSize * POOL_BLOCK_SIZE);
2789         if (!NT_SUCCESS(Status))
2790         {
2791             //
2792             // Quota failed, back out the allocation, clear the owner, and fail
2793             //
2794             ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2795             ExFreePoolWithTag(Buffer, Tag);
2796             if (Raise) RtlRaiseStatus(Status);
2797             return NULL;
2798         }
2799 
2800         //
2801         // Quota worked, write the owner and then reference it before returning
2802         //
2803         ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2804         ObReferenceObject(Process);
2805     }
2806     else if (!(Buffer) && (Raise))
2807     {
2808         //
2809         // The allocation failed, raise an error if we are in raise mode
2810         //
2811         RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
2812     }
2813 
2814     //
2815     // Return the allocated buffer
2816     //
2817     return Buffer;
2818 }
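
/*
 * Illustrative call (hypothetical caller): a quota-charged allocation that
 * returns NULL on failure instead of raising an exception:
 *
 *     Buffer = ExAllocatePoolWithQuotaTag(PagedPool |
 *                                             POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
 *                                         NumberOfBytes,
 *                                         'atoQ');
 */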
2819 
2820 #if DBG && defined(KDBG)
2821 
2822 BOOLEAN
2823 ExpKdbgExtPool(
2824     ULONG Argc,
2825     PCHAR Argv[])
2826 {
2827     ULONG_PTR Address = 0, Flags = 0;
2828     PVOID PoolPage;
2829     PPOOL_HEADER Entry;
2830     BOOLEAN ThisOne;
2831     PULONG Data;
2832 
2833     if (Argc > 1)
2834     {
2835         /* Get address */
2836         if (!KdbpGetHexNumber(Argv[1], &Address))
2837         {
2838             KdbpPrint("Invalid parameter: %s\n", Argv[1]);
2839             return TRUE;
2840         }
2841     }
2842 
2843     if (Argc > 2)
2844     {
2845         /* Get flags */
2846         if (!KdbpGetHexNumber(Argv[2], &Flags))
2847         {
2848             KdbpPrint("Invalid parameter: %s\n", Argv[2]);
2849             return TRUE;
2850         }
2851     }
2852 
2853     /* Check if we got an address */
2854     if (Address != 0)
2855     {
2856         /* Get the base page */
2857         PoolPage = PAGE_ALIGN(Address);
2858     }
2859     else
2860     {
2861         KdbpPrint("Heap is unimplemented\n");
2862         return TRUE;
2863     }
2864 
2865     /* No paging support! */
2866     if (!MmIsAddressValid(PoolPage))
2867     {
2868         KdbpPrint("Address not accessible!\n");
2869         return TRUE;
2870     }
2871 
2872     /* Get pool type */
2873     if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
2874         KdbpPrint("Allocation is from PagedPool region\n");
2875     else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
2876         KdbpPrint("Allocation is from NonPagedPool region\n");
2877     else
2878     {
2879         KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
2880         return TRUE;
2881     }
2882 
2883     /* Loop all entries of that page */
2884     Entry = PoolPage;
2885     do
2886     {
2887         /* Check if the address is within that entry */
2888         ThisOne = ((Address >= (ULONG_PTR)Entry) &&
2889                    (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));
2890 
2891         if (!(Flags & 1) || ThisOne)
2892         {
2893             /* Print the line */
2894             KdbpPrint("%c%p size: %4d previous size: %4d  %s  %.4s\n",
2895                      ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
2896                      (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free)     "),
2897                      (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
2898         }
2899 
2900         if (Flags & 1)
2901         {
2902             Data = (PULONG)(Entry + 1);
2903             KdbpPrint("    %p  %08lx %08lx %08lx %08lx\n"
2904                      "    %p  %08lx %08lx %08lx %08lx\n",
2905                      &Data[0], Data[0], Data[1], Data[2], Data[3],
2906                      &Data[4], Data[4], Data[5], Data[6], Data[7]);
2907         }
2908 
2909         /* Go to next entry */
2910         Entry = POOL_BLOCK(Entry, Entry->BlockSize);
2911     }
2912     while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));
2913 
2914     return TRUE;
2915 }
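
/*
 * Illustrative KDBG session (hypothetical; assumes this extension is
 * registered as the "pool" command): "pool <address> [flags]" walks every
 * pool entry on the page containing <address>, e.g.
 *
 *     kdb:> pool 0xb1234568 1
 *
 * With flag bit 0 set, the per-entry line is printed only for the entry
 * containing the address, and the first eight ULONGs of each block's data
 * are dumped as well.
 */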
2916 
2917 #endif // DBG && KDBG
2918 
2919 /* EOF */
2920