/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
ULONG ExpBigTableExpansionFailed;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;
ULONGLONG MiLastPoolDumpTime;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))

/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}
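
/*
 * Illustrative note (not part of the original sources): the encode/decode
 * pair above tags list pointers by setting bit 0, which is always clear in
 * a properly aligned LIST_ENTRY pointer. A hypothetical worked example:
 *
 *     PLIST_ENTRY Entry   = (PLIST_ENTRY)0x80001230;    // aligned address
 *     PLIST_ENTRY Encoded = ExpEncodePoolLink(Entry);   // 0x80001231
 *     PLIST_ENTRY Decoded = ExpDecodePoolLink(Encoded); // 0x80001230 again
 *
 * Any Flink/Blink without bit 0 set was therefore written by code that
 * bypassed these helpers, and the double-decode checks in ExpCheckPoolLinks
 * below will bugcheck on such corruption.
 */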

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}
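
/*
 * Layout refresher (illustrative, not from the original sources): a pool page
 * is a chain of blocks, each led by a POOL_HEADER whose BlockSize and
 * PreviousSize are expressed in POOL_BLOCK_SIZE units:
 *
 *     page start -> +-------------+----------+  PreviousSize == 0, BlockSize == a
 *                   | POOL_HEADER | data...  |
 *                   +-------------+----------+  PreviousSize == a, BlockSize == b
 *                   | POOL_HEADER | data...  |
 *                   +-------------+----------+  ...and so on to the page end
 *
 * Each header's BlockSize must equal the next header's PreviousSize, and the
 * sizes must add up to exactly one page; ExpCheckPoolHeader and
 * ExpCheckPoolBlocks below validate precisely these invariants.
 */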

VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}

VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}
"PagedPool" : "NonPagedPool"); 361 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag); 362 } 363 } 364 } 365 366 VOID 367 NTAPI 368 ExpCheckPoolBlocks(IN PVOID Block) 369 { 370 BOOLEAN FoundBlock = FALSE; 371 SIZE_T Size = 0; 372 PPOOL_HEADER Entry; 373 374 /* Get the first entry for this page, make sure it really is the first */ 375 Entry = PAGE_ALIGN(Block); 376 ASSERT(Entry->PreviousSize == 0); 377 378 /* Now scan each entry */ 379 while (TRUE) 380 { 381 /* When we actually found our block, remember this */ 382 if (Entry == Block) FoundBlock = TRUE; 383 384 /* Now validate this block header */ 385 ExpCheckPoolHeader(Entry); 386 387 /* And go to the next one, keeping track of our size */ 388 Size += Entry->BlockSize; 389 Entry = POOL_NEXT_BLOCK(Entry); 390 391 /* If we hit the last block, stop */ 392 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break; 393 394 /* If we hit the end of the page, stop */ 395 if (PAGE_ALIGN(Entry) == Entry) break; 396 } 397 398 /* We must've found our block, and we must have hit the end of the page */ 399 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock)) 400 { 401 /* Otherwise, the blocks are messed up */ 402 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry); 403 } 404 } 405 406 FORCEINLINE 407 VOID 408 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, 409 IN SIZE_T NumberOfBytes, 410 IN PVOID Entry) 411 { 412 // 413 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must 414 // be DISPATCH_LEVEL or lower for Non Paged Pool 415 // 416 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ? 417 (KeGetCurrentIrql() > APC_LEVEL) : 418 (KeGetCurrentIrql() > DISPATCH_LEVEL)) 419 { 420 // 421 // Take the system down 422 // 423 KeBugCheckEx(BAD_POOL_CALLER, 424 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID, 425 KeGetCurrentIrql(), 426 PoolType, 427 !Entry ? NumberOfBytes : (ULONG_PTR)Entry); 428 } 429 } 430 431 FORCEINLINE 432 ULONG 433 ExpComputeHashForTag(IN ULONG Tag, 434 IN SIZE_T BucketMask) 435 { 436 // 437 // Compute the hash by multiplying with a large prime number and then XORing 438 // with the HIDWORD of the result. 439 // 440 // Finally, AND with the bucket mask to generate a valid index/bucket into 441 // the table 442 // 443 ULONGLONG Result = (ULONGLONG)40543 * Tag; 444 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32)); 445 } 446 447 FORCEINLINE 448 ULONG 449 ExpComputePartialHashForAddress(IN PVOID BaseAddress) 450 { 451 ULONG Result; 452 // 453 // Compute the hash by converting the address into a page number, and then 454 // XORing each nibble with the next one. 455 // 456 // We do *NOT* AND with the bucket mask at this point because big table expansion 457 // might happen. Therefore, the final step of the hash must be performed 458 // while holding the expansion pushlock, and this is why we call this a 459 // "partial" hash only. 460 // 461 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT); 462 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result; 463 } 464 465 #if DBG 466 FORCEINLINE 467 BOOLEAN 468 ExpTagAllowPrint(CHAR Tag) 469 { 470 if ((Tag >= 'a' && Tag <= 'z') || 471 (Tag >= 'A' && Tag <= 'Z') || 472 (Tag >= '0' && Tag <= '9') || 473 Tag == ' ' || Tag == '=' || 474 Tag == '?' || Tag == '@') 475 { 476 return TRUE; 477 } 478 479 return FALSE; 480 } 481 482 #ifdef KDBG 483 #define MiDumperPrint(dbg, fmt, ...) \ 484 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \ 485 else DPRINT1(fmt, ##__VA_ARGS__) 486 #else 487 #define MiDumperPrint(dbg, fmt, ...) 

#if DBG
FORCEINLINE
BOOLEAN
ExpTagAllowPrint(CHAR Tag)
{
    if ((Tag >= 'a' && Tag <= 'z') ||
        (Tag >= 'A' && Tag <= 'Z') ||
        (Tag >= '0' && Tag <= '9') ||
        Tag == ' ' || Tag == '=' ||
        Tag == '?' || Tag == '@')
    {
        return TRUE;
    }

    return FALSE;
}

#ifdef KDBG
#define MiDumperPrint(dbg, fmt, ...)        \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)
#else
#define MiDumperPrint(dbg, fmt, ...)        \
    DPRINT1(fmt, ##__VA_ARGS__)
#endif

VOID
MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
{
    SIZE_T i;
    BOOLEAN Verbose;

    //
    // Only print the header if called from an OOM situation
    //
    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
        DPRINT1("Out of memory dumper!\n");
    }
#ifdef KDBG
    else
    {
        KdbpPrint("Pool Used:\n");
    }
#endif

    //
    // Remember whether we'll have to be verbose.
    // This is the only supported flag!
    //
    Verbose = BooleanFlagOn(Flags, 1);

    //
    // Print table header
    //
    if (Verbose)
    {
        MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
    }
    else
    {
        MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
    }

    //
    // We'll extract allocations for all the tracked pools
    //
    for (i = 0; i < PoolTrackTableSize; ++i)
    {
        PPOOL_TRACKER_TABLE TableEntry;

        TableEntry = &PoolTrackTable[i];

        //
        // We only care about tags which have allocated memory
        //
        if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
        {
            //
            // If there's a tag, attempt to do a pretty print, but only if it
            // matches the caller's tag, or if any tag is allowed.
            // For checking whether it matches the caller's tag, use the mask
            // to make sure not to mess with the wildcards
            //
            if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
                (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
            {
                CHAR Tag[4];

                //
                // Extract each 'component' and check whether they are printable
                //
                Tag[0] = TableEntry->Key & 0xFF;
                Tag[1] = TableEntry->Key >> 8 & 0xFF;
                Tag[2] = TableEntry->Key >> 16 & 0xFF;
                Tag[3] = TableEntry->Key >> 24 & 0xFF;

                if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
                {
                    //
                    // Print in direct order to make !poolused TAG usage easier
                    //
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
                else
                {
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
            }
            else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
            {
                if (Verbose)
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                }
                else
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedBytes);
                }
            }
        }
    }

    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
    }
}
#endif

/* PRIVATE FUNCTIONS **********************************************************/

INIT_FUNCTION
VOID
NTAPI
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        '  oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        '  eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        '  pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}
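
/*
 * Side note (illustrative): the tags above look reversed because they are
 * written as multi-character constants, which a little-endian machine stores
 * lowest byte first. For example:
 *
 *     'looP' == ('l' << 24) | ('o' << 16) | ('o' << 8) | 'P'
 *
 * lands in memory as the byte sequence 'P','o','o','l', so a pool dump (or a
 * debugger's !poolused) displays it as "Pool".
 */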

VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}

VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
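
/*
 * Usage sketch (illustrative; 'xFoo' is a made-up tag, not from this file):
 * the tracker calls are always paired by the allocation and free paths with
 * the same tag, byte count and pool type, e.g.
 *
 *     ExpInsertPoolTracker('xFoo', Entry->BlockSize * POOL_BLOCK_SIZE, PoolType);
 *     ...
 *     ExpRemovePoolTracker('xFoo', Entry->BlockSize * POOL_BLOCK_SIZE, PoolType);
 *
 * Because removal only touches the entry whose Key matches, an unbalanced pair
 * shows up directly in the Allocs/Frees columns of MiDumpPoolConsumers or
 * ExGetPoolTagInfo.
 */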

INIT_FUNCTION
VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support session pool yet
    //
    ASSERT(PoolType != PagedPoolSession);
}
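
/*
 * Illustrative note (not from the original sources): ListHeads is indexed by
 * block size in POOL_BLOCK_SIZE units, offset by one, so a free block with
 * BlockSize == n lives on ListHeads[n - 1]. ExAllocatePoolWithTag below starts
 * its search at ListHeads[i] for an i-block request and only walks upward,
 * which is why any block it finds is guaranteed to be large enough.
 */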

INIT_FUNCTION
VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash mask, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;

        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // Finally, add the most used tags to speed up those allocations
        //
        ExpSeedHotTags();

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash mask and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            PoolBigPageTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
        }

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));

        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}
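
/*
 * Worked example (illustrative): with no registry override, PoolTrackTableSize
 * starts out as 0, the bit scan above falls through all 32 bits, i == 32, and
 * the default of 2048 entries is chosen. One guard entry is then added (2049
 * entries total) and the mask becomes PoolTrackTableSize - 2 == 2047 (0x7FF),
 * so hashing still covers the original power-of-two range while the extra
 * entry terminates the probe loops in the tracker routines.
 */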

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}

VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}
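
/*
 * Illustrative note on the "Generic DPC" pattern used by the routine above:
 * KeGenericCallDpc queues the DPC on every processor, and the
 * KeSignalCallDpcSynchronize barrier ensures all of them have arrived before
 * any proceeds. Exactly one processor sees the first synchronize call return
 * TRUE and copies the table while the others spin at DISPATCH_LEVEL, which is
 * what makes the snapshot of the tracker table atomic without taking a lock.
 */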

NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}
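
/*
 * Caller sketch (illustrative, not from this file): user mode reaches this
 * routine through NtQuerySystemInformation(SystemPoolTagInformation) and
 * typically grows its buffer until the mismatch status clears:
 *
 *     ULONG Len = 0x1000;
 *     PVOID Buf = malloc(Len);
 *     while (NtQuerySystemInformation(SystemPoolTagInformation,
 *                                     Buf, Len, &Len) == STATUS_INFO_LENGTH_MISMATCH)
 *     {
 *         Buf = realloc(Buf, Len); // Len was updated to the required size
 *     }
 *
 * This works because the loop above keeps accumulating Count and CurrentLength
 * even after the buffer is known to be too small.
 */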

_IRQL_requires_(DISPATCH_LEVEL)
BOOLEAN
NTAPI
ExpExpandBigPageTable(
    _In_ _IRQL_restores_ KIRQL OldIrql)
{
    ULONG OldSize = PoolBigPageTableSize;
    ULONG NewSize = 2 * OldSize;
    ULONG NewSizeInBytes;
    PPOOL_TRACKER_BIG_PAGES NewTable;
    PPOOL_TRACKER_BIG_PAGES OldTable;
    ULONG i;
    ULONG PagesFreed;
    ULONG Hash;
    ULONG HashMask;

    /* Must be holding ExpLargePoolTableLock */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    /* Make sure we don't overflow */
    if (!NT_SUCCESS(RtlULongMult(2,
                                 OldSize * sizeof(POOL_TRACKER_BIG_PAGES),
                                 &NewSizeInBytes)))
    {
        DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
    if (NewTable == NULL)
    {
        DPRINT1("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    DPRINT("Expanding big pool tracker table to %lu entries\n", NewSize);

    /* Initialize the new table */
    RtlZeroMemory(NewTable, NewSizeInBytes);
    for (i = 0; i < NewSize; i++)
    {
        NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
    }

    /* Copy over all items */
    OldTable = PoolBigPageTable;
    HashMask = NewSize - 1;
    for (i = 0; i < OldSize; i++)
    {
        /* Skip over empty items */
        if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
        {
            continue;
        }

        /* Recalculate the hash due to the new table size */
        Hash = ExpComputePartialHashForAddress(OldTable[i].Va) & HashMask;

        /* Find the location in the new table */
        while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
        {
            Hash = (Hash + 1) & HashMask;
        }

        /* We just enlarged the table, so we must have space */
        ASSERT((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE);

        /* Finally, copy the item */
        NewTable[Hash] = OldTable[i];
    }

    /* Activate the new table */
    PoolBigPageTable = NewTable;
    PoolBigPageTableSize = NewSize;
    PoolBigPageTableHash = PoolBigPageTableSize - 1;

    /* Release the lock, we're done changing global state */
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

    /* Free the old table and update our tracker */
    PagesFreed = MiFreePoolPages(OldTable);
    ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
    ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);

    return TRUE;
}
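
/*
 * Illustrative note: entries cannot simply keep their old slots because the
 * bucket is PartialHash & (TableSize - 1), and doubling the table adds one
 * mask bit. For example, a partial hash of 0x123 maps to bucket 0x23 under a
 * 256-entry mask (0xFF) but to bucket 0x123 under a 512-entry mask (0x1FF),
 * which is why every live entry is rehashed during the copy above.
 */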

BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    // NOTE: Windows uses a special reader/writer SpinLock to improve
    // performance in the common case (add/remove a tracker entry)
    //
Retry:
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // roll over to hash bucket 0 and keep going from there. If we end up back
    // at the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 25% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if this was at least the 16th time we
            // lost the race or failed to find a free entry, which implies a
            // massive number of concurrent big pool allocations.
            //
            InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
            {
                DPRINT("Attempting expansion since we now have %lu entries\n",
                       ExpPoolBigEntriesInUse);
                ASSERT(TableSize == PoolBigPageTableSize);
                ExpExpandBigPageTable(OldIrql);
                return TRUE;
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there are no free hash buckets whatsoever, so we now have
    // to attempt expanding the table
    //
    ASSERT(TableSize == PoolBigPageTableSize);
    if (ExpExpandBigPageTable(OldIrql))
    {
        goto Retry;
    }
    ExpBigTableExpansionFailed++;
    DPRINT1("Big pool table expansion failed\n");
    return FALSE;
}
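
/*
 * Illustrative note on POOL_BIG_TABLE_ENTRY_FREE: big pool allocations are
 * page-aligned, so bit 0 of a valid Va is always clear and can double as the
 * "free slot" marker. Claiming a slot (above) compare-exchanges a Va that has
 * bit 0 set with the real address; freeing (in ExpFindAndRemoveTagBigPages
 * below) just InterlockedIncrements Va, setting bit 0 again without disturbing
 * the rest of the address.
 */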

ULONG
NTAPI
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the hash index and check if we went past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
    InterlockedIncrement((PLONG)&Entry->Va);
    InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return PoolTag;
}
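
/*
 * Illustrative note: the ' GIB' value returned above ("BIG " reversed, per the
 * little-endian storage of multi-character constants) mirrors the allocation
 * path in ExAllocatePoolWithTag, which falls back to that generic tag whenever
 * ExpAddTagForBigPages cannot record the real one. In that case there is no
 * page count in the table either, hence *BigPages == 0 and the caller must ask
 * Mm for the allocation size instead.
 */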

VOID
NTAPI
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pools
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
#if 0
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }
#endif

    //
    // Get the amount of hits in the system lookaside lists
    //
    if (!IsListEmpty(&ExPoolLookasideListHead))
    {
        PLIST_ENTRY ListEntry;

        for (ListEntry = ExPoolLookasideListHead.Flink;
             ListEntry != &ExPoolLookasideListHead;
             ListEntry = ListEntry->Flink)
        {
            PGENERAL_LOOKASIDE Lookaside;

            Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);

            if (Lookaside->Type == NonPagedPool)
            {
                *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
            }
            else
            {
                *PagedPoolLookasideHits += Lookaside->AllocateHits;
            }
        }
    }
}

VOID
NTAPI
ExReturnPoolQuota(IN PVOID P)
{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    USHORT BlockSize;
    PEPROCESS Process;

    if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
        (MmIsSpecialPoolAddress(P)))
    {
        return;
    }

    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    PoolType = Entry->PoolType - 1;
    BlockSize = Entry->BlockSize;

    if (PoolType & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Entry->PoolTag,
                             (ULONG_PTR)Process);
            }
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
#if DBG
            //
            // Out of memory, display current consumption.
            // Let's consider that if the caller wanted more
            // than a hundred pages, that's a bogus caller
            // and we are not out of memory. Dump at most
            // once a second to avoid spamming the log.
            //
            if (NumberOfBytes < 100 * PAGE_SIZE &&
                KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
            {
                MiDumpPoolConsumers(FALSE, 0, 0, 0);
                MiLastPoolDumpTime = KeQueryInterruptTime();
            }
#endif

            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }

            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);
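
    //
    // Illustrative worked example (not part of the original sources; sizes
    // assume 32-bit x86, where POOL_BLOCK_SIZE and sizeof(POOL_HEADER) are
    // both 8, while on x64 both are 16): a request for 24 bytes yields
    //
    //     i = (24 + 8 + (8 - 1)) / 8 = 4
    //
    // i.e. four 8-byte block units (32 bytes) holding the header plus the
    // caller's data. Small sizes like this fall inside the lookaside range
    // checked immediately below.
    //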
    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[i - 1].P :
                        Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                            Prcb->PPPagedLookasideList[i - 1].L :
                            Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            Entry--;
            Entry->PoolType = OriginalType + 1;
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }
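    //
    // Free-list indexing note (derived from the insertion sites in this file):
    // a free block of n units is linked into ListHeads[n - 1], both when a
    // block is freed and when a split leftover is reinserted. Starting the
    // search below at index i therefore guarantees that every block found is
    // big enough for the request, which is exactly what the ASSERT on
    // Entry->BlockSize checks; oversized blocks are split and the remainder
    // reinserted.
    //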
    //
    // Loop over the free lists looking for a block of this size. Start with
    // the list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                continue;
            }

            //
            // Remove a free entry from the list.
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller,
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }
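            //
            // Worked example of the split above (illustrative unit counts):
            // say i = 4 and the head of this list was a 10-unit block with no
            // neighbor before it. The first 4 units keep the header and
            // become the allocation, while units 5-10 become a 6-unit
            // fragment (FragmentEntry->BlockSize = 6, PreviousSize = 4) that
            // the code just above linked back into ListHeads[5].
            //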
            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = OriginalType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Increment required counters
            //
            InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
            InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

            //
            // Track this allocation
            //
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
    if (!Entry)
    {
#if DBG
        //
        // Out of memory -- display the current pool consumption. If the
        // caller wanted more than a hundred pages, treat it as a bogus
        // caller rather than a genuine out-of-memory condition and skip
        // the dump. Dump at most once a second to avoid spamming the log.
        //
        if (NumberOfBytes < 100 * PAGE_SIZE &&
            KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
        {
            MiDumpPoolConsumers(FALSE, 0, 0, 0);
            MiLastPoolDumpTime = KeQueryInterruptTime();
        }
#endif

        //
        // Must-succeed pool is deprecated, but still supported. These
        // allocation failures must cause an immediate bugcheck
        //
        if (OriginalType & MUST_SUCCEED_POOL_MASK)
        {
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         PAGE_SIZE,
                         NonPagedPoolDescriptor.TotalPages,
                         NonPagedPoolDescriptor.TotalBigPages,
                         0);
        }

        //
        // Internal debugging
        //
        ExPoolFailures++;

        //
        // This flag requests printing failures, and can also further specify
        // breaking on failures
        //
        if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
        {
            DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                    NumberOfBytes,
                    OriginalType);
            if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
        }

        //
        // Finally, this flag requests an exception, which we are more than
        // happy to raise!
        //
        if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
        {
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        }

        //
        // Return NULL to the caller in all other cases
        //
        return NULL;
    }

    //
    // Setup the entry data
    //
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = OriginalType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was
    // allocated, converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
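    //
    // Layout example (illustrative, assuming 4 KB pages and 8-byte pool
    // blocks, so PAGE_SIZE / POOL_BLOCK_SIZE = 512 units per page): for a
    // 14-unit allocation the fresh page is carved as
    //
    //     [ allocation: units 0-13 ][ free fragment: units 14-511 ]
    //
    // giving FragmentEntry->BlockSize = 512 - 14 = 498.
    //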
    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
    ExpInsertPoolTracker(Tag,
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         OriginalType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    ULONG Tag = TAG_NONE;
#if 0 && DBG
    PLDR_DATA_TABLE_ENTRY LdrEntry;

    /* Use the first four letters of the driver name, or "None" if unavailable */
    LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
               ? MiLookupDataTableEntry(_ReturnAddress())
               : NULL;
    if (LdrEntry)
    {
        ULONG i;
        Tag = 0;
        for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
            Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
        for (; i < 4; i++)
            Tag = Tag >> 8 | ' ' << 24;
    }
#endif
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
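/*
 * Illustrative driver-side usage of ExAllocatePoolWithTag and the
 * ExFreePoolWithTag routine defined below (a sketch only; the MYCTX type
 * and the 'Xmpl' tag are hypothetical):
 *
 *     PMYCTX Ctx = ExAllocatePoolWithTag(NonPagedPool, sizeof(MYCTX), 'Xmpl');
 *     if (Ctx == NULL) return STATUS_INSUFFICIENT_RESOURCES;
 *     ...
 *     ExFreePoolWithTag(Ctx, 'Xmpl');
 *
 * Passing the same tag to ExFreePoolWithTag lets the tag verification in the
 * free path catch mismatched frees on debug builds.
 */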
/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    USHORT BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    ULONG Tag;
    BOOLEAN Combined = FALSE;
    PFN_NUMBER PageCount, RealPageCount;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;
    PEPROCESS Process;

    //
    // Check if any of the debug flags are enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                        POOL_FLAG_CHECK_WORKERS |
                        POOL_FLAG_CHECK_RESOURCES |
                        POOL_FLAG_VERIFIER |
                        POOL_FLAG_CHECK_DEADLOCK |
                        POOL_FLAG_SPECIAL_POOL))
    {
        //
        // Check if special pool is enabled
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if it was allocated from a special pool
            //
            if (MmIsSpecialPoolAddress(P))
            {
                //
                // Was deadlock verification also enabled? We can do some extra
                // checks at this point
                //
                if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
                {
                    DPRINT1("Verifier not yet supported\n");
                }

                //
                // It is, so handle it via the special pool free routine
                //
                MmFreeSpecialPool(P);
                return;
            }
        }

        //
        // For non-big page allocations, we'll do a bunch of checks in here
        //
        if (PAGE_ALIGN(P) != P)
        {
            //
            // Get the entry for this pool allocation
            // The pointer math here may look wrong or confusing, but it is quite right
            //
            Entry = P;
            Entry--;

            //
            // Get the pool type
            //
            PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;

            //
            // FIXME: Many other debugging checks go here
            //
            ExpCheckPoolIrqlLevel(PoolType, 0, P);
        }
    }

    //
    // Check if this is a big page allocation
    //
    if (PAGE_ALIGN(P) == P)
    {
        //
        // We need to find the tag for it, so first we need to find out what
        // kind of allocation this was (paged or nonpaged), then we can go
        // ahead and try finding the tag for it. Remember to get rid of the
        // PROTECTED_POOL tag if it's found.
        //
        // Note that if at insertion time we failed to add the tag for a big
        // pool allocation, we used a special tag called 'BIG' to identify the
        // allocation, and we may get this tag back. In this scenario, we must
        // manually get the size of the allocation by actually counting through
        // the PFN database.
        //
        PoolType = MmDeterminePoolType(P);
        ExpCheckPoolIrqlLevel(PoolType, 0, P);
        Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
        if (!Tag)
        {
            DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
            ASSERT(Tag == ' GIB');
            PageCount = 1; // We are going to lie! This might screw up accounting?
        }
        else if (Tag & PROTECTED_POOL)
        {
            Tag &= ~PROTECTED_POOL;
        }

        //
        // Check the block tag
        //
        if (TagToFree && TagToFree != Tag)
        {
            DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
#if DBG
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
#endif
        }
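        //
        // Example of the check above (illustrative): a buffer allocated with
        // the hypothetical tag 'Xmpl' must be freed with
        // ExFreePoolWithTag(P, 'Xmpl') or with a zero tag (which skips
        // verification, as ExFreePool does); any other tag is reported and,
        // on a debug build, bugchecked as BAD_POOL_CALLER (0x0A).
        //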
        //
        // We have our tag and our page count, so we can go ahead and remove
        // this tracker now
        //
        ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);

        //
        // Check if any of the debug flags are enabled
        //
        if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                            POOL_FLAG_CHECK_WORKERS |
                            POOL_FLAG_CHECK_RESOURCES |
                            POOL_FLAG_CHECK_DEADLOCK))
        {
            //
            // Was deadlock verification also enabled? We can do some extra
            // checks at this point
            //
            if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
            {
                DPRINT1("Verifier not yet supported\n");
            }

            //
            // FIXME: Many debugging checks go here
            //
        }

        //
        // Update counters
        //
        PoolDesc = PoolVector[PoolType];
        InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                    -(LONG_PTR)(PageCount << PAGE_SHIFT));

        //
        // Do the real free now and update the last counter with the big page count
        //
        RealPageCount = MiFreePoolPages(P);
        ASSERT(RealPageCount == PageCount);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               -(LONG)RealPageCount);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    //
    // Get the size of the entry and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Make sure that the IRQL makes sense
    //
    ExpCheckPoolIrqlLevel(PoolType, 0, P);

    //
    // Get the pool tag and get rid of the PROTECTED_POOL flag
    //
    Tag = Entry->PoolTag;
    if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;

    //
    // Check the block tag
    //
    if (TagToFree && TagToFree != Tag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
#if DBG
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
#endif
    }

    //
    // Track the removal of this allocation
    //
    ExpRemovePoolTracker(Tag,
                         BlockSize * POOL_BLOCK_SIZE,
                         Entry->PoolType - 1);

    //
    // Release pool quota, if any
    //
    if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Tag,
                             (ULONG_PTR)Process);
            }
            PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
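    //
    // Quota bookkeeping note (see ExAllocatePoolWithQuotaTag below): for a
    // quota-charged block, the owning EPROCESS pointer is stored in the last
    // pointer-sized slot of the block, which is why it is read back here as
    // ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1].
    //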
    //
    // Is this allocation small enough to have come from a lookaside list?
    //
    if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].P :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // We failed, try to push it into the global lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].L :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Update performance counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block) into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }
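    //
    // Coalescing example (illustrative unit counts): freeing a 4-unit block
    // that happens to sit between a free 3-unit block and a free 5-unit block
    // first absorbs the 5-unit successor (4 + 5 = 9), then is absorbed into
    // the 3-unit predecessor (3 + 9 = 12), leaving a single 12-unit free
    // block with Entry now pointing at the predecessor's header.
    //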
    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, update the performance counter,
        // and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return 0;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    PVOID Buffer;

    //
    // Allocate the pool, currently ignoring the priority hint
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
    if (Buffer == NULL)
    {
        UNIMPLEMENTED;
    }

    return Buffer;
}
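/*
 * The quota-aware allocator below layers process accounting on top of
 * ExAllocatePoolWithTag. Illustrative flow (a sketch only; the 'Xmpl' tag is
 * hypothetical and the size assumes there is room for the extra owner
 * pointer):
 *
 *     P = ExAllocatePoolWithQuotaTag(PagedPool | POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
 *                                    128, 'Xmpl');
 *
 * This charges the current process's paged pool quota for the whole block,
 * stores the referenced EPROCESS pointer in the block's last PVOID slot, and
 * ExFreePoolWithTag later returns the quota and drops the reference.
 */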
/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    BOOLEAN Raise = TRUE;
    PVOID Buffer;
    PPOOL_HEADER Entry;
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();

    //
    // Check if we should fail instead of raising an exception
    //
    if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
    {
        Raise = FALSE;
        PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
    }

    //
    // Inject the pool quota mask
    //
    PoolType += QUOTA_POOL_MASK;

    //
    // Check if we have enough space to add the quota owner process, as long as
    // this isn't the system process, which never gets charged quota
    //
    ASSERT(NumberOfBytes != 0);
    if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
        (Process != PsInitialSystemProcess))
    {
        //
        // Add space for our EPROCESS pointer
        //
        NumberOfBytes += sizeof(PEPROCESS);
    }
    else
    {
        //
        // We won't be able to store the pointer, so don't use quota for this
        //
        PoolType -= QUOTA_POOL_MASK;
    }

    //
    // Allocate the pool buffer now
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);

    //
    // If the buffer is page-aligned, this is a large page allocation and we
    // won't touch it
    //
    if (PAGE_ALIGN(Buffer) != Buffer)
    {
        //
        // Also if special pool is enabled, and this was allocated from there,
        // we won't touch it either
        //
        if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
            (MmIsSpecialPoolAddress(Buffer)))
        {
            return Buffer;
        }

        //
        // If it wasn't actually allocated with quota charges, ignore it too
        //
        if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;

        //
        // If this is the system process, we don't charge quota, so ignore
        //
        if (Process == PsInitialSystemProcess) return Buffer;

        //
        // Actually go and charge quota for the process now
        //
        Entry = POOL_ENTRY(Buffer);
        Status = PsChargeProcessPoolQuota(Process,
                                          PoolType & BASE_POOL_TYPE_MASK,
                                          Entry->BlockSize * POOL_BLOCK_SIZE);
        if (!NT_SUCCESS(Status))
        {
            //
            // Quota failed, back out the allocation, clear the owner, and fail
            //
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            ExFreePoolWithTag(Buffer, Tag);
            if (Raise) RtlRaiseStatus(Status);
            return NULL;
        }

        //
        // Quota worked, write the owner and then reference it before returning
        //
        ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
        ObReferenceObject(Process);
    }
    else if (!(Buffer) && (Raise))
    {
        //
        // The allocation failed, raise an error if we are in raise mode
        //
        RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
    }

    //
    // Return the allocated buffer
    //
    return Buffer;
}

/* EOF */