/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))

/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}
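//
// Illustrative note (a sketch, not part of the original code): the encoding
// just tags bit 0 of each list pointer, which is otherwise always clear
// because pool blocks are at least 8-byte aligned. For a list entry at
// 0xF0801010:
//
//     ExpEncodePoolLink((PLIST_ENTRY)0xF0801010) == (PLIST_ENTRY)0xF0801011
//     ExpDecodePoolLink((PLIST_ENTRY)0xF0801011) == (PLIST_ENTRY)0xF0801010
//
// Mixing these helpers with the plain LIST_ENTRY macros leaves stale bit-0
// state behind, which the consistency test in ExpCheckPoolLinks above is
// intended to help catch.
//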
VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}
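//
// Illustrative layout (a sketch, not part of the original code): with
// POOL_BLOCK_SIZE == 8 on 32-bit builds, a page carved into blocks of
// 4, 10 and 498 units chains as follows:
//
//     Offset 0x000: BlockSize = 4,   PreviousSize = 0   (first, page-aligned)
//     Offset 0x020: BlockSize = 10,  PreviousSize = 4
//     Offset 0x070: BlockSize = 498, PreviousSize = 10
//
// Each header's PreviousSize must equal its predecessor's BlockSize, which
// is exactly the invariant ExpCheckPoolHeader validates in both directions.
//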
VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}
"PagedPool" : "NonPagedPool"); 359 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag); 360 } 361 } 362 } 363 364 VOID 365 NTAPI 366 ExpCheckPoolBlocks(IN PVOID Block) 367 { 368 BOOLEAN FoundBlock = FALSE; 369 SIZE_T Size = 0; 370 PPOOL_HEADER Entry; 371 372 /* Get the first entry for this page, make sure it really is the first */ 373 Entry = PAGE_ALIGN(Block); 374 ASSERT(Entry->PreviousSize == 0); 375 376 /* Now scan each entry */ 377 while (TRUE) 378 { 379 /* When we actually found our block, remember this */ 380 if (Entry == Block) FoundBlock = TRUE; 381 382 /* Now validate this block header */ 383 ExpCheckPoolHeader(Entry); 384 385 /* And go to the next one, keeping track of our size */ 386 Size += Entry->BlockSize; 387 Entry = POOL_NEXT_BLOCK(Entry); 388 389 /* If we hit the last block, stop */ 390 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break; 391 392 /* If we hit the end of the page, stop */ 393 if (PAGE_ALIGN(Entry) == Entry) break; 394 } 395 396 /* We must've found our block, and we must have hit the end of the page */ 397 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock)) 398 { 399 /* Otherwise, the blocks are messed up */ 400 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry); 401 } 402 } 403 404 FORCEINLINE 405 VOID 406 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, 407 IN SIZE_T NumberOfBytes, 408 IN PVOID Entry) 409 { 410 // 411 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must 412 // be DISPATCH_LEVEL or lower for Non Paged Pool 413 // 414 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ? 415 (KeGetCurrentIrql() > APC_LEVEL) : 416 (KeGetCurrentIrql() > DISPATCH_LEVEL)) 417 { 418 // 419 // Take the system down 420 // 421 KeBugCheckEx(BAD_POOL_CALLER, 422 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID, 423 KeGetCurrentIrql(), 424 PoolType, 425 !Entry ? NumberOfBytes : (ULONG_PTR)Entry); 426 } 427 } 428 429 FORCEINLINE 430 ULONG 431 ExpComputeHashForTag(IN ULONG Tag, 432 IN SIZE_T BucketMask) 433 { 434 // 435 // Compute the hash by multiplying with a large prime number and then XORing 436 // with the HIDWORD of the result. 437 // 438 // Finally, AND with the bucket mask to generate a valid index/bucket into 439 // the table 440 // 441 ULONGLONG Result = (ULONGLONG)40543 * Tag; 442 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32)); 443 } 444 445 FORCEINLINE 446 ULONG 447 ExpComputePartialHashForAddress(IN PVOID BaseAddress) 448 { 449 ULONG Result; 450 // 451 // Compute the hash by converting the address into a page number, and then 452 // XORing each nibble with the next one. 453 // 454 // We do *NOT* AND with the bucket mask at this point because big table expansion 455 // might happen. Therefore, the final step of the hash must be performed 456 // while holding the expansion pushlock, and this is why we call this a 457 // "partial" hash only. 458 // 459 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT); 460 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result; 461 } 462 463 #if DBG 464 FORCEINLINE 465 BOOLEAN 466 ExpTagAllowPrint(CHAR Tag) 467 { 468 if ((Tag >= 'a' && Tag <= 'z') || 469 (Tag >= 'A' && Tag <= 'Z') || 470 (Tag >= '0' && Tag <= '9') || 471 Tag == ' ' || Tag == '=' || 472 Tag == '?' || Tag == '@') 473 { 474 return TRUE; 475 } 476 477 return FALSE; 478 } 479 480 #ifdef KDBG 481 #define MiDumperPrint(dbg, fmt, ...) \ 482 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \ 483 else DPRINT1(fmt, ##__VA_ARGS__) 484 #else 485 #define MiDumperPrint(dbg, fmt, ...) 
#if DBG
FORCEINLINE
BOOLEAN
ExpTagAllowPrint(CHAR Tag)
{
    if ((Tag >= 'a' && Tag <= 'z') ||
        (Tag >= 'A' && Tag <= 'Z') ||
        (Tag >= '0' && Tag <= '9') ||
        Tag == ' ' || Tag == '=' ||
        Tag == '?' || Tag == '@')
    {
        return TRUE;
    }

    return FALSE;
}

#ifdef KDBG
#define MiDumperPrint(dbg, fmt, ...)        \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)
#else
#define MiDumperPrint(dbg, fmt, ...)        \
    DPRINT1(fmt, ##__VA_ARGS__)
#endif

VOID
MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
{
    SIZE_T i;
    BOOLEAN Verbose;

    //
    // Only print header if called from OOM situation
    //
    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
        DPRINT1("Out of memory dumper!\n");
    }
#ifdef KDBG
    else
    {
        KdbpPrint("Pool Used:\n");
    }
#endif

    //
    // Remember whether we'll have to be verbose
    // This is the only supported flag!
    //
    Verbose = BooleanFlagOn(Flags, 1);

    //
    // Print table header
    //
    if (Verbose)
    {
        MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
    }
    else
    {
        MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
    }

    //
    // We'll extract allocations for all the tracked pools
    //
    for (i = 0; i < PoolTrackTableSize; ++i)
    {
        PPOOL_TRACKER_TABLE TableEntry;

        TableEntry = &PoolTrackTable[i];

        //
        // We only care about tags which have allocated memory
        //
        if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
        {
            //
            // If there's a tag, attempt to do a pretty print,
            // but only if it matches the caller's tag, or if
            // any tag is allowed.
            // For checking whether it matches the caller's tag,
            // use the mask to make sure not to mess with the wildcards
            //
            if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
                (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
            {
                CHAR Tag[4];

                //
                // Extract each 'component' and check whether they are printable
                //
                Tag[0] = TableEntry->Key & 0xFF;
                Tag[1] = TableEntry->Key >> 8 & 0xFF;
                Tag[2] = TableEntry->Key >> 16 & 0xFF;
                Tag[3] = TableEntry->Key >> 24 & 0xFF;

                if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
                {
                    //
                    // Print in direct order to make !poolused TAG usage easier
                    //
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
                else
                {
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
            }
            else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
            {
                if (Verbose)
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                }
                else
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedBytes);
                }
            }
        }
    }

    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
    }
}
#endif
/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
INIT_SECTION
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        '  oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        '  eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        '  pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}
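//
// Illustrative note (a sketch, not part of the original code): with the
// usual little-endian multi-character constant semantics, the tags above
// read reversed in source. For example:
//
//     'looP' == 0x6C6F6F50 -> bytes in memory 'P','o','o','l' ("Pool")
//     ' GIB' == 0x20474942 -> bytes in memory 'B','I','G',' ' ("BIG ")
//
// so the seeded list corresponds to the familiar forward-reading tags shown
// by tools such as !poolused.
//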
VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
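//
// Illustrative probe sequence (a sketch, not part of the original code):
// if two distinct tags hash to the same bucket, say 0x65F, the wraparound
// scan used above and in ExpInsertPoolTracker walks
//
//     0x65F (other tag) -> 0x660 (other tag) -> 0x661 (match or free)
//
// and gives up only after wrapping back to the initial index, which is the
// "out of pool tag space" case that would require table expansion.
//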
VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
VOID
NTAPI
INIT_SECTION
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support session pool yet
    //
    ASSERT(PoolType != PagedPoolSession);
}
VOID
NTAPI
INIT_SECTION
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;

        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // Finally, add the most used tags to speed up those allocations
        //
        ExpSeedHotTags();

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));

        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}
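//
// Illustrative sizing walkthrough (a sketch, not part of the original code)
// for the default case, where no registry override exists:
//
//     PoolTrackTableSize = 2048             (default entry count)
//     allocate 2048 + 1 entries             (extra sentinel at the end)
//     PoolTrackTableSize++          -> 2049
//     PoolTrackTableMask = 2049 - 2 -> 2047 (0x7FF)
//
// The mask works as a cheap modulo because the usable entry count stays a
// power of two; the big-page table keeps no sentinel, so its mask
// (PoolBigPageTableHash) is simply PoolBigPageTableSize - 1.
//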
FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}

VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}
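//
// Illustrative sequence (a sketch, not part of the original code) for the
// generic DPC barrier above on a two-processor system:
//
//     CPU0: KeSignalCallDpcSynchronize() == TRUE  -> copies the table
//     CPU1: KeSignalCallDpcSynchronize() == FALSE -> skips the copy
//     both: KeSignalCallDpcSynchronize()          -> wait until the copy is done
//     both: KeSignalCallDpcDone()                 -> release the barrier
//
// Since every processor is spinning in the DPC at DISPATCH_LEVEL, no pool
// tracker update can race with the snapshot while it is taken.
//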
NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}
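//
// Illustrative caller pattern (a sketch, not part of the original code):
// this is typically reached via NtQuerySystemInformation with the
// SystemPoolTagInformation class, using the usual two-pass sizing:
//
//     Status = NtQuerySystemInformation(SystemPoolTagInformation,
//                                       Buffer, Length, &Length);
//     // on STATUS_INFO_LENGTH_MISMATCH, Length holds the size needed for
//     // all Count entries: reallocate the buffer and call again
//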
BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // roll over to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 25% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if it's also been the 16th time that we
            // keep losing the race or that we are not finding a free entry anymore,
            // which implies a massive number of concurrent big pool allocations.
            //
            InterlockedIncrementUL(&ExpPoolBigEntriesInUse);
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
            {
                DPRINT("Should attempt expansion since we now have %lu entries\n",
                       ExpPoolBigEntriesInUse);
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there are no free hash buckets whatsoever, so we would now
    // have to attempt expanding the table
    //
    DPRINT1("Big pool expansion needed, not implemented!\n");
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return FALSE;
}
ULONG
NTAPI
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the index until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
    InterlockedIncrement((PLONG)&Entry->Va);
    InterlockedDecrementUL(&ExpPoolBigEntriesInUse);
    KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    return PoolTag;
}
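//
// Illustrative note (a sketch, not part of the original code): big-page
// entries are "freed" by setting POOL_BIG_TABLE_ENTRY_FREE (bit 0) in Va.
// Because big allocations are page-aligned, the low bits of Va are zero,
// so the InterlockedIncrement in ExpFindAndRemoveTagBigPages is equivalent
// to atomically doing Va |= 1:
//
//     Va = 0xF0800000 -> 0xF0800001 (entry free, page address recoverable)
//
// which is also why ExpAddTagForBigPages can test the free bit and reclaim
// entries with a single compare-exchange.
//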
VOID
NTAPI
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pools
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
#if 0
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }
#endif

    //
    // FIXME: Not yet supported
    //
    *NonPagedPoolLookasideHits += 0;
    *PagedPoolLookasideHits += 0;
}

VOID
NTAPI
ExReturnPoolQuota(IN PVOID P)
{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    USHORT BlockSize;
    PEPROCESS Process;

    if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
        (MmIsSpecialPoolAddress(P)))
    {
        return;
    }

    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    PoolType = Entry->PoolType - 1;
    BlockSize = Entry->BlockSize;

    if (PoolType & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Entry->PoolTag,
                             (ULONG_PTR)Process);
            }
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
}
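//
// Illustrative quota block layout (a sketch, not part of the original
// code): for quota-charged allocations the owning process pointer lives in
// the last pointer-sized slot of the block, i.e.
// ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1]:
//
//     [POOL_HEADER][caller data ...............][PEPROCESS]
//                                                ^ charged process
//
// ExReturnPoolQuota above clears that slot, returns the charge via
// PsReturnPoolQuota, and drops the process reference taken when the quota
// was charged.
//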
/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }
    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
#if DBG
            //
            // Out of memory, display current consumption
            // Let's consider that if the caller wanted more
            // than a hundred pages, that's a bogus caller
            // and we are not out of memory
            //
            if (NumberOfBytes < 100 * PAGE_SIZE)
            {
                MiDumpPoolConsumers(FALSE, 0, 0, 0);
            }
#endif

            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }

            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);
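    //
    // Illustrative computation (a sketch, not part of the original code),
    // assuming the 32-bit sizes sizeof(POOL_HEADER) == 8 and
    // POOL_BLOCK_SIZE == 8, for a 100-byte request:
    //
    //     i = (100 + 8 + 7) / 8 = 14 blocks = 112 bytes
    //
    // which covers the 8-byte header plus 100 bytes of caller data, rounded
    // up to the next block boundary.
    //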
    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[i - 1].P :
                        Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                            Prcb->PPPagedLookasideList[i - 1].L :
                            Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            Entry--;
            Entry->PoolType = OriginalType + 1;
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                continue;
            }

            //
            // Remove a free entry from the list.
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);
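            //
            // Illustrative split (a sketch, not part of the original code):
            // if the request needs i == 14 blocks but the head of this list
            // is a 20-block entry starting a page, the code below carves it
            // into
            //
            //     [14-block allocation][6-block free fragment]
            //
            // and reinserts the 6-block fragment into ListHeads[5], since
            // a fragment of BlockSize n is filed under ListHeads[n - 1].
            //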
            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = OriginalType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Increment required counters
            //
            InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
            InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

            //
            // Track this allocation
            //
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
    if (!Entry)
    {
#if DBG
        //
        // Out of memory, display current consumption
        // Let's consider that if the caller wanted more
        // than a hundred pages, that's a bogus caller
        // and we are not out of memory
        //
        if (NumberOfBytes < 100 * PAGE_SIZE)
        {
            MiDumpPoolConsumers(FALSE, 0, 0, 0);
        }
#endif

        //
        // Must succeed pool is deprecated, but still supported. These allocation
        // failures must cause an immediate bugcheck
        //
        if (OriginalType & MUST_SUCCEED_POOL_MASK)
        {
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         PAGE_SIZE,
                         NonPagedPoolDescriptor.TotalPages,
                         NonPagedPoolDescriptor.TotalBigPages,
                         0);
        }

        //
        // Internal debugging
        //
        ExPoolFailures++;

        //
        // This flag requests printing failures, and can also further specify
        // breaking on failures
        //
        if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
        {
            DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                    NumberOfBytes,
                    OriginalType);
            if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
        }

        //
        // Finally, this flag requests an exception, which we are more than
        // happy to raise!
        //
        if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
        {
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        }

        //
        // Return NULL to the caller in all other cases
        //
        return NULL;
    }

    //
    // Setup the entry data
    //
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = OriginalType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
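    //
    // Note: the header stores the pool type biased by one (see the
    // OriginalType + 1 assignment above), so a PoolType field of 0 always
    // identifies a free block -- e.g. a NonPagedPool (0) allocation is stored
    // as 1, while the free fragment's PoolType stays 0.
    //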
    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
    ExpInsertPoolTracker(Tag,
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         OriginalType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
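/*
 * Illustrative usage (editor's sketch, not part of the original logic):
 *
 *     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 128, 'tseT');
 *     if (Buffer) ExFreePoolWithTag(Buffer, 'tseT');
 *
 * Tags are stored little-endian, so the multi-character constant 'tseT'
 * shows up as "Test" in pool tag dumps.
 */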
/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    ULONG Tag = TAG_NONE;
#if 0 && DBG
    PLDR_DATA_TABLE_ENTRY LdrEntry;

    /* Use the first four letters of the driver name, or "None" if unavailable */
    LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
                    ? MiLookupDataTableEntry(_ReturnAddress())
                    : NULL;
    if (LdrEntry)
    {
        ULONG i;
        Tag = 0;
        for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
            Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
        for (; i < 4; i++)
            Tag = Tag >> 8 | ' ' << 24;
    }
#endif
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
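/*
 * Illustrative note on the disabled DBG block above: each iteration shifts
 * the accumulated tag right by 8 and inserts the next character into the top
 * byte, so after four iterations the first character ends up in the lowest
 * byte. For a driver named "ntoskrnl.exe" the resulting tag bytes are
 * 'n','t','o','s', which prints as "ntos" in tag dumps.
 */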
We can do some extra 2448 // checks at this point 2449 // 2450 if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK) 2451 { 2452 DPRINT1("Verifier not yet supported\n"); 2453 } 2454 2455 // 2456 // FIXME: Many debugging checks go here 2457 // 2458 } 2459 2460 // 2461 // Update counters 2462 // 2463 PoolDesc = PoolVector[PoolType]; 2464 InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs); 2465 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, 2466 -(LONG_PTR)(PageCount << PAGE_SHIFT)); 2467 2468 // 2469 // Do the real free now and update the last counter with the big page count 2470 // 2471 RealPageCount = MiFreePoolPages(P); 2472 ASSERT(RealPageCount == PageCount); 2473 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages, 2474 -(LONG)RealPageCount); 2475 return; 2476 } 2477 2478 // 2479 // Get the entry for this pool allocation 2480 // The pointer math here may look wrong or confusing, but it is quite right 2481 // 2482 Entry = P; 2483 Entry--; 2484 ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0); 2485 2486 // 2487 // Get the size of the entry, and it's pool type, then load the descriptor 2488 // for this pool type 2489 // 2490 BlockSize = Entry->BlockSize; 2491 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK; 2492 PoolDesc = PoolVector[PoolType]; 2493 2494 // 2495 // Make sure that the IRQL makes sense 2496 // 2497 ExpCheckPoolIrqlLevel(PoolType, 0, P); 2498 2499 // 2500 // Get the pool tag and get rid of the PROTECTED_POOL flag 2501 // 2502 Tag = Entry->PoolTag; 2503 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL; 2504 2505 // 2506 // Check block tag 2507 // 2508 if (TagToFree && TagToFree != Tag) 2509 { 2510 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag); 2511 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree); 2512 } 2513 2514 // 2515 // Track the removal of this allocation 2516 // 2517 ExpRemovePoolTracker(Tag, 2518 BlockSize * POOL_BLOCK_SIZE, 2519 Entry->PoolType - 1); 2520 2521 // 2522 // Release pool quota, if any 2523 // 2524 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK) 2525 { 2526 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1]; 2527 if (Process) 2528 { 2529 if (Process->Pcb.Header.Type != ProcessObject) 2530 { 2531 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n", 2532 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize); 2533 KeBugCheckEx(BAD_POOL_CALLER, 2534 0x0D, 2535 (ULONG_PTR)P, 2536 Tag, 2537 (ULONG_PTR)Process); 2538 } 2539 PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE); 2540 ObDereferenceObject(Process); 2541 } 2542 } 2543 2544 // 2545 // Is this allocation small enough to have come from a lookaside list? 2546 // 2547 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS) 2548 { 2549 // 2550 // Try pushing it into the per-CPU lookaside list 2551 // 2552 LookasideList = (PoolType == PagedPool) ? 2553 Prcb->PPPagedLookasideList[BlockSize - 1].P : 2554 Prcb->PPNPagedLookasideList[BlockSize - 1].P; 2555 LookasideList->TotalFrees++; 2556 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth) 2557 { 2558 LookasideList->FreeHits++; 2559 InterlockedPushEntrySList(&LookasideList->ListHead, P); 2560 return; 2561 } 2562 2563 // 2564 // We failed, try to push it into the global lookaside list 2565 // 2566 LookasideList = (PoolType == PagedPool) ? 
    //
    // Release pool quota, if any
    //
    if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Tag,
                             (ULONG_PTR)Process);
            }
            PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }

    //
    // Is this allocation small enough to have come from a lookaside list?
    //
    if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                         Prcb->PPPagedLookasideList[BlockSize - 1].P :
                         Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // We failed, so try to push it into the global lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                         Prcb->PPPagedLookasideList[BlockSize - 1].L :
                         Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Update performance counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }
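    //
    // Illustrative note: if we freed a 4-block entry whose 6-block successor
    // and 3-block predecessor were both free, the two combines above yield a
    // single 13-block free entry, with Entry now pointing at the predecessor.
    //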
    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, update the performance counter,
        // and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending on whether we combined with the
        // previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    PVOID Buffer;

    //
    // Allocate the pool
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
    if (Buffer == NULL)
    {
        UNIMPLEMENTED;
    }

    return Buffer;
}
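/*
 * Note: the Priority hint is currently ignored above -- the request is
 * forwarded to ExAllocatePoolWithTag as-is, and only the failure path is
 * flagged as unimplemented.
 */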
/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    BOOLEAN Raise = TRUE;
    PVOID Buffer;
    PPOOL_HEADER Entry;
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();

    //
    // Check if we should fail instead of raising an exception
    //
    if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
    {
        Raise = FALSE;
        PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
    }

    //
    // Inject the pool quota mask
    //
    PoolType += QUOTA_POOL_MASK;

    //
    // Check if we have enough space to add the quota owner process, as long as
    // this isn't the system process, which never gets charged quota
    //
    ASSERT(NumberOfBytes != 0);
    if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
        (Process != PsInitialSystemProcess))
    {
        //
        // Add space for our EPROCESS pointer
        //
        NumberOfBytes += sizeof(PEPROCESS);
    }
    else
    {
        //
        // We won't be able to store the pointer, so don't use quota for this
        //
        PoolType -= QUOTA_POOL_MASK;
    }

    //
    // Allocate the pool buffer now
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);

    //
    // If the buffer is page-aligned, this is a large page allocation and we
    // won't touch it
    //
    if (PAGE_ALIGN(Buffer) != Buffer)
    {
        //
        // Also if special pool is enabled, and this was allocated from there,
        // we won't touch it either
        //
        if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
            (MmIsSpecialPoolAddress(Buffer)))
        {
            return Buffer;
        }

        //
        // If it wasn't actually allocated with quota charges, ignore it too
        //
        if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;

        //
        // If this is the system process, we don't charge quota, so ignore
        //
        if (Process == PsInitialSystemProcess) return Buffer;

        //
        // Actually go and charge quota for the process now
        //
        Entry = POOL_ENTRY(Buffer);
        Status = PsChargeProcessPoolQuota(Process,
                                          PoolType & BASE_POOL_TYPE_MASK,
                                          Entry->BlockSize * POOL_BLOCK_SIZE);
        if (!NT_SUCCESS(Status))
        {
            //
            // Quota failed, back out the allocation, clear the owner, and fail
            //
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            ExFreePoolWithTag(Buffer, Tag);
            if (Raise) RtlRaiseStatus(Status);
            return NULL;
        }

        //
        // Quota worked, write the owner and then reference it before returning
        //
        ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
        ObReferenceObject(Process);
    }
    else if (!(Buffer) && (Raise))
    {
        //
        // The allocation failed, raise an error if we are in raise mode
        //
        RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
    }

    //
    // Return the allocated buffer
    //
    return Buffer;
}
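/*
 * Illustrative note (x86 sizes assumed: POOL_BLOCK_SIZE 8, sizeof(PVOID) 4):
 * a quota'd request for 32 bytes is grown to 36 bytes so the owner pointer
 * fits, which rounds up to 6 blocks (48 bytes) including the header; the
 * EPROCESS pointer is then written into the last 4 bytes of that block.
 */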
#if DBG && defined(KDBG)

BOOLEAN
ExpKdbgExtPool(
    ULONG Argc,
    PCHAR Argv[])
{
    ULONG_PTR Address = 0, Flags = 0;
    PVOID PoolPage;
    PPOOL_HEADER Entry;
    BOOLEAN ThisOne;
    PULONG Data;

    if (Argc > 1)
    {
        /* Get address */
        if (!KdbpGetHexNumber(Argv[1], &Address))
        {
            KdbpPrint("Invalid parameter: %s\n", Argv[0]);
            return TRUE;
        }
    }

    if (Argc > 2)
    {
        /* Get flags */
        if (!KdbpGetHexNumber(Argv[2], &Flags))
        {
            KdbpPrint("Invalid parameter: %s\n", Argv[0]);
            return TRUE;
        }
    }

    /* Check if we got an address */
    if (Address != 0)
    {
        /* Get the base page */
        PoolPage = PAGE_ALIGN(Address);
    }
    else
    {
        KdbpPrint("Heap is unimplemented\n");
        return TRUE;
    }

    /* No paging support! */
    if (!MmIsAddressValid(PoolPage))
    {
        KdbpPrint("Address not accessible!\n");
        return TRUE;
    }

    /* Get pool type */
    if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
        KdbpPrint("Allocation is from PagedPool region\n");
    else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
        KdbpPrint("Allocation is from NonPagedPool region\n");
    else
    {
        KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
        return TRUE;
    }

    /* Loop all entries of that page */
    Entry = PoolPage;
    do
    {
        /* Check if the address is within that entry */
        ThisOne = ((Address >= (ULONG_PTR)Entry) &&
                   (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));

        if (!(Flags & 1) || ThisOne)
        {
            /* Print the line */
            KdbpPrint("%c%p size: %4d previous size: %4d %s %.4s\n",
                      ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
                      (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free)     "),
                      (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
        }

        if (Flags & 1)
        {
            Data = (PULONG)(Entry + 1);
            KdbpPrint("    %p  %08lx %08lx %08lx %08lx\n"
                      "    %p  %08lx %08lx %08lx %08lx\n",
                      &Data[0], Data[0], Data[1], Data[2], Data[3],
                      &Data[4], Data[4], Data[5], Data[6], Data[7]);
        }

        /* Go to next entry */
        Entry = POOL_BLOCK(Entry, Entry->BlockSize);
    }
    while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));

    return TRUE;
}

static
VOID
ExpKdbgExtPoolUsedGetTag(PCHAR Arg, PULONG Tag, PULONG Mask)
{
    /* Zero-initialize so tags shorter than 4 characters don't pick up garbage */
    CHAR Tmp[4] = { 0 };
    ULONG Len;
    USHORT i;

    /* Get the tag */
    Len = strlen(Arg);
    if (Len > 4)
    {
        Len = 4;
    }

    /* Generate the mask to have wildcards support */
    for (i = 0; i < Len; ++i)
    {
        Tmp[i] = Arg[i];
        if (Tmp[i] != '?')
        {
            *Mask |= (0xFF << i * 8);
        }
    }

    /* Get the tag in the ulong form */
    *Tag = *((PULONG)Tmp);
}

BOOLEAN
ExpKdbgExtPoolUsed(
    ULONG Argc,
    PCHAR Argv[])
{
    ULONG Tag = 0;
    ULONG Mask = 0;
    ULONG Flags = 0;

    if (Argc > 1)
    {
        /* If we have 2+ args, easy: flags then tag */
        if (Argc > 2)
        {
            ExpKdbgExtPoolUsedGetTag(Argv[2], &Tag, &Mask);
            if (!KdbpGetHexNumber(Argv[1], &Flags))
            {
                KdbpPrint("Invalid parameter: %s\n", Argv[0]);
            }
        }
        else
        {
            /* Otherwise, try to find out whether that's flags */
            if (strlen(Argv[1]) == 1 ||
                (strlen(Argv[1]) == 3 && Argv[1][0] == '0' && Argv[1][1] == 'x'))
            {
                /* Fallback: if reading flags failed, assume it's a tag */
                if (!KdbpGetHexNumber(Argv[1], &Flags))
                {
                    ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
                }
            }
            /* Or tag */
            else
            {
                ExpKdbgExtPoolUsedGetTag(Argv[1], &Tag, &Mask);
            }
        }
    }

    /* Call the dumper */
    MiDumpPoolConsumers(TRUE, Tag, Mask, Flags);

    return TRUE;
}

#endif // DBG && KDBG

/* EOF */