/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

/*
 * This defines when we shrink or expand the table.
 * 3 --> keep the number of used entries between 33% and 66% of the table capacity.
 * 4 --> 25% - 75%
 * etc.
 */
#define POOL_BIG_TABLE_USE_RATE 4

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
ULONG ExpBigTableExpansionFailed;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;
ULONGLONG MiLastPoolDumpTime;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))
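/*
 * Worked example for the macros above (illustrative values, assuming the
 * 32-bit sizes POOL_BLOCK_SIZE == 8 and sizeof(POOL_HEADER) == 8): for a
 * header at 0x80001000 with BlockSize == 4, POOL_FREE_BLOCK yields
 * 0x80001008 (the caller-visible data, right past the header) and
 * POOL_NEXT_BLOCK yields 0x80001020 (4 blocks, i.e. 32 bytes, further on).
 * BlockSize and PreviousSize are counted in POOL_BLOCK_SIZE units, never
 * in bytes.
 */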
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}
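/*
 * An illustrative note on the encoding (values assumed for the sketch):
 * since pool blocks are at least 8-byte aligned, bit 0 of a list pointer is
 * always free. ExpEncodePoolLink sets it, so a stored Flink of 0x80001008
 * becomes 0x80001009, and ExpDecodePoolLink masks it off again. Any code
 * that dereferences a stored link without decoding it first faults on the
 * misaligned pointer, turning a silent list corruption into an immediate,
 * debuggable crash.
 */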
VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}
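/*
 * A worked example of the invariants checked above (illustrative sizes):
 * if the first three blocks on a page are 4, 10 and 2 blocks long, their
 * headers must read
 *
 *   Block A: PreviousSize = 0,  BlockSize = 4   (page-aligned)
 *   Block B: PreviousSize = 4,  BlockSize = 10
 *   Block C: PreviousSize = 10, BlockSize = 2
 *
 * i.e. each header's PreviousSize mirrors its predecessor's BlockSize, and
 * following BlockSize from A lands exactly on B and then C without ever
 * crossing the page. Any mismatch means a header was overwritten.
 */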
VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}
"PagedPool" : "NonPagedPool"); 369 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag); 370 } 371 } 372 } 373 374 VOID 375 NTAPI 376 ExpCheckPoolBlocks(IN PVOID Block) 377 { 378 BOOLEAN FoundBlock = FALSE; 379 SIZE_T Size = 0; 380 PPOOL_HEADER Entry; 381 382 /* Get the first entry for this page, make sure it really is the first */ 383 Entry = PAGE_ALIGN(Block); 384 ASSERT(Entry->PreviousSize == 0); 385 386 /* Now scan each entry */ 387 while (TRUE) 388 { 389 /* When we actually found our block, remember this */ 390 if (Entry == Block) FoundBlock = TRUE; 391 392 /* Now validate this block header */ 393 ExpCheckPoolHeader(Entry); 394 395 /* And go to the next one, keeping track of our size */ 396 Size += Entry->BlockSize; 397 Entry = POOL_NEXT_BLOCK(Entry); 398 399 /* If we hit the last block, stop */ 400 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break; 401 402 /* If we hit the end of the page, stop */ 403 if (PAGE_ALIGN(Entry) == Entry) break; 404 } 405 406 /* We must've found our block, and we must have hit the end of the page */ 407 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock)) 408 { 409 /* Otherwise, the blocks are messed up */ 410 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry); 411 } 412 } 413 414 FORCEINLINE 415 VOID 416 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, 417 IN SIZE_T NumberOfBytes, 418 IN PVOID Entry) 419 { 420 // 421 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must 422 // be DISPATCH_LEVEL or lower for Non Paged Pool 423 // 424 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ? 425 (KeGetCurrentIrql() > APC_LEVEL) : 426 (KeGetCurrentIrql() > DISPATCH_LEVEL)) 427 { 428 // 429 // Take the system down 430 // 431 KeBugCheckEx(BAD_POOL_CALLER, 432 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID, 433 KeGetCurrentIrql(), 434 PoolType, 435 !Entry ? NumberOfBytes : (ULONG_PTR)Entry); 436 } 437 } 438 439 FORCEINLINE 440 ULONG 441 ExpComputeHashForTag(IN ULONG Tag, 442 IN SIZE_T BucketMask) 443 { 444 // 445 // Compute the hash by multiplying with a large prime number and then XORing 446 // with the HIDWORD of the result. 447 // 448 // Finally, AND with the bucket mask to generate a valid index/bucket into 449 // the table 450 // 451 ULONGLONG Result = (ULONGLONG)40543 * Tag; 452 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32)); 453 } 454 455 FORCEINLINE 456 ULONG 457 ExpComputePartialHashForAddress(IN PVOID BaseAddress) 458 { 459 ULONG Result; 460 // 461 // Compute the hash by converting the address into a page number, and then 462 // XORing each nibble with the next one. 463 // 464 // We do *NOT* AND with the bucket mask at this point because big table expansion 465 // might happen. Therefore, the final step of the hash must be performed 466 // while holding the expansion pushlock, and this is why we call this a 467 // "partial" hash only. 468 // 469 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT); 470 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result; 471 } 472 473 #if DBG 474 /* 475 * FORCEINLINE 476 * BOOLEAN 477 * ExpTagAllowPrint(CHAR Tag); 478 */ 479 #define ExpTagAllowPrint(Tag) \ 480 ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */) 481 482 #ifdef KDBG 483 #include <kdbg/kdb.h> 484 #endif 485 486 #ifdef KDBG 487 #define MiDumperPrint(dbg, fmt, ...) \ 488 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \ 489 else DPRINT1(fmt, ##__VA_ARGS__) 490 #else 491 #define MiDumperPrint(dbg, fmt, ...) 
#if DBG
/*
 * FORCEINLINE
 * BOOLEAN
 * ExpTagAllowPrint(CHAR Tag);
 */
#define ExpTagAllowPrint(Tag) \
    ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */)

#ifdef KDBG
#include <kdbg/kdb.h>
#endif

#ifdef KDBG
#define MiDumperPrint(dbg, fmt, ...)        \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)
#else
#define MiDumperPrint(dbg, fmt, ...)        \
    DPRINT1(fmt, ##__VA_ARGS__)
#endif

VOID
MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
{
    SIZE_T i;
    BOOLEAN Verbose;

    //
    // Only print header if called from OOM situation
    //
    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
        DPRINT1("Out of memory dumper!\n");
    }
#ifdef KDBG
    else
    {
        KdbpPrint("Pool Used:\n");
    }
#endif

    //
    // Remember whether we'll have to be verbose
    // This is the only supported flag!
    //
    Verbose = BooleanFlagOn(Flags, 1);

    //
    // Print table header
    //
    if (Verbose)
    {
        MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
    }
    else
    {
        MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
    }

    //
    // We'll extract allocations for all the tracked pools
    //
    for (i = 0; i < PoolTrackTableSize; ++i)
    {
        PPOOL_TRACKER_TABLE TableEntry;

        TableEntry = &PoolTrackTable[i];

        //
        // We only care about tags which have allocated memory
        //
        if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
        {
            //
            // If there's a tag, attempt to do a pretty print, but only if it
            // matches the caller's tag, or if any tag is allowed.
            // When checking whether it matches the caller's tag, use the mask
            // to make sure not to mess with the wildcards
            //
            if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
                (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
            {
                CHAR Tag[4];

                //
                // Extract each 'component' and check whether they are printable
                //
                Tag[0] = TableEntry->Key & 0xFF;
                Tag[1] = TableEntry->Key >> 8 & 0xFF;
                Tag[2] = TableEntry->Key >> 16 & 0xFF;
                Tag[3] = TableEntry->Key >> 24 & 0xFF;

                if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
                {
                    //
                    // Print in direct order to make !poolused TAG usage easier
                    //
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
                else
                {
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
            }
            else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
            {
                if (Verbose)
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                }
                else
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedBytes);
                }
            }
        }
    }

    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
    }
}
#endif

/* PRIVATE FUNCTIONS **********************************************************/

CODE_SEG("INIT")
VOID
NTAPI
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        '  oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        '  eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        '  pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}
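/*
 * A sketch of the probing above, with assumed values: suppose 'LooP' and
 * 'tSbO' both hash to bucket 17. The tag seeded first claims
 * TrackTable[17]; the second finds the slot occupied and linearly probes
 * (17 + 1) & PoolTrackTableMask, (17 + 2) & PoolTrackTableMask, ... until a
 * free slot appears. ExpInsertPoolTracker and ExpRemovePoolTracker below
 * walk the exact same probe sequence, so a tag is always found at, or
 * shortly after, its home bucket.
 */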
VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on whether this was paged or
            // nonpaged pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
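/*
 * Illustrative pairing (hypothetical driver code, not part of this file):
 *
 *   PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 64, 'tseT');
 *   ...
 *   ExFreePoolWithTag(Buffer, 'tseT');
 *
 * The allocation path ends up in ExpInsertPoolTracker('tseT', ...) and the
 * free path in ExpRemovePoolTracker('tseT', ...), so once the pair has run,
 * the 'tseT' entry shows NonPagedAllocs == NonPagedFrees and a byte delta
 * of zero. Tools such as !poolused read exactly these counters.
 */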
CODE_SEG("INIT")
VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support Session Pool Yet
    //
    ASSERT(PoolType != PagedPoolSession);
}

CODE_SEG("INIT")
VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }
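        //
        // Worked example for the loop above (assumed input): if the registry
        // seeded PoolTrackTableSize with 0x1000 (4096, a power of two), the
        // loop shifts right twelve times before the set bit reaches bit 0,
        // leaving i == 12, and the code below restores the size as 1 << 12.
        // A non-power-of-two such as 0x1800 trips the ASSERT on checked
        // builds (bits remain above the lowest set bit); retail builds keep
        // shifting and effectively round down to the highest power of two.
        //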
        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;

        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // Finally, add the most used tags to speed up those allocations
        //
        ExpSeedHotTags();

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            PoolBigPageTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
        }

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}

FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}
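/*
 * Usage sketch (illustrative; this is the idiom the allocator itself uses
 * further below):
 *
 *   KIRQL OldIrql = ExLockPool(PoolDesc);
 *   ... manipulate PoolDesc->ListHeads safely ...
 *   ExUnlockPool(PoolDesc, OldIrql);
 *
 * For nonpaged pool the pair raises to DISPATCH_LEVEL via the queued
 * spinlock; for paged pool the guarded mutex leaves the thread at APC_LEVEL,
 * which is why ExLockPool reports APC_LEVEL as the "old" IRQL in that case.
 */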
VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}
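/*
 * A minimal sketch of the "Generic DPC" pattern used above (the flow is
 * illustrative; the named routines are the ones actually involved):
 * KeGenericCallDpc queues the routine on every processor at DISPATCH_LEVEL.
 * Exactly one processor "wins" the first KeSignalCallDpcSynchronize and
 * snapshots the table while all the others spin at the barrier, so no
 * allocator can run concurrently and the copy is consistent. The second
 * synchronize releases everyone, and KeSignalCallDpcDone retires the
 * broadcast.
 */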
NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}

_IRQL_requires_(DISPATCH_LEVEL)
static
BOOLEAN
ExpReallocateBigPageTable(
    _In_ _IRQL_restores_ KIRQL OldIrql,
    _In_ BOOLEAN Shrink)
{
    SIZE_T OldSize = PoolBigPageTableSize;
    SIZE_T NewSize, NewSizeInBytes;
    PPOOL_TRACKER_BIG_PAGES NewTable;
    PPOOL_TRACKER_BIG_PAGES OldTable;
    ULONG i;
    ULONG PagesFreed;
    ULONG Hash;
    ULONG HashMask;

    /* Must be holding ExpLargePoolTableLock */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    /* Make sure we don't overflow */
    if (Shrink)
    {
        NewSize = OldSize / 2;

        /* Make sure we don't shrink too much. */
        ASSERT(NewSize >= ExpPoolBigEntriesInUse);

        NewSize = ALIGN_UP_BY(NewSize, PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES));
        ASSERT(NewSize <= OldSize);

        /* If there is only one page left, then keep it around. Not a failure either. */
        if (NewSize == OldSize)
        {
            ASSERT(NewSize == (PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES)));
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }
    }
    else
    {
        if (!NT_SUCCESS(RtlSIZETMult(2, OldSize, &NewSize)))
        {
            DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return FALSE;
        }

        /* Make sure we don't stupidly waste pages */
        NewSize = ALIGN_DOWN_BY(NewSize, PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES));
        ASSERT(NewSize > OldSize);
    }

    if (!NT_SUCCESS(RtlSIZETMult(sizeof(POOL_TRACKER_BIG_PAGES), NewSize, &NewSizeInBytes)))
    {
        DPRINT1("Overflow while calculating big page table size. Size=%lu\n", OldSize);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
    if (NewTable == NULL)
    {
        DPRINT("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    DPRINT("%s big pool tracker table to %lu entries\n", Shrink ? "Shrinking" : "Expanding", NewSize);
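    /*
     * Rehash note, with illustrative numbers: the bucket index depends on
     * the table size (Hash % HashMask below), so an entry that lived in
     * bucket 5 of a 512-entry table generally belongs in a different bucket
     * of a 1024-entry table. That is why every live entry is rehashed into
     * NewTable rather than copied 1:1 by index.
     */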
"Shrinking" : "Expanding", NewSize); 1527 1528 /* Initialize the new table */ 1529 RtlZeroMemory(NewTable, NewSizeInBytes); 1530 for (i = 0; i < NewSize; i++) 1531 { 1532 NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE; 1533 } 1534 1535 /* Copy over all items */ 1536 OldTable = PoolBigPageTable; 1537 HashMask = NewSize - 1; 1538 for (i = 0; i < OldSize; i++) 1539 { 1540 /* Skip over empty items */ 1541 if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE) 1542 { 1543 continue; 1544 } 1545 1546 /* Recalculate the hash due to the new table size */ 1547 Hash = ExpComputePartialHashForAddress(OldTable[i].Va) % HashMask; 1548 1549 /* Find the location in the new table */ 1550 while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE)) 1551 { 1552 if (++Hash == NewSize) 1553 Hash = 0; 1554 } 1555 1556 /* We must have space */ 1557 ASSERT((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE); 1558 1559 /* Finally, copy the item */ 1560 NewTable[Hash] = OldTable[i]; 1561 } 1562 1563 /* Activate the new table */ 1564 PoolBigPageTable = NewTable; 1565 PoolBigPageTableSize = NewSize; 1566 PoolBigPageTableHash = PoolBigPageTableSize - 1; 1567 1568 /* Release the lock, we're done changing global state */ 1569 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql); 1570 1571 /* Free the old table and update our tracker */ 1572 PagesFreed = MiFreePoolPages(OldTable); 1573 ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0); 1574 ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0); 1575 1576 return TRUE; 1577 } 1578 1579 BOOLEAN 1580 NTAPI 1581 ExpAddTagForBigPages(IN PVOID Va, 1582 IN ULONG Key, 1583 IN ULONG NumberOfPages, 1584 IN POOL_TYPE PoolType) 1585 { 1586 ULONG Hash, i = 0; 1587 PVOID OldVa; 1588 KIRQL OldIrql; 1589 SIZE_T TableSize; 1590 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart; 1591 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0); 1592 ASSERT(!(PoolType & SESSION_POOL_MASK)); 1593 1594 // 1595 // As the table is expandable, these values must only be read after acquiring 1596 // the lock to avoid a teared access during an expansion 1597 // NOTE: Windows uses a special reader/writer SpinLock to improve 1598 // performance in the common case (add/remove a tracker entry) 1599 // 1600 Retry: 1601 Hash = ExpComputePartialHashForAddress(Va); 1602 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql); 1603 Hash &= PoolBigPageTableHash; 1604 TableSize = PoolBigPageTableSize; 1605 1606 // 1607 // We loop from the current hash bucket to the end of the table, and then 1608 // rollover to hash bucket 0 and keep going from there. 
BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    // NOTE: Windows uses a special reader/writer SpinLock to improve
    // performance in the common case (add/remove a tracker entry)
    //
Retry:
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // roll over to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 75% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if it's also been the 16th time that we
            // keep losing the race or that we are not finding a free entry anymore,
            // which implies a massive number of concurrent big pool allocations.
            //
            ExpPoolBigEntriesInUse++;
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize * (POOL_BIG_TABLE_USE_RATE - 1) / POOL_BIG_TABLE_USE_RATE)))
            {
                DPRINT("Attempting expansion since we now have %lu entries\n",
                       ExpPoolBigEntriesInUse);
                ASSERT(TableSize == PoolBigPageTableSize);
                ExpReallocateBigPageTable(OldIrql, FALSE);
                return TRUE;
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there are no free hash buckets whatsoever, so we now have
    // to attempt expanding the table
    //
    ASSERT(TableSize == PoolBigPageTableSize);
    if (ExpReallocateBigPageTable(OldIrql, FALSE))
    {
        goto Retry;
    }
    ExpBigTableExpansionFailed++;
    DPRINT1("Big pool table expansion failed\n");
    return FALSE;
}
ULONG
NTAPI
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the index until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
    Entry->Va = (PVOID)((ULONG_PTR)Entry->Va | POOL_BIG_TABLE_ENTRY_FREE);

    ExpPoolBigEntriesInUse--;

    /* If we've dropped to 12.5% of the table size (or whatever integer rounding
     * gets us to), halve the table, which will leave us at 25% of space used. */
    if (ExpPoolBigEntriesInUse < (PoolBigPageTableSize / (POOL_BIG_TABLE_USE_RATE * 2)))
    {
        /* Shrink the table. */
        ExpReallocateBigPageTable(OldIrql, TRUE);
    }
    else
    {
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    }
    return PoolTag;
}
VOID
NTAPI
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pools
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
#if 0
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }
#endif

    //
    // Get the amount of hits in the system lookaside lists
    //
    if (!IsListEmpty(&ExPoolLookasideListHead))
    {
        PLIST_ENTRY ListEntry;

        for (ListEntry = ExPoolLookasideListHead.Flink;
             ListEntry != &ExPoolLookasideListHead;
             ListEntry = ListEntry->Flink)
        {
            PGENERAL_LOOKASIDE Lookaside;

            Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);

            if (Lookaside->Type == NonPagedPool)
            {
                *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
            }
            else
            {
                *PagedPoolLookasideHits += Lookaside->AllocateHits;
            }
        }
    }
}

VOID
NTAPI
ExReturnPoolQuota(IN PVOID P)
{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    USHORT BlockSize;
    PEPROCESS Process;

    if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
        (MmIsSpecialPoolAddress(P)))
    {
        return;
    }

    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    PoolType = Entry->PoolType - 1;
    BlockSize = Entry->BlockSize;

    if (PoolType & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             POOL_BILLED_PROCESS_INVALID,
                             (ULONG_PTR)P,
                             Entry->PoolTag,
                             (ULONG_PTR)Process);
            }
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
}

/* PUBLIC FUNCTIONS ***********************************************************/
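/*
 * Caller-side sketch (hypothetical driver code, shown for context; MY_CTX
 * and the 'xtCM' tag are made up): the allocator below is what services
 * calls like
 *
 *   PMY_CTX Ctx = ExAllocatePoolWithTag(NonPagedPool, sizeof(MY_CTX), 'xtCM');
 *   if (!Ctx) return STATUS_INSUFFICIENT_RESOURCES;
 *   ...
 *   ExFreePoolWithTag(Ctx, 'xtCM');
 *
 * Requests above POOL_MAX_ALLOC go straight to MiAllocatePoolPages; smaller
 * requests are served from per-processor lookaside lists or the descriptor's
 * free lists, as implemented below.
 */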
/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
#if DBG
            //
            // Out of memory, display current consumption
            // Let's consider that if the caller wanted more
            // than a hundred pages, that's a bogus caller
            // and we are not out of memory. Dump at most
            // once a second to avoid spamming the log.
            //
            if (NumberOfBytes < 100 * PAGE_SIZE &&
                KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
            {
                MiDumpPoolConsumers(FALSE, 0, 0, 0);
                MiLastPoolDumpTime = KeQueryInterruptTime();
            }
#endif

            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }

            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);
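    //
    // Worked example (assuming the 32-bit values sizeof(POOL_HEADER) == 8
    // and POOL_BLOCK_SIZE == 8): a request for 100 bytes computes
    // i = (100 + 8 + 7) / 8 = 14 blocks, i.e. 112 bytes, which covers the
    // 8-byte header, the 100 bytes of data, and 4 bytes of rounding slack.
    //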

    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[i - 1].P :
                        Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                            Prcb->PPPagedLookasideList[i - 1].L :
                            Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            Entry--;
            Entry->PoolType = OriginalType + 1;
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }
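
    //
    // N.B. Editorial note (not part of the original source): the lookaside
    // caches form two levels -- a per-processor list (.P), which mostly avoids
    // cross-CPU contention, backed by a shared list (.L). A pop from either
    // returns a block freed earlier at the same size index, so its header
    // still carries a valid BlockSize and only the pool type and tag need to
    // be rewritten.
    //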

    //
    // Loop through the free lists looking for a block of this size. Start with
    // the list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                continue;
            }

            //
            // Remove a free entry from the list.
            // Note that, due to the way we insert free blocks into multiple
            // lists, any block on this list is guaranteed to be either of the
            // correct size, or larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if that were the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is
                    // smaller, so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }
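
            //
            // N.B. Illustrative example (editorial addition, not part of the
            // original source): serving i = 4 from a 12-block free entry. If
            // no block precedes it, the first 4 blocks become the allocation
            // and the trailing 8 blocks become the free fragment. If a block
            // does precede it, the entry instead shrinks in place to an
            // 8-block fragment and the allocation is carved out of its tail.
            // Either way the page still tiles exactly: 4 + 8 = 12 blocks,
            // with each block's PreviousSize matching its neighbor's
            // BlockSize.
            //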

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = OriginalType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Increment required counters
            //
            InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                        Entry->BlockSize * POOL_BLOCK_SIZE);
            InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

            //
            // Track this allocation
            //
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
    if (!Entry)
    {
#if DBG
        //
        // Out of memory, display current consumption.
        // Consider any caller that wanted more than a hundred pages bogus,
        // rather than a sign that we are out of memory. Dump at most once a
        // second to avoid spamming the log.
        //
        if (NumberOfBytes < 100 * PAGE_SIZE &&
            KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
        {
            MiDumpPoolConsumers(FALSE, 0, 0, 0);
            MiLastPoolDumpTime = KeQueryInterruptTime();
        }
#endif

        //
        // Must-succeed pool is deprecated, but still supported. These
        // allocation failures must cause an immediate bugcheck
        //
        if (OriginalType & MUST_SUCCEED_POOL_MASK)
        {
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         PAGE_SIZE,
                         NonPagedPoolDescriptor.TotalPages,
                         NonPagedPoolDescriptor.TotalBigPages,
                         0);
        }

        //
        // Internal debugging
        //
        ExPoolFailures++;

        //
        // This flag requests printing failures, and can also further specify
        // breaking on failures
        //
        if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
        {
            DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                    NumberOfBytes,
                    OriginalType);
            if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
        }

        //
        // Finally, this flag requests an exception, which we are more than
        // happy to raise!
        //
        if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
        {
            ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        }

        //
        // Return NULL to the caller in all other cases
        //
        return NULL;
    }

    //
    // Setup the entry data
    //
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = OriginalType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;
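
    //
    // N.B. Illustrative example (editorial addition, not part of the original
    // source), assuming a 32-bit build with a 4096-byte page and a
    // POOL_BLOCK_SIZE of 8: the page holds 4096 / 8 = 512 block units, so an
    // i = 4 allocation leaves a free fragment of 512 - 4 = 508 blocks starting
    // at offset 32 into the page.
    //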

    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                Entry->BlockSize * POOL_BLOCK_SIZE);

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
    ExpInsertPoolTracker(Tag,
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         OriginalType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    ULONG Tag = TAG_NONE;
#if 0 && DBG
    PLDR_DATA_TABLE_ENTRY LdrEntry;

    /* Use the first four letters of the driver name, or "None" if unavailable */
    LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
               ? MiLookupDataTableEntry(_ReturnAddress())
               : NULL;
    if (LdrEntry)
    {
        ULONG i;
        Tag = 0;
        for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
            Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
        for (; i < 4; i++)
            Tag = Tag >> 8 | ' ' << 24;
    }
#endif
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
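
//
// N.B. Illustrative usage sketch (editorial addition, not part of the
// original source): a typical kernel-mode caller pairs the tagged allocate
// and free, with the tag chosen as four ASCII characters stored little-endian
// ('tseT' shows up as "Test" in pool-tracking tools):
//
//     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 128, 'tseT');
//     if (Buffer)
//     {
//         /* ... use the 128 bytes ... */
//         ExFreePoolWithTag(Buffer, 'tseT');
//     }
//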

/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    USHORT BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    ULONG Tag;
    BOOLEAN Combined = FALSE;
    PFN_NUMBER PageCount, RealPageCount;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;
    PEPROCESS Process;

    //
    // Check if any of the debug flags are enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                        POOL_FLAG_CHECK_WORKERS |
                        POOL_FLAG_CHECK_RESOURCES |
                        POOL_FLAG_VERIFIER |
                        POOL_FLAG_CHECK_DEADLOCK |
                        POOL_FLAG_SPECIAL_POOL))
    {
        //
        // Check if special pool is enabled
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if it was allocated from a special pool
            //
            if (MmIsSpecialPoolAddress(P))
            {
                //
                // Was deadlock verification also enabled? We can do some extra
                // checks at this point
                //
                if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
                {
                    DPRINT1("Verifier not yet supported\n");
                }

                //
                // It is, so handle it via the special pool free routine
                //
                MmFreeSpecialPool(P);
                return;
            }
        }

        //
        // For non-big-page allocations, we'll do a bunch of checks in here
        //
        if (PAGE_ALIGN(P) != P)
        {
            //
            // Get the entry for this pool allocation
            // The pointer math here may look wrong or confusing, but it is quite right
            //
            Entry = P;
            Entry--;

            //
            // Get the pool type
            //
            PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;

            //
            // FIXME: Many other debugging checks go here
            //
            ExpCheckPoolIrqlLevel(PoolType, 0, P);
        }
    }

    //
    // Check if this is a big page allocation
    //
    if (PAGE_ALIGN(P) == P)
    {
        //
        // We need to find the tag for it, so first we need to find out what
        // kind of allocation this was (paged or nonpaged), then we can go
        // ahead and try finding the tag for it. Remember to get rid of the
        // PROTECTED_POOL tag if it's found.
        //
        // Note that if, at insertion time, we failed to add the tag for a big
        // pool allocation, we used a special tag called 'BIG' to identify the
        // allocation, and we may get this tag back. In this scenario, we must
        // manually get the size of the allocation by actually counting through
        // the PFN database.
        //
        PoolType = MmDeterminePoolType(P);
        ExpCheckPoolIrqlLevel(PoolType, 0, P);
        Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
        if (!Tag)
        {
            DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
            ASSERT(Tag == ' GIB');
            PageCount = 1; // We are going to lie! This might screw up accounting?
        }
        else if (Tag & PROTECTED_POOL)
        {
            Tag &= ~PROTECTED_POOL;
        }

        //
        // Check the block tag
        //
        if (TagToFree && TagToFree != Tag)
        {
            DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n",
                    (char*)&TagToFree, (char*)&Tag);
#if DBG
            /* Do not bugcheck in case this is a big allocation for which we
             * didn't manage to insert the tag */
            if (Tag != ' GIB')
                KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
#endif
        }

        //
        // We have our tag and our page count, so we can go ahead and remove
        // this tracker now
        //
        ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);

        //
        // Check if any of the debug flags are enabled
        //
        if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                            POOL_FLAG_CHECK_WORKERS |
                            POOL_FLAG_CHECK_RESOURCES |
                            POOL_FLAG_CHECK_DEADLOCK))
        {
            //
            // Was deadlock verification also enabled? We can do some extra
            // checks at this point
            //
            if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
            {
                DPRINT1("Verifier not yet supported\n");
            }

            //
            // FIXME: Many debugging checks go here
            //
        }

        //
        // Update counters
        //
        PoolDesc = PoolVector[PoolType];
        InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                    -(LONG_PTR)(PageCount << PAGE_SHIFT));

        //
        // Do the real free now and update the last counter with the big page count
        //
        RealPageCount = MiFreePoolPages(P);
        ASSERT(RealPageCount == PageCount);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               -(LONG)RealPageCount);
        return;
    }
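
    //
    // N.B. Editorial note (not part of the original source): small allocations
    // always carry a POOL_HEADER immediately before the pointer handed to the
    // caller, so they can never start on a page boundary -- which is exactly
    // why the PAGE_ALIGN(P) == P test above is sufficient to recognize a
    // big-page allocation, and why stepping back one header below is safe.
    //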

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    //
    // Get the size of the entry and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Make sure that the IRQL makes sense
    //
    ExpCheckPoolIrqlLevel(PoolType, 0, P);

    //
    // Get the pool tag and get rid of the PROTECTED_POOL flag
    //
    Tag = Entry->PoolTag;
    if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;

    //
    // Check the block tag
    //
    if (TagToFree && TagToFree != Tag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n",
                (char*)&TagToFree, (char*)&Tag);
#if DBG
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
#endif
    }

    //
    // Track the removal of this allocation
    //
    ExpRemovePoolTracker(Tag,
                         BlockSize * POOL_BLOCK_SIZE,
                         Entry->PoolType - 1);

    //
    // Release pool quota, if any
    //
    if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             POOL_BILLED_PROCESS_INVALID,
                             (ULONG_PTR)P,
                             Tag,
                             (ULONG_PTR)Process);
            }
            PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }

    //
    // Is this allocation small enough to have come from a lookaside list?
    //
    if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].P :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // We failed, try to push it into the global lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].L :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }
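
    //
    // N.B. Illustrative example (editorial addition, not part of the original
    // source) of the coalescing performed below: freeing a 4-block entry
    // whose following 8-block neighbor is already free produces a single
    // 12-block entry; if the preceding 2-block neighbor is free as well,
    // everything merges into one 14-block entry, which would then be inserted
    // into ListHeads[13].
    //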

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Update performance counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked
            // list for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so
                // go ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked
            // list for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so
                // go ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }
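
    //
    // N.B. Editorial note (not part of the original source): once every block
    // on a page has coalesced, a single free block remains that starts at the
    // page base (PreviousSize == 0) and whose BlockSize spans the entire page,
    // so POOL_NEXT_BLOCK lands exactly on the next page boundary. That is the
    // condition the check below detects in order to return the whole page to
    // the pool page allocator.
    //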

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, update the performance counter,
        // and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry));
    ExUnlockPool(PoolDesc, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreePoolWithTag(P, 0);
}

/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    PVOID Buffer;

    //
    // Allocate the pool, ignoring the priority hint for now
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
    if (Buffer == NULL)
    {
        UNIMPLEMENTED;
    }

    return Buffer;
}
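
//
// N.B. Illustrative usage sketch (editorial addition, not part of the
// original source): a caller that prefers a NULL return over an exception
// when quota cannot be charged can OR in POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
// which the function below strips before doing the actual allocation (the
// tag here is an arbitrary illustrative one):
//
//     PVOID Buffer = ExAllocatePoolWithQuotaTag(PagedPool |
//                                               POOL_QUOTA_FAIL_INSTEAD_OF_RAISE,
//                                               64,
//                                               'gaTQ');
//     if (!Buffer) { /* handle failure instead of catching an exception */ }
//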

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    BOOLEAN Raise = TRUE;
    PVOID Buffer;
    PPOOL_HEADER Entry;
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();

    //
    // Check if we should fail instead of raising an exception
    //
    if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
    {
        Raise = FALSE;
        PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
    }

    //
    // Inject the pool quota mask
    //
    PoolType += QUOTA_POOL_MASK;

    //
    // Check if we have enough space to add the quota owner process, as long as
    // this isn't the system process, which never gets charged quota
    //
    ASSERT(NumberOfBytes != 0);
    if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
        (Process != PsInitialSystemProcess))
    {
        //
        // Add space for our EPROCESS pointer
        //
        NumberOfBytes += sizeof(PEPROCESS);
    }
    else
    {
        //
        // We won't be able to store the pointer, so don't use quota for this
        //
        PoolType -= QUOTA_POOL_MASK;
    }

    //
    // Allocate the pool buffer now
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);

    //
    // If the buffer is page-aligned, this is a large page allocation and we
    // won't touch it
    //
    if (PAGE_ALIGN(Buffer) != Buffer)
    {
        //
        // Also if special pool is enabled, and this was allocated from there,
        // we won't touch it either
        //
        if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
            (MmIsSpecialPoolAddress(Buffer)))
        {
            return Buffer;
        }

        //
        // If it wasn't actually allocated with quota charges, ignore it too
        //
        if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;

        //
        // If this is the system process, we don't charge quota, so ignore it
        //
        if (Process == PsInitialSystemProcess) return Buffer;

        //
        // Actually go and charge quota for the process now
        //
        Entry = POOL_ENTRY(Buffer);
        Status = PsChargeProcessPoolQuota(Process,
                                          PoolType & BASE_POOL_TYPE_MASK,
                                          Entry->BlockSize * POOL_BLOCK_SIZE);
        if (!NT_SUCCESS(Status))
        {
            //
            // Quota failed, back out the allocation, clear the owner, and fail
            //
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            ExFreePoolWithTag(Buffer, Tag);
            if (Raise) RtlRaiseStatus(Status);
            return NULL;
        }

        //
        // Quota worked, write the owner and then reference it before returning
        //
        ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
        ObReferenceObject(Process);
    }
    else if (!(Buffer) && (Raise))
    {
        //
        // The allocation failed, raise an error if we are in raise mode
        //
        RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
    }

    //
    // Return the allocated buffer
    //
    return Buffer;
}

/* EOF */