/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

#undef ExAllocatePoolWithQuota
#undef ExAllocatePoolWithQuotaTag

/* GLOBALS ********************************************************************/

#define POOL_BIG_TABLE_ENTRY_FREE 0x1

/*
 * This defines when we shrink or expand the table.
 * 3 --> keep the number of used entries in the 33%-66% of the table capacity.
 * 4 --> 25% - 75%
 * etc.
 */
#define POOL_BIG_TABLE_USE_RATE 4

typedef struct _POOL_DPC_CONTEXT
{
    PPOOL_TRACKER_TABLE PoolTrackTable;
    SIZE_T PoolTrackTableSize;
    PPOOL_TRACKER_TABLE PoolTrackTableExpansion;
    SIZE_T PoolTrackTableSizeExpansion;
} POOL_DPC_CONTEXT, *PPOOL_DPC_CONTEXT;

ULONG ExpNumberOfPagedPools;
POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16 + 1];
PPOOL_DESCRIPTOR PoolVector[2];
PKGUARDED_MUTEX ExpPagedPoolMutex;
SIZE_T PoolTrackTableSize, PoolTrackTableMask;
SIZE_T PoolBigPageTableSize, PoolBigPageTableHash;
ULONG ExpBigTableExpansionFailed;
PPOOL_TRACKER_TABLE PoolTrackTable;
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable;
KSPIN_LOCK ExpTaggedPoolLock;
ULONG PoolHitTag;
BOOLEAN ExStopBadTags;
KSPIN_LOCK ExpLargePoolTableLock;
ULONG ExpPoolBigEntriesInUse;
ULONG ExpPoolFlags;
ULONG ExPoolFailures;
ULONGLONG MiLastPoolDumpTime;

/* Pool block/header/list access macros */
#define POOL_ENTRY(x)       (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
#define POOL_FREE_BLOCK(x)  (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
#define POOL_BLOCK(x, i)    (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
#define POOL_NEXT_BLOCK(x)  POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x)  POOL_BLOCK((x), -((x)->PreviousSize))

/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
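/*
 * Illustration of the encoding the helpers below rely on: pool headers and
 * their embedded LIST_ENTRYs are at least POOL_BLOCK_SIZE-aligned, so bit 0
 * of a valid list pointer is always clear. Encoding tags that spare bit;
 * decoding masks it off again (addresses are hypothetical):
 *
 *      Entry address:          0x81234568  (bit 0 clear, by alignment)
 *      ExpEncodePoolLink   ->  0x81234569  (bit 0 tagged)
 *      ExpDecodePoolLink   ->  0x81234568  (original pointer restored)
 *
 * A Flink/Blink overwritten with a plain (untagged) pointer will fail the
 * round-trip consistency check in ExpCheckPoolLinks and bugcheck.
 */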
PLIST_ENTRY
NTAPI
ExpDecodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
}

PLIST_ENTRY
NTAPI
ExpEncodePoolLink(IN PLIST_ENTRY Link)
{
    return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
}

VOID
NTAPI
ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
{
    if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
        (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
    {
        KeBugCheckEx(BAD_POOL_HEADER,
                     3,
                     (ULONG_PTR)ListHead,
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink),
                     (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
    }
}

VOID
NTAPI
ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
{
    ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
}

BOOLEAN
NTAPI
ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
{
    return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
}

VOID
NTAPI
ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink, Flink;
    Flink = ExpDecodePoolLink(Entry->Flink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    Flink->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Flink);
}

PLIST_ENTRY
NTAPI
ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Flink;
    Entry = ExpDecodePoolLink(ListHead->Flink);
    Flink = ExpDecodePoolLink(Entry->Flink);
    ListHead->Flink = ExpEncodePoolLink(Flink);
    Flink->Blink = ExpEncodePoolLink(ListHead);
    return Entry;
}

PLIST_ENTRY
NTAPI
ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
{
    PLIST_ENTRY Entry, Blink;
    Entry = ExpDecodePoolLink(ListHead->Blink);
    Blink = ExpDecodePoolLink(Entry->Blink);
    ListHead->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(ListHead);
    return Entry;
}

VOID
NTAPI
ExpInsertPoolTailList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Blink;
    ExpCheckPoolLinks(ListHead);
    Blink = ExpDecodePoolLink(ListHead->Blink);
    Entry->Flink = ExpEncodePoolLink(ListHead);
    Entry->Blink = ExpEncodePoolLink(Blink);
    Blink->Flink = ExpEncodePoolLink(Entry);
    ListHead->Blink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}

VOID
NTAPI
ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead,
                      IN PLIST_ENTRY Entry)
{
    PLIST_ENTRY Flink;
    ExpCheckPoolLinks(ListHead);
    Flink = ExpDecodePoolLink(ListHead->Flink);
    Entry->Flink = ExpEncodePoolLink(Flink);
    Entry->Blink = ExpEncodePoolLink(ListHead);
    Flink->Blink = ExpEncodePoolLink(Entry);
    ListHead->Flink = ExpEncodePoolLink(Entry);
    ExpCheckPoolLinks(ListHead);
}
VOID
NTAPI
ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
{
    PPOOL_HEADER PreviousEntry, NextEntry;

    /* Is there a block before this one? */
    if (Entry->PreviousSize)
    {
        /* Get it */
        PreviousEntry = POOL_PREV_BLOCK(Entry);

        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
        {
            /* Something is awry */
            KeBugCheckEx(BAD_POOL_HEADER,
                         6,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* This block should also indicate that it's as large as we think it is */
        if (PreviousEntry->BlockSize != Entry->PreviousSize)
        {
            /* Otherwise, someone corrupted one of the sizes */
            DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
                    PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
                    Entry->PreviousSize, (char *)&Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)PreviousEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
    else if (PAGE_ALIGN(Entry) != Entry)
    {
        /* If there's no block before us, we are the first block, so we should be on a page boundary */
        KeBugCheckEx(BAD_POOL_HEADER,
                     7,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* This block must have a size */
    if (!Entry->BlockSize)
    {
        /* Someone must've corrupted this field */
        if (Entry->PreviousSize)
        {
            PreviousEntry = POOL_PREV_BLOCK(Entry);
            DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
                    (char *)&PreviousEntry->PoolTag,
                    (char *)&Entry->PoolTag);
        }
        else
        {
            DPRINT1("Entry tag %.4s\n",
                    (char *)&Entry->PoolTag);
        }
        KeBugCheckEx(BAD_POOL_HEADER,
                     8,
                     0,
                     __LINE__,
                     (ULONG_PTR)Entry);
    }

    /* Okay, now get the next block */
    NextEntry = POOL_NEXT_BLOCK(Entry);

    /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        /* The two blocks must be on the same page! */
        if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
        {
            /* Something is messed up */
            KeBugCheckEx(BAD_POOL_HEADER,
                         9,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }

        /* And this block should think we are as large as we truly are */
        if (NextEntry->PreviousSize != Entry->BlockSize)
        {
            /* Otherwise, someone corrupted the field */
            DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
                    Entry->BlockSize, (char *)&Entry->PoolTag,
                    NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
            KeBugCheckEx(BAD_POOL_HEADER,
                         5,
                         (ULONG_PTR)NextEntry,
                         __LINE__,
                         (ULONG_PTR)Entry);
        }
    }
}
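/*
 * Sketch of the on-page layout the checks above walk (sizes are in
 * POOL_BLOCK_SIZE units; the values shown are illustrative only):
 *
 *  page start
 *  +--------------+--------------+----------------------+
 *  | HDR  data    | HDR  data    | HDR  data            |
 *  | Prev = 0     | Prev = 4     | Prev = 10            |
 *  | Block = 4    | Block = 10   | Block = ...          |
 *  +--------------+--------------+----------------------+
 *
 * Each header's BlockSize must equal the next header's PreviousSize, the
 * first block must start page-aligned, and no block may straddle a page
 * boundary.
 */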
VOID
NTAPI
ExpCheckPoolAllocation(
    PVOID P,
    POOL_TYPE PoolType,
    ULONG Tag)
{
    PPOOL_HEADER Entry;
    ULONG i;
    KIRQL OldIrql;
    POOL_TYPE RealPoolType;

    /* Get the pool header */
    Entry = ((PPOOL_HEADER)P) - 1;

    /* Check if this is a large allocation */
    if (PAGE_ALIGN(P) == P)
    {
        /* Lock the pool table */
        KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);

        /* Find the pool tag */
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            /* Check if this is our allocation */
            if (PoolBigPageTable[i].Va == P)
            {
                /* Make sure the tag is ok */
                if (PoolBigPageTable[i].Key != Tag)
                {
                    KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
                }

                break;
            }
        }

        /* Release the lock */
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);

        if (i == PoolBigPageTableSize)
        {
            /* Did not find the allocation */
            //ASSERT(FALSE);
        }

        /* Get Pool type by address */
        RealPoolType = MmDeterminePoolType(P);
    }
    else
    {
        /* Verify the tag */
        if (Entry->PoolTag != Tag)
        {
            DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
                    &Tag, &Entry->PoolTag, Entry->PoolTag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }

        /* Check the rest of the header */
        ExpCheckPoolHeader(Entry);

        /* Get Pool type from entry */
        RealPoolType = (Entry->PoolType - 1);
    }

    /* Should we check the pool type? */
    if (PoolType != -1)
    {
        /* Verify the pool type */
        if (RealPoolType != PoolType)
        {
            DPRINT1("Wrong pool type! Expected %s, got %s\n",
                    PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
                    (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
            KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
        }
    }
}
"PagedPool" : "NonPagedPool"); 369 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag); 370 } 371 } 372 } 373 374 VOID 375 NTAPI 376 ExpCheckPoolBlocks(IN PVOID Block) 377 { 378 BOOLEAN FoundBlock = FALSE; 379 SIZE_T Size = 0; 380 PPOOL_HEADER Entry; 381 382 /* Get the first entry for this page, make sure it really is the first */ 383 Entry = PAGE_ALIGN(Block); 384 ASSERT(Entry->PreviousSize == 0); 385 386 /* Now scan each entry */ 387 while (TRUE) 388 { 389 /* When we actually found our block, remember this */ 390 if (Entry == Block) FoundBlock = TRUE; 391 392 /* Now validate this block header */ 393 ExpCheckPoolHeader(Entry); 394 395 /* And go to the next one, keeping track of our size */ 396 Size += Entry->BlockSize; 397 Entry = POOL_NEXT_BLOCK(Entry); 398 399 /* If we hit the last block, stop */ 400 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break; 401 402 /* If we hit the end of the page, stop */ 403 if (PAGE_ALIGN(Entry) == Entry) break; 404 } 405 406 /* We must've found our block, and we must have hit the end of the page */ 407 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock)) 408 { 409 /* Otherwise, the blocks are messed up */ 410 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry); 411 } 412 } 413 414 FORCEINLINE 415 VOID 416 ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, 417 IN SIZE_T NumberOfBytes, 418 IN PVOID Entry) 419 { 420 // 421 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must 422 // be DISPATCH_LEVEL or lower for Non Paged Pool 423 // 424 if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ? 425 (KeGetCurrentIrql() > APC_LEVEL) : 426 (KeGetCurrentIrql() > DISPATCH_LEVEL)) 427 { 428 // 429 // Take the system down 430 // 431 KeBugCheckEx(BAD_POOL_CALLER, 432 !Entry ? POOL_ALLOC_IRQL_INVALID : POOL_FREE_IRQL_INVALID, 433 KeGetCurrentIrql(), 434 PoolType, 435 !Entry ? NumberOfBytes : (ULONG_PTR)Entry); 436 } 437 } 438 439 FORCEINLINE 440 ULONG 441 ExpComputeHashForTag(IN ULONG Tag, 442 IN SIZE_T BucketMask) 443 { 444 // 445 // Compute the hash by multiplying with a large prime number and then XORing 446 // with the HIDWORD of the result. 447 // 448 // Finally, AND with the bucket mask to generate a valid index/bucket into 449 // the table 450 // 451 ULONGLONG Result = (ULONGLONG)40543 * Tag; 452 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32)); 453 } 454 455 FORCEINLINE 456 ULONG 457 ExpComputePartialHashForAddress(IN PVOID BaseAddress) 458 { 459 ULONG Result; 460 // 461 // Compute the hash by converting the address into a page number, and then 462 // XORing each nibble with the next one. 463 // 464 // We do *NOT* AND with the bucket mask at this point because big table expansion 465 // might happen. Therefore, the final step of the hash must be performed 466 // while holding the expansion pushlock, and this is why we call this a 467 // "partial" hash only. 468 // 469 Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT); 470 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result; 471 } 472 473 #if DBG 474 /* 475 * FORCEINLINE 476 * BOOLEAN 477 * ExpTagAllowPrint(CHAR Tag); 478 */ 479 #define ExpTagAllowPrint(Tag) \ 480 ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */) 481 482 #ifdef KDBG 483 #include <kdbg/kdb.h> 484 #endif 485 486 #ifdef KDBG 487 #define MiDumperPrint(dbg, fmt, ...) \ 488 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \ 489 else DPRINT1(fmt, ##__VA_ARGS__) 490 #else 491 #define MiDumperPrint(dbg, fmt, ...) 
#if DBG
/*
 * FORCEINLINE
 * BOOLEAN
 * ExpTagAllowPrint(CHAR Tag);
 */
#define ExpTagAllowPrint(Tag) \
    ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */)

#ifdef KDBG
#include <kdbg/kdb.h>
#endif

#ifdef KDBG
#define MiDumperPrint(dbg, fmt, ...)        \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)
#else
#define MiDumperPrint(dbg, fmt, ...)        \
    DPRINT1(fmt, ##__VA_ARGS__)
#endif

VOID
MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
{
    SIZE_T i;
    BOOLEAN Verbose;

    //
    // Only print header if called from OOM situation
    //
    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
        DPRINT1("Out of memory dumper!\n");
    }
#ifdef KDBG
    else
    {
        KdbpPrint("Pool Used:\n");
    }
#endif

    //
    // Remember whether we'll have to be verbose
    // This is the only supported flag!
    //
    Verbose = BooleanFlagOn(Flags, 1);

    //
    // Print table header
    //
    if (Verbose)
    {
        MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
    }
    else
    {
        MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
        MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
    }

    //
    // We'll extract allocations for all the tracked pools
    //
    for (i = 0; i < PoolTrackTableSize; ++i)
    {
        PPOOL_TRACKER_TABLE TableEntry;

        TableEntry = &PoolTrackTable[i];

        //
        // We only care about tags which have allocated memory
        //
        if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
        {
            //
            // If there's a tag, attempt to do a pretty print,
            // but only if it matches the caller's tag, or if
            // any tag is allowed.
            // For checking whether it matches the caller's tag,
            // use the mask to make sure not to mess with the wildcards
            //
            if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
                (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
            {
                CHAR Tag[4];

                //
                // Extract each 'component' and check whether they are printable
                //
                Tag[0] = TableEntry->Key & 0xFF;
                Tag[1] = TableEntry->Key >> 8 & 0xFF;
                Tag[2] = TableEntry->Key >> 16 & 0xFF;
                Tag[3] = TableEntry->Key >> 24 & 0xFF;

                if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
                {
                    //
                    // Print in direct order to make !poolused TAG usage easier
                    //
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
                else
                {
                    if (Verbose)
                    {
                        MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                      (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                      (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                    }
                    else
                    {
                        MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
                                      TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                      TableEntry->PagedAllocs, TableEntry->PagedBytes);
                    }
                }
            }
            else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
            {
                if (Verbose)
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
                                  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedFrees,
                                  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
                }
                else
                {
                    MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
                                  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
                                  TableEntry->PagedAllocs, TableEntry->PagedBytes);
                }
            }
        }
    }

    if (!CalledFromDbg)
    {
        DPRINT1("---------------------\n");
    }
}
#endif

/* PRIVATE FUNCTIONS **********************************************************/

CODE_SEG("INIT")
VOID
NTAPI
ExpSeedHotTags(VOID)
{
    ULONG i, Key, Hash, Index;
    PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
    ULONG TagList[] =
    {
        '  oI',
        ' laH',
        'PldM',
        'LooP',
        'tSbO',
        ' prI',
        'bdDN',
        'LprI',
        'pOoI',
        ' ldM',
        'eliF',
        'aVMC',
        'dSeS',
        'CFtN',
        'looP',
        'rPCT',
        'bNMC',
        'dTeS',
        'sFtN',
        'TPCT',
        'CPCT',
        ' yeK',
        'qSbO',
        'mNoI',
        'aEoI',
        'cPCT',
        'aFtN',
        '0ftN',
        'tceS',
        'SprI',
        'ekoT',
        '  eS',
        'lCbO',
        'cScC',
        'lFtN',
        'cAeS',
        'mfSF',
        'kWcC',
        'miSF',
        'CdfA',
        'EdfA',
        'orSF',
        'nftN',
        'PRIU',
        'rFpN',
        'RFpN',
        'aPeS',
        'sUeS',
        'FpcA',
        'MpcA',
        'cSeS',
        'mNbO',
        'sFpN',
        'uLeS',
        'DPcS',
        'nevE',
        'vrqR',
        'ldaV',
        '  pP',
        'SdaV',
        ' daV',
        'LdaV',
        'FdaV',
        ' GIB',
    };

    //
    // Loop all 64 hot tags
    //
    ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
    for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
    {
        //
        // Get the current tag, and compute its hash in the tracker table
        //
        Key = TagList[i];
        Hash = ExpComputeHashForTag(Key, PoolTrackTableMask);

        //
        // Loop all the hashes in this index/bucket
        //
        Index = Hash;
        while (TRUE)
        {
            //
            // Find an empty entry, and make sure this isn't the last hash that
            // can fit.
            //
            // On checked builds, also make sure this is the first time we are
            // seeding this tag.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
            {
                //
                // It has been seeded, move on to the next tag
                //
                TrackTable[Hash].Key = Key;
                break;
            }

            //
            // This entry was already taken, compute the next possible hash while
            // making sure we're not back at our initial index.
            //
            ASSERT(TrackTable[Hash].Key != Key);
            Hash = (Hash + 1) & PoolTrackTableMask;
            if (Hash == Index) break;
        }
    }
}
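/*
 * Note on the literals above: pool tags are stored little-endian, so a
 * multi-character constant is written reversed. For example, 'looP' is the
 * tag that appears as "Pool" in a debugger dump, and ' GIB' is "BIG ", the
 * fallback tag used when the big-pages table cannot take a new entry.
 */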
VOID
NTAPI
ExpRemovePoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Have we found the entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Decrement the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedFrees);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
                                            -(SSIZE_T)NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedFrees);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes,
                                        -(SSIZE_T)NumberOfBytes);
            return;
        }

        //
        // We should have only ended up with an empty entry if we've reached
        // the last bucket
        //
        if (!TableEntry->Key)
        {
            DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
                    Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
            ASSERT(Hash == TableMask);
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
VOID
NTAPI
ExpInsertPoolTracker(IN ULONG Key,
                     IN SIZE_T NumberOfBytes,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, Index;
    KIRQL OldIrql;
    PPOOL_TRACKER_TABLE Table, TableEntry;
    SIZE_T TableMask, TableSize;

    //
    // Remove the PROTECTED_POOL flag which is not part of the tag
    //
    Key &= ~PROTECTED_POOL;

    //
    // With WinDBG you can set a tag you want to break on when an allocation is
    // attempted
    //
    if (Key == PoolHitTag) DbgBreakPoint();

    //
    // There is also an internal flag you can set to break on malformed tags
    //
    if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);

    //
    // ASSERT on ReactOS features not yet supported
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));
    ASSERT(KeGetCurrentProcessorNumber() == 0);

    //
    // Why the double indirection? Because normally this function is also used
    // when doing session pool allocations, which has another set of tables,
    // sizes, and masks that live in session pool. Now we don't support session
    // pool so we only ever use the regular tables, but I'm keeping the code this
    // way so that the day we DO support session pool, it won't require that
    // many changes
    //
    Table = PoolTrackTable;
    TableMask = PoolTrackTableMask;
    TableSize = PoolTrackTableSize;
    DBG_UNREFERENCED_LOCAL_VARIABLE(TableSize);

    //
    // Compute the hash for this key, and loop all the possible buckets
    //
    Hash = ExpComputeHashForTag(Key, TableMask);
    Index = Hash;
    while (TRUE)
    {
        //
        // Do we already have an entry for this tag?
        //
        TableEntry = &Table[Hash];
        if (TableEntry->Key == Key)
        {
            //
            // Increment the counters depending on if this was paged or nonpaged
            // pool
            //
            if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
            {
                InterlockedIncrement(&TableEntry->NonPagedAllocs);
                InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
                return;
            }
            InterlockedIncrement(&TableEntry->PagedAllocs);
            InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
            return;
        }

        //
        // We don't have an entry yet, but we've found a free bucket for it
        //
        if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
        {
            //
            // We need to hold the lock while creating a new entry, since other
            // processors might be in this code path as well
            //
            ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);
            if (!PoolTrackTable[Hash].Key)
            {
                //
                // We've won the race, so now create this entry in the bucket
                //
                ASSERT(Table[Hash].Key == 0);
                PoolTrackTable[Hash].Key = Key;
                TableEntry->Key = Key;
            }
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            //
            // Now we force the loop to run again, and we should now end up in
            // the code path above which does the interlocked increments...
            //
            continue;
        }

        //
        // This path is hit when we don't have an entry, and the current bucket
        // is full, so we simply try the next one
        //
        Hash = (Hash + 1) & TableMask;
        if (Hash == Index) break;
    }

    //
    // And finally this path is hit when all the buckets are full, and we need
    // some expansion. This path is not yet supported in ReactOS and so we'll
    // ignore the tag
    //
    DPRINT1("Out of pool tag space, ignoring...\n");
}
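/*
 * Usage contract (sketch, with a hypothetical tag and size): every
 * ExpInsertPoolTracker call must eventually be balanced by an
 * ExpRemovePoolTracker call with the same tag, pool type and byte count:
 *
 *      ExpInsertPoolTracker('xyzT', 64, NonPagedPool);  // on allocation
 *      ExpRemovePoolTracker('xyzT', 64, NonPagedPool);  // on free
 *
 * An unbalanced or differently-sized pair leaves the per-tag byte counters
 * permanently skewed in !poolused / SystemPoolTagInformation output.
 */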
CODE_SEG("INIT")
VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry)
    {
        ExpInitializePoolListHead(NextEntry);
        NextEntry++;
    }

    //
    // Note that ReactOS does not support Session Pool yet
    //
    ASSERT(PoolType != PagedPoolSession);
}

CODE_SEG("INIT")
VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    PPOOL_DESCRIPTOR Descriptor;
    SIZE_T TableSize;
    ULONG i;

    //
    // Check what kind of pool this is
    //
    if (PoolType == NonPagedPool)
    {
        //
        // Compute the track table size and convert it from a power of two to an
        // actual byte size
        //
        // NOTE: On checked builds, we'll assert if the registry table size was
        // invalid, while on retail builds we'll just break out of the loop at
        // that point.
        //
        TableSize = min(PoolTrackTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // If we hit bit 32, then no size was defined in the registry, so
        // we'll use the default size of 2048 entries.
        //
        // Otherwise, use the size from the registry, as long as it's not
        // smaller than 64 entries.
        //
        if (i == 32)
        {
            PoolTrackTableSize = 2048;
        }
        else
        {
            PoolTrackTableSize = max(1 << i, 64);
        }

        //
        // Loop trying with the biggest specified size first, and cut it down
        // by a power of two each iteration in case not enough memory exists
        //
        while (TRUE)
        {
            //
            // Do not allow overflow
            //
            if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
            {
                PoolTrackTableSize >>= 1;
                continue;
            }

            //
            // Allocate the tracker table and exit the loop if this worked
            //
            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 (PoolTrackTableSize + 1) *
                                                 sizeof(POOL_TRACKER_TABLE));
            if (PoolTrackTable) break;

            //
            // Otherwise, as long as we're not down to the last bit, keep
            // iterating
            //
            if (PoolTrackTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }
            PoolTrackTableSize >>= 1;
        }

        //
        // Add one entry, compute the hash, and zero the table
        //
        PoolTrackTableSize++;
        PoolTrackTableMask = PoolTrackTableSize - 2;

        RtlZeroMemory(PoolTrackTable,
                      PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // Finally, add the most used tags to speed up those allocations
        //
        ExpSeedHotTags();

        //
        // We now do the exact same thing with the tracker table for big pages
        //
        TableSize = min(PoolBigPageTableSize, MmSizeOfNonPagedPoolInBytes >> 8);
        for (i = 0; i < 32; i++)
        {
            if (TableSize & 1)
            {
                ASSERT((TableSize & ~1) == 0);
                if (!(TableSize & ~1)) break;
            }
            TableSize >>= 1;
        }

        //
        // For big pages, the default tracker table is 4096 entries, while the
        // minimum is still 64
        //
        if (i == 32)
        {
            PoolBigPageTableSize = 4096;
        }
        else
        {
            PoolBigPageTableSize = max(1 << i, 64);
        }

        //
        // Again, run the exact same loop we ran earlier, but this time for the
        // big pool tracker instead
        //
        while (TRUE)
        {
            if ((PoolBigPageTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_PAGES)))
            {
                PoolBigPageTableSize >>= 1;
                continue;
            }

            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                   sizeof(POOL_TRACKER_BIG_PAGES));
            if (PoolBigPageTable) break;

            if (PoolBigPageTableSize == 1)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             TableSize,
                             0xFFFFFFFF,
                             0xFFFFFFFF,
                             0xFFFFFFFF);
            }

            PoolBigPageTableSize >>= 1;
        }

        //
        // An extra entry is not needed for the big pool tracker, so just
        // compute the hash and zero it
        //
        PoolBigPageTableHash = PoolBigPageTableSize - 1;
        RtlZeroMemory(PoolBigPageTable,
                      PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
        for (i = 0; i < PoolBigPageTableSize; i++)
        {
            PoolBigPageTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
        }

        //
        // During development, print this out so we can see what's happening
        //
        DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
        DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
               PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
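        //
        // Worked example of the sizing above (using the 2048-entry default):
        // the table is allocated with room for PoolTrackTableSize + 1 == 2049
        // entries, the size is then bumped to 2049, and the mask becomes
        // 2049 - 2 == 2047 (0x7FF). The extra, never-hashed last entry is what
        // the "Hash != PoolTrackTableSize - 1" checks keep free, so a full
        // table can still terminate its linear probe
        //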
        //
        // Insert the generic tracker for all of big pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolBigPageTableSize *
                                            sizeof(POOL_TRACKER_BIG_PAGES)),
                             NonPagedPool);

        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Initialize the tag spinlock
        //
        KeInitializeSpinLock(&ExpTaggedPoolLock);

        //
        // Initialize the nonpaged pool descriptor
        //
        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExInitializePoolDescriptor(PoolVector[NonPagedPool],
                                   NonPagedPool,
                                   0,
                                   Threshold,
                                   NULL);
    }
    else
    {
        //
        // No support for NUMA systems at this time
        //
        ASSERT(KeNumberNodes == 1);

        //
        // Allocate the pool descriptor
        //
        Descriptor = ExAllocatePoolWithTag(NonPagedPool,
                                           sizeof(KGUARDED_MUTEX) +
                                           sizeof(POOL_DESCRIPTOR),
                                           'looP');
        if (!Descriptor)
        {
            //
            // This is really bad...
            //
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         0,
                         -1,
                         -1,
                         -1);
        }

        //
        // Setup the vector and guarded mutex for paged pool
        //
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
        ExpPagedPoolDescriptor[0] = Descriptor;
        KeInitializeGuardedMutex(ExpPagedPoolMutex);
        ExInitializePoolDescriptor(Descriptor,
                                   PagedPool,
                                   0,
                                   Threshold,
                                   ExpPagedPoolMutex);

        //
        // Insert the generic tracker for all of nonpaged pool
        //
        ExpInsertPoolTracker('looP',
                             ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                             NonPagedPool);
    }
}
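/*
 * Locking pattern used by the allocation/free paths below (sketch): the pool
 * descriptor chooses the primitive, so callers never name it directly.
 *
 *      KIRQL OldIrql = ExLockPool(PoolDesc);   // queued spinlock at
 *                                              // DISPATCH_LEVEL for nonpaged,
 *                                              // guarded mutex at APC_LEVEL
 *                                              // for paged pool
 *      ... manipulate the descriptor's free lists ...
 *      ExUnlockPool(PoolDesc, OldIrql);
 */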
FORCEINLINE
KIRQL
ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        return KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeAcquireGuardedMutex(Descriptor->LockAddress);
        return APC_LEVEL;
    }
}

FORCEINLINE
VOID
ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor,
             IN KIRQL OldIrql)
{
    //
    // Check if this is nonpaged pool
    //
    if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
    {
        //
        // Use the queued spin lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }
    else
    {
        //
        // Use the guarded mutex
        //
        KeReleaseGuardedMutex(Descriptor->LockAddress);
    }
}

VOID
NTAPI
ExpGetPoolTagInfoTarget(IN PKDPC Dpc,
                        IN PVOID DeferredContext,
                        IN PVOID SystemArgument1,
                        IN PVOID SystemArgument2)
{
    PPOOL_DPC_CONTEXT Context = DeferredContext;
    UNREFERENCED_PARAMETER(Dpc);
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    //
    // Make sure we win the race, and if we did, copy the data atomically
    //
    if (KeSignalCallDpcSynchronize(SystemArgument2))
    {
        RtlCopyMemory(Context->PoolTrackTable,
                      PoolTrackTable,
                      Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

        //
        // This is here because ReactOS does not yet support expansion
        //
        ASSERT(Context->PoolTrackTableSizeExpansion == 0);
    }

    //
    // Regardless of whether we won or not, we must now synchronize and then
    // decrement the barrier since this is one more processor that has completed
    // the callback.
    //
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}
NTSTATUS
NTAPI
ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation,
                 IN ULONG SystemInformationLength,
                 IN OUT PULONG ReturnLength OPTIONAL)
{
    ULONG TableSize, CurrentLength;
    ULONG EntryCount;
    NTSTATUS Status = STATUS_SUCCESS;
    PSYSTEM_POOLTAG TagEntry;
    PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
    POOL_DPC_CONTEXT Context;
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    //
    // Keep track of how much data the caller's buffer must hold
    //
    CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);

    //
    // Initialize the caller's buffer
    //
    TagEntry = &SystemInformation->TagInfo[0];
    SystemInformation->Count = 0;

    //
    // Capture the number of entries, and the total size needed to make a copy
    // of the table
    //
    EntryCount = (ULONG)PoolTrackTableSize;
    TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);

    //
    // Allocate the "Generic DPC" temporary buffer
    //
    Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    //
    // Do a "Generic DPC" to atomically retrieve the tag and allocation data
    //
    Context.PoolTrackTable = Buffer;
    Context.PoolTrackTableSize = PoolTrackTableSize;
    Context.PoolTrackTableExpansion = NULL;
    Context.PoolTrackTableSizeExpansion = 0;
    KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context);

    //
    // Now parse the results
    //
    for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
    {
        //
        // If the entry is empty, skip it
        //
        if (!TrackerEntry->Key) continue;

        //
        // Otherwise, add one more entry to the caller's buffer, and ensure that
        // enough space has been allocated in it
        //
        SystemInformation->Count++;
        CurrentLength += sizeof(*TagEntry);
        if (SystemInformationLength < CurrentLength)
        {
            //
            // The caller's buffer is too small, so set a failure code. The
            // caller will know the count, as well as how much space is needed.
            //
            // We do NOT break out of the loop, because we want to keep incrementing
            // the Count as well as CurrentLength so that the caller can know the
            // final numbers
            //
            Status = STATUS_INFO_LENGTH_MISMATCH;
        }
        else
        {
            //
            // Small sanity check that our accounting is working correctly
            //
            ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
            ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);

            //
            // Return the data into the caller's buffer
            //
            TagEntry->TagUlong = TrackerEntry->Key;
            TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
            TagEntry->PagedFrees = TrackerEntry->PagedFrees;
            TagEntry->PagedUsed = TrackerEntry->PagedBytes;
            TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
            TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
            TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
            TagEntry++;
        }
    }

    //
    // Free the "Generic DPC" temporary buffer, return the buffer length and status
    //
    ExFreePoolWithTag(Buffer, 'ofnI');
    if (ReturnLength) *ReturnLength = CurrentLength;
    return Status;
}

_IRQL_requires_(DISPATCH_LEVEL)
static
BOOLEAN
ExpReallocateBigPageTable(
    _In_ _IRQL_restores_ KIRQL OldIrql,
    _In_ BOOLEAN Shrink)
{
    SIZE_T OldSize = PoolBigPageTableSize;
    SIZE_T NewSize, NewSizeInBytes;
    PPOOL_TRACKER_BIG_PAGES NewTable;
    PPOOL_TRACKER_BIG_PAGES OldTable;
    ULONG i;
    ULONG PagesFreed;
    ULONG Hash;
    ULONG HashMask;

    /* Must be holding ExpLargePoolTableLock */
    ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);

    /* Make sure we don't overflow */
    if (Shrink)
    {
        NewSize = OldSize / 2;

        /* Make sure we don't shrink too much. */
        ASSERT(NewSize >= ExpPoolBigEntriesInUse);

        NewSize = ALIGN_UP_BY(NewSize, PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES));
        ASSERT(NewSize <= OldSize);

        /* If there is only one page left, then keep it around. Not a failure either. */
        if (NewSize == OldSize)
        {
            ASSERT(NewSize == (PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES)));
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }
    }
    else
    {
        if (!NT_SUCCESS(RtlSIZETMult(2, OldSize, &NewSize)))
        {
            DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return FALSE;
        }

        /* Make sure we don't stupidly waste pages */
        NewSize = ALIGN_DOWN_BY(NewSize, PAGE_SIZE / sizeof(POOL_TRACKER_BIG_PAGES));
        ASSERT(NewSize > OldSize);
    }

    if (!NT_SUCCESS(RtlSIZETMult(sizeof(POOL_TRACKER_BIG_PAGES), NewSize, &NewSizeInBytes)))
    {
        DPRINT1("Overflow while calculating big page table size. Size=%lu\n", OldSize);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }

    NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
    if (NewTable == NULL)
    {
        DPRINT("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
        return FALSE;
    }
"Shrinking" : "Expanding", NewSize); 1528 1529 /* Initialize the new table */ 1530 RtlZeroMemory(NewTable, NewSizeInBytes); 1531 for (i = 0; i < NewSize; i++) 1532 { 1533 NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE; 1534 } 1535 1536 /* Copy over all items */ 1537 OldTable = PoolBigPageTable; 1538 HashMask = NewSize - 1; 1539 for (i = 0; i < OldSize; i++) 1540 { 1541 /* Skip over empty items */ 1542 if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE) 1543 { 1544 continue; 1545 } 1546 1547 /* Recalculate the hash due to the new table size */ 1548 Hash = ExpComputePartialHashForAddress(OldTable[i].Va) % HashMask; 1549 1550 /* Find the location in the new table */ 1551 while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE)) 1552 { 1553 if (++Hash == NewSize) 1554 Hash = 0; 1555 } 1556 1557 /* We must have space */ 1558 ASSERT((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE); 1559 1560 /* Finally, copy the item */ 1561 NewTable[Hash] = OldTable[i]; 1562 } 1563 1564 /* Activate the new table */ 1565 PoolBigPageTable = NewTable; 1566 PoolBigPageTableSize = NewSize; 1567 PoolBigPageTableHash = PoolBigPageTableSize - 1; 1568 1569 /* Release the lock, we're done changing global state */ 1570 KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql); 1571 1572 /* Free the old table and update our tracker */ 1573 PagesFreed = MiFreePoolPages(OldTable); 1574 ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0); 1575 ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0); 1576 1577 return TRUE; 1578 } 1579 1580 BOOLEAN 1581 NTAPI 1582 ExpAddTagForBigPages(IN PVOID Va, 1583 IN ULONG Key, 1584 IN ULONG NumberOfPages, 1585 IN POOL_TYPE PoolType) 1586 { 1587 ULONG Hash, i = 0; 1588 PVOID OldVa; 1589 KIRQL OldIrql; 1590 SIZE_T TableSize; 1591 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart; 1592 ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0); 1593 ASSERT(!(PoolType & SESSION_POOL_MASK)); 1594 1595 // 1596 // As the table is expandable, these values must only be read after acquiring 1597 // the lock to avoid a teared access during an expansion 1598 // NOTE: Windows uses a special reader/writer SpinLock to improve 1599 // performance in the common case (add/remove a tracker entry) 1600 // 1601 Retry: 1602 Hash = ExpComputePartialHashForAddress(Va); 1603 KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql); 1604 Hash &= PoolBigPageTableHash; 1605 TableSize = PoolBigPageTableSize; 1606 1607 // 1608 // We loop from the current hash bucket to the end of the table, and then 1609 // rollover to hash bucket 0 and keep going from there. 
BOOLEAN
NTAPI
ExpAddTagForBigPages(IN PVOID Va,
                     IN ULONG Key,
                     IN ULONG NumberOfPages,
                     IN POOL_TYPE PoolType)
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    // NOTE: Windows uses a special reader/writer SpinLock to improve
    // performance in the common case (add/remove a tracker entry)
    //
Retry:
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // rollover to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry and attempt to atomically make the
        // entry busy now
        // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 75% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if it's also been the 16th time that we
            // keep losing the race or that we are not finding a free entry anymore,
            // which implies a massive number of concurrent big pool allocations.
            //
            ExpPoolBigEntriesInUse++;
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize * (POOL_BIG_TABLE_USE_RATE - 1) / POOL_BIG_TABLE_USE_RATE)))
            {
                DPRINT("Attempting expansion since we now have %lu entries\n",
                       ExpPoolBigEntriesInUse);
                ASSERT(TableSize == PoolBigPageTableSize);
                ExpReallocateBigPageTable(OldIrql, FALSE);
                return TRUE;
            }

            //
            // We have our entry, return
            //
            KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there's no free hash buckets whatsoever, so we now have
    // to attempt expanding the table
    //
    ASSERT(TableSize == PoolBigPageTableSize);
    if (ExpReallocateBigPageTable(OldIrql, FALSE))
    {
        goto Retry;
    }
    ExpBigTableExpansionFailed++;
    DPRINT1("Big pool table expansion failed\n");
    return FALSE;
}

ULONG
NTAPI
ExpFindAndRemoveTagBigPages(IN PVOID Va,
                            OUT PULONG_PTR BigPages,
                            IN POOL_TYPE PoolType)
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    PPOOL_TRACKER_BIG_PAGES Entry;
    ASSERT(((ULONG_PTR)Va & POOL_BIG_TABLE_ENTRY_FREE) == 0);
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a torn access during an expansion
    //
    Hash = ExpComputePartialHashForAddress(Va);
    KeAcquireSpinLock(&ExpLargePoolTableLock, &OldIrql);
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the size until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
    Entry->Va = (PVOID)((ULONG_PTR)Entry->Va | POOL_BIG_TABLE_ENTRY_FREE);

    ExpPoolBigEntriesInUse--;

    /* If we dropped to 12.5% of the table size (or wherever integer rounding
     * puts us), halve the table size, which will leave us at about 25% of the
     * space used. */
    if (ExpPoolBigEntriesInUse < (PoolBigPageTableSize / (POOL_BIG_TABLE_USE_RATE * 2)))
    {
        /* Shrink the table. */
        ExpReallocateBigPageTable(OldIrql, TRUE);
    }
    else
    {
        KeReleaseSpinLock(&ExpLargePoolTableLock, OldIrql);
    }
    return PoolTag;
}
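/*
 * Worked example of the grow/shrink hysteresis with POOL_BIG_TABLE_USE_RATE
 * == 4 and a 4096-entry table: the allocation path expands once more than
 * 3072 entries (75%) are in use, while the free path above shrinks once fewer
 * than 512 entries (4096 / 8, i.e. 12.5%) remain, leaving the halved table at
 * roughly 25% utilization. The gap between the two thresholds prevents
 * back-to-back resizes when usage hovers around a boundary.
 */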
VOID
NTAPI
ExQueryPoolUsage(OUT PULONG PagedPoolPages,
                 OUT PULONG NonPagedPoolPages,
                 OUT PULONG PagedPoolAllocs,
                 OUT PULONG PagedPoolFrees,
                 OUT PULONG PagedPoolLookasideHits,
                 OUT PULONG NonPagedPoolAllocs,
                 OUT PULONG NonPagedPoolFrees,
                 OUT PULONG NonPagedPoolLookasideHits)
{
    ULONG i;
    PPOOL_DESCRIPTOR PoolDesc;

    //
    // Assume all failures
    //
    *PagedPoolPages = 0;
    *PagedPoolAllocs = 0;
    *PagedPoolFrees = 0;

    //
    // Tally up the totals for all the paged pool
    //
    for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
    {
        PoolDesc = ExpPagedPoolDescriptor[i];
        *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
        *PagedPoolAllocs += PoolDesc->RunningAllocs;
        *PagedPoolFrees += PoolDesc->RunningDeAllocs;
    }

    //
    // The first non-paged pool has a hardcoded well-known descriptor name
    //
    PoolDesc = &NonPagedPoolDescriptor;
    *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
    *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
    *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;

    //
    // If the system has more than one non-paged pool, copy the other descriptor
    // totals as well
    //
#if 0
    if (ExpNumberOfNonPagedPools > 1)
    {
        for (i = 0; i < ExpNumberOfNonPagedPools; i++)
        {
            PoolDesc = ExpNonPagedPoolDescriptor[i];
            *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
            *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
            *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
        }
    }
#endif

    //
    // Get the amount of hits in the system lookaside lists
    //
    if (!IsListEmpty(&ExPoolLookasideListHead))
    {
        PLIST_ENTRY ListEntry;

        for (ListEntry = ExPoolLookasideListHead.Flink;
             ListEntry != &ExPoolLookasideListHead;
             ListEntry = ListEntry->Flink)
        {
            PGENERAL_LOOKASIDE Lookaside;

            Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);

            if (Lookaside->Type == NonPagedPool)
            {
                *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
            }
            else
            {
                *PagedPoolLookasideHits += Lookaside->AllocateHits;
            }
        }
    }
}

VOID
NTAPI
ExReturnPoolQuota(IN PVOID P)
{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    USHORT BlockSize;
    PEPROCESS Process;

    if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
        (MmIsSpecialPoolAddress(P)))
    {
        return;
    }

    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    PoolType = Entry->PoolType - 1;
    BlockSize = Entry->BlockSize;

    if (PoolType & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             POOL_BILLED_PROCESS_INVALID,
                             (ULONG_PTR)P,
                             Entry->PoolTag,
                             (ULONG_PTR)Process);
            }
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
}
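/*
 * Layout sketch for quota-billed blocks (illustrative): when QUOTA_POOL_MASK
 * is set, the owning EPROCESS pointer is stashed in the last pointer-sized
 * slot of the block, which is what ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1]
 * reads back above:
 *
 *      +-------------+---------------------------+-----------+
 *      | POOL_HEADER | caller data               | EPROCESS* |
 *      +-------------+---------------------------+-----------+
 *                                                 ^ last PVOID of the block
 */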
/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTag(IN POOL_TYPE PoolType,
                      IN SIZE_T NumberOfBytes,
                      IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_VERIFIER | POOL_FLAG_SPECIAL_POOL))
    {
        //
        // For verifier, we should call the verification routine
        //
        if (ExpPoolFlags & POOL_FLAG_VERIFIER)
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
#if DBG
            //
            // Out of memory, display current consumption
            // Let's consider that if the caller wanted more
            // than a hundred pages, that's a bogus caller
            // and we are not out of memory. Dump at most
            // once a second to avoid spamming the log.
            //
            if (NumberOfBytes < 100 * PAGE_SIZE &&
                KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
            {
                MiDumpPoolConsumers(FALSE, 0, 0, 0);
                MiLastPoolDumpTime = KeQueryInterruptTime();
            }
#endif

            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE)
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
                if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint();
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }

            return NULL;
        }

        //
        // Increment required counters
        //
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);
        InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);
    ASSERT(i < POOL_LISTS_PER_PAGE);
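    //
    // Worked example of the block-size math above, assuming a 32-bit build
    // where POOL_BLOCK_SIZE == 8 and sizeof(POOL_HEADER) == 8: a request for
    // 100 bytes gives i = (100 + 8 + 7) / 8 == 14, i.e. a 112-byte block
    // (one block for the header plus 13 blocks, 104 bytes, of payload)
    //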
    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[i - 1].P :
                        Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                            Prcb->PPPagedLookasideList[i - 1].L :
                            Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            Entry--;
            Entry->PoolType = OriginalType + 1;
            ExpInsertPoolTracker(Tag,
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }
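    //
    // Hot-path note: both pops above use the same lock-free S-list pattern.
    // What sits on the list is the data portion of a previously freed block,
    // so the header is recovered by stepping one POOL_HEADER backwards, as in
    // this illustrative sketch:
    //
    //     Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&ListHead);
    //     Entry--;    // Entry now points at the block's POOL_HEADER
    //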
2189 // 2190 if (Entry->PreviousSize == 0) 2191 { 2192 // 2193 // There isn't anyone before us, so take the next block and 2194 // turn it into a fragment that contains the leftover data 2195 // that we don't need to satisfy the caller's request 2196 // 2197 FragmentEntry = POOL_BLOCK(Entry, i); 2198 FragmentEntry->BlockSize = Entry->BlockSize - i; 2199 2200 // 2201 // And make it point back to us 2202 // 2203 FragmentEntry->PreviousSize = i; 2204 2205 // 2206 // Now get the block that follows the new fragment and check 2207 // if it's still on the same page as us (and not at the end) 2208 // 2209 NextEntry = POOL_NEXT_BLOCK(FragmentEntry); 2210 if (PAGE_ALIGN(NextEntry) != NextEntry) 2211 { 2212 // 2213 // Adjust this next block to point to our newly created 2214 // fragment block 2215 // 2216 NextEntry->PreviousSize = FragmentEntry->BlockSize; 2217 } 2218 } 2219 else 2220 { 2221 // 2222 // There is a free entry before us, which we know is smaller 2223 // so we'll make this entry the fragment instead 2224 // 2225 FragmentEntry = Entry; 2226 2227 // 2228 // And then we'll remove from it the actual size required. 2229 // Now the entry is a leftover free fragment 2230 // 2231 Entry->BlockSize -= i; 2232 2233 // 2234 // Now let's go to the next entry after the fragment (which 2235 // used to point to our original free entry) and make it 2236 // reference the new fragment entry instead. 2237 // 2238 // This is the entry that will actually end up holding the 2239 // allocation! 2240 // 2241 Entry = POOL_NEXT_BLOCK(Entry); 2242 Entry->PreviousSize = FragmentEntry->BlockSize; 2243 2244 // 2245 // And now let's go to the entry after that one and check if 2246 // it's still on the same page, and not at the end 2247 // 2248 NextEntry = POOL_BLOCK(Entry, i); 2249 if (PAGE_ALIGN(NextEntry) != NextEntry) 2250 { 2251 // 2252 // Make it reference the allocation entry 2253 // 2254 NextEntry->PreviousSize = i; 2255 } 2256 } 2257 2258 // 2259 // Now our (allocation) entry is the right size 2260 // 2261 Entry->BlockSize = i; 2262 2263 // 2264 // And the next entry is now the free fragment which contains 2265 // the remaining difference between how big the original entry 2266 // was, and the actual size the caller needs/requested. 2267 // 2268 FragmentEntry->PoolType = 0; 2269 BlockSize = FragmentEntry->BlockSize; 2270 2271 // 2272 // Now check if enough free bytes remained for us to have a 2273 // "full" entry, which contains enough bytes for a linked list 2274 // and thus can be used for allocations (up to 8 bytes...) 
2275 // 2276 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]); 2277 if (BlockSize != 1) 2278 { 2279 // 2280 // Insert the free entry into the free list for this size 2281 // 2282 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1], 2283 POOL_FREE_BLOCK(FragmentEntry)); 2284 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry)); 2285 } 2286 } 2287 2288 // 2289 // We have found an entry for this allocation, so set the pool type 2290 // and release the lock since we're done 2291 // 2292 Entry->PoolType = OriginalType + 1; 2293 ExpCheckPoolBlocks(Entry); 2294 ExUnlockPool(PoolDesc, OldIrql); 2295 2296 // 2297 // Increment required counters 2298 // 2299 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE); 2300 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs); 2301 2302 // 2303 // Track this allocation 2304 // 2305 ExpInsertPoolTracker(Tag, 2306 Entry->BlockSize * POOL_BLOCK_SIZE, 2307 OriginalType); 2308 2309 // 2310 // Return the pool allocation 2311 // 2312 Entry->PoolTag = Tag; 2313 (POOL_FREE_BLOCK(Entry))->Flink = NULL; 2314 (POOL_FREE_BLOCK(Entry))->Blink = NULL; 2315 return POOL_FREE_BLOCK(Entry); 2316 } 2317 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]); 2318 2319 // 2320 // There were no free entries left, so we have to allocate a new fresh page 2321 // 2322 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE); 2323 if (!Entry) 2324 { 2325 #if DBG 2326 // 2327 // Out of memory, display current consumption 2328 // Let's consider that if the caller wanted more 2329 // than a hundred pages, that's a bogus caller 2330 // and we are not out of memory. Dump at most 2331 // once a second to avoid spamming the log. 2332 // 2333 if (NumberOfBytes < 100 * PAGE_SIZE && 2334 KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000) 2335 { 2336 MiDumpPoolConsumers(FALSE, 0, 0, 0); 2337 MiLastPoolDumpTime = KeQueryInterruptTime(); 2338 } 2339 #endif 2340 2341 // 2342 // Must succeed pool is deprecated, but still supported. These allocation 2343 // failures must cause an immediate bugcheck 2344 // 2345 if (OriginalType & MUST_SUCCEED_POOL_MASK) 2346 { 2347 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY, 2348 PAGE_SIZE, 2349 NonPagedPoolDescriptor.TotalPages, 2350 NonPagedPoolDescriptor.TotalBigPages, 2351 0); 2352 } 2353 2354 // 2355 // Internal debugging 2356 // 2357 ExPoolFailures++; 2358 2359 // 2360 // This flag requests printing failures, and can also further specify 2361 // breaking on failures 2362 // 2363 if (ExpPoolFlags & POOL_FLAG_DBGPRINT_ON_FAILURE) 2364 { 2365 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n", 2366 NumberOfBytes, 2367 OriginalType); 2368 if (ExpPoolFlags & POOL_FLAG_CRASH_ON_FAILURE) DbgBreakPoint(); 2369 } 2370 2371 // 2372 // Finally, this flag requests an exception, which we are more than 2373 // happy to raise! 2374 // 2375 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE) 2376 { 2377 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES); 2378 } 2379 2380 // 2381 // Return NULL to the caller in all other cases 2382 // 2383 return NULL; 2384 } 2385 2386 // 2387 // Setup the entry data 2388 // 2389 Entry->Ulong1 = 0; 2390 Entry->BlockSize = i; 2391 Entry->PoolType = OriginalType + 1; 2392 2393 // 2394 // This page will have two entries -- one for the allocation (which we just 2395 // created above), and one for the remaining free bytes, which we're about 2396 // to create now. The free bytes are the whole page minus what was allocated 2397 // and then converted into units of block headers. 
2398 // 2399 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i; 2400 FragmentEntry = POOL_BLOCK(Entry, i); 2401 FragmentEntry->Ulong1 = 0; 2402 FragmentEntry->BlockSize = BlockSize; 2403 FragmentEntry->PreviousSize = i; 2404 2405 // 2406 // Increment required counters 2407 // 2408 InterlockedIncrement((PLONG)&PoolDesc->TotalPages); 2409 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE); 2410 2411 // 2412 // Now check if enough free bytes remained for us to have a "full" entry, 2413 // which contains enough bytes for a linked list and thus can be used for 2414 // allocations (up to 8 bytes...) 2415 // 2416 if (FragmentEntry->BlockSize != 1) 2417 { 2418 // 2419 // Excellent -- acquire the pool lock 2420 // 2421 OldIrql = ExLockPool(PoolDesc); 2422 2423 // 2424 // And insert the free entry into the free list for this block size 2425 // 2426 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]); 2427 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1], 2428 POOL_FREE_BLOCK(FragmentEntry)); 2429 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry)); 2430 2431 // 2432 // Release the pool lock 2433 // 2434 ExpCheckPoolBlocks(Entry); 2435 ExUnlockPool(PoolDesc, OldIrql); 2436 } 2437 else 2438 { 2439 // 2440 // Simply do a sanity check 2441 // 2442 ExpCheckPoolBlocks(Entry); 2443 } 2444 2445 // 2446 // Increment performance counters and track this allocation 2447 // 2448 InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs); 2449 ExpInsertPoolTracker(Tag, 2450 Entry->BlockSize * POOL_BLOCK_SIZE, 2451 OriginalType); 2452 2453 // 2454 // And return the pool allocation 2455 // 2456 ExpCheckPoolBlocks(Entry); 2457 Entry->PoolTag = Tag; 2458 return POOL_FREE_BLOCK(Entry); 2459 } 2460 2461 /* 2462 * @implemented 2463 */ 2464 PVOID 2465 NTAPI 2466 ExAllocatePool(POOL_TYPE PoolType, 2467 SIZE_T NumberOfBytes) 2468 { 2469 ULONG Tag = TAG_NONE; 2470 #if 0 && DBG 2471 PLDR_DATA_TABLE_ENTRY LdrEntry; 2472 2473 /* Use the first four letters of the driver name, or "None" if unavailable */ 2474 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL 2475 ? MiLookupDataTableEntry(_ReturnAddress()) 2476 : NULL; 2477 if (LdrEntry) 2478 { 2479 ULONG i; 2480 Tag = 0; 2481 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++) 2482 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24; 2483 for (; i < 4; i++) 2484 Tag = Tag >> 8 | ' ' << 24; 2485 } 2486 #endif 2487 return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag); 2488 } 2489 2490 /* 2491 * @implemented 2492 */ 2493 VOID 2494 NTAPI 2495 ExFreePoolWithTag(IN PVOID P, 2496 IN ULONG TagToFree) 2497 { 2498 PPOOL_HEADER Entry, NextEntry; 2499 USHORT BlockSize; 2500 KIRQL OldIrql; 2501 POOL_TYPE PoolType; 2502 PPOOL_DESCRIPTOR PoolDesc; 2503 ULONG Tag; 2504 BOOLEAN Combined = FALSE; 2505 PFN_NUMBER PageCount, RealPageCount; 2506 PKPRCB Prcb = KeGetCurrentPrcb(); 2507 PGENERAL_LOOKASIDE LookasideList; 2508 PEPROCESS Process; 2509 2510 // 2511 // Check if any of the debug flags are enabled 2512 // 2513 if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS | 2514 POOL_FLAG_CHECK_WORKERS | 2515 POOL_FLAG_CHECK_RESOURCES | 2516 POOL_FLAG_VERIFIER | 2517 POOL_FLAG_CHECK_DEADLOCK | 2518 POOL_FLAG_SPECIAL_POOL)) 2519 { 2520 // 2521 // Check if special pool is enabled 2522 // 2523 if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) 2524 { 2525 // 2526 // Check if it was allocated from a special pool 2527 // 2528 if (MmIsSpecialPoolAddress(P)) 2529 { 2530 // 2531 // Was deadlock verification also enabled? 
    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningAllocs);
    ExpInsertPoolTracker(Tag,
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         OriginalType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePool(POOL_TYPE PoolType,
               SIZE_T NumberOfBytes)
{
    ULONG Tag = TAG_NONE;
#if 0 && DBG
    PLDR_DATA_TABLE_ENTRY LdrEntry;

    /* Use the first four letters of the driver name, or "None" if unavailable */
    LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
                   ? MiLookupDataTableEntry(_ReturnAddress())
                   : NULL;
    if (LdrEntry)
    {
        ULONG i;
        Tag = 0;
        for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
            Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
        for (; i < 4; i++)
            Tag = Tag >> 8 | ' ' << 24;
    }
#endif
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}

/*
 * @implemented
 */
VOID
NTAPI
ExFreePoolWithTag(IN PVOID P,
                  IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    USHORT BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    ULONG Tag;
    BOOLEAN Combined = FALSE;
    PFN_NUMBER PageCount, RealPageCount;
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList;
    PEPROCESS Process;

    //
    // Check if any of the debug flags are enabled
    //
    if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                        POOL_FLAG_CHECK_WORKERS |
                        POOL_FLAG_CHECK_RESOURCES |
                        POOL_FLAG_VERIFIER |
                        POOL_FLAG_CHECK_DEADLOCK |
                        POOL_FLAG_SPECIAL_POOL))
    {
        //
        // Check if special pool is enabled
        //
        if (ExpPoolFlags & POOL_FLAG_SPECIAL_POOL)
        {
            //
            // Check if it was allocated from a special pool
            //
            if (MmIsSpecialPoolAddress(P))
            {
                //
                // Was deadlock verification also enabled? We can do some extra
                // checks at this point
                //
                if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
                {
                    DPRINT1("Verifier not yet supported\n");
                }

                //
                // It is, so handle it via special pool free routine
                //
                MmFreeSpecialPool(P);
                return;
            }
        }

        //
        // For non-big page allocations, we'll do a bunch of checks in here
        //
        if (PAGE_ALIGN(P) != P)
        {
            //
            // Get the entry for this pool allocation
            // The pointer math here may look wrong or confusing, but it is quite right
            //
            Entry = P;
            Entry--;

            //
            // Get the pool type
            //
            PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;

            //
            // FIXME: Many other debugging checks go here
            //
            ExpCheckPoolIrqlLevel(PoolType, 0, P);
        }
    }
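    //
    // Note on the PAGE_ALIGN tests: only whole-page ("big") allocations can
    // start on a page boundary, because a small block always sits at least
    // sizeof(POOL_HEADER) into its page. For example (illustrative addresses,
    // 4KB pages), 0xF0001008 takes the small-block path below, while
    // 0xF0001000 takes the big-page path.
    //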
    //
    // Check if this is a big page allocation
    //
    if (PAGE_ALIGN(P) == P)
    {
        //
        // We need to find the tag for it, so first we need to find out what
        // kind of allocation this was (paged or nonpaged), then we can go
        // ahead and try finding the tag for it. Remember to get rid of the
        // PROTECTED_POOL tag if it's found.
        //
        // Note that if at insertion time, we failed to add the tag for a big
        // pool allocation, we used a special tag called 'BIG' to identify the
        // allocation, and we may get this tag back. In this scenario, we must
        // manually get the size of the allocation by actually counting through
        // the PFN database.
        //
        PoolType = MmDeterminePoolType(P);
        ExpCheckPoolIrqlLevel(PoolType, 0, P);
        Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
        if (!Tag)
        {
            DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
            ASSERT(Tag == ' GIB');
            PageCount = 1; // We are going to lie! This might screw up accounting?
        }
        else if (Tag & PROTECTED_POOL)
        {
            Tag &= ~PROTECTED_POOL;
        }

        //
        // Check block tag
        //
        if (TagToFree && TagToFree != Tag)
        {
            DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
#if DBG
            /* Do not bugcheck in case this is a big allocation for which we didn't manage to insert the tag */
            if (Tag != ' GIB')
                KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
#endif
        }

        //
        // We have our tag and our page count, so we can go ahead and remove this
        // tracker now
        //
        ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);

        //
        // Check if any of the debug flags are enabled
        //
        if (ExpPoolFlags & (POOL_FLAG_CHECK_TIMERS |
                            POOL_FLAG_CHECK_WORKERS |
                            POOL_FLAG_CHECK_RESOURCES |
                            POOL_FLAG_CHECK_DEADLOCK))
        {
            //
            // Was deadlock verification also enabled? We can do some extra
            // checks at this point
            //
            if (ExpPoolFlags & POOL_FLAG_CHECK_DEADLOCK)
            {
                DPRINT1("Verifier not yet supported\n");
            }

            //
            // FIXME: Many debugging checks go here
            //
        }

        //
        // Update counters
        //
        PoolDesc = PoolVector[PoolType];
        InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes,
                                    -(LONG_PTR)(PageCount << PAGE_SHIFT));

        //
        // Do the real free now and update the last counter with the big page count
        //
        RealPageCount = MiFreePoolPages(P);
        ASSERT(RealPageCount == PageCount);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalBigPages,
                               -(LONG)RealPageCount);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    //
    // Get the size of the entry and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Make sure that the IRQL makes sense
    //
    ExpCheckPoolIrqlLevel(PoolType, 0, P);

    //
    // Get the pool tag and get rid of the PROTECTED_POOL flag
    //
    Tag = Entry->PoolTag;
    if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;

    //
    // Check block tag
    //
    if (TagToFree && TagToFree != Tag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
#if DBG
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
#endif
    }

    //
    // Track the removal of this allocation
    //
    ExpRemovePoolTracker(Tag,
                         BlockSize * POOL_BLOCK_SIZE,
                         Entry->PoolType - 1);

    //
    // Release pool quota, if any
    //
    if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
    {
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             POOL_BILLED_PROCESS_INVALID,
                             (ULONG_PTR)P,
                             Tag,
                             (ULONG_PTR)Process);
            }
            PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
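    //
    // Layout note (illustrative): for quota-billed blocks, the owning EPROCESS
    // pointer lives in the last pointer-sized slot of the block, which is why
    // it is read back as ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] above, and why
    // ExAllocatePoolWithQuotaTag below grows the request by sizeof(PEPROCESS).
    //
    //     [ POOL_HEADER | caller data ..................... | PEPROCESS ]
    //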
    //
    // Is this allocation small enough to have come from a lookaside list?
    //
    if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].P :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // We failed, try to push it into the global lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].L :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Update performance counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->RunningDeAllocs);
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                ExpRemovePoolEntryList(POOL_FREE_BLOCK(NextEntry));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }
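    //
    // Coalescing illustration (sizes in block units): freeing the middle block
    // with free neighbors on both sides merges all three into one entry.
    //
    //     before:  [ free: 3 ][ freed: 4 ][ free: 5 ]
    //     after:   [ free: 12 ]
    //
    // The forward neighbor is absorbed first (Entry keeps its address), then
    // the backward pass moves Entry to the previous header and absorbs into it.
    //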
2869 // 2870 if ((PAGE_ALIGN(Entry) == Entry) && 2871 (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry))) 2872 { 2873 // 2874 // In this case, release the pool lock, update the performance counter, 2875 // and free the page 2876 // 2877 ExUnlockPool(PoolDesc, OldIrql); 2878 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1); 2879 MiFreePoolPages(Entry); 2880 return; 2881 } 2882 2883 // 2884 // Otherwise, we now have a free block (or a combination of 2 or 3) 2885 // 2886 Entry->PoolType = 0; 2887 BlockSize = Entry->BlockSize; 2888 ASSERT(BlockSize != 1); 2889 2890 // 2891 // Check if we actually did combine it with anyone 2892 // 2893 if (Combined) 2894 { 2895 // 2896 // Get the first combined block (either our original to begin with, or 2897 // the one after the original, depending if we combined with the previous) 2898 // 2899 NextEntry = POOL_NEXT_BLOCK(Entry); 2900 2901 // 2902 // As long as the next block isn't on a page boundary, have it point 2903 // back to us 2904 // 2905 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize; 2906 } 2907 2908 // 2909 // Insert this new free block, and release the pool lock 2910 // 2911 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry)); 2912 ExpCheckPoolLinks(POOL_FREE_BLOCK(Entry)); 2913 ExUnlockPool(PoolDesc, OldIrql); 2914 } 2915 2916 /* 2917 * @implemented 2918 */ 2919 VOID 2920 NTAPI 2921 ExFreePool(PVOID P) 2922 { 2923 // 2924 // Just free without checking for the tag 2925 // 2926 ExFreePoolWithTag(P, 0); 2927 } 2928 2929 /* 2930 * @unimplemented 2931 */ 2932 SIZE_T 2933 NTAPI 2934 ExQueryPoolBlockSize(IN PVOID PoolBlock, 2935 OUT PBOOLEAN QuotaCharged) 2936 { 2937 // 2938 // Not implemented 2939 // 2940 UNIMPLEMENTED; 2941 return FALSE; 2942 } 2943 2944 /* 2945 * @implemented 2946 */ 2947 2948 PVOID 2949 NTAPI 2950 ExAllocatePoolWithQuota(IN POOL_TYPE PoolType, 2951 IN SIZE_T NumberOfBytes) 2952 { 2953 // 2954 // Allocate the pool 2955 // 2956 return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE); 2957 } 2958 2959 /* 2960 * @implemented 2961 */ 2962 PVOID 2963 NTAPI 2964 ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType, 2965 IN SIZE_T NumberOfBytes, 2966 IN ULONG Tag, 2967 IN EX_POOL_PRIORITY Priority) 2968 { 2969 PVOID Buffer; 2970 2971 // 2972 // Allocate the pool 2973 // 2974 Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag); 2975 if (Buffer == NULL) 2976 { 2977 UNIMPLEMENTED; 2978 } 2979 2980 return Buffer; 2981 } 2982 2983 /* 2984 * @implemented 2985 */ 2986 PVOID 2987 NTAPI 2988 ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType, 2989 IN SIZE_T NumberOfBytes, 2990 IN ULONG Tag) 2991 { 2992 BOOLEAN Raise = TRUE; 2993 PVOID Buffer; 2994 PPOOL_HEADER Entry; 2995 NTSTATUS Status; 2996 PEPROCESS Process = PsGetCurrentProcess(); 2997 2998 // 2999 // Check if we should fail instead of raising an exception 3000 // 3001 if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE) 3002 { 3003 Raise = FALSE; 3004 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE; 3005 } 3006 3007 // 3008 // Inject the pool quota mask 3009 // 3010 PoolType += QUOTA_POOL_MASK; 3011 3012 // 3013 // Check if we have enough space to add the quota owner process, as long as 3014 // this isn't the system process, which never gets charged quota 3015 // 3016 ASSERT(NumberOfBytes != 0); 3017 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) && 3018 (Process != PsInitialSystemProcess)) 3019 { 3020 // 3021 // Add space for our EPROCESS pointer 3022 // 3023 NumberOfBytes += 
/*
 * @unimplemented
 */
SIZE_T
NTAPI
ExQueryPoolBlockSize(IN PVOID PoolBlock,
                     OUT PBOOLEAN QuotaCharged)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuota(IN POOL_TYPE PoolType,
                        IN SIZE_T NumberOfBytes)
{
    //
    // Allocate the pool
    //
    return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN ULONG Tag,
                              IN EX_POOL_PRIORITY Priority)
{
    PVOID Buffer;

    //
    // Allocate the pool
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
    if (Buffer == NULL)
    {
        UNIMPLEMENTED;
    }

    return Buffer;
}

/*
 * @implemented
 */
PVOID
NTAPI
ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType,
                           IN SIZE_T NumberOfBytes,
                           IN ULONG Tag)
{
    BOOLEAN Raise = TRUE;
    PVOID Buffer;
    PPOOL_HEADER Entry;
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();

    //
    // Check if we should fail instead of raising an exception
    //
    if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
    {
        Raise = FALSE;
        PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
    }

    //
    // Inject the pool quota mask
    //
    PoolType += QUOTA_POOL_MASK;

    //
    // Check if we have enough space to add the quota owner process, as long as
    // this isn't the system process, which never gets charged quota
    //
    ASSERT(NumberOfBytes != 0);
    if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
        (Process != PsInitialSystemProcess))
    {
        //
        // Add space for our EPROCESS pointer
        //
        NumberOfBytes += sizeof(PEPROCESS);
    }
    else
    {
        //
        // We won't be able to store the pointer, so don't use quota for this
        //
        PoolType -= QUOTA_POOL_MASK;
    }

    //
    // Allocate the pool buffer now
    //
    Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);

    //
    // If the buffer is page-aligned, this is a large page allocation and we
    // won't touch it
    //
    if (PAGE_ALIGN(Buffer) != Buffer)
    {
        //
        // Also if special pool is enabled, and this was allocated from there,
        // we won't touch it either
        //
        if ((ExpPoolFlags & POOL_FLAG_SPECIAL_POOL) &&
            (MmIsSpecialPoolAddress(Buffer)))
        {
            return Buffer;
        }

        //
        // If it wasn't actually allocated with quota charges, ignore it too
        //
        if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;

        //
        // If this is the system process, we don't charge quota, so ignore
        //
        if (Process == PsInitialSystemProcess) return Buffer;

        //
        // Actually go and charge quota for the process now
        //
        Entry = POOL_ENTRY(Buffer);
        Status = PsChargeProcessPoolQuota(Process,
                                          PoolType & BASE_POOL_TYPE_MASK,
                                          Entry->BlockSize * POOL_BLOCK_SIZE);
        if (!NT_SUCCESS(Status))
        {
            //
            // Quota failed, back out the allocation, clear the owner, and fail
            //
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            ExFreePoolWithTag(Buffer, Tag);
            if (Raise) RtlRaiseStatus(Status);
            return NULL;
        }

        //
        // Quota worked, write the owner and then reference it before returning
        //
        ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
        ObReferenceObject(Process);
    }
    else if (!(Buffer) && (Raise))
    {
        //
        // The allocation failed, raise an error if we are in raise mode
        //
        RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
    }

    //
    // Return the allocated buffer
    //
    return Buffer;
}

/* EOF */