/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/rtl/libsupp.c
 * PURPOSE:         RTL Support Routines
 * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
 *                  Gunnar Dalsnes
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define TAG_ATMT 'TotA' /* Atom table */
#define TAG_RTHL 'LHtR' /* Heap Lock */

extern ULONG NtGlobalFlag;

typedef struct _RTL_RANGE_ENTRY
{
    LIST_ENTRY Entry;
    RTL_RANGE Range;
} RTL_RANGE_ENTRY, *PRTL_RANGE_ENTRY;

PAGED_LOOKASIDE_LIST RtlpRangeListEntryLookasideList;
SIZE_T RtlpAllocDeallocQueryBufferSize = 128;

/* FUNCTIONS *****************************************************************/

PVOID
NTAPI
RtlPcToFileHeader(
    IN PVOID PcValue,
    OUT PVOID *BaseOfImage)
{
    PLDR_DATA_TABLE_ENTRY LdrEntry;
    BOOLEAN InSystem;
    KIRQL OldIrql;

    /* Get the base for this file */
    if ((ULONG_PTR)PcValue > (ULONG_PTR)MmHighestUserAddress)
    {
        /* Acquire the loaded module spinlock */
        KeAcquireSpinLock(&PsLoadedModuleSpinLock, &OldIrql);

        /* We are in kernel */
        *BaseOfImage = KiPcToFileHeader(PcValue, &LdrEntry, FALSE, &InSystem);

        /* Release lock */
        KeReleaseSpinLock(&PsLoadedModuleSpinLock, OldIrql);
    }
    else
    {
        /* User mode is not handled here! */
        *BaseOfImage = NULL;
    }

    return *BaseOfImage;
}
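/*
 * Usage sketch (illustrative only, not part of this file): a kernel-mode
 * caller that wants to know which loaded image a code address belongs to
 * could do something like the following, assuming the MSVC _ReturnAddress()
 * intrinsic is available:
 *
 *     PVOID ImageBase;
 *     if (RtlPcToFileHeader(_ReturnAddress(), &ImageBase) != NULL)
 *     {
 *         DPRINT1("Caller resides in the image based at %p\n", ImageBase);
 *     }
 *
 * User-mode addresses are not resolved here; they simply yield NULL.
 */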
VOID
NTAPI
RtlInitializeRangeListPackage(VOID)
{
    /* Set up the lookaside list for allocations (not used yet) */
    ExInitializePagedLookasideList(&RtlpRangeListEntryLookasideList,
                                   NULL,
                                   NULL,
                                   POOL_COLD_ALLOCATION,
                                   sizeof(RTL_RANGE_ENTRY),
                                   'elRR',
                                   16);
}

BOOLEAN
NTAPI
RtlpCheckForActiveDebugger(VOID)
{
    /* This check is meaningless in kernel-mode */
    return FALSE;
}

BOOLEAN
NTAPI
RtlpSetInDbgPrint(VOID)
{
    /* Nothing to set in kernel mode */
    return FALSE;
}

VOID
NTAPI
RtlpClearInDbgPrint(VOID)
{
    /* Nothing to clear in kernel mode */
}

KPROCESSOR_MODE
NTAPI
RtlpGetMode(VOID)
{
    return KernelMode;
}

PVOID
NTAPI
RtlpAllocateMemory(ULONG Bytes,
                   ULONG Tag)
{
    return ExAllocatePoolWithTag(PagedPool,
                                 (SIZE_T)Bytes,
                                 Tag);
}


#define TAG_USTR 'RTSU'
#define TAG_ASTR 'RTSA'
#define TAG_OSTR 'RTSO'
VOID
NTAPI
RtlpFreeMemory(PVOID Mem,
               ULONG Tag)
{
    if (Tag == TAG_ASTR || Tag == TAG_OSTR || Tag == TAG_USTR)
        ExFreePool(Mem);
    else
        ExFreePoolWithTag(Mem, Tag);
}

/*
 * @implemented
 */
VOID NTAPI
RtlAcquirePebLock(VOID)
{

}

/*
 * @implemented
 */
VOID NTAPI
RtlReleasePebLock(VOID)
{

}

NTSTATUS
NTAPI
LdrShutdownThread(VOID)
{
    return STATUS_SUCCESS;
}


PPEB
NTAPI
RtlGetCurrentPeb(VOID)
{
    return ((PEPROCESS)(KeGetCurrentThread()->ApcState.Process))->Peb;
}

NTSTATUS
NTAPI
RtlDeleteHeapLock(IN OUT PHEAP_LOCK Lock)
{
    ExDeleteResourceLite(&Lock->Resource);
    ExFreePoolWithTag(Lock, TAG_RTHL);

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
RtlEnterHeapLock(IN OUT PHEAP_LOCK Lock, IN BOOLEAN Exclusive)
{
    KeEnterCriticalRegion();

    if (Exclusive)
        ExAcquireResourceExclusiveLite(&Lock->Resource, TRUE);
    else
        ExAcquireResourceSharedLite(&Lock->Resource, TRUE);

    return STATUS_SUCCESS;
}

BOOLEAN
NTAPI
RtlTryEnterHeapLock(IN OUT PHEAP_LOCK Lock, IN BOOLEAN Exclusive)
{
    BOOLEAN Success;
    KeEnterCriticalRegion();

    if (Exclusive)
        Success = ExAcquireResourceExclusiveLite(&Lock->Resource, FALSE);
    else
        Success = ExAcquireResourceSharedLite(&Lock->Resource, FALSE);

    if (!Success)
        KeLeaveCriticalRegion();

    return Success;
}

NTSTATUS
NTAPI
RtlInitializeHeapLock(IN OUT PHEAP_LOCK *Lock)
{
    PHEAP_LOCK HeapLock = ExAllocatePoolWithTag(NonPagedPool,
                                                sizeof(HEAP_LOCK),
                                                TAG_RTHL);
    if (HeapLock == NULL)
        return STATUS_NO_MEMORY;

    ExInitializeResourceLite(&HeapLock->Resource);
    *Lock = HeapLock;

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
RtlLeaveHeapLock(IN OUT PHEAP_LOCK Lock)
{
    ExReleaseResourceLite(&Lock->Resource);
    KeLeaveCriticalRegion();

    return STATUS_SUCCESS;
}
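/*
 * The heap lock routines above give the shared RTL heap code its kernel-mode
 * lock implementation. A minimal sketch of the expected pairing (illustrative
 * only; the real callers live in the shared heap sources):
 *
 *     PHEAP_LOCK Lock;
 *     if (NT_SUCCESS(RtlInitializeHeapLock(&Lock)))
 *     {
 *         RtlEnterHeapLock(Lock, TRUE);   // exclusive; enters a critical region
 *         // ... touch heap structures ...
 *         RtlLeaveHeapLock(Lock);         // leaves the critical region again
 *         RtlDeleteHeapLock(Lock);        // frees the ERESOURCE and the pool block
 *     }
 *
 * RtlTryEnterHeapLock backs out of the critical region itself when the
 * resource cannot be acquired, so callers only pair a successful try with
 * RtlLeaveHeapLock.
 */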
struct _HEAP;

VOID
NTAPI
RtlpAddHeapToProcessList(struct _HEAP *Heap)
{
    UNREFERENCED_PARAMETER(Heap);
}

VOID
NTAPI
RtlpRemoveHeapFromProcessList(struct _HEAP *Heap)
{
    UNREFERENCED_PARAMETER(Heap);
}

VOID
RtlInitializeHeapManager(VOID)
{
}

#if DBG
VOID FASTCALL
CHECK_PAGED_CODE_RTL(char *file, int line)
{
    if (KeGetCurrentIrql() > APC_LEVEL)
    {
        DbgPrint("%s:%i: Pageable code called at IRQL > APC_LEVEL (%u)\n", file, line, KeGetCurrentIrql());
        ASSERT(FALSE);
    }
}
#endif

VOID
NTAPI
RtlpSetHeapParameters(IN PRTL_HEAP_PARAMETERS Parameters)
{
    /* Apply defaults for non-set parameters */
    if (!Parameters->SegmentCommit) Parameters->SegmentCommit = MmHeapSegmentCommit;
    if (!Parameters->SegmentReserve) Parameters->SegmentReserve = MmHeapSegmentReserve;
    if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold;
    if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold;
}

VOID
NTAPI
RtlpCheckLogException(IN PEXCEPTION_RECORD ExceptionRecord,
                      IN PCONTEXT ContextRecord,
                      IN PVOID ContextData,
                      IN ULONG Size)
{
    /* Check the global flag */
    if (NtGlobalFlag & FLG_ENABLE_EXCEPTION_LOGGING)
    {
        /* FIXME: Log this exception */
    }
}

BOOLEAN
NTAPI
RtlpHandleDpcStackException(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
                            IN ULONG_PTR RegistrationFrameEnd,
                            IN OUT PULONG_PTR StackLow,
                            IN OUT PULONG_PTR StackHigh)
{
    PKPRCB Prcb;
    ULONG_PTR DpcStack;

    /* Check if we are at DISPATCH or higher */
    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        /* Get the PRCB and DPC Stack */
        Prcb = KeGetCurrentPrcb();
        DpcStack = (ULONG_PTR)Prcb->DpcStack;

        /* Check if we are in a DPC and the stack matches */
        if ((Prcb->DpcRoutineActive) &&
            (RegistrationFrameEnd <= DpcStack) &&
            ((ULONG_PTR)RegistrationFrame >= DpcStack - KERNEL_STACK_SIZE))
        {
            /* Update the limits to the DPC Stack's */
            *StackHigh = DpcStack;
            *StackLow = DpcStack - KERNEL_STACK_SIZE;
            return TRUE;
        }
    }

    /* Not in DPC stack */
    return FALSE;
}

#if !defined(_ARM_) && !defined(_AMD64_)

BOOLEAN
NTAPI
RtlpCaptureStackLimits(IN ULONG_PTR Ebp,
                       IN ULONG_PTR *StackBegin,
                       IN ULONG_PTR *StackEnd)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Don't even try at ISR level or later */
    if (KeGetCurrentIrql() > DISPATCH_LEVEL) return FALSE;

    /* Start with defaults */
    *StackBegin = Thread->StackLimit;
    *StackEnd = (ULONG_PTR)Thread->StackBase;

    /* Check if EBP is inside the stack */
    if ((*StackBegin <= Ebp) && (Ebp <= *StackEnd))
    {
        /* Then make the stack start at EBP */
        *StackBegin = Ebp;
    }
    else
    {
        /* Now we're going to assume we're on the DPC stack */
        *StackEnd = (ULONG_PTR)(KeGetPcr()->Prcb->DpcStack);
        *StackBegin = *StackEnd - KERNEL_STACK_SIZE;

        /* Check if we seem to be on the DPC stack */
        if ((*StackEnd) && (*StackBegin < Ebp) && (Ebp <= *StackEnd))
        {
            /* We're on the DPC stack */
            *StackBegin = Ebp;
        }
        else
        {
            /* We're somewhere else entirely... use EBP for safety */
            *StackBegin = Ebp;
            *StackEnd = (ULONG_PTR)PAGE_ALIGN(*StackBegin);
        }
    }

    /* Return success */
    return TRUE;
}

/*
 * @implemented
 */
ULONG
NTAPI
RtlWalkFrameChain(OUT PVOID *Callers,
                  IN ULONG Count,
                  IN ULONG Flags)
{
    ULONG_PTR Stack, NewStack, StackBegin, StackEnd = 0;
    ULONG Eip;
    BOOLEAN Result, StopSearch = FALSE;
    ULONG i = 0;
    PETHREAD Thread = PsGetCurrentThread();
    PTEB Teb;
    PKTRAP_FRAME TrapFrame;

    /* Get current EBP */
#if defined(_M_IX86)
#if defined __GNUC__
    __asm__("mov %%ebp, %0" : "=r" (Stack) : );
#elif defined(_MSC_VER)
    __asm mov Stack, ebp
#endif
#elif defined(_M_MIPS)
    __asm__("move %0, $sp" : "=r" (Stack) : );
#elif defined(_M_PPC)
    __asm__("mr %0,1" : "=r" (Stack) : );
#elif defined(_M_ARM)
    __asm__("mov %0, sp" : "=r"(Stack) : );
#else
#error Unknown architecture
#endif

    /* Set it as the stack begin limit as well */
    StackBegin = (ULONG_PTR)Stack;

    /* Check if we're called for non-logging mode */
    if (!Flags)
    {
        /* Get the actual safe limits */
        Result = RtlpCaptureStackLimits((ULONG_PTR)Stack,
                                        &StackBegin,
                                        &StackEnd);
        if (!Result) return 0;
    }

    /* Use a SEH block for maximum protection */
    _SEH2_TRY
    {
        /* Check if we want the user-mode stack frame */
        if (Flags == 1)
        {
            /* Get the trap frame and TEB */
            TrapFrame = KeGetTrapFrame(&Thread->Tcb);
            Teb = Thread->Tcb.Teb;

            /* Make sure we can trust the TEB and trap frame */
            if (!(Teb) ||
                (KeIsAttachedProcess()) ||
                (KeGetCurrentIrql() >= DISPATCH_LEVEL))
            {
                /* Invalid or unsafe attempt to get the stack */
                _SEH2_YIELD(return 0;)
            }

            /* Get the stack limits */
            StackBegin = (ULONG_PTR)Teb->NtTib.StackLimit;
            StackEnd = (ULONG_PTR)Teb->NtTib.StackBase;
#ifdef _M_IX86
            Stack = TrapFrame->Ebp;
#elif defined(_M_PPC)
            Stack = TrapFrame->Gpr1;
#else
#error Unknown architecture
#endif

            /* Validate them */
            if (StackEnd <= StackBegin) _SEH2_YIELD(return 0);
            ProbeForRead((PVOID)StackBegin,
                         StackEnd - StackBegin,
                         sizeof(CHAR));
        }

        /* Loop the frames */
        for (i = 0; i < Count; i++)
        {
            /*
             * Leave if we're past the stack,
             * if we're before the stack,
             * or if we've reached ourselves.
             */
            if ((Stack >= StackEnd) ||
                (!i ? (Stack < StackBegin) : (Stack <= StackBegin)) ||
                ((StackEnd - Stack) < (2 * sizeof(ULONG_PTR))))
            {
                /* We're done or hit a bad address */
                break;
            }

            /* Get new stack and EIP */
            NewStack = *(PULONG_PTR)Stack;
            Eip = *(PULONG_PTR)(Stack + sizeof(ULONG_PTR));

            /* Check that the new pointer is above the old one and below the stack end */
            if (!((Stack < NewStack) && (NewStack < StackEnd)))
            {
                /* Stop searching after this entry */
                StopSearch = TRUE;
            }

            /* Also make sure that the EIP isn't a stack address */
            if ((StackBegin < Eip) && (Eip < StackEnd)) break;

            /* Check if we reached a user-mode address */
            if (!(Flags) && !(Eip & 0x80000000)) break; // FIXME: 3GB breakage

            /* Save this frame */
            Callers[i] = (PVOID)Eip;

            /* Check if we should continue */
            if (StopSearch)
            {
                /* Return the next index */
                i++;
                break;
            }

            /* Move to the next stack */
            Stack = NewStack;
        }
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* No index */
        i = 0;
    }
    _SEH2_END;

    /* Return frames parsed */
    return i;
}

#endif
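/*
 * Usage sketch for RtlWalkFrameChain (illustrative only, not part of this
 * file): capturing up to eight kernel-mode return addresses from the current
 * stack could look like
 *
 *     PVOID Callers[8];
 *     ULONG i, Captured;
 *
 *     Captured = RtlWalkFrameChain(Callers, RTL_NUMBER_OF(Callers), 0);
 *     for (i = 0; i < Captured; i++)
 *     {
 *         DPRINT1("Frame %lu: %p\n", i, Callers[i]);
 *     }
 *
 * Flags == 0 walks the current kernel stack; Flags == 1 attempts to walk the
 * user-mode stack of the current thread instead, as handled above.
 */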
#if defined(_M_AMD64) || defined(_M_ARM)
VOID
NTAPI
RtlpGetStackLimits(
    OUT PULONG_PTR LowLimit,
    OUT PULONG_PTR HighLimit)
{
    PKTHREAD CurrentThread = KeGetCurrentThread();
    *HighLimit = (ULONG_PTR)CurrentThread->InitialStack;
    *LowLimit = (ULONG_PTR)CurrentThread->StackLimit;
}
#endif

/* RTL Atom Tables ************************************************************/

NTSTATUS
RtlpInitAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
    ExInitializeFastMutex(&AtomTable->FastMutex);

    return STATUS_SUCCESS;
}


VOID
RtlpDestroyAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
}


BOOLEAN
RtlpLockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExAcquireFastMutex(&AtomTable->FastMutex);
    return TRUE;
}

VOID
RtlpUnlockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExReleaseFastMutex(&AtomTable->FastMutex);
}

BOOLEAN
RtlpCreateAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    AtomTable->ExHandleTable = ExCreateHandleTable(NULL);
    return (AtomTable->ExHandleTable != NULL);
}

BOOLEAN
NTAPI
RtlpCloseHandleCallback(
    IN PHANDLE_TABLE_ENTRY HandleTableEntry,
    IN HANDLE Handle,
    IN PVOID HandleTable)
{
    /* Destroy and unlock the handle entry */
    return ExDestroyHandle(HandleTable, Handle, HandleTableEntry);
}

VOID
RtlpDestroyAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    if (AtomTable->ExHandleTable)
    {
        ExSweepHandleTable(AtomTable->ExHandleTable,
                           RtlpCloseHandleCallback,
                           AtomTable->ExHandleTable);
        ExDestroyHandleTable(AtomTable->ExHandleTable, NULL);
        AtomTable->ExHandleTable = NULL;
    }
}

PRTL_ATOM_TABLE
RtlpAllocAtomTable(ULONG Size)
{
    PRTL_ATOM_TABLE Table = ExAllocatePoolWithTag(NonPagedPool,
                                                  Size,
                                                  TAG_ATMT);
    if (Table != NULL)
    {
        RtlZeroMemory(Table,
                      Size);
    }

    return Table;
}

VOID
RtlpFreeAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExFreePoolWithTag(AtomTable, TAG_ATMT);
}

PRTL_ATOM_TABLE_ENTRY
RtlpAllocAtomTableEntry(ULONG Size)
{
    PRTL_ATOM_TABLE_ENTRY Entry;

    Entry = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_ATMT);
    if (Entry != NULL)
    {
        RtlZeroMemory(Entry, Size);
    }

    return Entry;
}

VOID
RtlpFreeAtomTableEntry(PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExFreePoolWithTag(Entry, TAG_ATMT);
}

VOID
RtlpFreeAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExDestroyHandle(AtomTable->ExHandleTable,
                    (HANDLE)((ULONG_PTR)Entry->HandleIndex << 2),
                    NULL);
}

BOOLEAN
RtlpCreateAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    HANDLE_TABLE_ENTRY ExEntry;
    HANDLE Handle;
    USHORT HandleIndex;

    /* Initialize ex handle table entry */
    ExEntry.Object = Entry;
    ExEntry.GrantedAccess = 0x1; /* FIXME - valid handle */

    /* Create ex handle */
    Handle = ExCreateHandle(AtomTable->ExHandleTable,
                            &ExEntry);
    if (!Handle) return FALSE;

    /* Calculate HandleIndex (by getting rid of the first two bits) */
    HandleIndex = (USHORT)((ULONG_PTR)Handle >> 2);

    /* Index must be less than 0xC000 */
    if (HandleIndex >= 0xC000)
    {
        /* Destroy ex handle */
        ExDestroyHandle(AtomTable->ExHandleTable,
                        Handle,
                        NULL);

        /* Return failure */
        return FALSE;
    }

    /* Initialize atom table entry */
    Entry->HandleIndex = HandleIndex;
    Entry->Atom = 0xC000 + HandleIndex;

    /* Return success */
    return TRUE;
}
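/*
 * Worked example for the handle/atom mapping above (the two-bit shift assumes
 * that Ex handle values keep their low two bits free for tagging, which is
 * how the Ex handle table packages indices): an Ex handle of 0x10 gives
 * HandleIndex 0x10 >> 2 == 0x4 and therefore the atom 0xC000 + 0x4 == 0xC004.
 * RtlpGetAtomEntry and RtlpFreeAtomHandle reverse this by shifting the index
 * left by two bits again to rebuild the handle.
 */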
PRTL_ATOM_TABLE_ENTRY
RtlpGetAtomEntry(PRTL_ATOM_TABLE AtomTable, ULONG Index)
{
    PHANDLE_TABLE_ENTRY ExEntry;
    PRTL_ATOM_TABLE_ENTRY Entry = NULL;

    /* NOTE: There's no need to explicitly enter a critical region because it's
       guaranteed that we're in a critical region right now (as we hold
       the atom table lock) */

    ExEntry = ExMapHandleToPointer(AtomTable->ExHandleTable,
                                   (HANDLE)((ULONG_PTR)Index << 2));
    if (ExEntry != NULL)
    {
        Entry = ExEntry->Object;

        ExUnlockHandleTableEntry(AtomTable->ExHandleTable,
                                 ExEntry);
    }

    return Entry;
}

/* Ldr SEH-Protected access to IMAGE_NT_HEADERS */

/* Rtl SEH-free version of this */
NTSTATUS
NTAPI
RtlpImageNtHeaderEx(
    _In_ ULONG Flags,
    _In_ PVOID Base,
    _In_ ULONG64 Size,
    _Out_ PIMAGE_NT_HEADERS *OutHeaders);

/*
 * @implemented
 * @note: This is here so that we do not drag SEH into rosload, freeldr and bootmgfw
 */
NTSTATUS
NTAPI
RtlImageNtHeaderEx(
    _In_ ULONG Flags,
    _In_ PVOID Base,
    _In_ ULONG64 Size,
    _Out_ PIMAGE_NT_HEADERS *OutHeaders)
{
    NTSTATUS Status;

    /* Assume failure. This is also done in RtlpImageNtHeaderEx, but that call is guarded by SEH. */
    if (OutHeaders != NULL)
        *OutHeaders = NULL;

    _SEH2_TRY
    {
        Status = RtlpImageNtHeaderEx(Flags, Base, Size, OutHeaders);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* Fail with the SEH error */
        Status = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    return Status;
}

/*
 * Ldr Resource support code
 */

IMAGE_RESOURCE_DIRECTORY *find_entry_by_name( IMAGE_RESOURCE_DIRECTORY *dir,
                                              LPCWSTR name, void *root,
                                              int want_dir );
IMAGE_RESOURCE_DIRECTORY *find_entry_by_id( IMAGE_RESOURCE_DIRECTORY *dir,
                                            USHORT id, void *root, int want_dir );
IMAGE_RESOURCE_DIRECTORY *find_first_entry( IMAGE_RESOURCE_DIRECTORY *dir,
                                            void *root, int want_dir );

/**********************************************************************
 *  find_entry
 *
 * Find a resource entry
 */
NTSTATUS find_entry( PVOID BaseAddress, LDR_RESOURCE_INFO *info,
                     ULONG level, void **ret, int want_dir )
{
    ULONG size;
    void *root;
    IMAGE_RESOURCE_DIRECTORY *resdirptr;

    root = RtlImageDirectoryEntryToData( BaseAddress, TRUE, IMAGE_DIRECTORY_ENTRY_RESOURCE, &size );
    if (!root) return STATUS_RESOURCE_DATA_NOT_FOUND;
    if (size < sizeof(*resdirptr)) return STATUS_RESOURCE_DATA_NOT_FOUND;
    resdirptr = root;

    if (!level--) goto done;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Type, root, want_dir || level )))
        return STATUS_RESOURCE_TYPE_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;

    resdirptr = *ret;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Name, root, want_dir || level )))
        return STATUS_RESOURCE_NAME_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;
    if (level) return STATUS_INVALID_PARAMETER;  /* level > 3 */

    resdirptr = *ret;

    if ((*ret = find_first_entry( resdirptr, root, want_dir ))) return STATUS_SUCCESS;

    return STATUS_RESOURCE_DATA_NOT_FOUND;

done:
    *ret = resdirptr;
    return STATUS_SUCCESS;
}
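/*
 * Usage sketch for find_entry (hypothetical caller, not part of this file):
 * resolving the data entry for resource type 16 (RT_VERSION), id 1, first
 * available language would look roughly like
 *
 *     LDR_RESOURCE_INFO Info = { 16, 1, 0 };
 *     IMAGE_RESOURCE_DATA_ENTRY *DataEntry;
 *     NTSTATUS Status = find_entry(ImageBase, &Info, 3, (void **)&DataEntry, FALSE);
 *
 * 'level' says how many of Type/Name/Language to consume (0..3); with
 * level == 3 and want_dir == FALSE the returned pointer is the leaf data
 * entry. Note that this kernel version picks the first entry at the language
 * level rather than matching info->Language.
 */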
NTSTATUS
NTAPI
RtlpSafeCopyMemory(
    _Out_writes_bytes_all_(Length) VOID UNALIGNED *Destination,
    _In_reads_bytes_(Length) CONST VOID UNALIGNED *Source,
    _In_ SIZE_T Length)
{
    _SEH2_TRY
    {
        RtlCopyMemory(Destination, Source, Length);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        _SEH2_YIELD(return _SEH2_GetExceptionCode());
    }
    _SEH2_END;

    return STATUS_SUCCESS;
}

BOOLEAN
NTAPI
RtlCallVectoredExceptionHandlers(_In_ PEXCEPTION_RECORD ExceptionRecord,
                                 _In_ PCONTEXT Context)
{
    /* In the kernel we don't have vectored exception handlers */
    return FALSE;
}

VOID
NTAPI
RtlCallVectoredContinueHandlers(_In_ PEXCEPTION_RECORD ExceptionRecord,
                                _In_ PCONTEXT Context)
{
    /* No vectored continue handlers either in kernel mode */
    return;
}

/* EOF */