/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/rtl/libsupp.c
 * PURPOSE:         RTL Support Routines
 * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
 *                  Gunnar Dalsnes
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

extern ULONG NtGlobalFlag;

typedef struct _RTL_RANGE_ENTRY
{
    LIST_ENTRY Entry;
    RTL_RANGE Range;
} RTL_RANGE_ENTRY, *PRTL_RANGE_ENTRY;

PAGED_LOOKASIDE_LIST RtlpRangeListEntryLookasideList;
SIZE_T RtlpAllocDeallocQueryBufferSize = 128;

/* FUNCTIONS *****************************************************************/
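/*
 * Given a kernel-mode PC value, return the image base of the loaded module
 * that contains it (and NULL for user-mode addresses, which are not handled
 * by the kernel-mode RTL).
 */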
PVOID
NTAPI
RtlPcToFileHeader(
    IN  PVOID PcValue,
    OUT PVOID *BaseOfImage)
{
    PLDR_DATA_TABLE_ENTRY LdrEntry;
    BOOLEAN InSystem;
    KIRQL OldIrql;

    /* Get the base for this file */
    if ((ULONG_PTR)PcValue > (ULONG_PTR)MmHighestUserAddress)
    {
        /* Acquire the loaded module spinlock */
        KeAcquireSpinLock(&PsLoadedModuleSpinLock, &OldIrql);

        /* We are in kernel */
        *BaseOfImage = KiPcToFileHeader(PcValue, &LdrEntry, FALSE, &InSystem);

        /* Release lock */
        KeReleaseSpinLock(&PsLoadedModuleSpinLock, OldIrql);
    }
    else
    {
        /* User mode is not handled here! */
        *BaseOfImage = NULL;
    }

    return *BaseOfImage;
}

VOID
NTAPI
RtlInitializeRangeListPackage(VOID)
{
    /* Setup the lookaside list for allocations (not used yet) */
    ExInitializePagedLookasideList(&RtlpRangeListEntryLookasideList,
                                   NULL,
                                   NULL,
                                   POOL_COLD_ALLOCATION,
                                   sizeof(RTL_RANGE_ENTRY),
                                   'elRR',
                                   16);
}

BOOLEAN
NTAPI
RtlpCheckForActiveDebugger(VOID)
{
    /* This check is meaningless in kernel-mode */
    return FALSE;
}

BOOLEAN
NTAPI
RtlpSetInDbgPrint(VOID)
{
    /* Nothing to set in kernel mode */
    return FALSE;
}

VOID
NTAPI
RtlpClearInDbgPrint(VOID)
{
    /* Nothing to clear in kernel mode */
}

KPROCESSOR_MODE
NTAPI
RtlpGetMode(VOID)
{
   return KernelMode;
}

PVOID
NTAPI
RtlpAllocateMemory(ULONG Bytes,
                   ULONG Tag)
{
    return ExAllocatePoolWithTag(PagedPool,
                                 (SIZE_T)Bytes,
                                 Tag);
}
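/*
 * NOTE: Buffers carrying the RTL string tags are freed without tag checking,
 * presumably because they may not have been allocated with a matching tag;
 * everything else is freed with its tag.
 */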
VOID
NTAPI
RtlpFreeMemory(PVOID Mem,
               ULONG Tag)
{
    if (Tag == TAG_ASTR || Tag == TAG_OSTR || Tag == TAG_USTR)
        ExFreePool(Mem);
    else
        ExFreePoolWithTag(Mem, Tag);
}

/*
 * @implemented
 */
VOID NTAPI
RtlAcquirePebLock(VOID)
{

}

/*
 * @implemented
 */
VOID NTAPI
RtlReleasePebLock(VOID)
{

}

NTSTATUS
NTAPI
LdrShutdownThread(VOID)
{
    return STATUS_SUCCESS;
}

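/*
 * Return the PEB of the process the current thread is attached to
 * (ApcState.Process), which may differ from the thread's owning process
 * while it is attached to another one.
 */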
PPEB
NTAPI
RtlGetCurrentPeb(VOID)
{
   return ((PEPROCESS)(KeGetCurrentThread()->ApcState.Process))->Peb;
}

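/*
 * In kernel mode the RTL heap lock is backed by an ERESOURCE; callers enter
 * a critical region around the acquisition so that normal kernel APCs cannot
 * suspend the owning thread while the lock is held.
 */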
NTSTATUS
NTAPI
RtlDeleteHeapLock(IN OUT PHEAP_LOCK Lock)
{
    ExDeleteResourceLite(&Lock->Resource);
    ExFreePoolWithTag(Lock, TAG_RTHL);

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
RtlEnterHeapLock(IN OUT PHEAP_LOCK Lock, IN BOOLEAN Exclusive)
{
    KeEnterCriticalRegion();

    if (Exclusive)
        ExAcquireResourceExclusiveLite(&Lock->Resource, TRUE);
    else
        ExAcquireResourceSharedLite(&Lock->Resource, TRUE);

    return STATUS_SUCCESS;
}

BOOLEAN
NTAPI
RtlTryEnterHeapLock(IN OUT PHEAP_LOCK Lock, IN BOOLEAN Exclusive)
{
    BOOLEAN Success;
    KeEnterCriticalRegion();

    if (Exclusive)
        Success = ExAcquireResourceExclusiveLite(&Lock->Resource, FALSE);
    else
        Success = ExAcquireResourceSharedLite(&Lock->Resource, FALSE);

    if (!Success)
        KeLeaveCriticalRegion();

    return Success;
}

NTSTATUS
NTAPI
RtlInitializeHeapLock(IN OUT PHEAP_LOCK *Lock)
{
    PHEAP_LOCK HeapLock = ExAllocatePoolWithTag(NonPagedPool,
                                                sizeof(HEAP_LOCK),
                                                TAG_RTHL);
    if (HeapLock == NULL)
        return STATUS_NO_MEMORY;

    ExInitializeResourceLite(&HeapLock->Resource);
    *Lock = HeapLock;

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
RtlLeaveHeapLock(IN OUT PHEAP_LOCK Lock)
{
    ExReleaseResourceLite(&Lock->Resource);
    KeLeaveCriticalRegion();

    return STATUS_SUCCESS;
}

struct _HEAP;

VOID
NTAPI
RtlpAddHeapToProcessList(struct _HEAP *Heap)
{
    UNREFERENCED_PARAMETER(Heap);
}

VOID
NTAPI
RtlpRemoveHeapFromProcessList(struct _HEAP *Heap)
{
    UNREFERENCED_PARAMETER(Heap);
}

VOID
RtlInitializeHeapManager(VOID)
{
}

#if DBG
VOID FASTCALL
CHECK_PAGED_CODE_RTL(char *file, int line)
{
  if(KeGetCurrentIrql() > APC_LEVEL)
  {
    DbgPrint("%s:%i: Pageable code called at IRQL > APC_LEVEL (%u)\n", file, line, KeGetCurrentIrql());
    ASSERT(FALSE);
  }
}
#endif

VOID
NTAPI
RtlpSetHeapParameters(IN PRTL_HEAP_PARAMETERS Parameters)
{
    /* Apply defaults for non-set parameters */
    if (!Parameters->SegmentCommit) Parameters->SegmentCommit = MmHeapSegmentCommit;
    if (!Parameters->SegmentReserve) Parameters->SegmentReserve = MmHeapSegmentReserve;
    if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold;
    if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold;
}

VOID
NTAPI
RtlpCheckLogException(IN PEXCEPTION_RECORD ExceptionRecord,
                      IN PCONTEXT ContextRecord,
                      IN PVOID ContextData,
                      IN ULONG Size)
{
    /* Check the global flag */
    if (NtGlobalFlag & FLG_ENABLE_EXCEPTION_LOGGING)
    {
        /* FIXME: Log this exception */
    }
}

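/*
 * When an exception is dispatched while the processor is running on its DPC
 * stack, the registration frame does not lie within the current thread's
 * normal stack limits. Detect that case and substitute the DPC stack bounds
 * so that the exception dispatcher's sanity checks still pass.
 */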
BOOLEAN
NTAPI
RtlpHandleDpcStackException(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
                            IN ULONG_PTR RegistrationFrameEnd,
                            IN OUT PULONG_PTR StackLow,
                            IN OUT PULONG_PTR StackHigh)
{
    PKPRCB Prcb;
    ULONG_PTR DpcStack;

    /* Check if we are at DISPATCH or higher */
    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        /* Get the PRCB and DPC Stack */
        Prcb = KeGetCurrentPrcb();
        DpcStack = (ULONG_PTR)Prcb->DpcStack;

        /* Check if we are in a DPC and the stack matches */
        if ((Prcb->DpcRoutineActive) &&
            (RegistrationFrameEnd <= DpcStack) &&
            ((ULONG_PTR)RegistrationFrame >= DpcStack - KERNEL_STACK_SIZE))
        {
            /* Update the limits to the DPC Stack's */
            *StackHigh = DpcStack;
            *StackLow = DpcStack - KERNEL_STACK_SIZE;
            return TRUE;
        }
    }

    /* Not in DPC stack */
    return FALSE;
}

#if !defined(_ARM_) && !defined(_AMD64_)

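/*
 * Derive safe stack bounds for frame-chain walking from the current frame
 * pointer: normally the thread's own stack limits, otherwise the current
 * processor's DPC stack, falling back to a range derived from Ebp itself
 * for safety.
 */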
BOOLEAN
NTAPI
RtlpCaptureStackLimits(IN ULONG_PTR Ebp,
                       IN ULONG_PTR *StackBegin,
                       IN ULONG_PTR *StackEnd)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Don't even try at ISR level or later */
    if (KeGetCurrentIrql() > DISPATCH_LEVEL) return FALSE;

    /* Start with defaults */
    *StackBegin = Thread->StackLimit;
    *StackEnd = (ULONG_PTR)Thread->StackBase;

    /* Check if EBP is inside the stack */
    if ((*StackBegin <= Ebp) && (Ebp <= *StackEnd))
    {
        /* Then make the stack start at EBP */
        *StackBegin = Ebp;
    }
    else
    {
        /* Now we're going to assume we're on the DPC stack */
        *StackEnd = (ULONG_PTR)(KeGetPcr()->Prcb->DpcStack);
        *StackBegin = *StackEnd - KERNEL_STACK_SIZE;

        /* Check if we seem to be on the DPC stack */
        if ((*StackEnd) && (*StackBegin < Ebp) && (Ebp <= *StackEnd))
        {
            /* We're on the DPC stack */
            *StackBegin = Ebp;
        }
        else
        {
            /* We're somewhere else entirely... use EBP for safety */
            *StackBegin = Ebp;
            *StackEnd = (ULONG_PTR)PAGE_ALIGN(*StackBegin);
        }
    }

    /* Return success */
    return TRUE;
}

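/*
 * Walk the EBP-linked chain of stack frames and store up to Count return
 * addresses in Callers. With Flags == 0 the current kernel stack is walked;
 * with Flags == 1 the walk starts from the user-mode frame captured in the
 * thread's trap frame instead.
 */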
/*
 * @implemented
 */
ULONG
NTAPI
RtlWalkFrameChain(OUT PVOID *Callers,
                  IN ULONG Count,
                  IN ULONG Flags)
{
    ULONG_PTR Stack, NewStack, StackBegin, StackEnd = 0;
    ULONG Eip;
    BOOLEAN Result, StopSearch = FALSE;
    ULONG i = 0;
    PETHREAD Thread = PsGetCurrentThread();
    PTEB Teb;
    PKTRAP_FRAME TrapFrame;

    /* Get current EBP */
#if defined(_M_IX86)
#if defined __GNUC__
    __asm__("mov %%ebp, %0" : "=r" (Stack) : );
#elif defined(_MSC_VER)
    __asm mov Stack, ebp
#endif
#elif defined(_M_MIPS)
    __asm__("move $sp, %0" : "=r" (Stack) : );
#elif defined(_M_PPC)
    __asm__("mr %0,1" : "=r" (Stack) : );
#elif defined(_M_ARM)
    __asm__("mov sp, %0" : "=r"(Stack) : );
#else
#error Unknown architecture
#endif

    /* Set it as the stack begin limit as well */
    StackBegin = (ULONG_PTR)Stack;

    /* Check if we're called for non-logging mode */
    if (!Flags)
    {
        /* Get the actual safe limits */
        Result = RtlpCaptureStackLimits((ULONG_PTR)Stack,
                                        &StackBegin,
                                        &StackEnd);
        if (!Result) return 0;
    }

    /* Use a SEH block for maximum protection */
    _SEH2_TRY
    {
        /* Check if we want the user-mode stack frame */
        if (Flags == 1)
        {
            /* Get the trap frame and TEB */
            TrapFrame = KeGetTrapFrame(&Thread->Tcb);
            Teb = Thread->Tcb.Teb;

            /* Make sure we can trust the TEB and trap frame */
            if (!(Teb) ||
                (KeIsAttachedProcess()) ||
                (KeGetCurrentIrql() >= DISPATCH_LEVEL))
            {
                /* Invalid or unsafe attempt to get the stack */
                _SEH2_YIELD(return 0;)
            }

            /* Get the stack limits */
            StackBegin = (ULONG_PTR)Teb->NtTib.StackLimit;
            StackEnd = (ULONG_PTR)Teb->NtTib.StackBase;
#ifdef _M_IX86
            Stack = TrapFrame->Ebp;
#elif defined(_M_PPC)
            Stack = TrapFrame->Gpr1;
#else
#error Unknown architecture
#endif

            /* Validate them */
            if (StackEnd <= StackBegin) _SEH2_YIELD(return 0);
            ProbeForRead((PVOID)StackBegin,
                         StackEnd - StackBegin,
                         sizeof(CHAR));
        }

        /* Loop the frames */
        for (i = 0; i < Count; i++)
        {
            /*
             * Leave if we're past the stack,
             * if we're before the stack,
             * or if we've reached ourselves.
             */
            if ((Stack >= StackEnd) ||
                (!i ? (Stack < StackBegin) : (Stack <= StackBegin)) ||
                ((StackEnd - Stack) < (2 * sizeof(ULONG_PTR))))
            {
                /* We're done or hit a bad address */
                break;
            }

            /* Get new stack and EIP */
            NewStack = *(PULONG_PTR)Stack;
            Eip = *(PULONG_PTR)(Stack + sizeof(ULONG_PTR));

            /* Check that the new frame pointer is above the old one and below the stack end */
            if (!((Stack < NewStack) && (NewStack < StackEnd)))
            {
                /* Stop searching after this entry */
                StopSearch = TRUE;
            }

            /* Also make sure that the EIP isn't a stack address */
            if ((StackBegin < Eip) && (Eip < StackEnd)) break;

            /* Check if we reached a user-mode address */
            if (!(Flags) && !(Eip & 0x80000000)) break; // FIXME: 3GB breakage

            /* Save this frame */
            Callers[i] = (PVOID)Eip;

            /* Check if we should continue */
            if (StopSearch)
            {
                /* Return the next index */
                i++;
                break;
            }

            /* Move to the next stack */
            Stack = NewStack;
        }
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* No index */
        i = 0;
    }
    _SEH2_END;

    /* Return frames parsed */
    return i;
}

#endif

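/*
 * Report the current thread's kernel stack limits. On x86 the FX_SAVE_AREA
 * (extended FPU state) that sits at the top of the kernel stack is excluded
 * from the high limit.
 */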
VOID
NTAPI
RtlpGetStackLimits(
    OUT PULONG_PTR LowLimit,
    OUT PULONG_PTR HighLimit)
{
    PKTHREAD CurrentThread = KeGetCurrentThread();
    *LowLimit = (ULONG_PTR)CurrentThread->StackLimit;
#ifdef _M_IX86
    *HighLimit = (ULONG_PTR)CurrentThread->InitialStack -
        sizeof(FX_SAVE_AREA);
#else
    *HighLimit = (ULONG_PTR)CurrentThread->InitialStack;
#endif
}

/* RTL Atom Tables ************************************************************/

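/*
 * The kernel-mode atom table is protected by a fast mutex, and its entries
 * are tracked in an executive handle table; the helpers below are the
 * kernel-side backends used by the shared RTL atom code.
 */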
NTSTATUS
RtlpInitAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
   ExInitializeFastMutex(&AtomTable->FastMutex);

   return STATUS_SUCCESS;
}


VOID
RtlpDestroyAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
}


BOOLEAN
RtlpLockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
   ExAcquireFastMutex(&AtomTable->FastMutex);
   return TRUE;
}

VOID
RtlpUnlockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
   ExReleaseFastMutex(&AtomTable->FastMutex);
}

BOOLEAN
RtlpCreateAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
   AtomTable->ExHandleTable = ExCreateHandleTable(NULL);
   return (AtomTable->ExHandleTable != NULL);
}

BOOLEAN
NTAPI
RtlpCloseHandleCallback(
    IN PHANDLE_TABLE_ENTRY HandleTableEntry,
    IN HANDLE Handle,
    IN PVOID HandleTable)
{
    /* Destroy and unlock the handle entry */
    return ExDestroyHandle(HandleTable, Handle, HandleTableEntry);
}

VOID
RtlpDestroyAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
   if (AtomTable->ExHandleTable)
   {
      ExSweepHandleTable(AtomTable->ExHandleTable,
                         RtlpCloseHandleCallback,
                         AtomTable->ExHandleTable);
      ExDestroyHandleTable(AtomTable->ExHandleTable, NULL);
      AtomTable->ExHandleTable = NULL;
   }
}

PRTL_ATOM_TABLE
RtlpAllocAtomTable(ULONG Size)
{
   PRTL_ATOM_TABLE Table = ExAllocatePoolWithTag(NonPagedPool,
                                                 Size,
                                                 TAG_ATMT);
   if (Table != NULL)
   {
      RtlZeroMemory(Table,
                    Size);
   }

   return Table;
}

VOID
RtlpFreeAtomTable(PRTL_ATOM_TABLE AtomTable)
{
   ExFreePoolWithTag(AtomTable, TAG_ATMT);
}

PRTL_ATOM_TABLE_ENTRY
RtlpAllocAtomTableEntry(ULONG Size)
{
    PRTL_ATOM_TABLE_ENTRY Entry;

    Entry = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_ATMT);
    if (Entry != NULL)
    {
        RtlZeroMemory(Entry, Size);
    }

    return Entry;
}

VOID
RtlpFreeAtomTableEntry(PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExFreePoolWithTag(Entry, TAG_ATMT);
}

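/*
 * Executive handles are multiples of 4 (the low two bits are unused), so the
 * stored HandleIndex is shifted left by two bits again to rebuild the handle
 * value that ExCreateHandle originally returned.
 */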
VOID
RtlpFreeAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
   ExDestroyHandle(AtomTable->ExHandleTable,
                   (HANDLE)((ULONG_PTR)Entry->HandleIndex << 2),
                   NULL);
}

BOOLEAN
RtlpCreateAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
   HANDLE_TABLE_ENTRY ExEntry;
   HANDLE Handle;
   USHORT HandleIndex;

   /* Initialize ex handle table entry */
   ExEntry.Object = Entry;
   ExEntry.GrantedAccess = 0x1; /* FIXME - valid handle */

   /* Create ex handle */
   Handle = ExCreateHandle(AtomTable->ExHandleTable,
                           &ExEntry);
   if (!Handle) return FALSE;

   /* Calculate HandleIndex (by getting rid of the first two bits) */
   HandleIndex = (USHORT)((ULONG_PTR)Handle >> 2);

   /* Index must be less than 0xC000 */
   if (HandleIndex >= 0xC000)
   {
       /* Destroy ex handle */
       ExDestroyHandle(AtomTable->ExHandleTable,
                       Handle,
                       NULL);

       /* Return failure */
       return FALSE;
   }

   /* Initialize atom table entry */
   Entry->HandleIndex = HandleIndex;
   Entry->Atom = 0xC000 + HandleIndex;

   /* Return success */
   return TRUE;
}

PRTL_ATOM_TABLE_ENTRY
RtlpGetAtomEntry(PRTL_ATOM_TABLE AtomTable, ULONG Index)
{
   PHANDLE_TABLE_ENTRY ExEntry;
   PRTL_ATOM_TABLE_ENTRY Entry = NULL;

   /* NOTE: There's no need to explicitly enter a critical region because it's
            guaranteed that we're in a critical region right now (as we hold
            the atom table lock) */

   ExEntry = ExMapHandleToPointer(AtomTable->ExHandleTable,
                                  (HANDLE)((ULONG_PTR)Index << 2));
   if (ExEntry != NULL)
   {
      Entry = ExEntry->Object;

      ExUnlockHandleTableEntry(AtomTable->ExHandleTable,
                               ExEntry);
   }

   return Entry;
}

/* Ldr SEH-Protected access to IMAGE_NT_HEADERS */

/* Rtl SEH-Free version of this */
NTSTATUS
NTAPI
RtlpImageNtHeaderEx(
    _In_ ULONG Flags,
    _In_ PVOID Base,
    _In_ ULONG64 Size,
    _Out_ PIMAGE_NT_HEADERS *OutHeaders);

/*
 * @implemented
 * @note This is here so that we do not drag SEH into rosload, freeldr and bootmgfw
 */
NTSTATUS
NTAPI
RtlImageNtHeaderEx(
    _In_ ULONG Flags,
    _In_ PVOID Base,
    _In_ ULONG64 Size,
    _Out_ PIMAGE_NT_HEADERS *OutHeaders)
{
    NTSTATUS Status;

    /* Assume failure. RtlpImageNtHeaderEx does this too, but the call below is SEH-guarded and may fault before it gets the chance. */
    if (OutHeaders != NULL)
        *OutHeaders = NULL;

    _SEH2_TRY
    {
        Status = RtlpImageNtHeaderEx(Flags, Base, Size, OutHeaders);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* Fail with the SEH error */
        Status = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    return Status;
}

/*
 * Ldr Resource support code
 */

IMAGE_RESOURCE_DIRECTORY *find_entry_by_name( IMAGE_RESOURCE_DIRECTORY *dir,
                                              LPCWSTR name, void *root,
                                              int want_dir );
IMAGE_RESOURCE_DIRECTORY *find_entry_by_id( IMAGE_RESOURCE_DIRECTORY *dir,
                                            USHORT id, void *root, int want_dir );
IMAGE_RESOURCE_DIRECTORY *find_first_entry( IMAGE_RESOURCE_DIRECTORY *dir,
                                            void *root, int want_dir );

/**********************************************************************
 *  find_entry
 *
 * Find a resource entry
 */
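/*
 * The resource directory is a three-level tree (type -> name -> language);
 * 'level' selects how many levels of 'info' to consume, and 'want_dir'
 * selects whether a directory or a data entry is expected at the last level.
 */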
NTSTATUS find_entry( PVOID BaseAddress, LDR_RESOURCE_INFO *info,
                     ULONG level, void **ret, int want_dir )
{
    ULONG size;
    void *root;
    IMAGE_RESOURCE_DIRECTORY *resdirptr;

    root = RtlImageDirectoryEntryToData( BaseAddress, TRUE, IMAGE_DIRECTORY_ENTRY_RESOURCE, &size );
    if (!root) return STATUS_RESOURCE_DATA_NOT_FOUND;
    if (size < sizeof(*resdirptr)) return STATUS_RESOURCE_DATA_NOT_FOUND;
    resdirptr = root;

    if (!level--) goto done;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Type, root, want_dir || level )))
        return STATUS_RESOURCE_TYPE_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;

    resdirptr = *ret;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Name, root, want_dir || level )))
        return STATUS_RESOURCE_NAME_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;
    if (level) return STATUS_INVALID_PARAMETER;  /* level > 3 */

    resdirptr = *ret;

    if ((*ret = find_first_entry( resdirptr, root, want_dir ))) return STATUS_SUCCESS;

    return STATUS_RESOURCE_DATA_NOT_FOUND;

done:
    *ret = resdirptr;
    return STATUS_SUCCESS;
}

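/*
 * Copy memory inside a SEH block so that a fault on either buffer is turned
 * into a status code instead of taking down the system.
 */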
NTSTATUS
NTAPI
RtlpSafeCopyMemory(
   _Out_writes_bytes_all_(Length) VOID UNALIGNED *Destination,
   _In_reads_bytes_(Length) CONST VOID UNALIGNED *Source,
   _In_ SIZE_T Length)
{
    _SEH2_TRY
    {
        RtlCopyMemory(Destination, Source, Length);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        _SEH2_YIELD(return _SEH2_GetExceptionCode());
    }
    _SEH2_END;

    return STATUS_SUCCESS;
}

BOOLEAN
NTAPI
RtlCallVectoredExceptionHandlers(_In_ PEXCEPTION_RECORD ExceptionRecord,
                                 _In_ PCONTEXT Context)
{
    /* In the kernel we don't have vectored exception handlers */
    return FALSE;
}

VOID
NTAPI
RtlCallVectoredContinueHandlers(_In_ PEXCEPTION_RECORD ExceptionRecord,
                                _In_ PCONTEXT Context)
{
    /* No vectored continue handlers either in kernel mode */
    return;
}

#ifdef _M_AMD64

PRUNTIME_FUNCTION
NTAPI
RtlpLookupDynamicFunctionEntry(
    _In_ DWORD64 ControlPc,
    _Out_ PDWORD64 ImageBase,
    _In_ PUNWIND_HISTORY_TABLE HistoryTable)
{
    /* No support for dynamic function tables in the kernel */
    return NULL;
}

#endif

/* EOF */