1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4 #include "common.h"
5 #include "CommonTypes.h"
6 #include "CommonMacros.h"
7 #include "daccess.h"
8 #include "PalRedhawkCommon.h"
9 #include "PalRedhawk.h"
10 #include "rhassert.h"
11 #include "CommonMacros.inl"
12 #include "regdisplay.h"
13 #include "TargetPtrs.h"
14 #include "eetype.h"
15 #include "ObjectLayout.h"
16 #include "varint.h"
17
18 #include "gcinfo.h"
19 #include "RHCodeMan.h"
20
21 #include "ICodeManager.h"
22
23
// Ensure that EEMethodInfo fits into the space reserved by MethodInfo.
// GetEEMethodInfo (below) overlays EEMethodInfo on MethodInfo storage via a cast,
// so this must hold or the overlay would read/write out of bounds.
static_assert(sizeof(EEMethodInfo) <= sizeof(MethodInfo), "EEMethodInfo does not fit into a MethodInfo");
26
GetEEMethodInfo(MethodInfo * pMethodInfo)27 EEMethodInfo * GetEEMethodInfo(MethodInfo * pMethodInfo)
28 {
29 return (EEMethodInfo *)pMethodInfo;
30 }
31
// Forward a single object-reference root to the GC enumeration callback.
// 'p' is the ADDRESS of the object reference (not the reference itself);
// 'flags' carries GC_CALL_* attributes (pinned / interior) for this root.
inline void ReportObject(GCEnumContext * hCallback, PTR_PTR_Object p, UInt32 flags)
{
    (hCallback->pCallback)(hCallback, (PTR_PTR_VOID)p, flags);
}
36
37 //
38 // This template is used to map from the CalleeSavedRegNum enum to the correct field in the REGDISPLAY struct.
39 // It should compile away to simply an inlined field access. Since we intentionally have conditionals that
40 // are constant at compile-time, we need to disable the level-4 warning related to that.
41 //
42 #ifdef _TARGET_ARM_
43
44 #pragma warning(push)
45 #pragma warning(disable:4127) // conditional expression is constant
// Compile-time mapping (ARM): callee-saved register number -> address of its saved
// value in the REGDISPLAY. Since regNum is a template constant, the switch folds to
// a single field access.
template <CalleeSavedRegNum regNum>
PTR_PTR_Object GetRegObjectAddr(REGDISPLAY * pContext)
{
    switch (regNum)
    {
    case CSR_NUM_R4: return (PTR_PTR_Object)pContext->pR4;
    case CSR_NUM_R5: return (PTR_PTR_Object)pContext->pR5;
    case CSR_NUM_R6: return (PTR_PTR_Object)pContext->pR6;
    case CSR_NUM_R7: return (PTR_PTR_Object)pContext->pR7;
    case CSR_NUM_R8: return (PTR_PTR_Object)pContext->pR8;
    case CSR_NUM_R9: return (PTR_PTR_Object)pContext->pR9;
    case CSR_NUM_R10: return (PTR_PTR_Object)pContext->pR10;
    case CSR_NUM_R11: return (PTR_PTR_Object)pContext->pR11;
    // NOTE: LR is omitted because it may not be live except as a 'scratch' reg
    }
    UNREACHABLE_MSG("unexpected CalleeSavedRegNum");
}
63 #pragma warning(pop)
64
GetRegObjectAddr(CalleeSavedRegNum regNum,REGDISPLAY * pContext)65 PTR_PTR_Object GetRegObjectAddr(CalleeSavedRegNum regNum, REGDISPLAY * pContext)
66 {
67 switch (regNum)
68 {
69 case CSR_NUM_R4: return (PTR_PTR_Object)pContext->pR4;
70 case CSR_NUM_R5: return (PTR_PTR_Object)pContext->pR5;
71 case CSR_NUM_R6: return (PTR_PTR_Object)pContext->pR6;
72 case CSR_NUM_R7: return (PTR_PTR_Object)pContext->pR7;
73 case CSR_NUM_R8: return (PTR_PTR_Object)pContext->pR8;
74 case CSR_NUM_R9: return (PTR_PTR_Object)pContext->pR9;
75 case CSR_NUM_R10: return (PTR_PTR_Object)pContext->pR10;
76 case CSR_NUM_R11: return (PTR_PTR_Object)pContext->pR11;
77 // NOTE: LR is omitted because it may not be live except as a 'scratch' reg
78 }
79 UNREACHABLE_MSG("unexpected CalleeSavedRegNum");
80 }
81
GetScratchRegObjectAddr(ScratchRegNum regNum,REGDISPLAY * pContext)82 PTR_PTR_Object GetScratchRegObjectAddr(ScratchRegNum regNum, REGDISPLAY * pContext)
83 {
84 switch (regNum)
85 {
86 case SR_NUM_R0: return (PTR_PTR_Object)pContext->pR0;
87 case SR_NUM_R1: return (PTR_PTR_Object)pContext->pR1;
88 case SR_NUM_R2: return (PTR_PTR_Object)pContext->pR2;
89 case SR_NUM_R3: return (PTR_PTR_Object)pContext->pR3;
90 case SR_NUM_R12: return (PTR_PTR_Object)pContext->pR12;
91 case SR_NUM_LR: return (PTR_PTR_Object)pContext->pLR;
92 }
93 UNREACHABLE_MSG("unexpected ScratchRegNum");
94 }
95
// Decode a "register set" descriptor (case 2, ARM) and report each callee-saved
// register whose mask bit is set. Registers in a set are never pinned or interior.
void ReportRegisterSet(UInt8 regSet, REGDISPLAY * pContext, GCEnumContext * hCallback)
{
    // 2. 00lRRRRR - normal "register set" encoding, pinned and interior attributes both false
    //      a. l - this is the last descriptor
    //      b. RRRRR - this is the register mask for { r4, r5, r6, r7, r8 }

    if (regSet & CSR_MASK_R4) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_R4>(pContext), 0); }
    if (regSet & CSR_MASK_R5) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_R5>(pContext), 0); }
    if (regSet & CSR_MASK_R6) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_R6>(pContext), 0); }
    if (regSet & CSR_MASK_R7) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_R7>(pContext), 0); }
    if (regSet & CSR_MASK_R8) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_R8>(pContext), 0); }
}
108
109 #elif defined(_TARGET_ARM64_)
110
111 #pragma warning(push)
112 #pragma warning(disable:4127) // conditional expression is constant
// Compile-time mapping (ARM64): callee-saved register number -> address of its saved
// value in the REGDISPLAY. Since regNum is a template constant, the switch folds to
// a single field access.
template <CalleeSavedRegNum regNum>
PTR_PTR_Object GetRegObjectAddr(REGDISPLAY * pContext)
{
    switch (regNum)
    {
    case CSR_NUM_X19: return (PTR_PTR_Object)pContext->pX19;
    case CSR_NUM_X20: return (PTR_PTR_Object)pContext->pX20;
    case CSR_NUM_X21: return (PTR_PTR_Object)pContext->pX21;
    case CSR_NUM_X22: return (PTR_PTR_Object)pContext->pX22;
    case CSR_NUM_X23: return (PTR_PTR_Object)pContext->pX23;
    case CSR_NUM_X24: return (PTR_PTR_Object)pContext->pX24;
    case CSR_NUM_X25: return (PTR_PTR_Object)pContext->pX25;
    case CSR_NUM_X26: return (PTR_PTR_Object)pContext->pX26;
    case CSR_NUM_X27: return (PTR_PTR_Object)pContext->pX27;
    case CSR_NUM_X28: return (PTR_PTR_Object)pContext->pX28;
    case CSR_NUM_FP : return (PTR_PTR_Object)pContext->pFP ;
    }
    UNREACHABLE_MSG("unexpected CalleeSavedRegNum");
}
132 #pragma warning(pop)
133
GetRegObjectAddr(CalleeSavedRegNum regNum,REGDISPLAY * pContext)134 PTR_PTR_Object GetRegObjectAddr(CalleeSavedRegNum regNum, REGDISPLAY * pContext)
135 {
136 switch (regNum)
137 {
138 case CSR_NUM_X19: return (PTR_PTR_Object)pContext->pX19;
139 case CSR_NUM_X20: return (PTR_PTR_Object)pContext->pX20;
140 case CSR_NUM_X21: return (PTR_PTR_Object)pContext->pX21;
141 case CSR_NUM_X22: return (PTR_PTR_Object)pContext->pX22;
142 case CSR_NUM_X23: return (PTR_PTR_Object)pContext->pX23;
143 case CSR_NUM_X24: return (PTR_PTR_Object)pContext->pX24;
144 case CSR_NUM_X25: return (PTR_PTR_Object)pContext->pX25;
145 case CSR_NUM_X26: return (PTR_PTR_Object)pContext->pX26;
146 case CSR_NUM_X27: return (PTR_PTR_Object)pContext->pX27;
147 case CSR_NUM_X28: return (PTR_PTR_Object)pContext->pX28;
148 case CSR_NUM_FP : return (PTR_PTR_Object)pContext->pFP ;
149 }
150 UNREACHABLE_MSG("unexpected CalleeSavedRegNum");
151 }
152
GetScratchRegObjectAddr(ScratchRegNum regNum,REGDISPLAY * pContext)153 PTR_PTR_Object GetScratchRegObjectAddr(ScratchRegNum regNum, REGDISPLAY * pContext)
154 {
155 switch (regNum)
156 {
157 case SR_NUM_X0: return (PTR_PTR_Object)pContext->pX0;
158 case SR_NUM_X1: return (PTR_PTR_Object)pContext->pX1;
159 case SR_NUM_X2: return (PTR_PTR_Object)pContext->pX2;
160 case SR_NUM_X3: return (PTR_PTR_Object)pContext->pX3;
161 case SR_NUM_X4: return (PTR_PTR_Object)pContext->pX4;
162 case SR_NUM_X5: return (PTR_PTR_Object)pContext->pX5;
163 case SR_NUM_X6: return (PTR_PTR_Object)pContext->pX6;
164 case SR_NUM_X7: return (PTR_PTR_Object)pContext->pX7;
165 case SR_NUM_X8: return (PTR_PTR_Object)pContext->pX8;
166 case SR_NUM_X9: return (PTR_PTR_Object)pContext->pX9;
167 case SR_NUM_X10: return (PTR_PTR_Object)pContext->pX10;
168 case SR_NUM_X11: return (PTR_PTR_Object)pContext->pX11;
169 case SR_NUM_X12: return (PTR_PTR_Object)pContext->pX12;
170 case SR_NUM_X13: return (PTR_PTR_Object)pContext->pX13;
171 case SR_NUM_X14: return (PTR_PTR_Object)pContext->pX14;
172 case SR_NUM_X15: return (PTR_PTR_Object)pContext->pX15;
173 case SR_NUM_XIP0: return (PTR_PTR_Object)pContext->pX16;
174 case SR_NUM_XIP1: return (PTR_PTR_Object)pContext->pX17;
175 case SR_NUM_LR: return (PTR_PTR_Object)pContext->pLR;
176 }
177 UNREACHABLE_MSG("unexpected ScratchRegNum");
178 }
179
// Decode a "register set" descriptor (case 2, ARM64) and report each callee-saved
// register whose mask bit is set. The ARM64 variant may consume one extra byte from
// pCursor (advanced in place) to cover the larger register set. Registers in a set
// are never pinned or interior.
void ReportRegisterSet(UInt8 firstEncByte, REGDISPLAY * pContext, GCEnumContext * hCallback, PTR_UInt8 & pCursor)
{
    // 2. 00lvRRRR [RRRRRRRR] - normal "register set" encoding, pinned and interior attributes both false
    //      a. l - this is the last descriptor
    //      b. v - extra byte follows
    //      c. RRRR - register mask for { lr, x19-x21 }
    //      d. RRRRRRRR - register mask for { x22-x28, fp } iff 'v' is 1

    UInt16 regSet = (firstEncByte & 0xF);
    if (firstEncByte & 0x10) { regSet |= (*pCursor++ << 4); }

    // LR must never appear in a register-set encoding; it is only reported as a scratch reg.
    ASSERT(!(regSet & CSR_MASK_LR));
    if (regSet & CSR_MASK_X19) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X19>(pContext), 0); }
    if (regSet & CSR_MASK_X20) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X20>(pContext), 0); }
    if (regSet & CSR_MASK_X21) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X21>(pContext), 0); }
    if (regSet & CSR_MASK_X22) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X22>(pContext), 0); }
    if (regSet & CSR_MASK_X23) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X23>(pContext), 0); }
    if (regSet & CSR_MASK_X24) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X24>(pContext), 0); }
    if (regSet & CSR_MASK_X25) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X25>(pContext), 0); }
    if (regSet & CSR_MASK_X26) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X26>(pContext), 0); }
    if (regSet & CSR_MASK_X27) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X27>(pContext), 0); }
    if (regSet & CSR_MASK_X28) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X28>(pContext), 0); }
    if (regSet & CSR_MASK_FP ) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_FP >(pContext), 0); }
}
204
205 #else // _TARGET_ARM_ || _TARGET_ARM64_
206
207 #pragma warning(push)
208 #pragma warning(disable:4127) // conditional expression is constant
// Compile-time mapping (x86/amd64): callee-saved register number -> address of its
// saved value in the REGDISPLAY. Since regNum is a template constant, the switch
// folds to a single field access. r12-r15 exist only on amd64.
template <CalleeSavedRegNum regNum>
PTR_PTR_Object GetRegObjectAddr(REGDISPLAY * pContext)
{
    switch (regNum)
    {
    case CSR_NUM_RBX: return (PTR_PTR_Object)pContext->pRbx;
    case CSR_NUM_RSI: return (PTR_PTR_Object)pContext->pRsi;
    case CSR_NUM_RDI: return (PTR_PTR_Object)pContext->pRdi;
    case CSR_NUM_RBP: return (PTR_PTR_Object)pContext->pRbp;
#ifdef _TARGET_AMD64_
    case CSR_NUM_R12: return (PTR_PTR_Object)pContext->pR12;
    case CSR_NUM_R13: return (PTR_PTR_Object)pContext->pR13;
    case CSR_NUM_R14: return (PTR_PTR_Object)pContext->pR14;
    case CSR_NUM_R15: return (PTR_PTR_Object)pContext->pR15;
#endif // _TARGET_AMD64_
    }
    UNREACHABLE_MSG("unexpected CalleeSavedRegNum");
}
227 #pragma warning(pop)
228
GetRegObjectAddr(CalleeSavedRegNum regNum,REGDISPLAY * pContext)229 PTR_PTR_Object GetRegObjectAddr(CalleeSavedRegNum regNum, REGDISPLAY * pContext)
230 {
231 switch (regNum)
232 {
233 case CSR_NUM_RBX: return (PTR_PTR_Object)pContext->pRbx;
234 case CSR_NUM_RSI: return (PTR_PTR_Object)pContext->pRsi;
235 case CSR_NUM_RDI: return (PTR_PTR_Object)pContext->pRdi;
236 case CSR_NUM_RBP: return (PTR_PTR_Object)pContext->pRbp;
237 #ifdef _TARGET_AMD64_
238 case CSR_NUM_R12: return (PTR_PTR_Object)pContext->pR12;
239 case CSR_NUM_R13: return (PTR_PTR_Object)pContext->pR13;
240 case CSR_NUM_R14: return (PTR_PTR_Object)pContext->pR14;
241 case CSR_NUM_R15: return (PTR_PTR_Object)pContext->pR15;
242 #endif // _TARGET_AMD64_
243 }
244 UNREACHABLE_MSG("unexpected CalleeSavedRegNum");
245 }
246
GetScratchRegObjectAddr(ScratchRegNum regNum,REGDISPLAY * pContext)247 PTR_PTR_Object GetScratchRegObjectAddr(ScratchRegNum regNum, REGDISPLAY * pContext)
248 {
249 switch (regNum)
250 {
251 case SR_NUM_RAX: return (PTR_PTR_Object)pContext->pRax;
252 case SR_NUM_RCX: return (PTR_PTR_Object)pContext->pRcx;
253 case SR_NUM_RDX: return (PTR_PTR_Object)pContext->pRdx;
254 #ifdef _TARGET_AMD64_
255 case SR_NUM_R8 : return (PTR_PTR_Object)pContext->pR8;
256 case SR_NUM_R9 : return (PTR_PTR_Object)pContext->pR9;
257 case SR_NUM_R10: return (PTR_PTR_Object)pContext->pR10;
258 case SR_NUM_R11: return (PTR_PTR_Object)pContext->pR11;
259 #endif // _TARGET_AMD64_
260 }
261 UNREACHABLE_MSG("unexpected ScratchRegNum");
262 }
263
// Decode a "register set" descriptor (case 2, x86/amd64) and report each callee-saved
// register whose mask bit is set. Registers in a set are never pinned or interior.
void ReportRegisterSet(UInt8 regSet, REGDISPLAY * pContext, GCEnumContext * hCallback)
{
    // 2. 00lRRRRR - normal "register set" encoding, pinned and interior attributes both false
    //      a. l - this is the last descriptor
    //      b. RRRRR - this is the register mask for { rbx, rsi, rdi, rbp, r12 }

    if (regSet & CSR_MASK_RBX) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_RBX>(pContext), 0); }
    if (regSet & CSR_MASK_RSI) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_RSI>(pContext), 0); }
    if (regSet & CSR_MASK_RDI) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_RDI>(pContext), 0); }
    if (regSet & CSR_MASK_RBP) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_RBP>(pContext), 0); }
#ifdef _TARGET_AMD64_
    if (regSet & CSR_MASK_R12) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_R12>(pContext), 0); }
#endif
}
278
279 #endif // _TARGET_ARM_
280
// Decode and report a single register root (case 3), which carries explicit
// pinned/interior attributes. On ARM64 only, an encoded register number of 0 means
// the real register number follows in the next byte (pCursor is advanced in place).
void ReportRegister(UInt8 regEnc, REGDISPLAY * pContext, GCEnumContext * hCallback, PTR_UInt8 & pCursor)
{
    // 3. 01liprrr [ARM64 register] - more general register encoding with pinned and interior attributes
    //      a. l - last descriptor
    //      b. i - interior
    //      c. p - pinned
    //      d. rrr - register number { rbx, rsi, rdi, rbp, r12, r13, r14, r15 }, ARM = { r4-r11 }, ARM64 = { x19-x25 }
    //         ARM64: if rrr = 0, the register number { x26-x28, fp } follows in the next byte

    UInt32 flags = 0;
    if (regEnc & 0x08) { flags |= GC_CALL_PINNED; }
    if (regEnc & 0x10) { flags |= GC_CALL_INTERIOR; }

    UInt8 regNum = (regEnc & 0x07);
#ifdef _TARGET_ARM64_
    // rrr == 0 escapes to a follow-on byte holding the full register number.
    if (!regNum) { regNum = *pCursor++; }
#else
    UNREFERENCED_PARAMETER(pCursor);
#endif
    PTR_PTR_Object pRoot = GetRegObjectAddr((CalleeSavedRegNum)regNum, pContext);
    ReportObject(hCallback, pRoot, flags);
}
303
// Report a root stored in a "local slot" - one of the stack slots normally used for
// local variables - by translating the slot number into a frame-pointer- or
// stack-pointer-relative offset using the frame layout described by the GCInfoHeader.
// The offset arithmetic differs per target because each target lays out the
// callee-saved-register area and locals area differently.
void ReportLocalSlot(UInt32 slotNum, REGDISPLAY * pContext, GCEnumContext * hCallback, GCInfoHeader * pHeader)
{
    // In order to map from a 'local slot' to a frame pointer offset, we need to consult the GCInfoHeader of
    // the main code body, but all we have is the GCInfoHeader of the funclet. So, for now, this is
    // disallowed. A larger encoding must be used.
    ASSERT_MSG(!pHeader->IsFunclet(), "A 'local slot' encoding should not be used in a funclet.");

    if (pHeader->HasFramePointer())
    {
        Int32 rbpOffset;
#ifdef _TARGET_ARM_
        // ARM places the FP at the top of the locals area.
        rbpOffset = pHeader->GetFrameSize() - ((slotNum + 1) * sizeof(void *));
#elif defined(_TARGET_ARM64_)
        if (pHeader->AreFPLROnTop())
            rbpOffset = -(Int32)((slotNum + 1) * sizeof(void *));
        else
            rbpOffset = ((slotNum + 2) * sizeof(void *));
#else
# ifdef _TARGET_AMD64_
        if (pHeader->GetFramePointerOffset() != 0)
            rbpOffset = (slotNum * sizeof(void *));
        else
# endif // _TARGET_AMD64_
            rbpOffset = -pHeader->GetPreservedRegsSaveSize() - (slotNum * sizeof(void *));
#endif
        PTR_PTR_Object pRoot = (PTR_PTR_Object)(pContext->GetFP() + rbpOffset);
        ReportObject(hCallback, pRoot, 0);
    }
    else
    {
#ifdef _TARGET_X86_
        // @TODO: X86: need to pass in current stack level
        UNREACHABLE_MSG("NYI - ESP frames");
#endif // _TARGET_X86_

        Int32 rspOffset = pHeader->GetFrameSize() - ((slotNum + 1) * sizeof(void *));
        PTR_PTR_Object pRoot = (PTR_PTR_Object)(pContext->GetSP() + rspOffset);
        ReportObject(hCallback, pRoot, 0);
    }
}
345
// Report a single stack-based root at 'offset' bytes from either the frame pointer
// (framePointerBased == true) or the stack pointer. On x86 frames with dynamic stack
// alignment, non-negative frame-relative offsets are instead based off the parameter
// pointer (GetPP) - presumably these address incoming arguments; confirm against the
// x86 frame-layout code before relying on that.
// NOTE: the x86 #ifdef below deliberately leaves an 'if/else' dangling across the
// preprocessor boundary so that 'basePointer = pContext->GetFP();' serves as the
// else-branch on x86 and as an unconditional statement elsewhere. Do not reformat.
void ReportStackSlot(bool framePointerBased, Int32 offset, UInt32 gcFlags, REGDISPLAY * pContext,
                     GCEnumContext * hCallback, bool hasDynamicAlignment)
{
    UIntNative basePointer;
    if (framePointerBased)
    {
#ifdef _TARGET_X86_
        if (hasDynamicAlignment && offset >= 0)
            basePointer = pContext->GetPP();
        else
#else
        // avoid warning about unused parameter
        hasDynamicAlignment;
#endif // _TARGET_X86_
        basePointer = pContext->GetFP();
    }
    else
    {
        basePointer = pContext->GetSP();
    }
    PTR_PTR_Object pRoot = (PTR_PTR_Object)(basePointer + offset);
    ReportObject(hCallback, pRoot, gcFlags);
}
369
ReportLocalSlots(UInt8 localsEnc,REGDISPLAY * pContext,GCEnumContext * hCallback,GCInfoHeader * pHeader)370 void ReportLocalSlots(UInt8 localsEnc, REGDISPLAY * pContext, GCEnumContext * hCallback, GCInfoHeader * pHeader)
371 {
372 if (localsEnc & 0x10)
373 {
374 // 4. 10l1SSSS - "local stack slot set" encoding, pinned and interior attributes both false
375 // a. l - last descriptor
376 // b. SSSS - set of "local slots" #0 - #3 - local slot 0 is at offset -8 from the last pushed
377 // callee saved register, local slot 1 is at offset - 16, etc - in other words, these are the
378 // slots normally used for locals
379 if (localsEnc & 0x01) { ReportLocalSlot(0, pContext, hCallback, pHeader); }
380 if (localsEnc & 0x02) { ReportLocalSlot(1, pContext, hCallback, pHeader); }
381 if (localsEnc & 0x04) { ReportLocalSlot(2, pContext, hCallback, pHeader); }
382 if (localsEnc & 0x08) { ReportLocalSlot(3, pContext, hCallback, pHeader); }
383 }
384 else
385 {
386 // 5. 10l0ssss - "local slot" encoding, pinned and interior attributes are both false
387 // a. l - last descriptor
388 // b. ssss - "local slot" #4 - #19
389 UInt32 localNum = (localsEnc & 0xF) + 4;
390 ReportLocalSlot(localNum, pContext, hCallback, pHeader);
391 }
392 }
393
// Decode a [multiple] stack-slot descriptor (case 6) and report each described slot.
// Consumes a variable-length offset and an optional variable-length mask from
// pCursor (advanced in place); with a mask, up to 33 slots sharing the same
// pinned/interior attributes can be reported starting at the base offset.
void ReportStackSlots(UInt8 firstEncByte, REGDISPLAY * pContext, GCEnumContext * hCallback, PTR_UInt8 & pCursor, bool hasDynamicAlignment)
{
    // 6. 11lipfsm {offset} [mask] - [multiple] stack slot encoding
    //      a. l - last descriptor
    //      b. i - interior attribute
    //      c. p - pinned attribute
    //      d. f - 1: frame pointer relative, 0: sp relative
    //      e. s - offset sign
    //      f. m - mask follows
    //      g. offset - variable length unsigned integer
    //      h. mask - variable length unsigned integer (only present if m-bit is 1) - this can describe
    //         multiple stack locations with the same attributes. E.g., if you want to describe stack
    //         locations 0x20, 0x28, 0x38, you would give a (starting) offset of 0x20 and a mask of
    //         000000101 = 0x05. Up to 33 stack locations can be described.

    UInt32 flags = 0;
    if (firstEncByte & 0x08) { flags |= GC_CALL_PINNED; }
    if (firstEncByte & 0x10) { flags |= GC_CALL_INTERIOR; }

    bool framePointerBased = (firstEncByte & 0x04);
    bool isNegative        = (firstEncByte & 0x02);
    bool hasMask           = (firstEncByte & 0x01);

    // The encoded offset magnitude is always non-negative; the sign comes from the s-bit.
    Int32 offset = (Int32) VarInt::ReadUnsigned(pCursor);
    ASSERT(offset >= 0);

    ReportStackSlot(framePointerBased, (isNegative ? -offset : offset), flags,
                    pContext, hCallback, hasDynamicAlignment);

    if (hasMask)
    {
        // Each mask bit corresponds to the next pointer-sized slot above the base offset.
        UInt32 mask = VarInt::ReadUnsigned(pCursor);
        while (mask != 0)
        {
            offset += sizeof(void *);
            if (mask & 0x01)
            {
                ReportStackSlot(framePointerBased, (isNegative ? -offset : offset), flags,
                                pContext, hCallback, hasDynamicAlignment);
            }
            mask >>= 1;
        }
    }
}
438
// Reads a 7-bit-encoded register mask, advancing pCursor past the consumed bytes:
//   - 0RRRRRRR for non-ARM64 registers and { x0-x6 } ARM64 registers
//   - 1RRRRRRR 0RRRRRRR for { x0-x13 } ARM64 registers
//   - 1RRRRRRR 1RRRRRRR 000RRRRR for { x0-x15, xip0, xip1, lr } ARM64 registers
// Each byte contributes 7 mask bits, little-endian; the high bit of a byte is the
// continuation flag. On non-ARM64 targets only the single-byte form is legal.
UInt32 ReadRegisterMaskBy7Bit(PTR_UInt8 & pCursor)
{
#ifndef _TARGET_ARM64_
    ASSERT(!(*pCursor & 0x80));
    return *pCursor++;
#else // !_TARGET_ARM64_
    UInt32 byte0 = *pCursor++;
    if (!(byte0 & 0x80))
    {
        return byte0;
    }

    UInt32 byte1 = *pCursor++;
    if (!(byte1 & 0x80))
    {
        // XOR with 0x80 discards the most significant bit of byte0
        return (byte1 << 7) ^ byte0 ^ 0x80;
    }

    UInt32 byte2 = *pCursor++;
    ASSERT(!(byte2 & 0xe0));
    // XOR with 0x4080 discards the most significant bits of byte0 and byte1
    return (byte2 << 14) ^ (byte1 << 7) ^ byte0 ^ 0x4080;
#endif // !_TARGET_ARM64_
}
468
// Decode and report live scratch (caller-saved) register roots (case 7). Reads one
// register mask plus optional interior and pinned masks from pCursor (advanced in
// place), then reports each register named in the primary mask with the attribute
// flags derived from the other two masks.
void ReportScratchRegs(UInt8 firstEncByte, REGDISPLAY * pContext, GCEnumContext * hCallback, PTR_UInt8 & pCursor)
{
    // 7. 11lip010 0RRRRRRR [0IIIIIII] [0PPPPPPP] - live scratch reg reporting, this uses the SP-xxx encoding
    //                                              from #6 since we cannot have stack locations at negative
    //                                              offsets from SP.
    //      a. l - last descriptor
    //      b. i - interior byte present
    //      c. p - pinned byte present
    //      d. RRRRRRR - scratch register mask for { rax, rcx, rdx, r8, r9, r10, r11 }, ARM = { r0-r3, r12, lr }
    //      e. IIIIIII - interior scratch register mask for { rax, rcx, rdx, r8, r9, r10, r11 } iff 'i' is 1
    //      f. PPPPPPP - pinned scratch register mask for { rax, rcx, rdx, r8, r9, r10, r11 } iff 'p' is 1
    //
    // For ARM64 the scheme above is extended to support the bigger register set:
    //    - 11lip010 0RRRRRRR [0IIIIIII] [0PPPPPPP] for { x0-x6 }
    //    - 11lip010 1RRRRRRR 0RRRRRRR [[1IIIIIII] 0IIIIIII] [[1PPPPPPP] 0PPPPPPP] for { x0-x13 }
    //    - 11lip010 1RRRRRRR 1RRRRRRR 000RRRRR [0*2(1IIIIIII) 000IIIII] [0*2(1PPPPPPP) 000PPPPP] for { x0-x15, xip0, xip1, lr }

    UInt32 regs       = ReadRegisterMaskBy7Bit(pCursor);
    UInt32 byrefRegs  = (firstEncByte & 0x10) ? ReadRegisterMaskBy7Bit(pCursor) : 0;
    UInt32 pinnedRegs = (firstEncByte & 0x08) ? ReadRegisterMaskBy7Bit(pCursor) : 0;

    for (UInt32 reg = 0; reg < RBM_SCRATCH_REG_COUNT; reg++)
    {
        UInt32 regMask = (1 << reg);

        if (regs & regMask)
        {
            UInt32 flags = 0;
            if (pinnedRegs & regMask) { flags |= GC_CALL_PINNED; }
            if (byrefRegs & regMask)  { flags |= GC_CALL_INTERIOR; }

            PTR_PTR_Object pRoot = GetScratchRegObjectAddr((ScratchRegNum)reg, pContext);
            // Skip registers for which the context provides no saved location.
            if (pRoot != NULL)
                ReportObject(hCallback, pRoot, flags);
        }
    }
}
506
// Enumerate all live object references in that function using the virtual register set. The same reference
// location cannot be enumerated multiple times (but all different references pointing to the same object
// have to be individually enumerated).
//
// The method works in two phases: (1) a linear scan of the encoded safe-point list to
// find the entry whose code offset matches 'codeOffset', which yields an offset into
// the callsite strings blob; (2) decoding that callsite root string, reporting each
// described root through 'hCallback'. If no safe point matches, nothing is reported.
void EECodeManager::EnumGcRefs(MethodGcInfoPointers *   pMethodInfo,
                               UInt32                   codeOffset,
                               REGDISPLAY *             pContext,
                               GCEnumContext *          hCallback)
{
    PTR_UInt8 pbCallsiteStringBlob = pMethodInfo->m_pbCallsiteStringBlob;
    PTR_UInt8 pbDeltaShortcutTable = pMethodInfo->m_pbDeltaShortcutTable;
    PTR_UInt8 pCursor = pMethodInfo->m_pbEncodedSafePointList;

    // Early-out for the common case of no callsites
    if ((pCursor == NULL) || (*pCursor == 0xFF))
        return;

    UInt32 commonVarCount = 0;
    PTR_UInt8 commonVarStart = NULL;
    if (pMethodInfo->GetGCInfoHeader()->HasCommonVars())
    {
        // remember only the count and the start of the table, to avoid allocating memory
        // this is a design compromise
        commonVarCount = VarInt::ReadUnsigned(pCursor);
        commonVarStart = pCursor;
        for (UInt32 i = 0; i < commonVarCount; i++)
        {
            VarInt::SkipUnsigned(pCursor);
        }
    }

    // -------------------------------------------------------------------------------------------------------
    // Decode the method GC info
    // -------------------------------------------------------------------------------------------------------
    //
    // This loop scans through the 'method info' to find a callsite offset which matches the incoming code
    // offset. Once it's found, we break out and have a pointer into the 'callsite info blob' which will
    // point at a string describing the roots that must be reported at this particular callsite. This loop
    // needs to be fast because it's linear with respect to the number of callsites in a method.
    //
    // -------------------------------------------------------------------------------------------------------
    //
    // 0ddddccc -- SMALL ENCODING
    //
    //              -- dddd is an index into the delta shortcut table
    //              -- ccc is an offset into the callsite strings blob
    //
    // 1ddddddd { info offset } -- BIG ENCODING
    //
    //              -- ddddddd is a 7-bit delta
    //              -- { info offset } is a variable-length unsigned encoding of the offset into the callsite
    //                 strings blob for this callsite.
    //
    // 10000000 { delta } -- FORWARDER
    //
    //              -- { delta } is a variable-length unsigned encoding of the offset to the next callsite
    //
    // 11111111 -- STRING TERMINATOR
    //

    UInt32 callCodeOffset = codeOffset;
    UInt32 curCodeOffset = 0;
    IntNative infoOffset = 0;

    while (curCodeOffset < callCodeOffset)
    {
    ContinueUnconditionally:
        UInt8 b = *pCursor++;

        if ((b & 0x80) == 0)
        {
            // SMALL ENCODING
            infoOffset = (b & 0x7);
            curCodeOffset += pbDeltaShortcutTable[b >> 3];
        }
        else
        {
            UInt8 lowBits = (b & 0x7F);
            // FORWARDER
            if (lowBits == 0)
            {
                curCodeOffset += VarInt::ReadUnsigned(pCursor);
                // N.B. a forwarder entry is always followed by another 'real' entry. The curCodeOffset that
                // results from consuming the forwarder entry is an INTERMEDIATE VALUE and doesn't represent
                // a code offset of an actual callsite-with-GC-info. But this intermediate value could
                // inadvertently match some other callsite between the last callsite-with-GC-info and the next
                // callsite-with-GC-info. To prevent this inadvertent match from happening, we must bypass
                // the loop termination-condition test. Therefore, 'continue' cannot be used here and we must
                // use a goto.
                goto ContinueUnconditionally;
            }
            else
            if (lowBits == 0x7F) // STRING TERMINATOR
                break;

            // BIG ENCODING
            curCodeOffset += lowBits;

            // N.B. this returns the negative of the length of the unsigned!
            infoOffset = VarInt::SkipUnsigned(pCursor);
        }
    }

    // If we reached the end of the scan loop without finding a matching callsite offset, then there must not
    // be any roots to report to the GC.
    if (curCodeOffset != callCodeOffset)
        return;

    // If we were in the BIG ENCODING case, the infoOffset will be negative. So we backup pCursor and actually
    // decode the unsigned here. This keeps the main loop above tighter by removing the conditional and
    // decode from the body of the loop.
    if (infoOffset < 0)
    {
        pCursor += infoOffset;
        infoOffset = VarInt::ReadUnsigned(pCursor);
    }

    //
    // -------------------------------------------------------------------------------------------------------
    // Decode the callsite root string
    // -------------------------------------------------------------------------------------------------------
    //
    // 1. Call sites with nothing to report are not encoded
    //
    // 2. 00lRRRRR - normal "register set" encoding, pinned and interior attributes both false
    //      a. l - this is the last descriptor
    //      b. RRRRR - this is the register mask for { rbx, rsi, rdi, rbp, r12 }, ARM = { r4-r8 }
    //
    //    For ARM64 the scheme above is extended to support the bigger register set:
    //    00lvRRRR [RRRRRRRR] - normal "register set" encoding, pinned and interior attributes both false
    //      a. l - this is the last descriptor
    //      b. v - extra byte follows
    //      c. RRRR - register mask for { lr, x19-x21 }
    //      d. RRRRRRRR - register mask for { x22-x28, fp } iff 'v' is 1
    //
    // 3. 01liprrr [ARM64 register] - more general register encoding with pinned and interior attributes
    //      a. l - last descriptor
    //      b. i - interior
    //      c. p - pinned
    //      d. rrr - register number { rbx, rsi, rdi, rbp, r12, r13, r14, r15 }, ARM = { r4-r11 }, ARM64 = { x19-x25 }
    //         ARM64: if rrr = 0, the register number { x26-x28, fp } follows in the next byte
    //
    // 4. 10l1SSSS - "local stack slot set" encoding, pinned and interior attributes both false
    //      a. l - last descriptor
    //      b. SSSS - set of "local slots" #0 - #3 - local slot 0 is at offset -8 from the last pushed
    //         callee saved register, local slot 1 is at offset - 16, etc - in other words, these are the
    //         slots normally used for locals. The non-sensical encoding with SSSS = 0000 is reserved for
    //         the "common vars" case under 8 below.
    //
    // 5. 10l0ssss - "local slot" encoding
    //      a. l - last descriptor
    //      b. ssss - "local slot" #4 - #19
    //
    // 6. 11lipfsm {offset} [mask] - [multiple] stack slot encoding
    //      a. l - last descriptor
    //      b. i - interior attribute
    //      c. p - pinned attribute
    //      d. f - 1: frame pointer relative, 0: sp relative
    //      e. s - offset sign
    //      f. m - mask follows
    //      g. offset - variable length unsigned integer
    //      h. mask - variable length unsigned integer (only present if m-bit is 1) - this can describe
    //         multiple stack locations with the same attributes. E.g., if you want to describe stack
    //         locations 0x20, 0x28, 0x38, you would give a (starting) offset of 0x20 and a mask of
    //         000000101 = 0x05. Up to 33 stack locations can be described.
    //
    // 7. 11lip010 0RRRRRRR [0IIIIIII] [0PPPPPPP] - live scratch reg reporting, this uses the SP-xxx encoding
    //                                              from #6 since we cannot have stack locations at negative
    //                                              offsets from SP.
    //      a. l - last descriptor
    //      b. i - interior byte present
    //      c. p - pinned byte present
    //      d. RRRRRRR - scratch register mask for { rax, rcx, rdx, r8, r9, r10, r11 }, ARM = { r0-r3, r12, lr }
    //      e. IIIIIII - interior scratch register mask for { rax, rcx, rdx, r8, r9, r10, r11 } iff 'i' is 1
    //      f. PPPPPPP - pinned scratch register mask for { rax, rcx, rdx, r8, r9, r10, r11 } iff 'p' is 1
    //
    //    For ARM64 the scheme above is extended to support the bigger register set:
    //    - 11lip010 0RRRRRRR [0IIIIIII] [0PPPPPPP] for { x0-x6 }
    //    - 11lip010 1RRRRRRR 0RRRRRRR [[1IIIIIII] 0IIIIIII] [[1PPPPPPP] 0PPPPPPP] for { x0-x13 }
    //    - 11lip010 1RRRRRRR 1RRRRRRR 000RRRRR [0*2(1IIIIIII) 000IIIII] [0*2(1PPPPPPP) 000PPPPP] for { x0-x15, xip0, xip1, lr }
    //
    // 8. 10z10000 [ common var index ] - "common var" encoding - the common var index references a root string
    //                                    common to several call sites
    //      a. z - common var index is 0
    //      b. common var index - 0-based index referring to one of the "common var" root strings.
    //         only present if z-bit is 0
    //
    //      this encoding is case 4, "local stack slot set", with the set SSSS = 0
    //      this case is non-sensical and hence unused for case 4
    //
    PTR_UInt8 pbCallsiteString = pbCallsiteStringBlob + (int)infoOffset;

    bool isLastEncoding;
    pCursor = pbCallsiteString;
    do
    {
        UInt8 b = *pCursor++;
        isLastEncoding = ((b & 0x20) == 0x20);

        switch (b & 0xC0)
        {
        case 0x00:
            // case 2 -- "register set"
#ifndef _TARGET_ARM64_
            ReportRegisterSet(b, pContext, hCallback);
#else
            ReportRegisterSet(b, pContext, hCallback, pCursor);
#endif
            break;
        case 0x40:
            // case 3 -- "register"
            ReportRegister(b, pContext, hCallback, pCursor);
            break;
        case 0x80:
            // case 4 -- "local slot set"
            // case 5 -- "local slot"
            // case 8 -- "common var"
            if ((b & 0xDF) == 0x90)
            {
                // case 8 -- "common var"

                UInt32 commonVarIndex = 0;
                if ((b & 0x20) == 0)
                {
                    // obtain the 0-based index
                    commonVarIndex = VarInt::ReadUnsigned(pCursor);
                    ASSERT(commonVarIndex < commonVarCount);
                }

                // skip the info offsets for the common var strings before ours
                // this is a linear search, but the number of common vars should be
                // significantly smaller than the number of call sites,
                // plus SkipUnsigned is pretty fast, so we should be ok.
                pCursor = commonVarStart;
                for (UInt32 i = 0; i < commonVarIndex; i++)
                {
                    VarInt::SkipUnsigned(pCursor);
                }

                // read the info offset for our common var string
                infoOffset = VarInt::ReadUnsigned(pCursor);

                // continue reading at that location - this is analogous to a tail call...
                pCursor = pbCallsiteStringBlob + infoOffset;
                isLastEncoding = false;
            }
            else
            {
                ReportLocalSlots(b, pContext, hCallback, pMethodInfo->GetGCInfoHeader());
            }
            break;
        case 0xC0:
            if ((b & 0xC7) == 0xC2)
                // case 7 -- "scratch reg reporting"
                ReportScratchRegs(b, pContext, hCallback, pCursor);
            else
            {
                bool hasDynamicAlignment = pMethodInfo->GetGCInfoHeader()->HasDynamicAlignment();
#ifdef _TARGET_X86_
                ASSERT_MSG(!hasDynamicAlignment || pMethodInfo->GetGCInfoHeader()->GetParamPointerReg() == RN_EBX, "NYI: non-EBX param pointer");
#endif
                // case 6 -- "stack slot" / "stack slot set"
                ReportStackSlots(b, pContext, hCallback, pCursor, hasDynamicAlignment);
            }
            break;
        }
    }
    while (!isLastEncoding);

    return;
}
778
// Consistency-check helper: in a DAC (out-of-process inspection) build a failed check
// makes the enclosing function return false instead of asserting, so a corrupt target
// does not take down the inspecting process; in a normal build it is a plain ASSERT.
#ifdef DACCESS_COMPILE
#define ASSERT_OR_DAC_RETURN_FALSE(x) if(!(x)) return false;
#else
#define ASSERT_OR_DAC_RETURN_FALSE(x) ASSERT(x)
#endif
784
785 // Unwind the current stack frame, i.e. update the virtual register set in pContext. This will be similar to
786 // the state after the function returns back to caller (IP points to after the call, Frame and Stack pointer
// have been reset, callee-saved registers restored, callee-UNsaved registers are trashed)
788 // Returns success of operation.
789 // NOTE: When making changes to this function, it is important to check whether corresponding changes
790 // are needed in GetConservativeUpperBoundForOutgoingArgs.
bool EECodeManager::UnwindStackFrame(GCInfoHeader * pInfoHeader,
                                     REGDISPLAY * pContext)
{
    // We could implement this unwind if we wanted, but there really isn't any reason
    ASSERT(pInfoHeader->GetReturnKind() != GCInfoHeader::MRK_ReturnsToNative);

    bool ebpFrame = pInfoHeader->HasFramePointer();

    //
    // Just unwind based on the info header
    //
    Int32 saveSize = pInfoHeader->GetPreservedRegsSaveSize();
    UIntNative rawRSP;

#if defined(_TARGET_AMD64_)

    if (ebpFrame)
    {
        saveSize -= sizeof(void *); // don't count RBP
        Int32 framePointerOffset = 0;
        framePointerOffset = pInfoHeader->GetFramePointerOffset();
        // Recover the bottom of the callee-saved register save area from the frame pointer.
        rawRSP = pContext->GetFP() - saveSize - framePointerOffset;
    }
    else
    {
        rawRSP = pContext->GetSP() + pInfoHeader->GetFrameSize();
    }

    PTR_UIntNative RSP = (PTR_UIntNative)rawRSP;

#if !defined(UNIX_AMD64_ABI)
    if (pInfoHeader->HasSavedXmmRegs())
    {
        typedef DPTR(Fp128) PTR_Fp128;
        // The xmm save area lies below the 16-byte-aligned base of the integer save area;
        // walk it downward, one 128-bit slot per saved register.
        PTR_Fp128 xmmSaveArea = (PTR_Fp128)(rawRSP & ~0xf);
        UInt32 savedXmmRegMask = pInfoHeader->GetSavedXmmRegMask();
        // should be a subset of xmm6-xmm15
        ASSERT((savedXmmRegMask & 0xffff003f) == 0);
        savedXmmRegMask >>= 6;
        for (int regIndex = 0; savedXmmRegMask != 0; regIndex++, savedXmmRegMask >>= 1)
        {
            if (savedXmmRegMask & 1)
            {
                --xmmSaveArea;
                pContext->Xmm[regIndex] = *xmmSaveArea;
            }
        }
    }
#endif

    if (saveSize > 0)
    {
        // Point the context's callee-saved register slots at their stack save locations.
        CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
        if (regMask & CSR_MASK_R15) { pContext->pR15 = RSP++; }
        if (regMask & CSR_MASK_R14) { pContext->pR14 = RSP++; }
        if (regMask & CSR_MASK_R13) { pContext->pR13 = RSP++; }
        if (regMask & CSR_MASK_R12) { pContext->pR12 = RSP++; }
        if (regMask & CSR_MASK_RDI) { pContext->pRdi = RSP++; }
        if (regMask & CSR_MASK_RSI) { pContext->pRsi = RSP++; }
        if (regMask & CSR_MASK_RBX) { pContext->pRbx = RSP++; }
    }

    if (ebpFrame)
    {
        pContext->pRbp = RSP++;
    }

    // handle dynamic frame alignment
    if (pInfoHeader->HasDynamicAlignment())
    {
        UNREACHABLE_MSG("Dynamic frame alignment not supported on this platform");
    }

    pContext->SetAddrOfIP((PTR_PCODE)RSP); // save off the return address location
    pContext->SetIP(*RSP++);               // pop the return address

#elif defined(_TARGET_X86_)

    // @TODO .. ESP-based methods with stack changes
    ASSERT_MSG(ebpFrame || !pInfoHeader->HasStackChanges(), "NYI -- ESP-based methods with stack changes");

    if (ebpFrame)
    {
        saveSize -= sizeof(void *); // don't count RBP
        // NOTE: unlike AMD64, no frame pointer offset is encoded on x86, so it stays 0.
        Int32 framePointerOffset = 0;
        rawRSP = pContext->GetFP() - saveSize - framePointerOffset;
    }
    else
    {
        rawRSP = pContext->GetSP() + pInfoHeader->GetFrameSize();
    }

    PTR_UIntNative RSP = (PTR_UIntNative)rawRSP;

    int registerSaveDisplacement = 0;
    // registers saved at bottom of frame in Project N
    registerSaveDisplacement = pInfoHeader->GetFrameSize();

    if (saveSize > 0)
    {
        CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
        ASSERT_MSG(ebpFrame || !(regMask & CSR_MASK_RBP), "We should never use EBP as a preserved register");
        ASSERT_MSG(!(regMask & CSR_MASK_RBX) || !pInfoHeader->HasDynamicAlignment(), "Can't have EBX as preserved regster and dynamic alignment frame pointer")
        if (regMask & CSR_MASK_RBX) { pContext->pRbx = (PTR_UIntNative)((PTR_UInt8)RSP - registerSaveDisplacement); ++RSP; } // registers saved at bottom of frame
        if (regMask & CSR_MASK_RSI) { pContext->pRsi = (PTR_UIntNative)((PTR_UInt8)RSP - registerSaveDisplacement); ++RSP; } // registers saved at bottom of frame
        if (regMask & CSR_MASK_RDI) { pContext->pRdi = (PTR_UIntNative)((PTR_UInt8)RSP - registerSaveDisplacement); ++RSP; } // registers saved at bottom of frame
    }

    if (ebpFrame)
    {
        pContext->pRbp = RSP++;
    }

    // handle dynamic frame alignment
    if (pInfoHeader->HasDynamicAlignment())
    {
        ASSERT_MSG(pInfoHeader->GetParamPointerReg() == RN_EBX, "NYI: non-EBX param pointer");
        // For x86 dynamically-aligned frames, we have two frame pointers, like this:
        //
        // esp -> [main frame]
        // ebp -> ebp save
        //        return address (copy)
        //        [variable-sized alignment allocation]
        // ebx -> ebx save
        //        Return Address
        //
        // We've unwound the stack to the copy of the return address. We must continue to unwind the stack
        // and restore EBX. Because of the variable sized space on the stack, the only way to get at EBX's
        // saved location is to read it from the current value of EBX. EBX points at the stack location to
        // which previous EBX was saved.
        RSP = (PTR_UIntNative)*(pContext->pRbx);    // RSP now points to EBX save location
        pContext->pRbx = RSP++;                     // RSP now points to original caller pushed return address.
    }

    pContext->SetAddrOfIP((PTR_PCODE)RSP); // save off the return address location
    pContext->SetIP(*RSP++);               // pop the return address

    // pop the callee-popped args
    RSP += (pInfoHeader->GetReturnPopSize() / sizeof(UIntNative));

#elif defined(_TARGET_ARM_)

    if (ebpFrame)
    {
        rawRSP = pContext->GetFP() + pInfoHeader->GetFrameSize();
    }
    else
    {
        rawRSP = pContext->GetSP() + pInfoHeader->GetFrameSize();
    }

    PTR_UIntNative RSP = (PTR_UIntNative)rawRSP;

    // Restore the pushed VFP double registers (a contiguous run starting at the
    // first-pushed register; only D8-D15 slots are expected, per the assert below).
    UInt8 vfpRegPushedCount = pInfoHeader->GetVfpRegPushedCount();
    UInt8 vfpRegFirstPushed = pInfoHeader->GetVfpRegFirstPushed();
    UInt32 regIndex = vfpRegFirstPushed - 8;
    while (vfpRegPushedCount-- > 0)
    {
        ASSERT(regIndex < 8);
        pContext->D[regIndex] = *(PTR_UInt64)RSP;
        regIndex++;
        RSP = (PTR_UIntNative)((PTR_UInt8)RSP + sizeof(UInt64));
    }

    if (saveSize > 0)
    {
        // Point the context's callee-saved register slots at their stack save locations.
        CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
        if (regMask & CSR_MASK_R4)  { pContext->pR4  = RSP++; }
        if (regMask & CSR_MASK_R5)  { pContext->pR5  = RSP++; }
        if (regMask & CSR_MASK_R6)  { pContext->pR6  = RSP++; }
        if (regMask & CSR_MASK_R7)  { pContext->pR7  = RSP++; }
        if (regMask & CSR_MASK_R8)  { pContext->pR8  = RSP++; }
        if (regMask & CSR_MASK_R9)  { pContext->pR9  = RSP++; }
        if (regMask & CSR_MASK_R10) { pContext->pR10 = RSP++; }
        if (regMask & CSR_MASK_R11) { pContext->pR11 = RSP++; }
    }

    // handle dynamic frame alignment
    if (pInfoHeader->HasDynamicAlignment())
    {
        UNREACHABLE_MSG("Dynamic frame alignment not supported on this platform");
    }

    pContext->SetAddrOfIP((PTR_PCODE)RSP); // save off the return address location
    pContext->SetIP(*RSP++);               // pop the return address

    // Skip over any argument registers the prolog pushed for this method.
    RSP += pInfoHeader->ParmRegsPushedCount();

#elif defined(_TARGET_ARM64_)

    if (ebpFrame)
    {
        rawRSP = pContext->GetFP();
    }
    else
    {
        rawRSP = pContext->GetSP();
    }

    PTR_UIntNative RSP = (PTR_UIntNative)rawRSP;

    if (ebpFrame)
    {
        // Chained frame: the FP/LR pair sits at the frame pointer.
        pContext->pFP = RSP++;
        pContext->SetAddrOfIP((PTR_PCODE)RSP); // save off the return address location
        pContext->SetIP(*RSP++);               // pop the return address
    }

    if (!pInfoHeader->AreFPLROnTop())
    {
        RSP = (PTR_UIntNative)(rawRSP + pInfoHeader->GetFrameSize());
        ASSERT(!pInfoHeader->HasGSCookie());
    }

    if (saveSize > 0)
    {
        CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
        if (regMask & CSR_MASK_LR)
        {
            ASSERT_MSG(!ebpFrame, "Chained frame cannot have CSR_MASK_LR mask set");
            pContext->SetAddrOfIP((PTR_PCODE)RSP); // save off the return address location
            pContext->SetIP(*RSP++);               // pop the return address
        }
        if (regMask & CSR_MASK_X19) { pContext->pX19 = RSP++; }
        if (regMask & CSR_MASK_X20) { pContext->pX20 = RSP++; }
        if (regMask & CSR_MASK_X21) { pContext->pX21 = RSP++; }
        if (regMask & CSR_MASK_X22) { pContext->pX22 = RSP++; }
        if (regMask & CSR_MASK_X23) { pContext->pX23 = RSP++; }
        if (regMask & CSR_MASK_X24) { pContext->pX24 = RSP++; }
        if (regMask & CSR_MASK_X25) { pContext->pX25 = RSP++; }
        if (regMask & CSR_MASK_X26) { pContext->pX26 = RSP++; }
        if (regMask & CSR_MASK_X27) { pContext->pX27 = RSP++; }
        if (regMask & CSR_MASK_X28) { pContext->pX28 = RSP++; }
        if (regMask & CSR_MASK_FP ) { ASSERT(!ebpFrame); pContext->pFP = RSP++; }
    }

    // Restore any pushed VFP registers; mask bits 0-7 correspond to D8-D15.
    UInt8 vfpRegMask = (UInt8)pInfoHeader->GetVfpRegsPushedMask();
    if (vfpRegMask)
    {
        UInt8 regIndex = 0;     // Indices 0-7 correspond to D8-D15
        do
        {
            ASSERT(regIndex < 8);
            if (vfpRegMask & 1)
                pContext->D[regIndex] = *RSP++;

            vfpRegMask >>= 1;
            regIndex++;
        } while (vfpRegMask);
    }


    // handle dynamic frame alignment
    if (pInfoHeader->HasDynamicAlignment())
    {
        UNREACHABLE_MSG("Dynamic frame alignment not supported on this platform");
    }

    // Skip over any argument registers the prolog pushed for this method.
    RSP += pInfoHeader->ParmRegsPushedCount();

#else

#error NYI - For this arch

#endif

    // RSP now addresses the first byte of the caller's frame; publish it as the unwound SP.
    pContext->SetSP((UIntNative) dac_cast<TADDR>(RSP));
    return true;
}
1060
GetReversePInvokeSaveFrame(GCInfoHeader * pHeader,REGDISPLAY * pContext)1061 PTR_VOID EECodeManager::GetReversePInvokeSaveFrame(GCInfoHeader * pHeader, REGDISPLAY * pContext)
1062 {
1063 if (pHeader->GetReturnKind() != GCInfoHeader::MRK_ReturnsToNative)
1064 return NULL;
1065
1066 Int32 frameOffset = pHeader->GetReversePinvokeFrameOffset();
1067
1068 return *(PTR_PTR_VOID)(pContext->GetFP() + frameOffset);
1069 }
1070
1071 // Given a virtual register set that has been unwound back to an active callsite within the
1072 // supplied method, this function computes an upper bound value that is guaranteed to be at
1073 // or above the top of the block of stack-passed arguments (if any) that flowed into the
1074 // callsite when the call was made. This upper bound helps the runtime apply conservative
1075 // GC reporting to stack-passed arguments in situations where it has no knowledge of the
1076 // callsite signature.
UIntNative EECodeManager::GetConservativeUpperBoundForOutgoingArgs(GCInfoHeader * pInfoHeader, REGDISPLAY * pContext)
{
    // NOTE: keep this logic in sync with UnwindStackFrame (see the note above that function).
    UIntNative upperBound;

    if (pInfoHeader->GetReturnKind() == GCInfoHeader::MRK_ReturnsToNative)
    {
        // Reverse PInvoke case.  The embedded reverse PInvoke frame is guaranteed to reside above
        // all outgoing arguments.
        upperBound = pContext->GetFP() + pInfoHeader->GetReversePinvokeFrameOffset();
    }
    else
    {
        if (pInfoHeader->HasFramePointer())
        {
#if defined(_TARGET_ARM_)

            // ARM frame pointer case.  The frame size indicates the distance between the frame pointer
            // and the lowest callee-saved register.  The lowest callee-saved register is guaranteed to
            // reside above all outgoing arguments.
            ASSERT(pInfoHeader->GetSavedRegs() != 0);
            upperBound = pContext->GetFP() + pInfoHeader->GetFrameSize();

#elif defined(_TARGET_ARM64_)

            PORTABILITY_ASSERT("@TODO: FIXME:ARM64");

#elif defined(_TARGET_X86_)

            // x86 frame pointer case.  The frame pointer is guaranteed to point to the pushed RBP
            // value found at the top of the frame.  The pushed RBP value is guaranteed to reside above
            // all outgoing arguments.
            upperBound = pContext->GetFP();

#elif defined(_TARGET_AMD64_)

            // amd64 frame pointer case.  Like on x86, it is guaranteed that there is a pushed RBP
            // value at the top of the frame which resides above all outgoing arguments.  Unlike x86,
            // the frame pointer generally points to a location that is separated from the pushed RBP
            // value by an offset that is recorded in the info header.  Recover the address of the
            // pushed RBP value by subtracting this offset.
            upperBound = pContext->GetFP() - pInfoHeader->GetFramePointerOffset();

#else
#error NYI - For this arch
#endif
        }
        else
        {
            // No frame pointer is available.  In the absence of a frame pointer, the frame size
            // indicates the distance between the post-prolog SP and the preserved registers (if any).
            // Adding the frame size to the SP is guaranteed to yield an address above all outgoing
            // arguments.
            //
            // If this frame contains one or more callee-saved register (guaranteed on ARM since at
            // least LR is saved in all functions that contain callsites), then the computed address
            // will point at the lowest callee-saved register (or possibly above it in the x86 case
            // where registers are saved at the bottom of the frame).
            //
            // If the frame contains no callee-saved registers (impossible on ARM), then the computed
            // address will point to the pushed return address.

            upperBound = pContext->GetSP() + pInfoHeader->GetFrameSize();

#if defined(_TARGET_ARM_)
            ASSERT(pInfoHeader->GetSavedRegs() != 0);
#elif defined(_TARGET_ARM64_)
            PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif
        }
    }

    return upperBound;
}
1150
GetFramePointer(GCInfoHeader * pUnwindInfo,REGDISPLAY * pContext)1151 PTR_VOID EECodeManager::GetFramePointer(GCInfoHeader * pUnwindInfo,
1152 REGDISPLAY * pContext)
1153 {
1154 return (pUnwindInfo->HasFramePointer() || pUnwindInfo->IsFunclet())
1155 ? (PTR_VOID)pContext->GetFP()
1156 : NULL;
1157 }
1158
1159 #ifndef DACCESS_COMPILE
1160
GetReturnAddressLocationForHijack(GCInfoHeader * pGCInfoHeader,UInt32 cbMethodCodeSize,PTR_UInt8 pbEpilogTable,UInt32 codeOffset,REGDISPLAY * pContext)1161 PTR_PTR_VOID EECodeManager::GetReturnAddressLocationForHijack(
1162 GCInfoHeader * pGCInfoHeader,
1163 UInt32 cbMethodCodeSize,
1164 PTR_UInt8 pbEpilogTable,
1165 UInt32 codeOffset,
1166 REGDISPLAY * pContext)
1167 {
1168 GCInfoHeader * pHeader = pGCInfoHeader;
1169
1170 // We *could* hijack a reverse-pinvoke method, but it doesn't get us much because we already synchronize
1171 // with the GC on the way back to native code.
1172 if (pHeader->GetReturnKind() == GCInfoHeader::MRK_ReturnsToNative)
1173 return NULL;
1174
1175 if (pHeader->IsFunclet())
1176 return NULL;
1177
1178 if (codeOffset < pHeader->GetPrologSize())
1179 {
1180 // @TODO: NYI -- hijack in prolog
1181 return NULL;
1182 }
1183
1184 #ifdef _ARM_
1185 // We cannot get the return address unless LR has
1186 // be saved in the prolog.
1187 if (!pHeader->IsRegSaved(CSR_MASK_LR))
1188 return NULL;
1189 #elif defined(_ARM64_)
1190 // We can get return address if LR was saved either with FP or on its own:
1191 bool ebpFrame = pHeader->HasFramePointer();
1192 if (!ebpFrame && !pHeader->IsRegSaved(CSR_MASK_LR)) {
1193 return NULL;
1194 }
1195 #endif // _ARM_
1196
1197 void ** ppvResult;
1198
1199 UInt32 epilogOffset = 0;
1200 UInt32 epilogSize = 0;
1201 if (GetEpilogOffset(pGCInfoHeader, cbMethodCodeSize, pbEpilogTable, codeOffset, &epilogOffset, &epilogSize))
1202 {
1203 #ifdef _ARM_
1204 // Disable hijacking from epilogs on ARM until we implement GetReturnAddressLocationFromEpilog.
1205 return NULL;
1206 #elif defined(_ARM64_)
1207 // Disable hijacking from epilogs on ARM64:
1208 return NULL;
1209 #else
1210 ppvResult = GetReturnAddressLocationFromEpilog(pHeader, pContext, epilogOffset, epilogSize);
1211 // Early out if GetReturnAddressLocationFromEpilog indicates a non-hijackable epilog (e.g. exception
1212 // throw epilog or tail call).
1213 if (ppvResult == NULL)
1214 return NULL;
1215 goto Finished;
1216 #endif
1217 }
1218
1219 #ifdef _ARM_
1220 // ARM always sets up R11 as an OS frame chain pointer to enable fast ETW stack walking (except in the
1221 // case where LR is not pushed, but that was handled above). The protocol specifies that the return
1222 // address is pushed at [r11, #4].
1223 ppvResult = (void **)((*pContext->pR11) + sizeof(void *));
1224 goto Finished;
1225 #elif _ARM64_
1226 ppvResult = (void **)(pContext->pLR);
1227 goto Finished;
1228 #else
1229
1230 // We are in the body of the method, so just find the return address using the unwind info.
1231 if (pHeader->HasFramePointer())
1232 {
1233 #ifdef _X86_
1234 if (pHeader->HasDynamicAlignment())
1235 {
1236 // In this case, we have the normal EBP frame pointer, but also an EBX frame pointer. Use the EBX
1237 // one, because the return address associated with that frame pointer is the one we're actually
1238 // going to return to. The other one (next to EBP) is only for EBP-chain-walking.
1239 ppvResult = (void **)((*pContext->pRbx) + sizeof(void *));
1240 goto Finished;
1241 }
1242 #endif
1243
1244 Int32 framePointerOffset = 0;
1245 #ifdef _AMD64_
1246 framePointerOffset = pHeader->GetFramePointerOffset();
1247 #endif
1248 ppvResult = (void **)((*pContext->pRbp) + sizeof(void *) - framePointerOffset);
1249 goto Finished;
1250 }
1251
1252 {
1253 // We do not have a frame pointer, but we are also not in the prolog or epilog
1254
1255 UInt8 * RSP = (UInt8 *)pContext->GetSP();
1256 RSP += pHeader->GetFrameSize();
1257 RSP += pHeader->GetPreservedRegsSaveSize();
1258
1259 // RSP should point to the return address now.
1260 ppvResult = (void**)RSP;
1261 }
1262 goto Finished;
1263 #endif
1264
1265 Finished:
1266 return ppvResult;
1267 }
1268
1269 #endif
1270
GetReturnValueKind(GCInfoHeader * pInfoHeader)1271 GCRefKind EECodeManager::GetReturnValueKind(GCInfoHeader * pInfoHeader)
1272 {
1273 static_assert((GCRefKind)GCInfoHeader::MRK_ReturnsScalar == GCRK_Scalar, "GCInfoHeader::MRK_ReturnsScalar does not match GCRK_Scalar");
1274 static_assert((GCRefKind)GCInfoHeader::MRK_ReturnsObject == GCRK_Object, "GCInfoHeader::MRK_ReturnsObject does not match GCRK_Object");
1275 static_assert((GCRefKind)GCInfoHeader::MRK_ReturnsByref == GCRK_Byref, "GCInfoHeader::MRK_ReturnsByref does not match GCRK_Byref");
1276
1277 GCInfoHeader::MethodReturnKind retKind = pInfoHeader->GetReturnKind();
1278 switch (retKind)
1279 {
1280 case GCInfoHeader::MRK_ReturnsScalar:
1281 case GCInfoHeader::MRK_ReturnsToNative:
1282 return GCRK_Scalar;
1283 case GCInfoHeader::MRK_ReturnsObject:
1284 return GCRK_Object;
1285 case GCInfoHeader::MRK_ReturnsByref:
1286 return GCRK_Byref;
1287 default:
1288 break;
1289 }
1290 UNREACHABLE_MSG("unexpected return kind");
1291 }
1292
GetEpilogOffset(GCInfoHeader * pInfoHeader,UInt32 cbMethodCodeSize,PTR_UInt8 pbEpilogTable,UInt32 codeOffset,UInt32 * epilogOffsetOut,UInt32 * epilogSizeOut)1293 bool EECodeManager::GetEpilogOffset(
1294 GCInfoHeader * pInfoHeader, UInt32 cbMethodCodeSize, PTR_UInt8 pbEpilogTable,
1295 UInt32 codeOffset, UInt32 * epilogOffsetOut, UInt32 * epilogSizeOut)
1296 {
1297 UInt32 epilogStart;
1298
1299 if (pInfoHeader->IsEpilogAtEnd())
1300 {
1301 ASSERT(pInfoHeader->GetEpilogCount() == 1);
1302 UInt32 epilogSize = pInfoHeader->GetFixedEpilogSize();
1303
1304 epilogStart = cbMethodCodeSize - epilogSize;
1305
1306 // If we're at offset 0, it's equivalent to being in the body of the method
1307 if (codeOffset > epilogStart)
1308 {
1309 *epilogOffsetOut = codeOffset - epilogStart;
1310 ASSERT(pInfoHeader->IsValidEpilogOffset(*epilogOffsetOut, epilogSize));
1311 *epilogSizeOut = epilogSize;
1312 return true;
1313 }
1314 return false;
1315 }
1316
1317 epilogStart = 0;
1318 bool hasVaryingEpilogSizes = pInfoHeader->HasVaryingEpilogSizes();
1319 for (UInt32 idx = 0; idx < pInfoHeader->GetEpilogCount(); idx++)
1320 {
1321 epilogStart += VarInt::ReadUnsigned(pbEpilogTable);
1322 UInt32 epilogSize = hasVaryingEpilogSizes ? VarInt::ReadUnsigned(pbEpilogTable) : pInfoHeader->GetFixedEpilogSize();
1323
1324 // If we're at offset 0, it's equivalent to being in the body of the method
1325 if ((epilogStart < codeOffset) && (codeOffset < (epilogStart + epilogSize)))
1326 {
1327 *epilogOffsetOut = codeOffset - epilogStart;
1328 ASSERT(pInfoHeader->IsValidEpilogOffset(*epilogOffsetOut, epilogSize));
1329 *epilogSizeOut = epilogSize;
1330 return true;
1331 }
1332 }
1333 return false;
1334 }
1335
1336 #ifndef DACCESS_COMPILE
1337
GetReturnAddressLocationFromEpilog(GCInfoHeader * pInfoHeader,REGDISPLAY * pContext,UInt32 epilogOffset,UInt32 epilogSize)1338 void ** EECodeManager::GetReturnAddressLocationFromEpilog(GCInfoHeader * pInfoHeader, REGDISPLAY * pContext,
1339 UInt32 epilogOffset, UInt32 epilogSize)
1340 {
1341 UNREFERENCED_PARAMETER(epilogSize);
1342 ASSERT(pInfoHeader->IsValidEpilogOffset(epilogOffset, epilogSize));
1343 UInt8 * pbCurrentIP = (UInt8 *) pContext->GetIP();
1344 UInt8 * pbEpilogStart = pbCurrentIP - epilogOffset;
1345
1346 //ASSERT(VerifyEpilogBytes(pInfoHeader, (Code *)pbEpilogStart));
1347 // We could find the return address of a native-callable method, but it's not very useful at the moment.
1348 ASSERT(pInfoHeader->GetReturnKind() != GCInfoHeader::MRK_ReturnsToNative);
1349 UInt8 * pbEpilog = pbEpilogStart;
1350
1351 #ifdef _X86_
1352
1353 if (pInfoHeader->HasFramePointer())
1354 {
1355 {
1356 // New Project N frames
1357
1358 int frameSize = pInfoHeader->GetFrameSize();
1359 Int32 saveSize = pInfoHeader->GetPreservedRegsSaveSize() - sizeof(void*);
1360 int distance = frameSize + saveSize;
1361
1362 if (saveSize > 0 || (0x8D == *pbEpilog) /* localloc frame */ )
1363 {
1364 // regenerate original sp
1365
1366 // lea esp, [ebp-xxx]
1367 ASSERT_MSG(0x8D == *pbEpilog, "expected lea esp, [ebp-frame size]");
1368
1369 if (distance <= 128)
1370 {
                // short format (constant as 8-bit integer)
1372 ASSERT_MSG(0x65 == *(pbEpilog + 1), "expected lea esp, [ebp-frame size]");
1373 ASSERT_MSG((UInt8)(-distance) == *(pbEpilog + 2), "expected lea esp, [ebp-frame size]");
1374 pbEpilog += 3;
1375 }
1376 else
1377 {
                // long format (constant as 32-bit integer)
1379 ASSERT_MSG(0xA5 == *(pbEpilog + 1), "expected lea esp, [ebp-frame size]");
1380 ASSERT_MSG(-distance == *(Int32*)(pbEpilog + 2), "expected lea esp, [ebp-frame size]");
1381 pbEpilog += 6;
1382 }
1383
1384 CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
1385 if (regMask & CSR_MASK_RBX) pbEpilog++; // pop ebx -- 5B
1386 if (regMask & CSR_MASK_RSI) pbEpilog++; // pop esi -- 5E
1387 if (regMask & CSR_MASK_RDI) pbEpilog++; // pop edi -- 5F
1388 }
1389
1390 if (frameSize > 0)
1391 {
            // set esp to the EBP frame chain location
1393 ASSERT_MSG(0x8B == *pbEpilog, "expected 'mov esp, ebp'");
1394 ASSERT_MSG(0xE5 == *(pbEpilog + 1), "expected 'mov esp, ebp'");
1395 pbEpilog += 2;
1396 }
1397
1398 ASSERT_MSG(0x5d == *pbEpilog, "expected 'pop ebp'");
1399
1400 // Just use the EBP frame if we haven't popped it yet
1401 if (pbCurrentIP <= pbEpilog)
1402 return (void **)((*(pContext->pRbp)) + sizeof(void *));
1403
1404 ++pbEpilog; // advance past 'pop ebp'
1405
1406 if (pInfoHeader->HasDynamicAlignment())
1407 {
1408 // For x86 dynamically-aligned frames, we have two frame pointers, like this:
1409 //
1410 // esp -> [main frame]
1411 // ebp -> ebp save
1412 // return address
1413 // [variable-sized alignment allocation]
1414 // ebx -> ebx save
1415 // Return Address
1416 //
1417 // The epilog looks like this, with the corresponding changes to the return address location.
1418 //
1419 // Correct return address location
1420 // --------------------------------
1421 // -------------------------------> ebp + 4 (or ebx + 4)
1422 // lea esp, [ebp-XXX]
1423 // pop esi
1424 // mov esp, ebp
1425 // pop ebp
1426 // -------------------------------> ebx + 4
1427 // mov esp, ebx
1428 // pop ebx
1429 // -------------------------------> esp
1430 // ret
1431
1432 ASSERT_MSG(pInfoHeader->GetParamPointerReg() == RN_EBX, "NYI: non-EBX param pointer");
1433
1434 ASSERT_MSG(0x8B == *pbEpilog, "expected 'mov esp, ebx'");
1435 ASSERT_MSG(0xE3 == *(pbEpilog + 1), "expected 'mov esp, ebx'");
1436
1437 // At this point the return address is at EBX+4, we fall-through to the code below since it's
1438 // the same there as well.
1439
1440 pbEpilog += 2; // advance past 'mov esp, ebx'
1441
1442 ASSERT_MSG(0x5b == *pbEpilog, "expected 'pop ebx'");
1443
1444 // at this point the return address is at EBX+4
1445 if (pbCurrentIP == pbEpilog)
1446 return (void **)((*(pContext->pRbx)) + sizeof(void *));
1447
1448 ++pbEpilog; // advance past 'pop ebx'
1449 }
1450
1451 // EBP has been popped, dynamic alignment has been undone, so ESP points at the return address
1452 return (void **)(pContext->SP);
1453 }
1454 }
1455 else
1456 {
1457 ASSERT_MSG(!pInfoHeader->HasStackChanges(), "NYI -- dynamic push/pop");
1458
1459 UIntNative RSP = pContext->SP;
1460
1461 int frameSize = pInfoHeader->GetFrameSize();
1462
1463 if (pbCurrentIP <= pbEpilog)
1464 RSP += frameSize;
1465
1466 if (frameSize == sizeof(void*))
1467 pbEpilog++; // 0x59, pop ecx
1468 else if ((Int8)frameSize == frameSize)
1469 pbEpilog += 3; // add esp, imm8 -- 83 c4 BYTE(frameSize)
1470 else
1471 pbEpilog += 6; // add esp, imm32 -- 81 c4 DWORD(frameSize)
1472
1473 CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
1474
1475 ASSERT_MSG(!(regMask & CSR_MASK_RBP),
1476 "We only expect RBP to be used as the frame pointer, never as a free preserved reg");
1477
1478 if (regMask & CSR_MASK_RBX)
1479 {
1480 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1481 pbEpilog += 1; // pop ebx -- 5B
1482 }
1483
1484 if (regMask & CSR_MASK_RSI)
1485 {
1486 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1487 pbEpilog += 1; // pop esi -- 5E
1488 }
1489
1490 if (regMask & CSR_MASK_RDI)
1491 {
1492 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1493 pbEpilog += 1; // pop edi -- 5F
1494 }
1495
1496 return (void **)(RSP);
1497 }
1498
1499 #elif defined(_AMD64_)
1500
1501 int frameSize = pInfoHeader->GetFrameSize();
1502 if (pInfoHeader->HasFramePointer())
1503 {
1504 bool isNewStyleFP = pInfoHeader->IsFramePointerOffsetFromSP();
1505 int preservedRegSize = pInfoHeader->GetPreservedRegsSaveSize();
1506
1507 int encodedFPOffset = isNewStyleFP ? frameSize - pInfoHeader->GetFramePointerOffsetFromSP()
1508 : -preservedRegSize + sizeof(void*);
1509
1510 // 'lea rsp, [rbp + offset]' // 48 8d 65 xx
1511 // 48 8d a5 xx xx xx xx
1512 if ((encodedFPOffset > 127) || (encodedFPOffset < -128))
1513 pbEpilog += 7;
1514 else
1515 pbEpilog += 4;
1516
1517 CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
1518
1519 if (regMask & CSR_MASK_R15) pbEpilog += 2; // pop r15 -- 41 5F
1520 if (regMask & CSR_MASK_R14) pbEpilog += 2; // pop r14 -- 41 5E
1521 if (regMask & CSR_MASK_R13) pbEpilog += 2; // pop r13 -- 41 5D
1522 if (regMask & CSR_MASK_R12) pbEpilog += 2; // pop r12 -- 41 5C
1523 if (regMask & CSR_MASK_RDI) pbEpilog++; // pop rdi -- 5F
1524 if (regMask & CSR_MASK_RSI) pbEpilog++; // pop rsi -- 5E
1525 if (regMask & CSR_MASK_RBX) pbEpilog++; // pop rbx -- 5B
1526
1527 ASSERT_MSG(0x5d == *pbEpilog, "expected pop ebp");
1528
1529 // If RBP hasn't been popped yet, we can calculate the return address location from RBP.
1530 if (pbCurrentIP <= pbEpilog)
1531 return (void **)(*(pContext->pRbp) + encodedFPOffset + preservedRegSize);
1532
1533 // EBP has been popped, so RSP points at the return address
1534 return (void **) (pContext->SP);
1535 }
1536 else
1537 {
1538 UIntNative RSP = pContext->SP;
1539
1540 if (frameSize)
1541 {
1542 if (pbCurrentIP <= pbEpilog)
1543 RSP += frameSize;
1544
1545 if (frameSize < 128)
1546 {
1547 // 'add rsp, frameSize' // 48 83 c4 xx
1548 pbEpilog += 4;
1549 }
1550 else
1551 {
1552 // 'add rsp, frameSize' // 48 81 c4 xx xx xx xx
1553 pbEpilog += 7;
1554 }
1555 }
1556
1557 CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
1558
1559 ASSERT_MSG(!(regMask & CSR_MASK_RBP),
1560 "We only expect RBP to be used as the frame pointer, never as a free preserved reg");
1561
1562 if (regMask & CSR_MASK_R15)
1563 {
1564 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1565 pbEpilog += 2; // pop r15 -- 41 5F
1566 }
1567
1568 if (regMask & CSR_MASK_R14)
1569 {
1570 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1571 pbEpilog += 2; // pop r14 -- 41 5E
1572 }
1573
1574 if (regMask & CSR_MASK_R13)
1575 {
1576 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1577 pbEpilog += 2; // pop r13 -- 41 5D
1578 }
1579
1580 if (regMask & CSR_MASK_R12)
1581 {
1582 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1583 pbEpilog += 2; // pop r12 -- 41 5C
1584 }
1585
1586 if (regMask & CSR_MASK_RDI)
1587 {
1588 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1589 pbEpilog += 1; // pop rdi -- 5F
1590 }
1591
1592 if (regMask & CSR_MASK_RSI)
1593 {
1594 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1595 pbEpilog += 1; // pop rsi -- 5E
1596 }
1597
1598 if (regMask & CSR_MASK_RBX)
1599 {
1600 if (pbCurrentIP <= pbEpilog) { RSP += sizeof(void*); }
1601 pbEpilog += 1; // pop rbx -- 5B
1602 }
1603
1604 return (void **) (RSP);
1605 }
1606
1607 #elif defined(_ARM_)
1608
1609 UInt16 * pwEpilog = (UInt16*)pbEpilog;
1610
1611 if (pwEpilog[0] == 0x46bd)
1612 {
1613 // mov sp, fp
1614 ASSERT(pInfoHeader->HasFramePointer());
1615 pwEpilog++;
1616 }
1617
1618 if (pInfoHeader->HasFramePointer() || pInfoHeader->GetFrameSize() > 0)
1619 {
1620 if ((pwEpilog[0] & 0xff80) == 0xb000)
1621 {
1622 // add sp, sp, #frameSize
1623 pwEpilog++;
1624 }
1625 else if (((pwEpilog[0] & 0xfbf0) == 0xf200) && ((pwEpilog[1] & 0x8f00) == 0x0d00))
1626 {
1627 // add sp, reg, #imm12
1628 pwEpilog += 2;
1629 }
1630 else if (((pwEpilog[0] & 0xfbf0) == 0xf240) && ((pwEpilog[1] & 0x8f00) == 0x0c00))
1631 {
1632 // movw r12, #imm16
1633 pwEpilog += 2;
1634
1635 if (((pwEpilog[0] & 0xfbf0) == 0xf2c0) && ((pwEpilog[1] & 0x8f00) == 0x0c00))
1636 {
1637 // movt r12, #imm16
1638 pwEpilog += 2;
1639 }
1640
1641 // add sp, sp, r12
1642 ASSERT((pwEpilog[0] == 0xeb0d) && (pwEpilog[1] == 0x0d0c));
1643 pwEpilog += 2;
1644 }
1645 }
1646
1647 // vpop {...}
1648 while (((pwEpilog[0] & ~(1<<6)) == 0xecbd) && ((pwEpilog[1] & 0x0f01) == 0x0b00))
1649 pwEpilog += 2;
1650
1651 // pop {...}
1652 UInt16 wPopRegs = 0;
1653 if ((pwEpilog[0] & 0xfe00) == 0xbc00)
1654 {
1655 // 16-bit pop.
1656 wPopRegs = pwEpilog[0] & 0xff;
1657 if ((pwEpilog[0] & 0x100) != 0)
1658 wPopRegs |= 1<<15;
1659 pwEpilog++;
1660 }
1661 else if (pwEpilog[0] == 0xe8bd)
1662 {
1663 // 32-bit pop.
1664 wPopRegs = pwEpilog[1];
1665 pwEpilog += 2;
1666 }
1667 else if ((pwEpilog[0] == 0xf85d) && ((pwEpilog[1] & 0x0fff) == 0xb04))
1668 {
1669 // Single register pop.
1670 int reg = pwEpilog[1] >> 12;
1671 wPopRegs |= 1 << reg;
1672 pwEpilog += 2;
1673 }
1674
1675 if (wPopRegs & (1 << 11))
1676 {
1677 // Popped r11 (the OS frame chain pointer). If we pushed this then we were required to push lr
1678 // immediately under it. (Can't directly assert that LR is popped since there are several ways we
1679 // might do this).
1680 if (pbCurrentIP < (UInt8*)pwEpilog)
1681 {
1682 // Executing in epilog prior to pop, so the return address is at [r11, #4].
1683 return (void**)((*pContext->pR11) + 4);
1684 }
1685 }
1686 else
1687 {
1688 // We didn't push r11 so therefore we didn't push lr (the invariant is that both or neither are
1689 // pushed). So it doesn't matter where in the epilog we're executing, the return address has always
1690 // been in lr.
1691 return (void**)pContext->pLR;
1692 }
1693
1694 if (wPopRegs & (1 << 15))
1695 {
1696 // Popped pc. This is a direct result of pushing lr and we only ever push lr if and only if we're also
1697 // pushing r11 to form an OS frame chain. If we didn't return above that means we somehow popped r11
1698 // and lr into pc and somehow landed up at the next instruction (i.e. past the end of the epilog). So
1699 // this case is an error.
1700 ASSERT_UNCONDITIONALLY("Walked off end of epilog");
1701 return NULL;
1702 }
1703
1704 if ((pwEpilog[0] == 0xf85d) && ((pwEpilog[1] & 0xff00) == 0xfb00))
1705 {
1706 // ldr pc, [sp], #imm8
1707 // Case where lr was pushed but we couldn't pop it with the other registers because we had some
1708 // additional stack to clean up (homed argument registers). Return address is at the top of the stack
1709 // in this case.
1710 return (void**)pContext->SP;
1711 }
1712
1713 if ((pwEpilog[0] & 0xff80) == 0xb000)
1714 {
1715 // add sp, sp, #imm7
1716 // Case where we have stack cleanup (homed argument registers) but we need to return via a branch for
1717 // some reason (such as tail calls).
1718 pwEpilog++;
1719 }
1720
1721 if ((pwEpilog[0] & 0xff87) == 0x4700)
1722 {
1723 // bx <reg>
1724 // Branch via register. This is a simple return if <reg> is lr, otherwise assume it's an EH throw and
1725 // return NULL to indicate do not hijack.
1726 if (((pwEpilog[0] & 0x0078) >> 3) == 14)
1727 return (void**)pContext->pLR;
1728 return NULL;
1729 }
1730
1731 if (((pwEpilog[0] & 0xf800) == 0xf000) && ((pwEpilog[1] & 0xd000) == 0x9000))
1732 {
1733 // b <imm>
1734 // Direct branch. Looks like a tail call. These aren't hijackable (without writing the instruction
1735 // stream) so return NULL to indicate do not hijack here.
1736 return NULL;
1737 }
1738
1739 // Shouldn't be any other instructions in the epilog.
1740 UNREACHABLE_MSG("Unknown epilog instruction");
1741 return NULL;
1742
1743 #elif defined(_ARM64_)
1744 UNREFERENCED_PARAMETER(pInfoHeader);
1745 UNREFERENCED_PARAMETER(pbEpilog);
1746 PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
1747
1748 #endif
1749 }
1750
1751 #ifdef _DEBUG
1752
FindNextEpilog(GCInfoHeader * pInfoHeader,UInt32 methodSize,PTR_UInt8 pbEpilogTable,Int32 * pEpilogStartOffsetInOut,UInt32 * pEpilogSizeOut)1753 bool EECodeManager::FindNextEpilog(GCInfoHeader * pInfoHeader, UInt32 methodSize, PTR_UInt8 pbEpilogTable,
1754 Int32 * pEpilogStartOffsetInOut, UInt32 * pEpilogSizeOut)
1755 {
1756 Int32 startOffset = *pEpilogStartOffsetInOut;
1757 Int32 thisOffset = 0;
1758
1759 if (pInfoHeader->IsEpilogAtEnd())
1760 {
1761 ASSERT(pInfoHeader->GetEpilogCount() == 1);
1762 UInt32 epilogSize = pInfoHeader->GetFixedEpilogSize();
1763 thisOffset = methodSize - epilogSize;
1764 *pEpilogStartOffsetInOut = thisOffset;
1765 *pEpilogSizeOut = epilogSize;
1766 return (thisOffset > startOffset);
1767 }
1768
1769 bool hasVaryingEpilogSizes = pInfoHeader->HasVaryingEpilogSizes();
1770 for (UInt32 idx = 0; idx < pInfoHeader->GetEpilogCount(); idx++)
1771 {
1772 thisOffset += VarInt::ReadUnsigned(pbEpilogTable);
1773 UInt32 epilogSize = hasVaryingEpilogSizes ? VarInt::ReadUnsigned(pbEpilogTable) : pInfoHeader->GetFixedEpilogSize();
1774 if (thisOffset > startOffset)
1775 {
1776 *pEpilogStartOffsetInOut = thisOffset;
1777 *pEpilogSizeOut = epilogSize;
1778 return true;
1779 }
1780 }
1781
1782 return false;
1783 }
1784
// IS_FRAMELESS() tells CheckHijackInEpilog whether the current method has no
// hijackable return-address slot on the stack. The definition is per-target:
#ifdef _ARM_
// ARM: the method is frameless exactly when LR was never pushed (return
// address lives only in the LR register).
#define IS_FRAMELESS() ((pInfoHeader->GetSavedRegs() & CSR_MASK_LR) == 0)
#elif defined(_ARM64_)
// ARM64: not implemented yet — always trips the portability assert.
// NOTE(review): no return statement after the assert; presumably
// PORTABILITY_ASSERT does not return in debug builds — confirm.
inline bool IsFramelessArm64(void)
{
    PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
}
#define IS_FRAMELESS() (IsFramelessArm64())
#else
// x86/amd64: frameless means no frame pointer was established.
#define IS_FRAMELESS() (!pInfoHeader->HasFramePointer())
#endif
1796
// Debug check: simulates a return-address-hijack attempt at an arbitrary point
// inside an epilog. A REGDISPLAY is populated with sentinel register values,
// then GetReturnAddressLocationFromEpilog is asked where the return address
// lives. The answer must either be the sentinel SP value (SUCCESS_VAL) or NULL
// (meaning "do not hijack here"); anything else indicates the epilog decoder
// and the actual epilog bytes disagree.
void CheckHijackInEpilog(GCInfoHeader * pInfoHeader, Code * pEpilog, Code * pEpilogStart, UInt32 epilogSize)
{
    ASSERT(pInfoHeader->GetReturnKind() != GCInfoHeader::MRK_ReturnsToNative);
    // Frameless methods keep the return address in a register/at a fixed spot;
    // the epilog-walk logic isn't exercised for them.
    if (IS_FRAMELESS())
        return;

    // Sentinel values: SP starts at SUCCESS_VAL, and the fake frame pointer is
    // one slot below it so that "pop"-style unwinds land back on SUCCESS_VAL.
    UIntNative SUCCESS_VAL = 0x22222200;
    UIntNative RSP_TEST_VAL = SUCCESS_VAL;
    UIntNative RBP_TEST_VAL = (RSP_TEST_VAL - sizeof(void *));

    REGDISPLAY context;
#if defined(_X86_)
    // pRbx is aimed at the same sentinel slot — presumably for dynamic-alignment
    // frames that restore ESP from EBX; confirm against the x86 epilog walker.
    context.pRbx = &RBP_TEST_VAL;
    context.pRbp = &RBP_TEST_VAL;
    context.SP = RSP_TEST_VAL;
#elif defined(_AMD64_)

    // AMD64 frames may encode the frame pointer at an offset from SP (new
    // style) or relative to the preserved-register area (old style); compute
    // the RBP sentinel so the decoder's arithmetic lands on SUCCESS_VAL.
    int frameSize = pInfoHeader->GetFrameSize();
    bool isNewStyleFP = pInfoHeader->IsFramePointerOffsetFromSP();
    int preservedRegSize = pInfoHeader->GetPreservedRegsSaveSize();

    int encodedFPOffset = isNewStyleFP ? frameSize - pInfoHeader->GetFramePointerOffsetFromSP()
                                       : -preservedRegSize + sizeof(void*);

    RBP_TEST_VAL = SUCCESS_VAL - encodedFPOffset - preservedRegSize;

    context.pRbp = &RBP_TEST_VAL;
    context.SP = RSP_TEST_VAL;
#elif defined(_ARM_)
    // r11 is the OS frame-chain pointer on ARM.
    context.pR11 = &RBP_TEST_VAL;
    context.SP = RSP_TEST_VAL;
#elif defined(_ARM64_)
    UNREFERENCED_PARAMETER(RBP_TEST_VAL);
    PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif

    context.SetIP((PCODE)pEpilog);

    // Ask the decoder where the return address is, given an IP this far into
    // the epilog.
    void ** result = EECodeManager::GetReturnAddressLocationFromEpilog(pInfoHeader, &context,
                                                        (UInt32)((Code*)pEpilog - pEpilogStart), epilogSize);

    // Either the decoder found the sentinel slot, or it declined to hijack.
    ASSERT(SUCCESS_VAL == (UIntNative)result || NULL == result);
}
1840
// Invoked between every simulated instruction in the Verify* routines below;
// relies on locals named pInfoHeader/pEpilog/pEpilogStart/epilogSize being in
// scope at each expansion site.
#define CHECK_HIJACK_IN_EPILOG() CheckHijackInEpilog(pInfoHeader, (Code *)pEpilog, (Code *)pEpilogStart, epilogSize)

// Asserts and bails out of the enclosing Verify* function with false.
#define VERIFY_FAILURE() \
{ \
    ASSERT_UNCONDITIONALLY("VERIFY_FAILURE"); \
    return false; \
} \
1848
1849 #ifdef _X86_
VerifyEpilogBytesX86(GCInfoHeader * pInfoHeader,Code * pEpilogStart,UInt32 epilogSize)1850 bool VerifyEpilogBytesX86(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt32 epilogSize)
1851 {
1852 Code * pEpilog = pEpilogStart;
1853
1854 // NativeCallable methods aren't return-address-hijacked, so we don't care about the epilog format.
1855 bool returnsToNative = (pInfoHeader->GetReturnKind() == GCInfoHeader::MRK_ReturnsToNative);
1856 if (returnsToNative)
1857 return true;
1858
1859 if (pInfoHeader->HasFramePointer())
1860 {
1861 {
1862 // ProjectN frames
1863
1864 CHECK_HIJACK_IN_EPILOG();
1865
1866 int frameSize = pInfoHeader->GetFrameSize();
1867 Int32 saveSize = pInfoHeader->GetPreservedRegsSaveSize() - sizeof(void*); // don't count EBP
1868 int distance = frameSize + saveSize;
1869
1870 if (saveSize > 0 || (*pEpilog==0x8d) /* localloc frame */ )
1871 {
1872 // lea esp, [ebp-xxx]
1873
1874 if (*pEpilog++ != 0x8d)
1875 VERIFY_FAILURE();
1876
1877 if (distance <= 128)
1878 {
1879 if (*pEpilog++ != 0x65)
1880 VERIFY_FAILURE();
1881 if (*pEpilog++ != ((UInt8)-distance))
1882 VERIFY_FAILURE();
1883 }
1884 else
1885 {
1886 if (*pEpilog++ != 0xa5)
1887 VERIFY_FAILURE();
1888 if (*((Int32*&)pEpilog)++ != -distance)
1889 VERIFY_FAILURE();
1890 }
1891
1892 CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
1893
1894 CHECK_HIJACK_IN_EPILOG();
1895 if (regMask & CSR_MASK_RBX)
1896 if (*pEpilog++ != 0x5b) // pop ebx
1897 VERIFY_FAILURE();
1898
1899 CHECK_HIJACK_IN_EPILOG();
1900 if (regMask & CSR_MASK_RSI)
1901 if (*pEpilog++ != 0x5e) // pop esi
1902 VERIFY_FAILURE();
1903
1904 CHECK_HIJACK_IN_EPILOG();
1905 if (regMask & CSR_MASK_RDI)
1906 if (*pEpilog++ != 0x5f) // pop edi
1907 VERIFY_FAILURE();
1908 }
1909
1910 // Reset ESP if necessary
1911 if (frameSize > 0)
1912 {
1913 // 'mov esp, ebp'
1914 CHECK_HIJACK_IN_EPILOG();
1915 if (*pEpilog++ != 0x8b)
1916 VERIFY_FAILURE();
1917 if (*pEpilog++ != 0xE5)
1918 VERIFY_FAILURE();
1919 }
1920
1921 // pop ebp
1922 CHECK_HIJACK_IN_EPILOG();
1923 if (*pEpilog++ != 0x5d)
1924 VERIFY_FAILURE();
1925
1926 if (pInfoHeader->HasDynamicAlignment())
1927 {
1928 ASSERT_MSG(pInfoHeader->GetParamPointerReg() == RN_EBX, "Expecting EBX as param pointer reg");
1929 ASSERT_MSG(!(pInfoHeader->GetSavedRegs() & CSR_MASK_RBX), "Not expecting param pointer reg to be saved explicitly");
1930
1931 // expect 'mov esp, ebx'
1932 CHECK_HIJACK_IN_EPILOG();
1933 if (*pEpilog++ != 0x8b || *pEpilog++ != 0xE3)
1934 {
1935 VERIFY_FAILURE();
1936 }
1937
1938 // pop ebx
1939 CHECK_HIJACK_IN_EPILOG();
1940 if (*pEpilog++ != 0x5b)
1941 VERIFY_FAILURE();
1942 }
1943 }
1944 }
1945 else
1946 {
1947 CHECK_HIJACK_IN_EPILOG();
1948 int frameSize = pInfoHeader->GetFrameSize();
1949 if (frameSize == 0)
1950 {
1951 }
1952 else if (frameSize == sizeof(void*))
1953 {
1954 if (*pEpilog++ != 0x59) // pop ecx
1955 VERIFY_FAILURE();
1956 }
1957 else if ((Int8)frameSize == frameSize)
1958 {
1959 // add esp, imm8
1960 if (*pEpilog++ != 0x83)
1961 VERIFY_FAILURE();
1962 if (*pEpilog++ != 0xc4)
1963 VERIFY_FAILURE();
1964 if (*pEpilog++ != frameSize)
1965 VERIFY_FAILURE();
1966 }
1967 else
1968 {
1969 // add esp, imm32
1970 if (*pEpilog++ != 0x81)
1971 VERIFY_FAILURE();
1972 if (*pEpilog++ != 0xc4)
1973 VERIFY_FAILURE();
1974 if ((*((Int32*)pEpilog))++ != frameSize)
1975 VERIFY_FAILURE();
1976 }
1977
1978 CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();
1979
1980 #if 1
1981 ASSERT_MSG(!(pInfoHeader->GetSavedRegs() & CSR_MASK_RBP),
1982 "We only expect RBP to be used as the frame pointer, never as a free preserved reg");
1983 #else
1984 CHECK_HIJACK_IN_EPILOG();
1985 if (regMask & CSR_MASK_RBP)
1986 if (*pEpilog++ != 0x5d) // pop ebp
1987 VERIFY_FAILURE();
1988 #endif
1989
1990 CHECK_HIJACK_IN_EPILOG();
1991 if (regMask & CSR_MASK_RBX)
1992 if (*pEpilog++ != 0x5b) // pop ebx
1993 VERIFY_FAILURE();
1994
1995 CHECK_HIJACK_IN_EPILOG();
1996 if (regMask & CSR_MASK_RSI)
1997 if (*pEpilog++ != 0x5e) // pop esi
1998 VERIFY_FAILURE();
1999
2000 CHECK_HIJACK_IN_EPILOG();
2001 if (regMask & CSR_MASK_RDI)
2002 if (*pEpilog++ != 0x5f) // pop edi
2003 VERIFY_FAILURE();
2004 }
2005
2006 CHECK_HIJACK_IN_EPILOG();
2007
2008 // Note: the last instruction of the epilog may be one of many possibilities: ret, rep ret, jmp offset,
2009 // or jmp [offset]. Each is a different size, but still just one instruction, which is just fine.
2010 // Therefore, from here down, pEpilog may be beyond "epilog start + size".
2011
2012 if (*pEpilog == 0xE9)
2013 {
2014 pEpilog += 5; // jmp offset (tail call direct)
2015 }
2016 else if (*pEpilog == 0xFF)
2017 {
2018 pEpilog += 6; // jmp [offset] (tail call indirect)
2019 }
2020 else
2021 {
2022 if (*pEpilog == 0xf3) // optional: rep prefix
2023 pEpilog++;
2024
2025 UInt32 retPopSize = pInfoHeader->GetReturnPopSize();
2026 if (retPopSize == 0)
2027 {
2028 if (*pEpilog++ != 0xC3) // ret
2029 VERIFY_FAILURE();
2030 }
2031 else
2032 {
2033 if (*pEpilog++ != 0xC2) // ret NNNN
2034 VERIFY_FAILURE();
2035 if (*((UInt16 *)pEpilog) != retPopSize)
2036 VERIFY_FAILURE();
2037 pEpilog += 2;
2038 }
2039 }
2040
2041 return true;
2042 }
2043 #endif // _X86_
2044 #ifdef _AMD64_
// Debug-only: decodes an AMD64 epilog instruction-by-instruction and verifies
// the bytes match what the GCInfoHeader says they should be, invoking
// CheckHijackInEpilog at each instruction boundary. Returns true when the
// epilog is well-formed; VERIFY_FAILURE() asserts and returns false otherwise.
bool VerifyEpilogBytesAMD64(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt32 epilogSize)
{
    Code * pEpilog = pEpilogStart;

    // NativeCallable methods aren't return-address-hijacked, so we don't care about the epilog format.
    bool returnsToNative = (pInfoHeader->GetReturnKind() == GCInfoHeader::MRK_ReturnsToNative);
    if (returnsToNative)
        return true;

    CHECK_HIJACK_IN_EPILOG();

    bool ebpFrame = pInfoHeader->HasFramePointer();
    int frameSize = pInfoHeader->GetFrameSize();
    if (ebpFrame)
    {
        ASSERT(RN_EBP == pInfoHeader->GetFramePointerReg());

        // The frame pointer may be encoded relative to SP (new style) or
        // relative to the preserved-register area (old style); compute the
        // displacement the 'lea' must carry.
        bool isNewStyleFP = pInfoHeader->IsFramePointerOffsetFromSP();
        int preservedRegSize = pInfoHeader->GetPreservedRegsSaveSize();

        Int32 offset = isNewStyleFP ? frameSize - pInfoHeader->GetFramePointerOffsetFromSP()
                                    : -preservedRegSize + sizeof(void*);

        // 'lea rsp, [rbp - offset]'
        if (*pEpilog++ != 0x48)     // REX.W prefix
            VERIFY_FAILURE();
        if (*pEpilog++ != 0x8d)
            VERIFY_FAILURE();

        if ((offset > 127) || (offset < -128))
        {
            // disp32 form: 48 8d a5 xx xx xx xx
            if (*pEpilog++ != 0xA5)
                VERIFY_FAILURE();
            if (*((Int32*&)pEpilog)++ != offset)
                VERIFY_FAILURE();
        }
        else
        {
            // disp8 form: 48 8d 65 xx
            if (*pEpilog++ != 0x65)
                VERIFY_FAILURE();
            if (((Int8)*pEpilog++) != offset)
                VERIFY_FAILURE();
        }
    }
    else if (frameSize)
    {
        if (frameSize < 128)
        {
            // 'add rsp, frameSize' // 48 83 c4 xx
            if (*pEpilog++ != 0x48)
                VERIFY_FAILURE();
            if (*pEpilog++ != 0x83)
                VERIFY_FAILURE();
            if (*pEpilog++ != 0xc4)
                VERIFY_FAILURE();
            if (*pEpilog++ != ((UInt8)frameSize))
                VERIFY_FAILURE();
        }
        else
        {
            // 'add rsp, frameSize' // 48 81 c4 xx xx xx xx
            if (*pEpilog++ != 0x48)
                VERIFY_FAILURE();
            if (*pEpilog++ != 0x81)
                VERIFY_FAILURE();
            if (*pEpilog++ != 0xc4)
                VERIFY_FAILURE();
            if (*((Int32*&)pEpilog)++ != frameSize)
                VERIFY_FAILURE();
        }
    }

    CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();

    // Preserved registers are restored in fixed order:
    // r15, r14, r13, r12, rdi, rsi, rbx (then rbp for EBP frames).
    CHECK_HIJACK_IN_EPILOG();
    if (regMask & CSR_MASK_R15)
    {
        // pop r15
        if (*pEpilog++ != 0x41)
            VERIFY_FAILURE();
        if (*pEpilog++ != 0x5f)
            VERIFY_FAILURE();
    }

    CHECK_HIJACK_IN_EPILOG();
    if (regMask & CSR_MASK_R14)
    {
        // pop r14
        if (*pEpilog++ != 0x41)
            VERIFY_FAILURE();
        if (*pEpilog++ != 0x5e)
            VERIFY_FAILURE();
    }

    CHECK_HIJACK_IN_EPILOG();
    if (regMask & CSR_MASK_R13)
    {
        // pop r13
        if (*pEpilog++ != 0x41)
            VERIFY_FAILURE();
        if (*pEpilog++ != 0x5d)
            VERIFY_FAILURE();
    }

    CHECK_HIJACK_IN_EPILOG();
    if (regMask & CSR_MASK_R12)
    {
        // pop r12
        if (*pEpilog++ != 0x41)
            VERIFY_FAILURE();
        if (*pEpilog++ != 0x5c)
            VERIFY_FAILURE();
    }

    CHECK_HIJACK_IN_EPILOG();
    if (regMask & CSR_MASK_RDI)
        if (*pEpilog++ != 0x5f)  // pop rdi
            VERIFY_FAILURE();

    CHECK_HIJACK_IN_EPILOG();
    if (regMask & CSR_MASK_RSI)
        if (*pEpilog++ != 0x5e)  // pop rsi
            VERIFY_FAILURE();

    CHECK_HIJACK_IN_EPILOG();
    if (regMask & CSR_MASK_RBX)
        if (*pEpilog++ != 0x5b)  // pop rbx
            VERIFY_FAILURE();

    if (ebpFrame)
    {
        CHECK_HIJACK_IN_EPILOG();
        if (*pEpilog++ != 0x5d)  // pop rbp
            VERIFY_FAILURE();
    }

    CHECK_HIJACK_IN_EPILOG();

    // Note: the last instruction of the epilog may be one of many possibilities: ret, rep ret, rex jmp rax.
    // Each is a different size, but still just one instruction, which is just fine. Therefore, from here
    // down, pEpilog may be beyond "epilog start + size".

    if (*pEpilog == 0x48)
    {
        // rex jmp rax (tail call)
        pEpilog++;

        if (*pEpilog++ != 0xff)
            VERIFY_FAILURE();
        if (*pEpilog++ != 0xe0)
            VERIFY_FAILURE();
    }
    else
    {
        // rep (OPTIONAL)
        if (*pEpilog == 0xf3)
            pEpilog++;
        // ret
        if (*pEpilog++ != 0xc3)
            VERIFY_FAILURE();
    }

    return true;
}
2209 #endif // _AMD64_
2210
2211 #ifdef _ARM_
// Debug-only: decodes a Thumb-2 (ARM) epilog halfword-by-halfword and verifies
// that the instruction stream is consistent with the GCInfoHeader (frame size,
// saved-register masks, VFP pushes, pushed argument registers). Calls
// CheckHijackInEpilog at each instruction boundary. Returns true when the
// epilog is well-formed; VERIFY_FAILURE() asserts and returns false.
bool VerifyEpilogBytesARM(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt32 epilogSize)
{
    // Clear the Thumb bit from the code address if present.
    if (((size_t)pEpilogStart) & 1)
        pEpilogStart--;

    UInt16 * pEpilog = (UInt16 *)pEpilogStart;

    // NativeCallable methods aren't return-address-hijacked, so we don't care about the epilog format.
    bool returnsToNative = (pInfoHeader->GetReturnKind() == GCInfoHeader::MRK_ReturnsToNative);
    if (returnsToNative)
        return true;

    CHECK_HIJACK_IN_EPILOG();

    int stackPopSize = 0;       // bytes of local frame cleaned up, accumulated below
    bool r7Cleanup = false;     // true if SP is restored from the r7 frame pointer

    int frameSize = pInfoHeader->GetFrameSize();
    bool r7Frame = pInfoHeader->HasFramePointer();

    if (pEpilog[0] == 0x46bd)
    {
        // 'mov sp,fp' -- only legal when a frame pointer was established
        if (!r7Frame)
            VERIFY_FAILURE();
        r7Cleanup = true;
        pEpilog++;
    }

    CHECK_HIJACK_IN_EPILOG();

    // Frame-size cleanup: one of 'add sp,#imm7', 'add sp,reg,#imm12', or a
    // movw/movt r12 pair followed by 'add sp,sp,r12' for large frames.
    if (frameSize > 0 || r7Frame)
    {
        if ((pEpilog[0] & 0xff80) == 0xb000)
        {
            // 'add sp, sp, #frameSize' // b0xx -- imm7 is scaled by 4
            stackPopSize = (*pEpilog & 0x7f) << 2;
            pEpilog++;
        }
        else if ((pEpilog[0] & 0xfbf0) == 0xf200 && (pEpilog[1] & 0x8f00) == 0x0d00)
        {
            // 'add sp,reg,#imm12
            int reg = pEpilog[0] & 0x000f;
            if (reg == 0xd)
                ;                            // sp-relative: plain frame pop
            else if (reg == 0x7 && r7Frame)
                r7Cleanup = true;            // fp-relative: restoring from r7
            else
                VERIFY_FAILURE();
            // Reassemble the T2 modified-immediate: i:imm3:imm8
            stackPopSize = (((pEpilog[0] >> 10) & 0x1) << 11) + (((pEpilog[1] >> 12) & 0x07) << 8) + (pEpilog[1] & 0xff);
            pEpilog += 2;
        }
        else if ((pEpilog[0] & 0xfbf0) == 0xf240 && (pEpilog[1] & 0x8f00) == 0x0c00)
        {
            // movw r12,imm16 -- imm16 is imm4:i:imm3:imm8
            stackPopSize = ((pEpilog[0] & 0xf) << 12) + (((pEpilog[0] >> 10) & 0x1) << 11) + (((pEpilog[1] >> 12) & 0x07) << 8) + (pEpilog[1] & 0xff);
            pEpilog += 2;

            // movt present as well?
            if ((pEpilog[0] & 0xfbf0) == 0xf2c0 && (pEpilog[1] & 0x8f00) == 0x0c00)
            {
                int highWord = ((pEpilog[0] & 0xf) << 12) + (((pEpilog[0] >> 10) & 0x1) << 11) + (((pEpilog[1] >> 12) & 0x07) << 8) + (pEpilog[1] & 0xff);
                stackPopSize += highWord << 16;
                pEpilog += 2;
            }

            // expect add sp,sp,r12
            if (pEpilog[0] != 0xeb0d || pEpilog[1] != 0x0d0c)
                VERIFY_FAILURE();
            pEpilog += 2;
        }
    }

    CHECK_HIJACK_IN_EPILOG();

    // check for vpop instructions to match what's in the info hdr
    Int32 vfpRegFirstPushedExpected = pInfoHeader->GetVfpRegFirstPushed();
    Int32 vfpRegPushedCountExpected = pInfoHeader->GetVfpRegPushedCount();
    while ((pEpilog[0] & ~(1<<6)) == 0xecbd && (pEpilog[1] & 0x0f01) == 0x0b00)
    {
        // Decode first register (D bit : Vd field) and register count from the vpop.
        Int32 vfpRegFirstPushedActual = (((pEpilog[0] >> 6) & 1) << 4) | (pEpilog[1] >> 12);
        Int32 vfpRegPushedCountActual = (pEpilog[1] & 0xff) >> 1;
        if (vfpRegFirstPushedExpected == 0 && vfpRegPushedCountExpected == 0)
        {
            // A vpop with no VFP registers recorded in the header is bogus.
            VERIFY_FAILURE();
        }
        else
        {
            if (vfpRegFirstPushedActual != vfpRegFirstPushedExpected || vfpRegPushedCountActual > vfpRegPushedCountExpected)
                VERIFY_FAILURE();

            // if we are still here, there are more than 16 registers to pop, so we expect another vpop
            // adjust the "expected" variables accordingly
            vfpRegFirstPushedExpected += vfpRegPushedCountActual;
            vfpRegPushedCountExpected -= vfpRegPushedCountActual;
        }

        pEpilog += 2;

        CHECK_HIJACK_IN_EPILOG();
    }
    // All header-recorded VFP registers must have been accounted for.
    if (vfpRegPushedCountExpected != 0)
        VERIFY_FAILURE();

    CalleeSavedRegMask regMask = pInfoHeader->GetSavedRegs();

    // figure out what set of registers should be popped
    int shouldPopRegMask = 0;
    if (regMask & CSR_MASK_R4)
        shouldPopRegMask |= 1<<4;
    if (regMask & CSR_MASK_R5)
        shouldPopRegMask |= 1<<5;
    if (regMask & CSR_MASK_R6)
        shouldPopRegMask |= 1<<6;
    if (regMask & CSR_MASK_R7)
        shouldPopRegMask |= 1<<7;
    if (regMask & CSR_MASK_R8)
        shouldPopRegMask |= 1<<8;
    if (regMask & CSR_MASK_R9)
        shouldPopRegMask |= 1<<9;
    if (regMask & CSR_MASK_R10)
        shouldPopRegMask |= 1<<10;
    if (regMask & CSR_MASK_R11)
        shouldPopRegMask |= 1<<11;
    if (regMask & CSR_MASK_LR)
        shouldPopRegMask |= 1<<15;   // lr is popped into pc

    // figure out what set of registers is actually popped
    int actuallyPopRegMask = 0;
    if ((pEpilog[0] & 0xfe00) == 0xbc00)
    {
        // 16-bit pop: low registers plus optional pc (P bit).
        actuallyPopRegMask = pEpilog[0] & 0xff;
        if ((pEpilog[0] & 0x100) != 0)
            actuallyPopRegMask |= 1<<15;
        pEpilog++;
    }
    else if (pEpilog[0] == 0xe8bd)
    {
        // 32-bit instruction
        actuallyPopRegMask = pEpilog[1];
        pEpilog += 2;
    }
    else if (pEpilog[0] == 0xf85d && (pEpilog[1] & 0x0fff) == 0xb04)
    {
        // we just pop one register
        int reg = pEpilog[1] >> 12;
        actuallyPopRegMask |= 1 << reg;
        pEpilog += 2;
    }

    // have we popped some low registers to clean up the stack?
    if (stackPopSize == 0 && (actuallyPopRegMask & 0x0f) != 0)
    {
        // the low registers count towards the stack pop size
        if (actuallyPopRegMask & 0x1)
            stackPopSize += POINTER_SIZE;
        if (actuallyPopRegMask & 0x2)
            stackPopSize += POINTER_SIZE;
        if (actuallyPopRegMask & 0x4)
            stackPopSize += POINTER_SIZE;
        if (actuallyPopRegMask & 0x8)
            stackPopSize += POINTER_SIZE;

        // remove the bits now accounted for
        actuallyPopRegMask &= ~0x0f;
    }

    // Whatever cleanup path was used must agree with the header's frame size.
    if (r7Cleanup)
    {
        if (stackPopSize != frameSize)
            VERIFY_FAILURE();
    }
    else
    {
        if (r7Frame)
        {
            // in this case the whole frame size may be larger than the r7 frame size we know about
            if (stackPopSize < frameSize)
                VERIFY_FAILURE();
        }
        else
        {
            if (stackPopSize != frameSize)
                VERIFY_FAILURE();
        }
    }

    UInt16 stackCleanupWords = pInfoHeader->ParmRegsPushedCount();

    if (shouldPopRegMask == actuallyPopRegMask)
    {
        // we got what we expected

        if ((actuallyPopRegMask & (1<<15)) != 0)
        {
            // if we popped pc, then this is the end of the epilog

            // however, if we still have pushed argument registers to cleanup,
            // we shouldn't get here
            if (pInfoHeader->AreParmRegsPushed())
                VERIFY_FAILURE();

            return true;
        }
    }
    else
    {
        // does this work out if we assume it's a call that pops
        // lr instead of pc and then terminates in a jump to reg?
        shouldPopRegMask ^= (1<<15)|(1<<14);
        if (shouldPopRegMask == actuallyPopRegMask)
        {
            // fine
        }
        else if (shouldPopRegMask == actuallyPopRegMask + (1<<14))
        {
            // we expected the epilog to pop lr, but it didn't
            // this may be a return with an additional stack cleanup
            // or a throw epilog that doesn't need lr anymore
            stackCleanupWords += 1;
        }
        else
        {
            VERIFY_FAILURE();
        }
    }

    if (stackCleanupWords)
    {
        CHECK_HIJACK_IN_EPILOG();

        // we may have "ldr pc,[sp],#stackCleanupWords*4"
        if (pEpilog[0] == 0xf85d && pEpilog[1] == 0xfb00 + stackCleanupWords*4)
        {
            // fine, and end of the epilog
            pEpilog += 2;
            return true;
        }
        // otherwise we should just have "add sp,#stackCleanupWords*4"
        else if (*pEpilog == 0xb000 + stackCleanupWords)
        {
            pEpilog += 1;
        }
        else
        {
            // neither cleanup form matched
            VERIFY_FAILURE();
        }
    }

    CHECK_HIJACK_IN_EPILOG();

    // we are satisfied if we see indirect jump through a register here
    // may be lr for normal return, or another register for tail calls
    if ((*pEpilog & 0xff87) == 0x4700)
        return true;

    // otherwise we expect to see a 32-bit branch
    if ((pEpilog[0] & 0xf800) == 0xf000 && (pEpilog[1] & 0xd000) == 0x9000)
        return true;

    VERIFY_FAILURE();

    return false;
}
2477 #elif defined(_ARM64_)
// ARM64 epilog verification is not implemented; any call lands on the
// portability assert. Parameters are referenced only to silence warnings.
bool VerifyEpilogBytesARM64(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt32 epilogSize)
{
    UNREFERENCED_PARAMETER(pInfoHeader);
    UNREFERENCED_PARAMETER(pEpilogStart);
    UNREFERENCED_PARAMETER(epilogSize);
    PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
}
2485 #endif // _ARM_
2486
// Dispatches epilog verification to the target-specific implementation
// selected at compile time.
bool EECodeManager::VerifyEpilogBytes(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt32 epilogSize)
{
#ifdef _X86_
    return VerifyEpilogBytesX86(pInfoHeader, pEpilogStart, epilogSize);
#elif defined(_AMD64_)
    return VerifyEpilogBytesAMD64(pInfoHeader, pEpilogStart, epilogSize);
#elif defined(_ARM_)
    return VerifyEpilogBytesARM(pInfoHeader, pEpilogStart, epilogSize);
#elif defined(_ARM64_)
    return VerifyEpilogBytesARM64(pInfoHeader, pEpilogStart, epilogSize);
#endif
}
2499
// Prolog verification is not currently implemented; this is a debug-only no-op
// kept for symmetry with VerifyEpilog.
void EECodeManager::VerifyProlog(EEMethodInfo * /*pMethodInfo*/)
{
}
2503
VerifyEpilog(EEMethodInfo * pMethodInfo)2504 void EECodeManager::VerifyEpilog(EEMethodInfo * pMethodInfo)
2505 {
2506 // @TODO: verify epilogs of funclets
2507 GCInfoHeader * pHeader = pMethodInfo->GetGCInfoHeader();
2508
2509 Int32 epilogStart = -1;
2510 UInt32 epilogCount = 0;
2511 UInt32 epilogSize = 0;
2512
2513 while (FindNextEpilog(pHeader, pMethodInfo->GetCodeSize(),
2514 pMethodInfo->GetEpilogTable(), &epilogStart, &epilogSize))
2515 {
2516 ASSERT(epilogStart >= 0);
2517 epilogCount++;
2518 Int32 codeOffset = epilogStart;
2519 Code * ip = ((Code *)pMethodInfo->GetCode()) + codeOffset;
2520
2521 ASSERT(VerifyEpilogBytes(pHeader, ip, epilogSize));
2522 }
2523
2524 ASSERT(epilogCount == pHeader->GetEpilogCount());
2525 }
2526
2527 #include "gcdump.h"
DumpGCInfo(EEMethodInfo * pMethodInfo,UInt8 * pbDeltaShortcutTable,UInt8 * pbUnwindInfoBlob,UInt8 * pbCallsiteInfoBlob)2528 void EECodeManager::DumpGCInfo(EEMethodInfo * pMethodInfo,
2529 UInt8 * pbDeltaShortcutTable,
2530 UInt8 * pbUnwindInfoBlob,
2531 UInt8 * pbCallsiteInfoBlob)
2532 {
2533 GCDump gcd;
2534 GCInfoHeader hdr;
2535
2536 UInt8 * pbRawGCInfo = pMethodInfo->GetRawGCInfo();
2537
2538 GCDump::Tables tables = { pbDeltaShortcutTable, pbUnwindInfoBlob, pbCallsiteInfoBlob };
2539
2540 size_t cbHdr = gcd.DumpInfoHeader(pbRawGCInfo, &tables, &hdr);
2541 gcd.DumpGCTable(pbRawGCInfo + cbHdr, &tables, hdr);
2542 }
2543
2544 #endif // _DEBUG
2545 #endif // !DACCESS_COMPILE
2546
2547
2548 // The controlPC parameter is used to decode the right GCInfoHeader in the case of an EH funclet
Init(PTR_VOID pvCode,UInt32 cbCodeSize,PTR_UInt8 pbRawGCInfo,PTR_VOID pvEHInfo)2549 void EEMethodInfo::Init(PTR_VOID pvCode, UInt32 cbCodeSize, PTR_UInt8 pbRawGCInfo, PTR_VOID pvEHInfo)
2550 {
2551 m_pvCode = pvCode;
2552 m_cbCodeSize = cbCodeSize;
2553 m_pbRawGCInfo = pbRawGCInfo;
2554 m_pvEHInfo = pvEHInfo;
2555
2556 m_pbGCInfo = (PTR_UInt8)(size_t)-1;
2557
2558 m_infoHdr.Init();
2559 }
2560
// Decodes the GCInfoHeader for this method (or for the funclet containing
// methodOffset) into m_infoHdr, and records where the epilog table and the
// GC pointer table begin within the raw GC info stream.
// The unwind info is either stored inline in the stream (blob offset of 0) or
// out-of-line in pbUnwindInfoBlob at (offset - 1).
void EEMethodInfo::DecodeGCInfoHeader(UInt32 methodOffset, PTR_UInt8 pbUnwindInfoBlob)
{
    PTR_UInt8 pbGcInfo = m_pbRawGCInfo;
    PTR_UInt8 pbStackChangeString;
    PTR_UInt8 pbUnwindInfo;

    // NOTE: VarInt::ReadUnsigned advances pbGcInfo past the varint it reads.
    UInt32 unwindInfoBlobOffset = VarInt::ReadUnsigned(pbGcInfo);
    bool inlineUnwindInfo = (unwindInfoBlobOffset == 0);

    if (inlineUnwindInfo)
    {
        // it is inline..
        pbUnwindInfo = pbGcInfo;
        size_t headerSize;
        pbStackChangeString = m_infoHdr.DecodeHeader(methodOffset, pbUnwindInfo, &headerSize);
        // Skip the in-stream header so pbGcInfo lands on the epilog table.
        pbGcInfo += headerSize;
    }
    else
    {
        // The offset was adjusted by 1 to reserve the 0 encoding for the inline case, so we re-adjust it to
        // the actual offset here.
        pbUnwindInfo = pbUnwindInfoBlob + unwindInfoBlobOffset - 1;
        pbStackChangeString = m_infoHdr.DecodeHeader(methodOffset, pbUnwindInfo, NULL);
    }

    m_pbEpilogTable = pbGcInfo;

    //
    // skip past epilog table
    //
    if (!m_infoHdr.IsEpilogAtEnd())
    {
        // Each entry is a start-offset varint, plus a size varint when epilog
        // sizes vary (mirrors the layout read back by FindNextEpilog).
        for (UInt32 i = 0; i < m_infoHdr.GetEpilogCount(); i++)
        {
            VarInt::SkipUnsigned(pbGcInfo);
            if (m_infoHdr.HasVaryingEpilogSizes())
                VarInt::SkipUnsigned(pbGcInfo);
        }
    }

    // Everything after the epilog table is the GC pointer table.
    m_pbGCInfo = pbGcInfo;
}
2603
GetGCInfo()2604 PTR_UInt8 EEMethodInfo::GetGCInfo()
2605 {
2606 ASSERT_MSG(m_pbGCInfo != (PTR_UInt8)(size_t)-1,
2607 "You must call DecodeGCInfoHeader first");
2608
2609 ASSERT(m_pbGCInfo != NULL);
2610 return m_pbGCInfo;
2611 }
2612
GetEpilogTable()2613 PTR_UInt8 EEMethodInfo::GetEpilogTable()
2614 {
2615 ASSERT_MSG(m_pbGCInfo != (PTR_UInt8)(size_t)-1,
2616 "You must call DecodeGCInfoHeader first");
2617
2618 ASSERT(m_pbEpilogTable != NULL);
2619 return m_pbEpilogTable;
2620 }
2621
GetGCInfoHeader()2622 GCInfoHeader * EEMethodInfo::GetGCInfoHeader()
2623 {
2624 ASSERT_MSG(m_pbGCInfo != (PTR_UInt8)(size_t)-1,
2625 "You must call DecodeGCInfoHeader first");
2626
2627 return &m_infoHdr;
2628 }
2629