1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4 #include "common.h"
5 #include "CommonTypes.h"
6 #include "CommonMacros.h"
7 #include "daccess.h"
8 #include "PalRedhawkCommon.h"
9 #include "PalRedhawk.h"
10 #include "rhassert.h"
11 #include "slist.h"
12 #include "holder.h"
13 #include "gcrhinterface.h"
14 #include "shash.h"
15 #include "RWLock.h"
16 #include "module.h"
17 #include "varint.h"
18 #include "rhbinder.h"
19 #include "Crst.h"
20 #include "regdisplay.h"
21 #include "StackFrameIterator.h"
22 #include "thread.h"
23 #include "event.h"
24 #include "RuntimeInstance.h"
25 #include "eetype.h"
26 #include "ObjectLayout.h"
27 #include "threadstore.h"
28
29 #include "CommonMacros.inl"
30 #include "slist.inl"
31 #include "shash.inl"
32
33 #include "gcinfo.h"
34 #include "RHCodeMan.h"
35
36 #include "rheventtrace.h"
37
38 // #define LOG_MODULE_LOAD_VERIFICATION
39
40 #ifndef DACCESS_COMPILE
41
42 EXTERN_C UInt32_BOOL g_fGcStressStarted;
43
// Construct a Module wrapper around a binder-generated module header.
// Most state is populated later by Module::Create; the constructor only
// establishes a safe zero/empty state (so a partially-initialized instance
// can be destroyed if Create fails partway through).
Module::Module(ModuleHeader *pModuleHeader) :
    m_pNext(),
    m_pbDeltaShortcutTable(NULL),
    m_pModuleHeader(pModuleHeader),
    m_MethodList(),
    m_fFinalizerInitComplete(false)
{
}
52
Create(ModuleHeader * pModuleHeader)53 Module * Module::Create(ModuleHeader *pModuleHeader)
54 {
55 // There's only one module header version for now. If we ever need to change it in a breaking fashion this
56 // is where we could put some code to try and handle downlevel modules with some form of compatibility
57 // mode (or just fail the module creation).
58 ASSERT(pModuleHeader->Version == ModuleHeader::CURRENT_VERSION);
59
60 NewHolder<Module> pNewModule = new (nothrow) Module(pModuleHeader);
61 if (NULL == pNewModule)
62 return NULL;
63
64 if (!pNewModule->m_MethodList.Init(pModuleHeader))
65 return NULL;
66
67 pNewModule->m_pEHTypeTable = pModuleHeader->GetEHInfo();
68 pNewModule->m_pbDeltaShortcutTable = pNewModule->m_MethodList.GetDeltaShortcutTablePtr();
69 pNewModule->m_pStaticsGCInfo = dac_cast<PTR_StaticGcDesc>(pModuleHeader->GetStaticsGCInfo());
70 pNewModule->m_pStaticsGCDataSection = pModuleHeader->GetStaticsGCDataSection();
71 pNewModule->m_pThreadStaticsGCInfo = dac_cast<PTR_StaticGcDesc>(pModuleHeader->GetThreadStaticsGCInfo());
72
73 if (pModuleHeader->RraFrozenObjects != ModuleHeader::NULL_RRA)
74 {
75 ASSERT(pModuleHeader->SizeFrozenObjects != 0);
76 pNewModule->m_FrozenSegment = RedhawkGCInterface::RegisterFrozenSection(
77 pModuleHeader->GetFrozenObjects(), pModuleHeader->SizeFrozenObjects);
78 if (pNewModule->m_FrozenSegment == NULL)
79 return NULL;
80 }
81
82 // Determine OS module handle. This assumes that only one Redhawk module can exist in a given PE image,
83 // which is true for now. It's also exposed by a number of exports (RhGetModuleFromEEType etc.) so if
84 // we ever rethink this then the public contract needs to change as well.
85 pNewModule->m_hOsModuleHandle = PalGetModuleHandleFromPointer(pModuleHeader);
86 if (!pNewModule->m_hOsModuleHandle)
87 {
88 ASSERT_UNCONDITIONALLY("Failed to locate our own module handle");
89 return NULL;
90 }
91
92 #ifdef FEATURE_CUSTOM_IMPORTS
93 Module::DoCustomImports(pModuleHeader);
94 #endif // FEATURE_CUSTOM_IMPORTS
95
96 // do generic unification
97 if (pModuleHeader->CountOfGenericUnificationDescs > 0)
98 {
99 if (!GetRuntimeInstance()->UnifyGenerics((GenericUnificationDesc *)pModuleHeader->GetGenericUnificationDescs(),
100 pModuleHeader->CountOfGenericUnificationDescs,
101 (void **)pModuleHeader->GetGenericUnificationIndirCells(),
102 pModuleHeader->CountOfGenericUnificationIndirCells))
103 {
104 return NULL;
105 }
106 }
107
108 #ifdef _DEBUG
109 #ifdef LOG_MODULE_LOAD_VERIFICATION
110 printf("\nModule: 0x%p\n", pNewModule->m_hOsModuleHandle);
111 #endif // LOG_MODULE_LOAD_VERIFICATION
112 //
113 // Run through every byte of every method in the module and do some sanity-checking. Exclude stub code.
114 //
115 UInt32 textLength = pModuleHeader->RegionSize[ModuleHeader::TEXT_REGION] - pModuleHeader->SizeStubCode;
116 UInt8 * pbText = pModuleHeader->RegionPtr[ModuleHeader::TEXT_REGION];
117
118 UInt32 uMethodSize = 0;
119 UInt32 uMethodIndex = 0;
120 UInt32 uMethodStartSectionOffset = 0;
121 UInt32 uExpectedMethodIndex = 0;
122 UInt32 uExpectedMethodStartSectionOffset = 0;
123 UInt32 uTextSectionOffset = 0;
124 UInt32 nMethods = pNewModule->m_MethodList.GetNumMethodsDEBUG();
125
126 UInt32 nIndirCells = pNewModule->m_pModuleHeader->CountOfLoopIndirCells;
127 UIntNative * pShadowBuffer = new (nothrow) UIntNative[nIndirCells];
128 UIntNative * pIndirCells = (UIntNative *)pNewModule->m_pModuleHeader->GetLoopIndirCells();
129 memcpy(pShadowBuffer, pIndirCells, nIndirCells * sizeof(UIntNative));
130
131 EEMethodInfo methodInfo;
132
133 for (; uTextSectionOffset < textLength; uTextSectionOffset += uMethodSize)
134 {
135 pNewModule->m_MethodList.GetMethodInfo(
136 uTextSectionOffset, &uMethodIndex, &uMethodStartSectionOffset, &uMethodSize);
137
138
139 #ifdef LOG_MODULE_LOAD_VERIFICATION
140 printf("0x%08x: %3d 0x%08x 0x%08x\n",
141 uTextSectionOffset, uMethodIndex, uMethodStartSectionOffset, uMethodSize);
142 #endif // LOG_MODULE_LOAD_VERIFICATION
143
144 ASSERT(uExpectedMethodStartSectionOffset == uMethodStartSectionOffset);
145 uExpectedMethodStartSectionOffset += uMethodSize;
146
147 ASSERT(uExpectedMethodIndex == uMethodIndex);
148 uExpectedMethodIndex++;
149
150 //
151 // verify that every offset in the method gives the same result
152 // *every* offsets turns out to be too slow - try 10 offsets in the method
153 //
154 UInt32 step = max(uMethodSize/10, 1);
155 for (UInt32 i = 0; i < uMethodSize; i += step)
156 {
157 UInt32 uMI;
158 UInt32 uMSSO;
159 UInt32 uMS;
160
161 pNewModule->m_MethodList.GetMethodInfo(uTextSectionOffset + i, &uMI, &uMSSO, &uMS);
162
163 ASSERT(uMI == uMethodIndex);
164 ASSERT(uMSSO == uMethodStartSectionOffset);
165 ASSERT(uMS == uMethodSize);
166 }
167
168 //
169 // calculate the method info
170 //
171
172 UInt8 * pbMethod = pbText + uMethodStartSectionOffset;
173 UInt8 * pbGCInfo = pNewModule->m_MethodList.GetGCInfo(uMethodIndex);
174 void * pvEHInfo = pNewModule->m_MethodList.GetEHInfo(uMethodIndex);
175
176 methodInfo.Init(pbMethod, uMethodSize, pbGCInfo, pvEHInfo);
177
178 methodInfo.DecodeGCInfoHeader(0, pNewModule->GetUnwindInfoBlob());
179
180 //
181 // do some verifications..
182 //
183 #ifdef LOG_MODULE_LOAD_VERIFICATION
184 EECodeManager::DumpGCInfo(&methodInfo,
185 pNewModule->GetDeltaShortcutTable(),
186 pNewModule->GetUnwindInfoBlob(),
187 pNewModule->GetCallsiteStringBlob());
188 #endif // LOG_MODULE_LOAD_VERIFICATION
189
190 EECodeManager::VerifyProlog(&methodInfo);
191 EECodeManager::VerifyEpilog(&methodInfo);
192
193 pNewModule->UnsynchronizedHijackMethodLoops((MethodInfo *)&methodInfo);
194
195 if (uExpectedMethodIndex >= nMethods)
196 break;
197 }
198
199 for (UInt32 i = 0; i < nIndirCells; i++)
200 {
201 ASSERT(pShadowBuffer[i] != pIndirCells[i]); // make sure we hijacked all of them
202 }
203
204 pNewModule->UnsynchronizedResetHijackedLoops();
205
206 if (!g_fGcStressStarted) // UnsynchronizedResetHijackedLoops won't do anything under gcstress
207 {
208 for (UInt32 i = 0; i < nIndirCells; i++)
209 {
210 ASSERT(pShadowBuffer[i] == pIndirCells[i]); // make sure we reset them properly
211 }
212 }
213
214 delete[] pShadowBuffer;
215
216 if (g_fGcStressStarted)
217 pNewModule->UnsynchronizedHijackAllLoops();
218
219 #ifdef LOG_MODULE_LOAD_VERIFICATION
220 printf("0x%08x: --- 0x%08x \n", (uTextSectionOffset + uMethodSize),
221 (uMethodStartSectionOffset + uMethodSize));
222 #endif // LOG_MODULE_LOAD_VERIFICATION
223 #endif // _DEBUG
224
225 #ifdef FEATURE_ETW
226 ETW::LoaderLog::SendModuleEvent(pNewModule);
227 #endif // FEATURE_ETW
228
229 // Run any initialization functions for native code that was linked into the image using the binder's
230 // /nativelink option.
231 if (pNewModule->m_pModuleHeader->RraNativeInitFunctions != ModuleHeader::NULL_RRA)
232 {
233 typedef void (* NativeInitFunctionPtr)();
234 UInt32 cInitFunctions = pNewModule->m_pModuleHeader->CountNativeInitFunctions;
235 NativeInitFunctionPtr * pInitFunctions = (NativeInitFunctionPtr*)(pNewModule->m_pModuleHeader->RegionPtr[ModuleHeader::RDATA_REGION] +
236 pNewModule->m_pModuleHeader->RraNativeInitFunctions);
237 for (UInt32 i = 0; i < cInitFunctions; i++)
238 pInitFunctions[i]();
239 }
240
241 pNewModule.SuppressRelease();
242 return pNewModule;
243 }
244
// Destroy this module. Callers go through this method rather than using
// delete directly so the deallocation policy stays in one place.
void Module::Destroy()
{
    delete this;
}
249
// Nothing to tear down explicitly; members clean themselves up.
Module::~Module()
{
}
253
254 #endif // !DACCESS_COMPILE
255
256
// Return the binder-generated header describing this module's layout.
PTR_ModuleHeader Module::GetModuleHeader()
{
    return m_pModuleHeader;
}
261
262
263 // We have three separate range checks for the data regions we might be interested in. We do this rather than
264 // have a single, all-in-one, method to force callers to consider which ranges are applicable. In many cases
265 // the caller knows an address can only legally lie in one specific range and we'd rather force them to
266 // specify that than pay for redundant range checks in many cases.
ContainsCodeAddress(PTR_VOID pvAddr)267 bool Module::ContainsCodeAddress(PTR_VOID pvAddr)
268 {
269 // We explicitly omit the stub code from this check. Use ContainsStubAddress to determine if
270 // an address belongs to the stub portion of the module's TEXT_REGION.
271 TADDR pAddr = dac_cast<TADDR>(pvAddr);
272 TADDR pSectionStart = dac_cast<TADDR>(m_pModuleHeader->RegionPtr[ModuleHeader::TEXT_REGION]);
273 TADDR pSectionLimit = pSectionStart + m_pModuleHeader->RegionSize[ModuleHeader::TEXT_REGION]
274 - m_pModuleHeader->SizeStubCode;
275 return (pAddr >= pSectionStart) && (pAddr < pSectionLimit);
276 }
277
ContainsDataAddress(PTR_VOID pvAddr)278 bool Module::ContainsDataAddress(PTR_VOID pvAddr)
279 {
280 TADDR pAddr = dac_cast<TADDR>(pvAddr);
281 TADDR pSectionStart = dac_cast<TADDR>(m_pModuleHeader->RegionPtr[ModuleHeader::DATA_REGION]);
282 TADDR pSectionLimit = pSectionStart + m_pModuleHeader->RegionSize[ModuleHeader::DATA_REGION];
283 return (pAddr >= pSectionStart) && (pAddr < pSectionLimit);
284 }
285
ContainsReadOnlyDataAddress(PTR_VOID pvAddr)286 bool Module::ContainsReadOnlyDataAddress(PTR_VOID pvAddr)
287 {
288 TADDR pAddr = dac_cast<TADDR>(pvAddr);
289 TADDR pSectionStart = dac_cast<TADDR>(m_pModuleHeader->RegionPtr[ModuleHeader::RDATA_REGION]);
290 TADDR pSectionLimit = pSectionStart + m_pModuleHeader->RegionSize[ModuleHeader::RDATA_REGION];
291 return (pAddr >= pSectionStart) && (pAddr < pSectionLimit);
292 }
293
ContainsStubAddress(PTR_VOID pvAddr)294 bool Module::ContainsStubAddress(PTR_VOID pvAddr)
295 {
296 // Determines if the address belongs to the stub portion of the TEXT_REGION section.
297 TADDR pAddr = dac_cast<TADDR>(pvAddr);
298 TADDR pSectionStart = dac_cast<TADDR>(m_pModuleHeader->RegionPtr[ModuleHeader::TEXT_REGION])
299 + m_pModuleHeader->RegionSize[ModuleHeader::TEXT_REGION]
300 - m_pModuleHeader->SizeStubCode;
301 TADDR pSectionLimit = pSectionStart + m_pModuleHeader->SizeStubCode;
302 return (pAddr >= pSectionStart) && (pAddr < pSectionLimit);
303 }
304
FindMethodStartAddress(PTR_VOID ControlPC)305 PTR_UInt8 Module::FindMethodStartAddress(PTR_VOID ControlPC)
306 {
307 if (!ContainsCodeAddress(ControlPC))
308 return NULL;
309
310 PTR_UInt8 pbControlPC = dac_cast<PTR_UInt8>(ControlPC);
311
312 UInt32 uMethodSize;
313 UInt32 uMethodIndex;
314 UInt32 uMethodStartSectionOffset;
315
316 PTR_UInt8 pbTextSectionStart = m_pModuleHeader->RegionPtr[ModuleHeader::TEXT_REGION];
317 UInt32 uTextSectionOffset = (UInt32)(pbControlPC - pbTextSectionStart);
318 m_MethodList.GetMethodInfo(uTextSectionOffset, &uMethodIndex, &uMethodStartSectionOffset, &uMethodSize);
319
320 PTR_UInt8 methodStartAddr = pbTextSectionStart + uMethodStartSectionOffset;
321 return methodStartAddr;
322 }
323
FindMethodInfo(PTR_VOID ControlPC,MethodInfo * pMethodInfoOut)324 bool Module::FindMethodInfo(PTR_VOID ControlPC,
325 MethodInfo * pMethodInfoOut)
326 {
327 if (!ContainsCodeAddress(ControlPC))
328 return false;
329
330 PTR_UInt8 pbControlPC = dac_cast<PTR_UInt8>(ControlPC);
331
332 UInt32 uMethodSize;
333 UInt32 uMethodIndex;
334 UInt32 uMethodStartSectionOffset;
335
336 PTR_UInt8 pbTextSectionStart = m_pModuleHeader->RegionPtr[ModuleHeader::TEXT_REGION];
337 UInt32 uTextSectionOffset = (UInt32)(pbControlPC - pbTextSectionStart);
338 m_MethodList.GetMethodInfo(uTextSectionOffset, &uMethodIndex, &uMethodStartSectionOffset, &uMethodSize);
339
340 PTR_UInt8 pbGCInfo = (PTR_UInt8) m_MethodList.GetGCInfo(uMethodIndex);
341 PTR_VOID pvEHInfo = m_MethodList.GetEHInfo(uMethodIndex);
342
343 EEMethodInfo * pEEMethodInfo = (EEMethodInfo *)pMethodInfoOut;
344
345 pEEMethodInfo->Init(pbTextSectionStart + uMethodStartSectionOffset, uMethodSize, pbGCInfo, pvEHInfo);
346
347 UInt32 codeOffset = (UInt32)(pbControlPC - (PTR_UInt8)pEEMethodInfo->GetCode());
348 #ifdef _ARM_
349 codeOffset &= ~1;
350 #endif
351
352 pEEMethodInfo->DecodeGCInfoHeader(codeOffset, GetUnwindInfoBlob());
353
354 return true;
355 }
356
// Return the module-wide blob of encoded unwind info referenced by each
// method's GC info header.
PTR_UInt8 Module::GetUnwindInfoBlob()
{
    return m_pModuleHeader->GetUnwindInfoBlob();
}
361
// Return the module-wide blob of encoded callsite GC root strings used when
// enumerating GC references at safe points.
PTR_UInt8 Module::GetCallsiteStringBlob()
{
    return m_pModuleHeader->GetCallsiteInfoBlob();
}
366
// Return the delta shortcut table (cached from the method list at Create
// time) used to decode safe point deltas.
PTR_UInt8 Module::GetDeltaShortcutTable()
{
    return m_pbDeltaShortcutTable;
}
371
EnumStaticGCRefsBlock(void * pfnCallback,void * pvCallbackData,PTR_StaticGcDesc pStaticGcInfo,PTR_UInt8 pbStaticData)372 void Module::EnumStaticGCRefsBlock(void * pfnCallback, void * pvCallbackData, PTR_StaticGcDesc pStaticGcInfo, PTR_UInt8 pbStaticData)
373 {
374 if (pStaticGcInfo == NULL)
375 return;
376
377 for (UInt32 idxSeries = 0; idxSeries < pStaticGcInfo->m_numSeries; idxSeries++)
378 {
379 PTR_StaticGcDescGCSeries pSeries = dac_cast<PTR_StaticGcDescGCSeries>(dac_cast<TADDR>(pStaticGcInfo) +
380 offsetof(StaticGcDesc, m_series) +
381 (idxSeries * sizeof(StaticGcDesc::GCSeries)));
382
383 ASSERT(IS_ALIGNED(dac_cast<TADDR>(pbStaticData), sizeof(RtuObjectRef)));
384 ASSERT(IS_ALIGNED(pSeries->m_startOffset, sizeof(RtuObjectRef)));
385 ASSERT(IS_ALIGNED(pSeries->m_size, sizeof(RtuObjectRef)));
386
387 PTR_RtuObjectRef pRefLocation = dac_cast<PTR_RtuObjectRef>(pbStaticData + pSeries->m_startOffset);
388 UInt32 numObjects = pSeries->m_size / sizeof(RtuObjectRef);
389
390 RedhawkGCInterface::BulkEnumGcObjRef(pRefLocation, numObjects, pfnCallback, pvCallbackData);
391 }
392 }
393
// Report all GC references rooted in this module's static fields — both the
// regular statics block and the per-thread TLS statics of every thread — to
// the supplied callback.
void Module::EnumStaticGCRefs(void * pfnCallback, void * pvCallbackData)
{
    // Regular statics.
    EnumStaticGCRefsBlock(pfnCallback, pvCallbackData, m_pStaticsGCInfo, m_pStaticsGCDataSection);

    // Thread local statics.
    if (m_pThreadStaticsGCInfo != NULL)
    {
        FOREACH_THREAD(pThread)
        {
            // To calculate the address of the data for each thread's TLS fields we need two values:
            //  1) The TLS slot index allocated for this module by the OS loader. We keep a pointer to this
            //     value in the module header.
            //  2) The offset into the TLS block at which Redhawk-specific data begins. This is zero for
            //     modules generated by the binder in PE mode, but maybe something else for COFF-mode modules
            //     (if some of the native code we're linked with also uses thread locals). We keep this offset
            //     in the module header as well.
            EnumStaticGCRefsBlock(pfnCallback, pvCallbackData, m_pThreadStaticsGCInfo,
                                  pThread->GetThreadLocalStorage(*m_pModuleHeader->PointerToTlsIndex,
                                                                 m_pModuleHeader->TlsStartOffset));
        }
        END_FOREACH_THREAD
    }
}
418
// Returns true if the method described by pMethodInfo is a funclet, as
// recorded in its GC info header.
bool Module::IsFunclet(MethodInfo * pMethodInfo)
{
    return GetEEMethodInfo(pMethodInfo)->GetGCInfoHeader()->IsFunclet();
}
423
// Return the method's frame pointer for the given register state, as
// determined by the code manager from the method's GC info header.
PTR_VOID Module::GetFramePointer(MethodInfo *   pMethodInfo,
                                 REGDISPLAY *   pRegisterSet)
{
    return EECodeManager::GetFramePointer(GetEEMethodInfo(pMethodInfo)->GetGCInfoHeader(), pRegisterSet);
}
429
EnumGcRefs(MethodInfo * pMethodInfo,PTR_VOID safePointAddress,REGDISPLAY * pRegisterSet,GCEnumContext * hCallback)430 void Module::EnumGcRefs(MethodInfo * pMethodInfo,
431 PTR_VOID safePointAddress,
432 REGDISPLAY * pRegisterSet,
433 GCEnumContext * hCallback)
434 {
435
436 MethodGcInfoPointers infoPtrs;
437 infoPtrs.m_pGCInfoHeader = GetEEMethodInfo(pMethodInfo)->GetGCInfoHeader();
438 infoPtrs.m_pbEncodedSafePointList = GetEEMethodInfo(pMethodInfo)->GetGCInfo();
439 infoPtrs.m_pbCallsiteStringBlob = GetCallsiteStringBlob();
440 infoPtrs.m_pbDeltaShortcutTable = GetDeltaShortcutTable();
441
442 UInt32 codeOffset = (UInt32)(dac_cast<TADDR>(safePointAddress) - dac_cast<TADDR>(GetEEMethodInfo(pMethodInfo)->GetCode()));
443 ASSERT(codeOffset < GetEEMethodInfo(pMethodInfo)->GetCodeSize())
444 EECodeManager::EnumGcRefs(&infoPtrs, codeOffset, pRegisterSet, hCallback);
445 }
446
UnwindStackFrame(MethodInfo * pMethodInfo,REGDISPLAY * pRegisterSet,PTR_VOID * ppPreviousTransitionFrame)447 bool Module::UnwindStackFrame(MethodInfo * pMethodInfo,
448 REGDISPLAY * pRegisterSet,
449 PTR_VOID * ppPreviousTransitionFrame)
450 {
451 EEMethodInfo * pEEMethodInfo = GetEEMethodInfo(pMethodInfo);
452
453 *ppPreviousTransitionFrame = EECodeManager::GetReversePInvokeSaveFrame(pEEMethodInfo->GetGCInfoHeader(), pRegisterSet);
454 if (*ppPreviousTransitionFrame != NULL)
455 return true;
456
457 return EECodeManager::UnwindStackFrame(pEEMethodInfo->GetGCInfoHeader(), pRegisterSet);
458 }
459
// Return a conservative upper bound on the stack area holding this method's
// outgoing arguments, for use during conservative stack reporting.
UIntNative Module::GetConservativeUpperBoundForOutgoingArgs(MethodInfo *   pMethodInfo,
                                                            REGDISPLAY *   pRegisterSet)
{
    return EECodeManager::GetConservativeUpperBoundForOutgoingArgs(
        GetEEMethodInfo(pMethodInfo)->GetGCInfoHeader(), pRegisterSet);
}
466
// Locate the stack slot holding the return address of the method at the
// current point of execution in pRegisterSet so the thread-hijack machinery
// can overwrite it. On success, *ppvRetAddrLocation receives the address of
// that slot and *pRetValueKind the GC kind of the method's return value
// (needed to protect it while hijacked). Returns false if the method cannot
// be hijacked at this code offset — and always under a DAC build, where no
// target state may be mutated.
bool Module::GetReturnAddressHijackInfo(MethodInfo *    pMethodInfo,
                                        REGDISPLAY *    pRegisterSet,
                                        PTR_PTR_VOID *  ppvRetAddrLocation,
                                        GCRefKind *     pRetValueKind)
{
#ifdef DACCESS_COMPILE
    UNREFERENCED_PARAMETER(pMethodInfo);
    UNREFERENCED_PARAMETER(pRegisterSet);
    UNREFERENCED_PARAMETER(ppvRetAddrLocation);
    UNREFERENCED_PARAMETER(pRetValueKind);
    return false;
#else
    EEMethodInfo * pEEMethodInfo = GetEEMethodInfo(pMethodInfo);
    GCInfoHeader * pInfoHeader = pEEMethodInfo->GetGCInfoHeader();

    // The epilog table is required so the code manager can tell whether the
    // return address still lives in its home slot at this offset.
    PTR_UInt8 controlPC = (PTR_UInt8)pRegisterSet->GetIP();
    UInt32 codeOffset = (UInt32)(controlPC - (PTR_UInt8)pEEMethodInfo->GetCode());
    PTR_PTR_VOID pRetAddr = EECodeManager::GetReturnAddressLocationForHijack(
        pInfoHeader,
        pEEMethodInfo->GetCodeSize(),
        pEEMethodInfo->GetEpilogTable(),
        codeOffset,
        pRegisterSet);

    if (pRetAddr == NULL)
        return false;

    *ppvRetAddrLocation = pRetAddr;
    *pRetValueKind = EECodeManager::GetReturnValueKind(pInfoHeader);

    return true;
#endif
}
500
// Cursor state for enumerating the EH clauses of one method. This is the
// concrete layout behind the opaque EHEnumState handed out by EHEnumInit.
struct EEEHEnumState
{
    PTR_UInt8 pMethodStartAddress;  // method code start; clause offsets are relative to this
    PTR_UInt8 pEHInfo;              // read cursor into the variable-length encoded clause stream
    UInt32 uClause;                 // index of the next clause to decode
    UInt32 nClauses;                // total clause count read from the stream header
};

// Ensure that EEEHEnumState fits into the space reserved by EHEnumState
static_assert(sizeof(EEEHEnumState) <= sizeof(EHEnumState), "EEEHEnumState does not fit into EHEnumState");
511
EHEnumInit(MethodInfo * pMethodInfo,PTR_VOID * pMethodStartAddressOut,EHEnumState * pEHEnumStateOut)512 bool Module::EHEnumInit(MethodInfo * pMethodInfo, PTR_VOID * pMethodStartAddressOut, EHEnumState * pEHEnumStateOut)
513 {
514 EEMethodInfo * pInfo = GetEEMethodInfo(pMethodInfo);
515
516 PTR_VOID pEHInfo = pInfo->GetEHInfo();
517 if (pEHInfo == NULL)
518 return false;
519
520 *pMethodStartAddressOut = pInfo->GetCode();
521
522 EEEHEnumState * pEnumState = (EEEHEnumState *)pEHEnumStateOut;
523 pEnumState->pMethodStartAddress = (PTR_UInt8)pInfo->GetCode();
524 pEnumState->pEHInfo = (PTR_UInt8)pEHInfo;
525 pEnumState->uClause = 0;
526 pEnumState->nClauses = VarInt::ReadUnsigned(pEnumState->pEHInfo);
527
528 return true;
529 }
530
// Decode the next EH clause of the enumeration begun by EHEnumInit into
// *pEHClauseOut. Returns false once all clauses are consumed. Each
// VarInt::ReadUnsigned call advances pEHInfo as a side effect, so the read
// order below must exactly mirror the binder's encoding order.
bool Module::EHEnumNext(EHEnumState * pEHEnumState, EHClause * pEHClauseOut)
{
    EEEHEnumState * pEnumState = (EEEHEnumState *)pEHEnumState;

    if (pEnumState->uClause >= pEnumState->nClauses)
        return false;
    pEnumState->uClause++;

    pEHClauseOut->m_tryStartOffset = VarInt::ReadUnsigned(pEnumState->pEHInfo);

    // The second integer packs the clause kind into the low two bits and the
    // try-region length into the remaining bits.
    UInt32 tryEndDeltaAndClauseKind = VarInt::ReadUnsigned(pEnumState->pEHInfo);
    pEHClauseOut->m_clauseKind = (EHClauseKind)(tryEndDeltaAndClauseKind & 0x3);
    pEHClauseOut->m_tryEndOffset = pEHClauseOut->m_tryStartOffset + (tryEndDeltaAndClauseKind >> 2);

    // For each clause, we have up to 4 integers:
    //      1)  try start offset
    //      2)  (try length << 2) | clauseKind
    //      3)  if (typed || fault || filter)    { handler start offset }
    //      4a) if (typed)                       { index into type table }
    //      4b) if (filter)                      { filter start offset }
    //
    // The first two integers have already been decoded
    UInt8* methodStartAddress = dac_cast<UInt8*>(pEnumState->pMethodStartAddress);
    switch (pEHClauseOut->m_clauseKind)
    {
    case EH_CLAUSE_TYPED:
        pEHClauseOut->m_handlerAddress = methodStartAddress + VarInt::ReadUnsigned(pEnumState->pEHInfo);

        {
            UInt32 typeIndex = VarInt::ReadUnsigned(pEnumState->pEHInfo);

            void * pvTargetType = ((void **) m_pEHTypeTable)[typeIndex];

            // We distinguish between these two cases by inspecting the low bit
            // of the EHTypeTable entry.  If it is set, the entry points to an
            // indirection cell.
            if ((((TADDR)pvTargetType) & 1) == 1)
                pvTargetType = *(void**)(((UInt8*)pvTargetType) - 1);

            pEHClauseOut->m_pTargetType = pvTargetType;
        }
        break;
    case EH_CLAUSE_FAULT:
        pEHClauseOut->m_handlerAddress = methodStartAddress + VarInt::ReadUnsigned(pEnumState->pEHInfo);
        break;
    case EH_CLAUSE_FILTER:
        pEHClauseOut->m_handlerAddress = methodStartAddress + VarInt::ReadUnsigned(pEnumState->pEHInfo);
        pEHClauseOut->m_filterAddress = methodStartAddress + VarInt::ReadUnsigned(pEnumState->pEHInfo);
        break;
    default:
        ASSERT_UNCONDITIONALLY("Unexpected EHClauseKind");
        break;
    }

    return true;
}
587
// Return the start address of the hot code for the method described by
// pMethodInfo. Without hot/cold splitting this is simply the method info's
// code pointer; if that pointer refers to a cold section body, the binder's
// cold-to-hot mapping is consulted to recover the hot entry point.
PTR_VOID Module::GetMethodStartAddress(MethodInfo * pMethodInfo)
{
    EEMethodInfo * pInfo = GetEEMethodInfo(pMethodInfo);
    PTR_VOID pvStartAddress = pInfo->GetCode();
#ifndef DACCESS_COMPILE
    // this may be the start of the cold section of a method -
    // we really want to obtain the start of the hot section instead

    // obtain the mapping information - if there is none, return what we have
    ColdToHotMapping *pColdToHotMapping = (ColdToHotMapping *)m_pModuleHeader->GetColdToHotMappingInfo();
    if (pColdToHotMapping == nullptr)
        return pvStartAddress;

    // this start address better be in this module
    ASSERT(ContainsCodeAddress(pvStartAddress));

    PTR_UInt8 pbStartAddress = dac_cast<PTR_UInt8>(pvStartAddress);

    UInt32 uMethodSize;
    UInt32 uMethodIndex;
    UInt32 uMethodStartSectionOffset;

    // repeat the lookup of the method index - this is a bit inefficient, but probably
    // better than burdening the EEMethodInfo with storing the rarely required index
    PTR_UInt8 pbTextSectionStart = m_pModuleHeader->RegionPtr[ModuleHeader::TEXT_REGION];
    UInt32 uTextSectionOffset = (UInt32)(pbStartAddress - pbTextSectionStart);
    m_MethodList.GetMethodInfo(uTextSectionOffset, &uMethodIndex, &uMethodStartSectionOffset, &uMethodSize);

    // we should have got the start of this body already, whether hot or cold
    ASSERT(uMethodStartSectionOffset == uTextSectionOffset);

    UInt32 uSubSectionCount = pColdToHotMapping->subSectionCount;
    SubSectionDesc *pSubSection = (SubSectionDesc *)pColdToHotMapping->subSection;
    UInt32 *pHotRVA = (UInt32 *)(pSubSection + uSubSectionCount);

    // iterate over the subsections, trying to find the correct range
    // uMethodIndex is progressively rebased against each subsection's hot and
    // cold method counts; when it lands in a cold range, pHotRVA[uMethodIndex]
    // gives the RVA of the corresponding hot code.
    for (UInt32 uSubSectionIndex = 0; uSubSectionIndex < uSubSectionCount; uSubSectionIndex++)
    {
        // is the method index in the hot range? If so, we are done
        if (uMethodIndex < pSubSection->hotMethodCount)
            return pvStartAddress;
        uMethodIndex -= pSubSection->hotMethodCount;

        // is the method index in the cold range?
        if (uMethodIndex < pSubSection->coldMethodCount)
        {
            UInt32 hotRVA = pHotRVA[uMethodIndex];
            pvStartAddress = GetBaseAddress() + hotRVA;

            // this start address better be in this module
            ASSERT(ContainsCodeAddress(pvStartAddress));

            return pvStartAddress;
        }
        uMethodIndex -= pSubSection->coldMethodCount;
        pHotRVA += pSubSection->coldMethodCount;
        pSubSection += 1;
    }
    ASSERT_UNCONDITIONALLY("MethodIndex not found");
#endif // DACCESS_COMPILE
    return pvStartAddress;
}
650
// Compute the address of the 'special' GC safe point just past the prolog of
// the funclet starting at funcletStart within pInfo's method. Returns the
// code address (method start + funclet start + prolog size) at which the
// binder encoded the funclet's incoming GC references.
static PTR_VOID GetFuncletSafePointForIncomingLiveReferences(Module * pModule, EEMethodInfo * pInfo, UInt32 funcletStart)
{
    // The binder will encode a GC safe point (as appropriate) at the first code offset after the
    // prolog to represent the "incoming" GC references. This safe point is 'special' because it
    // doesn't occur at an offset that would otherwise be a safe point. Additionally, it doesn't
    // report any scratch registers that might actually be live at that point in the funclet code
    // (namely the incoming Exception object). In other words, this is just a convenient way to reuse
    // the existing infrastructure to get our GC roots reported for a hardware fault at a non-GC-safe
    // point.

    // N.B. - we cannot side-effect the current m_methodInfo or other state variables other than
    // m_ControlPC and m_codeOffset because, although we've remapped the control PC, it's not really
    // where we are unwinding from. We're just pretending that we're in the funclet for GC reporting
    // purposes, but the unwind needs to happen from the original location.

    // Use a scratch EEMethodInfo so the caller's method info is left untouched.
    EEMethodInfo tempInfo;

    PTR_UInt8 methodStart = (PTR_UInt8)pInfo->GetCode();
    tempInfo.Init(methodStart, pInfo->GetCodeSize(), pInfo->GetRawGCInfo(), pInfo->GetEHInfo());

    // Decode the funclet's own GC info header to learn its prolog size.
    tempInfo.DecodeGCInfoHeader(funcletStart, pModule->GetUnwindInfoBlob());

    GCInfoHeader * pHeader = tempInfo.GetGCInfoHeader();
    UInt32 cbProlog = pHeader->GetPrologSize();
    UInt32 codeOffset = funcletStart + cbProlog;
#ifdef _ARM_
    // Clear the Thumb bit so the offset indexes the instruction stream.
    codeOffset &= ~1;
#endif

    return methodStart + codeOffset;
}
682
// Map the PC of a hardware fault (e.g. a null-reference access) to the
// 'special' GC safe point of the EH handler that will cover it, so stack
// walks for GC report the correct roots while the fault is being dispatched.
// Returns controlPC unchanged when no EH clause covers the fault location.
PTR_VOID Module::RemapHardwareFaultToGCSafePoint(MethodInfo * pMethodInfo, PTR_VOID controlPC)
{
    EEMethodInfo * pInfo = GetEEMethodInfo(pMethodInfo);

    EHEnumState ehEnum;
    PTR_VOID pMethodStartAddress;
    // No EH info means no handler can cover the fault: report from where we are.
    if (!EHEnumInit(pMethodInfo, &pMethodStartAddress, &ehEnum))
        return controlPC;

    // First pass: look for a try region directly covering the faulting offset.
    PTR_UInt8 methodStart = (PTR_UInt8)pInfo->GetCode();
    UInt32 codeOffset = (UInt32)((PTR_UInt8)controlPC - methodStart);
    EHClause ehClause;
    while (EHEnumNext(&ehEnum, &ehClause))
    {
        if ((ehClause.m_tryStartOffset <= codeOffset) && (codeOffset < ehClause.m_tryEndOffset))
        {
            UInt32 handlerOffset = (UInt32)(dac_cast<PTR_UInt8>(ehClause.m_handlerAddress) - methodStart);
            return GetFuncletSafePointForIncomingLiveReferences(this, pInfo, handlerOffset);
        }
    }

    // We didn't find a try region covering our PC.  However, if the PC is in a funclet, we must do more work.
    GCInfoHeader * pThisFuncletUnwindInfo = pInfo->GetGCInfoHeader();
    if (!pThisFuncletUnwindInfo->IsFunclet())
        return controlPC;

    // For funclets, we must correlate the funclet to its corresponding try region and check for enclosing try
    // regions that might catch the exception as it "escapes" the funclet.

    UInt32 thisFuncletOffset = pThisFuncletUnwindInfo->GetFuncletOffset();

    UInt32 tryRegionStart = 0;
    UInt32 tryRegionEnd = 0;
    bool foundTryRegion = false;

    // Second pass: find this funclet's own try region, then keep scanning for
    // a strictly-enclosing try region whose handler would catch the escape.
    EHEnumInit(pMethodInfo, &pMethodStartAddress, &ehEnum);

    while (EHEnumNext(&ehEnum, &ehClause))
    {
        UInt32 handlerOffset = (UInt32)(dac_cast<PTR_UInt8>(ehClause.m_handlerAddress) - methodStart);
        if (foundTryRegion && (ehClause.m_tryStartOffset <= tryRegionStart) && (tryRegionEnd <= ehClause.m_tryEndOffset))
        {
            // the regions aren't nested if they have exactly the same range.
            if ((ehClause.m_tryStartOffset != tryRegionStart) || (tryRegionEnd != ehClause.m_tryEndOffset))
            {
                return GetFuncletSafePointForIncomingLiveReferences(this, pInfo, handlerOffset);
            }
        }

        if (handlerOffset == thisFuncletOffset)
        {
            tryRegionStart = ehClause.m_tryStartOffset;
            tryRegionEnd = ehClause.m_tryEndOffset;
            foundTryRegion = true;
            // After we find the target region, we can just keep looking without reseting our iterator. This
            // is because the clauses are emitted in an "inside-out" order, so we know that enclosing clauses
            // may only appear after the target clause.
        }
    }
    ASSERT(foundTryRegion);
    return controlPC;
}
745
746 #ifndef DACCESS_COMPILE
747
748 //------------------------------------------------------------------------------------------------------------
749 // @TODO: the following functions are related to throwing exceptions out of Rtm. If we did not have to throw
750 // out of Rtm, then we would note have to have the code below to get a classlib exception object given
751 // an exception id, or the special functions to back up the MDIL THROW_* instructions, or the allocation
752 // failure helper. If we could move to a world where we never throw out of Rtm, perhaps by moving parts
753 // of Rtm that do need to throw out to Bartok- or Binder-generated functions, then we could remove all of this.
754 //------------------------------------------------------------------------------------------------------------
755
756 // Return the Module that is the "classlib module" for this Module. This is the module that was supplied as
757 // the classlib when this module was bound. This module typically defines System.Object and other base types.
758 // The classlib module is also required to export two functions needed by the runtime to implement exception
759 // handling and fail fast.
GetClasslibModule()760 Module * Module::GetClasslibModule()
761 {
762 // Every non-classlib module has a RVA to a IAT entry for System.Object in the classlib module it
763 // was compiled against. Therefore, we can use that address to locate the Module for the classlib module.
764 // If this is a classlib module, then we can just return it.
765 if (IsClasslibModule())
766 {
767 return this;
768 }
769
770 void ** ppSystemObjectEEType = (void**)(m_pModuleHeader->RegionPtr[ModuleHeader::IAT_REGION] +
771 m_pModuleHeader->RraSystemObjectEEType);
772
773 return GetRuntimeInstance()->FindModuleByReadOnlyDataAddress(*ppSystemObjectEEType);
774 }
775
// A module is a classlib module exactly when it carries no RVA for the
// classlib's System.Object EEType (it defines System.Object itself).
bool Module::IsClasslibModule()
{
    return (m_pModuleHeader->RraSystemObjectEEType == ModuleHeader::NULL_RRA);
}
780
781 // Array eetypes have a common base type defined by the classlib module
GetArrayBaseType()782 EEType * Module::GetArrayBaseType()
783 {
784 // find the class lib module
785 Module * pClasslibModule = GetClasslibModule();
786
787 // find the System.Array EEType
788 EEType * pArrayBaseType = (EEType *)(pClasslibModule->m_pModuleHeader->RegionPtr[ModuleHeader::RDATA_REGION] +
789 pClasslibModule->m_pModuleHeader->RraArrayBaseEEType);
790
791 // we expect to find a canonical type (not cloned, not array, not "other")
792 ASSERT(pArrayBaseType->IsCanonical());
793
794 return pArrayBaseType;
795 }
796
797 // Return the classlib-defined helper.
GetClasslibFunction(ClasslibFunctionId functionId)798 void * Module::GetClasslibFunction(ClasslibFunctionId functionId)
799 {
800 // First, delegate the call to the classlib module that this module was compiled against.
801 if (!IsClasslibModule())
802 return GetClasslibModule()->GetClasslibFunction(functionId);
803
804 // Lookup the method and return it. If we don't find it, we just return NULL.
805 void * pMethod;
806
807 switch (functionId)
808 {
809 case ClasslibFunctionId::GetRuntimeException:
810 pMethod = m_pModuleHeader->Get_GetRuntimeException();
811 break;
812 case ClasslibFunctionId::AppendExceptionStackFrame:
813 pMethod = m_pModuleHeader->Get_AppendExceptionStackFrame();
814 break;
815 case ClasslibFunctionId::FailFast:
816 pMethod = m_pModuleHeader->Get_FailFast();
817 break;
818 case ClasslibFunctionId::UnhandledExceptionHandler:
819 pMethod = m_pModuleHeader->Get_UnhandledExceptionHandler();
820 break;
821 case ClasslibFunctionId::CheckStaticClassConstruction:
822 pMethod = m_pModuleHeader->Get_CheckStaticClassConstruction();
823 break;
824 case ClasslibFunctionId::OnFirstChanceException:
825 pMethod = m_pModuleHeader->Get_OnFirstChanceException();
826 break;
827 default:
828 pMethod = NULL;
829 break;
830 }
831
832 return pMethod;
833 }
834
GetAssociatedData(PTR_VOID ControlPC)835 PTR_VOID Module::GetAssociatedData(PTR_VOID ControlPC)
836 {
837 UNREFERENCED_PARAMETER(ControlPC);
838
839 // Not supported for ProjectN.
840 return NULL;
841 }
842
843 // Get classlib-defined helper for running deferred static class constructors. Returns NULL if this is not the
844 // classlib module or the classlib doesn't implement this callback.
GetClasslibCheckStaticClassConstruction()845 void * Module::GetClasslibCheckStaticClassConstruction()
846 {
847 return m_pModuleHeader->Get_CheckStaticClassConstruction();
848 }
849
850 // Returns the classlib-defined helper for initializing the finalizer thread. The contract is that it will be
851 // run before any object based on that classlib is finalized.
GetClasslibInitializeFinalizerThread()852 void * Module::GetClasslibInitializeFinalizerThread()
853 {
854 return m_pModuleHeader->Get_InitializeFinalizerThread();
855 }
856
857 // Returns true if this module is part of the OS module specified by hOsHandle.
IsContainedBy(HANDLE hOsHandle)858 bool Module::IsContainedBy(HANDLE hOsHandle)
859 {
860 return m_hOsModuleHandle == hOsHandle;
861 }
862
UnregisterFrozenSection()863 void Module::UnregisterFrozenSection()
864 {
865 RedhawkGCInterface::UnregisterFrozenSection(m_FrozenSegment);
866 }
867
868 //
869 // Hijack the loops within the method referred to by pMethodInfo.
870 // WARNING: Only one thread may call this at a time (i.e. the thread performing suspension of all others).
UnsynchronizedHijackMethodLoops(MethodInfo * pMethodInfo)871 void Module::UnsynchronizedHijackMethodLoops(MethodInfo * pMethodInfo)
872 {
873 void ** ppvIndirCells = (void **)m_pModuleHeader->GetLoopIndirCells();
874 UInt32 nIndirCells = m_pModuleHeader->CountOfLoopIndirCells;
875 if (nIndirCells == 0)
876 return;
877
878 EEMethodInfo * pEEMethodInfo = GetEEMethodInfo(pMethodInfo);
879
880 void * pvMethodStart = pEEMethodInfo->GetCode();
881 void * pvMethodEnd = ((UInt8 *)pvMethodStart) + pEEMethodInfo->GetCodeSize();
882
883 void * pvRedirStubsStart = m_pModuleHeader->GetLoopRedirTargets();
884 void * pvRedirStubsEnd = ((UInt8 *)pvRedirStubsStart) + GcPollInfo::EntryIndexToStubOffset(nIndirCells);
885
886 #ifdef _TARGET_ARM_
887 // on ARM, there is just one redir stub, because we can compute the indir cell index
888 // from the indir cell pointer left in r12
889 // to make the range tests below work, bump up the end by one byte
890 ASSERT(pvRedirStubsStart == pvRedirStubsEnd);
891 pvRedirStubsEnd = (void *)(((UInt8 *)pvRedirStubsEnd)+1);
892 #endif // _TARGET_ARM_
893
894
895 void ** ppvStart = &ppvIndirCells[0];
896 void ** ppvEnd = &ppvIndirCells[nIndirCells];
897 void ** ppvTest;
898
899 while ((ppvStart + 1) < ppvEnd)
900 {
901 ppvTest = ppvStart + ((ppvEnd - ppvStart)/2);
902 void * cellContents = *ppvTest;
903
904 // look to see if the cell has already been hijacked
905 if ((pvRedirStubsStart <= cellContents) && (cellContents < pvRedirStubsEnd))
906 {
907 void ** ppvCur = ppvTest;
908 // try incrementing ppvTest until it hits ppvEnd
909 while (++ppvCur < ppvEnd)
910 {
911 cellContents = *ppvCur;
912 if ((pvRedirStubsStart > cellContents) || (cellContents >= pvRedirStubsEnd))
913 break;
914 }
915 if (ppvCur == ppvEnd)
916 {
917 // We hit the end and didn't find any non-hijacked cells,
918 // so let's shrink the range and start over.
919 ppvEnd = ppvTest;
920 continue;
921 }
922 }
923
924 if (pvMethodStart >= cellContents)
925 {
926 ppvStart = ppvTest;
927 }
928 else if (pvMethodStart < cellContents)
929 {
930 ppvEnd = ppvTest;
931 }
932 }
933 ppvTest = ppvStart;
934
935 // At this point start and end are pointing to consecutive entries
936 ASSERT((ppvStart + 1) == ppvEnd);
937
938 // Reset start and end.
939 ppvStart = &ppvIndirCells[0];
940 ppvEnd = &ppvIndirCells[nIndirCells];
941
942 // We shouldn't have walked off the end of the array
943 ASSERT((ppvStart <= ppvTest) && (ppvTest < ppvEnd));
944
945 // ppvTest may point the the cell before the first cell in the method or to the first cell in the method.
946 // So we must test it separately to see whether or not to hijack it.
947 if (*ppvTest < pvMethodStart)
948 ppvTest++;
949
950 UInt8 * pbDirtyBitmap = m_pModuleHeader->GetLoopIndirCellChunkBitmap();;
951
952 // now hijack all the entries to the end of the method
953 for (;;)
954 {
955 void * cellContents = *ppvTest;
956
957 // skip already hijacked cells
958 while ((pvRedirStubsStart <= cellContents) && (cellContents < pvRedirStubsEnd) && (ppvTest < ppvEnd))
959 {
960 ppvTest++;
961 cellContents = *ppvTest;
962 }
963 if (ppvTest >= ppvEnd) // walked off the end of the array
964 break;
965 if (cellContents >= pvMethodEnd) // walked off the end of the method
966 break;
967
968 UInt32 entryIndex = (UInt32)(ppvTest - ppvIndirCells);
969
970 UnsynchronizedHijackLoop(ppvTest, entryIndex, pvRedirStubsStart, pbDirtyBitmap);
971
972 ppvTest++;
973 }
974 }
975
976 // WARNING: Caller must perform synchronization!
void Module::UnsynchronizedResetHijackedLoops()
{
    // Walk the dirty-chunk bitmap written by UnsynchronizedHijackLoop and restore every cell in each
    // dirty chunk to its original loop-target code address, decoded from the compressed loop-targets
    // table in the module header.
    if (g_fGcStressStarted)
        return; // don't ever reset loop hijacks when GC stress is enabled

    if (m_pModuleHeader == nullptr) // @TODO: simple modules and loop hijacking
        return;

    void ** ppvIndirCells = (void **)m_pModuleHeader->GetLoopIndirCells();
    UInt32 nIndirCells = m_pModuleHeader->CountOfLoopIndirCells;
    if (nIndirCells == 0)
        return;

    // One bitmap BIT covers GcPollInfo::indirCellsPerBitmapBit cells, so one bitmap BYTE covers
    // eight times that many.
    UInt8 * pbDirtyBitmapStart = m_pModuleHeader->GetLoopIndirCellChunkBitmap();
    UInt32 cellsPerByte = (GcPollInfo::indirCellsPerBitmapBit * 8);
    UInt32 nBitmapBytes = (nIndirCells + (cellsPerByte - 1)) / cellsPerByte; // round up to the next byte
    UInt8 * pbDirtyBitmapEnd = pbDirtyBitmapStart + nBitmapBytes;

    void ** ppvCurIndirCell = &ppvIndirCells[0];
    void ** ppvIndirCellsEnd = &ppvIndirCells[nIndirCells];

    // The loop-targets blob begins with one varint-encoded chunk offset per bitmap bit, followed by
    // per-chunk data: a first target offset and then a varint delta per subsequent cell.
    UInt8 * pbTargetsInfoStart = m_pModuleHeader->GetLoopTargets();
    UInt8 * pbCurrentChunkPtr = pbTargetsInfoStart;

    for (UInt8 * pbBitmapCursor = pbDirtyBitmapStart; pbBitmapCursor < pbDirtyBitmapEnd; pbBitmapCursor++)
    {
        UInt8 currentByte = *pbBitmapCursor;

        // Bits are consumed most-significant-first, matching the 1 << (7 - bitIndex) layout used
        // when the dirty bits were set.
        for (UInt8 mask = 0x80; mask > 0; mask >>= 1)
        {
            if (currentByte & mask)
            {
                // Dirty chunk: read its offset (the ReadUnsigned call advances pbCurrentChunkPtr
                // past this header entry), then decode the chunk's targets and restore each cell.
                UInt32 currentChunkOffset = VarInt::ReadUnsigned(pbCurrentChunkPtr);
                UInt8 * pbChunkInfo = pbTargetsInfoStart + currentChunkOffset;
                UInt32 targetOffset = VarInt::ReadUnsigned(pbChunkInfo);

                for (void ** ppvTemp = ppvCurIndirCell;
                     ppvTemp < (ppvCurIndirCell + GcPollInfo::indirCellsPerBitmapBit);
                     ppvTemp++)
                {
                    if (ppvTemp >= ppvIndirCellsEnd)
                        return; // the last byte was only partially populated

                    // Restore the cell to its loop target inside the code (TEXT) region.
                    *ppvTemp = m_pModuleHeader->RegionPtr[ModuleHeader::TEXT_REGION] + targetOffset;
                    targetOffset += VarInt::ReadUnsigned(pbChunkInfo);
                }

                // WARNING: This not synchronized! -- We expect to perform these actions only when
                // all threads are suspended for GC.
                currentByte ^= mask; // reset the bit in the bitmap
                ASSERT((currentByte & mask) == 0);
                // NOTE(review): only the local copy 'currentByte' is cleared; nothing stores it back
                // to *pbBitmapCursor, so the dirty bit persists in memory. Re-restoring an already
                // clean chunk appears idempotent, but confirm this matches the intended protocol.
            }
            else
            {
                // Clean chunk: still skip its header entry so the cursor stays in sync with the bits.
                VarInt::SkipUnsigned(pbCurrentChunkPtr);
            }
            ppvCurIndirCell += GcPollInfo::indirCellsPerBitmapBit;
        }
    }
}
1037
void * Module::RecoverLoopHijackTarget(UInt32 entryIndex, ModuleHeader * pModuleHeader)
{
    // Recover the original loop-target code address for the indirection cell at entryIndex by
    // decoding the compressed loop-targets table. Because decoding requires a linear varint walk,
    // results are cached in m_loopHijackIndexToTargetMap under a reader/writer lock.

    // read lock scope
    {
        ReaderWriterLock::ReadHolder readHolder(&m_loopHijackMapLock);
        void * pvLoopTarget;
        if (m_loopHijackIndexToTargetMap.Lookup(entryIndex, &pvLoopTarget))
        {
            return pvLoopTarget;
        }
    }

    // Cache miss: the table begins with one varint chunk offset per bitmap bit; skip the header
    // entries of all chunks preceding the one containing entryIndex.
    UInt8 * pbTargetsInfoStart = pModuleHeader->GetLoopTargets();
    UInt8 * pbCurrentChunkPtr = pbTargetsInfoStart;

    UInt32 bitIndex = entryIndex / GcPollInfo::indirCellsPerBitmapBit;
    for (UInt32 idx = 0; idx < bitIndex; idx++)
    {
        VarInt::SkipUnsigned(pbCurrentChunkPtr);
    }

    // Read this chunk's offset, then the chunk's first target offset (each read advances its cursor).
    UInt32 currentChunkOffset = VarInt::ReadUnsigned(pbCurrentChunkPtr);
    UInt8 * pbCurrentInfo = pbTargetsInfoStart + currentChunkOffset;
    UInt32 targetOffset = VarInt::ReadUnsigned(pbCurrentInfo);

    // Accumulate the per-cell varint deltas up to entryIndex's position within its chunk.
    for (UInt32 chunkSubIndex = entryIndex - (bitIndex * GcPollInfo::indirCellsPerBitmapBit);
         chunkSubIndex > 0;
         chunkSubIndex--)
    {
        targetOffset += VarInt::ReadUnsigned(pbCurrentInfo);
    }

    // The decoded offset is relative to the module's code (TEXT) region.
    void * pvLoopTarget = pModuleHeader->RegionPtr[ModuleHeader::TEXT_REGION] + targetOffset;

    // write lock scope
    {
        // AddOrReplace tolerates a concurrent decoder having cached the same index first.
        ReaderWriterLock::WriteHolder writeHolder(&m_loopHijackMapLock);
        KeyValuePair<UInt32, void *> newEntry = { entryIndex, pvLoopTarget };
        m_loopHijackIndexToTargetMap.AddOrReplace(newEntry);
    }

    return pvLoopTarget;
}
1081
UnsynchronizedHijackAllLoops()1082 void Module::UnsynchronizedHijackAllLoops()
1083 {
1084 void ** ppvIndirCells = (void **)m_pModuleHeader->GetLoopIndirCells();
1085 UInt32 nIndirCells = m_pModuleHeader->CountOfLoopIndirCells;
1086 if (nIndirCells == 0)
1087 return;
1088
1089 void * pvRedirStubsStart = m_pModuleHeader->GetLoopRedirTargets();
1090 UInt8 * pbDirtyBitmap = m_pModuleHeader->GetLoopIndirCellChunkBitmap();
1091
1092 for (UInt32 idx = 0; idx < nIndirCells; idx++)
1093 {
1094 UnsynchronizedHijackLoop(&ppvIndirCells[idx], idx, pvRedirStubsStart, pbDirtyBitmap);
1095 }
1096 }
1097
1098 // static
UnsynchronizedHijackLoop(void ** ppvIndirectionCell,UInt32 cellIndex,void * pvRedirStubsStart,UInt8 * pbDirtyBitmap)1099 void Module::UnsynchronizedHijackLoop(void ** ppvIndirectionCell, UInt32 cellIndex,
1100 void * pvRedirStubsStart, UInt8 * pbDirtyBitmap)
1101 {
1102 //
1103 // set the dirty bit
1104 //
1105 UInt32 bitmapByteIndex = cellIndex / (GcPollInfo::indirCellsPerBitmapBit * 8);
1106 UInt32 bitmapBitIndex = (cellIndex / GcPollInfo::indirCellsPerBitmapBit) % 8;
1107 UInt8 bitMask = 1 << (7 - bitmapBitIndex);
1108 UInt8 * pBitmapByte = &pbDirtyBitmap[bitmapByteIndex];
1109
1110 // WARNING: The assumption here is that there is only one thread ever updating this bitmap (i.e. the
1111 // thread performing the suspension of all other threads). If this assumption is violated, then this
1112 // code is broken because it does a read-modify-write which could overwrite other writers' updates.
1113 UInt8 newByte = (*pBitmapByte) | bitMask;
1114 *((UInt8 *)pBitmapByte) = newByte;
1115
1116 //
1117 // hijack the loop's indirection cell
1118 //
1119 *ppvIndirectionCell = ((UInt8 *)pvRedirStubsStart) + GcPollInfo::EntryIndexToStubOffset(cellIndex);
1120 }
1121
GetDispatchMapLookupTable()1122 DispatchMap ** Module::GetDispatchMapLookupTable()
1123 {
1124 return (DispatchMap**)(m_pModuleHeader->RegionPtr[ModuleHeader::RDATA_REGION] +
1125 m_pModuleHeader->RraDispatchMapLookupTable);
1126 }
1127
GetOsModuleHandle()1128 HANDLE Module::GetOsModuleHandle()
1129 {
1130 return m_hOsModuleHandle;
1131 }
1132
GetReadOnlyBlobs(UInt32 * pcbBlobs)1133 BlobHeader * Module::GetReadOnlyBlobs(UInt32 * pcbBlobs)
1134 {
1135 *pcbBlobs = m_pModuleHeader->SizeReadOnlyBlobs;
1136 return (BlobHeader*)m_pModuleHeader->GetReadOnlyBlobs();
1137 }
1138
1139 #ifdef FEATURE_CUSTOM_IMPORTS
1140
1141 #define IMAGE_ORDINAL_FLAG64 0x8000000000000000
1142 #define IMAGE_ORDINAL_FLAG32 0x80000000
1143
1144 #ifdef _TARGET_AMD64_
1145 #define TARGET_IMAGE_ORDINAL_FLAG IMAGE_ORDINAL_FLAG64
1146 #else
1147 #define TARGET_IMAGE_ORDINAL_FLAG IMAGE_ORDINAL_FLAG32
1148 #endif
1149
1150 /*static*/
DoCustomImports(ModuleHeader * pModuleHeader)1151 void Module::DoCustomImports(ModuleHeader * pModuleHeader)
1152 {
1153 // Address issue 432987: rather than AV on invalid ordinals, it's better to fail fast, so turn the
1154 // asserts below into conditional failfast calls
1155 #define ASSERT_FAILFAST(cond) if (!(cond)) RhFailFast()
1156
1157 CustomImportDescriptor *customImportTable = (CustomImportDescriptor *)pModuleHeader->GetCustomImportDescriptors();
1158 UInt32 countCustomImports = pModuleHeader->CountCustomImportDescriptors;
1159
1160 // obtain base address for this module
1161 PTR_UInt8 thisBaseAddress = (PTR_UInt8)PalGetModuleHandleFromPointer(pModuleHeader);
1162
1163 for (UInt32 i = 0; i < countCustomImports; i++)
1164 {
1165 // obtain address of indirection cell pointing to the EAT for the exporting module
1166 UInt32 **ptrPtrEAT = (UInt32 **)(thisBaseAddress + customImportTable[i].RvaEATAddr);
1167
1168 // obtain the EAT by derefencing
1169 UInt32 *ptrEAT = *ptrPtrEAT;
1170
1171 // obtain the exporting module
1172 HANDLE hExportingModule = PalGetModuleHandleFromPointer(ptrEAT);
1173
1174 // obtain the base address of the exporting module
1175 PTR_UInt8 targetBaseAddress = (PTR_UInt8)hExportingModule;
1176
1177 // obtain the address of the IAT and the number of entries
1178 UIntTarget *ptrIAT = (UIntTarget *)(thisBaseAddress + customImportTable[i].RvaIAT);
1179 UInt32 countIAT = customImportTable[i].CountIAT;
1180
1181 if (i == 0)
1182 {
1183 // the first entry is a dummy entry that points to a flag
1184 UInt32 *pFlag = (UInt32 *)ptrIAT;
1185
1186 // the ptr to the EAT indirection cell also points to the flag
1187 ASSERT_FAILFAST((UInt32 *)ptrPtrEAT == pFlag);
1188
1189 // the number of IAT entries should be zero
1190 ASSERT_FAILFAST(countIAT == 0);
1191
1192 // if the flag is set, it means we have fixed up this module already
1193 // this is our check against infinite recursion
1194 if (*pFlag == TRUE)
1195 return;
1196
1197 // if the flag is not set, it must be clear
1198 ASSERT_FAILFAST(*pFlag == FALSE);
1199
1200 // set the flag
1201 *pFlag = TRUE;
1202 }
1203 else
1204 {
1205 // iterate over the IAT, replacing ordinals with real addresses
1206 for (UInt32 j = 0; j < countIAT; j++)
1207 {
1208 // obtain the ordinal
1209 UIntTarget ordinal = ptrIAT[j];
1210
1211 // the ordinals should have the high bit set
1212 ASSERT_FAILFAST((ordinal & TARGET_IMAGE_ORDINAL_FLAG) != 0);
1213
1214 // the ordinals should be in increasing order, for perf reasons
1215 ASSERT_FAILFAST(j+1 == countIAT || ordinal < ptrIAT[j+1]);
1216
1217 ordinal &= ~TARGET_IMAGE_ORDINAL_FLAG;
1218
1219 // sanity check: limit ordinals to < 1 Million
1220 ASSERT_FAILFAST(ordinal < 1024 * 1024);
1221
1222 // obtain the target RVA
1223 UInt32 targetRVA = ptrEAT[ordinal];
1224
1225 // obtain the target address by adding the base address of the exporting module
1226 UIntTarget targetAddr = (UIntTarget)(targetBaseAddress + targetRVA);
1227
1228 // write the target address to the IAT slot, overwriting the ordinal
1229 ptrIAT[j] = targetAddr;
1230 }
1231 // find the module header of the target module - this is a bit of a hack
1232 // as we assume the header is at the start of the first section
1233 // currently this is true for ProjectN files unless it's built by the native
1234 // linker from COFF files
1235 ModuleHeader *pTargetModuleHeader = (ModuleHeader *)(targetBaseAddress + 0x1000);
1236
1237 // recursively fixup the target module as well - this is because our eager cctors may call
1238 // methods in the target module, which again may call imports of the target module
1239 DoCustomImports(pTargetModuleHeader);
1240 }
1241 }
1242 #undef ASSERT_FAILFAST
1243 }
1244
1245 #endif // FEATURE_CUSTOM_IMPORTS
1246
1247 #endif // DACCESS_COMPILE
1248
1249 #ifdef DACCESS_COMPILE
DacSize(TADDR addr)1250 UInt32 StaticGcDesc::DacSize(TADDR addr)
1251 {
1252 uint32_t numSeries = 0;
1253 DacReadAll(addr + offsetof(StaticGcDesc, m_numSeries), &numSeries, sizeof(numSeries), true);
1254
1255 return (UInt32)(offsetof(StaticGcDesc, m_series) + (numSeries * sizeof(GCSeries)));
1256 }
1257 #endif // DACCESS_COMPILE
1258