1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4 #ifndef __GCENV_BASE_INCLUDED__
5 #define __GCENV_BASE_INCLUDED__
6 //
7 // Sets up basic environment for CLR GC
8 //
9
// Build-time feature selection for this GC environment.
#define FEATURE_REDHAWK 1
#define FEATURE_CONSERVATIVE_GC 1

// Marker so other headers can detect that the GC environment has been set up.
#define GCENV_INCLUDED

// Linkage / calling-convention decoration for Redhawk PAL entry points.
#define REDHAWK_PALIMPORT extern "C"
#define REDHAWK_PALAPI __stdcall

#ifndef _MSC_VER
// Non-MSVC compilers: neutralize or emulate the MSVC-specific keywords.
#define __stdcall
#ifdef __clang__
#define __forceinline __attribute__((always_inline)) inline
#else // __clang__
#define __forceinline inline
#endif // __clang__
#endif // !_MSC_VER

// Extremes for size_t / ptrdiff_t when the platform headers do not provide them.
#ifndef SIZE_T_MAX
#define SIZE_T_MAX ((size_t)-1)
#endif
#ifndef SSIZE_T_MAX
#define SSIZE_T_MAX ((ptrdiff_t)(SIZE_T_MAX / 2))
#endif
#ifndef _INC_WINDOWS
// -----------------------------------------------------------------------------------------------------------
//
// Aliases for Win32 types
//
// Fixed-width local definitions; when windows.h is present (_INC_WINDOWS) the
// real Win32 typedefs are used instead.

typedef uint32_t BOOL;
typedef uint32_t DWORD;

// -----------------------------------------------------------------------------------------------------------
// HRESULT subset.

#ifdef PLATFORM_UNIX
typedef int32_t HRESULT;
#else
// this must exactly match the typedef used by windows.h
typedef long HRESULT;
#endif

#define SUCCEEDED(_hr) ((HRESULT)(_hr) >= 0)
#define FAILED(_hr) ((HRESULT)(_hr) < 0)

// Converts a Win32 error code to an HRESULT, mirroring the winerror.h macro.
inline HRESULT HRESULT_FROM_WIN32(unsigned long x)
{
    // Values that are already zero or negative as an HRESULT pass through unchanged.
    if ((HRESULT)(x) <= 0)
    {
        return (HRESULT)(x);
    }
    // Otherwise pack the low 16 bits of the error with FACILITY_WIN32 (7) and
    // the severity (failure) bit.
    return (HRESULT)(((x) & 0x0000FFFF) | (7 << 16) | 0x80000000);
}
60
// Success / failure codes (subset of winerror.h values).
#define S_OK 0x0
#define S_FALSE 0x1
#define E_FAIL 0x80004005
#define E_OUTOFMEMORY 0x8007000E
#define E_UNEXPECTED 0x8000FFFF
#define E_NOTIMPL 0x80004001
#define E_INVALIDARG 0x80070057

#define NOERROR 0x0
#define ERROR_TIMEOUT 1460

#define TRUE true
#define FALSE false

// Calling-convention / inlining decorations.
#define CALLBACK __stdcall
#define FORCEINLINE __forceinline

// Timeout value meaning "wait forever".
#define INFINITE 0xFFFFFFFF

#define ZeroMemory(Destination,Length) memset((Destination),0,(Length))

// Number of elements in a statically-sized array.
#ifndef _countof
#define _countof(_array) (sizeof(_array)/sizeof(_array[0]))
#endif

// NOTE: min/max as macros evaluate their arguments more than once; do not pass
// expressions with side effects.
#ifndef min
#define min(a,b) (((a) < (b)) ? (a) : (b))
#endif

#ifndef max
#define max(a,b) (((a) > (b)) ? (a) : (b))
#endif

// Compile-time assertion with the condition text as the diagnostic message.
#define C_ASSERT(cond) static_assert( cond, #cond )

#define UNREFERENCED_PARAMETER(P) (void)(P)

#ifdef PLATFORM_UNIX
// Map MSVC "secure" CRT formatting functions onto POSIX equivalents.
// NOTE(review): _vsnprintf_s drops the 'count' argument, so truncation
// semantics differ from the Windows CRT — confirm callers don't rely on it.
#define _vsnprintf_s(string, sizeInBytes, count, format, args) vsnprintf(string, sizeInBytes, format, args)
#define sprintf_s snprintf
#define swprintf_s swprintf
#endif

// TCHAR-style generic-text mappings (wide vs. narrow).
#ifdef UNICODE
#define _tcslen wcslen
#define _tcscpy wcscpy
#define _stprintf_s swprintf_s
#define _tfopen _wfopen
#else
#define _tcslen strlen
#define _tcscpy strcpy
#define _stprintf_s sprintf_s
#define _tfopen fopen
#endif

#define WINAPI __stdcall

// Thread entry-point signature used by the CreateThread-style PAL APIs.
typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter);

// Wait-result codes (subset of the Win32 WaitForSingleObject return values).
#define WAIT_OBJECT_0 0
#define WAIT_TIMEOUT 258
#define WAIT_FAILED 0xFFFFFFFF
123
#if defined(_MSC_VER)
#if defined(_ARM_)

// ARM32: no cheap yield hint is exposed here, so YieldProcessor is a no-op.
__forceinline void YieldProcessor() { }
// Emit the raw Thumb-2 encoding of "dmb sy" (0xF3BF 0x8F5F) as a full memory barrier.
extern "C" void __emit(const unsigned __int32 opcode);
#pragma intrinsic(__emit)
#define MemoryBarrier() { __emit(0xF3BF); __emit(0x8F5F); }

#elif defined(_ARM64_)

// ARM64: use the "yield" hint instruction for spin waits.
extern "C" void __yield(void);
#pragma intrinsic(__yield)
__forceinline void YieldProcessor() { __yield();}

// Full-system data memory barrier via the __dmb intrinsic.
extern "C" void __dmb(const unsigned __int32 _Type);
#pragma intrinsic(__dmb)
#define MemoryBarrier() { __dmb(_ARM64_BARRIER_SY); }

#elif defined(_AMD64_)

// AMD64: "pause" for spin-wait loops, "mfence" as the full barrier.
extern "C" void
_mm_pause (
    void
    );

extern "C" void
_mm_mfence (
    void
    );

#pragma intrinsic(_mm_pause)
#pragma intrinsic(_mm_mfence)

#define YieldProcessor _mm_pause
#define MemoryBarrier _mm_mfence

#elif defined(_X86_)

// x86: "rep nop" is the encoding of the "pause" instruction.
#define YieldProcessor() __asm { rep nop }

// A locked xchg serves as a full memory barrier on x86.
__forceinline void MemoryBarrier()
{
    int32_t Barrier;
    __asm {
        xchg Barrier, eax
    }
}

#else // !_ARM_ && !_AMD64_ && !_X86_
#error Unsupported architecture
#endif
#else // _MSC_VER
// Non-MSVC builds: YieldProcessor/MemoryBarrier are expected to come from elsewhere.
#endif // _MSC_VER

// Identifies a processor by group and index within the group (mirrors the Win32 struct).
typedef struct _PROCESSOR_NUMBER {
    uint16_t Group;
    uint8_t Number;
    uint8_t Reserved;
} PROCESSOR_NUMBER, *PPROCESSOR_NUMBER;
184
185 #endif // _INC_WINDOWS
186
187 // -----------------------------------------------------------------------------------------------------------
188 //
189 // The subset of the contract code required by the GC/HandleTable sources. If Redhawk moves to support
190 // contracts these local definitions will disappear and be replaced by real implementations.
191 //
192
// All contract annotations compile away to nothing in this environment.
#define LEAF_CONTRACT
#define LIMITED_METHOD_CONTRACT
#define LIMITED_METHOD_DAC_CONTRACT
#define WRAPPER_CONTRACT
#define WRAPPER_NO_CONTRACT
#define STATIC_CONTRACT_LEAF
#define STATIC_CONTRACT_DEBUG_ONLY
#define STATIC_CONTRACT_NOTHROW
#define STATIC_CONTRACT_CAN_TAKE_LOCK
#define STATIC_CONTRACT_SO_TOLERANT
#define STATIC_CONTRACT_GC_NOTRIGGER
#define STATIC_CONTRACT_MODE_COOPERATIVE
#define CONTRACTL
#define CONTRACT(_expr)
#define CONTRACT_VOID
#define THROWS
#define NOTHROW
#define INSTANCE_CHECK
#define MODE_COOPERATIVE
#define MODE_ANY
#define SO_INTOLERANT
#define SO_TOLERANT
#define GC_TRIGGERS
#define GC_NOTRIGGER
#define CAN_TAKE_LOCK
#define SUPPORTS_DAC
#define FORBID_FAULT
#define CONTRACTL_END
#define CONTRACT_END
#define TRIGGERSGC()
#define WRAPPER(_contract)
#define DISABLED(_contract)
#define INJECT_FAULT(_expr)
// Fault-injection category identifiers retain real values (referenced by GC/HandleTable code).
#define INJECTFAULT_HANDLETABLE 0x1
#define INJECTFAULT_GCHEAP 0x2
#define FAULT_NOT_FATAL()
#define BEGIN_DEBUG_ONLY_CODE
#define END_DEBUG_ONLY_CODE
#define BEGIN_GETTHREAD_ALLOWED
#define END_GETTHREAD_ALLOWED
#define LEAF_DAC_CONTRACT
#define PRECONDITION(_expr)
#define POSTCONDITION(_expr)
// RETURN inside a CONTRACT block maps to a plain return statement.
#define RETURN return
#define CONDITIONAL_CONTRACT_VIOLATION(_violation, _expr)
238
239 // -----------------------------------------------------------------------------------------------------------
240 //
241 // Data access macros
242 //
#ifdef DACCESS_COMPILE
#include "daccess.h"
#else // DACCESS_COMPILE
// Non-DAC build: the data-access marshaling macros collapse to plain host
// pointers and variables.
typedef uintptr_t TADDR;

#define PTR_TO_TADDR(ptr) ((TADDR)(ptr))

// DAC pointer wrappers; plain pointers in non-DAC builds.
#define DPTR(type) type*
#define SPTR(type) type*

// Global variable declaration / definition / initialized-definition helpers.
#define GVAL_DECL(type, var) \
    extern type var
#define GVAL_IMPL(type, var) \
    type var
#define GVAL_IMPL_INIT(type, var, init) \
    type var = init

// Global pointer helpers.
#define GPTR_DECL(type, var) \
    extern type* var
#define GPTR_IMPL(type, var) \
    type* var
#define GPTR_IMPL_INIT(type, var, init) \
    type* var = init

// Static (class-scope) pointer helpers.
#define SPTR_DECL(type, var) \
    static type* var
#define SPTR_IMPL(type, cls, var) \
    type * cls::var
#define SPTR_IMPL_NS(type, ns, cls, var) \
    type * cls::var
#define SPTR_IMPL_NS_INIT(type, ns, cls, var, init) \
    type * cls::var = init

// Static (class-scope) value helpers.
#define SVAL_DECL(type, var) \
    static type var
#define SVAL_IMPL_NS(type, ns, cls, var) \
    type cls::var
#define SVAL_IMPL_NS_INIT(type, ns, cls, var, init) \
    type cls::var = init

// Global array helpers.
#define GARY_DECL(type, var, size) \
    extern type var[size]
#define GARY_IMPL(type, var, size) \
    type var[size]

struct _DacGlobals;
#endif // DACCESS_COMPILE

typedef DPTR(size_t) PTR_size_t;
typedef DPTR(uint8_t) PTR_uint8_t;
293
// -----------------------------------------------------------------------------------------------------------

// Natural alignment for heap data.
#define DATA_ALIGNMENT sizeof(uintptr_t)

#define RAW_KEYWORD(x) x

#define DECLSPEC_ALIGN(x) __declspec(align(x))

// NOTE(review): hard-coded 4K page size — not valid on platforms with larger
// pages (e.g. 16K/64K); confirm before reuse on such targets.
#define OS_PAGE_SIZE 4096

// Map the various assertion flavors onto the local ASSERT macro.
#ifndef _ASSERTE
#define _ASSERTE(_expr) ASSERT(_expr)
#endif

#define CONSISTENCY_CHECK(_expr) ASSERT(_expr)

#define PREFIX_ASSUME(cond) ASSERT(cond)

// Fatal errors simply assert in this environment.
#define EEPOLICY_HANDLE_FATAL_ERROR(error) ASSERT(!"EEPOLICY_HANDLE_FATAL_ERROR")

// Unsigned 64-bit literal suffix helper.
#define UI64(_literal) _literal##ULL
315
// Forward declarations of the core runtime object types.
class ObjHeader;
class MethodTable;
class Object;
class ArrayBase;

// Various types used to refer to object references or handles. This will get more complex if we decide
// Redhawk wants to wrap object references in the debug build.
typedef DPTR(Object) PTR_Object;
typedef DPTR(PTR_Object) PTR_PTR_Object;

typedef PTR_Object OBJECTREF;
typedef PTR_PTR_Object PTR_OBJECTREF;
typedef PTR_Object _UNCHECKED_OBJECTREF;
typedef PTR_PTR_Object PTR_UNCHECKED_OBJECTREF;

#ifndef DACCESS_COMPILE
// The struct exists only to give OBJECTHANDLE a distinct, opaque pointer type.
struct OBJECTHANDLE__
{
    void* unused;
};
typedef struct OBJECTHANDLE__* OBJECTHANDLE;
#else
typedef TADDR OBJECTHANDLE;
#endif

// With no object reference wrapping the following macros are very simple.
#define ObjectToOBJECTREF(_obj) (OBJECTREF)(_obj)
#define OBJECTREFToObject(_obj) (Object*)(_obj)

// Validation is a no-op here; the expression is kept so side effects still occur.
#define VALIDATEOBJECTREF(_objref) _objref;

#define VOLATILE(T) T volatile
348
//
// This code is extremely compiler- and CPU-specific, and will need to be altered to
// support new compilers and/or CPUs. Here we enforce that we can only compile using
// VC++, or Clang on x86, AMD64, ARM and ARM64.
//
// Selects the VOLATILE_MEMORY_BARRIER() implementation used by the
// VolatileLoad/VolatileStore helpers below.
//
#if !defined(_MSC_VER) && !defined(__clang__)
#error The Volatile type is currently only defined for Visual C++ and Clang
#endif

#if defined(__clang__) && !defined(_X86_) && !defined(_AMD64_) && !defined(_ARM_) && !defined(_ARM64_) && !defined(_WASM_)
#error The Volatile type is currently only defined for Clang when targeting x86, AMD64, ARM, ARM64 or WASM
#endif

#if defined(__clang__)
#if defined(_ARM_) || defined(_ARM64_)
// This is functionally equivalent to the MemoryBarrier() macro used on ARM on Windows.
#define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb sy" : : : "memory")
#else
//
// For Clang, we prevent reordering by the compiler by inserting the following after a volatile
// load (to prevent subsequent operations from moving before the read), and before a volatile
// write (to prevent prior operations from moving past the write). We don't need to do anything
// special to prevent CPU reorderings, because the x86 and AMD64 architectures are already
// sufficiently constrained for our purposes. If we ever need to run on weaker CPU architectures
// (such as PowerPC), then we will need to do more work.
//
// Please do not use this macro outside of this file. It is subject to change or removal without
// notice.
//
#define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory")
#endif // !_ARM_
#elif defined(_ARM_) && _ISO_VOLATILE
// ARM has a very weak memory model and very few tools to control that model. We're forced to perform a full
// memory barrier to preserve the volatile semantics. Technically this is only necessary on MP systems but we
// currently don't have a cheap way to determine the number of CPUs from this header file. Revisit this if it
// turns out to be a performance issue for the uni-proc case.
#define VOLATILE_MEMORY_BARRIER() MemoryBarrier()
#else
//
// On VC++, reorderings at the compiler and machine level are prevented by the use of the
// "volatile" keyword in VolatileLoad and VolatileStore. This should work on any CPU architecture
// targeted by VC++ with /iso_volatile-.
//
#define VOLATILE_MEMORY_BARRIER()
#endif
394
//
// VolatileLoad loads a T from a pointer to T. It is guaranteed that this load will not be optimized
// away by the compiler, and that any operation that occurs after this load, in program order, will
// not be moved before this load. In general it is not guaranteed that the load will be atomic, though
// this is the case for most aligned scalar data types. If you need atomic loads or stores, you need
// to consult the compiler and CPU manuals to find which circumstances allow atomicity.
//
template<typename T>
inline
T VolatileLoad(T const * pt)
{
    // Volatile-qualified read prevents the compiler from eliding/caching the load;
    // the trailing barrier keeps later operations from moving above it.
    T val = *(T volatile const *)pt;
    VOLATILE_MEMORY_BARRIER();
    return val;
}

// Like VolatileLoad, but without the trailing barrier: only the load itself is
// protected from elision; no ordering with surrounding operations is enforced.
template<typename T>
inline
T VolatileLoadWithoutBarrier(T const * pt)
{
#ifndef DACCESS_COMPILE
    T val = *(T volatile const *)pt;
#else
    // DAC builds: plain read (presumably operates on marshaled target memory,
    // so the volatile qualifier is unnecessary — confirm against daccess.h).
    T val = *pt;
#endif
    return val;
}

//
// VolatileStore stores a T into the target of a pointer to T. It is guaranteed that this store will
// not be optimized away by the compiler, and that any operation that occurs before this store, in program
// order, will not be moved after this store. In general, it is not guaranteed that the store will be
// atomic, though this is the case for most aligned scalar data types. If you need atomic loads or stores,
// you need to consult the compiler and CPU manuals to find which circumstances allow atomicity.
//
template<typename T>
inline
void VolatileStore(T* pt, T val)
{
    // The leading barrier keeps earlier operations from moving below the store.
    VOLATILE_MEMORY_BARRIER();
    *(T volatile *)pt = val;
}
437
// Globals defined by the runtime (declarations only; definitions live elsewhere).
extern GCSystemInfo g_SystemInfo;

// Method table used for free-list entries in the GC heap.
extern MethodTable * g_pFreeObjectMethodTable;

// NOTE(review): presumably non-zero when threads must trap on returning to
// cooperative mode (GC pending) — confirm against the runtime definition.
extern int32_t g_TrapReturningThreads;

//
// Locks
//

struct gc_alloc_context;
class Thread;

// Returns the Thread object for the current thread (implemented by the runtime).
Thread * GetThread();

// Callback signature used when scanning handle tables.
typedef void (CALLBACK *HANDLESCANPROC)(PTR_UNCHECKED_OBJECTREF pref, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2);

// True when the current thread is a dedicated GC thread (implemented by the runtime).
bool IsGCSpecialThread();
456
// Debug-only query: this environment has no "special EE threads", so the
// answer is unconditionally false.
inline bool dbgOnly_IsSpecialEEThread()
{
    const bool isSpecialThread = false;
    return isSpecialThread;
}
461
// No-op: thread-type tracking via FLS is not implemented here.
#define ClrFlsSetThreadType(type)

//
// Performance logging
//

#define COUNTER_ONLY(x)

//#include "etmdummy.h"
//#define ETW_EVENT_ENABLED(e,f) false

namespace ETW
{
    // Classification of GC roots reported to ETW tracing.
    typedef enum _GC_ROOT_KIND {
        GC_ROOT_STACK = 0,
        GC_ROOT_FQ = 1,
        GC_ROOT_HANDLES = 2,
        GC_ROOT_OLDER = 3,
        GC_ROOT_SIZEDREF = 4,
        GC_ROOT_OVERFLOW = 5
    } GC_ROOT_KIND;
};

//
// Logging
//

// printf-style unconditional log output (implemented by the runtime).
void LogSpewAlways(const char *fmt, ...);

// Default GC logging verbosity level.
#define DEFAULT_GC_PRN_LVL 3

// -----------------------------------------------------------------------------------------------------------

// True when called on a GC thread (implemented by the runtime).
bool IsGCThread();
496
// Minimal emulation of the CLR configuration reader. Only the configuration
// keys queried by the GC/HandleTable code are represented.
class CLRConfig
{
public:
    // Known configuration keys. The UNSUPPORTED_/EXTERNAL_/INTERNAL_ prefixes
    // mirror the CLR's key-category naming.
    enum CLRConfigTypes
    {
        UNSUPPORTED_GCLogEnabled,
        UNSUPPORTED_GCLogFile,
        UNSUPPORTED_GCLogFileSize,
        UNSUPPORTED_GCConfigLogEnabled,
        UNSUPPORTED_GCConfigLogFile,
        UNSUPPORTED_BGCSpinCount,
        UNSUPPORTED_BGCSpin,
        EXTERNAL_GCStressStart,
        INTERNAL_GCStressStartAtJit,
        INTERNAL_DbgDACSkipVerifyDlls,
        Config_COUNT
    };

    // Numeric- and string-valued lookups are keyed by the same enum.
    typedef CLRConfigTypes ConfigDWORDInfo;
    typedef CLRConfigTypes ConfigStringInfo;

    // Returns the numeric value for eType (implemented by the runtime).
    // Retrieves a string value into *outVal; ownership semantics are defined
    // by the implementation — confirm before freeing the returned buffer.
    static uint32_t GetConfigValue(ConfigDWORDInfo eType);
    static HRESULT GetConfigValue(ConfigStringInfo /*eType*/, __out_z TCHAR * * outVal);
};
521
// Returns true when 'val' fits in an unsigned 8-bit integer, i.e. truncating
// to uint8_t and widening back is lossless.
inline bool FitsInU1(uint64_t val)
{
    const uint64_t truncated = (uint64_t)(uint8_t)val;
    return truncated == val;
}
526
527 // -----------------------------------------------------------------------------------------------------------
528 //
529 // AppDomain emulation. The we don't have these in Redhawk so instead we emulate the bare minimum of the API
530 // touched by the GC/HandleTable and pretend we have precisely one (default) appdomain.
531 //
532
533 #define RH_DEFAULT_DOMAIN_ID 1
534
535 struct ADIndex
536 {
537 DWORD m_dwIndex;
538
ADIndexADIndex539 ADIndex () : m_dwIndex(RH_DEFAULT_DOMAIN_ID) {}
ADIndexADIndex540 explicit ADIndex (DWORD id) : m_dwIndex(id) {}
541 BOOL operator==(const ADIndex& ad) const { return m_dwIndex == ad.m_dwIndex; }
542 BOOL operator!=(const ADIndex& ad) const { return m_dwIndex != ad.m_dwIndex; }
543 };
544
545 class AppDomain
546 {
547 public:
GetIndex()548 ADIndex GetIndex() { return ADIndex(RH_DEFAULT_DOMAIN_ID); }
IsRudeUnload()549 BOOL IsRudeUnload() { return FALSE; }
NoAccessToHandleTable()550 BOOL NoAccessToHandleTable() { return FALSE; }
DecNumSizedRefHandles()551 void DecNumSizedRefHandles() {}
552 };
553
554 class SystemDomain
555 {
556 public:
System()557 static SystemDomain *System() { return NULL; }
GetAppDomainAtIndex(ADIndex)558 static AppDomain *GetAppDomainAtIndex(ADIndex /*index*/) { return (AppDomain *)-1; }
AppDomainBeingUnloaded()559 static AppDomain *AppDomainBeingUnloaded() { return NULL; }
DefaultDomain()560 AppDomain *DefaultDomain() { return NULL; }
GetTotalNumSizedRefHandles()561 DWORD GetTotalNumSizedRefHandles() { return 0; }
562 };
563
#ifdef STRESS_HEAP
namespace GCStressPolicy
{
    // Count of outstanding requests to disable GC stress; stress is enabled
    // only while the count is zero.
    // NOTE(review): declared static in a header, so each translation unit gets
    // its own counter — confirm that is intended.
    static volatile int32_t s_cGcStressDisables;

    inline bool IsEnabled() { return s_cGcStressDisables == 0; }
    inline void GlobalDisable() { Interlocked::Increment(&s_cGcStressDisables); }
    inline void GlobalEnable() { Interlocked::Decrement(&s_cGcStressDisables); }
}

// Points at which GC stress can trigger; only the "any" bucket is used here.
enum gcs_trigger_points
{
    cfg_any,
};

template <enum gcs_trigger_points tp>
class GCStress
{
public:
    // Stress is enabled whenever a non-zero GC stress level is configured.
    static inline bool IsEnabled()
    {
        return g_pConfig->GetGCStressLevel() != 0;
    }
};
#endif // STRESS_HEAP
589
// NUMA topology queries (declarations only; implemented elsewhere).
class NumaNodeInfo
{
public:
    static bool CanEnableGCNumaAware();
    static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number);
    static bool GetNumaProcessorNodeEx(PPROCESSOR_NUMBER proc_no, uint16_t * node_no);
};

// Processor-group queries (declarations only; implemented elsewhere).
class CPUGroupInfo
{
public:
    static bool CanEnableGCCPUGroups();
    static uint32_t GetNumActiveProcessors();
    static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number);
};
605
606
607 #endif // __GCENV_BASE_INCLUDED__
608