// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//


//
// Provides an abstraction over platform specific calling conventions (specifically, the calling convention
// utilized by the JIT on that platform). The caller enumerates each argument of a signature in turn, and is
// provided with information mapping that argument into registers and/or stack locations.
//
13 #ifndef __CALLING_CONVENTION_INCLUDED
14 #define __CALLING_CONVENTION_INCLUDED
15
16 BOOL IsRetBuffPassedAsFirstArg();
17
// Describes how a single argument is laid out in registers and/or stack locations when given as an input to a
// managed method as part of a larger signature.
//
// Locations are split into floating point registers, general registers and stack offsets. Registers are
// obviously architecture dependent but are represented as a zero-based index into the usual sequence in which
// such registers are allocated for input on the platform in question. For instance:
//      X86: 0 == ecx, 1 == edx
//      ARM: 0 == r0, 1 == r1, 2 == r2 etc.
//
// Stack locations are represented as offsets from the stack pointer (at the point of the call). The offset is
// given as an index of a pointer sized slot. Similarly the size of data on the stack is given in slot-sized
// units. For instance, given an index of 2 and a size of 3:
//      X86:   argument starts at [ESP + 8] and is 12 bytes long
//      AMD64: argument starts at [RSP + 16] and is 24 bytes long
//
// The structure is flexible enough to describe an argument that is split over several (consecutive) registers
// and possibly on to the stack as well.
struct ArgLocDesc
{
    int     m_idxFloatReg;        // First floating point register used (or -1)
    int     m_cFloatReg;          // Count of floating point registers used (or 0)

    int     m_idxGenReg;          // First general register used (or -1)
    int     m_cGenReg;            // Count of general registers used (or 0)

    int     m_idxStack;           // First stack slot used (or -1)
    int     m_cStack;             // Count of stack slots used (or 0)

#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)

    EEClass* m_eeClass;           // For structs passed in register, it points to the EEClass of the struct

#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING

#if defined(_TARGET_ARM64_)
    bool    m_isSinglePrecision;  // For determining if HFA is single or double
                                  // precision
#endif // defined(_TARGET_ARM64_)

#if defined(_TARGET_ARM_)
    BOOL    m_fRequires64BitAlignment; // True if the argument should always be aligned (in registers or on the stack)
#endif

    // Construct in the non-placed state (delegates to Init so both paths share one definition
    // of "empty").
    ArgLocDesc()
    {
        Init();
    }

    // Initialize to represent a non-placed argument (no register or stack slots referenced).
    void Init()
    {
        m_idxFloatReg = -1;
        m_cFloatReg = 0;
        m_idxGenReg = -1;
        m_cGenReg = 0;
        m_idxStack = -1;
        m_cStack = 0;
#if defined(_TARGET_ARM_)
        m_fRequires64BitAlignment = FALSE;
#endif
#if defined(_TARGET_ARM64_)
        m_isSinglePrecision = FALSE;
#endif // defined(_TARGET_ARM64_)
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
        m_eeClass = NULL;
#endif
    }
};
86
87 //
88 // TransitionBlock is layout of stack frame of method call, saved argument registers and saved callee saved registers. Even though not
89 // all fields are used all the time, we use uniform form for simplicity.
90 //
91 struct TransitionBlock
92 {
93 #if defined(_TARGET_X86_)
94 ArgumentRegisters m_argumentRegisters;
95 CalleeSavedRegisters m_calleeSavedRegisters;
96 TADDR m_ReturnAddress;
97 #elif defined(_TARGET_AMD64_)
98 #ifdef UNIX_AMD64_ABI
99 ArgumentRegisters m_argumentRegisters;
100 #endif
101 CalleeSavedRegisters m_calleeSavedRegisters;
102 TADDR m_ReturnAddress;
103 #elif defined(_TARGET_ARM_)
104 union {
105 CalleeSavedRegisters m_calleeSavedRegisters;
106 // alias saved link register as m_ReturnAddress
107 struct {
108 INT32 r4, r5, r6, r7, r8, r9, r10;
109 INT32 r11;
110 TADDR m_ReturnAddress;
111 };
112 };
113 ArgumentRegisters m_argumentRegisters;
114 #elif defined(_TARGET_ARM64_)
115 union {
116 CalleeSavedRegisters m_calleeSavedRegisters;
117 struct {
118 INT64 x29; // frame pointer
119 TADDR m_ReturnAddress;
120 INT64 x19, x20, x21, x22, x23, x24, x25, x26, x27, x28;
121 };
122 };
123 ArgumentRegisters m_argumentRegisters;
124 TADDR padding; // Keep size of TransitionBlock as multiple of 16-byte. Simplifies code in PROLOG_WITH_TRANSITION_BLOCK
125 #else
126 PORTABILITY_ASSERT("TransitionBlock");
127 #endif
128
129 // The transition block should define everything pushed by callee. The code assumes in number of places that
130 // end of the transition block is caller's stack pointer.
131
GetOffsetOfReturnAddressTransitionBlock132 static int GetOffsetOfReturnAddress()
133 {
134 LIMITED_METHOD_CONTRACT;
135 return offsetof(TransitionBlock, m_ReturnAddress);
136 }
137
GetOffsetOfArgsTransitionBlock138 static BYTE GetOffsetOfArgs()
139 {
140 LIMITED_METHOD_CONTRACT;
141 return sizeof(TransitionBlock);
142 }
143
GetOffsetOfArgumentRegistersTransitionBlock144 static int GetOffsetOfArgumentRegisters()
145 {
146 LIMITED_METHOD_CONTRACT;
147 int offs;
148 #if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
149 offs = sizeof(TransitionBlock);
150 #else
151 offs = offsetof(TransitionBlock, m_argumentRegisters);
152 #endif
153 return offs;
154 }
155
IsStackArgumentOffsetTransitionBlock156 static BOOL IsStackArgumentOffset(int offset)
157 {
158 LIMITED_METHOD_CONTRACT;
159
160 #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
161 return offset >= sizeof(TransitionBlock);
162 #else
163 int ofsArgRegs = GetOffsetOfArgumentRegisters();
164
165 return offset >= (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE);
166 #endif
167 }
168
IsArgumentRegisterOffsetTransitionBlock169 static BOOL IsArgumentRegisterOffset(int offset)
170 {
171 LIMITED_METHOD_CONTRACT;
172
173 int ofsArgRegs = GetOffsetOfArgumentRegisters();
174
175 return offset >= ofsArgRegs && offset < (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE);
176 }
177
178 #ifndef _TARGET_X86_
GetArgumentIndexFromOffsetTransitionBlock179 static UINT GetArgumentIndexFromOffset(int offset)
180 {
181 LIMITED_METHOD_CONTRACT;
182
183 #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
184 _ASSERTE(offset != TransitionBlock::StructInRegsOffset);
185 #endif
186 return (offset - GetOffsetOfArgumentRegisters()) / sizeof(TADDR);
187 }
188
GetStackArgumentIndexFromOffsetTransitionBlock189 static UINT GetStackArgumentIndexFromOffset(int offset)
190 {
191 LIMITED_METHOD_CONTRACT;
192
193 return (offset - TransitionBlock::GetOffsetOfArgs()) / STACK_ELEM_SIZE;
194 }
195
196 #endif
197
198 #ifdef CALLDESCR_FPARGREGS
IsFloatArgumentRegisterOffsetTransitionBlock199 static BOOL IsFloatArgumentRegisterOffset(int offset)
200 {
201 LIMITED_METHOD_CONTRACT;
202 #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
203 return (offset != TransitionBlock::StructInRegsOffset) && (offset < 0);
204 #else
205 return offset < 0;
206 #endif
207 }
208
209 // Check if an argument has floating point register, that means that it is
210 // either a floating point argument or a struct passed in registers that
211 // has a floating point member.
HasFloatRegisterTransitionBlock212 static BOOL HasFloatRegister(int offset, ArgLocDesc* argLocDescForStructInRegs)
213 {
214 LIMITED_METHOD_CONTRACT;
215 #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
216 if (offset == TransitionBlock::StructInRegsOffset)
217 {
218 return argLocDescForStructInRegs->m_cFloatReg > 0;
219 }
220 #endif
221 return offset < 0;
222 }
223
GetOffsetOfFloatArgumentRegistersTransitionBlock224 static int GetOffsetOfFloatArgumentRegisters()
225 {
226 LIMITED_METHOD_CONTRACT;
227 return -GetNegSpaceSize();
228 }
229 #endif // CALLDESCR_FPARGREGS
230
GetOffsetOfCalleeSavedRegistersTransitionBlock231 static int GetOffsetOfCalleeSavedRegisters()
232 {
233 LIMITED_METHOD_CONTRACT;
234 return offsetof(TransitionBlock, m_calleeSavedRegisters);
235 }
236
GetNegSpaceSizeTransitionBlock237 static int GetNegSpaceSize()
238 {
239 LIMITED_METHOD_CONTRACT;
240 int negSpaceSize = 0;
241 #ifdef CALLDESCR_FPARGREGS
242 negSpaceSize += sizeof(FloatArgumentRegisters);
243 #endif
244 #ifdef _TARGET_ARM_
245 negSpaceSize += sizeof(TADDR); // padding to make FloatArgumentRegisters address 8-byte aligned
246 #endif
247 return negSpaceSize;
248 }
249
250 static const int InvalidOffset = -1;
251 #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
252 // Special offset value to represent struct passed in registers. Such a struct can span both
253 // general purpose and floating point registers, so it can have two different offsets.
254 static const int StructInRegsOffset = -2;
255 #endif
256 };
257
258 //-----------------------------------------------------------------------
259 // ArgIterator is helper for dealing with calling conventions.
260 // It is tightly coupled with TransitionBlock. It uses offsets into
261 // TransitionBlock to represent argument locations for efficiency
262 // reasons. Alternatively, it can also return ArgLocDesc for less
263 // performance critical code.
264 //
265 // The ARGITERATOR_BASE argument of the template is provider of the parsed
266 // method signature. Typically, the arg iterator works on top of MetaSig.
267 // Reflection invoke uses alternative implementation to save signature parsing
268 // time because of it has the parsed signature available.
269 //-----------------------------------------------------------------------
270 template<class ARGITERATOR_BASE>
271 class ArgIteratorTemplate : public ARGITERATOR_BASE
272 {
273 public:
274 //------------------------------------------------------------
275 // Constructor
276 //------------------------------------------------------------
ArgIteratorTemplate()277 ArgIteratorTemplate()
278 {
279 WRAPPER_NO_CONTRACT;
280 m_dwFlags = 0;
281 }
282
SizeOfArgStack()283 UINT SizeOfArgStack()
284 {
285 WRAPPER_NO_CONTRACT;
286 if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED))
287 ForceSigWalk();
288 _ASSERTE((m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED) != 0);
289 return m_nSizeOfArgStack;
290 }
291
292 // For use with ArgIterator. This function computes the amount of additional
293 // memory required above the TransitionBlock. The parameter offsets
294 // returned by ArgIteratorTemplate::GetNextOffset are relative to a
295 // FramedMethodFrame, and may be in either of these regions.
SizeOfFrameArgumentArray()296 UINT SizeOfFrameArgumentArray()
297 {
298 WRAPPER_NO_CONTRACT;
299
300 UINT size = SizeOfArgStack();
301
302 #if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
303 // The argument registers are not included in the stack size on AMD64
304 size += ARGUMENTREGISTERS_SIZE;
305 #endif
306
307 return size;
308 }
309
310 //------------------------------------------------------------------------
311
312 #ifdef _TARGET_X86_
CbStackPop()313 UINT CbStackPop()
314 {
315 WRAPPER_NO_CONTRACT;
316
317 if (this->IsVarArg())
318 return 0;
319 else
320 return SizeOfArgStack();
321 }
322 #endif
323
324 // Is there a hidden parameter for the return parameter?
325 //
HasRetBuffArg()326 BOOL HasRetBuffArg()
327 {
328 WRAPPER_NO_CONTRACT;
329 if (!(m_dwFlags & RETURN_FLAGS_COMPUTED))
330 ComputeReturnFlags();
331 return (m_dwFlags & RETURN_HAS_RET_BUFFER);
332 }
333
GetFPReturnSize()334 UINT GetFPReturnSize()
335 {
336 WRAPPER_NO_CONTRACT;
337 if (!(m_dwFlags & RETURN_FLAGS_COMPUTED))
338 ComputeReturnFlags();
339 return m_dwFlags >> RETURN_FP_SIZE_SHIFT;
340 }
341
342 #ifdef _TARGET_X86_
343 //=========================================================================
344 // Indicates whether an argument is to be put in a register using the
345 // default IL calling convention. This should be called on each parameter
346 // in the order it appears in the call signature. For a non-static method,
347 // this function should also be called once for the "this" argument, prior
348 // to calling it for the "real" arguments. Pass in a typ of ELEMENT_TYPE_CLASS.
349 //
350 // *pNumRegistersUsed: [in,out]: keeps track of the number of argument
351 // registers assigned previously. The caller should
352 // initialize this variable to 0 - then each call
353 // will update it.
354 //
355 // typ: the signature type
356 //=========================================================================
IsArgumentInRegister(int * pNumRegistersUsed,CorElementType typ)357 static BOOL IsArgumentInRegister(int * pNumRegistersUsed, CorElementType typ)
358 {
359 LIMITED_METHOD_CONTRACT;
360 if ( (*pNumRegistersUsed) < NUM_ARGUMENT_REGISTERS) {
361 if (gElementTypeInfo[typ].m_enregister) {
362 (*pNumRegistersUsed)++;
363 return(TRUE);
364 }
365 }
366
367 return(FALSE);
368 }
369 #endif // _TARGET_X86_
370
371 #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
372
373 // Note that this overload does not handle varargs
IsArgPassedByRef(TypeHandle th)374 static BOOL IsArgPassedByRef(TypeHandle th)
375 {
376 LIMITED_METHOD_CONTRACT;
377
378 _ASSERTE(!th.IsNull());
379
380 // This method only works for valuetypes. It includes true value types,
381 // primitives, enums and TypedReference.
382 _ASSERTE(th.IsValueType());
383
384 size_t size = th.GetSize();
385 #ifdef _TARGET_AMD64_
386 return IsArgPassedByRef(size);
387 #elif defined(_TARGET_ARM64_)
388 // Composites greater than 16 bytes are passed by reference
389 return ((size > ENREGISTERED_PARAMTYPE_MAXSIZE) && !th.IsHFA());
390 #else
391 PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef");
392 return FALSE;
393 #endif
394 }
395
396 #ifdef _TARGET_AMD64_
397 // This overload should only be used in AMD64-specific code only.
IsArgPassedByRef(size_t size)398 static BOOL IsArgPassedByRef(size_t size)
399 {
400 LIMITED_METHOD_CONTRACT;
401
402 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
403 // No arguments are passed by reference on AMD64 on Unix
404 return FALSE;
405 #else
406 // If the size is bigger than ENREGISTERED_PARAM_TYPE_MAXSIZE, or if the size is NOT a power of 2, then
407 // the argument is passed by reference.
408 return (size > ENREGISTERED_PARAMTYPE_MAXSIZE) || ((size & (size-1)) != 0);
409 #endif
410 }
411 #endif // _TARGET_AMD64_
412
413 // This overload should be used for varargs only.
IsVarArgPassedByRef(size_t size)414 static BOOL IsVarArgPassedByRef(size_t size)
415 {
416 LIMITED_METHOD_CONTRACT;
417
418 #ifdef _TARGET_AMD64_
419 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
420 PORTABILITY_ASSERT("ArgIteratorTemplate::IsVarArgPassedByRef");
421 return FALSE;
422 #else // FEATURE_UNIX_AMD64_STRUCT_PASSING
423 return IsArgPassedByRef(size);
424 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
425
426 #else
427 return (size > ENREGISTERED_PARAMTYPE_MAXSIZE);
428 #endif
429 }
430
IsArgPassedByRef()431 BOOL IsArgPassedByRef()
432 {
433 LIMITED_METHOD_CONTRACT;
434
435 #ifdef _TARGET_AMD64_
436 return IsArgPassedByRef(m_argSize);
437 #elif defined(_TARGET_ARM64_)
438 if (m_argType == ELEMENT_TYPE_VALUETYPE)
439 {
440 _ASSERTE(!m_argTypeHandle.IsNull());
441 return ((m_argSize > ENREGISTERED_PARAMTYPE_MAXSIZE) && (!m_argTypeHandle.IsHFA() || this->IsVarArg()));
442 }
443 return FALSE;
444 #else
445 PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef");
446 return FALSE;
447 #endif
448 }
449
450 #endif // ENREGISTERED_PARAMTYPE_MAXSIZE
451
452 //------------------------------------------------------------
453 // Return the offsets of the special arguments
454 //------------------------------------------------------------
455
456 static int GetThisOffset();
457
458 int GetRetBuffArgOffset();
459 int GetVASigCookieOffset();
460 int GetParamTypeArgOffset();
461
462 //------------------------------------------------------------
463 // Each time this is called, this returns a byte offset of the next
464 // argument from the TransitionBlock* pointer.
465 //
466 // Returns TransitionBlock::InvalidOffset once you've hit the end
467 // of the list.
468 //------------------------------------------------------------
469 int GetNextOffset();
470
471 CorElementType GetArgType(TypeHandle *pTypeHandle = NULL)
472 {
473 LIMITED_METHOD_CONTRACT;
474 if (pTypeHandle != NULL)
475 {
476 *pTypeHandle = m_argTypeHandle;
477 }
478 return m_argType;
479 }
480
GetArgSize()481 int GetArgSize()
482 {
483 LIMITED_METHOD_CONTRACT;
484 return m_argSize;
485 }
486
487 void ForceSigWalk();
488
489 #ifndef _TARGET_X86_
490 // Accessors for built in argument descriptions of the special implicit parameters not mentioned directly
491 // in signatures (this pointer and the like). Whether or not these can be used successfully before all the
492 // explicit arguments have been scanned is platform dependent.
GetThisLoc(ArgLocDesc * pLoc)493 void GetThisLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetThisOffset(), pLoc); }
GetRetBuffArgLoc(ArgLocDesc * pLoc)494 void GetRetBuffArgLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetRetBuffArgOffset(), pLoc); }
GetParamTypeLoc(ArgLocDesc * pLoc)495 void GetParamTypeLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetParamTypeArgOffset(), pLoc); }
GetVASigCookieLoc(ArgLocDesc * pLoc)496 void GetVASigCookieLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetVASigCookieOffset(), pLoc); }
497 #endif // !_TARGET_X86_
498
GetArgLocDescForStructInRegs()499 ArgLocDesc* GetArgLocDescForStructInRegs()
500 {
501 #if (defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined (_TARGET_ARM64_)
502 return m_hasArgLocDescForStructInRegs ? &m_argLocDescForStructInRegs : NULL;
503 #else
504 return NULL;
505 #endif
506 }
507
508 #ifdef _TARGET_ARM_
509 // Get layout information for the argument that the ArgIterator is currently visiting.
GetArgLoc(int argOffset,ArgLocDesc * pLoc)510 void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
511 {
512 LIMITED_METHOD_CONTRACT;
513
514 pLoc->Init();
515
516 pLoc->m_fRequires64BitAlignment = m_fRequires64BitAlignment;
517
518 int cSlots = (GetArgSize() + 3) / 4;
519
520 if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
521 {
522 pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 4;
523 pLoc->m_cFloatReg = cSlots;
524 return;
525 }
526
527 if (!TransitionBlock::IsStackArgumentOffset(argOffset))
528 {
529 pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
530
531 if (cSlots <= (4 - pLoc->m_idxGenReg))
532 {
533 pLoc->m_cGenReg = cSlots;
534 }
535 else
536 {
537 pLoc->m_cGenReg = 4 - pLoc->m_idxGenReg;
538
539 pLoc->m_idxStack = 0;
540 pLoc->m_cStack = cSlots - pLoc->m_cGenReg;
541 }
542 }
543 else
544 {
545 pLoc->m_idxStack = TransitionBlock::GetStackArgumentIndexFromOffset(argOffset);
546 pLoc->m_cStack = cSlots;
547 }
548 }
549 #endif // _TARGET_ARM_
550
551 #ifdef _TARGET_ARM64_
552 // Get layout information for the argument that the ArgIterator is currently visiting.
GetArgLoc(int argOffset,ArgLocDesc * pLoc)553 void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
554 {
555 LIMITED_METHOD_CONTRACT;
556
557 pLoc->Init();
558
559 if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
560 {
561 // Dividing by 8 as size of each register in FloatArgumentRegisters is 8 bytes.
562 pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 8;
563
564 if (!m_argTypeHandle.IsNull() && m_argTypeHandle.IsHFA())
565 {
566 CorElementType type = m_argTypeHandle.GetHFAType();
567 bool isFloatType = (type == ELEMENT_TYPE_R4);
568
569 pLoc->m_cFloatReg = isFloatType ? GetArgSize()/sizeof(float): GetArgSize()/sizeof(double);
570 pLoc->m_isSinglePrecision = isFloatType;
571 }
572 else
573 {
574 pLoc->m_cFloatReg = 1;
575 }
576 return;
577 }
578
579 int cSlots = (GetArgSize() + 7)/ 8;
580
581 // Composites greater than 16bytes are passed by reference
582 if (GetArgType() == ELEMENT_TYPE_VALUETYPE && GetArgSize() > ENREGISTERED_PARAMTYPE_MAXSIZE)
583 {
584 cSlots = 1;
585 }
586
587 if (!TransitionBlock::IsStackArgumentOffset(argOffset))
588 {
589 pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
590 pLoc->m_cGenReg = cSlots;
591 }
592 else
593 {
594 pLoc->m_idxStack = TransitionBlock::GetStackArgumentIndexFromOffset(argOffset);
595 pLoc->m_cStack = cSlots;
596 }
597 }
598 #endif // _TARGET_ARM64_
599
600 #if defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI)
601 // Get layout information for the argument that the ArgIterator is currently visiting.
GetArgLoc(int argOffset,ArgLocDesc * pLoc)602 void GetArgLoc(int argOffset, ArgLocDesc* pLoc)
603 {
604 LIMITED_METHOD_CONTRACT;
605
606 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
607 if (m_hasArgLocDescForStructInRegs)
608 {
609 *pLoc = m_argLocDescForStructInRegs;
610 return;
611 }
612 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
613
614 if (argOffset == TransitionBlock::StructInRegsOffset)
615 {
616 // We always already have argLocDesc for structs passed in registers, we
617 // compute it in the GetNextOffset for those since it is always needed.
618 _ASSERTE(false);
619 return;
620 }
621
622 pLoc->Init();
623
624 if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
625 {
626 // Dividing by 16 as size of each register in FloatArgumentRegisters is 16 bytes.
627 pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 16;
628 pLoc->m_cFloatReg = 1;
629 }
630 else if (!TransitionBlock::IsStackArgumentOffset(argOffset))
631 {
632 pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
633 pLoc->m_cGenReg = 1;
634 }
635 else
636 {
637 pLoc->m_idxStack = TransitionBlock::GetStackArgumentIndexFromOffset(argOffset);
638 pLoc->m_cStack = (GetArgSize() + STACK_ELEM_SIZE - 1) / STACK_ELEM_SIZE;
639 }
640 }
641 #endif // _TARGET_AMD64_ && UNIX_AMD64_ABI
642
643 protected:
644 DWORD m_dwFlags; // Cached flags
645 int m_nSizeOfArgStack; // Cached value of SizeOfArgStack
646
647 DWORD m_argNum;
648
649 // Cached information about last argument
650 CorElementType m_argType;
651 int m_argSize;
652 TypeHandle m_argTypeHandle;
653 #if (defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)) || defined(_TARGET_ARM64_)
654 ArgLocDesc m_argLocDescForStructInRegs;
655 bool m_hasArgLocDescForStructInRegs;
656 #endif // _TARGET_AMD64_ && UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
657
658 #ifdef _TARGET_X86_
659 int m_curOfs; // Current position of the stack iterator
660 int m_numRegistersUsed;
661 #endif
662
663 #ifdef _TARGET_AMD64_
664 #ifdef UNIX_AMD64_ABI
665 int m_idxGenReg; // Next general register to be assigned a value
666 int m_idxStack; // Next stack slot to be assigned a value
667 int m_idxFPReg; // Next floating point register to be assigned a value
668 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
669 bool m_fArgInRegisters; // Indicates that the current argument is stored in registers
670 #endif
671 #else
672 int m_curOfs; // Current position of the stack iterator
673 #endif
674 #endif
675
676 #ifdef _TARGET_ARM_
677 int m_idxGenReg; // Next general register to be assigned a value
678 int m_idxStack; // Next stack slot to be assigned a value
679
680 WORD m_wFPRegs; // Bitmask of available floating point argument registers (s0-s15/d0-d7)
681 bool m_fRequires64BitAlignment; // Cached info about the current arg
682 #endif
683
684 #ifdef _TARGET_ARM64_
685 int m_idxGenReg; // Next general register to be assigned a value
686 int m_idxStack; // Next stack slot to be assigned a value
687 int m_idxFPReg; // Next FP register to be assigned a value
688 #endif
689
690 enum {
691 ITERATION_STARTED = 0x0001, // Started iterating over arguments
692 SIZE_OF_ARG_STACK_COMPUTED = 0x0002,
693 RETURN_FLAGS_COMPUTED = 0x0004,
694 RETURN_HAS_RET_BUFFER = 0x0008, // Cached value of HasRetBuffArg
695
696 #ifdef _TARGET_X86_
697 PARAM_TYPE_REGISTER_MASK = 0x0030,
698 PARAM_TYPE_REGISTER_STACK = 0x0010,
699 PARAM_TYPE_REGISTER_ECX = 0x0020,
700 PARAM_TYPE_REGISTER_EDX = 0x0030,
701 #endif
702
703 METHOD_INVOKE_NEEDS_ACTIVATION = 0x0040, // Flag used by ArgIteratorForMethodInvoke
704
705 RETURN_FP_SIZE_SHIFT = 8, // The rest of the flags is cached value of GetFPReturnSize
706 };
707
708 void ComputeReturnFlags();
709
710 #ifndef _TARGET_X86_
GetSimpleLoc(int offset,ArgLocDesc * pLoc)711 void GetSimpleLoc(int offset, ArgLocDesc * pLoc)
712 {
713 WRAPPER_NO_CONTRACT;
714 pLoc->Init();
715 pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(offset);
716 pLoc->m_cGenReg = 1;
717 }
718 #endif
719 };
720
721
722 template<class ARGITERATOR_BASE>
GetThisOffset()723 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetThisOffset()
724 {
725 WRAPPER_NO_CONTRACT;
726
727 // This pointer is in the first argument register by default
728 int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
729
730 #ifdef _TARGET_X86_
731 // x86 is special as always
732 ret += offsetof(ArgumentRegisters, ECX);
733 #endif
734
735 return ret;
736 }
737
738 template<class ARGITERATOR_BASE>
GetRetBuffArgOffset()739 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetRetBuffArgOffset()
740 {
741 WRAPPER_NO_CONTRACT;
742
743 _ASSERTE(this->HasRetBuffArg());
744
745 // RetBuf arg is in the second argument register by default
746 int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
747
748 #if _TARGET_X86_
749 // x86 is special as always
750 ret += this->HasThis() ? offsetof(ArgumentRegisters, EDX) : offsetof(ArgumentRegisters, ECX);
751 #elif _TARGET_ARM64_
752 ret += (int) offsetof(ArgumentRegisters, x[8]);
753 #else
754 if (this->HasThis())
755 ret += sizeof(void *);
756 #endif
757
758 return ret;
759 }
760
761 template<class ARGITERATOR_BASE>
GetVASigCookieOffset()762 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetVASigCookieOffset()
763 {
764 WRAPPER_NO_CONTRACT;
765
766 _ASSERTE(this->IsVarArg());
767
768 #if defined(_TARGET_X86_)
769 // x86 is special as always
770 return sizeof(TransitionBlock);
771 #else
772 // VaSig cookie is after this and retbuf arguments by default.
773 int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
774
775 if (this->HasThis())
776 {
777 ret += sizeof(void*);
778 }
779
780 if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
781 {
782 ret += sizeof(void*);
783 }
784
785 return ret;
786 #endif
787 }
788
789 //-----------------------------------------------------------
790 // Get the extra param offset for shared generic code
791 //-----------------------------------------------------------
792 template<class ARGITERATOR_BASE>
GetParamTypeArgOffset()793 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetParamTypeArgOffset()
794 {
795 CONTRACTL
796 {
797 INSTANCE_CHECK;
798 if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
799 if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
800 if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
801 MODE_ANY;
802 }
803 CONTRACTL_END
804
805 _ASSERTE(this->HasParamType());
806
807 #ifdef _TARGET_X86_
808 // x86 is special as always
809 if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED))
810 ForceSigWalk();
811
812 switch (m_dwFlags & PARAM_TYPE_REGISTER_MASK)
813 {
814 case PARAM_TYPE_REGISTER_ECX:
815 return TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, ECX);
816 case PARAM_TYPE_REGISTER_EDX:
817 return TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, EDX);
818 default:
819 break;
820 }
821
822 // The param type arg is last stack argument otherwise
823 return sizeof(TransitionBlock);
824 #else
825 // The hidden arg is after this and retbuf arguments by default.
826 int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
827
828 if (this->HasThis())
829 {
830 ret += sizeof(void*);
831 }
832
833 if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
834 {
835 ret += sizeof(void*);
836 }
837
838 return ret;
839 #endif
840 }
841
// To avoid corner case bugs, limit maximum size of the arguments with sufficient margin
#define MAX_ARG_SIZE 0xFFFFFF
844
//------------------------------------------------------------
// Each time this is called, this returns a byte offset of the next
// argument from the Frame* pointer. This offset can be positive *or* negative.
//
// Returns TransitionBlock::InvalidOffset once you've hit the end of the list.
//------------------------------------------------------------
851 template<class ARGITERATOR_BASE>
GetNextOffset()852 int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset()
853 {
854 WRAPPER_NO_CONTRACT;
855 SUPPORTS_DAC;
856
857 if (!(m_dwFlags & ITERATION_STARTED))
858 {
859 int numRegistersUsed = 0;
860
861 if (this->HasThis())
862 numRegistersUsed++;
863
864 if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
865 numRegistersUsed++;
866
867 _ASSERTE(!this->IsVarArg() || !this->HasParamType());
868
869 #ifndef _TARGET_X86_
870 if (this->IsVarArg() || this->HasParamType())
871 {
872 numRegistersUsed++;
873 }
874 #endif
875
876 #ifdef _TARGET_X86_
877 if (this->IsVarArg())
878 {
879 numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs
880 }
881
882 #ifdef FEATURE_INTERPRETER
883 BYTE callconv = CallConv();
884 switch (callconv)
885 {
886 case IMAGE_CEE_CS_CALLCONV_C:
887 case IMAGE_CEE_CS_CALLCONV_STDCALL:
888 m_numRegistersUsed = NUM_ARGUMENT_REGISTERS;
889 m_curOfs = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
890 m_fUnmanagedCallConv = true;
891 break;
892
893 case IMAGE_CEE_CS_CALLCONV_THISCALL:
894 case IMAGE_CEE_CS_CALLCONV_FASTCALL:
895 _ASSERTE_MSG(false, "Unsupported calling convention.");
896
897 default:
898 m_fUnmanagedCallConv = false;
899 m_numRegistersUsed = numRegistersUsed;
900 m_curOfs = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack();
901 }
902 #else
903 m_numRegistersUsed = numRegistersUsed;
904 m_curOfs = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack();
905 #endif
906
907 #elif defined(_TARGET_AMD64_)
908 #ifdef UNIX_AMD64_ABI
909 m_idxGenReg = numRegistersUsed;
910 m_idxStack = 0;
911 m_idxFPReg = 0;
912 #else
913 m_curOfs = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
914 #endif
915 #elif defined(_TARGET_ARM_)
916 m_idxGenReg = numRegistersUsed;
917 m_idxStack = 0;
918
919 m_wFPRegs = 0;
920 #elif defined(_TARGET_ARM64_)
921 m_idxGenReg = numRegistersUsed;
922 m_idxStack = 0;
923
924 m_idxFPReg = 0;
925 #else
926 PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset");
927 #endif
928
929 m_argNum = 0;
930
931 m_dwFlags |= ITERATION_STARTED;
932 }
933
934 if (m_argNum == this->NumFixedArgs())
935 return TransitionBlock::InvalidOffset;
936
937 TypeHandle thValueType;
938 CorElementType argType = this->GetNextArgumentType(m_argNum++, &thValueType);
939
940 int argSize = MetaSig::GetElemSize(argType, thValueType);
941
942 m_argType = argType;
943 m_argSize = argSize;
944 m_argTypeHandle = thValueType;
945
946 #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
947 m_hasArgLocDescForStructInRegs = false;
948 #endif
949
950 #ifdef _TARGET_X86_
951 #ifdef FEATURE_INTERPRETER
952 if (m_fUnmanagedCallConv)
953 {
954 int argOfs = m_curOfs;
955 m_curOfs += StackElemSize(argSize);
956 return argOfs;
957 }
958 #endif
959 if (IsArgumentInRegister(&m_numRegistersUsed, argType))
960 {
961 return TransitionBlock::GetOffsetOfArgumentRegisters() + (NUM_ARGUMENT_REGISTERS - m_numRegistersUsed) * sizeof(void *);
962 }
963
964 m_curOfs -= StackElemSize(argSize);
965 _ASSERTE(m_curOfs >= TransitionBlock::GetOffsetOfArgs());
966 return m_curOfs;
967 #elif defined(_TARGET_AMD64_)
968 #ifdef UNIX_AMD64_ABI
969
970 m_fArgInRegisters = true;
971
972 int cFPRegs = 0;
973 int cGenRegs = 0;
974 int cbArg = StackElemSize(argSize);
975
976 switch (argType)
977 {
978
979 case ELEMENT_TYPE_R4:
980 // 32-bit floating point argument.
981 cFPRegs = 1;
982 break;
983
984 case ELEMENT_TYPE_R8:
985 // 64-bit floating point argument.
986 cFPRegs = 1;
987 break;
988
989 case ELEMENT_TYPE_VALUETYPE:
990 {
991 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
992 MethodTable *pMT = m_argTypeHandle.AsMethodTable();
993 if (pMT->IsRegPassedStruct())
994 {
995 EEClass* eeClass = pMT->GetClass();
996 cGenRegs = 0;
997 for (int i = 0; i < eeClass->GetNumberEightBytes(); i++)
998 {
999 switch (eeClass->GetEightByteClassification(i))
1000 {
1001 case SystemVClassificationTypeInteger:
1002 case SystemVClassificationTypeIntegerReference:
1003 case SystemVClassificationTypeIntegerByRef:
1004 cGenRegs++;
1005 break;
1006 case SystemVClassificationTypeSSE:
1007 cFPRegs++;
1008 break;
1009 default:
1010 _ASSERTE(false);
1011 break;
1012 }
1013 }
1014
1015 // Check if we have enough registers available for the struct passing
1016 if ((cFPRegs + m_idxFPReg <= NUM_FLOAT_ARGUMENT_REGISTERS) && (cGenRegs + m_idxGenReg) <= NUM_ARGUMENT_REGISTERS)
1017 {
1018 m_argLocDescForStructInRegs.Init();
1019 m_argLocDescForStructInRegs.m_cGenReg = cGenRegs;
1020 m_argLocDescForStructInRegs.m_cFloatReg = cFPRegs;
1021 m_argLocDescForStructInRegs.m_idxGenReg = m_idxGenReg;
1022 m_argLocDescForStructInRegs.m_idxFloatReg = m_idxFPReg;
1023 m_argLocDescForStructInRegs.m_eeClass = eeClass;
1024
1025 m_hasArgLocDescForStructInRegs = true;
1026
1027 m_idxGenReg += cGenRegs;
1028 m_idxFPReg += cFPRegs;
1029
1030 return TransitionBlock::StructInRegsOffset;
1031 }
1032 }
1033
1034 // Set the register counts to indicate that this argument will not be passed in registers
1035 cFPRegs = 0;
1036 cGenRegs = 0;
1037
1038 #else // FEATURE_UNIX_AMD64_STRUCT_PASSING
1039 argSize = sizeof(TADDR);
1040 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
1041
1042 break;
1043 }
1044
1045 default:
1046 cGenRegs = cbArg / 8; // GP reg size
1047 break;
1048 }
1049
1050 if ((cFPRegs > 0) && (cFPRegs + m_idxFPReg <= NUM_FLOAT_ARGUMENT_REGISTERS))
1051 {
1052 int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 16;
1053 m_idxFPReg += cFPRegs;
1054 return argOfs;
1055 }
1056 else if ((cGenRegs > 0) && (m_idxGenReg + cGenRegs <= NUM_ARGUMENT_REGISTERS))
1057 {
1058 int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
1059 m_idxGenReg += cGenRegs;
1060 return argOfs;
1061 }
1062
1063 #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1064 m_fArgInRegisters = false;
1065 #endif
1066
1067 int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * STACK_ELEM_SIZE;
1068
1069 int cArgSlots = cbArg / STACK_ELEM_SIZE;
1070 m_idxStack += cArgSlots;
1071
1072 return argOfs;
1073 #else
1074 // Each argument takes exactly one slot on AMD64 on Windows
1075 int argOfs = m_curOfs;
1076 m_curOfs += sizeof(void *);
1077 return argOfs;
1078 #endif
1079 #elif defined(_TARGET_ARM_)
1080 // First look at the underlying type of the argument to determine some basic properties:
1081 // 1) The size of the argument in bytes (rounded up to the stack slot size of 4 if necessary).
1082 // 2) Whether the argument represents a floating point primitive (ELEMENT_TYPE_R4 or ELEMENT_TYPE_R8).
1083 // 3) Whether the argument requires 64-bit alignment (anything that contains a Int64/UInt64).
1084
1085 bool fFloatingPoint = false;
1086 bool fRequiresAlign64Bit = false;
1087
1088 switch (argType)
1089 {
1090 case ELEMENT_TYPE_I8:
1091 case ELEMENT_TYPE_U8:
1092 // 64-bit integers require 64-bit alignment on ARM.
1093 fRequiresAlign64Bit = true;
1094 break;
1095
1096 case ELEMENT_TYPE_R4:
1097 // 32-bit floating point argument.
1098 fFloatingPoint = true;
1099 break;
1100
1101 case ELEMENT_TYPE_R8:
1102 // 64-bit floating point argument.
1103 fFloatingPoint = true;
1104 fRequiresAlign64Bit = true;
1105 break;
1106
1107 case ELEMENT_TYPE_VALUETYPE:
1108 {
1109 // Value type case: extract the alignment requirement, note that this has to handle
1110 // the interop "native value types".
1111 fRequiresAlign64Bit = thValueType.RequiresAlign8();
1112
1113 #ifdef FEATURE_HFA
1114 // Handle HFAs: packed structures of 1-4 floats or doubles that are passed in FP argument
1115 // registers if possible.
1116 if (thValueType.IsHFA())
1117 {
1118 fFloatingPoint = true;
1119 }
1120 #endif
1121
1122 break;
1123 }
1124
1125 default:
1126 // The default is are 4-byte arguments (or promoted to 4 bytes), non-FP and don't require any
1127 // 64-bit alignment.
1128 break;
1129 }
1130
1131 // Now attempt to place the argument into some combination of floating point or general registers and
1132 // the stack.
1133
1134 // Save the alignment requirement
1135 m_fRequires64BitAlignment = fRequiresAlign64Bit;
1136
1137 int cbArg = StackElemSize(argSize);
1138 int cArgSlots = cbArg / 4;
1139
1140 // Ignore floating point argument placement in registers if we're dealing with a vararg function (the ABI
1141 // specifies this so that vararg processing on the callee side is simplified).
1142 #ifndef ARM_SOFTFP
1143 if (fFloatingPoint && !this->IsVarArg())
1144 {
1145 // Handle floating point (primitive) arguments.
1146
1147 // First determine whether we can place the argument in VFP registers. There are 16 32-bit
1148 // and 8 64-bit argument registers that share the same register space (e.g. D0 overlaps S0 and
1149 // S1). The ABI specifies that VFP values will be passed in the lowest sequence of registers that
1150 // haven't been used yet and have the required alignment. So the sequence (float, double, float)
1151 // would be mapped to (S0, D1, S1) or (S0, S2/S3, S1).
1152 //
1153 // We use a 16-bit bitmap to record which registers have been used so far.
1154 //
1155 // So we can use the same basic loop for each argument type (float, double or HFA struct) we set up
1156 // the following input parameters based on the size and alignment requirements of the arguments:
1157 // wAllocMask : bitmask of the number of 32-bit registers we need (1 for 1, 3 for 2, 7 for 3 etc.)
1158 // cSteps : number of loop iterations it'll take to search the 16 registers
1159 // cShift : how many bits to shift the allocation mask on each attempt
1160
1161 WORD wAllocMask = (1 << (cbArg / 4)) - 1;
1162 WORD cSteps = (WORD)(fRequiresAlign64Bit ? 9 - (cbArg / 8) : 17 - (cbArg / 4));
1163 WORD cShift = fRequiresAlign64Bit ? 2 : 1;
1164
1165 // Look through the availability bitmask for a free register or register pair.
1166 for (WORD i = 0; i < cSteps; i++)
1167 {
1168 if ((m_wFPRegs & wAllocMask) == 0)
1169 {
1170 // We found one, mark the register or registers as used.
1171 m_wFPRegs |= wAllocMask;
1172
1173 // Indicate the registers used to the caller and return.
1174 return TransitionBlock::GetOffsetOfFloatArgumentRegisters() + (i * cShift * 4);
1175 }
1176 wAllocMask <<= cShift;
1177 }
1178
1179 // The FP argument is going to live on the stack. Once this happens the ABI demands we mark all FP
1180 // registers as unavailable.
1181 m_wFPRegs = 0xffff;
1182
1183 // Doubles or HFAs containing doubles need the stack aligned appropriately.
1184 if (fRequiresAlign64Bit)
1185 m_idxStack = ALIGN_UP(m_idxStack, 2);
1186
1187 // Indicate the stack location of the argument to the caller.
1188 int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 4;
1189
1190 // Record the stack usage.
1191 m_idxStack += cArgSlots;
1192
1193 return argOfs;
1194 }
1195 #endif // ARM_SOFTFP
1196
1197 //
1198 // Handle the non-floating point case.
1199 //
1200
1201 if (m_idxGenReg < 4)
1202 {
1203 if (fRequiresAlign64Bit)
1204 {
1205 // The argument requires 64-bit alignment. Align either the next general argument register if
1206 // we have any left. See step C.3 in the algorithm in the ABI spec.
1207 m_idxGenReg = ALIGN_UP(m_idxGenReg, 2);
1208 }
1209
1210 int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 4;
1211
1212 int cRemainingRegs = 4 - m_idxGenReg;
1213 if (cArgSlots <= cRemainingRegs)
1214 {
1215 // Mark the registers just allocated as used.
1216 m_idxGenReg += cArgSlots;
1217 return argOfs;
1218 }
1219
1220 // The ABI supports splitting a non-FP argument across registers and the stack. But this is
1221 // disabled if the FP arguments already overflowed onto the stack (i.e. the stack index is not
1222 // zero). The following code marks the general argument registers as exhausted if this condition
1223 // holds. See steps C.5 in the algorithm in the ABI spec.
1224
1225 m_idxGenReg = 4;
1226
1227 if (m_idxStack == 0)
1228 {
1229 m_idxStack += cArgSlots - cRemainingRegs;
1230 return argOfs;
1231 }
1232 }
1233
1234 if (fRequiresAlign64Bit)
1235 {
1236 // The argument requires 64-bit alignment. If it is going to be passed on the stack, align
1237 // the next stack slot. See step C.6 in the algorithm in the ABI spec.
1238 m_idxStack = ALIGN_UP(m_idxStack, 2);
1239 }
1240
1241 int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 4;
1242
1243 // Advance the stack pointer over the argument just placed.
1244 m_idxStack += cArgSlots;
1245
1246 return argOfs;
1247 #elif defined(_TARGET_ARM64_)
1248
1249 int cFPRegs = 0;
1250
1251 switch (argType)
1252 {
1253
1254 case ELEMENT_TYPE_R4:
1255 // 32-bit floating point argument.
1256 cFPRegs = 1;
1257 break;
1258
1259 case ELEMENT_TYPE_R8:
1260 // 64-bit floating point argument.
1261 cFPRegs = 1;
1262 break;
1263
1264 case ELEMENT_TYPE_VALUETYPE:
1265 {
1266 // Handle HFAs: packed structures of 2-4 floats or doubles that are passed in FP argument
1267 // registers if possible.
1268 if (thValueType.IsHFA())
1269 {
1270 CorElementType type = thValueType.GetHFAType();
1271 bool isFloatType = (type == ELEMENT_TYPE_R4);
1272
1273 cFPRegs = (type == ELEMENT_TYPE_R4)? (argSize/sizeof(float)): (argSize/sizeof(double));
1274
1275 m_argLocDescForStructInRegs.Init();
1276 m_argLocDescForStructInRegs.m_cFloatReg = cFPRegs;
1277 m_argLocDescForStructInRegs.m_idxFloatReg = m_idxFPReg;
1278
1279 m_argLocDescForStructInRegs.m_isSinglePrecision = isFloatType;
1280
1281 m_hasArgLocDescForStructInRegs = true;
1282 }
1283 else
1284 {
1285 // Composite greater than 16bytes should be passed by reference
1286 if (argSize > ENREGISTERED_PARAMTYPE_MAXSIZE)
1287 {
1288 argSize = sizeof(TADDR);
1289 }
1290 }
1291
1292 break;
1293 }
1294
1295 default:
1296 break;
1297 }
1298
1299 int cbArg = StackElemSize(argSize);
1300 int cArgSlots = cbArg / STACK_ELEM_SIZE;
1301
1302 if (cFPRegs>0 && !this->IsVarArg())
1303 {
1304 if (cFPRegs + m_idxFPReg <= 8)
1305 {
1306 int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 8;
1307 m_idxFPReg += cFPRegs;
1308 return argOfs;
1309 }
1310 else
1311 {
1312 m_idxFPReg = 8;
1313 }
1314 }
1315 else
1316 {
1317 if (m_idxGenReg + cArgSlots <= 8)
1318 {
1319 int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
1320 m_idxGenReg += cArgSlots;
1321 return argOfs;
1322 }
1323 else
1324 {
1325 m_idxGenReg = 8;
1326 }
1327 }
1328
1329 int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 8;
1330 m_idxStack += cArgSlots;
1331 return argOfs;
1332 #else
1333 PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset");
1334 return TransitionBlock::InvalidOffset;
1335 #endif
1336 }
1337
1338 template<class ARGITERATOR_BASE>
ComputeReturnFlags()1339 void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags()
1340 {
1341 CONTRACTL
1342 {
1343 INSTANCE_CHECK;
1344 if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
1345 if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
1346 if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
1347 MODE_ANY;
1348 }
1349 CONTRACTL_END
1350
1351 TypeHandle thValueType;
1352 CorElementType type = this->GetReturnType(&thValueType);
1353
1354 DWORD flags = RETURN_FLAGS_COMPUTED;
1355 switch (type)
1356 {
1357 case ELEMENT_TYPE_TYPEDBYREF:
1358 #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
1359 if (sizeof(TypedByRef) > ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
1360 flags |= RETURN_HAS_RET_BUFFER;
1361 #else
1362 flags |= RETURN_HAS_RET_BUFFER;
1363 #endif
1364 break;
1365
1366 case ELEMENT_TYPE_R4:
1367 #ifndef ARM_SOFTFP
1368 flags |= sizeof(float) << RETURN_FP_SIZE_SHIFT;
1369 #endif
1370 break;
1371
1372 case ELEMENT_TYPE_R8:
1373 #ifndef ARM_SOFTFP
1374 flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT;
1375 #endif
1376 break;
1377
1378 case ELEMENT_TYPE_VALUETYPE:
1379 #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
1380 {
1381 _ASSERTE(!thValueType.IsNull());
1382
1383 #if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1384 MethodTable *pMT = thValueType.AsMethodTable();
1385 if (pMT->IsRegPassedStruct())
1386 {
1387 EEClass* eeClass = pMT->GetClass();
1388
1389 if (eeClass->GetNumberEightBytes() == 1)
1390 {
1391 // Structs occupying just one eightbyte are treated as int / double
1392 if (eeClass->GetEightByteClassification(0) == SystemVClassificationTypeSSE)
1393 {
1394 flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT;
1395 }
1396 }
1397 else
1398 {
1399 // Size of the struct is 16 bytes
1400 flags |= (16 << RETURN_FP_SIZE_SHIFT);
1401 // The lowest two bits of the size encode the order of the int and SSE fields
1402 if (eeClass->GetEightByteClassification(0) == SystemVClassificationTypeSSE)
1403 {
1404 flags |= (1 << RETURN_FP_SIZE_SHIFT);
1405 }
1406
1407 if (eeClass->GetEightByteClassification(1) == SystemVClassificationTypeSSE)
1408 {
1409 flags |= (2 << RETURN_FP_SIZE_SHIFT);
1410 }
1411 }
1412
1413 break;
1414 }
1415 #else // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
1416
1417 #ifdef FEATURE_HFA
1418 if (thValueType.IsHFA() && !this->IsVarArg())
1419 {
1420 CorElementType hfaType = thValueType.GetHFAType();
1421
1422 flags |= (hfaType == ELEMENT_TYPE_R4) ?
1423 ((4 * sizeof(float)) << RETURN_FP_SIZE_SHIFT) :
1424 ((4 * sizeof(double)) << RETURN_FP_SIZE_SHIFT);
1425
1426 break;
1427 }
1428 #endif
1429
1430 size_t size = thValueType.GetSize();
1431
1432 #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1433 // Return value types of size which are not powers of 2 using a RetBuffArg
1434 if ((size & (size-1)) != 0)
1435 {
1436 flags |= RETURN_HAS_RET_BUFFER;
1437 break;
1438 }
1439 #endif
1440
1441 if (size <= ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
1442 break;
1443 #endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
1444 }
1445 #endif // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
1446
1447 // Value types are returned using return buffer by default
1448 flags |= RETURN_HAS_RET_BUFFER;
1449 break;
1450
1451 default:
1452 break;
1453 }
1454
1455 m_dwFlags |= flags;
1456 }
1457
1458 template<class ARGITERATOR_BASE>
ForceSigWalk()1459 void ArgIteratorTemplate<ARGITERATOR_BASE>::ForceSigWalk()
1460 {
1461 CONTRACTL
1462 {
1463 INSTANCE_CHECK;
1464 if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
1465 if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
1466 if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
1467 MODE_ANY;
1468 }
1469 CONTRACTL_END
1470
1471 // This can be only used before the actual argument iteration started
1472 _ASSERTE((m_dwFlags & ITERATION_STARTED) == 0);
1473
1474 #ifdef _TARGET_X86_
1475 //
1476 // x86 is special as always
1477 //
1478
1479 int numRegistersUsed = 0;
1480 int nSizeOfArgStack = 0;
1481
1482 if (this->HasThis())
1483 numRegistersUsed++;
1484
1485 if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg())
1486 numRegistersUsed++;
1487
1488 if (this->IsVarArg())
1489 {
1490 nSizeOfArgStack += sizeof(void *);
1491 numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs
1492 }
1493
1494 #ifdef FEATURE_INTERPRETER
1495 BYTE callconv = CallConv();
1496 switch (callconv)
1497 {
1498 case IMAGE_CEE_CS_CALLCONV_C:
1499 case IMAGE_CEE_CS_CALLCONV_STDCALL:
1500 numRegistersUsed = NUM_ARGUMENT_REGISTERS;
1501 nSizeOfArgStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
1502 break;
1503
1504 case IMAGE_CEE_CS_CALLCONV_THISCALL:
1505 case IMAGE_CEE_CS_CALLCONV_FASTCALL:
1506 _ASSERTE_MSG(false, "Unsupported calling convention.");
1507 default:
1508 }
1509 #endif // FEATURE_INTERPRETER
1510
1511 DWORD nArgs = this->NumFixedArgs();
1512 for (DWORD i = 0; i < nArgs; i++)
1513 {
1514 TypeHandle thValueType;
1515 CorElementType type = this->GetNextArgumentType(i, &thValueType);
1516
1517 if (!IsArgumentInRegister(&numRegistersUsed, type))
1518 {
1519 int structSize = MetaSig::GetElemSize(type, thValueType);
1520
1521 nSizeOfArgStack += StackElemSize(structSize);
1522
1523 #ifndef DACCESS_COMPILE
1524 if (nSizeOfArgStack > MAX_ARG_SIZE)
1525 {
1526 #ifdef _DEBUG
1527 // We should not ever throw exception in the "FORBIDGC_LOADER_USE_ENABLED" mode.
1528 // The contract violation is required to workaround bug in the static contract analyzer.
1529 _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
1530 CONTRACT_VIOLATION(ThrowsViolation);
1531 #endif
1532 COMPlusThrow(kNotSupportedException);
1533 }
1534 #endif
1535 }
1536 }
1537
1538 if (this->HasParamType())
1539 {
1540 DWORD paramTypeFlags = 0;
1541 if (numRegistersUsed < NUM_ARGUMENT_REGISTERS)
1542 {
1543 numRegistersUsed++;
1544 paramTypeFlags = (numRegistersUsed == 1) ?
1545 PARAM_TYPE_REGISTER_ECX : PARAM_TYPE_REGISTER_EDX;
1546 }
1547 else
1548 {
1549 nSizeOfArgStack += sizeof(void *);
1550 paramTypeFlags = PARAM_TYPE_REGISTER_STACK;
1551 }
1552 m_dwFlags |= paramTypeFlags;
1553 }
1554
1555 #else // _TARGET_X86_
1556
1557 int maxOffset = TransitionBlock::GetOffsetOfArgs();
1558
1559 int ofs;
1560 while (TransitionBlock::InvalidOffset != (ofs = GetNextOffset()))
1561 {
1562 int stackElemSize;
1563
1564 #ifdef _TARGET_AMD64_
1565 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
1566 if (m_fArgInRegisters)
1567 {
1568 // Arguments passed in registers don't consume any stack
1569 continue;
1570 }
1571
1572 stackElemSize = StackElemSize(GetArgSize());
1573 #else // FEATURE_UNIX_AMD64_STRUCT_PASSING
1574 // All stack arguments take just one stack slot on AMD64 because of arguments bigger
1575 // than a stack slot are passed by reference.
1576 stackElemSize = STACK_ELEM_SIZE;
1577 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
1578 #else // _TARGET_AMD64_
1579 stackElemSize = StackElemSize(GetArgSize());
1580 #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
1581 if (IsArgPassedByRef())
1582 stackElemSize = STACK_ELEM_SIZE;
1583 #endif
1584 #endif // _TARGET_AMD64_
1585
1586 int endOfs = ofs + stackElemSize;
1587 if (endOfs > maxOffset)
1588 {
1589 #if !defined(DACCESS_COMPILE)
1590 if (endOfs > MAX_ARG_SIZE)
1591 {
1592 #ifdef _DEBUG
1593 // We should not ever throw exception in the "FORBIDGC_LOADER_USE_ENABLED" mode.
1594 // The contract violation is required to workaround bug in the static contract analyzer.
1595 _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
1596 CONTRACT_VIOLATION(ThrowsViolation);
1597 #endif
1598 COMPlusThrow(kNotSupportedException);
1599 }
1600 #endif
1601 maxOffset = endOfs;
1602 }
1603 }
1604 // Clear the iterator started flag
1605 m_dwFlags &= ~ITERATION_STARTED;
1606
1607 int nSizeOfArgStack = maxOffset - TransitionBlock::GetOffsetOfArgs();
1608
1609 #if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
1610 nSizeOfArgStack = (nSizeOfArgStack > (int)sizeof(ArgumentRegisters)) ?
1611 (nSizeOfArgStack - sizeof(ArgumentRegisters)) : 0;
1612 #endif
1613
1614 #endif // _TARGET_X86_
1615
1616 // Cache the result
1617 m_nSizeOfArgStack = nSizeOfArgStack;
1618 m_dwFlags |= SIZE_OF_ARG_STACK_COMPUTED;
1619
1620 this->Reset();
1621 }
1622
1623 class ArgIteratorBase
1624 {
1625 protected:
1626 MetaSig * m_pSig;
1627
GetReturnType(TypeHandle * pthValueType)1628 FORCEINLINE CorElementType GetReturnType(TypeHandle * pthValueType)
1629 {
1630 WRAPPER_NO_CONTRACT;
1631 #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
1632 return m_pSig->GetReturnTypeNormalized(pthValueType);
1633 #else
1634 return m_pSig->GetReturnTypeNormalized();
1635 #endif
1636 }
1637
GetNextArgumentType(DWORD iArg,TypeHandle * pthValueType)1638 FORCEINLINE CorElementType GetNextArgumentType(DWORD iArg, TypeHandle * pthValueType)
1639 {
1640 WRAPPER_NO_CONTRACT;
1641 _ASSERTE(iArg == m_pSig->GetArgNum());
1642 CorElementType et = m_pSig->PeekArgNormalized(pthValueType);
1643 m_pSig->SkipArg();
1644 return et;
1645 }
1646
Reset()1647 FORCEINLINE void Reset()
1648 {
1649 WRAPPER_NO_CONTRACT;
1650 m_pSig->Reset();
1651 }
1652
1653 public:
HasThis()1654 BOOL HasThis()
1655 {
1656 LIMITED_METHOD_CONTRACT;
1657 return m_pSig->HasThis();
1658 }
1659
HasParamType()1660 BOOL HasParamType()
1661 {
1662 LIMITED_METHOD_CONTRACT;
1663 return m_pSig->GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE;
1664 }
1665
IsVarArg()1666 BOOL IsVarArg()
1667 {
1668 LIMITED_METHOD_CONTRACT;
1669 return m_pSig->IsVarArg() || m_pSig->IsTreatAsVarArg();
1670 }
1671
NumFixedArgs()1672 DWORD NumFixedArgs()
1673 {
1674 LIMITED_METHOD_CONTRACT;
1675 return m_pSig->NumFixedArgs();
1676 }
1677
1678 #ifdef FEATURE_INTERPRETER
CallConv()1679 BYTE CallConv()
1680 {
1681 return m_pSig->GetCallingConvention();
1682 }
1683 #endif // FEATURE_INTERPRETER
1684
1685 //
1686 // The following is used by the profiler to dig into the iterator for
1687 // discovering if the method has a This pointer or a return buffer.
1688 // Do not use this to re-initialize the signature, use the exposed Init()
1689 // method in this class.
1690 //
GetSig(void)1691 MetaSig *GetSig(void)
1692 {
1693 return m_pSig;
1694 }
1695 };
1696
1697 class ArgIterator : public ArgIteratorTemplate<ArgIteratorBase>
1698 {
1699 public:
ArgIterator(MetaSig * pSig)1700 ArgIterator(MetaSig * pSig)
1701 {
1702 m_pSig = pSig;
1703 }
1704
1705 // This API returns true if we are returning a structure in registers instead of using a byref return buffer
HasNonStandardByvalReturn()1706 BOOL HasNonStandardByvalReturn()
1707 {
1708 WRAPPER_NO_CONTRACT;
1709
1710 #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
1711 CorElementType type = m_pSig->GetReturnTypeNormalized();
1712 return (type == ELEMENT_TYPE_VALUETYPE || type == ELEMENT_TYPE_TYPEDBYREF) && !HasRetBuffArg();
1713 #else
1714 return FALSE;
1715 #endif
1716 }
1717 };
1718
1719 // Conventience helper
HasRetBuffArg(MetaSig * pSig)1720 inline BOOL HasRetBuffArg(MetaSig * pSig)
1721 {
1722 WRAPPER_NO_CONTRACT;
1723 ArgIterator argit(pSig);
1724 return argit.HasRetBuffArg();
1725 }
1726
#ifdef UNIX_X86_ABI
// For UNIX_X86_ABI and unmanaged function, we always need RetBuf if the return type is VALUETYPE
inline BOOL HasRetBuffArgUnmanagedFixup(MetaSig * pSig)
{
    WRAPPER_NO_CONTRACT;
    // We cannot just pSig->GetReturnType() here since it will return ELEMENT_TYPE_VALUETYPE for enums
    CorElementType type = pSig->GetRetTypeHandleThrowing().GetVerifierCorElementType();
    return type == ELEMENT_TYPE_VALUETYPE;
}
#endif
1737
IsRetBuffPassedAsFirstArg()1738 inline BOOL IsRetBuffPassedAsFirstArg()
1739 {
1740 WRAPPER_NO_CONTRACT;
1741 #ifndef _TARGET_ARM64_
1742 return TRUE;
1743 #else
1744 return FALSE;
1745 #endif
1746 }
1747
1748 #endif // __CALLING_CONVENTION_INCLUDED
1749