// AsmJit - Machine code generation for C++
//
//  * Official AsmJit Home Page: https://asmjit.com
//  * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//    misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.

#include "../core/api-build_p.h"
#ifdef ASMJIT_BUILD_X86

#include "../x86/x86callconv_p.h"
#include "../x86/x86operand.h"

ASMJIT_BEGIN_SUB_NAMESPACE(x86)

// ============================================================================
// [asmjit::x86::CallConvInternal - Init]
// ============================================================================

namespace CallConvInternal {

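// Returns true when `ccId` is one of the 32-bit calling conventions that has no
// meaning in 64-bit mode and is treated as the target's default C calling
// convention (System V or Windows) instead.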
static inline bool shouldThreatAsCDeclIn64BitMode(uint32_t ccId) noexcept {
  return ccId == CallConv::kIdCDecl ||
         ccId == CallConv::kIdStdCall ||
         ccId == CallConv::kIdThisCall ||
         ccId == CallConv::kIdFastCall ||
         ccId == CallConv::kIdRegParm1 ||
         ccId == CallConv::kIdRegParm2 ||
         ccId == CallConv::kIdRegParm3;
}

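// Initializes `cc` to describe the calling convention `ccId` for the given
// target `environment`.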
ASMJIT_FAVOR_SIZE Error init(CallConv& cc, uint32_t ccId, const Environment& environment) noexcept {
  constexpr uint32_t kGroupGp   = Reg::kGroupGp;
  constexpr uint32_t kGroupVec  = Reg::kGroupVec;
  constexpr uint32_t kGroupMm   = Reg::kGroupMm;
  constexpr uint32_t kGroupKReg = Reg::kGroupKReg;

  constexpr uint32_t kZax = Gp::kIdAx;
  constexpr uint32_t kZbx = Gp::kIdBx;
  constexpr uint32_t kZcx = Gp::kIdCx;
  constexpr uint32_t kZdx = Gp::kIdDx;
  constexpr uint32_t kZsp = Gp::kIdSp;
  constexpr uint32_t kZbp = Gp::kIdBp;
  constexpr uint32_t kZsi = Gp::kIdSi;
  constexpr uint32_t kZdi = Gp::kIdDi;

  bool winABI = environment.isPlatformWindows() || environment.isAbiMSVC();

  cc.setArch(environment.arch());

  if (environment.is32Bit()) {
    bool isStandardCallConv = true;

    cc.setPreservedRegs(Reg::kGroupGp, Support::bitMask(Gp::kIdBx, Gp::kIdSp, Gp::kIdBp, Gp::kIdSi, Gp::kIdDi));
    cc.setNaturalStackAlignment(4);

    switch (ccId) {
      case CallConv::kIdCDecl:
        break;

      case CallConv::kIdStdCall:
        cc.setFlags(CallConv::kFlagCalleePopsStack);
        break;

      case CallConv::kIdFastCall:
        cc.setFlags(CallConv::kFlagCalleePopsStack);
        cc.setPassedOrder(kGroupGp, kZcx, kZdx);
        break;

      case CallConv::kIdVectorCall:
        cc.setFlags(CallConv::kFlagCalleePopsStack);
        cc.setPassedOrder(kGroupGp, kZcx, kZdx);
        cc.setPassedOrder(kGroupVec, 0, 1, 2, 3, 4, 5);
        break;

      case CallConv::kIdThisCall:
        // NOTE: Even MinGW (starting with GCC 4.7.0) now uses __thiscall on MS Windows,
        // so we don't fall back to any other calling convention if __thiscall was specified.
        if (winABI) {
          cc.setFlags(CallConv::kFlagCalleePopsStack);
          cc.setPassedOrder(kGroupGp, kZcx);
        }
        else {
          ccId = CallConv::kIdCDecl;
        }
        break;

      case CallConv::kIdRegParm1:
        cc.setPassedOrder(kGroupGp, kZax);
        break;

      case CallConv::kIdRegParm2:
        cc.setPassedOrder(kGroupGp, kZax, kZdx);
        break;

      case CallConv::kIdRegParm3:
        cc.setPassedOrder(kGroupGp, kZax, kZdx, kZcx);
        break;

      case CallConv::kIdLightCall2:
      case CallConv::kIdLightCall3:
      case CallConv::kIdLightCall4: {
        uint32_t n = (ccId - CallConv::kIdLightCall2) + 2;

        cc.setFlags(CallConv::kFlagPassFloatsByVec);
        cc.setPassedOrder(kGroupGp, kZax, kZdx, kZcx, kZsi, kZdi);
        cc.setPassedOrder(kGroupMm, 0, 1, 2, 3, 4, 5, 6, 7);
        cc.setPassedOrder(kGroupVec, 0, 1, 2, 3, 4, 5, 6, 7);
        cc.setPassedOrder(kGroupKReg, 0, 1, 2, 3, 4, 5, 6, 7);
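        // All 8 GP registers are preserved. Of the vector registers, the first
        // `n` are clobberable and the remaining ones up to XMM7 are preserved.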
        cc.setPreservedRegs(kGroupGp, Support::lsbMask<uint32_t>(8));
        cc.setPreservedRegs(kGroupVec, Support::lsbMask<uint32_t>(8) & ~Support::lsbMask<uint32_t>(n));

        cc.setNaturalStackAlignment(16);
        isStandardCallConv = false;
        break;
      }

      default:
        return DebugUtils::errored(kErrorInvalidArgument);
    }

    if (isStandardCallConv) {
      // How MMX arguments are passed is something compiler vendors disagree on.
      // For example GCC and MSVC pass the first three in registers and the rest
      // via the stack, whereas Clang passes all of them via the stack. Returning
      // MMX registers is even more fun, where GCC uses MM0, but Clang uses the
      // EAX:EDX pair. I'm not sure it's something we should worry about as MMX
      // is deprecated anyway.
      cc.setPassedOrder(kGroupMm, 0, 1, 2);

      // Vector arguments (XMM|YMM|ZMM) are passed via registers. However, if the
      // function is variadic then they have to be passed via the stack.
      cc.setPassedOrder(kGroupVec, 0, 1, 2);

      // Functions with variable arguments always use the stack for MM and vector
      // arguments.
      cc.addFlags(CallConv::kFlagPassVecByStackIfVA);
    }

    if (ccId == CallConv::kIdCDecl) {
      cc.addFlags(CallConv::kFlagVarArgCompatible);
    }
  }
  else {
    // Preprocess the calling convention into a common id, because in 64-bit mode
    // many 32-bit conventions are ignored even by C/C++ compilers and treated
    // as `__cdecl`.
    if (shouldThreatAsCDeclIn64BitMode(ccId))
      ccId = winABI ? CallConv::kIdX64Windows : CallConv::kIdX64SystemV;

    switch (ccId) {
      case CallConv::kIdX64SystemV: {
        cc.setFlags(CallConv::kFlagPassFloatsByVec |
                    CallConv::kFlagPassMmxByXmm    |
                    CallConv::kFlagVarArgCompatible);
        cc.setNaturalStackAlignment(16);
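        // The SysV AMD64 ABI defines a 128-byte red zone below the stack pointer
        // that leaf functions can use without adjusting RSP.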
171         cc.setRedZoneSize(128);
172         cc.setPassedOrder(kGroupGp, kZdi, kZsi, kZdx, kZcx, 8, 9);
173         cc.setPassedOrder(kGroupVec, 0, 1, 2, 3, 4, 5, 6, 7);
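        // RBX, RSP, RBP, and R12-R15 are callee-saved in the SysV AMD64 ABI.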
        cc.setPreservedRegs(kGroupGp, Support::bitMask(kZbx, kZsp, kZbp, 12, 13, 14, 15));
        break;
      }

      case CallConv::kIdX64Windows: {
        cc.setStrategy(CallConv::kStrategyX64Windows);
        cc.setFlags(CallConv::kFlagPassFloatsByVec |
                    CallConv::kFlagIndirectVecArgs |
                    CallConv::kFlagPassMmxByGp     |
                    CallConv::kFlagVarArgCompatible);
        cc.setNaturalStackAlignment(16);
        // Maximum 4 arguments in registers, each adds 8 bytes to the spill zone.
        cc.setSpillZoneSize(4 * 8);
        cc.setPassedOrder(kGroupGp, kZcx, kZdx, 8, 9);
        cc.setPassedOrder(kGroupVec, 0, 1, 2, 3);
        cc.setPreservedRegs(kGroupGp, Support::bitMask(kZbx, kZsp, kZbp, kZsi, kZdi, 12, 13, 14, 15));
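        // On Win64, XMM6-XMM15 are callee-saved (their low 128 bits must be preserved).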
        cc.setPreservedRegs(kGroupVec, Support::bitMask(6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
        break;
      }

      case CallConv::kIdVectorCall: {
        cc.setStrategy(CallConv::kStrategyX64VectorCall);
        cc.setFlags(CallConv::kFlagPassFloatsByVec |
                    CallConv::kFlagPassMmxByGp     );
        cc.setNaturalStackAlignment(16);
        // Maximum 6 arguments in registers, each adds 8 bytes to the spill zone.
        cc.setSpillZoneSize(6 * 8);
        cc.setPassedOrder(kGroupGp, kZcx, kZdx, 8, 9);
        cc.setPassedOrder(kGroupVec, 0, 1, 2, 3, 4, 5);
        cc.setPreservedRegs(kGroupGp, Support::bitMask(kZbx, kZsp, kZbp, kZsi, kZdi, 12, 13, 14, 15));
        cc.setPreservedRegs(kGroupVec, Support::bitMask(6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
        break;
      }

      case CallConv::kIdLightCall2:
      case CallConv::kIdLightCall3:
      case CallConv::kIdLightCall4: {
        uint32_t n = (ccId - CallConv::kIdLightCall2) + 2;

        cc.setFlags(CallConv::kFlagPassFloatsByVec);
        cc.setNaturalStackAlignment(16);
        cc.setPassedOrder(kGroupGp, kZax, kZdx, kZcx, kZsi, kZdi);
        cc.setPassedOrder(kGroupMm, 0, 1, 2, 3, 4, 5, 6, 7);
        cc.setPassedOrder(kGroupVec, 0, 1, 2, 3, 4, 5, 6, 7);
        cc.setPassedOrder(kGroupKReg, 0, 1, 2, 3, 4, 5, 6, 7);

        cc.setPreservedRegs(kGroupGp, Support::lsbMask<uint32_t>(16));
        cc.setPreservedRegs(kGroupVec, ~Support::lsbMask<uint32_t>(n));
        break;
      }

      default:
        return DebugUtils::errored(kErrorInvalidArgument);
    }
  }

  cc.setId(ccId);
  return kErrorOk;
}

} // {CallConvInternal}

ASMJIT_END_SUB_NAMESPACE

#endif // ASMJIT_BUILD_X86