/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x64/MacroAssembler-x64.h"

#include "jit/BaselineFrame.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/MacroAssembler.h"
#include "jit/MoveEmitter.h"
#include "util/Memory.h"
#include "vm/BigIntType.h"
#include "vm/JitActivation.h"  // js::jit::JitActivation
#include "vm/JSContext.h"
#include "vm/StringType.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;

void MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest) {
  if (maybeInlineDouble(d, dest)) {
    return;
  }
  Double* dbl = getDouble(d);
  if (!dbl) {
    return;
  }
  // The constants will be stored in a pool appended to the text (see
  // finish()), so they will always be a fixed distance from the
  // instructions which reference them. This allows the instructions to use
  // PC-relative addressing. Use "jump" label support code, because we need
  // the same PC-relative address patching that jumps use.
  JmpSrc j = masm.vmovsd_ripr(dest.encoding());
  propagateOOM(dbl->uses.append(j));
}

void MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest) {
  if (maybeInlineFloat(f, dest)) {
    return;
  }
  Float* flt = getFloat(f);
  if (!flt) {
    return;
  }
  // See comment in loadConstantDouble.
  JmpSrc j = masm.vmovss_ripr(dest.encoding());
  propagateOOM(flt->uses.append(j));
}

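// Emit a SIMD instruction whose memory operand is RIP-relative and refers to
// a 16-byte constant in the pool appended by finish(); the recorded JmpSrc
// lets bindOffsets() patch in the constant's final offset, reusing the jump
// patching machinery described in loadConstantDouble above.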
void MacroAssemblerX64::vpRiprOpSimd128(
    const SimdConstant& v, FloatRegister reg,
    JmpSrc (X86Encoding::BaseAssemblerX64::*op)(
        X86Encoding::XMMRegisterID id)) {
  SimdData* val = getSimdData(v);
  if (!val) {
    return;
  }
  JmpSrc j = (masm.*op)(reg.encoding());
  propagateOOM(val->uses.append(j));
}

void MacroAssemblerX64::vpRiprOpSimd128(
    const SimdConstant& v, FloatRegister src, FloatRegister dest,
    JmpSrc (X86Encoding::BaseAssemblerX64::*op)(
        X86Encoding::XMMRegisterID srcId, X86Encoding::XMMRegisterID destId)) {
  SimdData* val = getSimdData(v);
  if (!val) {
    return;
  }
  JmpSrc j = (masm.*op)(src.encoding(), dest.encoding());
  propagateOOM(val->uses.append(j));
}

void MacroAssemblerX64::loadConstantSimd128Int(const SimdConstant& v,
                                               FloatRegister dest) {
  if (maybeInlineSimd128Int(v, dest)) {
    return;
  }
  vpRiprOpSimd128(v, dest, &X86Encoding::BaseAssemblerX64::vmovdqa_ripr);
}

void MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant& v,
                                                 FloatRegister dest) {
  if (maybeInlineSimd128Float(v, dest)) {
    return;
  }
  vpRiprOpSimd128(v, dest, &X86Encoding::BaseAssemblerX64::vmovaps_ripr);
}

void MacroAssemblerX64::vpaddbSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddb_ripr);
}

void MacroAssemblerX64::vpaddwSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddw_ripr);
}

void MacroAssemblerX64::vpadddSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddd_ripr);
}

void MacroAssemblerX64::vpaddqSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddq_ripr);
}

void MacroAssemblerX64::vpsubbSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubb_ripr);
}

void MacroAssemblerX64::vpsubwSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubw_ripr);
}

void MacroAssemblerX64::vpsubdSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubd_ripr);
}

void MacroAssemblerX64::vpsubqSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubq_ripr);
}

void MacroAssemblerX64::vpmullwSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmullw_ripr);
}

void MacroAssemblerX64::vpmulldSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmulld_ripr);
}

void MacroAssemblerX64::vpaddsbSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddsb_ripr);
}

void MacroAssemblerX64::vpaddusbSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddusb_ripr);
}

void MacroAssemblerX64::vpaddswSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddsw_ripr);
}

void MacroAssemblerX64::vpadduswSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddusw_ripr);
}

void MacroAssemblerX64::vpsubsbSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubsb_ripr);
}

void MacroAssemblerX64::vpsubusbSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubusb_ripr);
}

void MacroAssemblerX64::vpsubswSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubsw_ripr);
}

void MacroAssemblerX64::vpsubuswSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubusw_ripr);
}

void MacroAssemblerX64::vpminsbSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsb_ripr);
}

void MacroAssemblerX64::vpminubSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminub_ripr);
}

void MacroAssemblerX64::vpminswSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsw_ripr);
}

void MacroAssemblerX64::vpminuwSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminuw_ripr);
}

void MacroAssemblerX64::vpminsdSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsd_ripr);
}

void MacroAssemblerX64::vpminudSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminud_ripr);
}

void MacroAssemblerX64::vpmaxsbSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsb_ripr);
}

void MacroAssemblerX64::vpmaxubSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxub_ripr);
}

void MacroAssemblerX64::vpmaxswSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsw_ripr);
}

void MacroAssemblerX64::vpmaxuwSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxuw_ripr);
}

void MacroAssemblerX64::vpmaxsdSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsd_ripr);
}

void MacroAssemblerX64::vpmaxudSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxud_ripr);
}

void MacroAssemblerX64::vpandSimd128(const SimdConstant& v, FloatRegister lhs,
                                     FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpand_ripr);
}

void MacroAssemblerX64::vpxorSimd128(const SimdConstant& v, FloatRegister lhs,
                                     FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpxor_ripr);
}

void MacroAssemblerX64::vporSimd128(const SimdConstant& v, FloatRegister lhs,
                                    FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpor_ripr);
}

void MacroAssemblerX64::vaddpsSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vaddps_ripr);
}

void MacroAssemblerX64::vaddpdSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vaddpd_ripr);
}

void MacroAssemblerX64::vsubpsSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vsubps_ripr);
}

void MacroAssemblerX64::vsubpdSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vsubpd_ripr);
}

void MacroAssemblerX64::vdivpsSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vdivps_ripr);
}

void MacroAssemblerX64::vdivpdSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vdivpd_ripr);
}

void MacroAssemblerX64::vmulpsSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vmulps_ripr);
}

void MacroAssemblerX64::vmulpdSimd128(const SimdConstant& v, FloatRegister lhs,
                                      FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vmulpd_ripr);
}

void MacroAssemblerX64::vpacksswbSimd128(const SimdConstant& v,
                                         FloatRegister lhs,
                                         FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpacksswb_ripr);
}

void MacroAssemblerX64::vpackuswbSimd128(const SimdConstant& v,
                                         FloatRegister lhs,
                                         FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackuswb_ripr);
}

void MacroAssemblerX64::vpackssdwSimd128(const SimdConstant& v,
                                         FloatRegister lhs,
                                         FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackssdw_ripr);
}

void MacroAssemblerX64::vpackusdwSimd128(const SimdConstant& v,
                                         FloatRegister lhs,
                                         FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackusdw_ripr);
}

void MacroAssemblerX64::vpshufbSimd128(const SimdConstant& v, FloatRegister lhs,
                                       FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpshufb_ripr);
}

void MacroAssemblerX64::vptestSimd128(const SimdConstant& v,
                                      FloatRegister lhs) {
  vpRiprOpSimd128(v, lhs, &X86Encoding::BaseAssemblerX64::vptest_ripr);
}

void MacroAssemblerX64::vpmaddwdSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaddwd_ripr);
}

void MacroAssemblerX64::vpcmpeqbSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqb_ripr);
}

void MacroAssemblerX64::vpcmpgtbSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtb_ripr);
}

void MacroAssemblerX64::vpcmpeqwSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqw_ripr);
}

void MacroAssemblerX64::vpcmpgtwSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtw_ripr);
}

void MacroAssemblerX64::vpcmpeqdSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqd_ripr);
}

void MacroAssemblerX64::vpcmpgtdSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtd_ripr);
}

void MacroAssemblerX64::vcmpeqpsSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpeqps_ripr);
}

void MacroAssemblerX64::vcmpneqpsSimd128(const SimdConstant& v,
                                         FloatRegister lhs,
                                         FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpneqps_ripr);
}

void MacroAssemblerX64::vcmpltpsSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpltps_ripr);
}

void MacroAssemblerX64::vcmplepsSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpleps_ripr);
}

void MacroAssemblerX64::vcmpeqpdSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpeqpd_ripr);
}

void MacroAssemblerX64::vcmpneqpdSimd128(const SimdConstant& v,
                                         FloatRegister lhs,
                                         FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpneqpd_ripr);
}

void MacroAssemblerX64::vcmpltpdSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpltpd_ripr);
}

void MacroAssemblerX64::vcmplepdSimd128(const SimdConstant& v,
                                        FloatRegister lhs, FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmplepd_ripr);
}

void MacroAssemblerX64::vpmaddubswSimd128(const SimdConstant& v,
                                          FloatRegister lhs,
                                          FloatRegister dest) {
  vpRiprOpSimd128(v, lhs, dest,
                  &X86Encoding::BaseAssemblerX64::vpmaddubsw_ripr);
}

void MacroAssemblerX64::bindOffsets(
    const MacroAssemblerX86Shared::UsesVector& uses) {
  for (JmpSrc src : uses) {
    JmpDst dst(currentOffset());
    // Using linkJump here is safe, as explained in the comment in
    // loadConstantDouble.
    masm.linkJump(src, dst);
  }
}

void MacroAssemblerX64::finish() {
  if (!doubles_.empty()) {
    masm.haltingAlign(sizeof(double));
  }
  for (const Double& d : doubles_) {
    bindOffsets(d.uses);
    masm.doubleConstant(d.value);
  }

  if (!floats_.empty()) {
    masm.haltingAlign(sizeof(float));
  }
  for (const Float& f : floats_) {
    bindOffsets(f.uses);
    masm.floatConstant(f.value);
  }

  // SIMD memory values must be suitably aligned.
  if (!simds_.empty()) {
    masm.haltingAlign(SimdMemoryAlignment);
  }
  for (const SimdData& v : simds_) {
    bindOffsets(v.uses);
    masm.simd128Constant(v.value.bytes());
  }

  MacroAssemblerX86Shared::finish();
}

void MacroAssemblerX64::boxValue(JSValueType type, Register src,
                                 Register dest) {
  MOZ_ASSERT(src != dest);

  JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
#ifdef DEBUG
  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
    Label upper32BitsZeroed;
    movePtr(ImmWord(UINT32_MAX), dest);
    asMasm().branchPtr(Assembler::BelowOrEqual, src, dest, &upper32BitsZeroed);
    breakpoint();
    bind(&upper32BitsZeroed);
  }
#endif
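  // A boxed Value is (shifted tag | payload). For Int32 and Boolean the
  // payload occupies only the low 32 bits (the debug check above verifies
  // this), so the orq below cannot disturb the tag bits.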
  mov(ImmShiftedTag(tag), dest);
  orq(src, dest);
}

void MacroAssemblerX64::handleFailureWithHandlerTail(Label* profilerExitTail) {
  // Reserve space for exception information.
  subq(Imm32(sizeof(ResumeFromException)), rsp);
  movq(rsp, rax);

  // Call the handler.
  using Fn = void (*)(ResumeFromException* rfe);
  asMasm().setupUnalignedABICall(rcx);
  asMasm().passABIArg(rax);
  asMasm().callWithABI<Fn, HandleException>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  Label entryFrame;
  Label catch_;
  Label finally;
  Label return_;
  Label bailout;
  Label wasm;
  Label wasmCatch;

  load32(Address(rsp, offsetof(ResumeFromException, kind)), rax);
  asMasm().branch32(Assembler::Equal, rax,
                    Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
                    &entryFrame);
  asMasm().branch32(Assembler::Equal, rax,
                    Imm32(ResumeFromException::RESUME_CATCH), &catch_);
  asMasm().branch32(Assembler::Equal, rax,
                    Imm32(ResumeFromException::RESUME_FINALLY), &finally);
  asMasm().branch32(Assembler::Equal, rax,
                    Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
  asMasm().branch32(Assembler::Equal, rax,
                    Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
  asMasm().branch32(Assembler::Equal, rax,
                    Imm32(ResumeFromException::RESUME_WASM), &wasm);
  asMasm().branch32(Assembler::Equal, rax,
                    Imm32(ResumeFromException::RESUME_WASM_CATCH), &wasmCatch);

  breakpoint();  // Invalid kind.

  // No exception handler. Load the error value, load the new stack pointer
  // and return from the entry frame.
  bind(&entryFrame);
  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
  loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
  ret();

  // If we found a catch handler, this must be a baseline frame. Restore state
  // and jump to the catch block.
  bind(&catch_);
  loadPtr(Address(rsp, offsetof(ResumeFromException, target)), rax);
  loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
  loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
  jmp(Operand(rax));

  // If we found a finally block, this must be a baseline frame. Push
  // two values expected by JSOp::Retsub: BooleanValue(true) and the
  // exception.
  bind(&finally);
  ValueOperand exception = ValueOperand(rcx);
  loadValue(Address(rsp, offsetof(ResumeFromException, exception)), exception);

  loadPtr(Address(rsp, offsetof(ResumeFromException, target)), rax);
  loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
  loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);

  pushValue(BooleanValue(true));
  pushValue(exception);
  jmp(Operand(rax));

  // Only used in debug mode. Return BaselineFrame->returnValue() to the
  // caller.
  bind(&return_);
  loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
  loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
  loadValue(Address(rbp, BaselineFrame::reverseOffsetOfReturnValue()),
            JSReturnOperand);
  movq(rbp, rsp);
  pop(rbp);

  // If profiling is enabled, then update the lastProfilingFrame to refer to
  // the caller frame before returning.
  {
    Label skipProfilingInstrumentation;
    AbsoluteAddress addressOfEnabled(
        GetJitContext()->runtime->geckoProfiler().addressOfEnabled());
    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                      &skipProfilingInstrumentation);
    jump(profilerExitTail);
    bind(&skipProfilingInstrumentation);
  }

  ret();

  // If we are bailing out to baseline to handle an exception, jump to the
  // bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
  bind(&bailout);
  loadPtr(Address(rsp, offsetof(ResumeFromException, bailoutInfo)), r9);
  move32(Imm32(1), ReturnReg);
  jmp(Operand(rsp, offsetof(ResumeFromException, target)));

  // If we are throwing and the innermost frame was a wasm frame, reset SP and
  // FP; SP is pointing to the unwound return address to the wasm entry, so
  // we can just ret().
  bind(&wasm);
  loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
  loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
  masm.ret();

  // Found a wasm catch handler, restore state and jump to it.
  bind(&wasmCatch);
  loadPtr(Address(rsp, offsetof(ResumeFromException, target)), rax);
  loadPtr(Address(rsp, offsetof(ResumeFromException, framePointer)), rbp);
  loadPtr(Address(rsp, offsetof(ResumeFromException, stackPointer)), rsp);
  jmp(Operand(rax));
}

void MacroAssemblerX64::profilerEnterFrame(Register framePtr,
                                           Register scratch) {
  asMasm().loadJSContext(scratch);
  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
  storePtr(framePtr,
           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
  storePtr(ImmPtr(nullptr),
           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}

void MacroAssemblerX64::profilerExitFrame() {
  jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
}

Assembler::Condition MacroAssemblerX64::testStringTruthy(
    bool truthy, const ValueOperand& value) {
  ScratchRegisterScope scratch(asMasm());
  unboxString(value, scratch);
  cmp32(Operand(scratch, JSString::offsetOfLength()), Imm32(0));
  return truthy ? Assembler::NotEqual : Assembler::Equal;
}

Assembler::Condition MacroAssemblerX64::testBigIntTruthy(
    bool truthy, const ValueOperand& value) {
  ScratchRegisterScope scratch(asMasm());
  unboxBigInt(value, scratch);
  cmp32(Operand(scratch, JS::BigInt::offsetOfDigitLength()), Imm32(0));
  return truthy ? Assembler::NotEqual : Assembler::Equal;
}

MacroAssembler& MacroAssemblerX64::asMasm() {
  return *static_cast<MacroAssembler*>(this);
}

const MacroAssembler& MacroAssemblerX64::asMasm() const {
  return *static_cast<const MacroAssembler*>(this);
}

void MacroAssembler::subFromStackPtr(Imm32 imm32) {
  if (imm32.value) {
    // On Windows, we cannot skip very far down the stack without touching the
    // memory pages in between. This is corner-case code for situations where
    // the Ion frame data for a piece of code is very large. To handle this
    // special case, for frames over 4k in size we allocate memory on the stack
    // incrementally, touching it as we go.
    //
    // When the amount is quite large, which it can be, we emit an actual loop,
    // in order to keep the function prologue compact. Compactness is a
    // requirement for e.g. Wasm's CodeRange data structure, which can encode
    // only 8-bit offsets.
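    //
    // For example (illustrative): a 20 KiB frame is five full pages, so the
    // unrolled branch below touches each intervening page boundary with a
    // subq/store32 pair and finishes with one plain subq; a frame of, say,
    // forty pages takes the compact probe loop instead.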
    uint32_t amountLeft = imm32.value;
    uint32_t fullPages = amountLeft / 4096;
    if (fullPages <= 8) {
      while (amountLeft > 4096) {
        subq(Imm32(4096), StackPointer);
        store32(Imm32(0), Address(StackPointer, 0));
        amountLeft -= 4096;
      }
      subq(Imm32(amountLeft), StackPointer);
    } else {
      ScratchRegisterScope scratch(*this);
      Label top;
      move32(Imm32(fullPages), scratch);
      bind(&top);
      subq(Imm32(4096), StackPointer);
      store32(Imm32(0), Address(StackPointer, 0));
      subl(Imm32(1), scratch);
      j(Assembler::NonZero, &top);
      amountLeft -= fullPages * 4096;
      if (amountLeft) {
        subq(Imm32(amountLeft), StackPointer);
      }
    }
  }
}

void MacroAssemblerX64::convertDoubleToPtr(FloatRegister src, Register dest,
                                           Label* fail,
                                           bool negativeZeroCheck) {
  // Check for -0.0.
  if (negativeZeroCheck) {
    branchNegativeZero(src, dest, fail);
  }

  ScratchDoubleScope scratch(asMasm());
  vcvttsd2sq(src, dest);
  asMasm().convertInt64ToDouble(Register64(dest), scratch);
  vucomisd(scratch, src);
  j(Assembler::Parity, fail);
  j(Assembler::NotEqual, fail);
}

//{{{ check_macroassembler_style
// ===============================================================
// ABI function calls.

void MacroAssembler::setupUnalignedABICall(Register scratch) {
  setupNativeABICall();
  dynamicAlignment_ = true;

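  // Save the original rsp in a scratch register, align rsp down to
  // ABIStackAlignment, then push the saved value on the aligned stack;
  // callWithABIPost undoes this with pop(rsp).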
  movq(rsp, scratch);
  andq(Imm32(~(ABIStackAlignment - 1)), rsp);
  push(scratch);
}

void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
  MOZ_ASSERT(inCall_);
  uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

  if (dynamicAlignment_) {
    // sizeof(intptr_t) accounts for the saved stack pointer pushed by
    // setupUnalignedABICall.
    stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
                                         ABIStackAlignment);
  } else {
    uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
    stackForCall += ComputeByteAlignment(
        stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
  }

  *stackAdjust = stackForCall;
  reserveStack(stackForCall);

  // Position all arguments.
  {
    enoughMemory_ &= moveResolver_.resolve();
    if (!enoughMemory_) {
      return;
    }

    MoveEmitter emitter(*this);
    emitter.emit(moveResolver_);
    emitter.finish();
  }

  assertStackAlignment(ABIStackAlignment);
}

void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
                                     bool cleanupArg) {
  freeStack(stackAdjust);
  if (dynamicAlignment_) {
    pop(rsp);
  }

#ifdef DEBUG
  MOZ_ASSERT(inCall_);
  inCall_ = false;
#endif
}

static bool IsIntArgReg(Register reg) {
  for (uint32_t i = 0; i < NumIntArgRegs; i++) {
    if (IntArgRegs[i] == reg) {
      return true;
    }
  }

  return false;
}

void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
  if (IsIntArgReg(fun)) {
    // Callee register may be clobbered for an argument. Move the callee to
    // r10, a volatile, non-argument register.
    propagateOOM(moveResolver_.addMove(MoveOperand(fun), MoveOperand(r10),
                                       MoveOp::GENERAL));
    fun = r10;
  }

  MOZ_ASSERT(!IsIntArgReg(fun));

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(fun);
  callWithABIPost(stackAdjust, result);
}

void MacroAssembler::callWithABINoProfiler(const Address& fun,
                                           MoveOp::Type result) {
  Address safeFun = fun;
  if (IsIntArgReg(safeFun.base)) {
    // Callee register may be clobbered for an argument. Move the callee to
    // r10, a volatile, non-argument register.
    propagateOOM(moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10),
                                       MoveOp::GENERAL));
    safeFun.base = r10;
  }

  MOZ_ASSERT(!IsIntArgReg(safeFun.base));

  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(safeFun);
  callWithABIPost(stackAdjust, result);
}

// ===============================================================
// Move instructions

void MacroAssembler::moveValue(const TypedOrValueRegister& src,
                               const ValueOperand& dest) {
  if (src.hasValue()) {
    moveValue(src.valueReg(), dest);
    return;
  }

  MIRType type = src.type();
  AnyRegister reg = src.typedReg();

  if (!IsFloatingPointType(type)) {
    boxValue(ValueTypeFromMIRType(type), reg.gpr(), dest.valueReg());
    return;
  }

  ScratchDoubleScope scratch(*this);
  FloatRegister freg = reg.fpu();
  if (type == MIRType::Float32) {
    convertFloat32ToDouble(freg, scratch);
    freg = scratch;
  }
  boxDouble(freg, dest, freg);
}

void MacroAssembler::moveValue(const ValueOperand& src,
                               const ValueOperand& dest) {
  if (src == dest) {
    return;
  }
  movq(src.valueReg(), dest.valueReg());
}

void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
  movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
  writeDataRelocation(src);
}

// ===============================================================
// Branch functions

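// OR-ing a cell pointer with gc::ChunkMask yields the address of the last
// byte of its chunk; the chunk trailer keeps a StoreBuffer* at a fixed
// offset from that byte, and it is non-null only for nursery chunks.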
void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
  if (ptr != buffer) {
    movePtr(ptr, buffer);
  }
  orPtr(Imm32(gc::ChunkMask), buffer);
  loadPtr(Address(buffer, gc::ChunkStoreBufferOffsetFromLastByte), buffer);
}

void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
                                             Register temp, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(ptr != temp);
  MOZ_ASSERT(ptr != scratch);

  movePtr(ptr, scratch);
  orPtr(Imm32(gc::ChunkMask), scratch);
  branchPtr(InvertCondition(cond),
            Address(scratch, gc::ChunkStoreBufferOffsetFromLastByte),
            ImmWord(0), label);
}

template <typename T>
void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
                                                  const T& value, Register temp,
                                                  Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  MOZ_ASSERT(temp != InvalidReg);

  Label done;
  branchTestGCThing(Assembler::NotEqual, value,
                    cond == Assembler::Equal ? &done : label);

  unboxGCThingForGCBarrier(value, temp);
  orPtr(Imm32(gc::ChunkMask), temp);
  branchPtr(InvertCondition(cond),
            Address(temp, gc::ChunkStoreBufferOffsetFromLastByte), ImmWord(0),
            label);

  bind(&done);
}

void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              const Address& address,
                                              Register temp, Label* label) {
  branchValueIsNurseryCellImpl(cond, address, temp, label);
}

void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              ValueOperand value, Register temp,
                                              Label* label) {
  branchValueIsNurseryCellImpl(cond, value, temp, label);
}

void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
                                     const Value& rhs, Label* label) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(lhs.valueReg() != scratch);
  moveValue(rhs, ValueOperand(scratch));
  cmpPtr(lhs.valueReg(), scratch);
  j(cond, label);
}

// ========================================================================
// Memory access primitives.
template <typename T>
void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                       MIRType valueType, const T& dest,
                                       MIRType slotType) {
  if (valueType == MIRType::Double) {
    boxDouble(value.reg().typedReg().fpu(), dest);
    return;
  }

  // For known integers and booleans, we can just store the unboxed value if
  // the slot has the same type.
  if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) &&
      slotType == valueType) {
    if (value.constant()) {
      Value val = value.value();
      if (valueType == MIRType::Int32) {
        store32(Imm32(val.toInt32()), dest);
      } else {
        store32(Imm32(val.toBoolean() ? 1 : 0), dest);
      }
    } else {
      store32(value.reg().typedReg().gpr(), dest);
    }
    return;
  }

  if (value.constant()) {
    storeValue(value.value(), dest);
  } else {
    storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
               dest);
  }
}

template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                                MIRType valueType,
                                                const Address& dest,
                                                MIRType slotType);
template void MacroAssembler::storeUnboxedValue(
    const ConstantOrRegister& value, MIRType valueType,
    const BaseObjectElementIndex& dest, MIRType slotType);

void MacroAssembler::PushBoxed(FloatRegister reg) {
  subq(Imm32(sizeof(double)), StackPointer);
  boxDouble(reg, Address(StackPointer, 0));
  adjustFrame(sizeof(double));
}

// ========================================================================
// wasm support

void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
                              Operand srcAddr, AnyRegister out) {
  // NOTE: the generated code must match the assembly code in gen_load in
  // GenerateAtomicOperations.py
  memoryBarrierBefore(access.sync());

  MOZ_ASSERT_IF(
      access.isZeroExtendSimd128Load(),
      access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
  MOZ_ASSERT_IF(
      access.isSplatSimd128Load(),
      access.type() == Scalar::Uint8 || access.type() == Scalar::Uint16 ||
          access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
  MOZ_ASSERT_IF(access.isWidenSimd128Load(), access.type() == Scalar::Float64);

  append(access, size());
  switch (access.type()) {
    case Scalar::Int8:
      movsbl(srcAddr, out.gpr());
      break;
    case Scalar::Uint8:
      if (access.isSplatSimd128Load()) {
        vbroadcastb(srcAddr, out.fpu());
      } else {
        movzbl(srcAddr, out.gpr());
      }
      break;
    case Scalar::Int16:
      movswl(srcAddr, out.gpr());
      break;
    case Scalar::Uint16:
      if (access.isSplatSimd128Load()) {
        vbroadcastw(srcAddr, out.fpu());
      } else {
        movzwl(srcAddr, out.gpr());
      }
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      movl(srcAddr, out.gpr());
      break;
    case Scalar::Float32:
      if (access.isSplatSimd128Load()) {
        vbroadcastss(srcAddr, out.fpu());
      } else {
        // vmovss does the right thing also for access.isZeroExtendSimd128Load()
        vmovss(srcAddr, out.fpu());
      }
      break;
    case Scalar::Float64:
      if (access.isSplatSimd128Load()) {
        vmovddup(srcAddr, out.fpu());
      } else if (access.isWidenSimd128Load()) {
        switch (access.widenSimdOp()) {
          case wasm::SimdOp::V128Load8x8S:
            vpmovsxbw(srcAddr, out.fpu());
            break;
          case wasm::SimdOp::V128Load8x8U:
            vpmovzxbw(srcAddr, out.fpu());
            break;
          case wasm::SimdOp::V128Load16x4S:
            vpmovsxwd(srcAddr, out.fpu());
            break;
          case wasm::SimdOp::V128Load16x4U:
            vpmovzxwd(srcAddr, out.fpu());
            break;
          case wasm::SimdOp::V128Load32x2S:
            vpmovsxdq(srcAddr, out.fpu());
            break;
          case wasm::SimdOp::V128Load32x2U:
            vpmovzxdq(srcAddr, out.fpu());
            break;
          default:
            MOZ_CRASH("Unexpected widening op for wasmLoad");
        }
      } else {
        // vmovsd does the right thing also for access.isZeroExtendSimd128Load()
        vmovsd(srcAddr, out.fpu());
      }
      break;
    case Scalar::Simd128:
      MacroAssemblerX64::loadUnalignedSimd128(srcAddr, out.fpu());
      break;
    case Scalar::Int64:
      MOZ_CRASH("int64 loads must use load64");
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::Uint8Clamped:
    case Scalar::MaxTypedArrayViewType:
      MOZ_CRASH("unexpected scalar type for wasmLoad");
  }

  memoryBarrierAfter(access.sync());
}

void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
                                 Operand srcAddr, Register64 out) {
  // NOTE: the generated code must match the assembly code in gen_load in
  // GenerateAtomicOperations.py
  memoryBarrierBefore(access.sync());

  append(access, size());
  switch (access.type()) {
    case Scalar::Int8:
      movsbq(srcAddr, out.reg);
      break;
    case Scalar::Uint8:
      movzbq(srcAddr, out.reg);
      break;
    case Scalar::Int16:
      movswq(srcAddr, out.reg);
      break;
    case Scalar::Uint16:
      movzwq(srcAddr, out.reg);
      break;
    case Scalar::Int32:
      movslq(srcAddr, out.reg);
      break;
    // Int32 to int64 moves zero-extend by default.
    case Scalar::Uint32:
      movl(srcAddr, out.reg);
      break;
    case Scalar::Int64:
      movq(srcAddr, out.reg);
      break;
    case Scalar::Float32:
    case Scalar::Float64:
    case Scalar::Simd128:
      MOZ_CRASH("float loads must use wasmLoad");
    case Scalar::Uint8Clamped:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::MaxTypedArrayViewType:
      MOZ_CRASH("unexpected scalar type for wasmLoadI64");
  }

  memoryBarrierAfter(access.sync());
}

void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
                               AnyRegister value, Operand dstAddr) {
  // NOTE: the generated code must match the assembly code in gen_store in
  // GenerateAtomicOperations.py
  memoryBarrierBefore(access.sync());

  append(access, masm.size());
  switch (access.type()) {
    case Scalar::Int8:
    case Scalar::Uint8:
      movb(value.gpr(), dstAddr);
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
      movw(value.gpr(), dstAddr);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      movl(value.gpr(), dstAddr);
      break;
    case Scalar::Int64:
      movq(value.gpr(), dstAddr);
      break;
    case Scalar::Float32:
      storeUncanonicalizedFloat32(value.fpu(), dstAddr);
      break;
    case Scalar::Float64:
      storeUncanonicalizedDouble(value.fpu(), dstAddr);
      break;
    case Scalar::Simd128:
      MacroAssemblerX64::storeUnalignedSimd128(value.fpu(), dstAddr);
      break;
    case Scalar::Uint8Clamped:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::MaxTypedArrayViewType:
      MOZ_CRASH("unexpected array type");
  }

  memoryBarrierAfter(access.sync());
}

void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,
                                                Label* oolEntry) {
  vcvttsd2sq(input, output);

  // Check that the result is in the uint32_t range.
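  // A single unsigned compare suffices: negative results and the INT64_MIN
  // overflow/NaN indicator from vcvttsd2sq are both huge as unsigned values,
  // so they also take the Above branch.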
  ScratchRegisterScope scratch(*this);
  move32(Imm32(0xffffffff), scratch);
  cmpq(scratch, output);
  j(Assembler::Above, oolEntry);
}

void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
                                                 Register output,
                                                 bool isSaturating,
                                                 Label* oolEntry) {
  vcvttss2sq(input, output);

  // Check that the result is in the uint32_t range.
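  // See wasmTruncateDoubleToUInt32 above for why one unsigned compare is
  // enough.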
  ScratchRegisterScope scratch(*this);
  move32(Imm32(0xffffffff), scratch);
  cmpq(scratch, output);
  j(Assembler::Above, oolEntry);
}

void MacroAssembler::wasmTruncateDoubleToInt64(
    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempReg) {
  vcvttsd2sq(input, output.reg);
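  // On overflow or NaN, vcvttsd2sq yields INT64_MIN (0x8000000000000000).
  // cmpq computes output - 1, and INT64_MIN is the only value for which that
  // subtraction overflows, so the OF branch fires exactly on failure.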
  cmpq(Imm32(1), output.reg);
  j(Assembler::Overflow, oolEntry);
  bind(oolRejoin);
}

void MacroAssembler::wasmTruncateFloat32ToInt64(
    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempReg) {
  vcvttss2sq(input, output.reg);
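  // Same overflow-flag trick as in wasmTruncateDoubleToInt64 above.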
  cmpq(Imm32(1), output.reg);
  j(Assembler::Overflow, oolEntry);
  bind(oolRejoin);
}

void MacroAssembler::wasmTruncateDoubleToUInt64(
    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempReg) {
  // If the input is smaller than 2^63, vcvttsd2sq will do the right thing, so
  // we use it directly. Else, we subtract 2^63, convert to int64, and then
  // set the sign bit (add 2^63 back) in the result.

  Label isLarge;

  ScratchDoubleScope scratch(*this);
  loadConstantDouble(double(0x8000000000000000), scratch);
  branchDouble(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
  vcvttsd2sq(input, output.reg);
  testq(output.reg, output.reg);
  j(Assembler::Signed, oolEntry);
  jump(oolRejoin);

  bind(&isLarge);

  moveDouble(input, tempReg);
  vsubsd(scratch, tempReg, tempReg);
  vcvttsd2sq(tempReg, output.reg);
  testq(output.reg, output.reg);
  j(Assembler::Signed, oolEntry);
  or64(Imm64(0x8000000000000000), output);

  bind(oolRejoin);
}

void MacroAssembler::wasmTruncateFloat32ToUInt64(
    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempReg) {
  // If the input is smaller than 2^63, vcvttss2sq will do the right thing, so
  // we use it directly. Else, we subtract 2^63, convert to int64, and then
  // set the sign bit (add 2^63 back) in the result.

  Label isLarge;

  ScratchFloat32Scope scratch(*this);
  loadConstantFloat32(float(0x8000000000000000), scratch);
  branchFloat(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
  vcvttss2sq(input, output.reg);
  testq(output.reg, output.reg);
  j(Assembler::Signed, oolEntry);
  jump(oolRejoin);

  bind(&isLarge);

  moveFloat32(input, tempReg);
  vsubss(scratch, tempReg, tempReg);
  vcvttss2sq(tempReg, output.reg);
  testq(output.reg, output.reg);
  j(Assembler::Signed, oolEntry);
  or64(Imm64(0x8000000000000000), output);

  bind(oolRejoin);
}

void MacroAssembler::widenInt32(Register r) {
  move32To64ZeroExtend(r, Register64(r));
}

// ========================================================================
// Convert floating point.

void MacroAssembler::convertInt64ToDouble(Register64 input,
                                          FloatRegister output) {
  // Zero the output register to break dependencies, see convertInt32ToDouble.
  zeroDouble(output);

  vcvtsq2sd(input.reg, output, output);
}

void MacroAssembler::convertInt64ToFloat32(Register64 input,
                                           FloatRegister output) {
  // Zero the output register to break dependencies, see convertInt32ToDouble.
  zeroFloat32(output);

  vcvtsq2ss(input.reg, output, output);
}

bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return true; }

void MacroAssembler::convertUInt64ToDouble(Register64 input,
                                           FloatRegister output,
                                           Register temp) {
  // Zero the output register to break dependencies, see convertInt32ToDouble.
  zeroDouble(output);

  // If the input's sign bit is not set we use vcvtsq2sd directly.
  // Else, we divide by 2 and keep the LSB, convert to double, and multiply
  // the result by 2.
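  //
  // Keeping the shifted-out LSB makes the halving round to odd, which
  // guarantees that converting the halved value and then doubling it with
  // vaddsd produces the correctly rounded double for the original uint64.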
  Label done;
  Label isSigned;

  testq(input.reg, input.reg);
  j(Assembler::Signed, &isSigned);
  vcvtsq2sd(input.reg, output, output);
  jump(&done);

  bind(&isSigned);

  ScratchRegisterScope scratch(*this);
  mov(input.reg, scratch);
  mov(input.reg, temp);
  shrq(Imm32(1), scratch);
  andq(Imm32(1), temp);
  orq(temp, scratch);

  vcvtsq2sd(scratch, output, output);
  vaddsd(output, output, output);

  bind(&done);
}

void MacroAssembler::convertUInt64ToFloat32(Register64 input,
                                            FloatRegister output,
                                            Register temp) {
  // Zero the output register to break dependencies, see convertInt32ToDouble.
  zeroFloat32(output);

  // See comment in convertUInt64ToDouble.
  Label done;
  Label isSigned;

  testq(input.reg, input.reg);
  j(Assembler::Signed, &isSigned);
  vcvtsq2ss(input.reg, output, output);
  jump(&done);

  bind(&isSigned);

  ScratchRegisterScope scratch(*this);
  mov(input.reg, scratch);
  mov(input.reg, temp);
  shrq(Imm32(1), scratch);
  andq(Imm32(1), temp);
  orq(temp, scratch);

  vcvtsq2ss(scratch, output, output);
  vaddss(output, output, output);

  bind(&done);
}

void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
  convertInt64ToDouble(Register64(src), dest);
}

// ========================================================================
// Primitive atomic operations.

void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                                           const Address& mem,
                                           Register64 expected,
                                           Register64 replacement,
                                           Register64 output) {
  MOZ_ASSERT(output.reg == rax);
  if (expected != output) {
    movq(expected.reg, output.reg);
  }
  append(access, size());
  lock_cmpxchgq(replacement.reg, Operand(mem));
}

void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                                           const BaseIndex& mem,
                                           Register64 expected,
                                           Register64 replacement,
                                           Register64 output) {
  MOZ_ASSERT(output.reg == rax);
  if (expected != output) {
    movq(expected.reg, output.reg);
  }
  append(access, size());
  lock_cmpxchgq(replacement.reg, Operand(mem));
}

void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                                          const Address& mem, Register64 value,
                                          Register64 output) {
  if (value != output) {
    movq(value.reg, output.reg);
  }
  append(access, masm.size());
  xchgq(output.reg, Operand(mem));
}

void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                                          const BaseIndex& mem,
                                          Register64 value, Register64 output) {
  if (value != output) {
    movq(value.reg, output.reg);
  }
  append(access, masm.size());
  xchgq(output.reg, Operand(mem));
}

template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
                            const wasm::MemoryAccessDesc* access, AtomicOp op,
                            Register value, const T& mem, Register temp,
                            Register output) {
  // NOTE: the generated code must match the assembly code in gen_fetchop in
  // GenerateAtomicOperations.py
  if (op == AtomicFetchAddOp) {
    if (value != output) {
      masm.movq(value, output);
    }
    if (access) {
      masm.append(*access, masm.size());
    }
    masm.lock_xaddq(output, Operand(mem));
  } else if (op == AtomicFetchSubOp) {
    if (value != output) {
      masm.movq(value, output);
    }
    masm.negq(output);
    if (access) {
      masm.append(*access, masm.size());
    }
    masm.lock_xaddq(output, Operand(mem));
  } else {
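    // x86 has no fetch-and/fetch-or/fetch-xor instruction, so emit a CAS
    // loop: rax holds the last observed value (lock cmpxchgq reloads it on
    // failure), and temp holds the updated value for the next attempt.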
    Label again;
    MOZ_ASSERT(output == rax);
    MOZ_ASSERT(value != output);
    MOZ_ASSERT(value != temp);
    MOZ_ASSERT(temp != output);
    if (access) {
      masm.append(*access, masm.size());
    }
    masm.movq(Operand(mem), rax);
    masm.bind(&again);
    masm.movq(rax, temp);
    switch (op) {
      case AtomicFetchAndOp:
        masm.andq(value, temp);
        break;
      case AtomicFetchOrOp:
        masm.orq(value, temp);
        break;
      case AtomicFetchXorOp:
        masm.xorq(value, temp);
        break;
      default:
        MOZ_CRASH();
    }
    masm.lock_cmpxchgq(temp, Operand(mem));
    masm.j(MacroAssembler::NonZero, &again);
  }
}

void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
                                         AtomicOp op, Register64 value,
                                         const Address& mem, Register64 temp,
                                         Register64 output) {
  AtomicFetchOp64(*this, &access, op, value.reg, mem, temp.reg, output.reg);
}

void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
                                         AtomicOp op, Register64 value,
                                         const BaseIndex& mem, Register64 temp,
                                         Register64 output) {
  AtomicFetchOp64(*this, &access, op, value.reg, mem, temp.reg, output.reg);
}

1439 template <typename T>
AtomicEffectOp64(MacroAssembler & masm,const wasm::MemoryAccessDesc * access,AtomicOp op,Register value,const T & mem)1440 static void AtomicEffectOp64(MacroAssembler& masm,
1441 const wasm::MemoryAccessDesc* access, AtomicOp op,
1442 Register value, const T& mem) {
1443 if (access) {
1444 masm.append(*access, masm.size());
1445 }
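  // The old value is not needed here, so each operation can use its direct
  // LOCK-prefixed read-modify-write form rather than XADD or a CMPXCHG loop.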
  switch (op) {
    case AtomicFetchAddOp:
      masm.lock_addq(value, Operand(mem));
      break;
    case AtomicFetchSubOp:
      masm.lock_subq(value, Operand(mem));
      break;
    case AtomicFetchAndOp:
      masm.lock_andq(value, Operand(mem));
      break;
    case AtomicFetchOrOp:
      masm.lock_orq(value, Operand(mem));
      break;
    case AtomicFetchXorOp:
      masm.lock_xorq(value, Operand(mem));
      break;
    default:
      MOZ_CRASH();
  }
}

void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
                                          AtomicOp op, Register64 value,
                                          const BaseIndex& mem) {
  AtomicEffectOp64(*this, &access, op, value.reg, mem);
}

void MacroAssembler::compareExchange64(const Synchronization&,
                                       const Address& mem, Register64 expected,
                                       Register64 replacement,
                                       Register64 output) {
  // NOTE: the generated code must match the assembly code in gen_cmpxchg in
  // GenerateAtomicOperations.py
  MOZ_ASSERT(output.reg == rax);
  if (expected != output) {
    movq(expected.reg, output.reg);
  }
  lock_cmpxchgq(replacement.reg, Operand(mem));
}

void MacroAssembler::compareExchange64(const Synchronization&,
                                       const BaseIndex& mem,
                                       Register64 expected,
                                       Register64 replacement,
                                       Register64 output) {
  MOZ_ASSERT(output.reg == rax);
  if (expected != output) {
    movq(expected.reg, output.reg);
  }
  lock_cmpxchgq(replacement.reg, Operand(mem));
}

void MacroAssembler::atomicExchange64(const Synchronization&,
                                      const Address& mem, Register64 value,
                                      Register64 output) {
  // NOTE: the generated code must match the assembly code in gen_exchange in
  // GenerateAtomicOperations.py
  if (value != output) {
    movq(value.reg, output.reg);
  }
  xchgq(output.reg, Operand(mem));
}

void MacroAssembler::atomicExchange64(const Synchronization&,
                                      const BaseIndex& mem, Register64 value,
                                      Register64 output) {
  if (value != output) {
    movq(value.reg, output.reg);
  }
  xchgq(output.reg, Operand(mem));
}

void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
                                     Register64 value, const Address& mem,
                                     Register64 temp, Register64 output) {
  AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
}

void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
                                     Register64 value, const BaseIndex& mem,
                                     Register64 temp, Register64 output) {
  AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
}

void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
                                      Register64 value, const Address& mem) {
  AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
}

void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
                                      Register64 value, const BaseIndex& mem) {
  AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
}

CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
  return leaRipRelative(dest);
}

void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
                                          CodeLocationLabel target) {
  ptrdiff_t off = target - loc;
  MOZ_ASSERT(off > ptrdiff_t(INT32_MIN));
  MOZ_ASSERT(off < ptrdiff_t(INT32_MAX));
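  // A rip-relative displacement is relative to the end of the instruction,
  // which is where `loc` points, so writing target - loc into the LEA's
  // 32-bit displacement makes it compute exactly `target`. (This is a sketch
  // of the patching contract; it assumes the displacement occupies the final
  // four bytes of the instruction emitted by moveNearAddressWithPatch.)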
  PatchWrite_Imm32(loc, Imm32(off));
}

void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
                                       Register64 boundsCheckLimit, Label* ok) {
  cmpPtr(index.reg, boundsCheckLimit.reg);
  j(cond, ok);
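  // Spectre index masking: CMOV consumes the flags as a data dependence, not
  // a predicted control dependence, so even when the branch above is
  // mispredicted the fall-through path computes with an index that has been
  // conditionally replaced by the limit, blocking speculative out-of-bounds
  // accesses.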
  if (JitOptions.spectreIndexMasking) {
    cmovCCq(cond, Operand(boundsCheckLimit.reg), index.reg);
  }
}

void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
                                       Address boundsCheckLimit, Label* ok) {
  cmpPtr(index.reg, Operand(boundsCheckLimit));
  j(cond, ok);
  if (JitOptions.spectreIndexMasking) {
    cmovCCq(cond, Operand(boundsCheckLimit), index.reg);
  }
}

// ========================================================================
// Integer compare-then-conditionally-load/move operations.
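//
// The <CmpWidth, MoveWidth> template arguments pick the operand sizes: the
// compare is cmp32 or cmpPtr, and the conditional move is the 32-bit or
// 64-bit CMOV form. Every specialization is a branchless two-instruction
// cmp + cmov sequence.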

// cmpMove, Cond-Reg-Reg-Reg-Reg cases

template <>
void MacroAssemblerX64::cmpMove<32, 32>(Condition cond, Register lhs,
                                        Register rhs, Register falseVal,
                                        Register trueValAndDest) {
  cmp32(lhs, rhs);
  cmovCCl(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpMove<32, 64>(Condition cond, Register lhs,
                                        Register rhs, Register falseVal,
                                        Register trueValAndDest) {
  cmp32(lhs, rhs);
  cmovCCq(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpMove<64, 32>(Condition cond, Register lhs,
                                        Register rhs, Register falseVal,
                                        Register trueValAndDest) {
  cmpPtr(lhs, rhs);
  cmovCCl(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpMove<64, 64>(Condition cond, Register lhs,
                                        Register rhs, Register falseVal,
                                        Register trueValAndDest) {
  cmpPtr(lhs, rhs);
  cmovCCq(cond, Operand(falseVal), trueValAndDest);
}

// cmpMove, Cond-Reg-Addr-Reg-Reg cases

template <>
void MacroAssemblerX64::cmpMove<32, 32>(Condition cond, Register lhs,
                                        const Address& rhs, Register falseVal,
                                        Register trueValAndDest) {
  cmp32(lhs, Operand(rhs));
  cmovCCl(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpMove<32, 64>(Condition cond, Register lhs,
                                        const Address& rhs, Register falseVal,
                                        Register trueValAndDest) {
  cmp32(lhs, Operand(rhs));
  cmovCCq(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpMove<64, 32>(Condition cond, Register lhs,
                                        const Address& rhs, Register falseVal,
                                        Register trueValAndDest) {
  cmpPtr(lhs, Operand(rhs));
  cmovCCl(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpMove<64, 64>(Condition cond, Register lhs,
                                        const Address& rhs, Register falseVal,
                                        Register trueValAndDest) {
  cmpPtr(lhs, Operand(rhs));
  cmovCCq(cond, Operand(falseVal), trueValAndDest);
}

// cmpLoad, Cond-Reg-Reg-Addr-Reg cases
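//
// Note that CMOV with a memory source performs the load unconditionally; only
// the register write-back depends on the flags. The falseVal address must
// therefore be valid to read even when the condition holds.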

template <>
void MacroAssemblerX64::cmpLoad<32, 32>(Condition cond, Register lhs,
                                        Register rhs, const Address& falseVal,
                                        Register trueValAndDest) {
  cmp32(lhs, rhs);
  cmovCCl(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpLoad<32, 64>(Condition cond, Register lhs,
                                        Register rhs, const Address& falseVal,
                                        Register trueValAndDest) {
  cmp32(lhs, rhs);
  cmovCCq(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpLoad<64, 32>(Condition cond, Register lhs,
                                        Register rhs, const Address& falseVal,
                                        Register trueValAndDest) {
  cmpPtr(lhs, rhs);
  cmovCCl(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpLoad<64, 64>(Condition cond, Register lhs,
                                        Register rhs, const Address& falseVal,
                                        Register trueValAndDest) {
  cmpPtr(lhs, rhs);
  cmovCCq(cond, Operand(falseVal), trueValAndDest);
}

// cmpLoad, Cond-Reg-Addr-Addr-Reg cases

template <>
void MacroAssemblerX64::cmpLoad<32, 32>(Condition cond, Register lhs,
                                        const Address& rhs,
                                        const Address& falseVal,
                                        Register trueValAndDest) {
  cmp32(lhs, Operand(rhs));
  cmovCCl(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpLoad<32, 64>(Condition cond, Register lhs,
                                        const Address& rhs,
                                        const Address& falseVal,
                                        Register trueValAndDest) {
  cmp32(lhs, Operand(rhs));
  cmovCCq(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpLoad<64, 32>(Condition cond, Register lhs,
                                        const Address& rhs,
                                        const Address& falseVal,
                                        Register trueValAndDest) {
  cmpPtr(lhs, Operand(rhs));
  cmovCCl(cond, Operand(falseVal), trueValAndDest);
}
template <>
void MacroAssemblerX64::cmpLoad<64, 64>(Condition cond, Register lhs,
                                        const Address& rhs,
                                        const Address& falseVal,
                                        Register trueValAndDest) {
  cmpPtr(lhs, Operand(rhs));
  cmovCCq(cond, Operand(falseVal), trueValAndDest);
}

//}}} check_macroassembler_style