/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "exec/plugin-gen.h"
#include "tcg-internal.h"

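/*
 * Canonicalize a MemOp before use: prefer MO_ALIGN over an explicit
 * MO_ALIGN_N equal to the access size, assert that the size is valid,
 * drop MO_BSWAP for byte accesses, and drop MO_SIGN wherever it has
 * no effect (stores, and loads as wide as the destination).
 */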
static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
    /* Trigger the asserts within as early as possible.  */
    unsigned a_bits = get_alignment_bits(op);

    /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
    if (a_bits == (op & MO_SIZE)) {
        op = (op & ~MO_AMASK) | MO_ALIGN;
    }

    switch (op & MO_SIZE) {
    case MO_8:
        op &= ~MO_BSWAP;
        break;
    case MO_16:
        break;
    case MO_32:
        if (!is64) {
            op &= ~MO_SIGN;
        }
        break;
    case MO_64:
        if (is64) {
            op &= ~MO_SIGN;
            break;
        }
        /* fall through */
    default:
        g_assert_not_reached();
    }
    if (st) {
        op &= ~MO_SIGN;
    }
    return op;
}

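/*
 * Emit one qemu_ld/st opcode.  @vh, if non-NULL, is the high half of
 * the data value.  On a 32-bit host with 64-bit guest addresses, the
 * address temp is passed as the low/high register pair described by
 * TCGV_LOW/HIGH.
 */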
static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
                     TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
        if (vh) {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
        } else {
            tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi);
        }
    } else {
        /* See TCGV_LOW/HIGH. */
        TCGTemp *al = addr + HOST_BIG_ENDIAN;
        TCGTemp *ah = addr + !HOST_BIG_ENDIAN;

        if (vh) {
            tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh),
                        temp_arg(al), temp_arg(ah), oi);
        } else {
            tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi);
        }
    }
}

static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
    if (TCG_TARGET_REG_BITS == 32) {
        TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
        TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
        gen_ldst(opc, vl, vh, addr, oi);
    } else {
        gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi);
    }
}

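/*
 * Emit a barrier for the ordering required by the guest memory model
 * but not already guaranteed by the host; anything implied by
 * TCG_TARGET_DEFAULT_MO is elided.
 */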
static void tcg_gen_req_mo(TCGBar type)
{
#ifdef TCG_GUEST_DEFAULT_MO
    type &= TCG_GUEST_DEFAULT_MO;
#endif
    type &= ~TCG_TARGET_DEFAULT_MO;
    if (type) {
        tcg_gen_mb(type | TCG_BAR_SC);
    }
}

/* Only required for loads, where value might overlap addr. */
static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        /* Save a copy of the vaddr for use after a load.  */
        TCGv_i64 temp = tcg_temp_ebb_new_i64();
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            tcg_gen_extu_i32_i64(temp, temp_tcgv_i32(addr));
        } else {
            tcg_gen_mov_i64(temp, temp_tcgv_i64(addr));
        }
        return temp;
    }
#endif
    return NULL;
}

static void
plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi,
                         enum qemu_plugin_mem_rw rw)
{
#ifdef CONFIG_PLUGIN
    if (tcg_ctx->plugin_insn != NULL) {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            if (!copy_addr) {
                copy_addr = tcg_temp_ebb_new_i64();
                tcg_gen_extu_i32_i64(copy_addr, temp_tcgv_i32(orig_addr));
            }
            plugin_gen_empty_mem_callback(copy_addr, info);
            tcg_temp_free_i64(copy_addr);
        } else {
            if (copy_addr) {
                plugin_gen_empty_mem_callback(copy_addr, info);
                tcg_temp_free_i64(copy_addr);
            } else {
                plugin_gen_empty_mem_callback(temp_tcgv_i64(orig_addr), info);
            }
        }
    }
#endif
}

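/*
 * Expand a 32-bit guest load.  If the host cannot byte-swap within
 * the memory operation itself, issue the load without MO_BSWAP and
 * swap the result in a register afterward.
 */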
static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i32;
    } else {
        opc = INDEX_op_qemu_ld_a64_i32;
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN
                                           ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                           : TCG_BSWAP_IZ | TCG_BSWAP_OZ));
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_ld_i32_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i32 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 0, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i32();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i32(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st8_a32_i32;
        } else {
            opc = INDEX_op_qemu_st8_a64_i32;
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i32;
        } else {
            opc = INDEX_op_qemu_st_a64_i32;
        }
    }
    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i32(swap);
    }
}

void tcg_gen_qemu_st_i32_chk(TCGv_i32 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_qemu_st_i32_int(val, addr, idx, memop);
}

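/*
 * Expand a 64-bit guest load.  On a 32-bit host, a sub-64-bit access
 * reduces to the 32-bit path plus a sign- or zero-extension into the
 * high half of the destination pair.
 */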
static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 1, 0);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input.  */
        if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
            memop &= ~MO_SIGN;
        }
        oi = make_memop_idx(memop, idx);
    }

    copy_addr = plugin_maybe_preserve_addr(addr);
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i64;
    } else {
        opc = INDEX_op_qemu_ld_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);

    if ((orig_memop ^ memop) & MO_BSWAP) {
        int flags = (orig_memop & MO_SIGN
                     ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                     : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(val, val, flags);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(val, val, flags);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

void tcg_gen_qemu_ld_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_ld_i64_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
                                    TCGArg idx, MemOp memop)
{
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;
    TCGOpcode opc;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
        return;
    }

    tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    memop = tcg_canonicalize_memop(memop, 1, 1);
    orig_oi = oi = make_memop_idx(memop, idx);

    if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
        swap = tcg_temp_ebb_new_i64();
        switch (memop & MO_SIZE) {
        case MO_16:
            tcg_gen_bswap16_i64(swap, val, 0);
            break;
        case MO_32:
            tcg_gen_bswap32_i64(swap, val, 0);
            break;
        case MO_64:
            tcg_gen_bswap64_i64(swap, val);
            break;
        default:
            g_assert_not_reached();
        }
        val = swap;
        memop &= ~MO_BSWAP;
        oi = make_memop_idx(memop, idx);
    }

    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_st_a32_i64;
    } else {
        opc = INDEX_op_qemu_st_a64_i64;
    }
    gen_ldst_i64(opc, val, addr, oi);
    plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

    if (swap) {
        tcg_temp_free_i64(swap);
    }
}

void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
                             MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_qemu_st_i64_int(val, addr, idx, memop);
}

/*
 * Return true if @mop, without knowledge of the pointer alignment,
 * does not require 16-byte atomicity, and it would be advantageous
 * to avoid a call to a helper function.
 */
static bool use_two_i64_for_i128(MemOp mop)
{
#ifdef CONFIG_SOFTMMU
    /* Two softmmu tlb lookups are larger than one function call. */
    return false;
#else
    /*
     * For user-only, two 64-bit operations may well be smaller than a call.
     * Determine if that would be legal for the requested atomicity.
     */
    switch (mop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
    case MO_ATOM_IFALIGN_PAIR:
        return true;
    case MO_ATOM_IFALIGN:
    case MO_ATOM_SUBALIGN:
    case MO_ATOM_WITHIN16:
    case MO_ATOM_WITHIN16_PAIR:
        /* In a serialized context, no atomicity is required. */
        return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL);
    default:
        g_assert_not_reached();
    }
#endif
}

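/*
 * Split a 128-bit MemOp into the two 64-bit MemOps used to implement
 * it: ret[0] covers the first 8 bytes, ret[1] the second, with the
 * original alignment constraint distributed between the halves.
 */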
static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
{
    MemOp mop_1 = orig, mop_2;

    /* Reduce the size to 64-bit. */
    mop_1 = (mop_1 & ~MO_SIZE) | MO_64;

    /* Retain the alignment constraints of the original. */
    switch (orig & MO_AMASK) {
    case MO_UNALN:
    case MO_ALIGN_2:
    case MO_ALIGN_4:
        mop_2 = mop_1;
        break;
    case MO_ALIGN_8:
        /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        mop_2 = mop_1;
        break;
    case MO_ALIGN:
        /* Second has 8-byte alignment; first has 16-byte alignment. */
        mop_2 = mop_1;
        mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16;
        break;
    case MO_ALIGN_16:
    case MO_ALIGN_32:
    case MO_ALIGN_64:
        /* Second has 8-byte alignment; first retains original. */
        mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN;
        break;
    default:
        g_assert_not_reached();
    }

    /* Use a byte ordering implemented by the host. */
    if ((orig & MO_BSWAP) && !tcg_target_has_memory_bswap(mop_1)) {
        mop_1 &= ~MO_BSWAP;
        mop_2 &= ~MO_BSWAP;
    }

    ret[0] = mop_1;
    ret[1] = mop_2;
}

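/*
 * Out-of-line helpers uniformly take a 64-bit address: widen a 32-bit
 * guest address into a new temp here, and release it afterward with
 * maybe_free_addr64.
 */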
static TCGv_i64 maybe_extend_addr64(TCGTemp *addr)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        TCGv_i64 a64 = tcg_temp_ebb_new_i64();
        tcg_gen_extu_i32_i64(a64, temp_tcgv_i32(addr));
        return a64;
    }
    return temp_tcgv_i64(addr);
}

static void maybe_free_addr64(TCGv_i64 a64)
{
    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        tcg_temp_free_i64(a64);
    }
}

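/*
 * Expand a 128-bit guest load one of three ways: a native i128 opcode
 * where the backend provides one, a pair of i64 loads where that is
 * both legal and likely profitable, or a call to the out-of-line
 * helper.
 */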
static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        bool need_bswap = false;
        MemOpIdx oi = orig_oi;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = TCGV128_HIGH(val);
            hi = TCGV128_LOW(val);
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i128;
        } else {
            opc = INDEX_op_qemu_ld_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_gen_bswap64_i64(lo, lo);
            tcg_gen_bswap64_i64(hi, hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y;
        bool need_bswap;

        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_ld_a32_i64;
        } else {
            opc = INDEX_op_qemu_ld_a64_i64;
        }

        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
         * subwords.
         */
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);

        if (need_bswap) {
            tcg_gen_bswap64_i64(y, y);
        }
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_ld_i128(val, cpu_env, temp_tcgv_i64(addr),
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
}

void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_ld_i128_int(val, addr, idx, memop);
}

static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
                                     TCGArg idx, MemOp memop)
{
    const MemOpIdx orig_oi = make_memop_idx(memop, idx);
    TCGv_i64 ext_addr = NULL;
    TCGOpcode opc;

    tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);

    /* TODO: For now, force 32-bit hosts to use the helper. */
    if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
        TCGv_i64 lo, hi;
        MemOpIdx oi = orig_oi;
        bool need_bswap = false;

        if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
            lo = tcg_temp_ebb_new_i64();
            hi = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
            tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
            oi = make_memop_idx(memop & ~MO_BSWAP, idx);
            need_bswap = true;
        } else {
            lo = TCGV128_LOW(val);
            hi = TCGV128_HIGH(val);
        }

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i128;
        } else {
            opc = INDEX_op_qemu_st_a64_i128;
        }
        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

        if (need_bswap) {
            tcg_temp_free_i64(lo);
            tcg_temp_free_i64(hi);
        }
    } else if (use_two_i64_for_i128(memop)) {
        MemOp mop[2];
        TCGTemp *addr_p8;
        TCGv_i64 x, y, b = NULL;

        canonicalize_memop_i128_as_i64(mop, memop);

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            opc = INDEX_op_qemu_st_a32_i64;
        } else {
            opc = INDEX_op_qemu_st_a64_i64;
        }

        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
        } else {
            x = TCGV128_HIGH(val);
            y = TCGV128_LOW(val);
        }

        if ((mop[0] ^ memop) & MO_BSWAP) {
            b = tcg_temp_ebb_new_i64();
            tcg_gen_bswap64_i64(b, x);
            x = b;
        }

        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));

        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
            tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
            addr_p8 = tcgv_i32_temp(t);
        } else {
            TCGv_i64 t = tcg_temp_ebb_new_i64();
            tcg_gen_addi_i64(t, temp_tcgv_i64(addr), 8);
            addr_p8 = tcgv_i64_temp(t);
        }

        if (b) {
            tcg_gen_bswap64_i64(b, y);
            gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
            tcg_temp_free_i64(b);
        } else {
            gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
        }
        tcg_temp_free_internal(addr_p8);
    } else {
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            ext_addr = tcg_temp_ebb_new_i64();
            tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
            addr = tcgv_i64_temp(ext_addr);
        }
        gen_helper_st_i128(cpu_env, temp_tcgv_i64(addr), val,
                           tcg_constant_i32(orig_oi));
    }

    plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W);
}

void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
                              MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) == MO_128);
    tcg_debug_assert((memop & MO_SIGN) == 0);
    tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}

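/* Sign- or zero-extend @val according to the MO_SSIZE bits of @opc. */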
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i32(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i32(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i32(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i32(ret, val);
        break;
    default:
        tcg_gen_mov_i32(ret, val);
        break;
    }
}

static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_gen_ext8s_i64(ret, val);
        break;
    case MO_UB:
        tcg_gen_ext8u_i64(ret, val);
        break;
    case MO_SW:
        tcg_gen_ext16s_i64(ret, val);
        break;
    case MO_UW:
        tcg_gen_ext16u_i64(ret, val);
        break;
    case MO_SL:
        tcg_gen_ext32s_i64(ret, val);
        break;
    case MO_UL:
        tcg_gen_ext32u_i64(ret, val);
        break;
    default:
        tcg_gen_mov_i64(ret, val);
        break;
    }
}

typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i128, TCGv_i32);
typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
#else
# define WITH_ATOMIC64(X)
#endif
#ifdef CONFIG_CMPXCHG128
# define WITH_ATOMIC128(X) X,
#else
# define WITH_ATOMIC128(X)
#endif

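/*
 * Table of cmpxchg helpers, indexed by size and endianness; the
 * 64-bit and 128-bit entries are present only when the host can
 * implement them atomically.
 */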
static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_8] = gen_helper_atomic_cmpxchgb,
    [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
    [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
    [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
    [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le)
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};

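/*
 * Non-atomic cmpxchg expansion: load the old value, select between
 * old and new with movcond, and store back unconditionally.  Used
 * when the TB is known not to run in parallel with other vCPUs.
 */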
static void tcg_gen_nonatomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                              TCGv_i32 cmpv, TCGv_i32 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);
    tcg_temp_free_i32(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, t1, memop);
    } else {
        tcg_gen_mov_i32(retv, t1);
    }
    tcg_temp_free_i32(t1);
}

void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                       TCGv_i32 cmpv, TCGv_i32 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i32_int(TCGv_i32 retv, TCGTemp *addr,
                                           TCGv_i32 cmpv, TCGv_i32 newv,
                                           TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(retv, retv, memop);
    }
}

void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32 retv, TCGTemp *addr,
                                    TCGv_i32 cmpv, TCGv_i32 newv,
                                    TCGArg idx, MemOp memop,
                                    TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);
    tcg_gen_atomic_cmpxchg_i32_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                              TCGv_i64 cmpv, TCGv_i64 newv,
                                              TCGArg idx, MemOp memop)
{
    TCGv_i64 t1, t2;

    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_nonatomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                          TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
        return;
    }

    t1 = tcg_temp_ebb_new_i64();
    t2 = tcg_temp_ebb_new_i64();

    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop & ~MO_SIGN);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);
    tcg_temp_free_i64(t2);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i64(retv, t1, memop);
    } else {
        tcg_gen_mov_i64(retv, t1);
    }
    tcg_temp_free_i64(t1);
}

void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                       TCGv_i64 cmpv, TCGv_i64 newv,
                                       TCGArg idx, MemOp memop,
                                       TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i64_int(TCGv_i64 retv, TCGTemp *addr,
                                           TCGv_i64 cmpv, TCGv_i64 newv,
                                           TCGArg idx, MemOp memop)
{
    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_cx_i64 gen;

        memop = tcg_canonicalize_memop(memop, 1, 0);
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        if (gen) {
            MemOpIdx oi = make_memop_idx(memop, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(cpu_env);

        /*
         * Produce a result for a well-formed opcode stream.  This satisfies
         * liveness for set before used, which happens before this dead code
         * is removed.
         */
        tcg_gen_movi_i64(retv, 0);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_atomic_cmpxchg_i32_int(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
                                       TCGV_LOW(newv), idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
        }
    } else {
        TCGv_i32 c32 = tcg_temp_ebb_new_i32();
        TCGv_i32 n32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32_int(r32, addr, c32, n32,
                                       idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);

        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}

void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64 retv, TCGTemp *addr,
                                    TCGv_i64 cmpv, TCGv_i64 newv,
                                    TCGArg idx, MemOp memop, TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);
    tcg_gen_atomic_cmpxchg_i64_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_nonatomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                               TCGv_i128 cmpv, TCGv_i128 newv,
                                               TCGArg idx, MemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32) {
        /* Inline expansion below is simply too large for 32-bit hosts. */
        gen_atomic_cx_i128 gen = ((memop & MO_BSWAP) == MO_LE
                                  ? gen_helper_nonatomic_cmpxchgo_le
                                  : gen_helper_nonatomic_cmpxchgo_be);
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);

        gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
    } else {
        TCGv_i128 oldv = tcg_temp_ebb_new_i128();
        TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 z = tcg_constant_i64(0);

        tcg_gen_qemu_ld_i128_int(oldv, addr, idx, memop);

        /* Compare i128 */
        tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv));
        tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv));
        tcg_gen_or_i64(t0, t0, t1);

        /* tmpv = equal ? newv : oldv */
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z,
                            TCGV128_LOW(newv), TCGV128_LOW(oldv));
        tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z,
                            TCGV128_HIGH(newv), TCGV128_HIGH(oldv));

        /* Unconditional writeback. */
        tcg_gen_qemu_st_i128_int(tmpv, addr, idx, memop);
        tcg_gen_mov_i128(retv, oldv);

        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
        tcg_temp_free_i128(tmpv);
        tcg_temp_free_i128(oldv);
    }
}

void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                        TCGv_i128 cmpv, TCGv_i128 newv,
                                        TCGArg idx, MemOp memop,
                                        TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

static void tcg_gen_atomic_cmpxchg_i128_int(TCGv_i128 retv, TCGTemp *addr,
                                            TCGv_i128 cmpv, TCGv_i128 newv,
                                            TCGArg idx, MemOp memop)
{
    gen_atomic_cx_i128 gen;

    if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
        tcg_gen_nonatomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
        return;
    }

    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
    if (gen) {
        MemOpIdx oi = make_memop_idx(memop, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(cpu_env);

    /*
     * Produce a result for a well-formed opcode stream.  This satisfies
     * liveness for set before used, which happens before this dead code
     * is removed.
     */
    tcg_gen_movi_i64(TCGV128_LOW(retv), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(retv), 0);
}

void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128 retv, TCGTemp *addr,
                                     TCGv_i128 cmpv, TCGv_i128 newv,
                                     TCGArg idx, MemOp memop,
                                     TCGType addr_type)
{
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);
    tcg_debug_assert((memop & (MO_SIZE | MO_SIGN)) == MO_128);
    tcg_gen_atomic_cmpxchg_i128_int(retv, addr, cmpv, newv, idx, memop);
}

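/*
 * Non-atomic read-modify-write expansion: load, apply @gen, store,
 * and return either the old or the new value according to @new_val.
 */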
static void do_nonatomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t1 = tcg_temp_ebb_new_i32();
    TCGv_i32 t2 = tcg_temp_ebb_new_i32();

    memop = tcg_canonicalize_memop(memop, 0, 0);

    tcg_gen_qemu_ld_i32_int(t1, addr, idx, memop);
    tcg_gen_ext_i32(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i32_int(t2, addr, idx, memop);

    tcg_gen_ext_i32(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void do_atomic_op_i32(TCGv_i32 ret, TCGTemp *addr, TCGv_i32 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i32 gen;
    TCGv_i64 a64;
    MemOpIdx oi;

    memop = tcg_canonicalize_memop(memop, 0, 0);

    gen = table[memop & (MO_SIZE | MO_BSWAP)];
    tcg_debug_assert(gen != NULL);

    oi = make_memop_idx(memop & ~MO_SIGN, idx);
    a64 = maybe_extend_addr64(addr);
    gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
    maybe_free_addr64(a64);

    if (memop & MO_SIGN) {
        tcg_gen_ext_i32(ret, ret, memop);
    }
}

static void do_nonatomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                                TCGArg idx, MemOp memop, bool new_val,
                                void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t1 = tcg_temp_ebb_new_i64();
    TCGv_i64 t2 = tcg_temp_ebb_new_i64();

    memop = tcg_canonicalize_memop(memop, 1, 0);

    tcg_gen_qemu_ld_i64_int(t1, addr, idx, memop);
    tcg_gen_ext_i64(t2, val, memop);
    gen(t2, t1, t2);
    tcg_gen_qemu_st_i64_int(t2, addr, idx, memop);

    tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
                             TCGArg idx, MemOp memop, void * const table[])
{
    memop = tcg_canonicalize_memop(memop, 1, 0);

    if ((memop & MO_SIZE) == MO_64) {
        gen_atomic_op_i64 gen = table[memop & (MO_SIZE | MO_BSWAP)];

        if (gen) {
            MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
            TCGv_i64 a64 = maybe_extend_addr64(addr);
            gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
            maybe_free_addr64(a64);
            return;
        }

        gen_helper_exit_atomic(cpu_env);
        /* Produce a result, so that we have a well-formed opcode stream
           with respect to uses of the result in the (dead) code following.  */
        tcg_gen_movi_i64(ret, 0);
    } else {
        TCGv_i32 v32 = tcg_temp_ebb_new_i32();
        TCGv_i32 r32 = tcg_temp_ebb_new_i32();

        tcg_gen_extrl_i64_i32(v32, val);
        do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table);
        tcg_temp_free_i32(v32);

        tcg_gen_extu_i32_i64(ret, r32);
        tcg_temp_free_i32(r32);

        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(ret, ret, memop);
        }
    }
}

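/*
 * Define the helper table plus the i32 and i64 entry points for one
 * read-modify-write operation.  The entry points use the atomic
 * helpers when the TB executes with CF_PARALLEL, and the inline
 * non-atomic expansion otherwise.
 */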
#define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
    [MO_8] = gen_helper_atomic_##NAME##b,                               \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
};                                                                      \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr,       \
                                     TCGv_i32 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i32);                        \
    }                                                                   \
}                                                                       \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr,       \
                                     TCGv_i64 val, TCGArg idx,          \
                                     MemOp memop, TCGType addr_type)    \
{                                                                       \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type);                  \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64);                       \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {                        \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME);     \
    } else {                                                            \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW,            \
                            tcg_gen_##OP##_i64);                        \
    }                                                                   \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
GEN_ATOMIC_HELPER(fetch_and, and, 0)
GEN_ATOMIC_HELPER(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
GEN_ATOMIC_HELPER(fetch_umax, umax, 0)

GEN_ATOMIC_HELPER(add_fetch, add, 1)
GEN_ATOMIC_HELPER(and_fetch, and, 1)
GEN_ATOMIC_HELPER(or_fetch, or, 1)
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
GEN_ATOMIC_HELPER(umax_fetch, umax, 1)

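/*
 * Discarding the loaded value and "applying" a plain move yields
 * exchange semantics from the same fetch-op machinery.
 */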
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mov_i32(r, b);
}

static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mov_i64(r, b);
}

GEN_ATOMIC_HELPER(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER