xref: /qemu/accel/tcg/cputlb.c (revision 372b69f5)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/cputlb.h"
27 #include "exec/memory-internal.h"
28 #include "exec/ram_addr.h"
29 #include "tcg/tcg.h"
30 #include "qemu/error-report.h"
31 #include "exec/log.h"
32 #include "exec/helper-proto-common.h"
33 #include "qemu/atomic.h"
34 #include "qemu/atomic128.h"
35 #include "exec/translate-all.h"
36 #include "trace.h"
37 #include "tb-hash.h"
38 #include "internal-common.h"
39 #include "internal-target.h"
40 #ifdef CONFIG_PLUGIN
41 #include "qemu/plugin-memory.h"
42 #endif
43 #include "tcg/tcg-ldst.h"
44 #include "tcg/oversized-guest.h"
45 
46 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
47 /* #define DEBUG_TLB */
48 /* #define DEBUG_TLB_LOG */
49 
50 #ifdef DEBUG_TLB
51 # define DEBUG_TLB_GATE 1
52 # ifdef DEBUG_TLB_LOG
53 #  define DEBUG_TLB_LOG_GATE 1
54 # else
55 #  define DEBUG_TLB_LOG_GATE 0
56 # endif
57 #else
58 # define DEBUG_TLB_GATE 0
59 # define DEBUG_TLB_LOG_GATE 0
60 #endif
61 
62 #define tlb_debug(fmt, ...) do { \
63     if (DEBUG_TLB_LOG_GATE) { \
64         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
65                       ## __VA_ARGS__); \
66     } else if (DEBUG_TLB_GATE) { \
67         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
68     } \
69 } while (0)
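
/*
 * For example, defining DEBUG_TLB above sends tlb_debug() output to
 * stderr; defining DEBUG_TLB_LOG as well routes it through the
 * CPU_LOG_MMU log instead (visible with e.g. "-d mmu").
 */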
70 
71 #define assert_cpu_is_self(cpu) do {                              \
72         if (DEBUG_TLB_GATE) {                                     \
73             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
74         }                                                         \
75     } while (0)
76 
77 /* run_on_cpu_data.target_ptr should always be big enough for a
78  * vaddr even on 32-bit builds
79  */
80 QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
81 
82 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
83  */
84 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
85 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
86 
87 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
88 {
89     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
90 }
91 
92 static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
93 {
94     return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
95 }
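
/*
 * Worked example, assuming CPU_TLB_ENTRY_BITS == 5 (32-byte entries):
 * a 256-entry TLB has fast->mask == 255 << 5 == 0x1fe0, so
 * tlb_n_entries() returns (0x1fe0 >> 5) + 1 == 256 and sizeof_tlb()
 * returns 0x1fe0 + 32 == 8 KiB of CPUTLBEntry storage.
 */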
96 
97 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
98                              size_t max_entries)
99 {
100     desc->window_begin_ns = ns;
101     desc->window_max_entries = max_entries;
102 }
103 
104 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
105 {
106     CPUJumpCache *jc = cpu->tb_jmp_cache;
107     int i, i0;
108 
109     if (unlikely(!jc)) {
110         return;
111     }
112 
113     i0 = tb_jmp_cache_hash_page(page_addr);
114     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
115         qatomic_set(&jc->array[i0 + i].tb, NULL);
116     }
117 }
118 
119 /**
120  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
121  * @desc: The CPUTLBDesc portion of the TLB
122  * @fast: The CPUTLBDescFast portion of the same TLB
123  *
124  * Called with tlb_lock held.
125  *
126  * We have two main constraints when resizing a TLB: (1) we only resize it
127  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
128  * the array or unnecessarily flushing it), which means we do not control how
129  * frequently the resizing can occur; (2) we don't have access to the guest's
130  * future scheduling decisions, and therefore have to decide the magnitude of
131  * the resize based on past observations.
132  *
133  * In general, a memory-hungry process can benefit greatly from an appropriately
134  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
135  * we just have to make the TLB as large as possible; while an oversized TLB
136  * results in minimal TLB miss rates, it also takes longer to be flushed
137  * (flushes can be _very_ frequent), and the reduced locality can also hurt
138  * performance.
139  *
140  * To achieve near-optimal performance for all kinds of workloads, we:
141  *
142  * 1. Aggressively increase the size of the TLB when the use rate of the
143  * TLB being flushed is high, since it is likely that in the near future this
144  * memory-hungry process will execute again, and its memory hungriness will
145  * probably be similar.
146  *
147  * 2. Slowly reduce the size of the TLB as the use rate declines over a
148  * reasonably large time window. The rationale is that if in such a time window
149  * we have not observed a high TLB use rate, it is likely that we won't observe
150  * it in the near future. In that case, once a time window expires we downsize
151  * the TLB to match the maximum use rate observed in the window.
152  *
153  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
154  * since in that range performance is likely near-optimal. Recall that the TLB
155  * is direct-mapped, so we want the use rate to be low (or at least not too
156  * high), since otherwise we are likely to see a significant number of
157  * conflict misses.
158  */
159 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
160                                   int64_t now)
161 {
162     size_t old_size = tlb_n_entries(fast);
163     size_t rate;
164     size_t new_size = old_size;
165     int64_t window_len_ms = 100;
166     int64_t window_len_ns = window_len_ms * 1000 * 1000;
167     bool window_expired = now > desc->window_begin_ns + window_len_ns;
168 
169     if (desc->n_used_entries > desc->window_max_entries) {
170         desc->window_max_entries = desc->n_used_entries;
171     }
172     rate = desc->window_max_entries * 100 / old_size;
173 
174     if (rate > 70) {
175         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
176     } else if (rate < 30 && window_expired) {
177         size_t ceil = pow2ceil(desc->window_max_entries);
178         size_t expected_rate = desc->window_max_entries * 100 / ceil;
179 
180         /*
181          * Avoid undersizing when the max number of entries seen is just below
182          * a pow2. For instance, if max_entries == 1025, the expected use rate
183          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
184          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
185  * later. Thus, make sure that the expected use rate remains below 70%
186  * (and since we double the size, that means the lowest rate we'd
187  * expect to get is 35%, which is still in the 30-70% range where
188  * we consider the size to be appropriate).
189          */
190         if (expected_rate > 70) {
191             ceil *= 2;
192         }
193         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
194     }
195 
196     if (new_size == old_size) {
197         if (window_expired) {
198             tlb_window_reset(desc, now, desc->n_used_entries);
199         }
200         return;
201     }
202 
203     g_free(fast->table);
204     g_free(desc->fulltlb);
205 
206     tlb_window_reset(desc, now, 0);
207     /* desc->n_used_entries is cleared by the caller */
208     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
209     fast->table = g_try_new(CPUTLBEntry, new_size);
210     desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
211 
212     /*
213      * If the allocations fail, try smaller sizes. We just freed some
214      * memory, so going back to half of new_size has a good chance of working.
215      * Increased memory pressure elsewhere in the system might cause the
216      * allocations to fail though, so we progressively reduce the allocation
217      * size, aborting if we cannot even allocate the smallest TLB we support.
218      */
219     while (fast->table == NULL || desc->fulltlb == NULL) {
220         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
221             error_report("%s: %s", __func__, strerror(errno));
222             abort();
223         }
224         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
225         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
226 
227         g_free(fast->table);
228         g_free(desc->fulltlb);
229         fast->table = g_try_new(CPUTLBEntry, new_size);
230         desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
231     }
232 }
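
/*
 * Worked example of the resize heuristic above: with old_size == 1024,
 * a window_max_entries of 750 gives a 73% use rate, so the TLB doubles
 * to 2048 entries (bounded by CPU_TLB_DYN_MAX_BITS).  Conversely, if an
 * expired window saw at most 200 used entries (a 19% rate), pow2ceil(200)
 * is 256 but 200/256 is 78% > 70%, so ceil is doubled and the TLB shrinks
 * to 512 entries (bounded below by CPU_TLB_DYN_MIN_BITS).
 */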
233 
234 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
235 {
236     desc->n_used_entries = 0;
237     desc->large_page_addr = -1;
238     desc->large_page_mask = -1;
239     desc->vindex = 0;
240     memset(fast->table, -1, sizeof_tlb(fast));
241     memset(desc->vtable, -1, sizeof(desc->vtable));
242 }
243 
244 static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
245                                         int64_t now)
246 {
247     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
248     CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
249 
250     tlb_mmu_resize_locked(desc, fast, now);
251     tlb_mmu_flush_locked(desc, fast);
252 }
253 
254 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
255 {
256     size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
257 
258     tlb_window_reset(desc, now, 0);
259     desc->n_used_entries = 0;
260     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
261     fast->table = g_new(CPUTLBEntry, n_entries);
262     desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
263     tlb_mmu_flush_locked(desc, fast);
264 }
265 
266 static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
267 {
268     cpu->neg.tlb.d[mmu_idx].n_used_entries++;
269 }
270 
271 static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
272 {
273     cpu->neg.tlb.d[mmu_idx].n_used_entries--;
274 }
275 
276 void tlb_init(CPUState *cpu)
277 {
278     int64_t now = get_clock_realtime();
279     int i;
280 
281     qemu_spin_init(&cpu->neg.tlb.c.lock);
282 
283     /* All tlbs are initialized flushed. */
284     cpu->neg.tlb.c.dirty = 0;
285 
286     for (i = 0; i < NB_MMU_MODES; i++) {
287         tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
288     }
289 }
290 
291 void tlb_destroy(CPUState *cpu)
292 {
293     int i;
294 
295     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
296     for (i = 0; i < NB_MMU_MODES; i++) {
297         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
298         CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
299 
300         g_free(fast->table);
301         g_free(desc->fulltlb);
302     }
303 }
304 
305 /* flush_all_helper: run fn across all cpus
306  *
307  * The helper is queued on every cpu other than the source.  Callers
308  * that need a synchronisation point additionally queue the source
309  * cpu's helper as "safe" work, so that all queued work is finished
310  * before execution starts again.
311  */
312 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
313                              run_on_cpu_data d)
314 {
315     CPUState *cpu;
316 
317     CPU_FOREACH(cpu) {
318         if (cpu != src) {
319             async_run_on_cpu(cpu, fn, d);
320         }
321     }
322 }
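
/*
 * For example, tlb_flush_by_mmuidx_all_cpus_synced() below queues the
 * helper on every other cpu via flush_all_helper() and then queues it
 * as "safe" work on the source cpu, so nothing executes until all of
 * the queued flushes have completed.
 */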
323 
324 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
325 {
326     uint16_t asked = data.host_int;
327     uint16_t all_dirty, work, to_clean;
328     int64_t now = get_clock_realtime();
329 
330     assert_cpu_is_self(cpu);
331 
332     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
333 
334     qemu_spin_lock(&cpu->neg.tlb.c.lock);
335 
336     all_dirty = cpu->neg.tlb.c.dirty;
337     to_clean = asked & all_dirty;
338     all_dirty &= ~to_clean;
339     cpu->neg.tlb.c.dirty = all_dirty;
340 
341     for (work = to_clean; work != 0; work &= work - 1) {
342         int mmu_idx = ctz32(work);
343         tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
344     }
345 
346     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
347 
348     tcg_flush_jmp_cache(cpu);
349 
350     if (to_clean == ALL_MMUIDX_BITS) {
351         qatomic_set(&cpu->neg.tlb.c.full_flush_count,
352                     cpu->neg.tlb.c.full_flush_count + 1);
353     } else {
354         qatomic_set(&cpu->neg.tlb.c.part_flush_count,
355                     cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
356         if (to_clean != asked) {
357             qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
358                         cpu->neg.tlb.c.elide_flush_count +
359                         ctpop16(asked & ~to_clean));
360         }
361     }
362 }
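
/*
 * Example of the bookkeeping above: with asked == 0x6 and only mmu_idx 2
 * dirty (c.dirty == 0x4), to_clean becomes 0x4, so a single mmu_idx is
 * flushed and counted in part_flush_count, while the flush of the
 * already-clean mmu_idx 1 is counted in elide_flush_count.
 */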
363 
364 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
365 {
366     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
367 
368     if (cpu->created && !qemu_cpu_is_self(cpu)) {
369         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
370                          RUN_ON_CPU_HOST_INT(idxmap));
371     } else {
372         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
373     }
374 }
375 
376 void tlb_flush(CPUState *cpu)
377 {
378     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
379 }
380 
381 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
382 {
383     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
384 
385     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
386 
387     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
388     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
389 }
390 
391 void tlb_flush_all_cpus(CPUState *src_cpu)
392 {
393     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
394 }
395 
396 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
397 {
398     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
399 
400     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
401 
402     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
403     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
404 }
405 
406 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
407 {
408     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
409 }
410 
411 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
412                                       vaddr page, vaddr mask)
413 {
414     page &= mask;
415     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
416 
417     return (page == (tlb_entry->addr_read & mask) ||
418             page == (tlb_addr_write(tlb_entry) & mask) ||
419             page == (tlb_entry->addr_code & mask));
420 }
421 
422 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
423 {
424     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
425 }
426 
427 /**
428  * tlb_entry_is_empty - return true if the entry is not in use
429  * @te: pointer to CPUTLBEntry
430  */
431 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
432 {
433     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
434 }
435 
436 /* Called with tlb_c.lock held */
437 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
438                                         vaddr page,
439                                         vaddr mask)
440 {
441     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
442         memset(tlb_entry, -1, sizeof(*tlb_entry));
443         return true;
444     }
445     return false;
446 }
447 
448 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
449 {
450     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
451 }
452 
453 /* Called with tlb_c.lock held */
454 static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
455                                             vaddr page,
456                                             vaddr mask)
457 {
458     CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
459     int k;
460 
461     assert_cpu_is_self(cpu);
462     for (k = 0; k < CPU_VTLB_SIZE; k++) {
463         if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
464             tlb_n_used_entries_dec(cpu, mmu_idx);
465         }
466     }
467 }
468 
469 static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
470                                               vaddr page)
471 {
472     tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
473 }
474 
475 static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
476 {
477     vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
478     vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
479 
480     /* Check if we need to flush due to large pages.  */
481     if ((page & lp_mask) == lp_addr) {
482         tlb_debug("forcing full flush midx %d (%016"
483                   VADDR_PRIx "/%016" VADDR_PRIx ")\n",
484                   midx, lp_addr, lp_mask);
485         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
486     } else {
487         if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
488             tlb_n_used_entries_dec(cpu, midx);
489         }
490         tlb_flush_vtlb_page_locked(cpu, midx, page);
491     }
492 }
493 
494 /**
495  * tlb_flush_page_by_mmuidx_async_0:
496  * @cpu: cpu on which to flush
497  * @addr: page of virtual address to flush
498  * @idxmap: set of mmu_idx to flush
499  *
500  * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
501  * at @addr on @cpu from the TLBs indicated by @idxmap.
502  */
503 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
504                                              vaddr addr,
505                                              uint16_t idxmap)
506 {
507     int mmu_idx;
508 
509     assert_cpu_is_self(cpu);
510 
511     tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
512 
513     qemu_spin_lock(&cpu->neg.tlb.c.lock);
514     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
515         if ((idxmap >> mmu_idx) & 1) {
516             tlb_flush_page_locked(cpu, mmu_idx, addr);
517         }
518     }
519     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
520 
521     /*
522      * Discard jump cache entries for any tb which might overlap
523      * the flushed page, including the previous page.
524      */
525     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
526     tb_jmp_cache_clear_page(cpu, addr);
527 }
528 
529 /**
530  * tlb_flush_page_by_mmuidx_async_1:
531  * @cpu: cpu on which to flush
532  * @data: encoded addr + idxmap
533  *
534  * Helper for tlb_flush_page_by_mmuidx and friends, called through
535  * async_run_on_cpu.  The idxmap parameter is encoded in the page
536  * offset of the target_ptr field.  This limits the set of mmu_idx
537  * that can be passed via this method.
538  */
539 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
540                                              run_on_cpu_data data)
541 {
542     vaddr addr_and_idxmap = data.target_ptr;
543     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
544     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
545 
546     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
547 }
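
/*
 * Encoding example: with 4 KiB target pages, TARGET_PAGE_MASK clears the
 * low 12 bits, so an idxmap below 4096 can ride in the page offset.  For
 * addr == 0x7f001000 and idxmap == 0x3, the encoded target_ptr is
 * 0x7f001003; the decode above recovers addr with & TARGET_PAGE_MASK and
 * idxmap with & ~TARGET_PAGE_MASK.
 */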
548 
549 typedef struct {
550     vaddr addr;
551     uint16_t idxmap;
552 } TLBFlushPageByMMUIdxData;
553 
554 /**
555  * tlb_flush_page_by_mmuidx_async_2:
556  * @cpu: cpu on which to flush
557  * @data: allocated addr + idxmap
558  *
559  * Helper for tlb_flush_page_by_mmuidx and friends, called through
560  * async_run_on_cpu.  The addr+idxmap parameters are stored in a
561  * TLBFlushPageByMMUIdxData structure that has been allocated
562  * specifically for this helper.  Free the structure when done.
563  */
564 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
565                                              run_on_cpu_data data)
566 {
567     TLBFlushPageByMMUIdxData *d = data.host_ptr;
568 
569     tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
570     g_free(d);
571 }
572 
573 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
574 {
575     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
576 
577     /* This should already be page aligned */
578     addr &= TARGET_PAGE_MASK;
579 
580     if (qemu_cpu_is_self(cpu)) {
581         tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
582     } else if (idxmap < TARGET_PAGE_SIZE) {
583         /*
584          * Most targets have only a few mmu_idx.  In the case where
585          * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
586          * allocating memory for this operation.
587          */
588         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
589                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
590     } else {
591         TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
592 
593         /* Otherwise allocate a structure, freed by the worker.  */
594         d->addr = addr;
595         d->idxmap = idxmap;
596         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
597                          RUN_ON_CPU_HOST_PTR(d));
598     }
599 }
600 
601 void tlb_flush_page(CPUState *cpu, vaddr addr)
602 {
603     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
604 }
605 
606 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
607                                        uint16_t idxmap)
608 {
609     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
610 
611     /* This should already be page aligned */
612     addr &= TARGET_PAGE_MASK;
613 
614     /*
615      * Allocate memory to hold addr+idxmap only when needed.
616      * See tlb_flush_page_by_mmuidx for details.
617      */
618     if (idxmap < TARGET_PAGE_SIZE) {
619         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
620                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
621     } else {
622         CPUState *dst_cpu;
623 
624         /* Allocate a separate data block for each destination cpu.  */
625         CPU_FOREACH(dst_cpu) {
626             if (dst_cpu != src_cpu) {
627                 TLBFlushPageByMMUIdxData *d
628                     = g_new(TLBFlushPageByMMUIdxData, 1);
629 
630                 d->addr = addr;
631                 d->idxmap = idxmap;
632                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
633                                  RUN_ON_CPU_HOST_PTR(d));
634             }
635         }
636     }
637 
638     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
639 }
640 
641 void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
642 {
643     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
644 }
645 
646 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
647                                               vaddr addr,
648                                               uint16_t idxmap)
649 {
650     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
651 
652     /* This should already be page aligned */
653     addr &= TARGET_PAGE_MASK;
654 
655     /*
656      * Allocate memory to hold addr+idxmap only when needed.
657      * See tlb_flush_page_by_mmuidx for details.
658      */
659     if (idxmap < TARGET_PAGE_SIZE) {
660         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
661                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
662         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
663                               RUN_ON_CPU_TARGET_PTR(addr | idxmap));
664     } else {
665         CPUState *dst_cpu;
666         TLBFlushPageByMMUIdxData *d;
667 
668         /* Allocate a separate data block for each destination cpu.  */
669         CPU_FOREACH(dst_cpu) {
670             if (dst_cpu != src_cpu) {
671                 d = g_new(TLBFlushPageByMMUIdxData, 1);
672                 d->addr = addr;
673                 d->idxmap = idxmap;
674                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
675                                  RUN_ON_CPU_HOST_PTR(d));
676             }
677         }
678 
679         d = g_new(TLBFlushPageByMMUIdxData, 1);
680         d->addr = addr;
681         d->idxmap = idxmap;
682         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
683                               RUN_ON_CPU_HOST_PTR(d));
684     }
685 }
686 
687 void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
688 {
689     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
690 }
691 
692 static void tlb_flush_range_locked(CPUState *cpu, int midx,
693                                    vaddr addr, vaddr len,
694                                    unsigned bits)
695 {
696     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
697     CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
698     vaddr mask = MAKE_64BIT_MASK(0, bits);
699 
700     /*
701      * If @bits is smaller than the tlb size, there may be multiple entries
702      * within the TLB; otherwise all addresses that match under @mask hit
703      * the same TLB entry.
704      * TODO: Perhaps allow bits to be a few bits less than the size.
705      * For now, just flush the entire TLB.
706      *
707      * If @len is larger than the tlb size, then it will take longer to
708      * test all of the entries in the TLB than it will to flush it all.
709      */
710     if (mask < f->mask || len > f->mask) {
711         tlb_debug("forcing full flush midx %d ("
712                   "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
713                   midx, addr, mask, len);
714         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
715         return;
716     }
717 
718     /*
719      * Check if we need to flush due to large pages.
720      * Because large_page_mask contains all 1's from the msb,
721      * we only need to test the end of the range.
722      */
723     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
724         tlb_debug("forcing full flush midx %d ("
725                   "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
726                   midx, d->large_page_addr, d->large_page_mask);
727         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
728         return;
729     }
730 
731     for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
732         vaddr page = addr + i;
733         CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
734 
735         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
736             tlb_n_used_entries_dec(cpu, midx);
737         }
738         tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
739     }
740 }
741 
742 typedef struct {
743     vaddr addr;
744     vaddr len;
745     uint16_t idxmap;
746     uint16_t bits;
747 } TLBFlushRangeData;
748 
749 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
750                                               TLBFlushRangeData d)
751 {
752     int mmu_idx;
753 
754     assert_cpu_is_self(cpu);
755 
756     tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
757               d.addr, d.bits, d.len, d.idxmap);
758 
759     qemu_spin_lock(&cpu->neg.tlb.c.lock);
760     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
761         if ((d.idxmap >> mmu_idx) & 1) {
762             tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
763         }
764     }
765     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
766 
767     /*
768      * If the length is larger than the jump cache size, then it will take
769      * longer to clear each entry individually than it will to clear it all.
770      */
771     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
772         tcg_flush_jmp_cache(cpu);
773         return;
774     }
775 
776     /*
777      * Discard jump cache entries for any tb which might overlap
778      * the flushed pages, including the page preceding the range.
779      */
780     d.addr -= TARGET_PAGE_SIZE;
781     for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
782         tb_jmp_cache_clear_page(cpu, d.addr);
783         d.addr += TARGET_PAGE_SIZE;
784     }
785 }
786 
787 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
788                                               run_on_cpu_data data)
789 {
790     TLBFlushRangeData *d = data.host_ptr;
791     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
792     g_free(d);
793 }
794 
795 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
796                                vaddr len, uint16_t idxmap,
797                                unsigned bits)
798 {
799     TLBFlushRangeData d;
800 
801     /*
802      * If all bits are significant, and len is small,
803      * this devolves to tlb_flush_page.
804      */
805     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
806         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
807         return;
808     }
809     /* If no page bits are significant, this devolves to tlb_flush. */
810     if (bits < TARGET_PAGE_BITS) {
811         tlb_flush_by_mmuidx(cpu, idxmap);
812         return;
813     }
814 
815     /* This should already be page aligned */
816     d.addr = addr & TARGET_PAGE_MASK;
817     d.len = len;
818     d.idxmap = idxmap;
819     d.bits = bits;
820 
821     if (qemu_cpu_is_self(cpu)) {
822         tlb_flush_range_by_mmuidx_async_0(cpu, d);
823     } else {
824         /* Otherwise allocate a structure, freed by the worker.  */
825         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
826         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
827                          RUN_ON_CPU_HOST_PTR(p));
828     }
829 }
830 
831 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
832                                    uint16_t idxmap, unsigned bits)
833 {
834     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
835 }
836 
837 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
838                                         vaddr addr, vaddr len,
839                                         uint16_t idxmap, unsigned bits)
840 {
841     TLBFlushRangeData d;
842     CPUState *dst_cpu;
843 
844     /*
845      * If all bits are significant, and len is small,
846      * this devolves to tlb_flush_page.
847      */
848     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
849         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
850         return;
851     }
852     /* If no page bits are significant, this devolves to tlb_flush. */
853     if (bits < TARGET_PAGE_BITS) {
854         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
855         return;
856     }
857 
858     /* This should already be page aligned */
859     d.addr = addr & TARGET_PAGE_MASK;
860     d.len = len;
861     d.idxmap = idxmap;
862     d.bits = bits;
863 
864     /* Allocate a separate data block for each destination cpu.  */
865     CPU_FOREACH(dst_cpu) {
866         if (dst_cpu != src_cpu) {
867             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
868             async_run_on_cpu(dst_cpu,
869                              tlb_flush_range_by_mmuidx_async_1,
870                              RUN_ON_CPU_HOST_PTR(p));
871         }
872     }
873 
874     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
875 }
876 
877 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
878                                             vaddr addr, uint16_t idxmap,
879                                             unsigned bits)
880 {
881     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
882                                        idxmap, bits);
883 }
884 
885 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
886                                                vaddr addr,
887                                                vaddr len,
888                                                uint16_t idxmap,
889                                                unsigned bits)
890 {
891     TLBFlushRangeData d, *p;
892     CPUState *dst_cpu;
893 
894     /*
895      * If all bits are significant, and len is small,
896      * this devolves to tlb_flush_page.
897      */
898     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
899         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
900         return;
901     }
902     /* If no page bits are significant, this devolves to tlb_flush. */
903     if (bits < TARGET_PAGE_BITS) {
904         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
905         return;
906     }
907 
908     /* This should already be page aligned */
909     d.addr = addr & TARGET_PAGE_MASK;
910     d.len = len;
911     d.idxmap = idxmap;
912     d.bits = bits;
913 
914     /* Allocate a separate data block for each destination cpu.  */
915     CPU_FOREACH(dst_cpu) {
916         if (dst_cpu != src_cpu) {
917             p = g_memdup(&d, sizeof(d));
918             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
919                              RUN_ON_CPU_HOST_PTR(p));
920         }
921     }
922 
923     p = g_memdup(&d, sizeof(d));
924     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
925                           RUN_ON_CPU_HOST_PTR(p));
926 }
927 
928 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
929                                                    vaddr addr,
930                                                    uint16_t idxmap,
931                                                    unsigned bits)
932 {
933     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
934                                               idxmap, bits);
935 }
936 
937 /* update the TLBs so that writes to code in the page at 'ram_addr'
938    can be detected */
939 void tlb_protect_code(ram_addr_t ram_addr)
940 {
941     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
942                                              TARGET_PAGE_SIZE,
943                                              DIRTY_MEMORY_CODE);
944 }
945 
946 /* update the TLB so that writes in physical page 'ram_addr' are no longer
947    tested for self-modifying code */
948 void tlb_unprotect_code(ram_addr_t ram_addr)
949 {
950     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
951 }
952 
953 
954 /*
955  * Dirty write flag handling
956  *
957  * When the TCG code writes to a location it looks up the address in
958  * the TLB and uses that data to compute the final address. If any of
959  * the lower bits of the address are set then the slow path is forced.
960  * There are a number of reasons to do this but for normal RAM the
961  * most usual is detecting writes to code regions which may invalidate
962  * generated code.
963  *
964  * Other vCPUs might be reading their TLBs during guest execution, so we update
965  * te->addr_write with qatomic_set. We don't need to worry about this for
966  * oversized guests as MTTCG is disabled for them.
967  *
968  * Called with tlb_c.lock held.
969  */
970 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
971                                          uintptr_t start, uintptr_t length)
972 {
973     uintptr_t addr = tlb_entry->addr_write;
974 
975     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
976                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
977         addr &= TARGET_PAGE_MASK;
978         addr += tlb_entry->addend;
979         if ((addr - start) < length) {
980 #if TARGET_LONG_BITS == 32
981             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
982             ptr_write += HOST_BIG_ENDIAN;
983             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
984 #elif TCG_OVERSIZED_GUEST
985             tlb_entry->addr_write |= TLB_NOTDIRTY;
986 #else
987             qatomic_set(&tlb_entry->addr_write,
988                         tlb_entry->addr_write | TLB_NOTDIRTY);
989 #endif
990         }
991     }
992 }
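
/*
 * Setting TLB_NOTDIRTY in addr_write makes the fast-path comparison fail
 * for later stores, so they take the slow path where notdirty_write()
 * can run.  For 32-bit targets the generated code compares only 32 bits
 * of the comparator, so the update above is done as a 32-bit atomic
 * store; the + HOST_BIG_ENDIAN adjustment picks the half of the 64-bit
 * field that holds those bits on big-endian hosts.
 */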
993 
994 /*
995  * Called with tlb_c.lock held.
996  * Called only from the vCPU context, i.e. the TLB's owner thread.
997  */
998 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
999 {
1000     *d = *s;
1001 }
1002 
1003 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1004  * the target vCPU).
1005  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1006  * thing actually updated is the target TLB entry ->addr_write flags.
1007  */
1008 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1009 {
1010     int mmu_idx;
1011 
1012     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1013     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1014         unsigned int i;
1015         unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
1016 
1017         for (i = 0; i < n; i++) {
1018             tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
1019                                          start1, length);
1020         }
1021 
1022         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1023             tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
1024                                          start1, length);
1025         }
1026     }
1027     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1028 }
1029 
1030 /* Called with tlb_c.lock held */
1031 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1032                                          vaddr addr)
1033 {
1034     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1035         tlb_entry->addr_write = addr;
1036     }
1037 }
1038 
1039 /* update the TLB corresponding to virtual page addr
1040    so that it is no longer dirty */
1041 void tlb_set_dirty(CPUState *cpu, vaddr addr)
1042 {
1043     int mmu_idx;
1044 
1045     assert_cpu_is_self(cpu);
1046 
1047     addr &= TARGET_PAGE_MASK;
1048     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1049     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1050         tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
1051     }
1052 
1053     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1054         int k;
1055         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1056             tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
1057         }
1058     }
1059     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1060 }
1061 
1062 /* Our TLB does not support large pages, so remember the area covered by
1063    large pages and trigger a full TLB flush if these are invalidated.  */
1064 static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
1065                                vaddr addr, uint64_t size)
1066 {
1067     vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
1068     vaddr lp_mask = ~(size - 1);
1069 
1070     if (lp_addr == (vaddr)-1) {
1071         /* No previous large page.  */
1072         lp_addr = addr;
1073     } else {
1074         /* Extend the existing region to include the new page.
1075            This is a compromise between unnecessary flushes and
1076            the cost of maintaining a full variable size TLB.  */
1077         lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
1078         while (((lp_addr ^ addr) & lp_mask) != 0) {
1079             lp_mask <<= 1;
1080         }
1081     }
1082     cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1083     cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
1084 }
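
/*
 * Worked example (32-bit addresses for brevity): if the recorded region
 * is a 2 MiB page at 0x40000000 (mask 0xffe00000) and another 2 MiB page
 * is added at 0x40400000, the two addresses differ in bit 22, so lp_mask
 * is widened to 0xff800000 and the recorded region becomes the 8 MiB
 * block at 0x40000000 covering both pages.
 */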
1085 
1086 static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1087                                    vaddr address, int flags,
1088                                    MMUAccessType access_type, bool enable)
1089 {
1090     if (enable) {
1091         address |= flags & TLB_FLAGS_MASK;
1092         flags &= TLB_SLOW_FLAGS_MASK;
1093         if (flags) {
1094             address |= TLB_FORCE_SLOW;
1095         }
1096     } else {
1097         address = -1;
1098         flags = 0;
1099     }
1100     ent->addr_idx[access_type] = address;
1101     full->slow_flags[access_type] = flags;
1102 }
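
/*
 * In other words: flags in TLB_FLAGS_MASK live in the comparator itself
 * and are seen by the fast path, while flags in TLB_SLOW_FLAGS_MASK are
 * kept in CPUTLBEntryFull; when any of the latter are present,
 * TLB_FORCE_SLOW is set in the comparator so that the access still
 * falls through to the slow path that can consult them.
 */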
1103 
1104 /*
1105  * Add a new TLB entry. At most one entry for a given virtual address
1106  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
1107  * supplied size is only used by tlb_flush_page.
1108  *
1109  * Called from TCG-generated code, which is under an RCU read-side
1110  * critical section.
1111  */
1112 void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1113                        vaddr addr, CPUTLBEntryFull *full)
1114 {
1115     CPUTLB *tlb = &cpu->neg.tlb;
1116     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1117     MemoryRegionSection *section;
1118     unsigned int index, read_flags, write_flags;
1119     uintptr_t addend;
1120     CPUTLBEntry *te, tn;
1121     hwaddr iotlb, xlat, sz, paddr_page;
1122     vaddr addr_page;
1123     int asidx, wp_flags, prot;
1124     bool is_ram, is_romd;
1125 
1126     assert_cpu_is_self(cpu);
1127 
1128     if (full->lg_page_size <= TARGET_PAGE_BITS) {
1129         sz = TARGET_PAGE_SIZE;
1130     } else {
1131         sz = (hwaddr)1 << full->lg_page_size;
1132         tlb_add_large_page(cpu, mmu_idx, addr, sz);
1133     }
1134     addr_page = addr & TARGET_PAGE_MASK;
1135     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1136 
1137     prot = full->prot;
1138     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1139     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1140                                                 &xlat, &sz, full->attrs, &prot);
1141     assert(sz >= TARGET_PAGE_SIZE);
1142 
1143     tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1144               " prot=%x idx=%d\n",
1145               addr, full->phys_addr, prot, mmu_idx);
1146 
1147     read_flags = 0;
1148     if (full->lg_page_size < TARGET_PAGE_BITS) {
1149         /* Repeat the MMU check and TLB fill on every access.  */
1150         read_flags |= TLB_INVALID_MASK;
1151     }
1152     if (full->attrs.byte_swap) {
1153         read_flags |= TLB_BSWAP;
1154     }
1155 
1156     is_ram = memory_region_is_ram(section->mr);
1157     is_romd = memory_region_is_romd(section->mr);
1158 
1159     if (is_ram || is_romd) {
1160         /* RAM and ROMD both have associated host memory. */
1161         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1162     } else {
1163         /* I/O does not; force the host address to NULL. */
1164         addend = 0;
1165     }
1166 
1167     write_flags = read_flags;
1168     if (is_ram) {
1169         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1170         assert(!(iotlb & ~TARGET_PAGE_MASK));
1171         /*
1172          * Computing is_clean is expensive; avoid all that unless
1173          * the page is actually writable.
1174          */
1175         if (prot & PAGE_WRITE) {
1176             if (section->readonly) {
1177                 write_flags |= TLB_DISCARD_WRITE;
1178             } else if (cpu_physical_memory_is_clean(iotlb)) {
1179                 write_flags |= TLB_NOTDIRTY;
1180             }
1181         }
1182     } else {
1183         /* I/O or ROMD */
1184         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1185         /*
1186          * Writes to romd devices must go through MMIO to enable write.
1187          * Reads to romd devices go through the ram_ptr found above,
1188          * but of course reads to I/O must go through MMIO.
1189          */
1190         write_flags |= TLB_MMIO;
1191         if (!is_romd) {
1192             read_flags = write_flags;
1193         }
1194     }
1195 
1196     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
1197                                               TARGET_PAGE_SIZE);
1198 
1199     index = tlb_index(cpu, mmu_idx, addr_page);
1200     te = tlb_entry(cpu, mmu_idx, addr_page);
1201 
1202     /*
1203      * Hold the TLB lock for the rest of the function. We could acquire/release
1204      * the lock several times in the function, but it is faster to amortize the
1205      * acquisition cost by acquiring it just once. Note that this leads to
1206      * a longer critical section, but this is not a concern since the TLB lock
1207      * is unlikely to be contended.
1208      */
1209     qemu_spin_lock(&tlb->c.lock);
1210 
1211     /* Note that the tlb is no longer clean.  */
1212     tlb->c.dirty |= 1 << mmu_idx;
1213 
1214     /* Make sure there's no cached translation for the new page.  */
1215     tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
1216 
1217     /*
1218      * Only evict the old entry to the victim tlb if it's for a
1219      * different page; otherwise just overwrite the stale data.
1220      */
1221     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1222         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1223         CPUTLBEntry *tv = &desc->vtable[vidx];
1224 
1225         /* Evict the old entry into the victim tlb.  */
1226         copy_tlb_helper_locked(tv, te);
1227         desc->vfulltlb[vidx] = desc->fulltlb[index];
1228         tlb_n_used_entries_dec(cpu, mmu_idx);
1229     }
1230 
1231     /* refill the tlb */
1232     /*
1233      * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
1234      * aligned ram_addr_t of the page base of the target RAM.
1235      * Otherwise, iotlb contains
1236      *  - a physical section number in the lower TARGET_PAGE_BITS
1237      *  - the offset within section->mr of the page base (I/O, ROMD) with the
1238      *    TARGET_PAGE_BITS masked off.
1239      * We subtract addr_page (which is page aligned and thus won't
1240      * disturb the low bits) to give an offset which can be added to the
1241      * (non-page-aligned) vaddr of the eventual memory access to get
1242      * the MemoryRegion offset for the access. Note that the vaddr we
1243      * subtract here is that of the page base, and not the same as the
1244      * vaddr we add back in io_prepare()/get_page_addr_code().
1245      */
1246     desc->fulltlb[index] = *full;
1247     full = &desc->fulltlb[index];
1248     full->xlat_section = iotlb - addr_page;
1249     full->phys_addr = paddr_page;
1250 
1251     /* Now calculate the new entry */
1252     tn.addend = addend - addr_page;
1253 
1254     tlb_set_compare(full, &tn, addr_page, read_flags,
1255                     MMU_INST_FETCH, prot & PAGE_EXEC);
1256 
1257     if (wp_flags & BP_MEM_READ) {
1258         read_flags |= TLB_WATCHPOINT;
1259     }
1260     tlb_set_compare(full, &tn, addr_page, read_flags,
1261                     MMU_DATA_LOAD, prot & PAGE_READ);
1262 
1263     if (prot & PAGE_WRITE_INV) {
1264         write_flags |= TLB_INVALID_MASK;
1265     }
1266     if (wp_flags & BP_MEM_WRITE) {
1267         write_flags |= TLB_WATCHPOINT;
1268     }
1269     tlb_set_compare(full, &tn, addr_page, write_flags,
1270                     MMU_DATA_STORE, prot & PAGE_WRITE);
1271 
1272     copy_tlb_helper_locked(te, &tn);
1273     tlb_n_used_entries_inc(cpu, mmu_idx);
1274     qemu_spin_unlock(&tlb->c.lock);
1275 }
1276 
1277 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
1278                              hwaddr paddr, MemTxAttrs attrs, int prot,
1279                              int mmu_idx, uint64_t size)
1280 {
1281     CPUTLBEntryFull full = {
1282         .phys_addr = paddr,
1283         .attrs = attrs,
1284         .prot = prot,
1285         .lg_page_size = ctz64(size)
1286     };
1287 
1288     assert(is_power_of_2(size));
1289     tlb_set_page_full(cpu, mmu_idx, addr, &full);
1290 }
1291 
1292 void tlb_set_page(CPUState *cpu, vaddr addr,
1293                   hwaddr paddr, int prot,
1294                   int mmu_idx, uint64_t size)
1295 {
1296     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1297                             prot, mmu_idx, size);
1298 }
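
/*
 * Sketch of a caller, for illustration only: a target's tlb_fill hook
 * (the mycpu_* names below are hypothetical) would translate the faulting
 * address and then install the mapping with one of the functions above:
 *
 *    static bool mycpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 *                               MMUAccessType type, int mmu_idx,
 *                               bool probe, uintptr_t ra)
 *    {
 *        hwaddr paddr;
 *        int prot;
 *
 *        if (!mycpu_translate(cs, addr, type, mmu_idx, &paddr, &prot)) {
 *            if (probe) {
 *                return false;
 *            }
 *            mycpu_raise_mmu_fault(cs, addr, type, mmu_idx, ra);
 *        }
 *        tlb_set_page(cs, addr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                     prot, mmu_idx, TARGET_PAGE_SIZE);
 *        return true;
 *    }
 */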
1299 
1300 /*
1301  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1302  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1303  * be discarded and looked up again (e.g. via tlb_entry()).
1304  */
1305 static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1306                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1307 {
1308     bool ok;
1309 
1310     /*
1311      * This is not a probe, so only valid return is success; failure
1312      * should result in exception + longjmp to the cpu loop.
1313      */
1314     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1315                                     access_type, mmu_idx, false, retaddr);
1316     assert(ok);
1317 }
1318 
1319 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1320                                         MMUAccessType access_type,
1321                                         int mmu_idx, uintptr_t retaddr)
1322 {
1323     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1324                                           mmu_idx, retaddr);
1325 }
1326 
1327 static MemoryRegionSection *
1328 io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
1329            MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
1330 {
1331     MemoryRegionSection *section;
1332     hwaddr mr_offset;
1333 
1334     section = iotlb_to_section(cpu, xlat, attrs);
1335     mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
1336     cpu->mem_io_pc = retaddr;
1337     if (!cpu->neg.can_do_io) {
1338         cpu_io_recompile(cpu, retaddr);
1339     }
1340 
1341     *out_offset = mr_offset;
1342     return section;
1343 }
1344 
1345 static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
1346                       unsigned size, MMUAccessType access_type, int mmu_idx,
1347                       MemTxResult response, uintptr_t retaddr)
1348 {
1349     if (!cpu->ignore_memory_transaction_failures
1350         && cpu->cc->tcg_ops->do_transaction_failed) {
1351         hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1352 
1353         cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1354                                                 access_type, mmu_idx,
1355                                                 full->attrs, response, retaddr);
1356     }
1357 }
1358 
1359 /* Return true if ADDR is present in the victim tlb, and has been copied
1360    back to the main tlb.  */
1361 static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
1362                            MMUAccessType access_type, vaddr page)
1363 {
1364     size_t vidx;
1365 
1366     assert_cpu_is_self(cpu);
1367     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1368         CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
1369         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1370 
1371         if (cmp == page) {
1372             /* Found entry in victim tlb, swap tlb and iotlb.  */
1373             CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
1374 
1375             qemu_spin_lock(&cpu->neg.tlb.c.lock);
1376             copy_tlb_helper_locked(&tmptlb, tlb);
1377             copy_tlb_helper_locked(tlb, vtlb);
1378             copy_tlb_helper_locked(vtlb, &tmptlb);
1379             qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1380 
1381             CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1382             CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
1383             CPUTLBEntryFull tmpf;
1384             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1385             return true;
1386         }
1387     }
1388     return false;
1389 }
1390 
1391 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1392                            CPUTLBEntryFull *full, uintptr_t retaddr)
1393 {
1394     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1395 
1396     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1397 
1398     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1399         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1400     }
1401 
1402     /*
1403      * Set both VGA and migration bits for simplicity and to remove
1404      * the notdirty callback faster.
1405      */
1406     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1407 
1408     /* We remove the notdirty callback only if the code has been flushed. */
1409     if (!cpu_physical_memory_is_clean(ram_addr)) {
1410         trace_memory_notdirty_set_dirty(mem_vaddr);
1411         tlb_set_dirty(cpu, mem_vaddr);
1412     }
1413 }
1414 
1415 static int probe_access_internal(CPUState *cpu, vaddr addr,
1416                                  int fault_size, MMUAccessType access_type,
1417                                  int mmu_idx, bool nonfault,
1418                                  void **phost, CPUTLBEntryFull **pfull,
1419                                  uintptr_t retaddr, bool check_mem_cbs)
1420 {
1421     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1422     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1423     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1424     vaddr page_addr = addr & TARGET_PAGE_MASK;
1425     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1426     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
1427     CPUTLBEntryFull *full;
1428 
1429     if (!tlb_hit_page(tlb_addr, page_addr)) {
1430         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
1431             if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
1432                                             mmu_idx, nonfault, retaddr)) {
1433                 /* Non-faulting page table read failed.  */
1434                 *phost = NULL;
1435                 *pfull = NULL;
1436                 return TLB_INVALID_MASK;
1437             }
1438 
1439             /* TLB resize via tlb_fill may have moved the entry.  */
1440             index = tlb_index(cpu, mmu_idx, addr);
1441             entry = tlb_entry(cpu, mmu_idx, addr);
1442 
1443             /*
1444              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1445              * to force the next access through tlb_fill.  We've just
1446              * called tlb_fill, so we know that this entry *is* valid.
1447              */
1448             flags &= ~TLB_INVALID_MASK;
1449         }
1450         tlb_addr = tlb_read_idx(entry, access_type);
1451     }
1452     flags &= tlb_addr;
1453 
1454     *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1455     flags |= full->slow_flags[access_type];
1456 
1457     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1458     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
1459         ||
1460         (access_type != MMU_INST_FETCH && force_mmio)) {
1461         *phost = NULL;
1462         return TLB_MMIO;
1463     }
1464 
1465     /* Everything else is RAM. */
1466     *phost = (void *)((uintptr_t)addr + entry->addend);
1467     return flags;
1468 }
1469 
1470 int probe_access_full(CPUArchState *env, vaddr addr, int size,
1471                       MMUAccessType access_type, int mmu_idx,
1472                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1473                       uintptr_t retaddr)
1474 {
1475     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1476                                       mmu_idx, nonfault, phost, pfull, retaddr,
1477                                       true);
1478 
1479     /* Handle clean RAM pages.  */
1480     if (unlikely(flags & TLB_NOTDIRTY)) {
1481         notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1482         flags &= ~TLB_NOTDIRTY;
1483     }
1484 
1485     return flags;
1486 }
1487 
1488 int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1489                           MMUAccessType access_type, int mmu_idx,
1490                           void **phost, CPUTLBEntryFull **pfull)
1491 {
1492     void *discard_phost;
1493     CPUTLBEntryFull *discard_tlb;
1494 
1495     /* privately handle users that don't need full results */
1496     phost = phost ? phost : &discard_phost;
1497     pfull = pfull ? pfull : &discard_tlb;
1498 
1499     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1500                                       mmu_idx, true, phost, pfull, 0, false);
1501 
1502     /* Handle clean RAM pages.  */
1503     if (unlikely(flags & TLB_NOTDIRTY)) {
1504         notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
1505         flags &= ~TLB_NOTDIRTY;
1506     }
1507 
1508     return flags;
1509 }
1510 
1511 int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1512                        MMUAccessType access_type, int mmu_idx,
1513                        bool nonfault, void **phost, uintptr_t retaddr)
1514 {
1515     CPUTLBEntryFull *full;
1516     int flags;
1517 
1518     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1519 
1520     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1521                                   mmu_idx, nonfault, phost, &full, retaddr,
1522                                   true);
1523 
1524     /* Handle clean RAM pages. */
1525     if (unlikely(flags & TLB_NOTDIRTY)) {
1526         notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1527         flags &= ~TLB_NOTDIRTY;
1528     }
1529 
1530     return flags;
1531 }
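/*
 * Illustrative sketch (not part of the original file): a target helper
 * that wants to know whether a store of up to 8 bytes would succeed,
 * without raising a guest exception, might use probe_access_flags as
 * below.  "env", "addr" and "mmu_idx" are assumed to come from the
 * caller, and addr..addr+7 is assumed not to cross a page.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 8, MMU_DATA_STORE,
 *                                    mmu_idx, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         ... the nonfaulting page table walk failed ...
 *     } else if (flags == 0) {
 *         ... plain dirty RAM: bytes may be written directly via host ...
 *     } else {
 *         ... MMIO, watchpoint, etc: use the full store path ...
 *     }
 */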
1532 
1533 void *probe_access(CPUArchState *env, vaddr addr, int size,
1534                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1535 {
1536     CPUTLBEntryFull *full;
1537     void *host;
1538     int flags;
1539 
1540     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1541 
1542     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1543                                   mmu_idx, false, &host, &full, retaddr,
1544                                   true);
1545 
1546     /* Per the interface, size == 0 merely faults the access. */
1547     if (size == 0) {
1548         return NULL;
1549     }
1550 
1551     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1552         /* Handle watchpoints.  */
1553         if (flags & TLB_WATCHPOINT) {
1554             int wp_access = (access_type == MMU_DATA_STORE
1555                              ? BP_MEM_WRITE : BP_MEM_READ);
1556             cpu_check_watchpoint(env_cpu(env), addr, size,
1557                                  full->attrs, wp_access, retaddr);
1558         }
1559 
1560         /* Handle clean RAM pages.  */
1561         if (flags & TLB_NOTDIRTY) {
1562             notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1563         }
1564     }
1565 
1566     return host;
1567 }
1568 
1569 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1570                         MMUAccessType access_type, int mmu_idx)
1571 {
1572     CPUTLBEntryFull *full;
1573     void *host;
1574     int flags;
1575 
1576     flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
1577                                   mmu_idx, true, &host, &full, 0, false);
1578 
1579     /* No combination of flags is expected by the caller. */
1580     return flags ? NULL : host;
1581 }
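/*
 * Illustrative sketch (not part of the original file): since any flag at
 * all makes tlb_vaddr_to_host return NULL, a typical caller falls back
 * to the ordinary load helpers on NULL.  "env", "addr" and "mmu_idx"
 * are assumed to come from the caller and the access to stay in one page.
 *
 *     uint32_t val;
 *     void *p = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     if (p) {
 *         val = ldl_p(p);             ... direct host access ...
 *     } else {
 *         val = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, GETPC());
 *     }
 */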
1582 
1583 /*
1584  * Return a ram_addr_t for the virtual address for execution.
1585  *
1586  * Return -1 if we can't translate and execute from an entire page
1587  * of RAM.  This will force us to execute by loading and translating
1588  * one insn at a time, without caching.
1589  *
1590  * NOTE: This function will trigger an exception if the page is
1591  * not executable.
1592  */
1593 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
1594                                         void **hostp)
1595 {
1596     CPUTLBEntryFull *full;
1597     void *p;
1598 
1599     (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
1600                                 cpu_mmu_index(env, true), false,
1601                                 &p, &full, 0, false);
1602     if (p == NULL) {
1603         return -1;
1604     }
1605 
1606     if (full->lg_page_size < TARGET_PAGE_BITS) {
1607         return -1;
1608     }
1609 
1610     if (hostp) {
1611         *hostp = p;
1612     }
1613     return qemu_ram_addr_from_host_nofail(p);
1614 }
1615 
1616 /* Load/store with atomicity primitives. */
1617 #include "ldst_atomicity.c.inc"
1618 
1619 #ifdef CONFIG_PLUGIN
1620 /*
1621  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1622  * This should be a hot path as we will have just looked this address up
1623  * in the softmmu lookup code (or helper). We don't handle re-fills or
1624  * checking the victim table. This is purely informational.
1625  *
1626  * The one corner case is i/o write, which can cause changes to the
1627  * address space.  Those changes, and the corresponding tlb flush,
1628  * should be delayed until the next TB, so even then this ought not fail.
1629  * But check, just in case.
1630  */
1631 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1632                        bool is_store, struct qemu_plugin_hwaddr *data)
1633 {
1634     CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
1635     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1636     MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
1637     uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
1638     CPUTLBEntryFull *full;
1639 
1640     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1641         return false;
1642     }
1643 
1644     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1645     data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1646 
1647     /* We must have an iotlb entry for MMIO */
1648     if (tlb_addr & TLB_MMIO) {
1649         MemoryRegionSection *section =
1650             iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
1651                              full->attrs);
1652         data->is_io = true;
1653         data->mr = section->mr;
1654     } else {
1655         data->is_io = false;
1656         data->mr = NULL;
1657     }
1658     return true;
1659 }
1660 #endif
1661 
1662 /*
1663  * Probe for a load/store operation.
1664  * The resolved host address and flags are returned in MMULookupPageData.
1665  */
1666 
1667 typedef struct MMULookupPageData {
1668     CPUTLBEntryFull *full;
1669     void *haddr;
1670     vaddr addr;
1671     int flags;
1672     int size;
1673 } MMULookupPageData;
1674 
1675 typedef struct MMULookupLocals {
1676     MMULookupPageData page[2];
1677     MemOp memop;
1678     int mmu_idx;
1679 } MMULookupLocals;
1680 
1681 /**
1682  * mmu_lookup1: translate one page
1683  * @cpu: generic cpu state
1684  * @data: lookup parameters
1685  * @mmu_idx: virtual address context
1686  * @access_type: load/store/code
1687  * @ra: return address into tcg generated code, or 0
1688  *
1689  * Resolve the translation for the one page at @data.addr, filling in
1690  * the rest of @data with the results.  If the translation fails,
1691  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
1692  * @mmu_idx may have resized.
1693  */
1694 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
1695                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
1696 {
1697     vaddr addr = data->addr;
1698     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1699     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1700     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1701     bool maybe_resized = false;
1702     CPUTLBEntryFull *full;
1703     int flags;
1704 
1705     /* If the TLB entry is for a different page, reload and try again.  */
1706     if (!tlb_hit(tlb_addr, addr)) {
1707         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
1708                             addr & TARGET_PAGE_MASK)) {
1709             tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
1710             maybe_resized = true;
1711             index = tlb_index(cpu, mmu_idx, addr);
1712             entry = tlb_entry(cpu, mmu_idx, addr);
1713         }
1714         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
1715     }
1716 
1717     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1718     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
1719     flags |= full->slow_flags[access_type];
1720 
1721     data->full = full;
1722     data->flags = flags;
1723     /* Compute haddr speculatively; depending on flags it might be invalid. */
1724     data->haddr = (void *)((uintptr_t)addr + entry->addend);
1725 
1726     return maybe_resized;
1727 }
1728 
1729 /**
1730  * mmu_watch_or_dirty
1731  * @cpu: generic cpu state
1732  * @data: lookup parameters
1733  * @access_type: load/store/code
1734  * @ra: return address into tcg generated code, or 0
1735  *
1736  * Trigger watchpoints for @data.addr:@data.size;
1737  * record writes to protected clean pages.
1738  */
1739 static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
1740                                MMUAccessType access_type, uintptr_t ra)
1741 {
1742     CPUTLBEntryFull *full = data->full;
1743     vaddr addr = data->addr;
1744     int flags = data->flags;
1745     int size = data->size;
1746 
1747     /* On watchpoint hit, this will longjmp out.  */
1748     if (flags & TLB_WATCHPOINT) {
1749         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1750         cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
1751         flags &= ~TLB_WATCHPOINT;
1752     }
1753 
1754     /* Note that notdirty is only set for writes. */
1755     if (flags & TLB_NOTDIRTY) {
1756         notdirty_write(cpu, addr, size, full, ra);
1757         flags &= ~TLB_NOTDIRTY;
1758     }
1759     data->flags = flags;
1760 }
1761 
1762 /**
1763  * mmu_lookup: translate page(s)
1764  * @cpu: generic cpu state
1765  * @addr: virtual address
1766  * @oi: combined mmu_idx and MemOp
1767  * @ra: return address into tcg generated code, or 0
1768  * @type: load/store/code
1769  * @l: output result
1770  *
1771  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
1772  * bytes.  Return true if the lookup crosses a page boundary.
1773  */
1774 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1775                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
1776 {
1777     unsigned a_bits;
1778     bool crosspage;
1779     int flags;
1780 
1781     l->memop = get_memop(oi);
1782     l->mmu_idx = get_mmuidx(oi);
1783 
1784     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
1785 
1786     /* Handle CPU specific unaligned behaviour */
1787     a_bits = get_alignment_bits(l->memop);
1788     if (addr & ((1 << a_bits) - 1)) {
1789         cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
1790     }
1791 
1792     l->page[0].addr = addr;
1793     l->page[0].size = memop_size(l->memop);
1794     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
1795     l->page[1].size = 0;
1796     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
1797 
1798     if (likely(!crosspage)) {
1799         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1800 
1801         flags = l->page[0].flags;
1802         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1803             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1804         }
1805         if (unlikely(flags & TLB_BSWAP)) {
1806             l->memop ^= MO_BSWAP;
1807         }
1808     } else {
1809         /* Finish computing the page-crossing split. */
1810         int size0 = l->page[1].addr - addr;
1811         l->page[1].size = l->page[0].size - size0;
1812         l->page[0].size = size0;
1813 
1814         /*
1815          * Lookup both pages, recognizing exceptions from either.  If the
1816          * second lookup potentially resized, refresh first CPUTLBEntryFull.
1817          */
1818         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1819         if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
1820             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
1821             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
1822         }
1823 
1824         flags = l->page[0].flags | l->page[1].flags;
1825         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1826             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1827             mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
1828         }
1829 
1830         /*
1831          * Since target/sparc is the only user of TLB_BSWAP, and all
1832          * Sparc accesses are aligned, any treatment across two pages
1833          * would be arbitrary.  Refuse it until there's a use.
1834          */
1835         tcg_debug_assert((flags & TLB_BSWAP) == 0);
1836     }
1837 
1838     return crosspage;
1839 }
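/*
 * Worked example for the split above (illustrative only, assuming a 4KiB
 * target page): a 4-byte access at addr = 0x1ffe computes
 * page[1].addr = (0x1ffe + 3) & TARGET_PAGE_MASK = 0x2000, so crosspage
 * is true; size0 = 0x2000 - 0x1ffe = 2, leaving page[0] = { 0x1ffe, 2 }
 * and page[1] = { 0x2000, 2 }.
 */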
1840 
1841 /*
1842  * Probe for an atomic operation.  Do not allow unaligned operations,
1843  * or io operations to proceed.  Return the host address.
1844  */
1845 static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1846                                int size, uintptr_t retaddr)
1847 {
1848     uintptr_t mmu_idx = get_mmuidx(oi);
1849     MemOp mop = get_memop(oi);
1850     int a_bits = get_alignment_bits(mop);
1851     uintptr_t index;
1852     CPUTLBEntry *tlbe;
1853     vaddr tlb_addr;
1854     void *hostaddr;
1855     CPUTLBEntryFull *full;
1856 
1857     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1858 
1859     /* Adjust the given return address.  */
1860     retaddr -= GETPC_ADJ;
1861 
1862     /* Enforce guest required alignment.  */
1863     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1864         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1865         cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
1866                              mmu_idx, retaddr);
1867     }
1868 
1869     /* Enforce qemu required alignment.  */
1870     if (unlikely(addr & (size - 1))) {
1871         /* We get here if guest alignment was not requested,
1872            or was not enforced by cpu_unaligned_access above.
1873            We might widen the access and emulate, but for now
1874            mark an exception and exit the cpu loop.  */
1875         goto stop_the_world;
1876     }
1877 
1878     index = tlb_index(cpu, mmu_idx, addr);
1879     tlbe = tlb_entry(cpu, mmu_idx, addr);
1880 
1881     /* Check TLB entry and enforce page permissions.  */
1882     tlb_addr = tlb_addr_write(tlbe);
1883     if (!tlb_hit(tlb_addr, addr)) {
1884         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
1885                             addr & TARGET_PAGE_MASK)) {
1886             tlb_fill(cpu, addr, size,
1887                      MMU_DATA_STORE, mmu_idx, retaddr);
1888             index = tlb_index(cpu, mmu_idx, addr);
1889             tlbe = tlb_entry(cpu, mmu_idx, addr);
1890         }
1891         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1892     }
1893 
1894     /*
1895      * Let the guest notice RMW on a write-only page.
1896      * We have just verified that the page is writable.
1897      * Subpage lookups may have left TLB_INVALID_MASK set,
1898      * but addr_read will only be -1 if PAGE_READ was unset.
1899      */
1900     if (unlikely(tlbe->addr_read == -1)) {
1901         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
1902         /*
1903          * Since we don't support reads and writes to different
1904          * addresses, and we do have the proper page loaded for
1905          * write, this shouldn't ever return.  But just in case,
1906          * handle via stop-the-world.
1907          */
1908         goto stop_the_world;
1909     }
1910     /* Collect tlb flags for read. */
1911     tlb_addr |= tlbe->addr_read;
1912 
1913     /* Notice an IO access or a needs-MMU-lookup access */
1914     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
1915         /* There's really nothing that can be done to
1916            support this apart from stop-the-world.  */
1917         goto stop_the_world;
1918     }
1919 
1920     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1921     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1922 
1923     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1924         notdirty_write(cpu, addr, size, full, retaddr);
1925     }
1926 
1927     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
1928         int wp_flags = 0;
1929 
1930         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
1931             wp_flags |= BP_MEM_WRITE;
1932         }
1933         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
1934             wp_flags |= BP_MEM_READ;
1935         }
1936         if (wp_flags) {
1937             cpu_check_watchpoint(cpu, addr, size,
1938                                  full->attrs, wp_flags, retaddr);
1939         }
1940     }
1941 
1942     return hostaddr;
1943 
1944  stop_the_world:
1945     cpu_loop_exit_atomic(cpu, retaddr);
1946 }
1947 
1948 /*
1949  * Load Helpers
1950  *
1951  * We support two different access types. MMU_INST_FETCH is
1952  * specifically for reading instructions from system memory. It is
1953  * called by the translation loop and in some helpers where the code
1954  * is disassembled. It shouldn't be called directly by guest code.
1955  *
1956  * For the benefit of TCG generated code, we want to avoid the
1957  * complication of ABI-specific return type promotion and always
1958  * return a value extended to the register size of the host. This is
1959  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1960  * data, and for that we always have uint64_t.
1961  *
1962  * We don't bother with this widened value for MMU_INST_FETCH accesses.
1963  */
1964 
1965 /**
1966  * do_ld_mmio_beN:
1967  * @cpu: generic cpu state
1968  * @full: page parameters
1969  * @ret_be: accumulated data
1970  * @addr: virtual address
1971  * @size: number of bytes
1972  * @mmu_idx: virtual address context
1973  * @ra: return address into tcg generated code, or 0
1974  * Context: iothread lock held
1975  *
1976  * Load @size bytes from @addr, which is memory-mapped i/o.
1977  * The bytes are concatenated in big-endian order with @ret_be.
1978  */
1979 static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
1980                                 uint64_t ret_be, vaddr addr, int size,
1981                                 int mmu_idx, MMUAccessType type, uintptr_t ra,
1982                                 MemoryRegion *mr, hwaddr mr_offset)
1983 {
1984     do {
1985         MemOp this_mop;
1986         unsigned this_size;
1987         uint64_t val;
1988         MemTxResult r;
1989 
1990         /* Read aligned pieces up to 8 bytes. */
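        /*
         * Worked example (illustrative only): for a 5-byte read whose
         * low address bits are 3, the first pass computes
         * ctz32(5 | 3 | 8) = ctz32(0xf) = 0, i.e. a 1-byte read; the
         * second pass computes ctz32(4 | 4 | 8) = 2, i.e. a 4-byte
         * read, which completes the access.
         */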
1991         this_mop = ctz32(size | (int)addr | 8);
1992         this_size = 1 << this_mop;
1993         this_mop |= MO_BE;
1994 
1995         r = memory_region_dispatch_read(mr, mr_offset, &val,
1996                                         this_mop, full->attrs);
1997         if (unlikely(r != MEMTX_OK)) {
1998             io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
1999         }
2000         if (this_size == 8) {
2001             return val;
2002         }
2003 
2004         ret_be = (ret_be << (this_size * 8)) | val;
2005         addr += this_size;
2006         mr_offset += this_size;
2007         size -= this_size;
2008     } while (size);
2009 
2010     return ret_be;
2011 }
2012 
2013 static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2014                                uint64_t ret_be, vaddr addr, int size,
2015                                int mmu_idx, MMUAccessType type, uintptr_t ra)
2016 {
2017     MemoryRegionSection *section;
2018     MemoryRegion *mr;
2019     hwaddr mr_offset;
2020     MemTxAttrs attrs;
2021     uint64_t ret;
2022 
2023     tcg_debug_assert(size > 0 && size <= 8);
2024 
2025     attrs = full->attrs;
2026     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2027     mr = section->mr;
2028 
2029     qemu_mutex_lock_iothread();
2030     ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
2031                           type, ra, mr, mr_offset);
2032     qemu_mutex_unlock_iothread();
2033 
2034     return ret;
2035 }
2036 
2037 static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2038                                uint64_t ret_be, vaddr addr, int size,
2039                                int mmu_idx, uintptr_t ra)
2040 {
2041     MemoryRegionSection *section;
2042     MemoryRegion *mr;
2043     hwaddr mr_offset;
2044     MemTxAttrs attrs;
2045     uint64_t a, b;
2046 
2047     tcg_debug_assert(size > 8 && size <= 16);
2048 
2049     attrs = full->attrs;
2050     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2051     mr = section->mr;
2052 
2053     qemu_mutex_lock_iothread();
2054     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
2055                         MMU_DATA_LOAD, ra, mr, mr_offset);
2056     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
2057                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
2058     qemu_mutex_unlock_iothread();
2059 
2060     return int128_make128(b, a);
2061 }
2062 
2063 /**
2064  * do_ld_bytes_beN
2065  * @p: translation parameters
2066  * @ret_be: accumulated data
2067  *
2068  * Load @p->size bytes from @p->haddr, which is RAM.
2069  * The bytes are concatenated in big-endian order with @ret_be.
2070  */
2071 static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
2072 {
2073     uint8_t *haddr = p->haddr;
2074     int i, size = p->size;
2075 
2076     for (i = 0; i < size; i++) {
2077         ret_be = (ret_be << 8) | haddr[i];
2078     }
2079     return ret_be;
2080 }
2081 
2082 /**
2083  * do_ld_parts_beN
2084  * @p: translation parameters
2085  * @ret_be: accumulated data
2086  *
2087  * As do_ld_bytes_beN, but atomically on each aligned part.
2088  */
2089 static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2090 {
2091     void *haddr = p->haddr;
2092     int size = p->size;
2093 
2094     do {
2095         uint64_t x;
2096         int n;
2097 
2098         /*
2099          * Find minimum of alignment and size.
2100          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2101          * would have only checked the low bits of addr|size once at the start,
2102          * but is just as easy.
2103          */
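        /*
         * Worked example (illustrative only): with haddr congruent to
         * 2 mod 8 and size 6, the first pass sees (2 | 6) & 7 == 6 and
         * does an atomic 2-byte load; the second pass sees
         * (4 | 4) & 7 == 4 and finishes with an atomic 4-byte load.
         */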
2104         switch (((uintptr_t)haddr | size) & 7) {
2105         case 4:
2106             x = cpu_to_be32(load_atomic4(haddr));
2107             ret_be = (ret_be << 32) | x;
2108             n = 4;
2109             break;
2110         case 2:
2111         case 6:
2112             x = cpu_to_be16(load_atomic2(haddr));
2113             ret_be = (ret_be << 16) | x;
2114             n = 2;
2115             break;
2116         default:
2117             x = *(uint8_t *)haddr;
2118             ret_be = (ret_be << 8) | x;
2119             n = 1;
2120             break;
2121         case 0:
2122             g_assert_not_reached();
2123         }
2124         haddr += n;
2125         size -= n;
2126     } while (size != 0);
2127     return ret_be;
2128 }
2129 
2130 /**
2131  * do_ld_whole_be4
2132  * @p: translation parameters
2133  * @ret_be: accumulated data
2134  *
2135  * As do_ld_bytes_beN, but with one atomic load.
2136  * Four aligned bytes are guaranteed to cover the load.
2137  */
2138 static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2139 {
2140     int o = p->addr & 3;
2141     uint32_t x = load_atomic4(p->haddr - o);
2142 
2143     x = cpu_to_be32(x);
2144     x <<= o * 8;
2145     x >>= (4 - p->size) * 8;
2146     return (ret_be << (p->size * 8)) | x;
2147 }
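/*
 * Worked example for the shifts above (illustrative only): if the aligned
 * word holds the bytes { 0x11, 0x22, 0x33, 0x44 } in address order and the
 * caller wants size 2 at offset o = 1, then after cpu_to_be32 we have
 * x = 0x11223344; "x <<= 8" drops the unwanted leading byte and
 * "x >>= 16" keeps just the two wanted bytes, giving x = 0x2233.
 */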
2148 
2149 /**
2150  * do_ld_whole_be8
2151  * @p: translation parameters
2152  * @ret_be: accumulated data
2153  *
2154  * As do_ld_bytes_beN, but with one atomic load.
2155  * Eight aligned bytes are guaranteed to cover the load.
2156  */
2157 static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2158                                 MMULookupPageData *p, uint64_t ret_be)
2159 {
2160     int o = p->addr & 7;
2161     uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
2162 
2163     x = cpu_to_be64(x);
2164     x <<= o * 8;
2165     x >>= (8 - p->size) * 8;
2166     return (ret_be << (p->size * 8)) | x;
2167 }
2168 
2169 /**
2170  * do_ld_whole_be16
2171  * @p: translation parameters
2172  * @ret_be: accumulated data
2173  *
2174  * As do_ld_bytes_beN, but with one atomic load.
2175  * 16 aligned bytes are guaranteed to cover the load.
2176  */
2177 static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
2178                                MMULookupPageData *p, uint64_t ret_be)
2179 {
2180     int o = p->addr & 15;
2181     Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
2182     int size = p->size;
2183 
2184     if (!HOST_BIG_ENDIAN) {
2185         y = bswap128(y);
2186     }
2187     y = int128_lshift(y, o * 8);
2188     y = int128_urshift(y, (16 - size) * 8);
2189     x = int128_make64(ret_be);
2190     x = int128_lshift(x, size * 8);
2191     return int128_or(x, y);
2192 }
2193 
2194 /*
2195  * Wrapper for the above.
2196  */
2197 static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2198                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2199                           MemOp mop, uintptr_t ra)
2200 {
2201     MemOp atom;
2202     unsigned tmp, half_size;
2203 
2204     if (unlikely(p->flags & TLB_MMIO)) {
2205         return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
2206                               mmu_idx, type, ra);
2207     }
2208 
2209     /*
2210      * It is a given that we cross a page and therefore there is no
2211      * atomicity for the load as a whole, but subobjects may need attention.
2212      */
2213     atom = mop & MO_ATOM_MASK;
2214     switch (atom) {
2215     case MO_ATOM_SUBALIGN:
2216         return do_ld_parts_beN(p, ret_be);
2217 
2218     case MO_ATOM_IFALIGN_PAIR:
2219     case MO_ATOM_WITHIN16_PAIR:
2220         tmp = mop & MO_SIZE;
2221         tmp = tmp ? tmp - 1 : 0;
2222         half_size = 1 << tmp;
2223         if (atom == MO_ATOM_IFALIGN_PAIR
2224             ? p->size == half_size
2225             : p->size >= half_size) {
2226             if (!HAVE_al8_fast && p->size < 4) {
2227                 return do_ld_whole_be4(p, ret_be);
2228             } else {
2229                 return do_ld_whole_be8(cpu, ra, p, ret_be);
2230             }
2231         }
2232         /* fall through */
2233 
2234     case MO_ATOM_IFALIGN:
2235     case MO_ATOM_WITHIN16:
2236     case MO_ATOM_NONE:
2237         return do_ld_bytes_beN(p, ret_be);
2238 
2239     default:
2240         g_assert_not_reached();
2241     }
2242 }
2243 
2244 /*
2245  * Wrapper for the above, for 8 < size < 16.
2246  */
2247 static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
2248                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
2249 {
2250     int size = p->size;
2251     uint64_t b;
2252     MemOp atom;
2253 
2254     if (unlikely(p->flags & TLB_MMIO)) {
2255         return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
2256     }
2257 
2258     /*
2259      * It is a given that we cross a page and therefore there is no
2260      * atomicity for the load as a whole, but subobjects may need attention.
2261      */
2262     atom = mop & MO_ATOM_MASK;
2263     switch (atom) {
2264     case MO_ATOM_SUBALIGN:
2265         p->size = size - 8;
2266         a = do_ld_parts_beN(p, a);
2267         p->haddr += size - 8;
2268         p->size = 8;
2269         b = do_ld_parts_beN(p, 0);
2270         break;
2271 
2272     case MO_ATOM_WITHIN16_PAIR:
2273         /* Since size > 8, this is the half that must be atomic. */
2274         return do_ld_whole_be16(cpu, ra, p, a);
2275 
2276     case MO_ATOM_IFALIGN_PAIR:
2277         /*
2278          * Since size > 8, both halves are misaligned,
2279          * and so neither is atomic.
2280          */
2281     case MO_ATOM_IFALIGN:
2282     case MO_ATOM_WITHIN16:
2283     case MO_ATOM_NONE:
2284         p->size = size - 8;
2285         a = do_ld_bytes_beN(p, a);
2286         b = ldq_be_p(p->haddr + size - 8);
2287         break;
2288 
2289     default:
2290         g_assert_not_reached();
2291     }
2292 
2293     return int128_make128(b, a);
2294 }
2295 
2296 static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2297                        MMUAccessType type, uintptr_t ra)
2298 {
2299     if (unlikely(p->flags & TLB_MMIO)) {
2300         return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
2301     } else {
2302         return *(uint8_t *)p->haddr;
2303     }
2304 }
2305 
2306 static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2307                         MMUAccessType type, MemOp memop, uintptr_t ra)
2308 {
2309     uint16_t ret;
2310 
2311     if (unlikely(p->flags & TLB_MMIO)) {
2312         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2313         if ((memop & MO_BSWAP) == MO_LE) {
2314             ret = bswap16(ret);
2315         }
2316     } else {
2317         /* Perform the load host endian, then swap if necessary. */
2318         ret = load_atom_2(cpu, ra, p->haddr, memop);
2319         if (memop & MO_BSWAP) {
2320             ret = bswap16(ret);
2321         }
2322     }
2323     return ret;
2324 }
2325 
2326 static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2327                         MMUAccessType type, MemOp memop, uintptr_t ra)
2328 {
2329     uint32_t ret;
2330 
2331     if (unlikely(p->flags & TLB_MMIO)) {
2332         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2333         if ((memop & MO_BSWAP) == MO_LE) {
2334             ret = bswap32(ret);
2335         }
2336     } else {
2337         /* Perform the load host endian. */
2338         ret = load_atom_4(cpu, ra, p->haddr, memop);
2339         if (memop & MO_BSWAP) {
2340             ret = bswap32(ret);
2341         }
2342     }
2343     return ret;
2344 }
2345 
2346 static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2347                         MMUAccessType type, MemOp memop, uintptr_t ra)
2348 {
2349     uint64_t ret;
2350 
2351     if (unlikely(p->flags & TLB_MMIO)) {
2352         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2353         if ((memop & MO_BSWAP) == MO_LE) {
2354             ret = bswap64(ret);
2355         }
2356     } else {
2357         /* Perform the load host endian. */
2358         ret = load_atom_8(cpu, ra, p->haddr, memop);
2359         if (memop & MO_BSWAP) {
2360             ret = bswap64(ret);
2361         }
2362     }
2363     return ret;
2364 }
2365 
2366 static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2367                           uintptr_t ra, MMUAccessType access_type)
2368 {
2369     MMULookupLocals l;
2370     bool crosspage;
2371 
2372     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2373     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2374     tcg_debug_assert(!crosspage);
2375 
2376     return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2377 }
2378 
2379 static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2380                            uintptr_t ra, MMUAccessType access_type)
2381 {
2382     MMULookupLocals l;
2383     bool crosspage;
2384     uint16_t ret;
2385     uint8_t a, b;
2386 
2387     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2388     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2389     if (likely(!crosspage)) {
2390         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2391     }
2392 
2393     a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2394     b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
2395 
2396     if ((l.memop & MO_BSWAP) == MO_LE) {
2397         ret = a | (b << 8);
2398     } else {
2399         ret = b | (a << 8);
2400     }
2401     return ret;
2402 }
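/*
 * Worked example for the recombination above (illustrative only): if the
 * byte at the lower address (page 0) is 0xAA and the byte on the next
 * page is 0xBB, a little-endian MemOp yields 0xBBAA and a big-endian
 * MemOp yields 0xAABB.
 */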
2403 
2404 static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2405                            uintptr_t ra, MMUAccessType access_type)
2406 {
2407     MMULookupLocals l;
2408     bool crosspage;
2409     uint32_t ret;
2410 
2411     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2412     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2413     if (likely(!crosspage)) {
2414         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2415     }
2416 
2417     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2418     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2419     if ((l.memop & MO_BSWAP) == MO_LE) {
2420         ret = bswap32(ret);
2421     }
2422     return ret;
2423 }
2424 
2425 static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2426                            uintptr_t ra, MMUAccessType access_type)
2427 {
2428     MMULookupLocals l;
2429     bool crosspage;
2430     uint64_t ret;
2431 
2432     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2433     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2434     if (likely(!crosspage)) {
2435         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2436     }
2437 
2438     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2439     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2440     if ((l.memop & MO_BSWAP) == MO_LE) {
2441         ret = bswap64(ret);
2442     }
2443     return ret;
2444 }
2445 
2446 static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
2447                           MemOpIdx oi, uintptr_t ra)
2448 {
2449     MMULookupLocals l;
2450     bool crosspage;
2451     uint64_t a, b;
2452     Int128 ret;
2453     int first;
2454 
2455     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2456     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
2457     if (likely(!crosspage)) {
2458         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2459             ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
2460                                    l.mmu_idx, ra);
2461             if ((l.memop & MO_BSWAP) == MO_LE) {
2462                 ret = bswap128(ret);
2463             }
2464         } else {
2465             /* Perform the load host endian. */
2466             ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
2467             if (l.memop & MO_BSWAP) {
2468                 ret = bswap128(ret);
2469             }
2470         }
2471         return ret;
2472     }
2473 
2474     first = l.page[0].size;
2475     if (first == 8) {
2476         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
2477 
2478         a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2479         b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2480         if ((mop8 & MO_BSWAP) == MO_LE) {
2481             ret = int128_make128(a, b);
2482         } else {
2483             ret = int128_make128(b, a);
2484         }
2485         return ret;
2486     }
2487 
2488     if (first < 8) {
2489         a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
2490                       MMU_DATA_LOAD, l.memop, ra);
2491         ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
2492     } else {
2493         ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
2494         b = int128_getlo(ret);
2495         ret = int128_lshift(ret, l.page[1].size * 8);
2496         a = int128_gethi(ret);
2497         b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
2498                       MMU_DATA_LOAD, l.memop, ra);
2499         ret = int128_make128(b, a);
2500     }
2501     if ((l.memop & MO_BSWAP) == MO_LE) {
2502         ret = bswap128(ret);
2503     }
2504     return ret;
2505 }
2506 
2507 /*
2508  * Store Helpers
2509  */
2510 
2511 /**
2512  * do_st_mmio_leN:
2513  * @cpu: generic cpu state
2514  * @full: page parameters
2515  * @val_le: data to store
2516  * @addr: virtual address
2517  * @size: number of bytes
2518  * @mmu_idx: virtual address context
2519  * @ra: return address into tcg generated code, or 0
2520  * Context: iothread lock held
2521  *
2522  * Store @size bytes at @addr, which is memory-mapped i/o.
2523  * The bytes to store are extracted in little-endian order from @val_le;
2524  * return the bytes of @val_le beyond @size that have not been stored.
2525  */
2526 static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2527                                 uint64_t val_le, vaddr addr, int size,
2528                                 int mmu_idx, uintptr_t ra,
2529                                 MemoryRegion *mr, hwaddr mr_offset)
2530 {
2531     do {
2532         MemOp this_mop;
2533         unsigned this_size;
2534         MemTxResult r;
2535 
2536         /* Store aligned pieces up to 8 bytes. */
2537         this_mop = ctz32(size | (int)addr | 8);
2538         this_size = 1 << this_mop;
2539         this_mop |= MO_LE;
2540 
2541         r = memory_region_dispatch_write(mr, mr_offset, val_le,
2542                                          this_mop, full->attrs);
2543         if (unlikely(r != MEMTX_OK)) {
2544             io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
2545                       mmu_idx, r, ra);
2546         }
2547         if (this_size == 8) {
2548             return 0;
2549         }
2550 
2551         val_le >>= this_size * 8;
2552         addr += this_size;
2553         mr_offset += this_size;
2554         size -= this_size;
2555     } while (size);
2556 
2557     return val_le;
2558 }
2559 
2560 static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2561                                uint64_t val_le, vaddr addr, int size,
2562                                int mmu_idx, uintptr_t ra)
2563 {
2564     MemoryRegionSection *section;
2565     hwaddr mr_offset;
2566     MemoryRegion *mr;
2567     MemTxAttrs attrs;
2568     uint64_t ret;
2569 
2570     tcg_debug_assert(size > 0 && size <= 8);
2571 
2572     attrs = full->attrs;
2573     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2574     mr = section->mr;
2575 
2576     qemu_mutex_lock_iothread();
2577     ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
2578                           ra, mr, mr_offset);
2579     qemu_mutex_unlock_iothread();
2580 
2581     return ret;
2582 }
2583 
2584 static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2585                                  Int128 val_le, vaddr addr, int size,
2586                                  int mmu_idx, uintptr_t ra)
2587 {
2588     MemoryRegionSection *section;
2589     MemoryRegion *mr;
2590     hwaddr mr_offset;
2591     MemTxAttrs attrs;
2592     uint64_t ret;
2593 
2594     tcg_debug_assert(size > 8 && size <= 16);
2595 
2596     attrs = full->attrs;
2597     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2598     mr = section->mr;
2599 
2600     qemu_mutex_lock_iothread();
2601     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
2602                     mmu_idx, ra, mr, mr_offset);
2603     ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
2604                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
2605     qemu_mutex_unlock_iothread();
2606 
2607     return ret;
2608 }
2609 
2610 /*
2611  * Wrapper for the above.
2612  */
2613 static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
2614                           uint64_t val_le, int mmu_idx,
2615                           MemOp mop, uintptr_t ra)
2616 {
2617     MemOp atom;
2618     unsigned tmp, half_size;
2619 
2620     if (unlikely(p->flags & TLB_MMIO)) {
2621         return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
2622                               p->size, mmu_idx, ra);
2623     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2624         return val_le >> (p->size * 8);
2625     }
2626 
2627     /*
2628      * It is a given that we cross a page and therefore there is no atomicity
2629      * for the store as a whole, but subobjects may need attention.
2630      */
2631     atom = mop & MO_ATOM_MASK;
2632     switch (atom) {
2633     case MO_ATOM_SUBALIGN:
2634         return store_parts_leN(p->haddr, p->size, val_le);
2635 
2636     case MO_ATOM_IFALIGN_PAIR:
2637     case MO_ATOM_WITHIN16_PAIR:
2638         tmp = mop & MO_SIZE;
2639         tmp = tmp ? tmp - 1 : 0;
2640         half_size = 1 << tmp;
2641         if (atom == MO_ATOM_IFALIGN_PAIR
2642             ? p->size == half_size
2643             : p->size >= half_size) {
2644             if (!HAVE_al8_fast && p->size <= 4) {
2645                 return store_whole_le4(p->haddr, p->size, val_le);
2646             } else if (HAVE_al8) {
2647                 return store_whole_le8(p->haddr, p->size, val_le);
2648             } else {
2649                 cpu_loop_exit_atomic(cpu, ra);
2650             }
2651         }
2652         /* fall through */
2653 
2654     case MO_ATOM_IFALIGN:
2655     case MO_ATOM_WITHIN16:
2656     case MO_ATOM_NONE:
2657         return store_bytes_leN(p->haddr, p->size, val_le);
2658 
2659     default:
2660         g_assert_not_reached();
2661     }
2662 }
2663 
2664 /*
2665  * Wrapper for the above, for 8 < size < 16.
2666  */
2667 static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
2668                             Int128 val_le, int mmu_idx,
2669                             MemOp mop, uintptr_t ra)
2670 {
2671     int size = p->size;
2672     MemOp atom;
2673 
2674     if (unlikely(p->flags & TLB_MMIO)) {
2675         return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
2676                                 size, mmu_idx, ra);
2677     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2678         return int128_gethi(val_le) >> ((size - 8) * 8);
2679     }
2680 
2681     /*
2682      * It is a given that we cross a page and therefore there is no atomicity
2683      * for the store as a whole, but subobjects may need attention.
2684      */
2685     atom = mop & MO_ATOM_MASK;
2686     switch (atom) {
2687     case MO_ATOM_SUBALIGN:
2688         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
2689         return store_parts_leN(p->haddr + 8, p->size - 8,
2690                                int128_gethi(val_le));
2691 
2692     case MO_ATOM_WITHIN16_PAIR:
2693         /* Since size > 8, this is the half that must be atomic. */
2694         if (!HAVE_CMPXCHG128) {
2695             cpu_loop_exit_atomic(cpu, ra);
2696         }
2697         return store_whole_le16(p->haddr, p->size, val_le);
2698 
2699     case MO_ATOM_IFALIGN_PAIR:
2700         /*
2701          * Since size > 8, both halves are misaligned,
2702          * and so neither is atomic.
2703          */
2704     case MO_ATOM_IFALIGN:
2705     case MO_ATOM_WITHIN16:
2706     case MO_ATOM_NONE:
2707         stq_le_p(p->haddr, int128_getlo(val_le));
2708         return store_bytes_leN(p->haddr + 8, p->size - 8,
2709                                int128_gethi(val_le));
2710 
2711     default:
2712         g_assert_not_reached();
2713     }
2714 }
2715 
2716 static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
2717                     int mmu_idx, uintptr_t ra)
2718 {
2719     if (unlikely(p->flags & TLB_MMIO)) {
2720         do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
2721     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2722         /* nothing */
2723     } else {
2724         *(uint8_t *)p->haddr = val;
2725     }
2726 }
2727 
2728 static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
2729                     int mmu_idx, MemOp memop, uintptr_t ra)
2730 {
2731     if (unlikely(p->flags & TLB_MMIO)) {
2732         if ((memop & MO_BSWAP) != MO_LE) {
2733             val = bswap16(val);
2734         }
2735         do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
2736     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2737         /* nothing */
2738     } else {
2739         /* Swap to host endian if necessary, then store. */
2740         if (memop & MO_BSWAP) {
2741             val = bswap16(val);
2742         }
2743         store_atom_2(cpu, ra, p->haddr, memop, val);
2744     }
2745 }
2746 
2747 static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
2748                     int mmu_idx, MemOp memop, uintptr_t ra)
2749 {
2750     if (unlikely(p->flags & TLB_MMIO)) {
2751         if ((memop & MO_BSWAP) != MO_LE) {
2752             val = bswap32(val);
2753         }
2754         do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
2755     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2756         /* nothing */
2757     } else {
2758         /* Swap to host endian if necessary, then store. */
2759         if (memop & MO_BSWAP) {
2760             val = bswap32(val);
2761         }
2762         store_atom_4(cpu, ra, p->haddr, memop, val);
2763     }
2764 }
2765 
2766 static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
2767                     int mmu_idx, MemOp memop, uintptr_t ra)
2768 {
2769     if (unlikely(p->flags & TLB_MMIO)) {
2770         if ((memop & MO_BSWAP) != MO_LE) {
2771             val = bswap64(val);
2772         }
2773         do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
2774     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2775         /* nothing */
2776     } else {
2777         /* Swap to host endian if necessary, then store. */
2778         if (memop & MO_BSWAP) {
2779             val = bswap64(val);
2780         }
2781         store_atom_8(cpu, ra, p->haddr, memop, val);
2782     }
2783 }
2784 
2785 static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
2786                        MemOpIdx oi, uintptr_t ra)
2787 {
2788     MMULookupLocals l;
2789     bool crosspage;
2790 
2791     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2792     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2793     tcg_debug_assert(!crosspage);
2794 
2795     do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
2796 }
2797 
2798 static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
2799                        MemOpIdx oi, uintptr_t ra)
2800 {
2801     MMULookupLocals l;
2802     bool crosspage;
2803     uint8_t a, b;
2804 
2805     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2806     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2807     if (likely(!crosspage)) {
2808         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2809         return;
2810     }
2811 
2812     if ((l.memop & MO_BSWAP) == MO_LE) {
2813         a = val, b = val >> 8;
2814     } else {
2815         b = val, a = val >> 8;
2816     }
2817     do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
2818     do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
2819 }
2820 
2821 static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
2822                        MemOpIdx oi, uintptr_t ra)
2823 {
2824     MMULookupLocals l;
2825     bool crosspage;
2826 
2827     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2828     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2829     if (likely(!crosspage)) {
2830         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2831         return;
2832     }
2833 
2834     /* Swap to little endian for simplicity, then store by bytes. */
2835     if ((l.memop & MO_BSWAP) != MO_LE) {
2836         val = bswap32(val);
2837     }
2838     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2839     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2840 }
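/*
 * Worked example for the chaining above (illustrative only): after the
 * optional swap, suppose val = 0x44332211 and only one byte fits on the
 * first page.  The first do_st_leN call writes 0x11 and returns
 * val >> 8 = 0x00443322; the second call writes 0x22, 0x33, 0x44 on the
 * next page, so the guest sees 11 22 33 44 in ascending address order.
 */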
2841 
2842 static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
2843                        MemOpIdx oi, uintptr_t ra)
2844 {
2845     MMULookupLocals l;
2846     bool crosspage;
2847 
2848     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2849     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2850     if (likely(!crosspage)) {
2851         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2852         return;
2853     }
2854 
2855     /* Swap to little endian for simplicity, then store by bytes. */
2856     if ((l.memop & MO_BSWAP) != MO_LE) {
2857         val = bswap64(val);
2858     }
2859     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2860     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2861 }
2862 
2863 static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
2864                         MemOpIdx oi, uintptr_t ra)
2865 {
2866     MMULookupLocals l;
2867     bool crosspage;
2868     uint64_t a, b;
2869     int first;
2870 
2871     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2872     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2873     if (likely(!crosspage)) {
2874         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2875             if ((l.memop & MO_BSWAP) != MO_LE) {
2876                 val = bswap128(val);
2877             }
2878             do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
2879         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
2880             /* nothing */
2881         } else {
2882             /* Swap to host endian if necessary, then store. */
2883             if (l.memop & MO_BSWAP) {
2884                 val = bswap128(val);
2885             }
2886             store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
2887         }
2888         return;
2889     }
2890 
2891     first = l.page[0].size;
2892     if (first == 8) {
2893         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
2894 
2895         if (l.memop & MO_BSWAP) {
2896             val = bswap128(val);
2897         }
2898         if (HOST_BIG_ENDIAN) {
2899             b = int128_getlo(val), a = int128_gethi(val);
2900         } else {
2901             a = int128_getlo(val), b = int128_gethi(val);
2902         }
2903         do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
2904         do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
2905         return;
2906     }
2907 
2908     if ((l.memop & MO_BSWAP) != MO_LE) {
2909         val = bswap128(val);
2910     }
2911     if (first < 8) {
2912         do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
2913         val = int128_urshift(val, first * 8);
2914         do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2915     } else {
2916         b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2917         do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
2918     }
2919 }
2920 
2921 #include "ldst_common.c.inc"
2922 
2923 /*
2924  * First set of functions passes in OI and RETADDR.
2925  * This makes them callable from other helpers.
2926  */
2927 
2928 #define ATOMIC_NAME(X) \
2929     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
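/*
 * For example (illustrative only): with the SUFFIX and END values that
 * atomic_template.h sets up for a 4-byte little-endian access,
 * ATOMIC_NAME(cmpxchg) expands to cpu_atomic_cmpxchgl_le_mmu.
 */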
2930 
2931 #define ATOMIC_MMU_CLEANUP
2932 
2933 #include "atomic_common.c.inc"
2934 
2935 #define DATA_SIZE 1
2936 #include "atomic_template.h"
2937 
2938 #define DATA_SIZE 2
2939 #include "atomic_template.h"
2940 
2941 #define DATA_SIZE 4
2942 #include "atomic_template.h"
2943 
2944 #ifdef CONFIG_ATOMIC64
2945 #define DATA_SIZE 8
2946 #include "atomic_template.h"
2947 #endif
2948 
2949 #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
2950 #define DATA_SIZE 16
2951 #include "atomic_template.h"
2952 #endif
2953 
2954 /* Code access functions.  */
2955 
2956 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2957 {
2958     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2959     return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
2960 }
2961 
2962 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
2963 {
2964     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2965     return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
2966 }
2967 
2968 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
2969 {
2970     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2971     return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
2972 }
2973 
2974 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2975 {
2976     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
2977     return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
2978 }
2979 
2980 uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
2981                          MemOpIdx oi, uintptr_t retaddr)
2982 {
2983     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
2984 }
2985 
2986 uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
2987                           MemOpIdx oi, uintptr_t retaddr)
2988 {
2989     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
2990 }
2991 
2992 uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
2993                           MemOpIdx oi, uintptr_t retaddr)
2994 {
2995     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
2996 }
2997 
2998 uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
2999                           MemOpIdx oi, uintptr_t retaddr)
3000 {
3001     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3002 }
3003