xref: /qemu/accel/tcg/cputlb.c (revision 573581b1)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/cputlb.h"
27 #include "exec/tb-flush.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "tcg/tcg.h"
31 #include "qemu/error-report.h"
32 #include "exec/log.h"
33 #include "exec/helper-proto-common.h"
34 #include "qemu/atomic.h"
35 #include "qemu/atomic128.h"
36 #include "exec/translate-all.h"
37 #include "trace.h"
38 #include "tb-hash.h"
39 #include "internal-common.h"
40 #include "internal-target.h"
41 #ifdef CONFIG_PLUGIN
42 #include "qemu/plugin-memory.h"
43 #endif
44 #include "tcg/tcg-ldst.h"
45 #include "tcg/oversized-guest.h"
46 
47 /* DEBUG defines; enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
48 /* #define DEBUG_TLB */
49 /* #define DEBUG_TLB_LOG */
50 
51 #ifdef DEBUG_TLB
52 # define DEBUG_TLB_GATE 1
53 # ifdef DEBUG_TLB_LOG
54 #  define DEBUG_TLB_LOG_GATE 1
55 # else
56 #  define DEBUG_TLB_LOG_GATE 0
57 # endif
58 #else
59 # define DEBUG_TLB_GATE 0
60 # define DEBUG_TLB_LOG_GATE 0
61 #endif
62 
63 #define tlb_debug(fmt, ...) do { \
64     if (DEBUG_TLB_LOG_GATE) { \
65         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
66                       ## __VA_ARGS__); \
67     } else if (DEBUG_TLB_GATE) { \
68         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
69     } \
70 } while (0)
71 
72 #define assert_cpu_is_self(cpu) do {                              \
73         if (DEBUG_TLB_GATE) {                                     \
74             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
75         }                                                         \
76     } while (0)
77 
78 /* run_on_cpu_data.target_ptr should always be big enough for a
79  * vaddr even on 32 bit builds
80  */
81 QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
82 
83 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
84  */
85 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
86 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
87 
88 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
89 {
90     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
91 }
92 
93 static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
94 {
95     return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
96 }
97 
98 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
99                              size_t max_entries)
100 {
101     desc->window_begin_ns = ns;
102     desc->window_max_entries = max_entries;
103 }
104 
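/* Clear every jump-cache entry whose hash bucket maps to @page_addr. */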
105 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
106 {
107     CPUJumpCache *jc = cpu->tb_jmp_cache;
108     int i, i0;
109 
110     if (unlikely(!jc)) {
111         return;
112     }
113 
114     i0 = tb_jmp_cache_hash_page(page_addr);
115     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
116         qatomic_set(&jc->array[i0 + i].tb, NULL);
117     }
118 }
119 
120 /**
121  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
122  * @desc: The CPUTLBDesc portion of the TLB
123  * @fast: The CPUTLBDescFast portion of the same TLB
124  *
125  * Called with tlb_lock held.
126  *
127  * We have two main constraints when resizing a TLB: (1) we only resize it
128  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
129  * the array or unnecessarily flushing it), which means we do not control how
130  * frequently the resizing can occur; (2) we don't have access to the guest's
131  * future scheduling decisions, and therefore have to decide the magnitude of
132  * the resize based on past observations.
133  *
134  * In general, a memory-hungry process can benefit greatly from an appropriately
135  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
136  * we just have to make the TLB as large as possible; while an oversized TLB
137  * results in minimal TLB miss rates, it also takes longer to be flushed
138  * (flushes can be _very_ frequent), and the reduced locality can also hurt
139  * performance.
140  *
141  * To achieve near-optimal performance for all kinds of workloads, we:
142  *
143  * 1. Aggressively increase the size of the TLB when the use rate of the
144  * TLB being flushed is high, since it is likely that in the near future this
145  * memory-hungry process will execute again, and its memory hungriness will
146  * probably be similar.
147  *
148  * 2. Slowly reduce the size of the TLB as the use rate declines over a
149  * reasonably large time window. The rationale is that if in such a time window
150  * we have not observed a high TLB use rate, it is likely that we won't observe
151  * it in the near future. In that case, once a time window expires we downsize
152  * the TLB to match the maximum use rate observed in the window.
153  *
154  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
155  * since in that range performance is likely near-optimal. Recall that the TLB
156  * is direct mapped, so we want the use rate to be low (or at least not too
157  * high), since otherwise we are likely to have a significant amount of
158  * conflict misses.
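 *
 * As a rough illustration of the policy below (example numbers only):
 * with a 1024-entry TLB, observing up to 800 used entries in the
 * current window gives a use rate of 800 * 100 / 1024 = 78%, so the
 * next flush doubles the TLB to 2048 entries.  Conversely, if at most
 * 80 entries were used by the time a window expires (about 8%), the
 * TLB is shrunk to pow2ceil(80) = 128 entries; the expected use rate
 * 80/128 (about 62%) stays below 70%, so no extra doubling is needed.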
159  */
160 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
161                                   int64_t now)
162 {
163     size_t old_size = tlb_n_entries(fast);
164     size_t rate;
165     size_t new_size = old_size;
166     int64_t window_len_ms = 100;
167     int64_t window_len_ns = window_len_ms * 1000 * 1000;
168     bool window_expired = now > desc->window_begin_ns + window_len_ns;
169 
170     if (desc->n_used_entries > desc->window_max_entries) {
171         desc->window_max_entries = desc->n_used_entries;
172     }
173     rate = desc->window_max_entries * 100 / old_size;
174 
175     if (rate > 70) {
176         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
177     } else if (rate < 30 && window_expired) {
178         size_t ceil = pow2ceil(desc->window_max_entries);
179         size_t expected_rate = desc->window_max_entries * 100 / ceil;
180 
181         /*
182          * Avoid undersizing when the max number of entries seen is just below
183          * a pow2. For instance, if max_entries == 1025, the expected use rate
184          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
185          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
186          * later. Thus, make sure that the expected use rate remains below 70%.
187          * (and since we double the size, that means the lowest rate we'd
188          * expect to get is 35%, which is still in the 30-70% range where
189          * we consider that the size is appropriate.)
190          */
191         if (expected_rate > 70) {
192             ceil *= 2;
193         }
194         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
195     }
196 
197     if (new_size == old_size) {
198         if (window_expired) {
199             tlb_window_reset(desc, now, desc->n_used_entries);
200         }
201         return;
202     }
203 
204     g_free(fast->table);
205     g_free(desc->fulltlb);
206 
207     tlb_window_reset(desc, now, 0);
208     /* desc->n_used_entries is cleared by the caller */
209     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
210     fast->table = g_try_new(CPUTLBEntry, new_size);
211     desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
212 
213     /*
214      * If the allocations fail, try smaller sizes. We just freed some
215      * memory, so going back to half of new_size has a good chance of working.
216      * Increased memory pressure elsewhere in the system might cause the
217      * allocations to fail though, so we progressively reduce the allocation
218      * size, aborting if we cannot even allocate the smallest TLB we support.
219      */
220     while (fast->table == NULL || desc->fulltlb == NULL) {
221         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
222             error_report("%s: %s", __func__, strerror(errno));
223             abort();
224         }
225         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
226         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
227 
228         g_free(fast->table);
229         g_free(desc->fulltlb);
230         fast->table = g_try_new(CPUTLBEntry, new_size);
231         desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
232     }
233 }
234 
235 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
236 {
237     desc->n_used_entries = 0;
238     desc->large_page_addr = -1;
239     desc->large_page_mask = -1;
240     desc->vindex = 0;
241     memset(fast->table, -1, sizeof_tlb(fast));
242     memset(desc->vtable, -1, sizeof(desc->vtable));
243 }
244 
245 static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
246                                         int64_t now)
247 {
248     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
249     CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
250 
251     tlb_mmu_resize_locked(desc, fast, now);
252     tlb_mmu_flush_locked(desc, fast);
253 }
254 
255 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
256 {
257     size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
258 
259     tlb_window_reset(desc, now, 0);
260     desc->n_used_entries = 0;
261     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
262     fast->table = g_new(CPUTLBEntry, n_entries);
263     desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
264     tlb_mmu_flush_locked(desc, fast);
265 }
266 
267 static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
268 {
269     cpu->neg.tlb.d[mmu_idx].n_used_entries++;
270 }
271 
272 static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
273 {
274     cpu->neg.tlb.d[mmu_idx].n_used_entries--;
275 }
276 
277 void tlb_init(CPUState *cpu)
278 {
279     int64_t now = get_clock_realtime();
280     int i;
281 
282     qemu_spin_init(&cpu->neg.tlb.c.lock);
283 
284     /* All tlbs are initialized flushed. */
285     cpu->neg.tlb.c.dirty = 0;
286 
287     for (i = 0; i < NB_MMU_MODES; i++) {
288         tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
289     }
290 }
291 
292 void tlb_destroy(CPUState *cpu)
293 {
294     int i;
295 
296     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
297     for (i = 0; i < NB_MMU_MODES; i++) {
298         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
299         CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
300 
301         g_free(fast->table);
302         g_free(desc->fulltlb);
303     }
304 }
305 
306 /* flush_all_helper: run fn across all cpus except the source cpu
307  *
308  * Callers that need a synchronisation point additionally queue the
309  * src cpu's helper as "safe" work and exit the cpu loop, creating a
310  * synchronisation point where all queued work will be finished
311  * before execution starts again.
312  */
313 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
314                              run_on_cpu_data d)
315 {
316     CPUState *cpu;
317 
318     CPU_FOREACH(cpu) {
319         if (cpu != src) {
320             async_run_on_cpu(cpu, fn, d);
321         }
322     }
323 }
324 
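/*
 * Flush, on the current cpu, every MMU index that is both requested in
 * @data and marked dirty; requested indexes that are already clean are
 * elided and only accounted in the flush statistics.
 */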
325 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
326 {
327     uint16_t asked = data.host_int;
328     uint16_t all_dirty, work, to_clean;
329     int64_t now = get_clock_realtime();
330 
331     assert_cpu_is_self(cpu);
332 
333     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
334 
335     qemu_spin_lock(&cpu->neg.tlb.c.lock);
336 
337     all_dirty = cpu->neg.tlb.c.dirty;
338     to_clean = asked & all_dirty;
339     all_dirty &= ~to_clean;
340     cpu->neg.tlb.c.dirty = all_dirty;
341 
342     for (work = to_clean; work != 0; work &= work - 1) {
343         int mmu_idx = ctz32(work);
344         tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
345     }
346 
347     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
348 
349     tcg_flush_jmp_cache(cpu);
350 
351     if (to_clean == ALL_MMUIDX_BITS) {
352         qatomic_set(&cpu->neg.tlb.c.full_flush_count,
353                     cpu->neg.tlb.c.full_flush_count + 1);
354     } else {
355         qatomic_set(&cpu->neg.tlb.c.part_flush_count,
356                     cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
357         if (to_clean != asked) {
358             qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
359                         cpu->neg.tlb.c.elide_flush_count +
360                         ctpop16(asked & ~to_clean));
361         }
362     }
363 }
364 
365 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
366 {
367     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
368 
369     if (cpu->created && !qemu_cpu_is_self(cpu)) {
370         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
371                          RUN_ON_CPU_HOST_INT(idxmap));
372     } else {
373         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
374     }
375 }
376 
377 void tlb_flush(CPUState *cpu)
378 {
379     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
380 }
381 
382 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
383 {
384     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
385 
386     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
387 
388     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
389     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
390 }
391 
392 void tlb_flush_all_cpus(CPUState *src_cpu)
393 {
394     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
395 }
396 
397 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
398 {
399     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
400 
401     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
402 
403     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
404     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
405 }
406 
407 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
408 {
409     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
410 }
411 
412 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
413                                       vaddr page, vaddr mask)
414 {
415     page &= mask;
416     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
417 
418     return (page == (tlb_entry->addr_read & mask) ||
419             page == (tlb_addr_write(tlb_entry) & mask) ||
420             page == (tlb_entry->addr_code & mask));
421 }
422 
423 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
424 {
425     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
426 }
427 
428 /**
429  * tlb_entry_is_empty - return true if the entry is not in use
430  * @te: pointer to CPUTLBEntry
431  */
432 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
433 {
434     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
435 }
436 
437 /* Called with tlb_c.lock held */
438 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
439                                         vaddr page,
440                                         vaddr mask)
441 {
442     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
443         memset(tlb_entry, -1, sizeof(*tlb_entry));
444         return true;
445     }
446     return false;
447 }
448 
449 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
450 {
451     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
452 }
453 
454 /* Called with tlb_c.lock held */
455 static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
456                                             vaddr page,
457                                             vaddr mask)
458 {
459     CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
460     int k;
461 
462     assert_cpu_is_self(cpu);
463     for (k = 0; k < CPU_VTLB_SIZE; k++) {
464         if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
465             tlb_n_used_entries_dec(cpu, mmu_idx);
466         }
467     }
468 }
469 
470 static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
471                                               vaddr page)
472 {
473     tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
474 }
475 
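/*
 * Flush a single page from MMU index @midx, falling back to a full
 * flush of that index when the page lies within a tracked large page.
 */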
476 static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
477 {
478     vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
479     vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
480 
481     /* Check if we need to flush due to large pages.  */
482     if ((page & lp_mask) == lp_addr) {
483         tlb_debug("forcing full flush midx %d (%016"
484                   VADDR_PRIx "/%016" VADDR_PRIx ")\n",
485                   midx, lp_addr, lp_mask);
486         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
487     } else {
488         if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
489             tlb_n_used_entries_dec(cpu, midx);
490         }
491         tlb_flush_vtlb_page_locked(cpu, midx, page);
492     }
493 }
494 
495 /**
496  * tlb_flush_page_by_mmuidx_async_0:
497  * @cpu: cpu on which to flush
498  * @addr: page of virtual address to flush
499  * @idxmap: set of mmu_idx to flush
500  *
501  * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
502  * at @addr from the tlbs indicated by @idxmap from @cpu.
503  */
504 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
505                                              vaddr addr,
506                                              uint16_t idxmap)
507 {
508     int mmu_idx;
509 
510     assert_cpu_is_self(cpu);
511 
512     tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
513 
514     qemu_spin_lock(&cpu->neg.tlb.c.lock);
515     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
516         if ((idxmap >> mmu_idx) & 1) {
517             tlb_flush_page_locked(cpu, mmu_idx, addr);
518         }
519     }
520     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
521 
522     /*
523      * Discard jump cache entries for any tb which might potentially
524      * overlap the flushed page, including the previous page (a tb may span two pages).
525      */
526     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
527     tb_jmp_cache_clear_page(cpu, addr);
528 }
529 
530 /**
531  * tlb_flush_page_by_mmuidx_async_1:
532  * @cpu: cpu on which to flush
533  * @data: encoded addr + idxmap
534  *
535  * Helper for tlb_flush_page_by_mmuidx and friends, called through
536  * async_run_on_cpu.  The idxmap parameter is encoded in the page
537  * offset of the target_ptr field.  This limits the set of mmu_idx
538  * that can be passed via this method.
539  */
540 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
541                                              run_on_cpu_data data)
542 {
543     vaddr addr_and_idxmap = data.target_ptr;
544     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
545     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
546 
547     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
548 }
549 
550 typedef struct {
551     vaddr addr;
552     uint16_t idxmap;
553 } TLBFlushPageByMMUIdxData;
554 
555 /**
556  * tlb_flush_page_by_mmuidx_async_2:
557  * @cpu: cpu on which to flush
558  * @data: allocated addr + idxmap
559  *
560  * Helper for tlb_flush_page_by_mmuidx and friends, called through
561  * async_run_on_cpu.  The addr+idxmap parameters are stored in a
562  * TLBFlushPageByMMUIdxData structure that has been allocated
563  * specifically for this helper.  Free the structure when done.
564  */
565 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
566                                              run_on_cpu_data data)
567 {
568     TLBFlushPageByMMUIdxData *d = data.host_ptr;
569 
570     tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
571     g_free(d);
572 }
573 
574 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
575 {
576     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
577 
578     /* This should already be page aligned */
579     addr &= TARGET_PAGE_MASK;
580 
581     if (qemu_cpu_is_self(cpu)) {
582         tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
583     } else if (idxmap < TARGET_PAGE_SIZE) {
584         /*
585          * Most targets have only a few mmu_idx.  In the case where
586          * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
587          * allocating memory for this operation.
588          */
589         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
590                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
591     } else {
592         TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
593 
594         /* Otherwise allocate a structure, freed by the worker.  */
595         d->addr = addr;
596         d->idxmap = idxmap;
597         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
598                          RUN_ON_CPU_HOST_PTR(d));
599     }
600 }
601 
602 void tlb_flush_page(CPUState *cpu, vaddr addr)
603 {
604     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
605 }
606 
607 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
608                                        uint16_t idxmap)
609 {
610     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
611 
612     /* This should already be page aligned */
613     addr &= TARGET_PAGE_MASK;
614 
615     /*
616      * Allocate memory to hold addr+idxmap only when needed.
617      * See tlb_flush_page_by_mmuidx for details.
618      */
619     if (idxmap < TARGET_PAGE_SIZE) {
620         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
621                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
622     } else {
623         CPUState *dst_cpu;
624 
625         /* Allocate a separate data block for each destination cpu.  */
626         CPU_FOREACH(dst_cpu) {
627             if (dst_cpu != src_cpu) {
628                 TLBFlushPageByMMUIdxData *d
629                     = g_new(TLBFlushPageByMMUIdxData, 1);
630 
631                 d->addr = addr;
632                 d->idxmap = idxmap;
633                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
634                                  RUN_ON_CPU_HOST_PTR(d));
635             }
636         }
637     }
638 
639     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
640 }
641 
642 void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
643 {
644     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
645 }
646 
647 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
648                                               vaddr addr,
649                                               uint16_t idxmap)
650 {
651     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
652 
653     /* This should already be page aligned */
654     addr &= TARGET_PAGE_MASK;
655 
656     /*
657      * Allocate memory to hold addr+idxmap only when needed.
658      * See tlb_flush_page_by_mmuidx for details.
659      */
660     if (idxmap < TARGET_PAGE_SIZE) {
661         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
662                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
663         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
664                               RUN_ON_CPU_TARGET_PTR(addr | idxmap));
665     } else {
666         CPUState *dst_cpu;
667         TLBFlushPageByMMUIdxData *d;
668 
669         /* Allocate a separate data block for each destination cpu.  */
670         CPU_FOREACH(dst_cpu) {
671             if (dst_cpu != src_cpu) {
672                 d = g_new(TLBFlushPageByMMUIdxData, 1);
673                 d->addr = addr;
674                 d->idxmap = idxmap;
675                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
676                                  RUN_ON_CPU_HOST_PTR(d));
677             }
678         }
679 
680         d = g_new(TLBFlushPageByMMUIdxData, 1);
681         d->addr = addr;
682         d->idxmap = idxmap;
683         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
684                               RUN_ON_CPU_HOST_PTR(d));
685     }
686 }
687 
688 void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
689 {
690     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
691 }
692 
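/*
 * Flush all TLB entries covering [addr, addr + len) from MMU index
 * @midx, comparing addresses under a mask of the low @bits bits.
 * Falls back to a full flush of the MMU index when the mask or length
 * make a per-entry flush impractical, or when a tracked large page
 * overlaps the range.
 */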
693 static void tlb_flush_range_locked(CPUState *cpu, int midx,
694                                    vaddr addr, vaddr len,
695                                    unsigned bits)
696 {
697     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
698     CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
699     vaddr mask = MAKE_64BIT_MASK(0, bits);
700 
701     /*
702      * If @bits is smaller than the tlb size, there may be multiple entries
703      * within the TLB; otherwise all addresses that match under @mask hit
704      * the same TLB entry.
705      * TODO: Perhaps allow bits to be a few bits less than the size.
706      * For now, just flush the entire TLB.
707      *
708      * If @len is larger than the tlb size, then it will take longer to
709      * test all of the entries in the TLB than it will to flush it all.
710      */
711     if (mask < f->mask || len > f->mask) {
712         tlb_debug("forcing full flush midx %d ("
713                   "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
714                   midx, addr, mask, len);
715         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
716         return;
717     }
718 
719     /*
720      * Check if we need to flush due to large pages.
721      * Because large_page_mask contains all 1's from the msb,
722      * we only need to test the end of the range.
723      */
724     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
725         tlb_debug("forcing full flush midx %d ("
726                   "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
727                   midx, d->large_page_addr, d->large_page_mask);
728         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
729         return;
730     }
731 
732     for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
733         vaddr page = addr + i;
734         CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
735 
736         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
737             tlb_n_used_entries_dec(cpu, midx);
738         }
739         tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
740     }
741 }
742 
743 typedef struct {
744     vaddr addr;
745     vaddr len;
746     uint16_t idxmap;
747     uint16_t bits;
748 } TLBFlushRangeData;
749 
750 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
751                                               TLBFlushRangeData d)
752 {
753     int mmu_idx;
754 
755     assert_cpu_is_self(cpu);
756 
757     tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
758               d.addr, d.bits, d.len, d.idxmap);
759 
760     qemu_spin_lock(&cpu->neg.tlb.c.lock);
761     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
762         if ((d.idxmap >> mmu_idx) & 1) {
763             tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
764         }
765     }
766     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
767 
768     /*
769      * If the length is larger than the jump cache size, then it will take
770      * longer to clear each entry individually than it will to clear it all.
771      */
772     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
773         tcg_flush_jmp_cache(cpu);
774         return;
775     }
776 
777     /*
778      * Discard jump cache entries for any tb which might potentially
779      * overlap the flushed pages, including the page before the range (a tb may span two pages).
780      */
781     d.addr -= TARGET_PAGE_SIZE;
782     for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
783         tb_jmp_cache_clear_page(cpu, d.addr);
784         d.addr += TARGET_PAGE_SIZE;
785     }
786 }
787 
788 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
789                                               run_on_cpu_data data)
790 {
791     TLBFlushRangeData *d = data.host_ptr;
792     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
793     g_free(d);
794 }
795 
796 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
797                                vaddr len, uint16_t idxmap,
798                                unsigned bits)
799 {
800     TLBFlushRangeData d;
801 
802     /*
803      * If all bits are significant, and len is small,
804      * this devolves to tlb_flush_page.
805      */
806     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
807         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
808         return;
809     }
810     /* If no page bits are significant, this devolves to tlb_flush. */
811     if (bits < TARGET_PAGE_BITS) {
812         tlb_flush_by_mmuidx(cpu, idxmap);
813         return;
814     }
815 
816     /* This should already be page aligned */
817     d.addr = addr & TARGET_PAGE_MASK;
818     d.len = len;
819     d.idxmap = idxmap;
820     d.bits = bits;
821 
822     if (qemu_cpu_is_self(cpu)) {
823         tlb_flush_range_by_mmuidx_async_0(cpu, d);
824     } else {
825         /* Otherwise allocate a structure, freed by the worker.  */
826         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
827         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
828                          RUN_ON_CPU_HOST_PTR(p));
829     }
830 }
831 
832 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
833                                    uint16_t idxmap, unsigned bits)
834 {
835     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
836 }
837 
838 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
839                                         vaddr addr, vaddr len,
840                                         uint16_t idxmap, unsigned bits)
841 {
842     TLBFlushRangeData d;
843     CPUState *dst_cpu;
844 
845     /*
846      * If all bits are significant, and len is small,
847      * this devolves to tlb_flush_page.
848      */
849     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
850         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
851         return;
852     }
853     /* If no page bits are significant, this devolves to tlb_flush. */
854     if (bits < TARGET_PAGE_BITS) {
855         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
856         return;
857     }
858 
859     /* This should already be page aligned */
860     d.addr = addr & TARGET_PAGE_MASK;
861     d.len = len;
862     d.idxmap = idxmap;
863     d.bits = bits;
864 
865     /* Allocate a separate data block for each destination cpu.  */
866     CPU_FOREACH(dst_cpu) {
867         if (dst_cpu != src_cpu) {
868             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
869             async_run_on_cpu(dst_cpu,
870                              tlb_flush_range_by_mmuidx_async_1,
871                              RUN_ON_CPU_HOST_PTR(p));
872         }
873     }
874 
875     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
876 }
877 
878 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
879                                             vaddr addr, uint16_t idxmap,
880                                             unsigned bits)
881 {
882     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
883                                        idxmap, bits);
884 }
885 
886 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
887                                                vaddr addr,
888                                                vaddr len,
889                                                uint16_t idxmap,
890                                                unsigned bits)
891 {
892     TLBFlushRangeData d, *p;
893     CPUState *dst_cpu;
894 
895     /*
896      * If all bits are significant, and len is small,
897      * this devolves to tlb_flush_page.
898      */
899     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
900         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
901         return;
902     }
903     /* If no page bits are significant, this devolves to tlb_flush. */
904     if (bits < TARGET_PAGE_BITS) {
905         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
906         return;
907     }
908 
909     /* This should already be page aligned */
910     d.addr = addr & TARGET_PAGE_MASK;
911     d.len = len;
912     d.idxmap = idxmap;
913     d.bits = bits;
914 
915     /* Allocate a separate data block for each destination cpu.  */
916     CPU_FOREACH(dst_cpu) {
917         if (dst_cpu != src_cpu) {
918             p = g_memdup(&d, sizeof(d));
919             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
920                              RUN_ON_CPU_HOST_PTR(p));
921         }
922     }
923 
924     p = g_memdup(&d, sizeof(d));
925     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
926                           RUN_ON_CPU_HOST_PTR(p));
927 }
928 
929 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
930                                                    vaddr addr,
931                                                    uint16_t idxmap,
932                                                    unsigned bits)
933 {
934     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
935                                               idxmap, bits);
936 }
937 
938 /* update the TLBs so that writes to code in the physical page 'ram_addr'
939    can be detected */
940 void tlb_protect_code(ram_addr_t ram_addr)
941 {
942     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
943                                              TARGET_PAGE_SIZE,
944                                              DIRTY_MEMORY_CODE);
945 }
946 
947 /* update the TLB so that writes in physical page 'ram_addr' are no longer
948    tested for self-modifying code */
949 void tlb_unprotect_code(ram_addr_t ram_addr)
950 {
951     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
952 }
953 
954 
955 /*
956  * Dirty write flag handling
957  *
958  * When the TCG code writes to a location it looks up the address in
959  * the TLB and uses that data to compute the final address. If any of
960  * the lower bits of the address are set then the slow path is forced.
961  * There are a number of reasons to do this but for normal RAM the
962  * most usual is detecting writes to code regions which may invalidate
963  * generated code.
964  *
965  * Other vCPUs might be reading their TLBs during guest execution, so we update
966  * te->addr_write with qatomic_set. We don't need to worry about this for
967  * oversized guests as MTTCG is disabled for them.
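 *
 * For example, a store to a page whose RAM is still clean finds
 * TLB_NOTDIRTY set in addr_write; the fast-path comparison therefore
 * fails and the store takes the slow path, which can invalidate any
 * TBs on the page and, once the page is dirty, drop the flag again
 * via tlb_set_dirty().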
968  *
969  * Called with tlb_c.lock held.
970  */
971 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
972                                          uintptr_t start, uintptr_t length)
973 {
974     uintptr_t addr = tlb_entry->addr_write;
975 
976     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
977                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
978         addr &= TARGET_PAGE_MASK;
979         addr += tlb_entry->addend;
980         if ((addr - start) < length) {
981 #if TARGET_LONG_BITS == 32
982             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
983             ptr_write += HOST_BIG_ENDIAN;
984             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
985 #elif TCG_OVERSIZED_GUEST
986             tlb_entry->addr_write |= TLB_NOTDIRTY;
987 #else
988             qatomic_set(&tlb_entry->addr_write,
989                         tlb_entry->addr_write | TLB_NOTDIRTY);
990 #endif
991         }
992     }
993 }
994 
995 /*
996  * Called with tlb_c.lock held.
997  * Called only from the vCPU context, i.e. the TLB's owner thread.
998  */
999 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1000 {
1001     *d = *s;
1002 }
1003 
1004 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1005  * the target vCPU).
1006  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1007  * thing actually updated is the target TLB entry ->addr_write flags.
1008  */
1009 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1010 {
1011     int mmu_idx;
1012 
1013     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1014     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1015         unsigned int i;
1016         unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
1017 
1018         for (i = 0; i < n; i++) {
1019             tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
1020                                          start1, length);
1021         }
1022 
1023         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1024             tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
1025                                          start1, length);
1026         }
1027     }
1028     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1029 }
1030 
1031 /* Called with tlb_c.lock held */
1032 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1033                                          vaddr addr)
1034 {
1035     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1036         tlb_entry->addr_write = addr;
1037     }
1038 }
1039 
1040 /* update the TLB corresponding to virtual page addr
1041    so that writes are no longer trapped as not-dirty */
1042 void tlb_set_dirty(CPUState *cpu, vaddr addr)
1043 {
1044     int mmu_idx;
1045 
1046     assert_cpu_is_self(cpu);
1047 
1048     addr &= TARGET_PAGE_MASK;
1049     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1050     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1051         tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
1052     }
1053 
1054     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1055         int k;
1056         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1057             tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
1058         }
1059     }
1060     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1061 }
1062 
1063 /* Our TLB does not support large pages, so remember the area covered by
1064    large pages and trigger a full TLB flush if these are invalidated.  */
1065 static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
1066                                vaddr addr, uint64_t size)
1067 {
1068     vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
1069     vaddr lp_mask = ~(size - 1);
1070 
1071     if (lp_addr == (vaddr)-1) {
1072         /* No previous large page.  */
1073         lp_addr = addr;
1074     } else {
1075         /* Extend the existing region to include the new page.
1076            This is a compromise between unnecessary flushes and
1077            the cost of maintaining a full variable size TLB.  */
1078         lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
1079         while (((lp_addr ^ addr) & lp_mask) != 0) {
1080             lp_mask <<= 1;
1081         }
1082     }
1083     cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1084     cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
1085 }
1086 
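/*
 * Set the comparator and slow-path flags for one access type of a TLB
 * entry: fast flags are folded into the comparator address, while slow
 * flags are kept in the CPUTLBEntryFull and signalled via TLB_FORCE_SLOW.
 * A disabled access type gets a comparator of -1 so it never matches.
 */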
1087 static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1088                                    vaddr address, int flags,
1089                                    MMUAccessType access_type, bool enable)
1090 {
1091     if (enable) {
1092         address |= flags & TLB_FLAGS_MASK;
1093         flags &= TLB_SLOW_FLAGS_MASK;
1094         if (flags) {
1095             address |= TLB_FORCE_SLOW;
1096         }
1097     } else {
1098         address = -1;
1099         flags = 0;
1100     }
1101     ent->addr_idx[access_type] = address;
1102     full->slow_flags[access_type] = flags;
1103 }
1104 
1105 /*
1106  * Add a new TLB entry. At most one entry for a given virtual address
1107  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
1108  * supplied size is only used by tlb_flush_page.
1109  *
1110  * Called from TCG-generated code, which is under an RCU read-side
1111  * critical section.
1112  */
1113 void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1114                        vaddr addr, CPUTLBEntryFull *full)
1115 {
1116     CPUTLB *tlb = &cpu->neg.tlb;
1117     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1118     MemoryRegionSection *section;
1119     unsigned int index, read_flags, write_flags;
1120     uintptr_t addend;
1121     CPUTLBEntry *te, tn;
1122     hwaddr iotlb, xlat, sz, paddr_page;
1123     vaddr addr_page;
1124     int asidx, wp_flags, prot;
1125     bool is_ram, is_romd;
1126 
1127     assert_cpu_is_self(cpu);
1128 
1129     if (full->lg_page_size <= TARGET_PAGE_BITS) {
1130         sz = TARGET_PAGE_SIZE;
1131     } else {
1132         sz = (hwaddr)1 << full->lg_page_size;
1133         tlb_add_large_page(cpu, mmu_idx, addr, sz);
1134     }
1135     addr_page = addr & TARGET_PAGE_MASK;
1136     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1137 
1138     prot = full->prot;
1139     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1140     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1141                                                 &xlat, &sz, full->attrs, &prot);
1142     assert(sz >= TARGET_PAGE_SIZE);
1143 
1144     tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1145               " prot=%x idx=%d\n",
1146               addr, full->phys_addr, prot, mmu_idx);
1147 
1148     read_flags = full->tlb_fill_flags;
1149     if (full->lg_page_size < TARGET_PAGE_BITS) {
1150         /* Repeat the MMU check and TLB fill on every access.  */
1151         read_flags |= TLB_INVALID_MASK;
1152     }
1153 
1154     is_ram = memory_region_is_ram(section->mr);
1155     is_romd = memory_region_is_romd(section->mr);
1156 
1157     if (is_ram || is_romd) {
1158         /* RAM and ROMD both have associated host memory. */
1159         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1160     } else {
1161         /* I/O does not; force the host address to NULL. */
1162         addend = 0;
1163     }
1164 
1165     write_flags = read_flags;
1166     if (is_ram) {
1167         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1168         assert(!(iotlb & ~TARGET_PAGE_MASK));
1169         /*
1170          * Computing is_clean is expensive; avoid all that unless
1171          * the page is actually writable.
1172          */
1173         if (prot & PAGE_WRITE) {
1174             if (section->readonly) {
1175                 write_flags |= TLB_DISCARD_WRITE;
1176             } else if (cpu_physical_memory_is_clean(iotlb)) {
1177                 write_flags |= TLB_NOTDIRTY;
1178             }
1179         }
1180     } else {
1181         /* I/O or ROMD */
1182         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1183         /*
1184          * Writes to romd devices must go through MMIO to enable write.
1185          * Reads to romd devices go through the ram_ptr found above,
1186          * but of course reads to I/O must go through MMIO.
1187          */
1188         write_flags |= TLB_MMIO;
1189         if (!is_romd) {
1190             read_flags = write_flags;
1191         }
1192     }
1193 
1194     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
1195                                               TARGET_PAGE_SIZE);
1196 
1197     index = tlb_index(cpu, mmu_idx, addr_page);
1198     te = tlb_entry(cpu, mmu_idx, addr_page);
1199 
1200     /*
1201      * Hold the TLB lock for the rest of the function. We could acquire/release
1202      * the lock several times in the function, but it is faster to amortize the
1203      * acquisition cost by acquiring it just once. Note that this leads to
1204      * a longer critical section, but this is not a concern since the TLB lock
1205      * is unlikely to be contended.
1206      */
1207     qemu_spin_lock(&tlb->c.lock);
1208 
1209     /* Note that the tlb is no longer clean.  */
1210     tlb->c.dirty |= 1 << mmu_idx;
1211 
1212     /* Make sure there's no cached translation for the new page.  */
1213     tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
1214 
1215     /*
1216      * Only evict the old entry to the victim tlb if it's for a
1217      * different page; otherwise just overwrite the stale data.
1218      */
1219     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1220         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1221         CPUTLBEntry *tv = &desc->vtable[vidx];
1222 
1223         /* Evict the old entry into the victim tlb.  */
1224         copy_tlb_helper_locked(tv, te);
1225         desc->vfulltlb[vidx] = desc->fulltlb[index];
1226         tlb_n_used_entries_dec(cpu, mmu_idx);
1227     }
1228 
1229     /* refill the tlb */
1230     /*
1231      * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
1232      * aligned ram_addr_t of the page base of the target RAM.
1233      * Otherwise, iotlb contains
1234      *  - a physical section number in the lower TARGET_PAGE_BITS
1235      *  - the offset within section->mr of the page base (I/O, ROMD) with the
1236      *    TARGET_PAGE_BITS masked off.
1237      * We subtract addr_page (which is page aligned and thus won't
1238      * disturb the low bits) to give an offset which can be added to the
1239      * (non-page-aligned) vaddr of the eventual memory access to get
1240      * the MemoryRegion offset for the access. Note that the vaddr we
1241      * subtract here is that of the page base, and not the same as the
1242      * vaddr we add back in io_prepare()/get_page_addr_code().
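     *
     * As an illustration, for a RAM page: if the guest later accesses
     * vaddr = addr_page + 0x40, then full->xlat_section + vaddr is the
     * ram_addr_t of that byte, because the addr_page term cancels and
     * only the page-base ram_addr plus the 0x40 offset remains.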
1243      */
1244     desc->fulltlb[index] = *full;
1245     full = &desc->fulltlb[index];
1246     full->xlat_section = iotlb - addr_page;
1247     full->phys_addr = paddr_page;
1248 
1249     /* Now calculate the new entry */
1250     tn.addend = addend - addr_page;
1251 
1252     tlb_set_compare(full, &tn, addr_page, read_flags,
1253                     MMU_INST_FETCH, prot & PAGE_EXEC);
1254 
1255     if (wp_flags & BP_MEM_READ) {
1256         read_flags |= TLB_WATCHPOINT;
1257     }
1258     tlb_set_compare(full, &tn, addr_page, read_flags,
1259                     MMU_DATA_LOAD, prot & PAGE_READ);
1260 
1261     if (prot & PAGE_WRITE_INV) {
1262         write_flags |= TLB_INVALID_MASK;
1263     }
1264     if (wp_flags & BP_MEM_WRITE) {
1265         write_flags |= TLB_WATCHPOINT;
1266     }
1267     tlb_set_compare(full, &tn, addr_page, write_flags,
1268                     MMU_DATA_STORE, prot & PAGE_WRITE);
1269 
1270     copy_tlb_helper_locked(te, &tn);
1271     tlb_n_used_entries_inc(cpu, mmu_idx);
1272     qemu_spin_unlock(&tlb->c.lock);
1273 }
1274 
1275 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
1276                              hwaddr paddr, MemTxAttrs attrs, int prot,
1277                              int mmu_idx, uint64_t size)
1278 {
1279     CPUTLBEntryFull full = {
1280         .phys_addr = paddr,
1281         .attrs = attrs,
1282         .prot = prot,
1283         .lg_page_size = ctz64(size)
1284     };
1285 
1286     assert(is_power_of_2(size));
1287     tlb_set_page_full(cpu, mmu_idx, addr, &full);
1288 }
1289 
1290 void tlb_set_page(CPUState *cpu, vaddr addr,
1291                   hwaddr paddr, int prot,
1292                   int mmu_idx, uint64_t size)
1293 {
1294     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1295                             prot, mmu_idx, size);
1296 }
1297 
1298 /*
1299  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1300  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1301  * be discarded and looked up again (e.g. via tlb_entry()).
1302  */
1303 static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1304                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1305 {
1306     bool ok;
1307 
1308     /*
1309      * This is not a probe, so the only valid return is success; failure
1310      * should result in exception + longjmp to the cpu loop.
1311      */
1312     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1313                                     access_type, mmu_idx, false, retaddr);
1314     assert(ok);
1315 }
1316 
1317 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1318                                         MMUAccessType access_type,
1319                                         int mmu_idx, uintptr_t retaddr)
1320 {
1321     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1322                                           mmu_idx, retaddr);
1323 }
1324 
1325 static MemoryRegionSection *
1326 io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
1327            MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
1328 {
1329     MemoryRegionSection *section;
1330     hwaddr mr_offset;
1331 
1332     section = iotlb_to_section(cpu, xlat, attrs);
1333     mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
1334     cpu->mem_io_pc = retaddr;
1335     if (!cpu->neg.can_do_io) {
1336         cpu_io_recompile(cpu, retaddr);
1337     }
1338 
1339     *out_offset = mr_offset;
1340     return section;
1341 }
1342 
1343 static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
1344                       unsigned size, MMUAccessType access_type, int mmu_idx,
1345                       MemTxResult response, uintptr_t retaddr)
1346 {
1347     if (!cpu->ignore_memory_transaction_failures
1348         && cpu->cc->tcg_ops->do_transaction_failed) {
1349         hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1350 
1351         cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1352                                                 access_type, mmu_idx,
1353                                                 full->attrs, response, retaddr);
1354     }
1355 }
1356 
1357 /* Return true if ADDR is present in the victim tlb, and has been copied
1358    back to the main tlb.  */
1359 static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
1360                            MMUAccessType access_type, vaddr page)
1361 {
1362     size_t vidx;
1363 
1364     assert_cpu_is_self(cpu);
1365     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1366         CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
1367         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1368 
1369         if (cmp == page) {
1370             /* Found entry in victim tlb, swap tlb and iotlb.  */
1371             CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
1372 
1373             qemu_spin_lock(&cpu->neg.tlb.c.lock);
1374             copy_tlb_helper_locked(&tmptlb, tlb);
1375             copy_tlb_helper_locked(tlb, vtlb);
1376             copy_tlb_helper_locked(vtlb, &tmptlb);
1377             qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1378 
1379             CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1380             CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
1381             CPUTLBEntryFull tmpf;
1382             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1383             return true;
1384         }
1385     }
1386     return false;
1387 }
1388 
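/*
 * Handle a write to a page that still has TLB_NOTDIRTY set: flush any
 * translated code on the page, mark the page dirty for the VGA and
 * migration bitmaps, and, once the page is no longer clean, remove the
 * not-dirty trap from the TLB so that later writes take the fast path.
 */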
1389 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1390                            CPUTLBEntryFull *full, uintptr_t retaddr)
1391 {
1392     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1393 
1394     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1395 
1396     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1397         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1398     }
1399 
1400     /*
1401      * Set both VGA and migration bits for simplicity and to remove
1402      * the notdirty callback faster.
1403      */
1404     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1405 
1406     /* We remove the notdirty callback only if the code has been flushed. */
1407     if (!cpu_physical_memory_is_clean(ram_addr)) {
1408         trace_memory_notdirty_set_dirty(mem_vaddr);
1409         tlb_set_dirty(cpu, mem_vaddr);
1410     }
1411 }
1412 
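/*
 * Look up @addr in the TLB, refilling it via the target's tlb_fill hook
 * if necessary, and return the TLB_* flags for the access together with
 * a host pointer in *phost and the entry in *pfull.  With @nonfault, a
 * failed page table walk returns TLB_INVALID_MASK instead of faulting.
 */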
1413 static int probe_access_internal(CPUState *cpu, vaddr addr,
1414                                  int fault_size, MMUAccessType access_type,
1415                                  int mmu_idx, bool nonfault,
1416                                  void **phost, CPUTLBEntryFull **pfull,
1417                                  uintptr_t retaddr, bool check_mem_cbs)
1418 {
1419     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1420     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1421     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1422     vaddr page_addr = addr & TARGET_PAGE_MASK;
1423     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1424     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
1425     CPUTLBEntryFull *full;
1426 
1427     if (!tlb_hit_page(tlb_addr, page_addr)) {
1428         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
1429             if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
1430                                             mmu_idx, nonfault, retaddr)) {
1431                 /* Non-faulting page table read failed.  */
1432                 *phost = NULL;
1433                 *pfull = NULL;
1434                 return TLB_INVALID_MASK;
1435             }
1436 
1437             /* TLB resize via tlb_fill may have moved the entry.  */
1438             index = tlb_index(cpu, mmu_idx, addr);
1439             entry = tlb_entry(cpu, mmu_idx, addr);
1440 
1441             /*
1442              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1443              * to force the next access through tlb_fill.  We've just
1444              * called tlb_fill, so we know that this entry *is* valid.
1445              */
1446             flags &= ~TLB_INVALID_MASK;
1447         }
1448         tlb_addr = tlb_read_idx(entry, access_type);
1449     }
1450     flags &= tlb_addr;
1451 
1452     *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1453     flags |= full->slow_flags[access_type];
1454 
1455     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1456     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
1457         || (access_type != MMU_INST_FETCH && force_mmio)) {
1458         *phost = NULL;
1459         return TLB_MMIO;
1460     }
1461 
1462     /* Everything else is RAM. */
1463     *phost = (void *)((uintptr_t)addr + entry->addend);
1464     return flags;
1465 }
1466 
1467 int probe_access_full(CPUArchState *env, vaddr addr, int size,
1468                       MMUAccessType access_type, int mmu_idx,
1469                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1470                       uintptr_t retaddr)
1471 {
1472     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1473                                       mmu_idx, nonfault, phost, pfull, retaddr,
1474                                       true);
1475 
1476     /* Handle clean RAM pages.  */
1477     if (unlikely(flags & TLB_NOTDIRTY)) {
1478         int dirtysize = size == 0 ? 1 : size;
1479         notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
1480         flags &= ~TLB_NOTDIRTY;
1481     }
1482 
1483     return flags;
1484 }
1485 
1486 int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1487                           MMUAccessType access_type, int mmu_idx,
1488                           void **phost, CPUTLBEntryFull **pfull)
1489 {
1490     void *discard_phost;
1491     CPUTLBEntryFull *discard_tlb;
1492 
1493     /* privately handle users that don't need full results */
1494     phost = phost ? phost : &discard_phost;
1495     pfull = pfull ? pfull : &discard_tlb;
1496 
1497     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1498                                       mmu_idx, true, phost, pfull, 0, false);
1499 
1500     /* Handle clean RAM pages.  */
1501     if (unlikely(flags & TLB_NOTDIRTY)) {
1502         int dirtysize = size == 0 ? 1 : size;
1503         notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
1504         flags &= ~TLB_NOTDIRTY;
1505     }
1506 
1507     return flags;
1508 }
1509 
1510 int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1511                        MMUAccessType access_type, int mmu_idx,
1512                        bool nonfault, void **phost, uintptr_t retaddr)
1513 {
1514     CPUTLBEntryFull *full;
1515     int flags;
1516 
1517     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1518 
1519     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1520                                   mmu_idx, nonfault, phost, &full, retaddr,
1521                                   true);
1522 
1523     /* Handle clean RAM pages. */
1524     if (unlikely(flags & TLB_NOTDIRTY)) {
1525         int dirtysize = size == 0 ? 1 : size;
1526         notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
1527         flags &= ~TLB_NOTDIRTY;
1528     }
1529 
1530     return flags;
1531 }
1532 
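/*
 * Illustrative sketch (not part of the original file): one way a target
 * helper could use probe_access_flags() above to ask whether a page is
 * writable without raising a guest exception.  The helper name and the
 * way mmu_idx/ra are obtained are hypothetical; only the contract of
 * probe_access_flags() is assumed.
 */
#if 0
static bool example_page_is_writable(CPUArchState *env, vaddr addr,
                                     int mmu_idx, uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr & TARGET_PAGE_MASK, 1,
                                   MMU_DATA_STORE, mmu_idx,
                                   true /* nonfault */, &host, ra);

    /* TLB_INVALID_MASK set means the non-faulting translation failed. */
    return !(flags & TLB_INVALID_MASK);
}
#endif
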
1533 void *probe_access(CPUArchState *env, vaddr addr, int size,
1534                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1535 {
1536     CPUTLBEntryFull *full;
1537     void *host;
1538     int flags;
1539 
1540     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1541 
1542     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1543                                   mmu_idx, false, &host, &full, retaddr,
1544                                   true);
1545 
1546     /* Per the interface, size == 0 merely faults the access. */
1547     if (size == 0) {
1548         return NULL;
1549     }
1550 
1551     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1552         /* Handle watchpoints.  */
1553         if (flags & TLB_WATCHPOINT) {
1554             int wp_access = (access_type == MMU_DATA_STORE
1555                              ? BP_MEM_WRITE : BP_MEM_READ);
1556             cpu_check_watchpoint(env_cpu(env), addr, size,
1557                                  full->attrs, wp_access, retaddr);
1558         }
1559 
1560         /* Handle clean RAM pages.  */
1561         if (flags & TLB_NOTDIRTY) {
1562             notdirty_write(env_cpu(env), addr, size, full, retaddr);
1563         }
1564     }
1565 
1566     return host;
1567 }
1568 
1569 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1570                         MMUAccessType access_type, int mmu_idx)
1571 {
1572     CPUTLBEntryFull *full;
1573     void *host;
1574     int flags;
1575 
1576     flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
1577                                   mmu_idx, true, &host, &full, 0, false);
1578 
1579     /* No combination of flags is expected by the caller. */
1580     return flags ? NULL : host;
1581 }
1582 
1583 /*
1584  * Return a ram_addr_t for the virtual address for execution.
1585  *
1586  * Return -1 if we can't translate and execute from an entire page
1587  * of RAM.  This will force us to execute by loading and translating
1588  * one insn at a time, without caching.
1589  *
1590  * NOTE: This function will trigger an exception if the page is
1591  * not executable.
1592  */
1593 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
1594                                         void **hostp)
1595 {
1596     CPUTLBEntryFull *full;
1597     void *p;
1598 
1599     (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
1600                                 cpu_mmu_index(env_cpu(env), true), false,
1601                                 &p, &full, 0, false);
1602     if (p == NULL) {
1603         return -1;
1604     }
1605 
1606     if (full->lg_page_size < TARGET_PAGE_BITS) {
1607         return -1;
1608     }
1609 
1610     if (hostp) {
1611         *hostp = p;
1612     }
1613     return qemu_ram_addr_from_host_nofail(p);
1614 }
1615 
1616 /* Load/store with atomicity primitives. */
1617 #include "ldst_atomicity.c.inc"
1618 
1619 #ifdef CONFIG_PLUGIN
1620 /*
1621  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1622  * This should be a hot path as we will have just looked this address up
1623  * in the softmmu lookup code (or helper). We don't handle re-fills or
1624  * checking the victim table. This is purely informational.
1625  *
1626  * The one corner case is i/o write, which can cause changes to the
1627  * address space.  Those changes, and the corresponding tlb flush,
1628  * should be delayed until the next TB, so even then this ought not fail.
1629  * But check, just in case.
1630  */
1631 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1632                        bool is_store, struct qemu_plugin_hwaddr *data)
1633 {
1634     CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
1635     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1636     MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
1637     uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
1638     CPUTLBEntryFull *full;
1639 
1640     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1641         return false;
1642     }
1643 
1644     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1645     data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1646 
1647     /* We must have an iotlb entry for MMIO */
1648     if (tlb_addr & TLB_MMIO) {
1649         MemoryRegionSection *section =
1650             iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
1651                              full->attrs);
1652         data->is_io = true;
1653         data->mr = section->mr;
1654     } else {
1655         data->is_io = false;
1656         data->mr = NULL;
1657     }
1658     return true;
1659 }
1660 #endif
1661 
1662 /*
1663  * Probe for a load/store operation.
1664  * Return the host address and the TLB flags for the access.
1665  */
1666 
1667 typedef struct MMULookupPageData {
1668     CPUTLBEntryFull *full;
1669     void *haddr;
1670     vaddr addr;
1671     int flags;
1672     int size;
1673 } MMULookupPageData;
1674 
1675 typedef struct MMULookupLocals {
1676     MMULookupPageData page[2];
1677     MemOp memop;
1678     int mmu_idx;
1679 } MMULookupLocals;
1680 
1681 /**
1682  * mmu_lookup1: translate one page
1683  * @cpu: generic cpu state
1684  * @data: lookup parameters
1685  * @mmu_idx: virtual address context
1686  * @access_type: load/store/code
1687  * @ra: return address into tcg generated code, or 0
1688  *
1689  * Resolve the translation for the one page at @data.addr, filling in
1690  * the rest of @data with the results.  If the translation fails,
1691  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
1692  * @mmu_idx may have resized.
1693  */
1694 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
1695                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
1696 {
1697     vaddr addr = data->addr;
1698     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1699     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1700     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1701     bool maybe_resized = false;
1702     CPUTLBEntryFull *full;
1703     int flags;
1704 
1705     /* If the TLB entry is for a different page, reload and try again.  */
1706     if (!tlb_hit(tlb_addr, addr)) {
1707         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
1708                             addr & TARGET_PAGE_MASK)) {
1709             tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
1710             maybe_resized = true;
1711             index = tlb_index(cpu, mmu_idx, addr);
1712             entry = tlb_entry(cpu, mmu_idx, addr);
1713         }
1714         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
1715     }
1716 
1717     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1718     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
1719     flags |= full->slow_flags[access_type];
1720 
1721     data->full = full;
1722     data->flags = flags;
1723     /* Compute haddr speculatively; depending on flags it might be invalid. */
1724     data->haddr = (void *)((uintptr_t)addr + entry->addend);
1725 
1726     return maybe_resized;
1727 }
1728 
1729 /**
1730  * mmu_watch_or_dirty
1731  * @cpu: generic cpu state
1732  * @data: lookup parameters
1733  * @access_type: load/store/code
1734  * @ra: return address into tcg generated code, or 0
1735  *
1736  * Trigger watchpoints for @data.addr:@data.size;
1737  * record writes to protected clean pages.
1738  */
1739 static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
1740                                MMUAccessType access_type, uintptr_t ra)
1741 {
1742     CPUTLBEntryFull *full = data->full;
1743     vaddr addr = data->addr;
1744     int flags = data->flags;
1745     int size = data->size;
1746 
1747     /* On watchpoint hit, this will longjmp out.  */
1748     if (flags & TLB_WATCHPOINT) {
1749         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1750         cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
1751         flags &= ~TLB_WATCHPOINT;
1752     }
1753 
1754     /* Note that notdirty is only set for writes. */
1755     if (flags & TLB_NOTDIRTY) {
1756         notdirty_write(cpu, addr, size, full, ra);
1757         flags &= ~TLB_NOTDIRTY;
1758     }
1759     data->flags = flags;
1760 }
1761 
1762 /**
1763  * mmu_lookup: translate page(s)
1764  * @cpu: generic cpu state
1765  * @addr: virtual address
1766  * @oi: combined mmu_idx and MemOp
1767  * @ra: return address into tcg generated code, or 0
1768  * @access_type: load/store/code
1769  * @l: output result
1770  *
1771  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
1772  * bytes.  Return true if the lookup crosses a page boundary.
1773  */
1774 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1775                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
1776 {
1777     unsigned a_bits;
1778     bool crosspage;
1779     int flags;
1780 
1781     l->memop = get_memop(oi);
1782     l->mmu_idx = get_mmuidx(oi);
1783 
1784     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
1785 
1786     /* Handle CPU-specific unaligned behaviour. */
1787     a_bits = get_alignment_bits(l->memop);
1788     if (addr & ((1 << a_bits) - 1)) {
1789         cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
1790     }
1791 
1792     l->page[0].addr = addr;
1793     l->page[0].size = memop_size(l->memop);
1794     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
1795     l->page[1].size = 0;
1796     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
1797 
1798     if (likely(!crosspage)) {
1799         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1800 
1801         flags = l->page[0].flags;
1802         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1803             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1804         }
1805         if (unlikely(flags & TLB_BSWAP)) {
1806             l->memop ^= MO_BSWAP;
1807         }
1808     } else {
1809         /* Finish computing the page-crossing split. */
1810         int size0 = l->page[1].addr - addr;
1811         l->page[1].size = l->page[0].size - size0;
1812         l->page[0].size = size0;
1813 
1814         /*
1815          * Lookup both pages, recognizing exceptions from either.  If the
1816          * second lookup potentially resized, refresh first CPUTLBEntryFull.
1817          */
1818         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1819         if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
1820             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
1821             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
1822         }
1823 
1824         flags = l->page[0].flags | l->page[1].flags;
1825         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1826             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1827             mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
1828         }
1829 
1830         /*
1831          * Since target/sparc is the only user of TLB_BSWAP, and all
1832          * Sparc accesses are aligned, any treatment across two pages
1833          * would be arbitrary.  Refuse it until there's a use.
1834          */
1835         tcg_debug_assert((flags & TLB_BSWAP) == 0);
1836     }
1837 
1838     /*
1839      * This alignment check differs from the one above, in that this is
1840      * based on the atomicity of the operation. The intended use case is
1841      * the ARM memory type field of each PTE, where access to pages with
1842      * Device memory type requires alignment.
1843      */
1844     if (unlikely(flags & TLB_CHECK_ALIGNED)) {
1845         MemOp size = l->memop & MO_SIZE;
1846 
1847         switch (l->memop & MO_ATOM_MASK) {
1848         case MO_ATOM_NONE:
1849             size = MO_8;
1850             break;
1851         case MO_ATOM_IFALIGN_PAIR:
1852         case MO_ATOM_WITHIN16_PAIR:
1853             size = size ? size - 1 : 0;
1854             break;
1855         default:
1856             break;
1857         }
1858         if (addr & ((1 << size) - 1)) {
1859             cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
1860         }
1861     }
1862 
1863     return crosspage;
1864 }
1865 
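/*
 * Worked example (added for illustration): with 4KiB pages, an 8-byte
 * access at addr = 0x1ffd computes page[1].addr = (0x1ffd + 7) & mask
 * = 0x2000, so crosspage is true; the split is size0 = 0x2000 - 0x1ffd
 * = 3, leaving page[0].size = 3 and page[1].size = 5.
 */
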
1866 /*
1867  * Probe for an atomic operation.  Do not allow unaligned operations,
1868  * or io operations to proceed.  Return the host address.
1869  */
1870 static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1871                                int size, uintptr_t retaddr)
1872 {
1873     uintptr_t mmu_idx = get_mmuidx(oi);
1874     MemOp mop = get_memop(oi);
1875     int a_bits = get_alignment_bits(mop);
1876     uintptr_t index;
1877     CPUTLBEntry *tlbe;
1878     vaddr tlb_addr;
1879     void *hostaddr;
1880     CPUTLBEntryFull *full;
1881 
1882     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1883 
1884     /* Adjust the given return address.  */
1885     retaddr -= GETPC_ADJ;
1886 
1887     /* Enforce guest required alignment.  */
1888     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1889         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1890         cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
1891                              mmu_idx, retaddr);
1892     }
1893 
1894     /* Enforce qemu required alignment.  */
1895     if (unlikely(addr & (size - 1))) {
1896         /* We get here if guest alignment was not requested,
1897            or was not enforced by cpu_unaligned_access above.
1898            We might widen the access and emulate, but for now
1899            mark an exception and exit the cpu loop.  */
1900         goto stop_the_world;
1901     }
1902 
1903     index = tlb_index(cpu, mmu_idx, addr);
1904     tlbe = tlb_entry(cpu, mmu_idx, addr);
1905 
1906     /* Check TLB entry and enforce page permissions.  */
1907     tlb_addr = tlb_addr_write(tlbe);
1908     if (!tlb_hit(tlb_addr, addr)) {
1909         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
1910                             addr & TARGET_PAGE_MASK)) {
1911             tlb_fill(cpu, addr, size,
1912                      MMU_DATA_STORE, mmu_idx, retaddr);
1913             index = tlb_index(cpu, mmu_idx, addr);
1914             tlbe = tlb_entry(cpu, mmu_idx, addr);
1915         }
1916         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1917     }
1918 
1919     /*
1920      * Let the guest notice RMW on a write-only page.
1921      * We have just verified that the page is writable.
1922      * Subpage lookups may have left TLB_INVALID_MASK set,
1923      * but addr_read will only be -1 if PAGE_READ was unset.
1924      */
1925     if (unlikely(tlbe->addr_read == -1)) {
1926         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
1927         /*
1928          * Since we don't support reads and writes to different
1929          * addresses, and we do have the proper page loaded for
1930          * write, this shouldn't ever return.  But just in case,
1931          * handle via stop-the-world.
1932          */
1933         goto stop_the_world;
1934     }
1935     /* Collect tlb flags for read. */
1936     tlb_addr |= tlbe->addr_read;
1937 
1938     /* Notice an I/O access or a needs-MMU-lookup access. */
1939     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
1940         /* There's really nothing that can be done to
1941            support this apart from stop-the-world.  */
1942         goto stop_the_world;
1943     }
1944 
1945     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1946     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1947 
1948     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1949         notdirty_write(cpu, addr, size, full, retaddr);
1950     }
1951 
1952     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
1953         int wp_flags = 0;
1954 
1955         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
1956             wp_flags |= BP_MEM_WRITE;
1957         }
1958         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
1959             wp_flags |= BP_MEM_READ;
1960         }
1961         if (wp_flags) {
1962             cpu_check_watchpoint(cpu, addr, size,
1963                                  full->attrs, wp_flags, retaddr);
1964         }
1965     }
1966 
1967     return hostaddr;
1968 
1969  stop_the_world:
1970     cpu_loop_exit_atomic(cpu, retaddr);
1971 }
1972 
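/*
 * Note (added for clarity): cpu_loop_exit_atomic() raises EXCP_ATOMIC,
 * which makes the outer execution loop retry the current instruction in
 * exclusive ("stop-the-world") mode, where the operation can be emulated
 * non-atomically without racing against other vCPUs.
 */
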
1973 /*
1974  * Load Helpers
1975  *
1976  * We support two different access types. SOFTMMU_CODE_ACCESS is
1977  * specifically for reading instructions from system memory. It is
1978  * called by the translation loop and in some helpers where the code
1979  * is disassembled. It shouldn't be called directly by guest code.
1980  *
1981  * For the benefit of TCG generated code, we want to avoid the
1982  * complication of ABI-specific return type promotion and always
1983  * return a value extended to the register size of the host. This is
1984  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1985  * data, and for that we always have uint64_t.
1986  *
1987  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1988  */
1989 
1990 /**
1991  * do_ld_mmio_beN:
1992  * @cpu: generic cpu state
1993  * @full: page parameters
1994  * @ret_be: accumulated data
1995  * @addr: virtual address
1996  * @size: number of bytes
1997  * @mmu_idx: virtual address context
1998  * @ra: return address into tcg generated code, or 0
1999  * Context: BQL held
2000  *
2001  * Load @size bytes from @addr, which is memory-mapped i/o.
2002  * The bytes are concatenated in big-endian order with @ret_be.
2003  */
2004 static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2005                                 uint64_t ret_be, vaddr addr, int size,
2006                                 int mmu_idx, MMUAccessType type, uintptr_t ra,
2007                                 MemoryRegion *mr, hwaddr mr_offset)
2008 {
2009     do {
2010         MemOp this_mop;
2011         unsigned this_size;
2012         uint64_t val;
2013         MemTxResult r;
2014 
2015         /* Read aligned pieces up to 8 bytes. */
2016         this_mop = ctz32(size | (int)addr | 8);
2017         this_size = 1 << this_mop;
2018         this_mop |= MO_BE;
2019 
2020         r = memory_region_dispatch_read(mr, mr_offset, &val,
2021                                         this_mop, full->attrs);
2022         if (unlikely(r != MEMTX_OK)) {
2023             io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
2024         }
2025         if (this_size == 8) {
2026             return val;
2027         }
2028 
2029         ret_be = (ret_be << (this_size * 8)) | val;
2030         addr += this_size;
2031         mr_offset += this_size;
2032         size -= this_size;
2033     } while (size);
2034 
2035     return ret_be;
2036 }
2037 
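/*
 * Worked example (added for illustration): for a 6-byte read starting at
 * an address ending in ...2, ctz32(6 | 2 | 8) = 1 selects a 2-byte piece;
 * the next iteration sees size 4 at ...4, and ctz32(4 | 4 | 8) = 2 selects
 * a 4-byte piece, so the access is split into naturally aligned 2+4 reads.
 */
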
2038 static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2039                                uint64_t ret_be, vaddr addr, int size,
2040                                int mmu_idx, MMUAccessType type, uintptr_t ra)
2041 {
2042     MemoryRegionSection *section;
2043     MemoryRegion *mr;
2044     hwaddr mr_offset;
2045     MemTxAttrs attrs;
2046 
2047     tcg_debug_assert(size > 0 && size <= 8);
2048 
2049     attrs = full->attrs;
2050     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2051     mr = section->mr;
2052 
2053     BQL_LOCK_GUARD();
2054     return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
2055                            type, ra, mr, mr_offset);
2056 }
2057 
2058 static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2059                                uint64_t ret_be, vaddr addr, int size,
2060                                int mmu_idx, uintptr_t ra)
2061 {
2062     MemoryRegionSection *section;
2063     MemoryRegion *mr;
2064     hwaddr mr_offset;
2065     MemTxAttrs attrs;
2066     uint64_t a, b;
2067 
2068     tcg_debug_assert(size > 8 && size <= 16);
2069 
2070     attrs = full->attrs;
2071     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2072     mr = section->mr;
2073 
2074     BQL_LOCK_GUARD();
2075     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
2076                         MMU_DATA_LOAD, ra, mr, mr_offset);
2077     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
2078                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
2079     return int128_make128(b, a);
2080 }
2081 
2082 /**
2083  * do_ld_bytes_beN
2084  * @p: translation parameters
2085  * @ret_be: accumulated data
2086  *
2087  * Load @p->size bytes from @p->haddr, which is RAM.
2088  * The bytes are concatenated in big-endian order with @ret_be.
2089  */
2090 static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
2091 {
2092     uint8_t *haddr = p->haddr;
2093     int i, size = p->size;
2094 
2095     for (i = 0; i < size; i++) {
2096         ret_be = (ret_be << 8) | haddr[i];
2097     }
2098     return ret_be;
2099 }
2100 
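/*
 * Worked example (added for illustration): with ret_be == 0 and the bytes
 * 0x12 0x34 0x56 at p->haddr, the loop produces 0x12, then 0x1234, then
 * 0x123456, i.e. the bytes in big-endian order.
 */
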
2101 /**
2102  * do_ld_parts_beN
2103  * @p: translation parameters
2104  * @ret_be: accumulated data
2105  *
2106  * As do_ld_bytes_beN, but atomically on each aligned part.
2107  */
2108 static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2109 {
2110     void *haddr = p->haddr;
2111     int size = p->size;
2112 
2113     do {
2114         uint64_t x;
2115         int n;
2116 
2117         /*
2118          * Find minimum of alignment and size.
2119          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2120          * would have only checked the low bits of addr|size once at the start,
2121          * but is just as easy.
2122          */
2123         switch (((uintptr_t)haddr | size) & 7) {
2124         case 4:
2125             x = cpu_to_be32(load_atomic4(haddr));
2126             ret_be = (ret_be << 32) | x;
2127             n = 4;
2128             break;
2129         case 2:
2130         case 6:
2131             x = cpu_to_be16(load_atomic2(haddr));
2132             ret_be = (ret_be << 16) | x;
2133             n = 2;
2134             break;
2135         default:
2136             x = *(uint8_t *)haddr;
2137             ret_be = (ret_be << 8) | x;
2138             n = 1;
2139             break;
2140         case 0:
2141             g_assert_not_reached();
2142         }
2143         haddr += n;
2144         size -= n;
2145     } while (size != 0);
2146     return ret_be;
2147 }
2148 
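/*
 * Worked example (added for illustration): for haddr ending in ...2 and
 * size 6, (2 | 6) & 7 == 6 selects an atomic 2-byte load; the remainder
 * (haddr ...4, size 4) selects an atomic 4-byte load, so every piece is
 * loaded atomically at its natural alignment.
 */
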
2149 /**
2150  * do_ld_whole_be4
2151  * @p: translation parameters
2152  * @ret_be: accumulated data
2153  *
2154  * As do_ld_bytes_beN, but with one atomic load.
2155  * Four aligned bytes are guaranteed to cover the load.
2156  */
2157 static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2158 {
2159     int o = p->addr & 3;
2160     uint32_t x = load_atomic4(p->haddr - o);
2161 
2162     x = cpu_to_be32(x);
2163     x <<= o * 8;
2164     x >>= (4 - p->size) * 8;
2165     return (ret_be << (p->size * 8)) | x;
2166 }
2167 
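/*
 * Worked example (added for illustration): for a 2-byte load at an address
 * with o == 1, the aligned 4-byte load yields bytes B0 B1 B2 B3 (big-endian
 * in x); "x <<= 8" discards B0 and "x >>= 16" keeps B1 B2, exactly the two
 * requested bytes, which end up as the low bytes below the shifted ret_be.
 */
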
2168 /**
2169  * do_ld_whole_be8
2170  * @p: translation parameters
2171  * @ret_be: accumulated data
2172  *
2173  * As do_ld_bytes_beN, but with one atomic load.
2174  * Eight aligned bytes are guaranteed to cover the load.
2175  */
2176 static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2177                                 MMULookupPageData *p, uint64_t ret_be)
2178 {
2179     int o = p->addr & 7;
2180     uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
2181 
2182     x = cpu_to_be64(x);
2183     x <<= o * 8;
2184     x >>= (8 - p->size) * 8;
2185     return (ret_be << (p->size * 8)) | x;
2186 }
2187 
2188 /**
2189  * do_ld_whole_be16
2190  * @p: translation parameters
2191  * @ret_be: accumulated data
2192  *
2193  * As do_ld_bytes_beN, but with one atomic load.
2194  * 16 aligned bytes are guaranteed to cover the load.
2195  */
2196 static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
2197                                MMULookupPageData *p, uint64_t ret_be)
2198 {
2199     int o = p->addr & 15;
2200     Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
2201     int size = p->size;
2202 
2203     if (!HOST_BIG_ENDIAN) {
2204         y = bswap128(y);
2205     }
2206     y = int128_lshift(y, o * 8);
2207     y = int128_urshift(y, (16 - size) * 8);
2208     x = int128_make64(ret_be);
2209     x = int128_lshift(x, size * 8);
2210     return int128_or(x, y);
2211 }
2212 
2213 /*
2214  * Wrapper for the above.
2215  */
2216 static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2217                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2218                           MemOp mop, uintptr_t ra)
2219 {
2220     MemOp atom;
2221     unsigned tmp, half_size;
2222 
2223     if (unlikely(p->flags & TLB_MMIO)) {
2224         return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
2225                               mmu_idx, type, ra);
2226     }
2227 
2228     /*
2229      * It is a given that we cross a page and therefore there is no
2230      * atomicity for the load as a whole, but subobjects may need attention.
2231      */
2232     atom = mop & MO_ATOM_MASK;
2233     switch (atom) {
2234     case MO_ATOM_SUBALIGN:
2235         return do_ld_parts_beN(p, ret_be);
2236 
2237     case MO_ATOM_IFALIGN_PAIR:
2238     case MO_ATOM_WITHIN16_PAIR:
2239         tmp = mop & MO_SIZE;
2240         tmp = tmp ? tmp - 1 : 0;
2241         half_size = 1 << tmp;
2242         if (atom == MO_ATOM_IFALIGN_PAIR
2243             ? p->size == half_size
2244             : p->size >= half_size) {
2245             if (!HAVE_al8_fast && p->size < 4) {
2246                 return do_ld_whole_be4(p, ret_be);
2247             } else {
2248                 return do_ld_whole_be8(cpu, ra, p, ret_be);
2249             }
2250         }
2251         /* fall through */
2252 
2253     case MO_ATOM_IFALIGN:
2254     case MO_ATOM_WITHIN16:
2255     case MO_ATOM_NONE:
2256         return do_ld_bytes_beN(p, ret_be);
2257 
2258     default:
2259         g_assert_not_reached();
2260     }
2261 }
2262 
2263 /*
2264  * Wrapper for the above, for 8 < size < 16.
2265  */
2266 static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
2267                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
2268 {
2269     int size = p->size;
2270     uint64_t b;
2271     MemOp atom;
2272 
2273     if (unlikely(p->flags & TLB_MMIO)) {
2274         return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
2275     }
2276 
2277     /*
2278      * It is a given that we cross a page and therefore there is no
2279      * atomicity for the load as a whole, but subobjects may need attention.
2280      */
2281     atom = mop & MO_ATOM_MASK;
2282     switch (atom) {
2283     case MO_ATOM_SUBALIGN:
2284         p->size = size - 8;
2285         a = do_ld_parts_beN(p, a);
2286         p->haddr += size - 8;
2287         p->size = 8;
2288         b = do_ld_parts_beN(p, 0);
2289         break;
2290 
2291     case MO_ATOM_WITHIN16_PAIR:
2292         /* Since size > 8, this is the half that must be atomic. */
2293         return do_ld_whole_be16(cpu, ra, p, a);
2294 
2295     case MO_ATOM_IFALIGN_PAIR:
2296         /*
2297          * Since size > 8, both halves are misaligned,
2298          * and so neither is atomic.
2299          */
2300     case MO_ATOM_IFALIGN:
2301     case MO_ATOM_WITHIN16:
2302     case MO_ATOM_NONE:
2303         p->size = size - 8;
2304         a = do_ld_bytes_beN(p, a);
2305         b = ldq_be_p(p->haddr + size - 8);
2306         break;
2307 
2308     default:
2309         g_assert_not_reached();
2310     }
2311 
2312     return int128_make128(b, a);
2313 }
2314 
2315 static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2316                        MMUAccessType type, uintptr_t ra)
2317 {
2318     if (unlikely(p->flags & TLB_MMIO)) {
2319         return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
2320     } else {
2321         return *(uint8_t *)p->haddr;
2322     }
2323 }
2324 
2325 static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2326                         MMUAccessType type, MemOp memop, uintptr_t ra)
2327 {
2328     uint16_t ret;
2329 
2330     if (unlikely(p->flags & TLB_MMIO)) {
2331         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2332         if ((memop & MO_BSWAP) == MO_LE) {
2333             ret = bswap16(ret);
2334         }
2335     } else {
2336         /* Perform the load host endian, then swap if necessary. */
2337         ret = load_atom_2(cpu, ra, p->haddr, memop);
2338         if (memop & MO_BSWAP) {
2339             ret = bswap16(ret);
2340         }
2341     }
2342     return ret;
2343 }
2344 
2345 static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2346                         MMUAccessType type, MemOp memop, uintptr_t ra)
2347 {
2348     uint32_t ret;
2349 
2350     if (unlikely(p->flags & TLB_MMIO)) {
2351         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2352         if ((memop & MO_BSWAP) == MO_LE) {
2353             ret = bswap32(ret);
2354         }
2355     } else {
2356         /* Perform the load host endian. */
2357         ret = load_atom_4(cpu, ra, p->haddr, memop);
2358         if (memop & MO_BSWAP) {
2359             ret = bswap32(ret);
2360         }
2361     }
2362     return ret;
2363 }
2364 
2365 static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2366                         MMUAccessType type, MemOp memop, uintptr_t ra)
2367 {
2368     uint64_t ret;
2369 
2370     if (unlikely(p->flags & TLB_MMIO)) {
2371         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2372         if ((memop & MO_BSWAP) == MO_LE) {
2373             ret = bswap64(ret);
2374         }
2375     } else {
2376         /* Perform the load host endian. */
2377         ret = load_atom_8(cpu, ra, p->haddr, memop);
2378         if (memop & MO_BSWAP) {
2379             ret = bswap64(ret);
2380         }
2381     }
2382     return ret;
2383 }
2384 
2385 static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2386                           uintptr_t ra, MMUAccessType access_type)
2387 {
2388     MMULookupLocals l;
2389     bool crosspage;
2390 
2391     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2392     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2393     tcg_debug_assert(!crosspage);
2394 
2395     return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2396 }
2397 
2398 static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2399                            uintptr_t ra, MMUAccessType access_type)
2400 {
2401     MMULookupLocals l;
2402     bool crosspage;
2403     uint16_t ret;
2404     uint8_t a, b;
2405 
2406     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2407     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2408     if (likely(!crosspage)) {
2409         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2410     }
2411 
2412     a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2413     b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
2414 
2415     if ((l.memop & MO_BSWAP) == MO_LE) {
2416         ret = a | (b << 8);
2417     } else {
2418         ret = b | (a << 8);
2419     }
2420     return ret;
2421 }
2422 
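/*
 * Worked example (added for illustration): a little-endian 16-bit load of
 * 0xBEEF that straddles a page boundary reads a == 0xEF from the first
 * page and b == 0xBE from the second, and reassembles
 * ret = a | (b << 8) == 0xBEEF.
 */
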
2423 static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2424                            uintptr_t ra, MMUAccessType access_type)
2425 {
2426     MMULookupLocals l;
2427     bool crosspage;
2428     uint32_t ret;
2429 
2430     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2431     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2432     if (likely(!crosspage)) {
2433         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2434     }
2435 
2436     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2437     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2438     if ((l.memop & MO_BSWAP) == MO_LE) {
2439         ret = bswap32(ret);
2440     }
2441     return ret;
2442 }
2443 
2444 static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2445                            uintptr_t ra, MMUAccessType access_type)
2446 {
2447     MMULookupLocals l;
2448     bool crosspage;
2449     uint64_t ret;
2450 
2451     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2452     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2453     if (likely(!crosspage)) {
2454         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2455     }
2456 
2457     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2458     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2459     if ((l.memop & MO_BSWAP) == MO_LE) {
2460         ret = bswap64(ret);
2461     }
2462     return ret;
2463 }
2464 
2465 static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
2466                           MemOpIdx oi, uintptr_t ra)
2467 {
2468     MMULookupLocals l;
2469     bool crosspage;
2470     uint64_t a, b;
2471     Int128 ret;
2472     int first;
2473 
2474     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2475     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
2476     if (likely(!crosspage)) {
2477         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2478             ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
2479                                    l.mmu_idx, ra);
2480             if ((l.memop & MO_BSWAP) == MO_LE) {
2481                 ret = bswap128(ret);
2482             }
2483         } else {
2484             /* Perform the load host endian. */
2485             ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
2486             if (l.memop & MO_BSWAP) {
2487                 ret = bswap128(ret);
2488             }
2489         }
2490         return ret;
2491     }
2492 
2493     first = l.page[0].size;
2494     if (first == 8) {
2495         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
2496 
2497         a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2498         b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2499         if ((mop8 & MO_BSWAP) == MO_LE) {
2500             ret = int128_make128(a, b);
2501         } else {
2502             ret = int128_make128(b, a);
2503         }
2504         return ret;
2505     }
2506 
2507     if (first < 8) {
2508         a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
2509                       MMU_DATA_LOAD, l.memop, ra);
2510         ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
2511     } else {
2512         ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
2513         b = int128_getlo(ret);
2514         ret = int128_lshift(ret, l.page[1].size * 8);
2515         a = int128_gethi(ret);
2516         b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
2517                       MMU_DATA_LOAD, l.memop, ra);
2518         ret = int128_make128(b, a);
2519     }
2520     if ((l.memop & MO_BSWAP) == MO_LE) {
2521         ret = bswap128(ret);
2522     }
2523     return ret;
2524 }
2525 
2526 /*
2527  * Store Helpers
2528  */
2529 
2530 /**
2531  * do_st_mmio_leN:
2532  * @cpu: generic cpu state
2533  * @full: page parameters
2534  * @val_le: data to store
2535  * @addr: virtual address
2536  * @size: number of bytes
2537  * @mmu_idx: virtual address context
2538  * @ra: return address into tcg generated code, or 0
2539  * Context: BQL held
2540  *
2541  * Store @size bytes at @addr, which is memory-mapped i/o.
2542  * The bytes to store are extracted in little-endian order from @val_le;
2543  * return the bytes of @val_le beyond @size that have not been stored.
2544  */
2545 static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2546                                 uint64_t val_le, vaddr addr, int size,
2547                                 int mmu_idx, uintptr_t ra,
2548                                 MemoryRegion *mr, hwaddr mr_offset)
2549 {
2550     do {
2551         MemOp this_mop;
2552         unsigned this_size;
2553         MemTxResult r;
2554 
2555         /* Store aligned pieces up to 8 bytes. */
2556         this_mop = ctz32(size | (int)addr | 8);
2557         this_size = 1 << this_mop;
2558         this_mop |= MO_LE;
2559 
2560         r = memory_region_dispatch_write(mr, mr_offset, val_le,
2561                                          this_mop, full->attrs);
2562         if (unlikely(r != MEMTX_OK)) {
2563             io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
2564                       mmu_idx, r, ra);
2565         }
2566         if (this_size == 8) {
2567             return 0;
2568         }
2569 
2570         val_le >>= this_size * 8;
2571         addr += this_size;
2572         mr_offset += this_size;
2573         size -= this_size;
2574     } while (size);
2575 
2576     return val_le;
2577 }
2578 
2579 static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2580                                uint64_t val_le, vaddr addr, int size,
2581                                int mmu_idx, uintptr_t ra)
2582 {
2583     MemoryRegionSection *section;
2584     hwaddr mr_offset;
2585     MemoryRegion *mr;
2586     MemTxAttrs attrs;
2587 
2588     tcg_debug_assert(size > 0 && size <= 8);
2589 
2590     attrs = full->attrs;
2591     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2592     mr = section->mr;
2593 
2594     BQL_LOCK_GUARD();
2595     return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
2596                            ra, mr, mr_offset);
2597 }
2598 
2599 static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2600                                  Int128 val_le, vaddr addr, int size,
2601                                  int mmu_idx, uintptr_t ra)
2602 {
2603     MemoryRegionSection *section;
2604     MemoryRegion *mr;
2605     hwaddr mr_offset;
2606     MemTxAttrs attrs;
2607 
2608     tcg_debug_assert(size > 8 && size <= 16);
2609 
2610     attrs = full->attrs;
2611     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2612     mr = section->mr;
2613 
2614     BQL_LOCK_GUARD();
2615     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
2616                     mmu_idx, ra, mr, mr_offset);
2617     return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
2618                            size - 8, mmu_idx, ra, mr, mr_offset + 8);
2619 }
2620 
2621 /*
2622  * Wrapper for the above.
2623  */
2624 static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
2625                           uint64_t val_le, int mmu_idx,
2626                           MemOp mop, uintptr_t ra)
2627 {
2628     MemOp atom;
2629     unsigned tmp, half_size;
2630 
2631     if (unlikely(p->flags & TLB_MMIO)) {
2632         return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
2633                               p->size, mmu_idx, ra);
2634     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2635         return val_le >> (p->size * 8);
2636     }
2637 
2638     /*
2639      * It is a given that we cross a page and therefore there is no atomicity
2640      * for the store as a whole, but subobjects may need attention.
2641      */
2642     atom = mop & MO_ATOM_MASK;
2643     switch (atom) {
2644     case MO_ATOM_SUBALIGN:
2645         return store_parts_leN(p->haddr, p->size, val_le);
2646 
2647     case MO_ATOM_IFALIGN_PAIR:
2648     case MO_ATOM_WITHIN16_PAIR:
2649         tmp = mop & MO_SIZE;
2650         tmp = tmp ? tmp - 1 : 0;
2651         half_size = 1 << tmp;
2652         if (atom == MO_ATOM_IFALIGN_PAIR
2653             ? p->size == half_size
2654             : p->size >= half_size) {
2655             if (!HAVE_al8_fast && p->size <= 4) {
2656                 return store_whole_le4(p->haddr, p->size, val_le);
2657             } else if (HAVE_al8) {
2658                 return store_whole_le8(p->haddr, p->size, val_le);
2659             } else {
2660                 cpu_loop_exit_atomic(cpu, ra);
2661             }
2662         }
2663         /* fall through */
2664 
2665     case MO_ATOM_IFALIGN:
2666     case MO_ATOM_WITHIN16:
2667     case MO_ATOM_NONE:
2668         return store_bytes_leN(p->haddr, p->size, val_le);
2669 
2670     default:
2671         g_assert_not_reached();
2672     }
2673 }
2674 
2675 /*
2676  * Wrapper for the above, for 8 < size < 16.
2677  */
2678 static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
2679                             Int128 val_le, int mmu_idx,
2680                             MemOp mop, uintptr_t ra)
2681 {
2682     int size = p->size;
2683     MemOp atom;
2684 
2685     if (unlikely(p->flags & TLB_MMIO)) {
2686         return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
2687                                 size, mmu_idx, ra);
2688     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2689         return int128_gethi(val_le) >> ((size - 8) * 8);
2690     }
2691 
2692     /*
2693      * It is a given that we cross a page and therefore there is no atomicity
2694      * for the store as a whole, but subobjects may need attention.
2695      */
2696     atom = mop & MO_ATOM_MASK;
2697     switch (atom) {
2698     case MO_ATOM_SUBALIGN:
2699         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
2700         return store_parts_leN(p->haddr + 8, p->size - 8,
2701                                int128_gethi(val_le));
2702 
2703     case MO_ATOM_WITHIN16_PAIR:
2704         /* Since size > 8, this is the half that must be atomic. */
2705         if (!HAVE_CMPXCHG128) {
2706             cpu_loop_exit_atomic(cpu, ra);
2707         }
2708         return store_whole_le16(p->haddr, p->size, val_le);
2709 
2710     case MO_ATOM_IFALIGN_PAIR:
2711         /*
2712          * Since size > 8, both halves are misaligned,
2713          * and so neither is atomic.
2714          */
2715     case MO_ATOM_IFALIGN:
2716     case MO_ATOM_WITHIN16:
2717     case MO_ATOM_NONE:
2718         stq_le_p(p->haddr, int128_getlo(val_le));
2719         return store_bytes_leN(p->haddr + 8, p->size - 8,
2720                                int128_gethi(val_le));
2721 
2722     default:
2723         g_assert_not_reached();
2724     }
2725 }
2726 
2727 static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
2728                     int mmu_idx, uintptr_t ra)
2729 {
2730     if (unlikely(p->flags & TLB_MMIO)) {
2731         do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
2732     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2733         /* nothing */
2734     } else {
2735         *(uint8_t *)p->haddr = val;
2736     }
2737 }
2738 
2739 static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
2740                     int mmu_idx, MemOp memop, uintptr_t ra)
2741 {
2742     if (unlikely(p->flags & TLB_MMIO)) {
2743         if ((memop & MO_BSWAP) != MO_LE) {
2744             val = bswap16(val);
2745         }
2746         do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
2747     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2748         /* nothing */
2749     } else {
2750         /* Swap to host endian if necessary, then store. */
2751         if (memop & MO_BSWAP) {
2752             val = bswap16(val);
2753         }
2754         store_atom_2(cpu, ra, p->haddr, memop, val);
2755     }
2756 }
2757 
2758 static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
2759                     int mmu_idx, MemOp memop, uintptr_t ra)
2760 {
2761     if (unlikely(p->flags & TLB_MMIO)) {
2762         if ((memop & MO_BSWAP) != MO_LE) {
2763             val = bswap32(val);
2764         }
2765         do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
2766     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2767         /* nothing */
2768     } else {
2769         /* Swap to host endian if necessary, then store. */
2770         if (memop & MO_BSWAP) {
2771             val = bswap32(val);
2772         }
2773         store_atom_4(cpu, ra, p->haddr, memop, val);
2774     }
2775 }
2776 
2777 static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
2778                     int mmu_idx, MemOp memop, uintptr_t ra)
2779 {
2780     if (unlikely(p->flags & TLB_MMIO)) {
2781         if ((memop & MO_BSWAP) != MO_LE) {
2782             val = bswap64(val);
2783         }
2784         do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
2785     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2786         /* nothing */
2787     } else {
2788         /* Swap to host endian if necessary, then store. */
2789         if (memop & MO_BSWAP) {
2790             val = bswap64(val);
2791         }
2792         store_atom_8(cpu, ra, p->haddr, memop, val);
2793     }
2794 }
2795 
2796 static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
2797                        MemOpIdx oi, uintptr_t ra)
2798 {
2799     MMULookupLocals l;
2800     bool crosspage;
2801 
2802     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2803     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2804     tcg_debug_assert(!crosspage);
2805 
2806     do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
2807 }
2808 
2809 static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
2810                        MemOpIdx oi, uintptr_t ra)
2811 {
2812     MMULookupLocals l;
2813     bool crosspage;
2814     uint8_t a, b;
2815 
2816     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2817     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2818     if (likely(!crosspage)) {
2819         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2820         return;
2821     }
2822 
2823     if ((l.memop & MO_BSWAP) == MO_LE) {
2824         a = val, b = val >> 8;
2825     } else {
2826         b = val, a = val >> 8;
2827     }
2828     do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
2829     do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
2830 }
2831 
2832 static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
2833                        MemOpIdx oi, uintptr_t ra)
2834 {
2835     MMULookupLocals l;
2836     bool crosspage;
2837 
2838     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2839     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2840     if (likely(!crosspage)) {
2841         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2842         return;
2843     }
2844 
2845     /* Swap to little endian for simplicity, then store by bytes. */
2846     if ((l.memop & MO_BSWAP) != MO_LE) {
2847         val = bswap32(val);
2848     }
2849     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2850     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2851 }
2852 
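/*
 * Worked example (added for illustration): storing 0x44332211 with a 1/3
 * page split stores byte 0x11 on the first page; do_st_leN returns the
 * remaining little-endian value 0x443322, whose bytes 0x22 0x33 0x44 are
 * then stored on the second page.
 */
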
2853 static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
2854                        MemOpIdx oi, uintptr_t ra)
2855 {
2856     MMULookupLocals l;
2857     bool crosspage;
2858 
2859     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2860     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2861     if (likely(!crosspage)) {
2862         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2863         return;
2864     }
2865 
2866     /* Swap to little endian for simplicity, then store by bytes. */
2867     if ((l.memop & MO_BSWAP) != MO_LE) {
2868         val = bswap64(val);
2869     }
2870     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2871     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2872 }
2873 
2874 static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
2875                         MemOpIdx oi, uintptr_t ra)
2876 {
2877     MMULookupLocals l;
2878     bool crosspage;
2879     uint64_t a, b;
2880     int first;
2881 
2882     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2883     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2884     if (likely(!crosspage)) {
2885         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2886             if ((l.memop & MO_BSWAP) != MO_LE) {
2887                 val = bswap128(val);
2888             }
2889             do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
2890         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
2891             /* nothing */
2892         } else {
2893             /* Swap to host endian if necessary, then store. */
2894             if (l.memop & MO_BSWAP) {
2895                 val = bswap128(val);
2896             }
2897             store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
2898         }
2899         return;
2900     }
2901 
2902     first = l.page[0].size;
2903     if (first == 8) {
2904         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
2905 
2906         if (l.memop & MO_BSWAP) {
2907             val = bswap128(val);
2908         }
2909         if (HOST_BIG_ENDIAN) {
2910             b = int128_getlo(val), a = int128_gethi(val);
2911         } else {
2912             a = int128_getlo(val), b = int128_gethi(val);
2913         }
2914         do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
2915         do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
2916         return;
2917     }
2918 
2919     if ((l.memop & MO_BSWAP) != MO_LE) {
2920         val = bswap128(val);
2921     }
2922     if (first < 8) {
2923         do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
2924         val = int128_urshift(val, first * 8);
2925         do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2926     } else {
2927         b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2928         do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
2929     }
2930 }
2931 
2932 #include "ldst_common.c.inc"
2933 
2934 /*
2935  * First set of functions passes in OI and RETADDR.
2936  * This makes them callable from other helpers.
2937  */
2938 
2939 #define ATOMIC_NAME(X) \
2940     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2941 
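/*
 * For example (added for clarity), the DATA_SIZE 4 instantiation of
 * atomic_template.h below uses ATOMIC_NAME(cmpxchg) to define helpers
 * such as cpu_atomic_cmpxchgl_le_mmu and cpu_atomic_cmpxchgl_be_mmu;
 * the exact argument lists come from the template.
 */
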
2942 #define ATOMIC_MMU_CLEANUP
2943 
2944 #include "atomic_common.c.inc"
2945 
2946 #define DATA_SIZE 1
2947 #include "atomic_template.h"
2948 
2949 #define DATA_SIZE 2
2950 #include "atomic_template.h"
2951 
2952 #define DATA_SIZE 4
2953 #include "atomic_template.h"
2954 
2955 #ifdef CONFIG_ATOMIC64
2956 #define DATA_SIZE 8
2957 #include "atomic_template.h"
2958 #endif
2959 
2960 #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
2961 #define DATA_SIZE 16
2962 #include "atomic_template.h"
2963 #endif
2964 
2965 /* Code access functions.  */
2966 
2967 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2968 {
2969     CPUState *cs = env_cpu(env);
2970     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
2971     return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
2972 }
2973 
2974 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
2975 {
2976     CPUState *cs = env_cpu(env);
2977     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
2978     return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
2979 }
2980 
2981 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
2982 {
2983     CPUState *cs = env_cpu(env);
2984     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
2985     return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
2986 }
2987 
2988 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2989 {
2990     CPUState *cs = env_cpu(env);
2991     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
2992     return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
2993 }
2994 
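/*
 * Illustrative usage (added for clarity): a target's instruction decoder
 * typically fetches opcodes through these helpers, e.g.
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *
 * where pc is the guest virtual address of the instruction; the fetch goes
 * through the MMU_INST_FETCH path above and so honours execute permissions.
 */
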
2995 uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
2996                          MemOpIdx oi, uintptr_t retaddr)
2997 {
2998     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
2999 }
3000 
3001 uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
3002                           MemOpIdx oi, uintptr_t retaddr)
3003 {
3004     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3005 }
3006 
3007 uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
3008                           MemOpIdx oi, uintptr_t retaddr)
3009 {
3010     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3011 }
3012 
3013 uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
3014                           MemOpIdx oi, uintptr_t retaddr)
3015 {
3016     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3017 }
3018