xref: /qemu/accel/tcg/cputlb.c (revision d50ef446)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/cputlb.h"
27 #include "exec/memory-internal.h"
28 #include "exec/ram_addr.h"
29 #include "tcg/tcg.h"
30 #include "qemu/error-report.h"
31 #include "exec/log.h"
32 #include "exec/helper-proto-common.h"
33 #include "qemu/atomic.h"
34 #include "qemu/atomic128.h"
35 #include "exec/translate-all.h"
36 #include "trace.h"
37 #include "tb-hash.h"
38 #include "internal.h"
39 #ifdef CONFIG_PLUGIN
40 #include "qemu/plugin-memory.h"
41 #endif
42 #include "tcg/tcg-ldst.h"
43 #include "tcg/oversized-guest.h"
44 
45 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
46 /* #define DEBUG_TLB */
47 /* #define DEBUG_TLB_LOG */
48 
49 #ifdef DEBUG_TLB
50 # define DEBUG_TLB_GATE 1
51 # ifdef DEBUG_TLB_LOG
52 #  define DEBUG_TLB_LOG_GATE 1
53 # else
54 #  define DEBUG_TLB_LOG_GATE 0
55 # endif
56 #else
57 # define DEBUG_TLB_GATE 0
58 # define DEBUG_TLB_LOG_GATE 0
59 #endif
60 
61 #define tlb_debug(fmt, ...) do { \
62     if (DEBUG_TLB_LOG_GATE) { \
63         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
64                       ## __VA_ARGS__); \
65     } else if (DEBUG_TLB_GATE) { \
66         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
67     } \
68 } while (0)
69 
70 #define assert_cpu_is_self(cpu) do {                              \
71         if (DEBUG_TLB_GATE) {                                     \
72             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
73         }                                                         \
74     } while (0)
75 
76 /* run_on_cpu_data.target_ptr should always be big enough for a
77  * vaddr even on 32 bit builds
78  */
79 QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
80 
81 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
82  */
83 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
84 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
85 
86 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
87 {
88     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
89 }
90 
91 static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
92 {
93     return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
94 }
95 
96 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
97                              size_t max_entries)
98 {
99     desc->window_begin_ns = ns;
100     desc->window_max_entries = max_entries;
101 }
102 
103 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
104 {
105     CPUJumpCache *jc = cpu->tb_jmp_cache;
106     int i, i0;
107 
108     if (unlikely(!jc)) {
109         return;
110     }
111 
112     i0 = tb_jmp_cache_hash_page(page_addr);
113     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
114         qatomic_set(&jc->array[i0 + i].tb, NULL);
115     }
116 }
117 
118 /**
119  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
120  * @desc: The CPUTLBDesc portion of the TLB
121  * @fast: The CPUTLBDescFast portion of the same TLB
122  *
123  * Called with tlb_lock_held.
124  *
125  * We have two main constraints when resizing a TLB: (1) we only resize it
126  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
127  * the array or unnecessarily flushing it), which means we do not control how
128  * frequently the resizing can occur; (2) we don't have access to the guest's
129  * future scheduling decisions, and therefore have to decide the magnitude of
130  * the resize based on past observations.
131  *
132  * In general, a memory-hungry process can benefit greatly from an appropriately
133  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
134  * we just have to make the TLB as large as possible; while an oversized TLB
135  * results in minimal TLB miss rates, it also takes longer to be flushed
136  * (flushes can be _very_ frequent), and the reduced locality can also hurt
137  * performance.
138  *
139  * To achieve near-optimal performance for all kinds of workloads, we:
140  *
141  * 1. Aggressively increase the size of the TLB when the use rate of the
142  * TLB being flushed is high, since it is likely that in the near future this
143  * memory-hungry process will execute again, and its memory hungriness will
144  * probably be similar.
145  *
146  * 2. Slowly reduce the size of the TLB as the use rate declines over a
147  * reasonably large time window. The rationale is that if in such a time window
148  * we have not observed a high TLB use rate, it is likely that we won't observe
149  * it in the near future. In that case, once a time window expires we downsize
150  * the TLB to match the maximum use rate observed in the window.
151  *
152  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
153  * since in that range performance is likely near-optimal. Recall that the TLB
154  * is direct mapped, so we want the use rate to be low (or at least not too
155  * high), since otherwise we are likely to have a significant amount of
156  * conflict misses.
157  */
158 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
159                                   int64_t now)
160 {
161     size_t old_size = tlb_n_entries(fast);
162     size_t rate;
163     size_t new_size = old_size;
164     int64_t window_len_ms = 100;
165     int64_t window_len_ns = window_len_ms * 1000 * 1000;
166     bool window_expired = now > desc->window_begin_ns + window_len_ns;
167 
168     if (desc->n_used_entries > desc->window_max_entries) {
169         desc->window_max_entries = desc->n_used_entries;
170     }
171     rate = desc->window_max_entries * 100 / old_size;
172 
173     if (rate > 70) {
174         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
175     } else if (rate < 30 && window_expired) {
176         size_t ceil = pow2ceil(desc->window_max_entries);
177         size_t expected_rate = desc->window_max_entries * 100 / ceil;
178 
179         /*
180          * Avoid undersizing when the max number of entries seen is just below
181          * a pow2. For instance, if max_entries == 1025, the expected use rate
182          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
183          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
184          * later. Thus, make sure that the expected use rate remains below 70%.
185          * (and since we double the size, that means the lowest rate we'd
186          * expect to get is 35%, which is still in the 30-70% range where
187          * we consider that the size is appropriate.)
188          */
189         if (expected_rate > 70) {
190             ceil *= 2;
191         }
192         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
193     }
194 
195     if (new_size == old_size) {
196         if (window_expired) {
197             tlb_window_reset(desc, now, desc->n_used_entries);
198         }
199         return;
200     }
201 
202     g_free(fast->table);
203     g_free(desc->fulltlb);
204 
205     tlb_window_reset(desc, now, 0);
206     /* desc->n_used_entries is cleared by the caller */
207     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
208     fast->table = g_try_new(CPUTLBEntry, new_size);
209     desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
210 
211     /*
212      * If the allocations fail, try smaller sizes. We just freed some
213      * memory, so going back to half of new_size has a good chance of working.
214      * Increased memory pressure elsewhere in the system might cause the
215      * allocations to fail though, so we progressively reduce the allocation
216      * size, aborting if we cannot even allocate the smallest TLB we support.
217      */
218     while (fast->table == NULL || desc->fulltlb == NULL) {
219         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
220             error_report("%s: %s", __func__, strerror(errno));
221             abort();
222         }
223         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
224         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
225 
226         g_free(fast->table);
227         g_free(desc->fulltlb);
228         fast->table = g_try_new(CPUTLBEntry, new_size);
229         desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
230     }
231 }
232 
233 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
234 {
235     desc->n_used_entries = 0;
236     desc->large_page_addr = -1;
237     desc->large_page_mask = -1;
238     desc->vindex = 0;
239     memset(fast->table, -1, sizeof_tlb(fast));
240     memset(desc->vtable, -1, sizeof(desc->vtable));
241 }
242 
243 static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
244                                         int64_t now)
245 {
246     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
247     CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
248 
249     tlb_mmu_resize_locked(desc, fast, now);
250     tlb_mmu_flush_locked(desc, fast);
251 }
252 
253 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
254 {
255     size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
256 
257     tlb_window_reset(desc, now, 0);
258     desc->n_used_entries = 0;
259     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
260     fast->table = g_new(CPUTLBEntry, n_entries);
261     desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
262     tlb_mmu_flush_locked(desc, fast);
263 }
264 
265 static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
266 {
267     cpu->neg.tlb.d[mmu_idx].n_used_entries++;
268 }
269 
270 static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
271 {
272     cpu->neg.tlb.d[mmu_idx].n_used_entries--;
273 }
274 
275 void tlb_init(CPUState *cpu)
276 {
277     int64_t now = get_clock_realtime();
278     int i;
279 
280     qemu_spin_init(&cpu->neg.tlb.c.lock);
281 
282     /* All tlbs are initialized flushed. */
283     cpu->neg.tlb.c.dirty = 0;
284 
285     for (i = 0; i < NB_MMU_MODES; i++) {
286         tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
287     }
288 }
289 
290 void tlb_destroy(CPUState *cpu)
291 {
292     int i;
293 
294     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
295     for (i = 0; i < NB_MMU_MODES; i++) {
296         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
297         CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
298 
299         g_free(fast->table);
300         g_free(desc->fulltlb);
301     }
302 }
303 
304 /* flush_all_helper: run fn across all cpus
305  *
306  * If the wait flag is set then the src cpu's helper will be queued as
307  * "safe" work and the loop exited creating a synchronisation point
308  * where all queued work will be finished before execution starts
309  * again.
310  */
311 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
312                              run_on_cpu_data d)
313 {
314     CPUState *cpu;
315 
316     CPU_FOREACH(cpu) {
317         if (cpu != src) {
318             async_run_on_cpu(cpu, fn, d);
319         }
320     }
321 }
322 
323 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
324 {
325     CPUState *cpu;
326     size_t full = 0, part = 0, elide = 0;
327 
328     CPU_FOREACH(cpu) {
329         full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
330         part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
331         elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
332     }
333     *pfull = full;
334     *ppart = part;
335     *pelide = elide;
336 }
337 
338 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
339 {
340     uint16_t asked = data.host_int;
341     uint16_t all_dirty, work, to_clean;
342     int64_t now = get_clock_realtime();
343 
344     assert_cpu_is_self(cpu);
345 
346     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
347 
348     qemu_spin_lock(&cpu->neg.tlb.c.lock);
349 
350     all_dirty = cpu->neg.tlb.c.dirty;
351     to_clean = asked & all_dirty;
352     all_dirty &= ~to_clean;
353     cpu->neg.tlb.c.dirty = all_dirty;
354 
355     for (work = to_clean; work != 0; work &= work - 1) {
356         int mmu_idx = ctz32(work);
357         tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
358     }
359 
360     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
361 
362     tcg_flush_jmp_cache(cpu);
363 
364     if (to_clean == ALL_MMUIDX_BITS) {
365         qatomic_set(&cpu->neg.tlb.c.full_flush_count,
366                     cpu->neg.tlb.c.full_flush_count + 1);
367     } else {
368         qatomic_set(&cpu->neg.tlb.c.part_flush_count,
369                     cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
370         if (to_clean != asked) {
371             qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
372                         cpu->neg.tlb.c.elide_flush_count +
373                         ctpop16(asked & ~to_clean));
374         }
375     }
376 }
377 
378 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
379 {
380     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
381 
382     if (cpu->created && !qemu_cpu_is_self(cpu)) {
383         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
384                          RUN_ON_CPU_HOST_INT(idxmap));
385     } else {
386         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
387     }
388 }
389 
390 void tlb_flush(CPUState *cpu)
391 {
392     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
393 }
394 
395 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
396 {
397     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
398 
399     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
400 
401     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
402     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
403 }
404 
405 void tlb_flush_all_cpus(CPUState *src_cpu)
406 {
407     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
408 }
409 
410 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
411 {
412     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
413 
414     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
415 
416     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
417     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
418 }
419 
420 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
421 {
422     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
423 }
424 
425 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
426                                       vaddr page, vaddr mask)
427 {
428     page &= mask;
429     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
430 
431     return (page == (tlb_entry->addr_read & mask) ||
432             page == (tlb_addr_write(tlb_entry) & mask) ||
433             page == (tlb_entry->addr_code & mask));
434 }
435 
436 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
437 {
438     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
439 }
440 
441 /**
442  * tlb_entry_is_empty - return true if the entry is not in use
443  * @te: pointer to CPUTLBEntry
444  */
445 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
446 {
447     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
448 }
449 
450 /* Called with tlb_c.lock held */
451 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
452                                         vaddr page,
453                                         vaddr mask)
454 {
455     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
456         memset(tlb_entry, -1, sizeof(*tlb_entry));
457         return true;
458     }
459     return false;
460 }
461 
462 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
463 {
464     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
465 }
466 
467 /* Called with tlb_c.lock held */
468 static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
469                                             vaddr page,
470                                             vaddr mask)
471 {
472     CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
473     int k;
474 
475     assert_cpu_is_self(cpu);
476     for (k = 0; k < CPU_VTLB_SIZE; k++) {
477         if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
478             tlb_n_used_entries_dec(cpu, mmu_idx);
479         }
480     }
481 }
482 
483 static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
484                                               vaddr page)
485 {
486     tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
487 }
488 
489 static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
490 {
491     vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
492     vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
493 
494     /* Check if we need to flush due to large pages.  */
495     if ((page & lp_mask) == lp_addr) {
496         tlb_debug("forcing full flush midx %d (%016"
497                   VADDR_PRIx "/%016" VADDR_PRIx ")\n",
498                   midx, lp_addr, lp_mask);
499         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
500     } else {
501         if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
502             tlb_n_used_entries_dec(cpu, midx);
503         }
504         tlb_flush_vtlb_page_locked(cpu, midx, page);
505     }
506 }
507 
508 /**
509  * tlb_flush_page_by_mmuidx_async_0:
510  * @cpu: cpu on which to flush
511  * @addr: page of virtual address to flush
512  * @idxmap: set of mmu_idx to flush
513  *
514  * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
515  * at @addr from the tlbs indicated by @idxmap from @cpu.
516  */
517 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
518                                              vaddr addr,
519                                              uint16_t idxmap)
520 {
521     int mmu_idx;
522 
523     assert_cpu_is_self(cpu);
524 
525     tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
526 
527     qemu_spin_lock(&cpu->neg.tlb.c.lock);
528     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
529         if ((idxmap >> mmu_idx) & 1) {
530             tlb_flush_page_locked(cpu, mmu_idx, addr);
531         }
532     }
533     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
534 
535     /*
536      * Discard jump cache entries for any tb which might potentially
537      * overlap the flushed page, which includes the previous.
538      */
539     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
540     tb_jmp_cache_clear_page(cpu, addr);
541 }
542 
543 /**
544  * tlb_flush_page_by_mmuidx_async_1:
545  * @cpu: cpu on which to flush
546  * @data: encoded addr + idxmap
547  *
548  * Helper for tlb_flush_page_by_mmuidx and friends, called through
549  * async_run_on_cpu.  The idxmap parameter is encoded in the page
550  * offset of the target_ptr field.  This limits the set of mmu_idx
551  * that can be passed via this method.
552  */
553 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
554                                              run_on_cpu_data data)
555 {
556     vaddr addr_and_idxmap = data.target_ptr;
557     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
558     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
559 
560     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
561 }
562 
563 typedef struct {
564     vaddr addr;
565     uint16_t idxmap;
566 } TLBFlushPageByMMUIdxData;
567 
568 /**
569  * tlb_flush_page_by_mmuidx_async_2:
570  * @cpu: cpu on which to flush
571  * @data: allocated addr + idxmap
572  *
573  * Helper for tlb_flush_page_by_mmuidx and friends, called through
574  * async_run_on_cpu.  The addr+idxmap parameters are stored in a
575  * TLBFlushPageByMMUIdxData structure that has been allocated
576  * specifically for this helper.  Free the structure when done.
577  */
578 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
579                                              run_on_cpu_data data)
580 {
581     TLBFlushPageByMMUIdxData *d = data.host_ptr;
582 
583     tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
584     g_free(d);
585 }
586 
587 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
588 {
589     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
590 
591     /* This should already be page aligned */
592     addr &= TARGET_PAGE_MASK;
593 
594     if (qemu_cpu_is_self(cpu)) {
595         tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
596     } else if (idxmap < TARGET_PAGE_SIZE) {
597         /*
598          * Most targets have only a few mmu_idx.  In the case where
599          * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
600          * allocating memory for this operation.
601          */
602         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
603                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
604     } else {
605         TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
606 
607         /* Otherwise allocate a structure, freed by the worker.  */
608         d->addr = addr;
609         d->idxmap = idxmap;
610         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
611                          RUN_ON_CPU_HOST_PTR(d));
612     }
613 }
614 
615 void tlb_flush_page(CPUState *cpu, vaddr addr)
616 {
617     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
618 }
619 
620 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
621                                        uint16_t idxmap)
622 {
623     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
624 
625     /* This should already be page aligned */
626     addr &= TARGET_PAGE_MASK;
627 
628     /*
629      * Allocate memory to hold addr+idxmap only when needed.
630      * See tlb_flush_page_by_mmuidx for details.
631      */
632     if (idxmap < TARGET_PAGE_SIZE) {
633         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
634                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
635     } else {
636         CPUState *dst_cpu;
637 
638         /* Allocate a separate data block for each destination cpu.  */
639         CPU_FOREACH(dst_cpu) {
640             if (dst_cpu != src_cpu) {
641                 TLBFlushPageByMMUIdxData *d
642                     = g_new(TLBFlushPageByMMUIdxData, 1);
643 
644                 d->addr = addr;
645                 d->idxmap = idxmap;
646                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
647                                  RUN_ON_CPU_HOST_PTR(d));
648             }
649         }
650     }
651 
652     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
653 }
654 
655 void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
656 {
657     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
658 }
659 
660 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
661                                               vaddr addr,
662                                               uint16_t idxmap)
663 {
664     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
665 
666     /* This should already be page aligned */
667     addr &= TARGET_PAGE_MASK;
668 
669     /*
670      * Allocate memory to hold addr+idxmap only when needed.
671      * See tlb_flush_page_by_mmuidx for details.
672      */
673     if (idxmap < TARGET_PAGE_SIZE) {
674         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
675                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
676         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
677                               RUN_ON_CPU_TARGET_PTR(addr | idxmap));
678     } else {
679         CPUState *dst_cpu;
680         TLBFlushPageByMMUIdxData *d;
681 
682         /* Allocate a separate data block for each destination cpu.  */
683         CPU_FOREACH(dst_cpu) {
684             if (dst_cpu != src_cpu) {
685                 d = g_new(TLBFlushPageByMMUIdxData, 1);
686                 d->addr = addr;
687                 d->idxmap = idxmap;
688                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
689                                  RUN_ON_CPU_HOST_PTR(d));
690             }
691         }
692 
693         d = g_new(TLBFlushPageByMMUIdxData, 1);
694         d->addr = addr;
695         d->idxmap = idxmap;
696         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
697                               RUN_ON_CPU_HOST_PTR(d));
698     }
699 }
700 
701 void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
702 {
703     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
704 }
705 
706 static void tlb_flush_range_locked(CPUState *cpu, int midx,
707                                    vaddr addr, vaddr len,
708                                    unsigned bits)
709 {
710     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
711     CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
712     vaddr mask = MAKE_64BIT_MASK(0, bits);
713 
714     /*
715      * If @bits is smaller than the tlb size, there may be multiple entries
716      * within the TLB; otherwise all addresses that match under @mask hit
717      * the same TLB entry.
718      * TODO: Perhaps allow bits to be a few bits less than the size.
719      * For now, just flush the entire TLB.
720      *
721      * If @len is larger than the tlb size, then it will take longer to
722      * test all of the entries in the TLB than it will to flush it all.
723      */
724     if (mask < f->mask || len > f->mask) {
725         tlb_debug("forcing full flush midx %d ("
726                   "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
727                   midx, addr, mask, len);
728         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
729         return;
730     }
731 
732     /*
733      * Check if we need to flush due to large pages.
734      * Because large_page_mask contains all 1's from the msb,
735      * we only need to test the end of the range.
736      */
737     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
738         tlb_debug("forcing full flush midx %d ("
739                   "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
740                   midx, d->large_page_addr, d->large_page_mask);
741         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
742         return;
743     }
744 
745     for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
746         vaddr page = addr + i;
747         CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
748 
749         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
750             tlb_n_used_entries_dec(cpu, midx);
751         }
752         tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
753     }
754 }
755 
756 typedef struct {
757     vaddr addr;
758     vaddr len;
759     uint16_t idxmap;
760     uint16_t bits;
761 } TLBFlushRangeData;
762 
763 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
764                                               TLBFlushRangeData d)
765 {
766     int mmu_idx;
767 
768     assert_cpu_is_self(cpu);
769 
770     tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
771               d.addr, d.bits, d.len, d.idxmap);
772 
773     qemu_spin_lock(&cpu->neg.tlb.c.lock);
774     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
775         if ((d.idxmap >> mmu_idx) & 1) {
776             tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
777         }
778     }
779     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
780 
781     /*
782      * If the length is larger than the jump cache size, then it will take
783      * longer to clear each entry individually than it will to clear it all.
784      */
785     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
786         tcg_flush_jmp_cache(cpu);
787         return;
788     }
789 
790     /*
791      * Discard jump cache entries for any tb which might potentially
792      * overlap the flushed pages, which includes the previous.
793      */
794     d.addr -= TARGET_PAGE_SIZE;
795     for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
796         tb_jmp_cache_clear_page(cpu, d.addr);
797         d.addr += TARGET_PAGE_SIZE;
798     }
799 }
800 
801 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
802                                               run_on_cpu_data data)
803 {
804     TLBFlushRangeData *d = data.host_ptr;
805     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
806     g_free(d);
807 }
808 
809 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
810                                vaddr len, uint16_t idxmap,
811                                unsigned bits)
812 {
813     TLBFlushRangeData d;
814 
815     /*
816      * If all bits are significant, and len is small,
817      * this devolves to tlb_flush_page.
818      */
819     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
820         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
821         return;
822     }
823     /* If no page bits are significant, this devolves to tlb_flush. */
824     if (bits < TARGET_PAGE_BITS) {
825         tlb_flush_by_mmuidx(cpu, idxmap);
826         return;
827     }
828 
829     /* This should already be page aligned */
830     d.addr = addr & TARGET_PAGE_MASK;
831     d.len = len;
832     d.idxmap = idxmap;
833     d.bits = bits;
834 
835     if (qemu_cpu_is_self(cpu)) {
836         tlb_flush_range_by_mmuidx_async_0(cpu, d);
837     } else {
838         /* Otherwise allocate a structure, freed by the worker.  */
839         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
840         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
841                          RUN_ON_CPU_HOST_PTR(p));
842     }
843 }
844 
845 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
846                                    uint16_t idxmap, unsigned bits)
847 {
848     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
849 }
850 
851 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
852                                         vaddr addr, vaddr len,
853                                         uint16_t idxmap, unsigned bits)
854 {
855     TLBFlushRangeData d;
856     CPUState *dst_cpu;
857 
858     /*
859      * If all bits are significant, and len is small,
860      * this devolves to tlb_flush_page.
861      */
862     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
863         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
864         return;
865     }
866     /* If no page bits are significant, this devolves to tlb_flush. */
867     if (bits < TARGET_PAGE_BITS) {
868         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
869         return;
870     }
871 
872     /* This should already be page aligned */
873     d.addr = addr & TARGET_PAGE_MASK;
874     d.len = len;
875     d.idxmap = idxmap;
876     d.bits = bits;
877 
878     /* Allocate a separate data block for each destination cpu.  */
879     CPU_FOREACH(dst_cpu) {
880         if (dst_cpu != src_cpu) {
881             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
882             async_run_on_cpu(dst_cpu,
883                              tlb_flush_range_by_mmuidx_async_1,
884                              RUN_ON_CPU_HOST_PTR(p));
885         }
886     }
887 
888     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
889 }
890 
891 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
892                                             vaddr addr, uint16_t idxmap,
893                                             unsigned bits)
894 {
895     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
896                                        idxmap, bits);
897 }
898 
899 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
900                                                vaddr addr,
901                                                vaddr len,
902                                                uint16_t idxmap,
903                                                unsigned bits)
904 {
905     TLBFlushRangeData d, *p;
906     CPUState *dst_cpu;
907 
908     /*
909      * If all bits are significant, and len is small,
910      * this devolves to tlb_flush_page.
911      */
912     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
913         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
914         return;
915     }
916     /* If no page bits are significant, this devolves to tlb_flush. */
917     if (bits < TARGET_PAGE_BITS) {
918         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
919         return;
920     }
921 
922     /* This should already be page aligned */
923     d.addr = addr & TARGET_PAGE_MASK;
924     d.len = len;
925     d.idxmap = idxmap;
926     d.bits = bits;
927 
928     /* Allocate a separate data block for each destination cpu.  */
929     CPU_FOREACH(dst_cpu) {
930         if (dst_cpu != src_cpu) {
931             p = g_memdup(&d, sizeof(d));
932             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
933                              RUN_ON_CPU_HOST_PTR(p));
934         }
935     }
936 
937     p = g_memdup(&d, sizeof(d));
938     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
939                           RUN_ON_CPU_HOST_PTR(p));
940 }
941 
942 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
943                                                    vaddr addr,
944                                                    uint16_t idxmap,
945                                                    unsigned bits)
946 {
947     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
948                                               idxmap, bits);
949 }
950 
951 /* update the TLBs so that writes to code in the virtual page 'addr'
952    can be detected */
953 void tlb_protect_code(ram_addr_t ram_addr)
954 {
955     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
956                                              TARGET_PAGE_SIZE,
957                                              DIRTY_MEMORY_CODE);
958 }
959 
960 /* update the TLB so that writes in physical page 'phys_addr' are no longer
961    tested for self modifying code */
962 void tlb_unprotect_code(ram_addr_t ram_addr)
963 {
964     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
965 }
966 
967 
968 /*
969  * Dirty write flag handling
970  *
971  * When the TCG code writes to a location it looks up the address in
972  * the TLB and uses that data to compute the final address. If any of
973  * the lower bits of the address are set then the slow path is forced.
974  * There are a number of reasons to do this but for normal RAM the
975  * most usual is detecting writes to code regions which may invalidate
976  * generated code.
977  *
978  * Other vCPUs might be reading their TLBs during guest execution, so we update
979  * te->addr_write with qatomic_set. We don't need to worry about this for
980  * oversized guests as MTTCG is disabled for them.
981  *
982  * Called with tlb_c.lock held.
983  */
984 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
985                                          uintptr_t start, uintptr_t length)
986 {
987     uintptr_t addr = tlb_entry->addr_write;
988 
989     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
990                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
991         addr &= TARGET_PAGE_MASK;
992         addr += tlb_entry->addend;
993         if ((addr - start) < length) {
994 #if TARGET_LONG_BITS == 32
995             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
996             ptr_write += HOST_BIG_ENDIAN;
997             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
998 #elif TCG_OVERSIZED_GUEST
999             tlb_entry->addr_write |= TLB_NOTDIRTY;
1000 #else
1001             qatomic_set(&tlb_entry->addr_write,
1002                         tlb_entry->addr_write | TLB_NOTDIRTY);
1003 #endif
1004         }
1005     }
1006 }
1007 
1008 /*
1009  * Called with tlb_c.lock held.
1010  * Called only from the vCPU context, i.e. the TLB's owner thread.
1011  */
1012 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1013 {
1014     *d = *s;
1015 }
1016 
1017 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1018  * the target vCPU).
1019  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1020  * thing actually updated is the target TLB entry ->addr_write flags.
1021  */
1022 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1023 {
1024     int mmu_idx;
1025 
1026     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1027     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1028         unsigned int i;
1029         unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
1030 
1031         for (i = 0; i < n; i++) {
1032             tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
1033                                          start1, length);
1034         }
1035 
1036         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1037             tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
1038                                          start1, length);
1039         }
1040     }
1041     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1042 }
1043 
1044 /* Called with tlb_c.lock held */
1045 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1046                                          vaddr addr)
1047 {
1048     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1049         tlb_entry->addr_write = addr;
1050     }
1051 }
1052 
1053 /* update the TLB corresponding to virtual page vaddr
1054    so that it is no longer dirty */
1055 void tlb_set_dirty(CPUState *cpu, vaddr addr)
1056 {
1057     int mmu_idx;
1058 
1059     assert_cpu_is_self(cpu);
1060 
1061     addr &= TARGET_PAGE_MASK;
1062     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1063     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1064         tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
1065     }
1066 
1067     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1068         int k;
1069         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1070             tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
1071         }
1072     }
1073     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1074 }
1075 
1076 /* Our TLB does not support large pages, so remember the area covered by
1077    large pages and trigger a full TLB flush if these are invalidated.  */
1078 static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
1079                                vaddr addr, uint64_t size)
1080 {
1081     vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
1082     vaddr lp_mask = ~(size - 1);
1083 
1084     if (lp_addr == (vaddr)-1) {
1085         /* No previous large page.  */
1086         lp_addr = addr;
1087     } else {
1088         /* Extend the existing region to include the new page.
1089            This is a compromise between unnecessary flushes and
1090            the cost of maintaining a full variable size TLB.  */
1091         lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
1092         while (((lp_addr ^ addr) & lp_mask) != 0) {
1093             lp_mask <<= 1;
1094         }
1095     }
1096     cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1097     cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
1098 }
1099 
1100 static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1101                                    vaddr address, int flags,
1102                                    MMUAccessType access_type, bool enable)
1103 {
1104     if (enable) {
1105         address |= flags & TLB_FLAGS_MASK;
1106         flags &= TLB_SLOW_FLAGS_MASK;
1107         if (flags) {
1108             address |= TLB_FORCE_SLOW;
1109         }
1110     } else {
1111         address = -1;
1112         flags = 0;
1113     }
1114     ent->addr_idx[access_type] = address;
1115     full->slow_flags[access_type] = flags;
1116 }
1117 
1118 /*
1119  * Add a new TLB entry. At most one entry for a given virtual address
1120  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1121  * supplied size is only used by tlb_flush_page.
1122  *
1123  * Called from TCG-generated code, which is under an RCU read-side
1124  * critical section.
1125  */
1126 void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1127                        vaddr addr, CPUTLBEntryFull *full)
1128 {
1129     CPUTLB *tlb = &cpu->neg.tlb;
1130     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1131     MemoryRegionSection *section;
1132     unsigned int index, read_flags, write_flags;
1133     uintptr_t addend;
1134     CPUTLBEntry *te, tn;
1135     hwaddr iotlb, xlat, sz, paddr_page;
1136     vaddr addr_page;
1137     int asidx, wp_flags, prot;
1138     bool is_ram, is_romd;
1139 
1140     assert_cpu_is_self(cpu);
1141 
1142     if (full->lg_page_size <= TARGET_PAGE_BITS) {
1143         sz = TARGET_PAGE_SIZE;
1144     } else {
1145         sz = (hwaddr)1 << full->lg_page_size;
1146         tlb_add_large_page(cpu, mmu_idx, addr, sz);
1147     }
1148     addr_page = addr & TARGET_PAGE_MASK;
1149     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1150 
1151     prot = full->prot;
1152     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1153     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1154                                                 &xlat, &sz, full->attrs, &prot);
1155     assert(sz >= TARGET_PAGE_SIZE);
1156 
1157     tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1158               " prot=%x idx=%d\n",
1159               addr, full->phys_addr, prot, mmu_idx);
1160 
1161     read_flags = 0;
1162     if (full->lg_page_size < TARGET_PAGE_BITS) {
1163         /* Repeat the MMU check and TLB fill on every access.  */
1164         read_flags |= TLB_INVALID_MASK;
1165     }
1166     if (full->attrs.byte_swap) {
1167         read_flags |= TLB_BSWAP;
1168     }
1169 
1170     is_ram = memory_region_is_ram(section->mr);
1171     is_romd = memory_region_is_romd(section->mr);
1172 
1173     if (is_ram || is_romd) {
1174         /* RAM and ROMD both have associated host memory. */
1175         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1176     } else {
1177         /* I/O does not; force the host address to NULL. */
1178         addend = 0;
1179     }
1180 
1181     write_flags = read_flags;
1182     if (is_ram) {
1183         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1184         assert(!(iotlb & ~TARGET_PAGE_MASK));
1185         /*
1186          * Computing is_clean is expensive; avoid all that unless
1187          * the page is actually writable.
1188          */
1189         if (prot & PAGE_WRITE) {
1190             if (section->readonly) {
1191                 write_flags |= TLB_DISCARD_WRITE;
1192             } else if (cpu_physical_memory_is_clean(iotlb)) {
1193                 write_flags |= TLB_NOTDIRTY;
1194             }
1195         }
1196     } else {
1197         /* I/O or ROMD */
1198         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1199         /*
1200          * Writes to romd devices must go through MMIO to enable write.
1201          * Reads to romd devices go through the ram_ptr found above,
1202          * but of course reads to I/O must go through MMIO.
1203          */
1204         write_flags |= TLB_MMIO;
1205         if (!is_romd) {
1206             read_flags = write_flags;
1207         }
1208     }
1209 
1210     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
1211                                               TARGET_PAGE_SIZE);
1212 
1213     index = tlb_index(cpu, mmu_idx, addr_page);
1214     te = tlb_entry(cpu, mmu_idx, addr_page);
1215 
1216     /*
1217      * Hold the TLB lock for the rest of the function. We could acquire/release
1218      * the lock several times in the function, but it is faster to amortize the
1219      * acquisition cost by acquiring it just once. Note that this leads to
1220      * a longer critical section, but this is not a concern since the TLB lock
1221      * is unlikely to be contended.
1222      */
1223     qemu_spin_lock(&tlb->c.lock);
1224 
1225     /* Note that the tlb is no longer clean.  */
1226     tlb->c.dirty |= 1 << mmu_idx;
1227 
1228     /* Make sure there's no cached translation for the new page.  */
1229     tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
1230 
1231     /*
1232      * Only evict the old entry to the victim tlb if it's for a
1233      * different page; otherwise just overwrite the stale data.
1234      */
1235     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1236         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1237         CPUTLBEntry *tv = &desc->vtable[vidx];
1238 
1239         /* Evict the old entry into the victim tlb.  */
1240         copy_tlb_helper_locked(tv, te);
1241         desc->vfulltlb[vidx] = desc->fulltlb[index];
1242         tlb_n_used_entries_dec(cpu, mmu_idx);
1243     }
1244 
1245     /* refill the tlb */
1246     /*
1247      * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
1248      * aligned ram_addr_t of the page base of the target RAM.
1249      * Otherwise, iotlb contains
1250      *  - a physical section number in the lower TARGET_PAGE_BITS
1251      *  - the offset within section->mr of the page base (I/O, ROMD) with the
1252      *    TARGET_PAGE_BITS masked off.
1253      * We subtract addr_page (which is page aligned and thus won't
1254      * disturb the low bits) to give an offset which can be added to the
1255      * (non-page-aligned) vaddr of the eventual memory access to get
1256      * the MemoryRegion offset for the access. Note that the vaddr we
1257      * subtract here is that of the page base, and not the same as the
1258      * vaddr we add back in io_prepare()/get_page_addr_code().
1259      */
1260     desc->fulltlb[index] = *full;
1261     full = &desc->fulltlb[index];
1262     full->xlat_section = iotlb - addr_page;
1263     full->phys_addr = paddr_page;
1264 
1265     /* Now calculate the new entry */
1266     tn.addend = addend - addr_page;
1267 
1268     tlb_set_compare(full, &tn, addr_page, read_flags,
1269                     MMU_INST_FETCH, prot & PAGE_EXEC);
1270 
1271     if (wp_flags & BP_MEM_READ) {
1272         read_flags |= TLB_WATCHPOINT;
1273     }
1274     tlb_set_compare(full, &tn, addr_page, read_flags,
1275                     MMU_DATA_LOAD, prot & PAGE_READ);
1276 
1277     if (prot & PAGE_WRITE_INV) {
1278         write_flags |= TLB_INVALID_MASK;
1279     }
1280     if (wp_flags & BP_MEM_WRITE) {
1281         write_flags |= TLB_WATCHPOINT;
1282     }
1283     tlb_set_compare(full, &tn, addr_page, write_flags,
1284                     MMU_DATA_STORE, prot & PAGE_WRITE);
1285 
1286     copy_tlb_helper_locked(te, &tn);
1287     tlb_n_used_entries_inc(cpu, mmu_idx);
1288     qemu_spin_unlock(&tlb->c.lock);
1289 }
1290 
1291 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
1292                              hwaddr paddr, MemTxAttrs attrs, int prot,
1293                              int mmu_idx, uint64_t size)
1294 {
1295     CPUTLBEntryFull full = {
1296         .phys_addr = paddr,
1297         .attrs = attrs,
1298         .prot = prot,
1299         .lg_page_size = ctz64(size)
1300     };
1301 
1302     assert(is_power_of_2(size));
1303     tlb_set_page_full(cpu, mmu_idx, addr, &full);
1304 }
1305 
1306 void tlb_set_page(CPUState *cpu, vaddr addr,
1307                   hwaddr paddr, int prot,
1308                   int mmu_idx, uint64_t size)
1309 {
1310     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1311                             prot, mmu_idx, size);
1312 }
1313 
1314 /*
1315  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1316  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1317  * be discarded and looked up again (e.g. via tlb_entry()).
1318  */
1319 static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1320                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1321 {
1322     bool ok;
1323 
1324     /*
1325      * This is not a probe, so only valid return is success; failure
1326      * should result in exception + longjmp to the cpu loop.
1327      */
1328     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1329                                     access_type, mmu_idx, false, retaddr);
1330     assert(ok);
1331 }
1332 
1333 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1334                                         MMUAccessType access_type,
1335                                         int mmu_idx, uintptr_t retaddr)
1336 {
1337     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1338                                           mmu_idx, retaddr);
1339 }
1340 
1341 static MemoryRegionSection *
1342 io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
1343            MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
1344 {
1345     MemoryRegionSection *section;
1346     hwaddr mr_offset;
1347 
1348     section = iotlb_to_section(cpu, xlat, attrs);
1349     mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
1350     cpu->mem_io_pc = retaddr;
1351     if (!cpu->neg.can_do_io) {
1352         cpu_io_recompile(cpu, retaddr);
1353     }
1354 
1355     *out_offset = mr_offset;
1356     return section;
1357 }
1358 
1359 static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
1360                       unsigned size, MMUAccessType access_type, int mmu_idx,
1361                       MemTxResult response, uintptr_t retaddr)
1362 {
1363     if (!cpu->ignore_memory_transaction_failures
1364         && cpu->cc->tcg_ops->do_transaction_failed) {
1365         hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1366 
1367         cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1368                                                 access_type, mmu_idx,
1369                                                 full->attrs, response, retaddr);
1370     }
1371 }
1372 
1373 /* Return true if ADDR is present in the victim tlb, and has been copied
1374    back to the main tlb.  */
1375 static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
1376                            MMUAccessType access_type, vaddr page)
1377 {
1378     size_t vidx;
1379 
1380     assert_cpu_is_self(cpu);
1381     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1382         CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
1383         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1384 
1385         if (cmp == page) {
1386             /* Found entry in victim tlb, swap tlb and iotlb.  */
1387             CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
1388 
1389             qemu_spin_lock(&cpu->neg.tlb.c.lock);
1390             copy_tlb_helper_locked(&tmptlb, tlb);
1391             copy_tlb_helper_locked(tlb, vtlb);
1392             copy_tlb_helper_locked(vtlb, &tmptlb);
1393             qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1394 
1395             CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1396             CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
1397             CPUTLBEntryFull tmpf;
1398             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1399             return true;
1400         }
1401     }
1402     return false;
1403 }
1404 
1405 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1406                            CPUTLBEntryFull *full, uintptr_t retaddr)
1407 {
1408     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1409 
1410     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1411 
1412     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1413         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1414     }
1415 
1416     /*
1417      * Set both VGA and migration bits for simplicity and to remove
1418      * the notdirty callback faster.
1419      */
1420     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1421 
1422     /* We remove the notdirty callback only if the code has been flushed. */
1423     if (!cpu_physical_memory_is_clean(ram_addr)) {
1424         trace_memory_notdirty_set_dirty(mem_vaddr);
1425         tlb_set_dirty(cpu, mem_vaddr);
1426     }
1427 }
1428 
1429 static int probe_access_internal(CPUState *cpu, vaddr addr,
1430                                  int fault_size, MMUAccessType access_type,
1431                                  int mmu_idx, bool nonfault,
1432                                  void **phost, CPUTLBEntryFull **pfull,
1433                                  uintptr_t retaddr, bool check_mem_cbs)
1434 {
1435     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1436     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1437     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1438     vaddr page_addr = addr & TARGET_PAGE_MASK;
1439     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1440     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
1441     CPUTLBEntryFull *full;
1442 
1443     if (!tlb_hit_page(tlb_addr, page_addr)) {
1444         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
1445             if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
1446                                             mmu_idx, nonfault, retaddr)) {
1447                 /* Non-faulting page table read failed.  */
1448                 *phost = NULL;
1449                 *pfull = NULL;
1450                 return TLB_INVALID_MASK;
1451             }
1452 
1453             /* TLB resize via tlb_fill may have moved the entry.  */
1454             index = tlb_index(cpu, mmu_idx, addr);
1455             entry = tlb_entry(cpu, mmu_idx, addr);
1456 
1457             /*
1458              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1459              * to force the next access through tlb_fill.  We've just
1460              * called tlb_fill, so we know that this entry *is* valid.
1461              */
1462             flags &= ~TLB_INVALID_MASK;
1463         }
1464         tlb_addr = tlb_read_idx(entry, access_type);
1465     }
1466     flags &= tlb_addr;
1467 
1468     *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1469     flags |= full->slow_flags[access_type];
1470 
1471     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1472     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
1473         ||
1474         (access_type != MMU_INST_FETCH && force_mmio)) {
1475         *phost = NULL;
1476         return TLB_MMIO;
1477     }
1478 
1479     /* Everything else is RAM. */
1480     *phost = (void *)((uintptr_t)addr + entry->addend);
1481     return flags;
1482 }
1483 
1484 int probe_access_full(CPUArchState *env, vaddr addr, int size,
1485                       MMUAccessType access_type, int mmu_idx,
1486                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1487                       uintptr_t retaddr)
1488 {
1489     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1490                                       mmu_idx, nonfault, phost, pfull, retaddr,
1491                                       true);
1492 
1493     /* Handle clean RAM pages.  */
1494     if (unlikely(flags & TLB_NOTDIRTY)) {
1495         notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1496         flags &= ~TLB_NOTDIRTY;
1497     }
1498 
1499     return flags;
1500 }
1501 
1502 int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1503                           MMUAccessType access_type, int mmu_idx,
1504                           void **phost, CPUTLBEntryFull **pfull)
1505 {
1506     void *discard_phost;
1507     CPUTLBEntryFull *discard_tlb;
1508 
1509     /* privately handle users that don't need full results */
1510     phost = phost ? phost : &discard_phost;
1511     pfull = pfull ? pfull : &discard_tlb;
1512 
1513     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1514                                       mmu_idx, true, phost, pfull, 0, false);
1515 
1516     /* Handle clean RAM pages.  */
1517     if (unlikely(flags & TLB_NOTDIRTY)) {
1518         notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
1519         flags &= ~TLB_NOTDIRTY;
1520     }
1521 
1522     return flags;
1523 }
1524 
1525 int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1526                        MMUAccessType access_type, int mmu_idx,
1527                        bool nonfault, void **phost, uintptr_t retaddr)
1528 {
1529     CPUTLBEntryFull *full;
1530     int flags;
1531 
1532     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1533 
1534     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1535                                   mmu_idx, nonfault, phost, &full, retaddr,
1536                                   true);
1537 
1538     /* Handle clean RAM pages. */
1539     if (unlikely(flags & TLB_NOTDIRTY)) {
1540         notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1541         flags &= ~TLB_NOTDIRTY;
1542     }
1543 
1544     return flags;
1545 }
1546 
1547 void *probe_access(CPUArchState *env, vaddr addr, int size,
1548                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1549 {
1550     CPUTLBEntryFull *full;
1551     void *host;
1552     int flags;
1553 
1554     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1555 
1556     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1557                                   mmu_idx, false, &host, &full, retaddr,
1558                                   true);
1559 
1560     /* Per the interface, size == 0 merely faults the access. */
1561     if (size == 0) {
1562         return NULL;
1563     }
1564 
1565     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1566         /* Handle watchpoints.  */
1567         if (flags & TLB_WATCHPOINT) {
1568             int wp_access = (access_type == MMU_DATA_STORE
1569                              ? BP_MEM_WRITE : BP_MEM_READ);
1570             cpu_check_watchpoint(env_cpu(env), addr, size,
1571                                  full->attrs, wp_access, retaddr);
1572         }
1573 
1574         /* Handle clean RAM pages.  */
1575         if (flags & TLB_NOTDIRTY) {
1576             notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1577         }
1578     }
1579 
1580     return host;
1581 }
1582 
1583 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1584                         MMUAccessType access_type, int mmu_idx)
1585 {
1586     CPUTLBEntryFull *full;
1587     void *host;
1588     int flags;
1589 
1590     flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
1591                                   mmu_idx, true, &host, &full, 0, false);
1592 
1593     /* No combination of flags are expected by the caller. */
1594     return flags ? NULL : host;
1595 }
1596 
1597 /*
1598  * Return a ram_addr_t for the virtual address for execution.
1599  *
1600  * Return -1 if we can't translate and execute from an entire page
1601  * of RAM.  This will force us to execute by loading and translating
1602  * one insn at a time, without caching.
1603  *
1604  * NOTE: This function will trigger an exception if the page is
1605  * not executable.
1606  */
1607 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
1608                                         void **hostp)
1609 {
1610     CPUTLBEntryFull *full;
1611     void *p;
1612 
1613     (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
1614                                 cpu_mmu_index(env, true), false,
1615                                 &p, &full, 0, false);
1616     if (p == NULL) {
1617         return -1;
1618     }
1619 
1620     if (full->lg_page_size < TARGET_PAGE_BITS) {
1621         return -1;
1622     }
1623 
1624     if (hostp) {
1625         *hostp = p;
1626     }
1627     return qemu_ram_addr_from_host_nofail(p);
1628 }
1629 
1630 /* Load/store with atomicity primitives. */
1631 #include "ldst_atomicity.c.inc"
1632 
1633 #ifdef CONFIG_PLUGIN
1634 /*
1635  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1636  * This should be a hot path as we will have just looked this path up
1637  * in the softmmu lookup code (or helper). We don't handle re-fills or
1638  * checking the victim table. This is purely informational.
1639  *
1640  * The one corner case is i/o write, which can cause changes to the
1641  * address space.  Those changes, and the corresponding tlb flush,
1642  * should be delayed until the next TB, so even then this ought not fail.
1643  * But check, Just in Case.
1644  */
1645 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1646                        bool is_store, struct qemu_plugin_hwaddr *data)
1647 {
1648     CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
1649     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1650     MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
1651     uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
1652     CPUTLBEntryFull *full;
1653 
1654     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1655         return false;
1656     }
1657 
1658     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1659     data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1660 
1661     /* We must have an iotlb entry for MMIO */
1662     if (tlb_addr & TLB_MMIO) {
1663         MemoryRegionSection *section =
1664             iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
1665                              full->attrs);
1666         data->is_io = true;
1667         data->mr = section->mr;
1668     } else {
1669         data->is_io = false;
1670         data->mr = NULL;
1671     }
1672     return true;
1673 }
1674 #endif
1675 
1676 /*
1677  * Probe for a load/store operation.
1678  * Return the host address and into @flags.
1679  */
1680 
1681 typedef struct MMULookupPageData {
1682     CPUTLBEntryFull *full;
1683     void *haddr;
1684     vaddr addr;
1685     int flags;
1686     int size;
1687 } MMULookupPageData;
1688 
1689 typedef struct MMULookupLocals {
1690     MMULookupPageData page[2];
1691     MemOp memop;
1692     int mmu_idx;
1693 } MMULookupLocals;
1694 
1695 /**
1696  * mmu_lookup1: translate one page
1697  * @cpu: generic cpu state
1698  * @data: lookup parameters
1699  * @mmu_idx: virtual address context
1700  * @access_type: load/store/code
1701  * @ra: return address into tcg generated code, or 0
1702  *
1703  * Resolve the translation for the one page at @data.addr, filling in
1704  * the rest of @data with the results.  If the translation fails,
1705  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
1706  * @mmu_idx may have resized.
1707  */
1708 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
1709                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
1710 {
1711     vaddr addr = data->addr;
1712     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1713     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1714     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1715     bool maybe_resized = false;
1716     CPUTLBEntryFull *full;
1717     int flags;
1718 
1719     /* If the TLB entry is for a different page, reload and try again.  */
1720     if (!tlb_hit(tlb_addr, addr)) {
1721         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
1722                             addr & TARGET_PAGE_MASK)) {
1723             tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
1724             maybe_resized = true;
1725             index = tlb_index(cpu, mmu_idx, addr);
1726             entry = tlb_entry(cpu, mmu_idx, addr);
1727         }
1728         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
1729     }
1730 
1731     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1732     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
1733     flags |= full->slow_flags[access_type];
1734 
1735     data->full = full;
1736     data->flags = flags;
1737     /* Compute haddr speculatively; depending on flags it might be invalid. */
1738     data->haddr = (void *)((uintptr_t)addr + entry->addend);
1739 
1740     return maybe_resized;
1741 }
1742 
1743 /**
1744  * mmu_watch_or_dirty
1745  * @cpu: generic cpu state
1746  * @data: lookup parameters
1747  * @access_type: load/store/code
1748  * @ra: return address into tcg generated code, or 0
1749  *
1750  * Trigger watchpoints for @data.addr:@data.size;
1751  * record writes to protected clean pages.
1752  */
1753 static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
1754                                MMUAccessType access_type, uintptr_t ra)
1755 {
1756     CPUTLBEntryFull *full = data->full;
1757     vaddr addr = data->addr;
1758     int flags = data->flags;
1759     int size = data->size;
1760 
1761     /* On watchpoint hit, this will longjmp out.  */
1762     if (flags & TLB_WATCHPOINT) {
1763         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1764         cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
1765         flags &= ~TLB_WATCHPOINT;
1766     }
1767 
1768     /* Note that notdirty is only set for writes. */
1769     if (flags & TLB_NOTDIRTY) {
1770         notdirty_write(cpu, addr, size, full, ra);
1771         flags &= ~TLB_NOTDIRTY;
1772     }
1773     data->flags = flags;
1774 }
1775 
1776 /**
1777  * mmu_lookup: translate page(s)
1778  * @cpu: generic cpu state
1779  * @addr: virtual address
1780  * @oi: combined mmu_idx and MemOp
1781  * @ra: return address into tcg generated code, or 0
1782  * @access_type: load/store/code
1783  * @l: output result
1784  *
1785  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
1786  * bytes.  Return true if the lookup crosses a page boundary.
1787  */
1788 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1789                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
1790 {
1791     unsigned a_bits;
1792     bool crosspage;
1793     int flags;
1794 
1795     l->memop = get_memop(oi);
1796     l->mmu_idx = get_mmuidx(oi);
1797 
1798     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
1799 
1800     /* Handle CPU specific unaligned behaviour */
1801     a_bits = get_alignment_bits(l->memop);
1802     if (addr & ((1 << a_bits) - 1)) {
1803         cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
1804     }
1805 
1806     l->page[0].addr = addr;
1807     l->page[0].size = memop_size(l->memop);
1808     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
1809     l->page[1].size = 0;
1810     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
1811 
1812     if (likely(!crosspage)) {
1813         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1814 
1815         flags = l->page[0].flags;
1816         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1817             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1818         }
1819         if (unlikely(flags & TLB_BSWAP)) {
1820             l->memop ^= MO_BSWAP;
1821         }
1822     } else {
1823         /* Finish compute of page crossing. */
1824         int size0 = l->page[1].addr - addr;
1825         l->page[1].size = l->page[0].size - size0;
1826         l->page[0].size = size0;
1827 
1828         /*
1829          * Lookup both pages, recognizing exceptions from either.  If the
1830          * second lookup potentially resized, refresh first CPUTLBEntryFull.
1831          */
1832         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1833         if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
1834             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
1835             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
1836         }
1837 
1838         flags = l->page[0].flags | l->page[1].flags;
1839         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1840             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1841             mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
1842         }
1843 
1844         /*
1845          * Since target/sparc is the only user of TLB_BSWAP, and all
1846          * Sparc accesses are aligned, any treatment across two pages
1847          * would be arbitrary.  Refuse it until there's a use.
1848          */
1849         tcg_debug_assert((flags & TLB_BSWAP) == 0);
1850     }
1851 
1852     return crosspage;
1853 }
1854 
1855 /*
1856  * Probe for an atomic operation.  Do not allow unaligned operations,
1857  * or io operations to proceed.  Return the host address.
1858  */
1859 static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
1860                                int size, uintptr_t retaddr)
1861 {
1862     uintptr_t mmu_idx = get_mmuidx(oi);
1863     MemOp mop = get_memop(oi);
1864     int a_bits = get_alignment_bits(mop);
1865     uintptr_t index;
1866     CPUTLBEntry *tlbe;
1867     vaddr tlb_addr;
1868     void *hostaddr;
1869     CPUTLBEntryFull *full;
1870 
1871     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1872 
1873     /* Adjust the given return address.  */
1874     retaddr -= GETPC_ADJ;
1875 
1876     /* Enforce guest required alignment.  */
1877     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1878         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1879         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1880                              mmu_idx, retaddr);
1881     }
1882 
1883     /* Enforce qemu required alignment.  */
1884     if (unlikely(addr & (size - 1))) {
1885         /* We get here if guest alignment was not requested,
1886            or was not enforced by cpu_unaligned_access above.
1887            We might widen the access and emulate, but for now
1888            mark an exception and exit the cpu loop.  */
1889         goto stop_the_world;
1890     }
1891 
1892     index = tlb_index(env_cpu(env), mmu_idx, addr);
1893     tlbe = tlb_entry(env_cpu(env), mmu_idx, addr);
1894 
1895     /* Check TLB entry and enforce page permissions.  */
1896     tlb_addr = tlb_addr_write(tlbe);
1897     if (!tlb_hit(tlb_addr, addr)) {
1898         if (!victim_tlb_hit(env_cpu(env), mmu_idx, index, MMU_DATA_STORE,
1899                             addr & TARGET_PAGE_MASK)) {
1900             tlb_fill(env_cpu(env), addr, size,
1901                      MMU_DATA_STORE, mmu_idx, retaddr);
1902             index = tlb_index(env_cpu(env), mmu_idx, addr);
1903             tlbe = tlb_entry(env_cpu(env), mmu_idx, addr);
1904         }
1905         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1906     }
1907 
1908     /*
1909      * Let the guest notice RMW on a write-only page.
1910      * We have just verified that the page is writable.
1911      * Subpage lookups may have left TLB_INVALID_MASK set,
1912      * but addr_read will only be -1 if PAGE_READ was unset.
1913      */
1914     if (unlikely(tlbe->addr_read == -1)) {
1915         tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
1916         /*
1917          * Since we don't support reads and writes to different
1918          * addresses, and we do have the proper page loaded for
1919          * write, this shouldn't ever return.  But just in case,
1920          * handle via stop-the-world.
1921          */
1922         goto stop_the_world;
1923     }
1924     /* Collect tlb flags for read. */
1925     tlb_addr |= tlbe->addr_read;
1926 
1927     /* Notice an IO access or a needs-MMU-lookup access */
1928     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
1929         /* There's really nothing that can be done to
1930            support this apart from stop-the-world.  */
1931         goto stop_the_world;
1932     }
1933 
1934     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1935     full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1936 
1937     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1938         notdirty_write(env_cpu(env), addr, size, full, retaddr);
1939     }
1940 
1941     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
1942         int wp_flags = 0;
1943 
1944         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
1945             wp_flags |= BP_MEM_WRITE;
1946         }
1947         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
1948             wp_flags |= BP_MEM_READ;
1949         }
1950         if (wp_flags) {
1951             cpu_check_watchpoint(env_cpu(env), addr, size,
1952                                  full->attrs, wp_flags, retaddr);
1953         }
1954     }
1955 
1956     return hostaddr;
1957 
1958  stop_the_world:
1959     cpu_loop_exit_atomic(env_cpu(env), retaddr);
1960 }
1961 
1962 /*
1963  * Load Helpers
1964  *
1965  * We support two different access types. SOFTMMU_CODE_ACCESS is
1966  * specifically for reading instructions from system memory. It is
1967  * called by the translation loop and in some helpers where the code
1968  * is disassembled. It shouldn't be called directly by guest code.
1969  *
1970  * For the benefit of TCG generated code, we want to avoid the
1971  * complication of ABI-specific return type promotion and always
1972  * return a value extended to the register size of the host. This is
1973  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1974  * data, and for that we always have uint64_t.
1975  *
1976  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1977  */
1978 
1979 /**
1980  * do_ld_mmio_beN:
1981  * @cpu: generic cpu state
1982  * @full: page parameters
1983  * @ret_be: accumulated data
1984  * @addr: virtual address
1985  * @size: number of bytes
1986  * @mmu_idx: virtual address context
1987  * @ra: return address into tcg generated code, or 0
1988  * Context: iothread lock held
1989  *
1990  * Load @size bytes from @addr, which is memory-mapped i/o.
1991  * The bytes are concatenated in big-endian order with @ret_be.
1992  */
1993 static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
1994                                 uint64_t ret_be, vaddr addr, int size,
1995                                 int mmu_idx, MMUAccessType type, uintptr_t ra,
1996                                 MemoryRegion *mr, hwaddr mr_offset)
1997 {
1998     do {
1999         MemOp this_mop;
2000         unsigned this_size;
2001         uint64_t val;
2002         MemTxResult r;
2003 
2004         /* Read aligned pieces up to 8 bytes. */
2005         this_mop = ctz32(size | (int)addr | 8);
2006         this_size = 1 << this_mop;
2007         this_mop |= MO_BE;
2008 
2009         r = memory_region_dispatch_read(mr, mr_offset, &val,
2010                                         this_mop, full->attrs);
2011         if (unlikely(r != MEMTX_OK)) {
2012             io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
2013         }
2014         if (this_size == 8) {
2015             return val;
2016         }
2017 
2018         ret_be = (ret_be << (this_size * 8)) | val;
2019         addr += this_size;
2020         mr_offset += this_size;
2021         size -= this_size;
2022     } while (size);
2023 
2024     return ret_be;
2025 }
2026 
2027 static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2028                                uint64_t ret_be, vaddr addr, int size,
2029                                int mmu_idx, MMUAccessType type, uintptr_t ra)
2030 {
2031     MemoryRegionSection *section;
2032     MemoryRegion *mr;
2033     hwaddr mr_offset;
2034     MemTxAttrs attrs;
2035     uint64_t ret;
2036 
2037     tcg_debug_assert(size > 0 && size <= 8);
2038 
2039     attrs = full->attrs;
2040     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2041     mr = section->mr;
2042 
2043     qemu_mutex_lock_iothread();
2044     ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
2045                           type, ra, mr, mr_offset);
2046     qemu_mutex_unlock_iothread();
2047 
2048     return ret;
2049 }
2050 
2051 static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2052                                uint64_t ret_be, vaddr addr, int size,
2053                                int mmu_idx, uintptr_t ra)
2054 {
2055     MemoryRegionSection *section;
2056     MemoryRegion *mr;
2057     hwaddr mr_offset;
2058     MemTxAttrs attrs;
2059     uint64_t a, b;
2060 
2061     tcg_debug_assert(size > 8 && size <= 16);
2062 
2063     attrs = full->attrs;
2064     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2065     mr = section->mr;
2066 
2067     qemu_mutex_lock_iothread();
2068     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
2069                         MMU_DATA_LOAD, ra, mr, mr_offset);
2070     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
2071                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
2072     qemu_mutex_unlock_iothread();
2073 
2074     return int128_make128(b, a);
2075 }
2076 
2077 /**
2078  * do_ld_bytes_beN
2079  * @p: translation parameters
2080  * @ret_be: accumulated data
2081  *
2082  * Load @p->size bytes from @p->haddr, which is RAM.
2083  * The bytes to concatenated in big-endian order with @ret_be.
2084  */
2085 static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
2086 {
2087     uint8_t *haddr = p->haddr;
2088     int i, size = p->size;
2089 
2090     for (i = 0; i < size; i++) {
2091         ret_be = (ret_be << 8) | haddr[i];
2092     }
2093     return ret_be;
2094 }
2095 
2096 /**
2097  * do_ld_parts_beN
2098  * @p: translation parameters
2099  * @ret_be: accumulated data
2100  *
2101  * As do_ld_bytes_beN, but atomically on each aligned part.
2102  */
2103 static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2104 {
2105     void *haddr = p->haddr;
2106     int size = p->size;
2107 
2108     do {
2109         uint64_t x;
2110         int n;
2111 
2112         /*
2113          * Find minimum of alignment and size.
2114          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2115          * would have only checked the low bits of addr|size once at the start,
2116          * but is just as easy.
2117          */
2118         switch (((uintptr_t)haddr | size) & 7) {
2119         case 4:
2120             x = cpu_to_be32(load_atomic4(haddr));
2121             ret_be = (ret_be << 32) | x;
2122             n = 4;
2123             break;
2124         case 2:
2125         case 6:
2126             x = cpu_to_be16(load_atomic2(haddr));
2127             ret_be = (ret_be << 16) | x;
2128             n = 2;
2129             break;
2130         default:
2131             x = *(uint8_t *)haddr;
2132             ret_be = (ret_be << 8) | x;
2133             n = 1;
2134             break;
2135         case 0:
2136             g_assert_not_reached();
2137         }
2138         haddr += n;
2139         size -= n;
2140     } while (size != 0);
2141     return ret_be;
2142 }
2143 
2144 /**
2145  * do_ld_parts_be4
2146  * @p: translation parameters
2147  * @ret_be: accumulated data
2148  *
2149  * As do_ld_bytes_beN, but with one atomic load.
2150  * Four aligned bytes are guaranteed to cover the load.
2151  */
2152 static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2153 {
2154     int o = p->addr & 3;
2155     uint32_t x = load_atomic4(p->haddr - o);
2156 
2157     x = cpu_to_be32(x);
2158     x <<= o * 8;
2159     x >>= (4 - p->size) * 8;
2160     return (ret_be << (p->size * 8)) | x;
2161 }
2162 
2163 /**
2164  * do_ld_parts_be8
2165  * @p: translation parameters
2166  * @ret_be: accumulated data
2167  *
2168  * As do_ld_bytes_beN, but with one atomic load.
2169  * Eight aligned bytes are guaranteed to cover the load.
2170  */
2171 static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2172                                 MMULookupPageData *p, uint64_t ret_be)
2173 {
2174     int o = p->addr & 7;
2175     uint64_t x = load_atomic8_or_exit(cpu_env(cpu), ra, p->haddr - o);
2176 
2177     x = cpu_to_be64(x);
2178     x <<= o * 8;
2179     x >>= (8 - p->size) * 8;
2180     return (ret_be << (p->size * 8)) | x;
2181 }
2182 
2183 /**
2184  * do_ld_parts_be16
2185  * @p: translation parameters
2186  * @ret_be: accumulated data
2187  *
2188  * As do_ld_bytes_beN, but with one atomic load.
2189  * 16 aligned bytes are guaranteed to cover the load.
2190  */
2191 static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
2192                                MMULookupPageData *p, uint64_t ret_be)
2193 {
2194     int o = p->addr & 15;
2195     Int128 x, y = load_atomic16_or_exit(cpu_env(cpu), ra, p->haddr - o);
2196     int size = p->size;
2197 
2198     if (!HOST_BIG_ENDIAN) {
2199         y = bswap128(y);
2200     }
2201     y = int128_lshift(y, o * 8);
2202     y = int128_urshift(y, (16 - size) * 8);
2203     x = int128_make64(ret_be);
2204     x = int128_lshift(x, size * 8);
2205     return int128_or(x, y);
2206 }
2207 
2208 /*
2209  * Wrapper for the above.
2210  */
2211 static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2212                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2213                           MemOp mop, uintptr_t ra)
2214 {
2215     MemOp atom;
2216     unsigned tmp, half_size;
2217 
2218     if (unlikely(p->flags & TLB_MMIO)) {
2219         return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
2220                               mmu_idx, type, ra);
2221     }
2222 
2223     /*
2224      * It is a given that we cross a page and therefore there is no
2225      * atomicity for the load as a whole, but subobjects may need attention.
2226      */
2227     atom = mop & MO_ATOM_MASK;
2228     switch (atom) {
2229     case MO_ATOM_SUBALIGN:
2230         return do_ld_parts_beN(p, ret_be);
2231 
2232     case MO_ATOM_IFALIGN_PAIR:
2233     case MO_ATOM_WITHIN16_PAIR:
2234         tmp = mop & MO_SIZE;
2235         tmp = tmp ? tmp - 1 : 0;
2236         half_size = 1 << tmp;
2237         if (atom == MO_ATOM_IFALIGN_PAIR
2238             ? p->size == half_size
2239             : p->size >= half_size) {
2240             if (!HAVE_al8_fast && p->size < 4) {
2241                 return do_ld_whole_be4(p, ret_be);
2242             } else {
2243                 return do_ld_whole_be8(cpu, ra, p, ret_be);
2244             }
2245         }
2246         /* fall through */
2247 
2248     case MO_ATOM_IFALIGN:
2249     case MO_ATOM_WITHIN16:
2250     case MO_ATOM_NONE:
2251         return do_ld_bytes_beN(p, ret_be);
2252 
2253     default:
2254         g_assert_not_reached();
2255     }
2256 }
2257 
2258 /*
2259  * Wrapper for the above, for 8 < size < 16.
2260  */
2261 static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
2262                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
2263 {
2264     int size = p->size;
2265     uint64_t b;
2266     MemOp atom;
2267 
2268     if (unlikely(p->flags & TLB_MMIO)) {
2269         return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
2270     }
2271 
2272     /*
2273      * It is a given that we cross a page and therefore there is no
2274      * atomicity for the load as a whole, but subobjects may need attention.
2275      */
2276     atom = mop & MO_ATOM_MASK;
2277     switch (atom) {
2278     case MO_ATOM_SUBALIGN:
2279         p->size = size - 8;
2280         a = do_ld_parts_beN(p, a);
2281         p->haddr += size - 8;
2282         p->size = 8;
2283         b = do_ld_parts_beN(p, 0);
2284         break;
2285 
2286     case MO_ATOM_WITHIN16_PAIR:
2287         /* Since size > 8, this is the half that must be atomic. */
2288         return do_ld_whole_be16(cpu, ra, p, a);
2289 
2290     case MO_ATOM_IFALIGN_PAIR:
2291         /*
2292          * Since size > 8, both halves are misaligned,
2293          * and so neither is atomic.
2294          */
2295     case MO_ATOM_IFALIGN:
2296     case MO_ATOM_WITHIN16:
2297     case MO_ATOM_NONE:
2298         p->size = size - 8;
2299         a = do_ld_bytes_beN(p, a);
2300         b = ldq_be_p(p->haddr + size - 8);
2301         break;
2302 
2303     default:
2304         g_assert_not_reached();
2305     }
2306 
2307     return int128_make128(b, a);
2308 }
2309 
2310 static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2311                        MMUAccessType type, uintptr_t ra)
2312 {
2313     if (unlikely(p->flags & TLB_MMIO)) {
2314         return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
2315     } else {
2316         return *(uint8_t *)p->haddr;
2317     }
2318 }
2319 
2320 static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2321                         MMUAccessType type, MemOp memop, uintptr_t ra)
2322 {
2323     uint16_t ret;
2324 
2325     if (unlikely(p->flags & TLB_MMIO)) {
2326         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2327         if ((memop & MO_BSWAP) == MO_LE) {
2328             ret = bswap16(ret);
2329         }
2330     } else {
2331         /* Perform the load host endian, then swap if necessary. */
2332         ret = load_atom_2(cpu_env(cpu), ra, p->haddr, memop);
2333         if (memop & MO_BSWAP) {
2334             ret = bswap16(ret);
2335         }
2336     }
2337     return ret;
2338 }
2339 
2340 static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2341                         MMUAccessType type, MemOp memop, uintptr_t ra)
2342 {
2343     uint32_t ret;
2344 
2345     if (unlikely(p->flags & TLB_MMIO)) {
2346         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2347         if ((memop & MO_BSWAP) == MO_LE) {
2348             ret = bswap32(ret);
2349         }
2350     } else {
2351         /* Perform the load host endian. */
2352         ret = load_atom_4(cpu_env(cpu), ra, p->haddr, memop);
2353         if (memop & MO_BSWAP) {
2354             ret = bswap32(ret);
2355         }
2356     }
2357     return ret;
2358 }
2359 
2360 static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2361                         MMUAccessType type, MemOp memop, uintptr_t ra)
2362 {
2363     uint64_t ret;
2364 
2365     if (unlikely(p->flags & TLB_MMIO)) {
2366         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2367         if ((memop & MO_BSWAP) == MO_LE) {
2368             ret = bswap64(ret);
2369         }
2370     } else {
2371         /* Perform the load host endian. */
2372         ret = load_atom_8(cpu_env(cpu), ra, p->haddr, memop);
2373         if (memop & MO_BSWAP) {
2374             ret = bswap64(ret);
2375         }
2376     }
2377     return ret;
2378 }
2379 
2380 static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2381                           uintptr_t ra, MMUAccessType access_type)
2382 {
2383     MMULookupLocals l;
2384     bool crosspage;
2385 
2386     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2387     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2388     tcg_debug_assert(!crosspage);
2389 
2390     return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2391 }
2392 
2393 tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
2394                                  MemOpIdx oi, uintptr_t retaddr)
2395 {
2396     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
2397     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
2398 }
2399 
2400 static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2401                            uintptr_t ra, MMUAccessType access_type)
2402 {
2403     MMULookupLocals l;
2404     bool crosspage;
2405     uint16_t ret;
2406     uint8_t a, b;
2407 
2408     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2409     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2410     if (likely(!crosspage)) {
2411         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2412     }
2413 
2414     a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2415     b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
2416 
2417     if ((l.memop & MO_BSWAP) == MO_LE) {
2418         ret = a | (b << 8);
2419     } else {
2420         ret = b | (a << 8);
2421     }
2422     return ret;
2423 }
2424 
2425 tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
2426                                  MemOpIdx oi, uintptr_t retaddr)
2427 {
2428     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2429     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
2430 }
2431 
2432 static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2433                            uintptr_t ra, MMUAccessType access_type)
2434 {
2435     MMULookupLocals l;
2436     bool crosspage;
2437     uint32_t ret;
2438 
2439     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2440     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2441     if (likely(!crosspage)) {
2442         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2443     }
2444 
2445     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2446     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2447     if ((l.memop & MO_BSWAP) == MO_LE) {
2448         ret = bswap32(ret);
2449     }
2450     return ret;
2451 }
2452 
2453 tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
2454                                  MemOpIdx oi, uintptr_t retaddr)
2455 {
2456     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
2457     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
2458 }
2459 
2460 static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2461                            uintptr_t ra, MMUAccessType access_type)
2462 {
2463     MMULookupLocals l;
2464     bool crosspage;
2465     uint64_t ret;
2466 
2467     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2468     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2469     if (likely(!crosspage)) {
2470         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2471     }
2472 
2473     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2474     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2475     if ((l.memop & MO_BSWAP) == MO_LE) {
2476         ret = bswap64(ret);
2477     }
2478     return ret;
2479 }
2480 
2481 uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
2482                         MemOpIdx oi, uintptr_t retaddr)
2483 {
2484     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
2485     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
2486 }
2487 
2488 /*
2489  * Provide signed versions of the load routines as well.  We can of course
2490  * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
2491  */
2492 
2493 tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
2494                                  MemOpIdx oi, uintptr_t retaddr)
2495 {
2496     return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
2497 }
2498 
2499 tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
2500                                  MemOpIdx oi, uintptr_t retaddr)
2501 {
2502     return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
2503 }
2504 
2505 tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
2506                                  MemOpIdx oi, uintptr_t retaddr)
2507 {
2508     return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
2509 }
2510 
2511 static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
2512                           MemOpIdx oi, uintptr_t ra)
2513 {
2514     MMULookupLocals l;
2515     bool crosspage;
2516     uint64_t a, b;
2517     Int128 ret;
2518     int first;
2519 
2520     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2521     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
2522     if (likely(!crosspage)) {
2523         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2524             ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
2525                                    l.mmu_idx, ra);
2526             if ((l.memop & MO_BSWAP) == MO_LE) {
2527                 ret = bswap128(ret);
2528             }
2529         } else {
2530             /* Perform the load host endian. */
2531             ret = load_atom_16(cpu_env(cpu), ra, l.page[0].haddr, l.memop);
2532             if (l.memop & MO_BSWAP) {
2533                 ret = bswap128(ret);
2534             }
2535         }
2536         return ret;
2537     }
2538 
2539     first = l.page[0].size;
2540     if (first == 8) {
2541         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
2542 
2543         a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2544         b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2545         if ((mop8 & MO_BSWAP) == MO_LE) {
2546             ret = int128_make128(a, b);
2547         } else {
2548             ret = int128_make128(b, a);
2549         }
2550         return ret;
2551     }
2552 
2553     if (first < 8) {
2554         a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
2555                       MMU_DATA_LOAD, l.memop, ra);
2556         ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
2557     } else {
2558         ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
2559         b = int128_getlo(ret);
2560         ret = int128_lshift(ret, l.page[1].size * 8);
2561         a = int128_gethi(ret);
2562         b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
2563                       MMU_DATA_LOAD, l.memop, ra);
2564         ret = int128_make128(b, a);
2565     }
2566     if ((l.memop & MO_BSWAP) == MO_LE) {
2567         ret = bswap128(ret);
2568     }
2569     return ret;
2570 }
2571 
2572 Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
2573                        uint32_t oi, uintptr_t retaddr)
2574 {
2575     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
2576     return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
2577 }
2578 
2579 Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
2580 {
2581     return helper_ld16_mmu(env, addr, oi, GETPC());
2582 }
2583 
2584 /*
2585  * Load helpers for cpu_ldst.h.
2586  */
2587 
2588 static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
2589 {
2590     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2591 }
2592 
2593 uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2594 {
2595     uint8_t ret;
2596 
2597     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
2598     ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
2599     plugin_load_cb(env, addr, oi);
2600     return ret;
2601 }
2602 
2603 uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
2604                      MemOpIdx oi, uintptr_t ra)
2605 {
2606     uint16_t ret;
2607 
2608     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2609     ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
2610     plugin_load_cb(env, addr, oi);
2611     return ret;
2612 }
2613 
2614 uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
2615                      MemOpIdx oi, uintptr_t ra)
2616 {
2617     uint32_t ret;
2618 
2619     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
2620     ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
2621     plugin_load_cb(env, addr, oi);
2622     return ret;
2623 }
2624 
2625 uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
2626                      MemOpIdx oi, uintptr_t ra)
2627 {
2628     uint64_t ret;
2629 
2630     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
2631     ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
2632     plugin_load_cb(env, addr, oi);
2633     return ret;
2634 }
2635 
2636 Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
2637                     MemOpIdx oi, uintptr_t ra)
2638 {
2639     Int128 ret;
2640 
2641     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
2642     ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
2643     plugin_load_cb(env, addr, oi);
2644     return ret;
2645 }
2646 
2647 /*
2648  * Store Helpers
2649  */
2650 
2651 /**
2652  * do_st_mmio_leN:
2653  * @cpu: generic cpu state
2654  * @full: page parameters
2655  * @val_le: data to store
2656  * @addr: virtual address
2657  * @size: number of bytes
2658  * @mmu_idx: virtual address context
2659  * @ra: return address into tcg generated code, or 0
2660  * Context: iothread lock held
2661  *
2662  * Store @size bytes at @addr, which is memory-mapped i/o.
2663  * The bytes to store are extracted in little-endian order from @val_le;
2664  * return the bytes of @val_le beyond @p->size that have not been stored.
2665  */
2666 static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2667                                 uint64_t val_le, vaddr addr, int size,
2668                                 int mmu_idx, uintptr_t ra,
2669                                 MemoryRegion *mr, hwaddr mr_offset)
2670 {
2671     do {
2672         MemOp this_mop;
2673         unsigned this_size;
2674         MemTxResult r;
2675 
2676         /* Store aligned pieces up to 8 bytes. */
2677         this_mop = ctz32(size | (int)addr | 8);
2678         this_size = 1 << this_mop;
2679         this_mop |= MO_LE;
2680 
2681         r = memory_region_dispatch_write(mr, mr_offset, val_le,
2682                                          this_mop, full->attrs);
2683         if (unlikely(r != MEMTX_OK)) {
2684             io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
2685                       mmu_idx, r, ra);
2686         }
2687         if (this_size == 8) {
2688             return 0;
2689         }
2690 
2691         val_le >>= this_size * 8;
2692         addr += this_size;
2693         mr_offset += this_size;
2694         size -= this_size;
2695     } while (size);
2696 
2697     return val_le;
2698 }
2699 
2700 static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2701                                uint64_t val_le, vaddr addr, int size,
2702                                int mmu_idx, uintptr_t ra)
2703 {
2704     MemoryRegionSection *section;
2705     hwaddr mr_offset;
2706     MemoryRegion *mr;
2707     MemTxAttrs attrs;
2708     uint64_t ret;
2709 
2710     tcg_debug_assert(size > 0 && size <= 8);
2711 
2712     attrs = full->attrs;
2713     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2714     mr = section->mr;
2715 
2716     qemu_mutex_lock_iothread();
2717     ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
2718                           ra, mr, mr_offset);
2719     qemu_mutex_unlock_iothread();
2720 
2721     return ret;
2722 }
2723 
2724 static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2725                                  Int128 val_le, vaddr addr, int size,
2726                                  int mmu_idx, uintptr_t ra)
2727 {
2728     MemoryRegionSection *section;
2729     MemoryRegion *mr;
2730     hwaddr mr_offset;
2731     MemTxAttrs attrs;
2732     uint64_t ret;
2733 
2734     tcg_debug_assert(size > 8 && size <= 16);
2735 
2736     attrs = full->attrs;
2737     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2738     mr = section->mr;
2739 
2740     qemu_mutex_lock_iothread();
2741     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
2742                     mmu_idx, ra, mr, mr_offset);
2743     ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
2744                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
2745     qemu_mutex_unlock_iothread();
2746 
2747     return ret;
2748 }
2749 
2750 /*
2751  * Wrapper for the above.
2752  */
2753 static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
2754                           uint64_t val_le, int mmu_idx,
2755                           MemOp mop, uintptr_t ra)
2756 {
2757     MemOp atom;
2758     unsigned tmp, half_size;
2759 
2760     if (unlikely(p->flags & TLB_MMIO)) {
2761         return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
2762                               p->size, mmu_idx, ra);
2763     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2764         return val_le >> (p->size * 8);
2765     }
2766 
2767     /*
2768      * It is a given that we cross a page and therefore there is no atomicity
2769      * for the store as a whole, but subobjects may need attention.
2770      */
2771     atom = mop & MO_ATOM_MASK;
2772     switch (atom) {
2773     case MO_ATOM_SUBALIGN:
2774         return store_parts_leN(p->haddr, p->size, val_le);
2775 
2776     case MO_ATOM_IFALIGN_PAIR:
2777     case MO_ATOM_WITHIN16_PAIR:
2778         tmp = mop & MO_SIZE;
2779         tmp = tmp ? tmp - 1 : 0;
2780         half_size = 1 << tmp;
2781         if (atom == MO_ATOM_IFALIGN_PAIR
2782             ? p->size == half_size
2783             : p->size >= half_size) {
2784             if (!HAVE_al8_fast && p->size <= 4) {
2785                 return store_whole_le4(p->haddr, p->size, val_le);
2786             } else if (HAVE_al8) {
2787                 return store_whole_le8(p->haddr, p->size, val_le);
2788             } else {
2789                 cpu_loop_exit_atomic(cpu, ra);
2790             }
2791         }
2792         /* fall through */
2793 
2794     case MO_ATOM_IFALIGN:
2795     case MO_ATOM_WITHIN16:
2796     case MO_ATOM_NONE:
2797         return store_bytes_leN(p->haddr, p->size, val_le);
2798 
2799     default:
2800         g_assert_not_reached();
2801     }
2802 }
2803 
2804 /*
2805  * Wrapper for the above, for 8 < size < 16.
2806  */
2807 static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
2808                             Int128 val_le, int mmu_idx,
2809                             MemOp mop, uintptr_t ra)
2810 {
2811     int size = p->size;
2812     MemOp atom;
2813 
2814     if (unlikely(p->flags & TLB_MMIO)) {
2815         return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
2816                                 size, mmu_idx, ra);
2817     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2818         return int128_gethi(val_le) >> ((size - 8) * 8);
2819     }
2820 
2821     /*
2822      * It is a given that we cross a page and therefore there is no atomicity
2823      * for the store as a whole, but subobjects may need attention.
2824      */
2825     atom = mop & MO_ATOM_MASK;
2826     switch (atom) {
2827     case MO_ATOM_SUBALIGN:
2828         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
2829         return store_parts_leN(p->haddr + 8, p->size - 8,
2830                                int128_gethi(val_le));
2831 
2832     case MO_ATOM_WITHIN16_PAIR:
2833         /* Since size > 8, this is the half that must be atomic. */
2834         if (!HAVE_ATOMIC128_RW) {
2835             cpu_loop_exit_atomic(cpu, ra);
2836         }
2837         return store_whole_le16(p->haddr, p->size, val_le);
2838 
2839     case MO_ATOM_IFALIGN_PAIR:
2840         /*
2841          * Since size > 8, both halves are misaligned,
2842          * and so neither is atomic.
2843          */
2844     case MO_ATOM_IFALIGN:
2845     case MO_ATOM_WITHIN16:
2846     case MO_ATOM_NONE:
2847         stq_le_p(p->haddr, int128_getlo(val_le));
2848         return store_bytes_leN(p->haddr + 8, p->size - 8,
2849                                int128_gethi(val_le));
2850 
2851     default:
2852         g_assert_not_reached();
2853     }
2854 }
2855 
2856 static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
2857                     int mmu_idx, uintptr_t ra)
2858 {
2859     if (unlikely(p->flags & TLB_MMIO)) {
2860         do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
2861     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2862         /* nothing */
2863     } else {
2864         *(uint8_t *)p->haddr = val;
2865     }
2866 }
2867 
2868 static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
2869                     int mmu_idx, MemOp memop, uintptr_t ra)
2870 {
2871     if (unlikely(p->flags & TLB_MMIO)) {
2872         if ((memop & MO_BSWAP) != MO_LE) {
2873             val = bswap16(val);
2874         }
2875         do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
2876     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2877         /* nothing */
2878     } else {
2879         /* Swap to host endian if necessary, then store. */
2880         if (memop & MO_BSWAP) {
2881             val = bswap16(val);
2882         }
2883         store_atom_2(cpu_env(cpu), ra, p->haddr, memop, val);
2884     }
2885 }
2886 
2887 static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
2888                     int mmu_idx, MemOp memop, uintptr_t ra)
2889 {
2890     if (unlikely(p->flags & TLB_MMIO)) {
2891         if ((memop & MO_BSWAP) != MO_LE) {
2892             val = bswap32(val);
2893         }
2894         do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
2895     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2896         /* nothing */
2897     } else {
2898         /* Swap to host endian if necessary, then store. */
2899         if (memop & MO_BSWAP) {
2900             val = bswap32(val);
2901         }
2902         store_atom_4(cpu_env(cpu), ra, p->haddr, memop, val);
2903     }
2904 }
2905 
2906 static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
2907                     int mmu_idx, MemOp memop, uintptr_t ra)
2908 {
2909     if (unlikely(p->flags & TLB_MMIO)) {
2910         if ((memop & MO_BSWAP) != MO_LE) {
2911             val = bswap64(val);
2912         }
2913         do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
2914     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2915         /* nothing */
2916     } else {
2917         /* Swap to host endian if necessary, then store. */
2918         if (memop & MO_BSWAP) {
2919             val = bswap64(val);
2920         }
2921         store_atom_8(cpu_env(cpu), ra, p->haddr, memop, val);
2922     }
2923 }
2924 
2925 void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
2926                     MemOpIdx oi, uintptr_t ra)
2927 {
2928     MMULookupLocals l;
2929     bool crosspage;
2930 
2931     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
2932     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2933     crosspage = mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_STORE, &l);
2934     tcg_debug_assert(!crosspage);
2935 
2936     do_st_1(env_cpu(env), &l.page[0], val, l.mmu_idx, ra);
2937 }
2938 
2939 static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
2940                        MemOpIdx oi, uintptr_t ra)
2941 {
2942     MMULookupLocals l;
2943     bool crosspage;
2944     uint8_t a, b;
2945 
2946     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2947     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2948     if (likely(!crosspage)) {
2949         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2950         return;
2951     }
2952 
2953     if ((l.memop & MO_BSWAP) == MO_LE) {
2954         a = val, b = val >> 8;
2955     } else {
2956         b = val, a = val >> 8;
2957     }
2958     do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
2959     do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
2960 }
2961 
2962 void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
2963                     MemOpIdx oi, uintptr_t retaddr)
2964 {
2965     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2966     do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
2967 }
2968 
2969 static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
2970                        MemOpIdx oi, uintptr_t ra)
2971 {
2972     MMULookupLocals l;
2973     bool crosspage;
2974 
2975     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2976     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2977     if (likely(!crosspage)) {
2978         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2979         return;
2980     }
2981 
2982     /* Swap to little endian for simplicity, then store by bytes. */
2983     if ((l.memop & MO_BSWAP) != MO_LE) {
2984         val = bswap32(val);
2985     }
2986     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2987     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2988 }
2989 
2990 void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
2991                     MemOpIdx oi, uintptr_t retaddr)
2992 {
2993     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
2994     do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
2995 }
2996 
2997 static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
2998                        MemOpIdx oi, uintptr_t ra)
2999 {
3000     MMULookupLocals l;
3001     bool crosspage;
3002 
3003     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
3004     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
3005     if (likely(!crosspage)) {
3006         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
3007         return;
3008     }
3009 
3010     /* Swap to little endian for simplicity, then store by bytes. */
3011     if ((l.memop & MO_BSWAP) != MO_LE) {
3012         val = bswap64(val);
3013     }
3014     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
3015     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
3016 }
3017 
3018 void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
3019                     MemOpIdx oi, uintptr_t retaddr)
3020 {
3021     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
3022     do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
3023 }
3024 
3025 static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
3026                         MemOpIdx oi, uintptr_t ra)
3027 {
3028     MMULookupLocals l;
3029     bool crosspage;
3030     uint64_t a, b;
3031     int first;
3032 
3033     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
3034     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
3035     if (likely(!crosspage)) {
3036         if (unlikely(l.page[0].flags & TLB_MMIO)) {
3037             if ((l.memop & MO_BSWAP) != MO_LE) {
3038                 val = bswap128(val);
3039             }
3040             do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
3041         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
3042             /* nothing */
3043         } else {
3044             /* Swap to host endian if necessary, then store. */
3045             if (l.memop & MO_BSWAP) {
3046                 val = bswap128(val);
3047             }
3048             store_atom_16(cpu_env(cpu), ra, l.page[0].haddr, l.memop, val);
3049         }
3050         return;
3051     }
3052 
3053     first = l.page[0].size;
3054     if (first == 8) {
3055         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
3056 
3057         if (l.memop & MO_BSWAP) {
3058             val = bswap128(val);
3059         }
3060         if (HOST_BIG_ENDIAN) {
3061             b = int128_getlo(val), a = int128_gethi(val);
3062         } else {
3063             a = int128_getlo(val), b = int128_gethi(val);
3064         }
3065         do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
3066         do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
3067         return;
3068     }
3069 
3070     if ((l.memop & MO_BSWAP) != MO_LE) {
3071         val = bswap128(val);
3072     }
3073     if (first < 8) {
3074         do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
3075         val = int128_urshift(val, first * 8);
3076         do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
3077     } else {
3078         b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
3079         do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
3080     }
3081 }
3082 
3083 void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
3084                      MemOpIdx oi, uintptr_t retaddr)
3085 {
3086     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
3087     do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
3088 }
3089 
3090 void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
3091 {
3092     helper_st16_mmu(env, addr, val, oi, GETPC());
3093 }
3094 
3095 /*
3096  * Store Helpers for cpu_ldst.h
3097  */
3098 
3099 static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
3100 {
3101     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
3102 }
3103 
3104 void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
3105                  MemOpIdx oi, uintptr_t retaddr)
3106 {
3107     helper_stb_mmu(env, addr, val, oi, retaddr);
3108     plugin_store_cb(env, addr, oi);
3109 }
3110 
3111 void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
3112                  MemOpIdx oi, uintptr_t retaddr)
3113 {
3114     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
3115     do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
3116     plugin_store_cb(env, addr, oi);
3117 }
3118 
3119 void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
3120                     MemOpIdx oi, uintptr_t retaddr)
3121 {
3122     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
3123     do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
3124     plugin_store_cb(env, addr, oi);
3125 }
3126 
3127 void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
3128                  MemOpIdx oi, uintptr_t retaddr)
3129 {
3130     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
3131     do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
3132     plugin_store_cb(env, addr, oi);
3133 }
3134 
3135 void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
3136                   MemOpIdx oi, uintptr_t retaddr)
3137 {
3138     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
3139     do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
3140     plugin_store_cb(env, addr, oi);
3141 }
3142 
3143 #include "ldst_common.c.inc"
3144 
3145 /*
3146  * First set of functions passes in OI and RETADDR.
3147  * This makes them callable from other helpers.
3148  */
3149 
3150 #define ATOMIC_NAME(X) \
3151     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
3152 
3153 #define ATOMIC_MMU_CLEANUP
3154 
3155 #include "atomic_common.c.inc"
3156 
3157 #define DATA_SIZE 1
3158 #include "atomic_template.h"
3159 
3160 #define DATA_SIZE 2
3161 #include "atomic_template.h"
3162 
3163 #define DATA_SIZE 4
3164 #include "atomic_template.h"
3165 
3166 #ifdef CONFIG_ATOMIC64
3167 #define DATA_SIZE 8
3168 #include "atomic_template.h"
3169 #endif
3170 
3171 #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
3172 #define DATA_SIZE 16
3173 #include "atomic_template.h"
3174 #endif
3175 
3176 /* Code access functions.  */
3177 
3178 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3179 {
3180     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
3181     return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
3182 }
3183 
3184 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
3185 {
3186     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
3187     return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
3188 }
3189 
3190 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
3191 {
3192     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
3193     return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
3194 }
3195 
3196 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3197 {
3198     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
3199     return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
3200 }
3201 
3202 uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
3203                          MemOpIdx oi, uintptr_t retaddr)
3204 {
3205     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3206 }
3207 
3208 uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
3209                           MemOpIdx oi, uintptr_t retaddr)
3210 {
3211     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3212 }
3213 
3214 uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
3215                           MemOpIdx oi, uintptr_t retaddr)
3216 {
3217     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3218 }
3219 
3220 uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
3221                           MemOpIdx oi, uintptr_t retaddr)
3222 {
3223     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3224 }
3225