xref: /qemu/accel/tcg/cputlb.c (revision 9b45a025)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "hw/core/tcg-cpu-ops.h"
24 #include "exec/exec-all.h"
25 #include "exec/memory.h"
26 #include "exec/address-spaces.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/cputlb.h"
29 #include "exec/tb-hash.h"
30 #include "exec/memory-internal.h"
31 #include "exec/ram_addr.h"
32 #include "tcg/tcg.h"
33 #include "qemu/error-report.h"
34 #include "exec/log.h"
35 #include "exec/helper-proto.h"
36 #include "qemu/atomic.h"
37 #include "qemu/atomic128.h"
38 #include "exec/translate-all.h"
39 #include "trace/trace-root.h"
40 #include "trace/mem.h"
41 #include "internal.h"
42 #ifdef CONFIG_PLUGIN
43 #include "qemu/plugin-memory.h"
44 #endif
45 
46 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
47 /* #define DEBUG_TLB */
48 /* #define DEBUG_TLB_LOG */
49 
#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

/* Debug printout: routed to the CPU_LOG_MMU log when DEBUG_TLB_LOG is
 * defined, otherwise to stderr; compiles to nothing without DEBUG_TLB. */
#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* Debug-only check that TLB mutation happens on the vCPU's own thread. */
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
85 
86 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
87 {
88     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
89 }
90 
91 static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
92 {
93     return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
94 }
95 
96 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
97                              size_t max_entries)
98 {
99     desc->window_begin_ns = ns;
100     desc->window_max_entries = max_entries;
101 }
102 
103 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
104 {
105     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
106 
107     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
108         qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
109     }
110 }
111 
112 static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
113 {
114     /* Discard jump cache entries for any tb which might potentially
115        overlap the flushed page.  */
116     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
117     tb_jmp_cache_clear_page(cpu, addr);
118 }
119 
120 /**
121  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
122  * @desc: The CPUTLBDesc portion of the TLB
123  * @fast: The CPUTLBDescFast portion of the same TLB
124  *
125  * Called with tlb_lock_held.
126  *
127  * We have two main constraints when resizing a TLB: (1) we only resize it
128  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
129  * the array or unnecessarily flushing it), which means we do not control how
130  * frequently the resizing can occur; (2) we don't have access to the guest's
131  * future scheduling decisions, and therefore have to decide the magnitude of
132  * the resize based on past observations.
133  *
134  * In general, a memory-hungry process can benefit greatly from an appropriately
135  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
136  * we just have to make the TLB as large as possible; while an oversized TLB
137  * results in minimal TLB miss rates, it also takes longer to be flushed
138  * (flushes can be _very_ frequent), and the reduced locality can also hurt
139  * performance.
140  *
141  * To achieve near-optimal performance for all kinds of workloads, we:
142  *
143  * 1. Aggressively increase the size of the TLB when the use rate of the
144  * TLB being flushed is high, since it is likely that in the near future this
145  * memory-hungry process will execute again, and its memory hungriness will
146  * probably be similar.
147  *
148  * 2. Slowly reduce the size of the TLB as the use rate declines over a
149  * reasonably large time window. The rationale is that if in such a time window
150  * we have not observed a high TLB use rate, it is likely that we won't observe
151  * it in the near future. In that case, once a time window expires we downsize
152  * the TLB to match the maximum use rate observed in the window.
153  *
154  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
155  * since in that range performance is likely near-optimal. Recall that the TLB
156  * is direct mapped, so we want the use rate to be low (or at least not too
157  * high), since otherwise we are likely to have a significant amount of
158  * conflict misses.
159  */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    /* Track the peak number of in-use entries over the current window. */
    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    /* Use rate as a percentage of the current table size. */
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        /* Hot TLB: double immediately, bounded by CPU_TLB_DYN_MAX_BITS. */
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        /* No resize; a completed window still starts a new one. */
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    /* Resizing always implies a full flush, so free rather than realloc. */
    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
234 
235 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
236 {
237     desc->n_used_entries = 0;
238     desc->large_page_addr = -1;
239     desc->large_page_mask = -1;
240     desc->vindex = 0;
241     memset(fast->table, -1, sizeof_tlb(fast));
242     memset(desc->vtable, -1, sizeof(desc->vtable));
243 }
244 
245 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
246                                         int64_t now)
247 {
248     CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
249     CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
250 
251     tlb_mmu_resize_locked(desc, fast, now);
252     tlb_mmu_flush_locked(desc, fast);
253 }
254 
255 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
256 {
257     size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
258 
259     tlb_window_reset(desc, now, 0);
260     desc->n_used_entries = 0;
261     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
262     fast->table = g_new(CPUTLBEntry, n_entries);
263     desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
264     tlb_mmu_flush_locked(desc, fast);
265 }
266 
267 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
268 {
269     env_tlb(env)->d[mmu_idx].n_used_entries++;
270 }
271 
272 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
273 {
274     env_tlb(env)->d[mmu_idx].n_used_entries--;
275 }
276 
277 void tlb_init(CPUState *cpu)
278 {
279     CPUArchState *env = cpu->env_ptr;
280     int64_t now = get_clock_realtime();
281     int i;
282 
283     qemu_spin_init(&env_tlb(env)->c.lock);
284 
285     /* All tlbs are initialized flushed. */
286     env_tlb(env)->c.dirty = 0;
287 
288     for (i = 0; i < NB_MMU_MODES; i++) {
289         tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
290     }
291 }
292 
293 void tlb_destroy(CPUState *cpu)
294 {
295     CPUArchState *env = cpu->env_ptr;
296     int i;
297 
298     qemu_spin_destroy(&env_tlb(env)->c.lock);
299     for (i = 0; i < NB_MMU_MODES; i++) {
300         CPUTLBDesc *desc = &env_tlb(env)->d[i];
301         CPUTLBDescFast *fast = &env_tlb(env)->f[i];
302 
303         g_free(fast->table);
304         g_free(desc->iotlb);
305     }
306 }
307 
/* flush_all_helper: queue @fn asynchronously on every cpu except @src.
 *
 * The source cpu is deliberately skipped; callers run (or queue) the
 * flush on @src themselves — via async_safe_run_on_cpu when they need
 * a synchronisation point where all queued work finishes before
 * execution starts again.
 */
315 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
316                              run_on_cpu_data d)
317 {
318     CPUState *cpu;
319 
320     CPU_FOREACH(cpu) {
321         if (cpu != src) {
322             async_run_on_cpu(cpu, fn, d);
323         }
324     }
325 }
326 
327 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
328 {
329     CPUState *cpu;
330     size_t full = 0, part = 0, elide = 0;
331 
332     CPU_FOREACH(cpu) {
333         CPUArchState *env = cpu->env_ptr;
334 
335         full += qatomic_read(&env_tlb(env)->c.full_flush_count);
336         part += qatomic_read(&env_tlb(env)->c.part_flush_count);
337         elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
338     }
339     *pfull = full;
340     *ppart = part;
341     *pelide = elide;
342 }
343 
/* Worker run on the owning vCPU: flush the mmu_idxes named in
 * data.host_int, skipping any that are already clean. */
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    /* Only flush mmu_idxes that are actually dirty; clear their bits. */
    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    /* Iterate over the set bits of to_clean, lowest index first. */
    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /* The jump cache may reference TBs reached via flushed entries. */
    cpu_tb_jmp_cache_clear(cpu);

    /* Statistics: full, partial, and elided (already-clean) flushes. */
    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}
384 
385 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
386 {
387     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
388 
389     if (cpu->created && !qemu_cpu_is_self(cpu)) {
390         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
391                          RUN_ON_CPU_HOST_INT(idxmap));
392     } else {
393         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
394     }
395 }
396 
397 void tlb_flush(CPUState *cpu)
398 {
399     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
400 }
401 
402 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
403 {
404     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
405 
406     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
407 
408     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
409     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
410 }
411 
412 void tlb_flush_all_cpus(CPUState *src_cpu)
413 {
414     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
415 }
416 
417 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
418 {
419     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
420 
421     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
422 
423     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
424     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
425 }
426 
427 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
428 {
429     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
430 }
431 
432 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
433                                       target_ulong page, target_ulong mask)
434 {
435     page &= mask;
436     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
437 
438     return (page == (tlb_entry->addr_read & mask) ||
439             page == (tlb_addr_write(tlb_entry) & mask) ||
440             page == (tlb_entry->addr_code & mask));
441 }
442 
443 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
444                                         target_ulong page)
445 {
446     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
447 }
448 
449 /**
450  * tlb_entry_is_empty - return true if the entry is not in use
451  * @te: pointer to CPUTLBEntry
452  */
453 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
454 {
455     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
456 }
457 
458 /* Called with tlb_c.lock held */
459 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
460                                         target_ulong page,
461                                         target_ulong mask)
462 {
463     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
464         memset(tlb_entry, -1, sizeof(*tlb_entry));
465         return true;
466     }
467     return false;
468 }
469 
470 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
471                                           target_ulong page)
472 {
473     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
474 }
475 
476 /* Called with tlb_c.lock held */
477 static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
478                                             target_ulong page,
479                                             target_ulong mask)
480 {
481     CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
482     int k;
483 
484     assert_cpu_is_self(env_cpu(env));
485     for (k = 0; k < CPU_VTLB_SIZE; k++) {
486         if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
487             tlb_n_used_entries_dec(env, mmu_idx);
488         }
489     }
490 }
491 
492 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
493                                               target_ulong page)
494 {
495     tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
496 }
497 
498 static void tlb_flush_page_locked(CPUArchState *env, int midx,
499                                   target_ulong page)
500 {
501     target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
502     target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
503 
504     /* Check if we need to flush due to large pages.  */
505     if ((page & lp_mask) == lp_addr) {
506         tlb_debug("forcing full flush midx %d ("
507                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
508                   midx, lp_addr, lp_mask);
509         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
510     } else {
511         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
512             tlb_n_used_entries_dec(env, midx);
513         }
514         tlb_flush_vtlb_page_locked(env, midx, page);
515     }
516 }
517 
518 /**
519  * tlb_flush_page_by_mmuidx_async_0:
520  * @cpu: cpu on which to flush
521  * @addr: page of virtual address to flush
522  * @idxmap: set of mmu_idx to flush
523  *
524  * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
525  * at @addr from the tlbs indicated by @idxmap from @cpu.
526  */
527 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
528                                              target_ulong addr,
529                                              uint16_t idxmap)
530 {
531     CPUArchState *env = cpu->env_ptr;
532     int mmu_idx;
533 
534     assert_cpu_is_self(cpu);
535 
536     tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
537 
538     qemu_spin_lock(&env_tlb(env)->c.lock);
539     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
540         if ((idxmap >> mmu_idx) & 1) {
541             tlb_flush_page_locked(env, mmu_idx, addr);
542         }
543     }
544     qemu_spin_unlock(&env_tlb(env)->c.lock);
545 
546     tb_flush_jmp_cache(cpu, addr);
547 }
548 
549 /**
550  * tlb_flush_page_by_mmuidx_async_1:
551  * @cpu: cpu on which to flush
552  * @data: encoded addr + idxmap
553  *
554  * Helper for tlb_flush_page_by_mmuidx and friends, called through
555  * async_run_on_cpu.  The idxmap parameter is encoded in the page
556  * offset of the target_ptr field.  This limits the set of mmu_idx
557  * that can be passed via this method.
558  */
559 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
560                                              run_on_cpu_data data)
561 {
562     target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
563     target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
564     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
565 
566     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
567 }
568 
/*
 * Heap-allocated argument carrier for tlb_flush_page_by_mmuidx_async_2,
 * used when addr + idxmap cannot be packed into a run_on_cpu_data word.
 */
typedef struct {
    target_ulong addr;   /* page-aligned virtual address to flush */
    uint16_t idxmap;     /* set of mmu_idx to flush */
} TLBFlushPageByMMUIdxData;
573 
574 /**
575  * tlb_flush_page_by_mmuidx_async_2:
576  * @cpu: cpu on which to flush
577  * @data: allocated addr + idxmap
578  *
579  * Helper for tlb_flush_page_by_mmuidx and friends, called through
580  * async_run_on_cpu.  The addr+idxmap parameters are stored in a
581  * TLBFlushPageByMMUIdxData structure that has been allocated
582  * specifically for this helper.  Free the structure when done.
583  */
584 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
585                                              run_on_cpu_data data)
586 {
587     TLBFlushPageByMMUIdxData *d = data.host_ptr;
588 
589     tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
590     g_free(d);
591 }
592 
593 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
594 {
595     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
596 
597     /* This should already be page aligned */
598     addr &= TARGET_PAGE_MASK;
599 
600     if (qemu_cpu_is_self(cpu)) {
601         tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
602     } else if (idxmap < TARGET_PAGE_SIZE) {
603         /*
604          * Most targets have only a few mmu_idx.  In the case where
605          * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
606          * allocating memory for this operation.
607          */
608         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
609                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
610     } else {
611         TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
612 
613         /* Otherwise allocate a structure, freed by the worker.  */
614         d->addr = addr;
615         d->idxmap = idxmap;
616         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
617                          RUN_ON_CPU_HOST_PTR(d));
618     }
619 }
620 
621 void tlb_flush_page(CPUState *cpu, target_ulong addr)
622 {
623     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
624 }
625 
626 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
627                                        uint16_t idxmap)
628 {
629     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
630 
631     /* This should already be page aligned */
632     addr &= TARGET_PAGE_MASK;
633 
634     /*
635      * Allocate memory to hold addr+idxmap only when needed.
636      * See tlb_flush_page_by_mmuidx for details.
637      */
638     if (idxmap < TARGET_PAGE_SIZE) {
639         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
640                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
641     } else {
642         CPUState *dst_cpu;
643 
644         /* Allocate a separate data block for each destination cpu.  */
645         CPU_FOREACH(dst_cpu) {
646             if (dst_cpu != src_cpu) {
647                 TLBFlushPageByMMUIdxData *d
648                     = g_new(TLBFlushPageByMMUIdxData, 1);
649 
650                 d->addr = addr;
651                 d->idxmap = idxmap;
652                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
653                                  RUN_ON_CPU_HOST_PTR(d));
654             }
655         }
656     }
657 
658     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
659 }
660 
661 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
662 {
663     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
664 }
665 
666 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
667                                               target_ulong addr,
668                                               uint16_t idxmap)
669 {
670     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
671 
672     /* This should already be page aligned */
673     addr &= TARGET_PAGE_MASK;
674 
675     /*
676      * Allocate memory to hold addr+idxmap only when needed.
677      * See tlb_flush_page_by_mmuidx for details.
678      */
679     if (idxmap < TARGET_PAGE_SIZE) {
680         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
681                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
682         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
683                               RUN_ON_CPU_TARGET_PTR(addr | idxmap));
684     } else {
685         CPUState *dst_cpu;
686         TLBFlushPageByMMUIdxData *d;
687 
688         /* Allocate a separate data block for each destination cpu.  */
689         CPU_FOREACH(dst_cpu) {
690             if (dst_cpu != src_cpu) {
691                 d = g_new(TLBFlushPageByMMUIdxData, 1);
692                 d->addr = addr;
693                 d->idxmap = idxmap;
694                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
695                                  RUN_ON_CPU_HOST_PTR(d));
696             }
697         }
698 
699         d = g_new(TLBFlushPageByMMUIdxData, 1);
700         d->addr = addr;
701         d->idxmap = idxmap;
702         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
703                               RUN_ON_CPU_HOST_PTR(d));
704     }
705 }
706 
707 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
708 {
709     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
710 }
711 
/* Flush the page at @page, comparing only the low @bits address bits,
 * from a single mmu_idx.  Called with tlb_c.lock held. */
static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
                                       target_ulong page, unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     *
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     */
    if (mask < f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, page, mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /* Check if we need to flush due to large pages.  */
    if ((page & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /* Evict the single matching entry from the main and victim TLBs. */
    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
        tlb_n_used_entries_dec(env, midx);
    }
    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
}
749 
/* Argument bundle for tlb_flush_page_bits_by_mmuidx_async_0. */
typedef struct {
    target_ulong addr;   /* page-aligned virtual address to flush */
    uint16_t idxmap;     /* set of mmu_idx to flush */
    uint16_t bits;       /* number of significant low address bits */
} TLBFlushPageBitsByMMUIdxData;
755 
756 static void
757 tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
758                                       TLBFlushPageBitsByMMUIdxData d)
759 {
760     CPUArchState *env = cpu->env_ptr;
761     int mmu_idx;
762 
763     assert_cpu_is_self(cpu);
764 
765     tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
766               d.addr, d.bits, d.idxmap);
767 
768     qemu_spin_lock(&env_tlb(env)->c.lock);
769     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
770         if ((d.idxmap >> mmu_idx) & 1) {
771             tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
772         }
773     }
774     qemu_spin_unlock(&env_tlb(env)->c.lock);
775 
776     tb_flush_jmp_cache(cpu, d.addr);
777 }
778 
/*
 * Pack addr + idxmap + bits into a single run_on_cpu_data word, if they
 * fit.  Returns false when idxmap is too wide, in which case the caller
 * must heap-allocate a TLBFlushPageBitsByMMUIdxData instead.
 */
static bool encode_pbm_to_runon(run_on_cpu_data *out,
                                TLBFlushPageBitsByMMUIdxData d)
{
    /* We need 6 bits to hold @bits up to 63. */
    if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
        *out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
        return true;
    }
    return false;
}
789 
790 static TLBFlushPageBitsByMMUIdxData
791 decode_runon_to_pbm(run_on_cpu_data data)
792 {
793     target_ulong addr_map_bits = (target_ulong) data.target_ptr;
794     return (TLBFlushPageBitsByMMUIdxData){
795         .addr = addr_map_bits & TARGET_PAGE_MASK,
796         .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
797         .bits = addr_map_bits & 0x3f
798     };
799 }
800 
801 static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
802                                                   run_on_cpu_data runon)
803 {
804     tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
805 }
806 
807 static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
808                                                   run_on_cpu_data data)
809 {
810     TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;
811     tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
812     g_free(d);
813 }
814 
815 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
816                                    uint16_t idxmap, unsigned bits)
817 {
818     TLBFlushPageBitsByMMUIdxData d;
819     run_on_cpu_data runon;
820 
821     /* If all bits are significant, this devolves to tlb_flush_page. */
822     if (bits >= TARGET_LONG_BITS) {
823         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
824         return;
825     }
826     /* If no page bits are significant, this devolves to tlb_flush. */
827     if (bits < TARGET_PAGE_BITS) {
828         tlb_flush_by_mmuidx(cpu, idxmap);
829         return;
830     }
831 
832     /* This should already be page aligned */
833     d.addr = addr & TARGET_PAGE_MASK;
834     d.idxmap = idxmap;
835     d.bits = bits;
836 
837     if (qemu_cpu_is_self(cpu)) {
838         tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
839     } else if (encode_pbm_to_runon(&runon, d)) {
840         async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
841     } else {
842         TLBFlushPageBitsByMMUIdxData *p
843             = g_new(TLBFlushPageBitsByMMUIdxData, 1);
844 
845         /* Otherwise allocate a structure, freed by the worker.  */
846         *p = d;
847         async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
848                          RUN_ON_CPU_HOST_PTR(p));
849     }
850 }
851 
/*
 * As tlb_flush_page_bits_by_mmuidx, but flush all CPUs: remote CPUs
 * asynchronously, then SRC_CPU synchronously before returning.
 */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap,
                                            unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (encode_pbm_to_runon(&runon, d)) {
        /* Arguments fit in the payload; queue to every other CPU. */
        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
    } else {
        CPUState *dst_cpu;
        TLBFlushPageBitsByMMUIdxData *p;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
                *p = d;
                async_run_on_cpu(dst_cpu,
                                 tlb_flush_page_bits_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(p));
            }
        }
    }

    /* The source CPU is flushed here, synchronously. */
    tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
}
896 
/*
 * As tlb_flush_page_bits_by_mmuidx_all_cpus, but the flush on SRC_CPU
 * itself is scheduled with async_safe_run_on_cpu, so the caller can rely
 * on the flush having completed on all CPUs when that work item runs.
 */
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (encode_pbm_to_runon(&runon, d)) {
        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
                              runon);
    } else {
        CPUState *dst_cpu;
        TLBFlushPageBitsByMMUIdxData *p;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
                *p = d;
                async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(p));
            }
        }

        /* A separate allocation for the source CPU too; freed by worker. */
        p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
        *p = d;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(p));
    }
}
945 
946 /* update the TLBs so that writes to code in the virtual page 'addr'
947    can be detected */
948 void tlb_protect_code(ram_addr_t ram_addr)
949 {
950     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
951                                              DIRTY_MEMORY_CODE);
952 }
953 
954 /* update the TLB so that writes in physical page 'phys_addr' are no longer
955    tested for self modifying code */
956 void tlb_unprotect_code(ram_addr_t ram_addr)
957 {
958     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
959 }
960 
961 
962 /*
963  * Dirty write flag handling
964  *
965  * When the TCG code writes to a location it looks up the address in
966  * the TLB and uses that data to compute the final address. If any of
967  * the lower bits of the address are set then the slow path is forced.
968  * There are a number of reasons to do this but for normal RAM the
969  * most usual is detecting writes to code regions which may invalidate
970  * generated code.
971  *
972  * Other vCPUs might be reading their TLBs during guest execution, so we update
973  * te->addr_write with qatomic_set. We don't need to worry about this for
974  * oversized guests as MTTCG is disabled for them.
975  *
976  * Called with tlb_c.lock held.
977  */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    /* Only act on valid, writable-to-RAM entries with none of the
       slow-path bits already set. */
    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        /* Convert the guest virtual address to the host address. */
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            /* Oversized guests run single-threaded (see comment above),
               so a plain RMW is safe here. */
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            /* Other vCPUs may read addr_write concurrently. */
            qatomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
997 
998 /*
999  * Called with tlb_c.lock held.
1000  * Called only from the vCPU context, i.e. the TLB's owner thread.
1001  */
1002 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1003 {
1004     *d = *s;
1005 }
1006 
1007 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1008  * the target vCPU).
1009  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1010  * thing actually updated is the target TLB entry ->addr_write flags.
1011  */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    /* Serialize against the owner vCPU and other cross-CPU updaters. */
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        /* Main TLB table for this mmu index. */
        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        /* Victim TLB entries must be updated as well. */
        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
1036 
1037 /* Called with tlb_c.lock held */
1038 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1039                                          target_ulong vaddr)
1040 {
1041     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
1042         tlb_entry->addr_write = vaddr;
1043     }
1044 }
1045 
1046 /* update the TLB corresponding to virtual page vaddr
1047    so that it is no longer dirty */
1048 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
1049 {
1050     CPUArchState *env = cpu->env_ptr;
1051     int mmu_idx;
1052 
1053     assert_cpu_is_self(cpu);
1054 
1055     vaddr &= TARGET_PAGE_MASK;
1056     qemu_spin_lock(&env_tlb(env)->c.lock);
1057     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1058         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
1059     }
1060 
1061     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1062         int k;
1063         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1064             tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
1065         }
1066     }
1067     qemu_spin_unlock(&env_tlb(env)->c.lock);
1068 }
1069 
1070 /* Our TLB does not support large pages, so remember the area covered by
1071    large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    /* SIZE is assumed to be a power of two; mask selects the page bits. */
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        /* Widen the mask until both pages fall within the same region. */
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
1093 
1094 /* Add a new TLB entry. At most one entry for a given virtual address
1095  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1096  * supplied size is only used by tlb_flush_page.
1097  *
1098  * Called from TCG-generated code, which is under an RCU read-side
1099  * critical section.
1100  */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        /* Record the large page so invalidation can force a full flush. */
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    /* -1 never matches any address, i.e. no write permission. */
    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    /* Publish the new entry while still holding the lock. */
    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
1270 
1271 /* Add a new TLB entry, but without specifying the memory
1272  * transaction attributes to be used.
1273  */
1274 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1275                   hwaddr paddr, int prot,
1276                   int mmu_idx, target_ulong size)
1277 {
1278     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1279                             prot, mmu_idx, size);
1280 }
1281 
1282 static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1283 {
1284     ram_addr_t ram_addr;
1285 
1286     ram_addr = qemu_ram_addr_from_host(ptr);
1287     if (ram_addr == RAM_ADDR_INVALID) {
1288         error_report("Bad ram pointer %p", ptr);
1289         abort();
1290     }
1291     return ram_addr;
1292 }
1293 
1294 /*
1295  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1296  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1297  * be discarded and looked up again (e.g. via tlb_entry()).
1298  */
1299 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1300                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1301 {
1302     CPUClass *cc = CPU_GET_CLASS(cpu);
1303     bool ok;
1304 
1305     /*
1306      * This is not a probe, so only valid return is success; failure
1307      * should result in exception + longjmp to the cpu loop.
1308      */
1309     ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
1310                                access_type, mmu_idx, false, retaddr);
1311     assert(ok);
1312 }
1313 
1314 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1315                                         MMUAccessType access_type,
1316                                         int mmu_idx, uintptr_t retaddr)
1317 {
1318     CPUClass *cc = CPU_GET_CLASS(cpu);
1319 
1320     cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
1321 }
1322 
1323 static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1324                                           vaddr addr, unsigned size,
1325                                           MMUAccessType access_type,
1326                                           int mmu_idx, MemTxAttrs attrs,
1327                                           MemTxResult response,
1328                                           uintptr_t retaddr)
1329 {
1330     CPUClass *cc = CPU_GET_CLASS(cpu);
1331 
1332     if (!cpu->ignore_memory_transaction_failures &&
1333         cc->tcg_ops->do_transaction_failed) {
1334         cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1335                                            access_type, mmu_idx, attrs,
1336                                            response, retaddr);
1337     }
1338 }
1339 
/*
 * Perform an MMIO load of size/endianness OP through the memory region
 * found via IOTLBENTRY, taking the BQL if not already held.  Reports
 * transaction failures to the target hook via cpu_transaction_failed.
 */
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    /* iotlbentry->addr holds the section offset minus the page base;
       adding the full vaddr yields the offset within the region. */
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    /* Device access requires the big QEMU lock; take it if we lack it. */
    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
1379 
1380 /*
1381  * Save a potentially trashed IOTLB entry for later lookup by plugin.
1382  * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
1383  * because of the side effect of io_writex changing memory layout.
1384  */
static void save_iotlb_data(CPUState *cs, hwaddr addr,
                            MemoryRegionSection *section, hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
    /* Stash a copy in the CPUState for tlb_plugin_lookup to fall back on. */
    SavedIOTLB *saved = &cs->saved_iotlb;
    saved->addr = addr;
    saved->section = section;
    saved->mr_offset = mr_offset;
#endif
    /* Without CONFIG_PLUGIN this is deliberately a no-op. */
}
1395 
/*
 * Perform an MMIO store of VAL with size/endianness OP through the
 * memory region found via IOTLBENTRY, taking the BQL if not already
 * held.  Transaction failures are reported via cpu_transaction_failed.
 */
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
    save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);

    /* Device access requires the big QEMU lock; take it if we lack it. */
    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}
1439 
/*
 * Read the comparator field at byte offset OFS within a CPUTLBEntry
 * (addr_read/addr_write/addr_code, selected via offsetof by callers).
 */
static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    /* Oversized guests are single-threaded; a plain read suffices. */
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}
1449 
1450 /* Return true if ADDR is present in the victim tlb, and has been copied
1451    back to the main tlb.  */
1452 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1453                            size_t elt_ofs, target_ulong page)
1454 {
1455     size_t vidx;
1456 
1457     assert_cpu_is_self(env_cpu(env));
1458     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1459         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1460         target_ulong cmp;
1461 
1462         /* elt_ofs might correspond to .addr_write, so use qatomic_read */
1463 #if TCG_OVERSIZED_GUEST
1464         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1465 #else
1466         cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1467 #endif
1468 
1469         if (cmp == page) {
1470             /* Found entry in victim tlb, swap tlb and iotlb.  */
1471             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1472 
1473             qemu_spin_lock(&env_tlb(env)->c.lock);
1474             copy_tlb_helper_locked(&tmptlb, tlb);
1475             copy_tlb_helper_locked(tlb, vtlb);
1476             copy_tlb_helper_locked(vtlb, &tmptlb);
1477             qemu_spin_unlock(&env_tlb(env)->c.lock);
1478 
1479             CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1480             CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1481             tmpio = *io; *io = *vio; *vio = tmpio;
1482             return true;
1483         }
1484     }
1485     return false;
1486 }
1487 
/* Macro to call the above, with local variables from the use context:
   expects env, mmu_idx and index to be in scope at the call site.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
1492 
1493 /*
1494  * Return a ram_addr_t for the virtual address for execution.
1495  *
1496  * Return -1 if we can't translate and execute from an entire page
1497  * of RAM.  This will force us to execute by loading and translating
1498  * one insn at a time, without caching.
1499  *
1500  * NOTE: This function will trigger an exception if the page is
1501  * not executable.
1502  */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            /* tlb_fill may resize the TLB; re-fetch index and entry. */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM.  */
        if (hostp) {
            *hostp = NULL;
        }
        return -1;
    }

    /* Translate to the host address via the cached addend. */
    p = (void *)((uintptr_t)addr + entry->addend);
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}
1542 
1543 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1544 {
1545     return get_page_addr_code_hostp(env, addr, NULL);
1546 }
1547 
/*
 * Handle a write to a page whose TLB entry carries TLB_NOTDIRTY:
 * invalidate any translated code for the region, mark the page dirty,
 * and clear the notdirty state once no clean client remains.
 */
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        /* The region may contain translated code; throw it away. */
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}
1574 
/*
 * Look up ADDR for ACCESS_TYPE in MMU_IDX, filling the TLB if needed.
 * On success, store the host address in *PHOST and return the TLB flag
 * bits (possibly 0).  Returns TLB_INVALID_MASK with *PHOST = NULL for a
 * failed nonfault fill, or TLB_MMIO with *PHOST = NULL for non-RAM.
 */
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr, page_addr;
    size_t elt_ofs;
    int flags;

    /* Select the comparator field matching the access type. */
    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    page_addr = addr & TARGET_PAGE_MASK;
    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
            CPUState *cs = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cs);

            if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
                                       mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
                *phost = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }
    flags = tlb_addr & TLB_FLAGS_MASK;

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}
1631 
/*
 * Probe ADDR without faulting on clean RAM: notdirty handling is done
 * here so the caller never sees TLB_NOTDIRTY.  Returns the remaining
 * TLB flag bits and stores the host address (or NULL) in *PHOST.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
                                  nonfault, phost, retaddr);

    /* Handle clean RAM pages.  */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}
1652 
/*
 * Probe ADDR for ACCESS_TYPE, faulting on failure, and return the host
 * address of the guest page.  Watchpoints and notdirty pages are handled
 * here.  SIZE must not cross a page boundary; size == 0 merely faults.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    void *host;
    int flags;

    /* The access must fit entirely within one target page. */
    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  false, &host, retaddr);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);
        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages.  */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
        }
    }

    return host;
}
1689 
1690 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1691                         MMUAccessType access_type, int mmu_idx)
1692 {
1693     void *host;
1694     int flags;
1695 
1696     flags = probe_access_internal(env, addr, 0, access_type,
1697                                   mmu_idx, true, &host, 0);
1698 
1699     /* No combination of flags are expected by the caller. */
1700     return flags ? NULL : host;
1701 }
1702 
1703 #ifdef CONFIG_PLUGIN
1704 /*
1705  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1706  * This should be a hot path as we will have just looked this path up
1707  * in the softmmu lookup code (or helper). We don't handle re-fills or
1708  * checking the victim table. This is purely informational.
1709  *
1710  * This almost never fails as the memory access being instrumented
1711  * should have just filled the TLB. The one corner case is io_writex
1712  * which can cause TLB flushes and potential resizing of the TLBs
1713  * losing the information we need. In those cases we need to recover
1714  * data from a copy of the iotlbentry. As long as this always occurs
1715  * from the same thread (which a mem callback will be) this is safe.
1716  */
1717 
bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    /* Probe the comparator matching the direction of the access. */
    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

    if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
        if (tlb_addr & TLB_MMIO) {
            CPUIOTLBEntry *iotlbentry;
            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
            data->is_io = true;
            data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        } else {
            /* RAM: host address is the guest address plus the addend. */
            data->is_io = false;
            data->v.ram.hostaddr = addr + tlbe->addend;
        }
        return true;
    } else {
        /* TLB entry was lost (see comment above); recover the MMIO
           information from the copy saved by io_writex. */
        SavedIOTLB *saved = &cpu->saved_iotlb;
        data->is_io = true;
        data->v.io.section = saved->section;
        data->v.io.offset = saved->mr_offset;
        return true;
    }
}
1747 
1748 #endif
1749 
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.
 *
 * If the access cannot be serviced as plain host RAM (misaligned, or an
 * MMIO page), exit the cpu loop via cpu_loop_exit_atomic() so the
 * operation is retried under the exclusive stop-the-world path.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            /* tlb_fill may have moved/resized the table; recompute.  */
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    /* A write to a clean RAM page must go through notdirty handling.  */
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1 << s_bits,
                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
1823 
1824 /*
1825  * Load Helpers
1826  *
1827  * We support two different access types. SOFTMMU_CODE_ACCESS is
1828  * specifically for reading instructions from system memory. It is
1829  * called by the translation loop and in some helpers where the code
1830  * is disassembled. It shouldn't be called directly by guest code.
1831  */
1832 
/* Signature shared by all outer load helpers; load_helper() recurses
   through a pointer of this type for page-spanning accesses.  */
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
1835 
/*
 * Load a value of the size/endianness given by @op from host address
 * @haddr.  @op must be a compile-time constant so the switch folds to
 * a single access; the default branch enforces this at build time.
 */
static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return ldub_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(haddr);
    case MO_LEUL:
        return (uint32_t)ldl_le_p(haddr);
    case MO_BEQ:
        return ldq_be_p(haddr);
    case MO_LEQ:
        return ldq_le_p(haddr);
    default:
        /* Build-time assertion: all callers pass a constant MemOp. */
        qemu_build_not_reached();
    }
}
1858 
/*
 * Common load path for all sizes and endiannesses.
 *
 * @op encodes the size and endianness; @code_read selects instruction
 * fetch vs data load; @full_load is the specific outer helper, used to
 * recurse for accesses that span a page boundary.  Always inlined with
 * constant @op so that load_memop folds to a single host access.
 */
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            /* May longjmp out on fault; may also resize the TLB,
               so the cached index/entry must be recomputed.  */
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.
       Any flag bit set in the page-offset part means special-casing.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (likely(tlb_addr & TLB_MMIO)) {
            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                            access_type, op ^ (need_swap * MO_BSWAP));
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            return load_memop(haddr, op ^ MO_BSWAP);
        }
        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        /* Load the two aligned halves and shift them back together.  */
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    return load_memop(haddr, op);
}
1963 
1964 /*
1965  * For the benefit of TCG generated code, we want to avoid the
1966  * complication of ABI-specific return type promotion and always
1967  * return a value extended to the register size of the host. This is
1968  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1969  * data, and for that we always have uint64_t.
1970  *
1971  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1972  */
1973 
/*
 * Outer load helpers, one fixed-MemOp entry point per size/endianness.
 * The static full_* variants exist so load_helper() can recurse through
 * a FullLoadHelper pointer for page-spanning accesses; the helper_*
 * wrappers are the entry points used by TCG-generated code.
 */
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

/* The 64-bit helpers can serve as their own full_load callback.  */
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                       helper_be_ldq_mmu);
}
2051 
2052 /*
2053  * Provide signed versions of the load routines as well.  We can of course
2054  * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
2055  */
2056 
2057 
/* Sign extension is performed by casting the unsigned result.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
2087 
2088 /*
2089  * Load helpers for cpu_ldst.h.
2090  */
2091 
2092 static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2093                                        int mmu_idx, uintptr_t retaddr,
2094                                        MemOp op, FullLoadHelper *full_load)
2095 {
2096     uint16_t meminfo;
2097     TCGMemOpIdx oi;
2098     uint64_t ret;
2099 
2100     meminfo = trace_mem_get_info(op, mmu_idx, false);
2101     trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
2102 
2103     op &= ~MO_SIGN;
2104     oi = make_memop_idx(op, mmu_idx);
2105     ret = full_load(env, addr, oi, retaddr);
2106 
2107     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
2108 
2109     return ret;
2110 }
2111 
/*
 * The cpu_ld*_mmuidx_ra family: loads with an explicit mmu index and
 * return address, all dispatched through cpu_load_helper above.
 * Signed variants cast the unsigned result.
 */
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                            int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
}

int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                       int mmu_idx, uintptr_t ra)
{
    return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
                                   full_ldub_mmu);
}

uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
}

int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
                                    full_be_lduw_mmu);
}

uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
}

uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
}

uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
}

int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
                                    full_le_lduw_mmu);
}

uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
}

uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra)
{
    return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
}
2174 
/* The cpu_ld*_data_ra family: as above, using the default data mmu index. */
uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
                          uintptr_t retaddr)
{
    return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
2231 
/* The cpu_ld*_data family: default mmu index and no return address (0). */
uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldub_data_ra(env, ptr, 0);
}

int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsb_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_be_data_ra(env, ptr, 0);
}

int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_be_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_be_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_be_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_le_data_ra(env, ptr, 0);
}

int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_le_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_le_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_le_data_ra(env, ptr, 0);
}
2281 
2282 /*
2283  * Store Helpers
2284  */
2285 
/*
 * Store @val with the size/endianness given by @op to host address
 * @haddr.  @op must be a compile-time constant so the switch folds to
 * a single access; the default branch enforces this at build time.
 */
static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        /* Build-time assertion: all callers pass a constant MemOp. */
        qemu_build_not_reached();
    }
}
2315 
/*
 * Slow path for a store that spans two pages (or hits IO): the value is
 * written out one byte at a time through helper_ret_stb_mmu.  Marked
 * noinline to keep this cold path out of the always-inlined fast path.
 */
static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page2, tlb_addr, tlb_addr2;
    TCGMemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (!tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        /* First page covers size - size2 bytes of the access. */
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}
2389 
/*
 * Common store path for all sizes and endiannesses.  @op encodes the
 * size and endianness.  Always inlined with constant @op so that
 * store_memop folds to a single host access on the fast path.
 */
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
            addr & TARGET_PAGE_MASK)) {
            /* May longjmp out on fault; may also resize the TLB,
               so the cached index/entry must be recomputed.  */
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.
       Any flag bit set in the page-offset part means special-casing.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                     >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}
2487 
/*
 * Store entry points used by TCG-generated code, one per
 * size/endianness.  helper_ret_stb_mmu is kept out of line because
 * store_helper_unaligned() issues its byte stores through it.
 */
void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                   TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
2530 
2531 /*
2532  * Store Helpers for cpu_ldst.h
2533  */
2534 
/*
 * Common implementation behind the cpu_st*_mmuidx_ra family: performs
 * the store via store_helper, bracketed by the tracing hook (before the
 * access) and the plugin memory callback (after it).  Always inlined so
 * that the constant OP is folded into store_helper's specialization.
 */
static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                 int mmu_idx, uintptr_t retaddr, MemOp op)
{
    TCGMemOpIdx oi;
    uint16_t meminfo;

    /* Trace the access before it is performed... */
    meminfo = trace_mem_get_info(op, mmu_idx, true);
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    oi = make_memop_idx(op, mmu_idx);
    store_helper(env, addr, val, oi, retaddr, op);

    /* ...and notify plugins only after it has completed. */
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
}
2550 
/* cpu_ldst.h API: 8-bit store through an explicit mmu index. */
void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
}
2556 
/* cpu_ldst.h API: 16-bit big-endian store through an explicit mmu index. */
void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
}
2562 
/* cpu_ldst.h API: 32-bit big-endian store through an explicit mmu index. */
void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
}
2568 
/* cpu_ldst.h API: 64-bit big-endian store through an explicit mmu index. */
void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
}
2574 
/* cpu_ldst.h API: 16-bit little-endian store through an explicit mmu index. */
void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
}
2580 
/* cpu_ldst.h API: 32-bit little-endian store through an explicit mmu index. */
void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
}
2586 
/* cpu_ldst.h API: 64-bit little-endian store through an explicit mmu index. */
void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
}
2592 
/* 8-bit store using the current data mmu index (cpu_mmu_index with
   ifetch=false), propagating the caller-supplied return address. */
void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
2598 
/* 16-bit BE store using the current data mmu index. */
void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
2604 
/* 32-bit BE store using the current data mmu index. */
void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
2610 
/* 64-bit BE store using the current data mmu index. */
void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
2616 
/* 16-bit LE store using the current data mmu index. */
void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
2622 
/* 32-bit LE store using the current data mmu index. */
void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
2628 
/* 64-bit LE store using the current data mmu index. */
void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
2634 
/* 8-bit data store, retaddr 0 -- NOTE(review): retaddr==0 appears to be
   the convention for calls from outside generated code; confirm in
   cpu_ldst.h. */
void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stb_data_ra(env, ptr, val, 0);
}
2639 
/* 16-bit BE data store, retaddr 0 (call from outside generated code). */
void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_be_data_ra(env, ptr, val, 0);
}
2644 
/* 32-bit BE data store, retaddr 0. */
void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_be_data_ra(env, ptr, val, 0);
}
2649 
/* 64-bit BE data store, retaddr 0. */
void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_be_data_ra(env, ptr, val, 0);
}
2654 
/* 16-bit LE data store, retaddr 0. */
void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_le_data_ra(env, ptr, val, 0);
}
2659 
/* 32-bit LE data store, retaddr 0. */
void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_le_data_ra(env, ptr, val, 0);
}
2664 
/* 64-bit LE data store, retaddr 0. */
void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_le_data_ra(env, ptr, val, 0);
}
2669 
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

/* Extra trailing parameters appended to each generated helper signature. */
#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
/* Builds the helper symbol name from the op, size SUFFIX, endian END,
   plus a _mmu suffix to distinguish this set from the TCG-facing one. */
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
/* Translate the guest address to a host pointer via the TLB, using the
   explicitly supplied retaddr for fault reporting. */
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
#define ATOMIC_MMU_CLEANUP
/* mmu index recovered from OI, for use by the template's callbacks. */
#define ATOMIC_MMU_IDX   get_mmuidx(oi)

#include "atomic_common.c.inc"

/* Instantiate the atomic helpers once per access size; DATA_SIZE is
   redefined before each inclusion (the template evidently resets it,
   since no #undef appears here). */
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* 16-byte variants only when the host supports 128-bit atomics. */
#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
2701 
/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
/* No explicit retaddr parameter in this set: the TLB lookup recovers
   the return address with GETPC() instead. */
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
/* Note: no 16-byte variant in this set. */
#undef ATOMIC_MMU_IDX
2725 
2726 /* Code access functions.  */
2727 
/* Code-fetch slow path: 8-bit load with code_read=true; passes itself
   as load_helper's full-load function argument. */
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}
2733 
2734 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2735 {
2736     TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2737     return full_ldub_code(env, addr, oi, 0);
2738 }
2739 
/* Code-fetch slow path: 16-bit target-endian load; passes itself as
   load_helper's full-load function argument. */
static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}
2745 
2746 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
2747 {
2748     TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2749     return full_lduw_code(env, addr, oi, 0);
2750 }
2751 
/* Code-fetch slow path: 32-bit target-endian load; passes itself as
   load_helper's full-load function argument. */
static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}
2757 
2758 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
2759 {
2760     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2761     return full_ldl_code(env, addr, oi, 0);
2762 }
2763 
/* Code-fetch slow path: 64-bit target-endian load; passes itself as
   load_helper's full-load function argument. */
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}
2769 
2770 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2771 {
2772     TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
2773     return full_ldq_code(env, addr, oi, 0);
2774 }
2775