xref: /qemu/accel/tcg/cputlb.c (revision 74781c08)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/page-protection.h"
25 #include "exec/memory.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/tb-flush.h"
29 #include "exec/memory-internal.h"
30 #include "exec/ram_addr.h"
31 #include "exec/mmu-access-type.h"
32 #include "exec/tlb-common.h"
33 #include "exec/vaddr.h"
34 #include "tcg/tcg.h"
35 #include "qemu/error-report.h"
36 #include "exec/log.h"
37 #include "exec/helper-proto-common.h"
38 #include "qemu/atomic.h"
39 #include "qemu/atomic128.h"
40 #include "exec/translate-all.h"
41 #include "trace.h"
42 #include "tb-hash.h"
43 #include "internal-common.h"
44 #include "internal-target.h"
45 #ifdef CONFIG_PLUGIN
46 #include "qemu/plugin-memory.h"
47 #endif
48 #include "tcg/tcg-ldst.h"
49 #include "tcg/oversized-guest.h"
50 
51 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
52 /* #define DEBUG_TLB */
53 /* #define DEBUG_TLB_LOG */
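/*
 * Illustrative note: defining DEBUG_TLB alone sends tlb_debug() output to
 * stderr; defining both DEBUG_TLB and DEBUG_TLB_LOG routes it through
 * qemu_log_mask(CPU_LOG_MMU, ...), which is typically enabled with the
 * "mmu" item of QEMU's -d logging option.
 */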
54 
55 #ifdef DEBUG_TLB
56 # define DEBUG_TLB_GATE 1
57 # ifdef DEBUG_TLB_LOG
58 #  define DEBUG_TLB_LOG_GATE 1
59 # else
60 #  define DEBUG_TLB_LOG_GATE 0
61 # endif
62 #else
63 # define DEBUG_TLB_GATE 0
64 # define DEBUG_TLB_LOG_GATE 0
65 #endif
66 
67 #define tlb_debug(fmt, ...) do { \
68     if (DEBUG_TLB_LOG_GATE) { \
69         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
70                       ## __VA_ARGS__); \
71     } else if (DEBUG_TLB_GATE) { \
72         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
73     } \
74 } while (0)
75 
76 #define assert_cpu_is_self(cpu) do {                              \
77         if (DEBUG_TLB_GATE) {                                     \
78             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
79         }                                                         \
80     } while (0)
81 
82 /* run_on_cpu_data.target_ptr should always be big enough for a
83  * vaddr even on 32 bit builds
84  */
85 QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
86 
87 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
88  */
89 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
90 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
91 
92 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
93 {
94     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
95 }
96 
97 static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
98 {
99     return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
100 }
101 
102 static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
103                                     MMUAccessType access_type)
104 {
105     /* Do not rearrange the CPUTLBEntry structure members. */
106     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
107                       MMU_DATA_LOAD * sizeof(uint64_t));
108     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
109                       MMU_DATA_STORE * sizeof(uint64_t));
110     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
111                       MMU_INST_FETCH * sizeof(uint64_t));
112 
113 #if TARGET_LONG_BITS == 32
114     /* Use qatomic_read, in case of addr_write; only care about low bits. */
115     const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
116     ptr += HOST_BIG_ENDIAN;
117     return qatomic_read(ptr);
118 #else
119     const uint64_t *ptr = &entry->addr_idx[access_type];
120 # if TCG_OVERSIZED_GUEST
121     return *ptr;
122 # else
123     /* ofs might correspond to .addr_write, so use qatomic_read */
124     return qatomic_read(ptr);
125 # endif
126 #endif
127 }
128 
129 static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
130 {
131     return tlb_read_idx(entry, MMU_DATA_STORE);
132 }
133 
134 /* Find the TLB index corresponding to the mmu_idx + address pair.  */
135 static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
136                                   vaddr addr)
137 {
138     uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
139 
140     return (addr >> TARGET_PAGE_BITS) & size_mask;
141 }
142 
143 /* Find the TLB entry corresponding to the mmu_idx + address pair.  */
144 static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
145                                      vaddr addr)
146 {
147     return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
148 }
149 
150 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
151                              size_t max_entries)
152 {
153     desc->window_begin_ns = ns;
154     desc->window_max_entries = max_entries;
155 }
156 
157 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
158 {
159     CPUJumpCache *jc = cpu->tb_jmp_cache;
160     int i, i0;
161 
162     if (unlikely(!jc)) {
163         return;
164     }
165 
166     i0 = tb_jmp_cache_hash_page(page_addr);
167     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
168         qatomic_set(&jc->array[i0 + i].tb, NULL);
169     }
170 }
171 
172 /**
173  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
174  * @desc: The CPUTLBDesc portion of the TLB
175  * @fast: The CPUTLBDescFast portion of the same TLB
176  *
177  * Called with tlb_lock held.
178  *
179  * We have two main constraints when resizing a TLB: (1) we only resize it
180  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
181  * the array or unnecessarily flushing it), which means we do not control how
182  * frequently the resizing can occur; (2) we don't have access to the guest's
183  * future scheduling decisions, and therefore have to decide the magnitude of
184  * the resize based on past observations.
185  *
186  * In general, a memory-hungry process can benefit greatly from an appropriately
187  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
188  * we just have to make the TLB as large as possible; while an oversized TLB
189  * results in minimal TLB miss rates, it also takes longer to be flushed
190  * (flushes can be _very_ frequent), and the reduced locality can also hurt
191  * performance.
192  *
193  * To achieve near-optimal performance for all kinds of workloads, we:
194  *
195  * 1. Aggressively increase the size of the TLB when the use rate of the
196  * TLB being flushed is high, since it is likely that in the near future this
197  * memory-hungry process will execute again, and its memory hungriness will
198  * probably be similar.
199  *
200  * 2. Slowly reduce the size of the TLB as the use rate declines over a
201  * reasonably large time window. The rationale is that if in such a time window
202  * we have not observed a high TLB use rate, it is likely that we won't observe
203  * it in the near future. In that case, once a time window expires we downsize
204  * the TLB to match the maximum use rate observed in the window.
205  *
206  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
207  * since in that range performance is likely near-optimal. Recall that the TLB
208  * is direct mapped, so we want the use rate to be low (or at least not too
209  * high), since otherwise we are likely to have a significant amount of
210  * conflict misses.
211  */
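/*
 * Illustrative example of the heuristic above, assuming 1024 entries lies
 * between CPU_TLB_DYN_MIN_BITS and CPU_TLB_DYN_MAX_BITS: with
 * old_size == 1024 and window_max_entries == 800, the use rate is
 * 800 * 100 / 1024 == 78%, so the table is doubled to 2048 entries.
 * If instead a window expires with window_max_entries == 200, the rate is
 * 19%; pow2ceil(200) == 256 would give an expected rate of 78%, which is
 * above 70%, so ceil is doubled to 512 and the table shrinks to
 * MAX(512, 1 << CPU_TLB_DYN_MIN_BITS) entries.
 */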
212 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
213                                   int64_t now)
214 {
215     size_t old_size = tlb_n_entries(fast);
216     size_t rate;
217     size_t new_size = old_size;
218     int64_t window_len_ms = 100;
219     int64_t window_len_ns = window_len_ms * 1000 * 1000;
220     bool window_expired = now > desc->window_begin_ns + window_len_ns;
221 
222     if (desc->n_used_entries > desc->window_max_entries) {
223         desc->window_max_entries = desc->n_used_entries;
224     }
225     rate = desc->window_max_entries * 100 / old_size;
226 
227     if (rate > 70) {
228         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
229     } else if (rate < 30 && window_expired) {
230         size_t ceil = pow2ceil(desc->window_max_entries);
231         size_t expected_rate = desc->window_max_entries * 100 / ceil;
232 
233         /*
234          * Avoid undersizing when the max number of entries seen is just below
235          * a pow2. For instance, if max_entries == 1025, the expected use rate
236          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
237          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
238          * later. Thus, make sure that the expected use rate remains below 70%.
239          * (and since we double the size, that means the lowest rate we'd
240          * expect to get is 35%, which is still in the 30-70% range where
241          * we consider that the size is appropriate.)
242          */
243         if (expected_rate > 70) {
244             ceil *= 2;
245         }
246         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
247     }
248 
249     if (new_size == old_size) {
250         if (window_expired) {
251             tlb_window_reset(desc, now, desc->n_used_entries);
252         }
253         return;
254     }
255 
256     g_free(fast->table);
257     g_free(desc->fulltlb);
258 
259     tlb_window_reset(desc, now, 0);
260     /* desc->n_used_entries is cleared by the caller */
261     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
262     fast->table = g_try_new(CPUTLBEntry, new_size);
263     desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
264 
265     /*
266      * If the allocations fail, try smaller sizes. We just freed some
267      * memory, so going back to half of new_size has a good chance of working.
268      * Increased memory pressure elsewhere in the system might cause the
269      * allocations to fail though, so we progressively reduce the allocation
270      * size, aborting if we cannot even allocate the smallest TLB we support.
271      */
272     while (fast->table == NULL || desc->fulltlb == NULL) {
273         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
274             error_report("%s: %s", __func__, strerror(errno));
275             abort();
276         }
277         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
278         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
279 
280         g_free(fast->table);
281         g_free(desc->fulltlb);
282         fast->table = g_try_new(CPUTLBEntry, new_size);
283         desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
284     }
285 }
286 
287 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
288 {
289     desc->n_used_entries = 0;
290     desc->large_page_addr = -1;
291     desc->large_page_mask = -1;
292     desc->vindex = 0;
293     memset(fast->table, -1, sizeof_tlb(fast));
294     memset(desc->vtable, -1, sizeof(desc->vtable));
295 }
296 
297 static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
298                                         int64_t now)
299 {
300     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
301     CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
302 
303     tlb_mmu_resize_locked(desc, fast, now);
304     tlb_mmu_flush_locked(desc, fast);
305 }
306 
307 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
308 {
309     size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
310 
311     tlb_window_reset(desc, now, 0);
312     desc->n_used_entries = 0;
313     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
314     fast->table = g_new(CPUTLBEntry, n_entries);
315     desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
316     tlb_mmu_flush_locked(desc, fast);
317 }
318 
319 static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
320 {
321     cpu->neg.tlb.d[mmu_idx].n_used_entries++;
322 }
323 
324 static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
325 {
326     cpu->neg.tlb.d[mmu_idx].n_used_entries--;
327 }
328 
329 void tlb_init(CPUState *cpu)
330 {
331     int64_t now = get_clock_realtime();
332     int i;
333 
334     qemu_spin_init(&cpu->neg.tlb.c.lock);
335 
336     /* All tlbs are initialized flushed. */
337     cpu->neg.tlb.c.dirty = 0;
338 
339     for (i = 0; i < NB_MMU_MODES; i++) {
340         tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
341     }
342 }
343 
344 void tlb_destroy(CPUState *cpu)
345 {
346     int i;
347 
348     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
349     for (i = 0; i < NB_MMU_MODES; i++) {
350         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
351         CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
352 
353         g_free(fast->table);
354         g_free(desc->fulltlb);
355     }
356 }
357 
358 /* flush_all_helper: queue fn to run on every cpu other than src
359  *
360  * The caller is expected to run fn on src itself, either directly or,
361  * for the "synced" variants, as "safe" work via async_safe_run_on_cpu,
362  * which creates a synchronisation point where all queued work finishes
363  * before execution starts again.
364  */
365 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
366                              run_on_cpu_data d)
367 {
368     CPUState *cpu;
369 
370     CPU_FOREACH(cpu) {
371         if (cpu != src) {
372             async_run_on_cpu(cpu, fn, d);
373         }
374     }
375 }
376 
377 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
378 {
379     uint16_t asked = data.host_int;
380     uint16_t all_dirty, work, to_clean;
381     int64_t now = get_clock_realtime();
382 
383     assert_cpu_is_self(cpu);
384 
385     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
386 
387     qemu_spin_lock(&cpu->neg.tlb.c.lock);
388 
389     all_dirty = cpu->neg.tlb.c.dirty;
390     to_clean = asked & all_dirty;
391     all_dirty &= ~to_clean;
392     cpu->neg.tlb.c.dirty = all_dirty;
393 
394     for (work = to_clean; work != 0; work &= work - 1) {
395         int mmu_idx = ctz32(work);
396         tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
397     }
398 
399     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
400 
401     tcg_flush_jmp_cache(cpu);
402 
403     if (to_clean == ALL_MMUIDX_BITS) {
404         qatomic_set(&cpu->neg.tlb.c.full_flush_count,
405                     cpu->neg.tlb.c.full_flush_count + 1);
406     } else {
407         qatomic_set(&cpu->neg.tlb.c.part_flush_count,
408                     cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
409         if (to_clean != asked) {
410             qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
411                         cpu->neg.tlb.c.elide_flush_count +
412                         ctpop16(asked & ~to_clean));
413         }
414     }
415 }
416 
417 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
418 {
419     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
420 
421     if (cpu->created && !qemu_cpu_is_self(cpu)) {
422         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
423                          RUN_ON_CPU_HOST_INT(idxmap));
424     } else {
425         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
426     }
427 }
428 
429 void tlb_flush(CPUState *cpu)
430 {
431     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
432 }
433 
434 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
435 {
436     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
437 
438     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
439 
440     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
441     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
442 }
443 
444 void tlb_flush_all_cpus(CPUState *src_cpu)
445 {
446     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
447 }
448 
449 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
450 {
451     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
452 
453     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
454 
455     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
456     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
457 }
458 
459 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
460 {
461     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
462 }
463 
464 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
465                                       vaddr page, vaddr mask)
466 {
467     page &= mask;
468     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
469 
470     return (page == (tlb_entry->addr_read & mask) ||
471             page == (tlb_addr_write(tlb_entry) & mask) ||
472             page == (tlb_entry->addr_code & mask));
473 }
474 
475 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
476 {
477     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
478 }
479 
480 /**
481  * tlb_entry_is_empty - return true if the entry is not in use
482  * @te: pointer to CPUTLBEntry
483  */
484 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
485 {
486     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
487 }
488 
489 /* Called with tlb_c.lock held */
490 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
491                                         vaddr page,
492                                         vaddr mask)
493 {
494     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
495         memset(tlb_entry, -1, sizeof(*tlb_entry));
496         return true;
497     }
498     return false;
499 }
500 
501 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
502 {
503     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
504 }
505 
506 /* Called with tlb_c.lock held */
507 static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
508                                             vaddr page,
509                                             vaddr mask)
510 {
511     CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
512     int k;
513 
514     assert_cpu_is_self(cpu);
515     for (k = 0; k < CPU_VTLB_SIZE; k++) {
516         if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
517             tlb_n_used_entries_dec(cpu, mmu_idx);
518         }
519     }
520 }
521 
522 static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
523                                               vaddr page)
524 {
525     tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
526 }
527 
528 static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
529 {
530     vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
531     vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
532 
533     /* Check if we need to flush due to large pages.  */
534     if ((page & lp_mask) == lp_addr) {
535         tlb_debug("forcing full flush midx %d (%016"
536                   VADDR_PRIx "/%016" VADDR_PRIx ")\n",
537                   midx, lp_addr, lp_mask);
538         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
539     } else {
540         if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
541             tlb_n_used_entries_dec(cpu, midx);
542         }
543         tlb_flush_vtlb_page_locked(cpu, midx, page);
544     }
545 }
546 
547 /**
548  * tlb_flush_page_by_mmuidx_async_0:
549  * @cpu: cpu on which to flush
550  * @addr: page of virtual address to flush
551  * @idxmap: set of mmu_idx to flush
552  *
553  * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
554  * at @addr from the tlbs indicated by @idxmap from @cpu.
555  */
556 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
557                                              vaddr addr,
558                                              uint16_t idxmap)
559 {
560     int mmu_idx;
561 
562     assert_cpu_is_self(cpu);
563 
564     tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
565 
566     qemu_spin_lock(&cpu->neg.tlb.c.lock);
567     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
568         if ((idxmap >> mmu_idx) & 1) {
569             tlb_flush_page_locked(cpu, mmu_idx, addr);
570         }
571     }
572     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
573 
574     /*
575      * Discard jump cache entries for any tb which might potentially
576      * overlap the flushed page, which includes the previous.
577      */
578     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
579     tb_jmp_cache_clear_page(cpu, addr);
580 }
581 
582 /**
583  * tlb_flush_page_by_mmuidx_async_1:
584  * @cpu: cpu on which to flush
585  * @data: encoded addr + idxmap
586  *
587  * Helper for tlb_flush_page_by_mmuidx and friends, called through
588  * async_run_on_cpu.  The idxmap parameter is encoded in the page
589  * offset of the target_ptr field.  This limits the set of mmu_idx
590  * that can be passed via this method.
591  */
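/*
 * Illustrative example, assuming TARGET_PAGE_BITS == 12: the page offset
 * leaves 12 spare low bits, so any idxmap < 4096 can be packed by the
 * caller as RUN_ON_CPU_TARGET_PTR(addr | idxmap) and split apart here with
 * TARGET_PAGE_MASK.  Larger idxmap values take the heap-allocated
 * TLBFlushPageByMMUIdxData path handled by the next helper.
 */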
592 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
593                                              run_on_cpu_data data)
594 {
595     vaddr addr_and_idxmap = data.target_ptr;
596     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
597     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
598 
599     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
600 }
601 
602 typedef struct {
603     vaddr addr;
604     uint16_t idxmap;
605 } TLBFlushPageByMMUIdxData;
606 
607 /**
608  * tlb_flush_page_by_mmuidx_async_2:
609  * @cpu: cpu on which to flush
610  * @data: allocated addr + idxmap
611  *
612  * Helper for tlb_flush_page_by_mmuidx and friends, called through
613  * async_run_on_cpu.  The addr+idxmap parameters are stored in a
614  * TLBFlushPageByMMUIdxData structure that has been allocated
615  * specifically for this helper.  Free the structure when done.
616  */
617 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
618                                              run_on_cpu_data data)
619 {
620     TLBFlushPageByMMUIdxData *d = data.host_ptr;
621 
622     tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
623     g_free(d);
624 }
625 
626 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
627 {
628     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
629 
630     /* This should already be page aligned */
631     addr &= TARGET_PAGE_MASK;
632 
633     if (qemu_cpu_is_self(cpu)) {
634         tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
635     } else if (idxmap < TARGET_PAGE_SIZE) {
636         /*
637          * Most targets have only a few mmu_idx.  In the case where
638          * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
639          * allocating memory for this operation.
640          */
641         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
642                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
643     } else {
644         TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
645 
646         /* Otherwise allocate a structure, freed by the worker.  */
647         d->addr = addr;
648         d->idxmap = idxmap;
649         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
650                          RUN_ON_CPU_HOST_PTR(d));
651     }
652 }
653 
654 void tlb_flush_page(CPUState *cpu, vaddr addr)
655 {
656     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
657 }
658 
659 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
660                                        uint16_t idxmap)
661 {
662     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
663 
664     /* This should already be page aligned */
665     addr &= TARGET_PAGE_MASK;
666 
667     /*
668      * Allocate memory to hold addr+idxmap only when needed.
669      * See tlb_flush_page_by_mmuidx for details.
670      */
671     if (idxmap < TARGET_PAGE_SIZE) {
672         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
673                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
674     } else {
675         CPUState *dst_cpu;
676 
677         /* Allocate a separate data block for each destination cpu.  */
678         CPU_FOREACH(dst_cpu) {
679             if (dst_cpu != src_cpu) {
680                 TLBFlushPageByMMUIdxData *d
681                     = g_new(TLBFlushPageByMMUIdxData, 1);
682 
683                 d->addr = addr;
684                 d->idxmap = idxmap;
685                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
686                                  RUN_ON_CPU_HOST_PTR(d));
687             }
688         }
689     }
690 
691     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
692 }
693 
694 void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
695 {
696     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
697 }
698 
699 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
700                                               vaddr addr,
701                                               uint16_t idxmap)
702 {
703     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
704 
705     /* This should already be page aligned */
706     addr &= TARGET_PAGE_MASK;
707 
708     /*
709      * Allocate memory to hold addr+idxmap only when needed.
710      * See tlb_flush_page_by_mmuidx for details.
711      */
712     if (idxmap < TARGET_PAGE_SIZE) {
713         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
714                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
715         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
716                               RUN_ON_CPU_TARGET_PTR(addr | idxmap));
717     } else {
718         CPUState *dst_cpu;
719         TLBFlushPageByMMUIdxData *d;
720 
721         /* Allocate a separate data block for each destination cpu.  */
722         CPU_FOREACH(dst_cpu) {
723             if (dst_cpu != src_cpu) {
724                 d = g_new(TLBFlushPageByMMUIdxData, 1);
725                 d->addr = addr;
726                 d->idxmap = idxmap;
727                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
728                                  RUN_ON_CPU_HOST_PTR(d));
729             }
730         }
731 
732         d = g_new(TLBFlushPageByMMUIdxData, 1);
733         d->addr = addr;
734         d->idxmap = idxmap;
735         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
736                               RUN_ON_CPU_HOST_PTR(d));
737     }
738 }
739 
740 void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
741 {
742     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
743 }
744 
745 static void tlb_flush_range_locked(CPUState *cpu, int midx,
746                                    vaddr addr, vaddr len,
747                                    unsigned bits)
748 {
749     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
750     CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
751     vaddr mask = MAKE_64BIT_MASK(0, bits);
752 
753     /*
754      * If @bits is smaller than the tlb size, there may be multiple entries
755      * within the TLB; otherwise all addresses that match under @mask hit
756      * the same TLB entry.
757      * TODO: Perhaps allow bits to be a few bits less than the size.
758      * For now, just flush the entire TLB.
759      *
760      * If @len is larger than the tlb size, then it will take longer to
761      * test all of the entries in the TLB than it will to flush it all.
762      */
763     if (mask < f->mask || len > f->mask) {
764         tlb_debug("forcing full flush midx %d ("
765                   "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
766                   midx, addr, mask, len);
767         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
768         return;
769     }
770 
771     /*
772      * Check if we need to flush due to large pages.
773      * Because large_page_mask contains all 1's from the msb,
774      * we only need to test the end of the range.
775      */
776     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
777         tlb_debug("forcing full flush midx %d ("
778                   "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
779                   midx, d->large_page_addr, d->large_page_mask);
780         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
781         return;
782     }
783 
784     for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
785         vaddr page = addr + i;
786         CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
787 
788         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
789             tlb_n_used_entries_dec(cpu, midx);
790         }
791         tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
792     }
793 }
794 
795 typedef struct {
796     vaddr addr;
797     vaddr len;
798     uint16_t idxmap;
799     uint16_t bits;
800 } TLBFlushRangeData;
801 
802 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
803                                               TLBFlushRangeData d)
804 {
805     int mmu_idx;
806 
807     assert_cpu_is_self(cpu);
808 
809     tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
810               d.addr, d.bits, d.len, d.idxmap);
811 
812     qemu_spin_lock(&cpu->neg.tlb.c.lock);
813     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
814         if ((d.idxmap >> mmu_idx) & 1) {
815             tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
816         }
817     }
818     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
819 
820     /*
821      * If the length is larger than the jump cache size, then it will take
822      * longer to clear each entry individually than it will to clear it all.
823      */
824     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
825         tcg_flush_jmp_cache(cpu);
826         return;
827     }
828 
829     /*
830      * Discard jump cache entries for any tb which might potentially
831      * overlap the flushed pages, which includes the previous.
832      */
833     d.addr -= TARGET_PAGE_SIZE;
834     for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
835         tb_jmp_cache_clear_page(cpu, d.addr);
836         d.addr += TARGET_PAGE_SIZE;
837     }
838 }
839 
840 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
841                                               run_on_cpu_data data)
842 {
843     TLBFlushRangeData *d = data.host_ptr;
844     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
845     g_free(d);
846 }
847 
848 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
849                                vaddr len, uint16_t idxmap,
850                                unsigned bits)
851 {
852     TLBFlushRangeData d;
853 
854     /*
855      * If all bits are significant, and len is small,
856      * this devolves to tlb_flush_page.
857      */
858     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
859         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
860         return;
861     }
862     /* If no page bits are significant, this devolves to tlb_flush. */
863     if (bits < TARGET_PAGE_BITS) {
864         tlb_flush_by_mmuidx(cpu, idxmap);
865         return;
866     }
867 
868     /* This should already be page aligned */
869     d.addr = addr & TARGET_PAGE_MASK;
870     d.len = len;
871     d.idxmap = idxmap;
872     d.bits = bits;
873 
874     if (qemu_cpu_is_self(cpu)) {
875         tlb_flush_range_by_mmuidx_async_0(cpu, d);
876     } else {
877         /* Otherwise allocate a structure, freed by the worker.  */
878         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
879         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
880                          RUN_ON_CPU_HOST_PTR(p));
881     }
882 }
883 
884 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
885                                    uint16_t idxmap, unsigned bits)
886 {
887     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
888 }
889 
890 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
891                                         vaddr addr, vaddr len,
892                                         uint16_t idxmap, unsigned bits)
893 {
894     TLBFlushRangeData d;
895     CPUState *dst_cpu;
896 
897     /*
898      * If all bits are significant, and len is small,
899      * this devolves to tlb_flush_page.
900      */
901     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
902         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
903         return;
904     }
905     /* If no page bits are significant, this devolves to tlb_flush. */
906     if (bits < TARGET_PAGE_BITS) {
907         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
908         return;
909     }
910 
911     /* This should already be page aligned */
912     d.addr = addr & TARGET_PAGE_MASK;
913     d.len = len;
914     d.idxmap = idxmap;
915     d.bits = bits;
916 
917     /* Allocate a separate data block for each destination cpu.  */
918     CPU_FOREACH(dst_cpu) {
919         if (dst_cpu != src_cpu) {
920             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
921             async_run_on_cpu(dst_cpu,
922                              tlb_flush_range_by_mmuidx_async_1,
923                              RUN_ON_CPU_HOST_PTR(p));
924         }
925     }
926 
927     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
928 }
929 
930 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
931                                             vaddr addr, uint16_t idxmap,
932                                             unsigned bits)
933 {
934     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
935                                        idxmap, bits);
936 }
937 
938 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
939                                                vaddr addr,
940                                                vaddr len,
941                                                uint16_t idxmap,
942                                                unsigned bits)
943 {
944     TLBFlushRangeData d, *p;
945     CPUState *dst_cpu;
946 
947     /*
948      * If all bits are significant, and len is small,
949      * this devolves to tlb_flush_page.
950      */
951     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
952         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
953         return;
954     }
955     /* If no page bits are significant, this devolves to tlb_flush. */
956     if (bits < TARGET_PAGE_BITS) {
957         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
958         return;
959     }
960 
961     /* This should already be page aligned */
962     d.addr = addr & TARGET_PAGE_MASK;
963     d.len = len;
964     d.idxmap = idxmap;
965     d.bits = bits;
966 
967     /* Allocate a separate data block for each destination cpu.  */
968     CPU_FOREACH(dst_cpu) {
969         if (dst_cpu != src_cpu) {
970             p = g_memdup(&d, sizeof(d));
971             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
972                              RUN_ON_CPU_HOST_PTR(p));
973         }
974     }
975 
976     p = g_memdup(&d, sizeof(d));
977     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
978                           RUN_ON_CPU_HOST_PTR(p));
979 }
980 
981 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
982                                                    vaddr addr,
983                                                    uint16_t idxmap,
984                                                    unsigned bits)
985 {
986     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
987                                               idxmap, bits);
988 }
989 
990 /* update the TLBs so that writes to code in the virtual page 'addr'
991    can be detected */
992 void tlb_protect_code(ram_addr_t ram_addr)
993 {
994     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
995                                              TARGET_PAGE_SIZE,
996                                              DIRTY_MEMORY_CODE);
997 }
998 
999 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1000    tested for self modifying code */
1001 void tlb_unprotect_code(ram_addr_t ram_addr)
1002 {
1003     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
1004 }
1005 
1006 
1007 /*
1008  * Dirty write flag handling
1009  *
1010  * When the TCG code writes to a location it looks up the address in
1011  * the TLB and uses that data to compute the final address. If any of
1012  * the lower bits of the address are set then the slow path is forced.
1013  * There are a number of reasons to do this but for normal RAM the
1014  * most usual is detecting writes to code regions which may invalidate
1015  * generated code.
1016  *
1017  * Other vCPUs might be reading their TLBs during guest execution, so we update
1018  * te->addr_write with qatomic_set. We don't need to worry about this for
1019  * oversized guests as MTTCG is disabled for them.
1020  *
1021  * Called with tlb_c.lock held.
1022  */
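/*
 * Illustrative example: the fast-path check compares the page-aligned
 * access address (plus its low alignment bits) against addr_write, so once
 * TLB_NOTDIRTY is ORed in below the comparison always fails and stores take
 * the slow path, where notdirty_write() can invalidate any TBs on the page
 * before the write completes.
 */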
1023 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
1024                                          uintptr_t start, uintptr_t length)
1025 {
1026     uintptr_t addr = tlb_entry->addr_write;
1027 
1028     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
1029                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
1030         addr &= TARGET_PAGE_MASK;
1031         addr += tlb_entry->addend;
1032         if ((addr - start) < length) {
1033 #if TARGET_LONG_BITS == 32
1034             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
1035             ptr_write += HOST_BIG_ENDIAN;
1036             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
1037 #elif TCG_OVERSIZED_GUEST
1038             tlb_entry->addr_write |= TLB_NOTDIRTY;
1039 #else
1040             qatomic_set(&tlb_entry->addr_write,
1041                         tlb_entry->addr_write | TLB_NOTDIRTY);
1042 #endif
1043         }
1044     }
1045 }
1046 
1047 /*
1048  * Called with tlb_c.lock held.
1049  * Called only from the vCPU context, i.e. the TLB's owner thread.
1050  */
1051 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1052 {
1053     *d = *s;
1054 }
1055 
1056 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1057  * the target vCPU).
1058  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1059  * thing actually updated is the target TLB entry ->addr_write flags.
1060  */
1061 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1062 {
1063     int mmu_idx;
1064 
1065     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1066     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1067         unsigned int i;
1068         unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
1069 
1070         for (i = 0; i < n; i++) {
1071             tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
1072                                          start1, length);
1073         }
1074 
1075         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1076             tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
1077                                          start1, length);
1078         }
1079     }
1080     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1081 }
1082 
1083 /* Called with tlb_c.lock held */
1084 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1085                                          vaddr addr)
1086 {
1087     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1088         tlb_entry->addr_write = addr;
1089     }
1090 }
1091 
1092 /* update the TLB corresponding to virtual page vaddr
1093    so that it is no longer dirty */
1094 static void tlb_set_dirty(CPUState *cpu, vaddr addr)
1095 {
1096     int mmu_idx;
1097 
1098     assert_cpu_is_self(cpu);
1099 
1100     addr &= TARGET_PAGE_MASK;
1101     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1102     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1103         tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
1104     }
1105 
1106     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1107         int k;
1108         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1109             tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
1110         }
1111     }
1112     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1113 }
1114 
1115 /* Our TLB does not support large pages, so remember the area covered by
1116    large pages and trigger a full TLB flush if these are invalidated.  */
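/*
 * Illustrative example: if a 2MB page at 0x40000000 is already recorded
 * (large_page_addr == 0x40000000, large_page_mask == ~0x1fffff) and another
 * 2MB page at 0x40300000 is added, the loop below widens the mask one bit
 * at a time until both addresses match under it, leaving
 * large_page_addr == 0x40000000 and large_page_mask == ~0x3fffff, i.e. one
 * 4MB region whose invalidation will force a full flush of this mmu_idx.
 */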
1117 static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
1118                                vaddr addr, uint64_t size)
1119 {
1120     vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
1121     vaddr lp_mask = ~(size - 1);
1122 
1123     if (lp_addr == (vaddr)-1) {
1124         /* No previous large page.  */
1125         lp_addr = addr;
1126     } else {
1127         /* Extend the existing region to include the new page.
1128            This is a compromise between unnecessary flushes and
1129            the cost of maintaining a full variable size TLB.  */
1130         lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
1131         while (((lp_addr ^ addr) & lp_mask) != 0) {
1132             lp_mask <<= 1;
1133         }
1134     }
1135     cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1136     cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
1137 }
1138 
1139 static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1140                                    vaddr address, int flags,
1141                                    MMUAccessType access_type, bool enable)
1142 {
1143     if (enable) {
1144         address |= flags & TLB_FLAGS_MASK;
1145         flags &= TLB_SLOW_FLAGS_MASK;
1146         if (flags) {
1147             address |= TLB_FORCE_SLOW;
1148         }
1149     } else {
1150         address = -1;
1151         flags = 0;
1152     }
1153     ent->addr_idx[access_type] = address;
1154     full->slow_flags[access_type] = flags;
1155 }
1156 
1157 /*
1158  * Add a new TLB entry. At most one entry for a given virtual address
1159  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1160  * supplied size is only used by tlb_flush_page.
1161  *
1162  * Called from TCG-generated code, which is under an RCU read-side
1163  * critical section.
1164  */
1165 void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1166                        vaddr addr, CPUTLBEntryFull *full)
1167 {
1168     CPUTLB *tlb = &cpu->neg.tlb;
1169     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1170     MemoryRegionSection *section;
1171     unsigned int index, read_flags, write_flags;
1172     uintptr_t addend;
1173     CPUTLBEntry *te, tn;
1174     hwaddr iotlb, xlat, sz, paddr_page;
1175     vaddr addr_page;
1176     int asidx, wp_flags, prot;
1177     bool is_ram, is_romd;
1178 
1179     assert_cpu_is_self(cpu);
1180 
1181     if (full->lg_page_size <= TARGET_PAGE_BITS) {
1182         sz = TARGET_PAGE_SIZE;
1183     } else {
1184         sz = (hwaddr)1 << full->lg_page_size;
1185         tlb_add_large_page(cpu, mmu_idx, addr, sz);
1186     }
1187     addr_page = addr & TARGET_PAGE_MASK;
1188     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1189 
1190     prot = full->prot;
1191     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1192     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1193                                                 &xlat, &sz, full->attrs, &prot);
1194     assert(sz >= TARGET_PAGE_SIZE);
1195 
1196     tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1197               " prot=%x idx=%d\n",
1198               addr, full->phys_addr, prot, mmu_idx);
1199 
1200     read_flags = full->tlb_fill_flags;
1201     if (full->lg_page_size < TARGET_PAGE_BITS) {
1202         /* Repeat the MMU check and TLB fill on every access.  */
1203         read_flags |= TLB_INVALID_MASK;
1204     }
1205 
1206     is_ram = memory_region_is_ram(section->mr);
1207     is_romd = memory_region_is_romd(section->mr);
1208 
1209     if (is_ram || is_romd) {
1210         /* RAM and ROMD both have associated host memory. */
1211         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1212     } else {
1213         /* I/O does not; force the host address to NULL. */
1214         addend = 0;
1215     }
1216 
1217     write_flags = read_flags;
1218     if (is_ram) {
1219         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1220         assert(!(iotlb & ~TARGET_PAGE_MASK));
1221         /*
1222          * Computing is_clean is expensive; avoid all that unless
1223          * the page is actually writable.
1224          */
1225         if (prot & PAGE_WRITE) {
1226             if (section->readonly) {
1227                 write_flags |= TLB_DISCARD_WRITE;
1228             } else if (cpu_physical_memory_is_clean(iotlb)) {
1229                 write_flags |= TLB_NOTDIRTY;
1230             }
1231         }
1232     } else {
1233         /* I/O or ROMD */
1234         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1235         /*
1236          * Writes to romd devices must go through MMIO to enable write.
1237          * Reads to romd devices go through the ram_ptr found above,
1238          * but of course reads to I/O must go through MMIO.
1239          */
1240         write_flags |= TLB_MMIO;
1241         if (!is_romd) {
1242             read_flags = write_flags;
1243         }
1244     }
1245 
1246     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
1247                                               TARGET_PAGE_SIZE);
1248 
1249     index = tlb_index(cpu, mmu_idx, addr_page);
1250     te = tlb_entry(cpu, mmu_idx, addr_page);
1251 
1252     /*
1253      * Hold the TLB lock for the rest of the function. We could acquire/release
1254      * the lock several times in the function, but it is faster to amortize the
1255      * acquisition cost by acquiring it just once. Note that this leads to
1256      * a longer critical section, but this is not a concern since the TLB lock
1257      * is unlikely to be contended.
1258      */
1259     qemu_spin_lock(&tlb->c.lock);
1260 
1261     /* Note that the tlb is no longer clean.  */
1262     tlb->c.dirty |= 1 << mmu_idx;
1263 
1264     /* Make sure there's no cached translation for the new page.  */
1265     tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
1266 
1267     /*
1268      * Only evict the old entry to the victim tlb if it's for a
1269      * different page; otherwise just overwrite the stale data.
1270      */
1271     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1272         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1273         CPUTLBEntry *tv = &desc->vtable[vidx];
1274 
1275         /* Evict the old entry into the victim tlb.  */
1276         copy_tlb_helper_locked(tv, te);
1277         desc->vfulltlb[vidx] = desc->fulltlb[index];
1278         tlb_n_used_entries_dec(cpu, mmu_idx);
1279     }
1280 
1281     /* refill the tlb */
1282     /*
1283      * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
1284      * aligned ram_addr_t of the page base of the target RAM.
1285      * Otherwise, iotlb contains
1286      *  - a physical section number in the lower TARGET_PAGE_BITS
1287      *  - the offset within section->mr of the page base (I/O, ROMD) with the
1288      *    TARGET_PAGE_BITS masked off.
1289      * We subtract addr_page (which is page aligned and thus won't
1290      * disturb the low bits) to give an offset which can be added to the
1291      * (non-page-aligned) vaddr of the eventual memory access to get
1292      * the MemoryRegion offset for the access. Note that the vaddr we
1293      * subtract here is that of the page base, and not the same as the
1294      * vaddr we add back in io_prepare()/get_page_addr_code().
1295      */
1296     desc->fulltlb[index] = *full;
1297     full = &desc->fulltlb[index];
1298     full->xlat_section = iotlb - addr_page;
1299     full->phys_addr = paddr_page;
1300 
1301     /* Now calculate the new entry */
1302     tn.addend = addend - addr_page;
1303 
1304     tlb_set_compare(full, &tn, addr_page, read_flags,
1305                     MMU_INST_FETCH, prot & PAGE_EXEC);
1306 
1307     if (wp_flags & BP_MEM_READ) {
1308         read_flags |= TLB_WATCHPOINT;
1309     }
1310     tlb_set_compare(full, &tn, addr_page, read_flags,
1311                     MMU_DATA_LOAD, prot & PAGE_READ);
1312 
1313     if (prot & PAGE_WRITE_INV) {
1314         write_flags |= TLB_INVALID_MASK;
1315     }
1316     if (wp_flags & BP_MEM_WRITE) {
1317         write_flags |= TLB_WATCHPOINT;
1318     }
1319     tlb_set_compare(full, &tn, addr_page, write_flags,
1320                     MMU_DATA_STORE, prot & PAGE_WRITE);
1321 
1322     copy_tlb_helper_locked(te, &tn);
1323     tlb_n_used_entries_inc(cpu, mmu_idx);
1324     qemu_spin_unlock(&tlb->c.lock);
1325 }
1326 
1327 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
1328                              hwaddr paddr, MemTxAttrs attrs, int prot,
1329                              int mmu_idx, uint64_t size)
1330 {
1331     CPUTLBEntryFull full = {
1332         .phys_addr = paddr,
1333         .attrs = attrs,
1334         .prot = prot,
1335         .lg_page_size = ctz64(size)
1336     };
1337 
1338     assert(is_power_of_2(size));
1339     tlb_set_page_full(cpu, mmu_idx, addr, &full);
1340 }
1341 
1342 void tlb_set_page(CPUState *cpu, vaddr addr,
1343                   hwaddr paddr, int prot,
1344                   int mmu_idx, uint64_t size)
1345 {
1346     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1347                             prot, mmu_idx, size);
1348 }
1349 
1350 /*
1351  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1352  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1353  * be discarded and looked up again (e.g. via tlb_entry()).
1354  */
1355 static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1356                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1357 {
1358     bool ok;
1359 
1360     /*
1361      * This is not a probe, so only valid return is success; failure
1362      * should result in exception + longjmp to the cpu loop.
1363      */
1364     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1365                                     access_type, mmu_idx, false, retaddr);
1366     assert(ok);
1367 }
1368 
1369 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1370                                         MMUAccessType access_type,
1371                                         int mmu_idx, uintptr_t retaddr)
1372 {
1373     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1374                                           mmu_idx, retaddr);
1375 }
1376 
1377 static MemoryRegionSection *
1378 io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
1379            MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
1380 {
1381     MemoryRegionSection *section;
1382     hwaddr mr_offset;
1383 
1384     section = iotlb_to_section(cpu, xlat, attrs);
1385     mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
1386     cpu->mem_io_pc = retaddr;
1387     if (!cpu->neg.can_do_io) {
1388         cpu_io_recompile(cpu, retaddr);
1389     }
1390 
1391     *out_offset = mr_offset;
1392     return section;
1393 }
1394 
1395 static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
1396                       unsigned size, MMUAccessType access_type, int mmu_idx,
1397                       MemTxResult response, uintptr_t retaddr)
1398 {
1399     if (!cpu->ignore_memory_transaction_failures
1400         && cpu->cc->tcg_ops->do_transaction_failed) {
1401         hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1402 
1403         cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1404                                                 access_type, mmu_idx,
1405                                                 full->attrs, response, retaddr);
1406     }
1407 }
1408 
1409 /* Return true if ADDR is present in the victim tlb, and has been copied
1410    back to the main tlb.  */
1411 static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
1412                            MMUAccessType access_type, vaddr page)
1413 {
1414     size_t vidx;
1415 
1416     assert_cpu_is_self(cpu);
1417     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1418         CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
1419         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1420 
1421         if (cmp == page) {
1422             /* Found entry in victim tlb, swap tlb and iotlb.  */
1423             CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
1424 
1425             qemu_spin_lock(&cpu->neg.tlb.c.lock);
1426             copy_tlb_helper_locked(&tmptlb, tlb);
1427             copy_tlb_helper_locked(tlb, vtlb);
1428             copy_tlb_helper_locked(vtlb, &tmptlb);
1429             qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1430 
1431             CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1432             CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
1433             CPUTLBEntryFull tmpf;
1434             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1435             return true;
1436         }
1437     }
1438     return false;
1439 }
1440 
1441 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1442                            CPUTLBEntryFull *full, uintptr_t retaddr)
1443 {
1444     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1445 
1446     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1447 
1448     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1449         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1450     }
1451 
1452     /*
1453      * Set both VGA and migration bits for simplicity and to remove
1454      * the notdirty callback faster.
1455      */
1456     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1457 
1458     /* We remove the notdirty callback only if the code has been flushed. */
1459     if (!cpu_physical_memory_is_clean(ram_addr)) {
1460         trace_memory_notdirty_set_dirty(mem_vaddr);
1461         tlb_set_dirty(cpu, mem_vaddr);
1462     }
1463 }
1464 
1465 static int probe_access_internal(CPUState *cpu, vaddr addr,
1466                                  int fault_size, MMUAccessType access_type,
1467                                  int mmu_idx, bool nonfault,
1468                                  void **phost, CPUTLBEntryFull **pfull,
1469                                  uintptr_t retaddr, bool check_mem_cbs)
1470 {
1471     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1472     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1473     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1474     vaddr page_addr = addr & TARGET_PAGE_MASK;
1475     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1476     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
1477     CPUTLBEntryFull *full;
1478 
1479     if (!tlb_hit_page(tlb_addr, page_addr)) {
1480         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
1481             if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
1482                                             mmu_idx, nonfault, retaddr)) {
1483                 /* Non-faulting page table read failed.  */
1484                 *phost = NULL;
1485                 *pfull = NULL;
1486                 return TLB_INVALID_MASK;
1487             }
1488 
1489             /* TLB resize via tlb_fill may have moved the entry.  */
1490             index = tlb_index(cpu, mmu_idx, addr);
1491             entry = tlb_entry(cpu, mmu_idx, addr);
1492 
1493             /*
1494              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1495              * to force the next access through tlb_fill.  We've just
1496              * called tlb_fill, so we know that this entry *is* valid.
1497              */
1498             flags &= ~TLB_INVALID_MASK;
1499         }
1500         tlb_addr = tlb_read_idx(entry, access_type);
1501     }
1502     flags &= tlb_addr;
1503 
1504     *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1505     flags |= full->slow_flags[access_type];
1506 
1507     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1508     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
1509         || (access_type != MMU_INST_FETCH && force_mmio)) {
1510         *phost = NULL;
1511         return TLB_MMIO;
1512     }
1513 
1514     /* Everything else is RAM. */
1515     *phost = (void *)((uintptr_t)addr + entry->addend);
1516     return flags;
1517 }
1518 
1519 int probe_access_full(CPUArchState *env, vaddr addr, int size,
1520                       MMUAccessType access_type, int mmu_idx,
1521                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1522                       uintptr_t retaddr)
1523 {
1524     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1525                                       mmu_idx, nonfault, phost, pfull, retaddr,
1526                                       true);
1527 
1528     /* Handle clean RAM pages.  */
1529     if (unlikely(flags & TLB_NOTDIRTY)) {
1530         int dirtysize = size == 0 ? 1 : size;
1531         notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
1532         flags &= ~TLB_NOTDIRTY;
1533     }
1534 
1535     return flags;
1536 }
1537 
1538 int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1539                           MMUAccessType access_type, int mmu_idx,
1540                           void **phost, CPUTLBEntryFull **pfull)
1541 {
1542     void *discard_phost;
1543     CPUTLBEntryFull *discard_tlb;
1544 
1545     /* privately handle users that don't need full results */
1546     phost = phost ? phost : &discard_phost;
1547     pfull = pfull ? pfull : &discard_tlb;
1548 
1549     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1550                                       mmu_idx, true, phost, pfull, 0, false);
1551 
1552     /* Handle clean RAM pages.  */
1553     if (unlikely(flags & TLB_NOTDIRTY)) {
1554         int dirtysize = size == 0 ? 1 : size;
1555         notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
1556         flags &= ~TLB_NOTDIRTY;
1557     }
1558 
1559     return flags;
1560 }
1561 
1562 int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1563                        MMUAccessType access_type, int mmu_idx,
1564                        bool nonfault, void **phost, uintptr_t retaddr)
1565 {
1566     CPUTLBEntryFull *full;
1567     int flags;
1568 
1569     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1570 
1571     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1572                                   mmu_idx, nonfault, phost, &full, retaddr,
1573                                   true);
1574 
1575     /* Handle clean RAM pages. */
1576     if (unlikely(flags & TLB_NOTDIRTY)) {
1577         int dirtysize = size == 0 ? 1 : size;
1578         notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
1579         flags &= ~TLB_NOTDIRTY;
1580     }
1581 
1582     return flags;
1583 }
1584 
1585 void *probe_access(CPUArchState *env, vaddr addr, int size,
1586                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1587 {
1588     CPUTLBEntryFull *full;
1589     void *host;
1590     int flags;
1591 
1592     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1593 
1594     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1595                                   mmu_idx, false, &host, &full, retaddr,
1596                                   true);
1597 
1598     /* Per the interface, size == 0 merely faults the access. */
1599     if (size == 0) {
1600         return NULL;
1601     }
1602 
1603     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1604         /* Handle watchpoints.  */
1605         if (flags & TLB_WATCHPOINT) {
1606             int wp_access = (access_type == MMU_DATA_STORE
1607                              ? BP_MEM_WRITE : BP_MEM_READ);
1608             cpu_check_watchpoint(env_cpu(env), addr, size,
1609                                  full->attrs, wp_access, retaddr);
1610         }
1611 
1612         /* Handle clean RAM pages.  */
1613         if (flags & TLB_NOTDIRTY) {
1614             notdirty_write(env_cpu(env), addr, size, full, retaddr);
1615         }
1616     }
1617 
1618     return host;
1619 }
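/*
 * A minimal usage sketch for probe_access(); the helper name below is
 * hypothetical.  Probing the destination up front means any fault or
 * watchpoint fires before guest state is modified.  A non-NULL result is
 * plain RAM that may be written directly on the host; NULL means the
 * access needs the normal cpu_st* path (e.g. MMIO).  The 8 bytes are
 * assumed not to cross a page boundary, as probe_access() asserts.
 *
 *     void helper_example_store8(CPUArchState *env, vaddr addr, uint64_t val)
 *     {
 *         void *host = probe_access(env, addr, 8, MMU_DATA_STORE,
 *                                   cpu_mmu_index(env_cpu(env), false),
 *                                   GETPC());
 *         if (host) {
 *             stq_le_p(host, val);   // little-endian just for the example
 *         }
 *     }
 */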
1620 
1621 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1622                         MMUAccessType access_type, int mmu_idx)
1623 {
1624     CPUTLBEntryFull *full;
1625     void *host;
1626     int flags;
1627 
1628     flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
1629                                   mmu_idx, true, &host, &full, 0, false);
1630 
1631     /* No combination of flags is expected by the caller. */
1632     return flags ? NULL : host;
1633 }
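/*
 * A minimal usage sketch (hypothetical caller): tlb_vaddr_to_host() as a
 * non-faulting fast-path test for "plain RAM, already translated, nothing
 * special to do".
 *
 *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     if (host) {
 *         // direct host access is safe: no MMIO, watchpoint or refill
 *     } else {
 *         // fall back to the cpu_ld/cpu_st accessors or probe_access()
 *     }
 */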
1634 
1635 /*
1636  * Return a ram_addr_t for the virtual address for execution.
1637  *
1638  * Return -1 if we can't translate and execute from an entire page
1639  * of RAM.  This will force us to execute by loading and translating
1640  * one insn at a time, without caching.
1641  *
1642  * NOTE: This function will trigger an exception if the page is
1643  * not executable.
1644  */
1645 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
1646                                         void **hostp)
1647 {
1648     CPUTLBEntryFull *full;
1649     void *p;
1650 
1651     (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
1652                                 cpu_mmu_index(env_cpu(env), true), false,
1653                                 &p, &full, 0, false);
1654     if (p == NULL) {
1655         return -1;
1656     }
1657 
1658     if (full->lg_page_size < TARGET_PAGE_BITS) {
1659         return -1;
1660     }
1661 
1662     if (hostp) {
1663         *hostp = p;
1664     }
1665     return qemu_ram_addr_from_host_nofail(p);
1666 }
1667 
1668 /* Load/store with atomicity primitives. */
1669 #include "ldst_atomicity.c.inc"
1670 
1671 #ifdef CONFIG_PLUGIN
1672 /*
1673  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1674  * This should be a hot path as we will have just looked this address up
1675  * in the softmmu lookup code (or helper). We don't handle re-fills or
1676  * checking the victim table. This is purely informational.
1677  *
1678  * The one corner case is i/o write, which can cause changes to the
1679  * address space.  Those changes, and the corresponding tlb flush,
1680  * should be delayed until the next TB, so even then this ought not fail.
1681  * But check, just in case.
1682  */
1683 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1684                        bool is_store, struct qemu_plugin_hwaddr *data)
1685 {
1686     CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
1687     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1688     MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
1689     uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
1690     CPUTLBEntryFull *full;
1691 
1692     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1693         return false;
1694     }
1695 
1696     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1697     data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1698 
1699     /* We must have an iotlb entry for MMIO */
1700     if (tlb_addr & TLB_MMIO) {
1701         MemoryRegionSection *section =
1702             iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
1703                              full->attrs);
1704         data->is_io = true;
1705         data->mr = section->mr;
1706     } else {
1707         data->is_io = false;
1708         data->mr = NULL;
1709     }
1710     return true;
1711 }
1712 #endif
1713 
1714 /*
1715  * Probe for a load/store operation.
1716  * Return the host address and the TLB flags for the access.
1717  */
1718 
1719 typedef struct MMULookupPageData {
1720     CPUTLBEntryFull *full;
1721     void *haddr;
1722     vaddr addr;
1723     int flags;
1724     int size;
1725 } MMULookupPageData;
1726 
1727 typedef struct MMULookupLocals {
1728     MMULookupPageData page[2];
1729     MemOp memop;
1730     int mmu_idx;
1731 } MMULookupLocals;
1732 
1733 /**
1734  * mmu_lookup1: translate one page
1735  * @cpu: generic cpu state
1736  * @data: lookup parameters
1737  * @mmu_idx: virtual address context
1738  * @access_type: load/store/code
1739  * @ra: return address into tcg generated code, or 0
1740  *
1741  * Resolve the translation for the one page at @data.addr, filling in
1742  * the rest of @data with the results.  If the translation fails,
1743  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
1744  * @mmu_idx may have resized.
1745  */
1746 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
1747                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
1748 {
1749     vaddr addr = data->addr;
1750     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1751     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1752     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1753     bool maybe_resized = false;
1754     CPUTLBEntryFull *full;
1755     int flags;
1756 
1757     /* If the TLB entry is for a different page, reload and try again.  */
1758     if (!tlb_hit(tlb_addr, addr)) {
1759         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
1760                             addr & TARGET_PAGE_MASK)) {
1761             tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
1762             maybe_resized = true;
1763             index = tlb_index(cpu, mmu_idx, addr);
1764             entry = tlb_entry(cpu, mmu_idx, addr);
1765         }
1766         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
1767     }
1768 
1769     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1770     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
1771     flags |= full->slow_flags[access_type];
1772 
1773     data->full = full;
1774     data->flags = flags;
1775     /* Compute haddr speculatively; depending on flags it might be invalid. */
1776     data->haddr = (void *)((uintptr_t)addr + entry->addend);
1777 
1778     return maybe_resized;
1779 }
1780 
1781 /**
1782  * mmu_watch_or_dirty
1783  * @cpu: generic cpu state
1784  * @data: lookup parameters
1785  * @access_type: load/store/code
1786  * @ra: return address into tcg generated code, or 0
1787  *
1788  * Trigger watchpoints for @data.addr:@data.size;
1789  * record writes to protected clean pages.
1790  */
1791 static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
1792                                MMUAccessType access_type, uintptr_t ra)
1793 {
1794     CPUTLBEntryFull *full = data->full;
1795     vaddr addr = data->addr;
1796     int flags = data->flags;
1797     int size = data->size;
1798 
1799     /* On watchpoint hit, this will longjmp out.  */
1800     if (flags & TLB_WATCHPOINT) {
1801         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1802         cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
1803         flags &= ~TLB_WATCHPOINT;
1804     }
1805 
1806     /* Note that notdirty is only set for writes. */
1807     if (flags & TLB_NOTDIRTY) {
1808         notdirty_write(cpu, addr, size, full, ra);
1809         flags &= ~TLB_NOTDIRTY;
1810     }
1811     data->flags = flags;
1812 }
1813 
1814 /**
1815  * mmu_lookup: translate page(s)
1816  * @cpu: generic cpu state
1817  * @addr: virtual address
1818  * @oi: combined mmu_idx and MemOp
1819  * @ra: return address into tcg generated code, or 0
1820  * @access_type: load/store/code
1821  * @l: output result
1822  *
1823  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
1824  * bytes.  Return true if the lookup crosses a page boundary.
1825  */
1826 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1827                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
1828 {
1829     unsigned a_bits;
1830     bool crosspage;
1831     int flags;
1832 
1833     l->memop = get_memop(oi);
1834     l->mmu_idx = get_mmuidx(oi);
1835 
1836     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
1837 
1838     /* Handle CPU specific unaligned behaviour */
1839     a_bits = get_alignment_bits(l->memop);
1840     if (addr & ((1 << a_bits) - 1)) {
1841         cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
1842     }
1843 
1844     l->page[0].addr = addr;
1845     l->page[0].size = memop_size(l->memop);
1846     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
1847     l->page[1].size = 0;
1848     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
1849 
1850     if (likely(!crosspage)) {
1851         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1852 
1853         flags = l->page[0].flags;
1854         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1855             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1856         }
1857         if (unlikely(flags & TLB_BSWAP)) {
1858             l->memop ^= MO_BSWAP;
1859         }
1860     } else {
1861         /* Finish compute of page crossing. */
1862         int size0 = l->page[1].addr - addr;
1863         l->page[1].size = l->page[0].size - size0;
1864         l->page[0].size = size0;
1865 
1866         /*
1867          * Lookup both pages, recognizing exceptions from either.  If the
1868          * second lookup potentially resized, refresh first CPUTLBEntryFull.
1869          */
1870         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1871         if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
1872             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
1873             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
1874         }
1875 
1876         flags = l->page[0].flags | l->page[1].flags;
1877         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1878             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1879             mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
1880         }
1881 
1882         /*
1883          * Since target/sparc is the only user of TLB_BSWAP, and all
1884          * Sparc accesses are aligned, any treatment across two pages
1885          * would be arbitrary.  Refuse it until there's a use.
1886          */
1887         tcg_debug_assert((flags & TLB_BSWAP) == 0);
1888     }
1889 
1890     /*
1891      * This alignment check differs from the one above, in that this is
1892      * based on the atomicity of the operation. The intended use case is
1893      * the ARM memory type field of each PTE, where access to pages with
1894      * Device memory type require alignment.
1895      */
1896     if (unlikely(flags & TLB_CHECK_ALIGNED)) {
1897         MemOp size = l->memop & MO_SIZE;
1898 
1899         switch (l->memop & MO_ATOM_MASK) {
1900         case MO_ATOM_NONE:
1901             size = MO_8;
1902             break;
1903         case MO_ATOM_IFALIGN_PAIR:
1904         case MO_ATOM_WITHIN16_PAIR:
1905             size = size ? size - 1 : 0;
1906             break;
1907         default:
1908             break;
1909         }
1910         if (addr & ((1 << size) - 1)) {
1911             cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
1912         }
1913     }
1914 
1915     return crosspage;
1916 }
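/*
 * Worked example for the cross-page split above: an 8-byte access that
 * starts 2 bytes before a page boundary produces
 *     page[0].addr = addr,                  page[0].size = 2
 *     page[1].addr = start of next page,    page[1].size = 6
 * and both pages are translated before any data is touched, matching the
 * "recognizing exceptions from either" note in the code.
 */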
1917 
1918 /*
1919  * Probe for an atomic operation.  Do not allow unaligned operations,
1920  * or io operations to proceed.  Return the host address.
1921  */
1922 static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1923                                int size, uintptr_t retaddr)
1924 {
1925     uintptr_t mmu_idx = get_mmuidx(oi);
1926     MemOp mop = get_memop(oi);
1927     int a_bits = get_alignment_bits(mop);
1928     uintptr_t index;
1929     CPUTLBEntry *tlbe;
1930     vaddr tlb_addr;
1931     void *hostaddr;
1932     CPUTLBEntryFull *full;
1933 
1934     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1935 
1936     /* Adjust the given return address.  */
1937     retaddr -= GETPC_ADJ;
1938 
1939     /* Enforce guest required alignment.  */
1940     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1941         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1942         cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
1943                              mmu_idx, retaddr);
1944     }
1945 
1946     /* Enforce qemu required alignment.  */
1947     if (unlikely(addr & (size - 1))) {
1948         /* We get here if guest alignment was not requested,
1949            or was not enforced by cpu_unaligned_access above.
1950            We might widen the access and emulate, but for now
1951            mark an exception and exit the cpu loop.  */
1952         goto stop_the_world;
1953     }
1954 
1955     index = tlb_index(cpu, mmu_idx, addr);
1956     tlbe = tlb_entry(cpu, mmu_idx, addr);
1957 
1958     /* Check TLB entry and enforce page permissions.  */
1959     tlb_addr = tlb_addr_write(tlbe);
1960     if (!tlb_hit(tlb_addr, addr)) {
1961         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
1962                             addr & TARGET_PAGE_MASK)) {
1963             tlb_fill(cpu, addr, size,
1964                      MMU_DATA_STORE, mmu_idx, retaddr);
1965             index = tlb_index(cpu, mmu_idx, addr);
1966             tlbe = tlb_entry(cpu, mmu_idx, addr);
1967         }
1968         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1969     }
1970 
1971     /*
1972      * Let the guest notice RMW on a write-only page.
1973      * We have just verified that the page is writable.
1974      * Subpage lookups may have left TLB_INVALID_MASK set,
1975      * but addr_read will only be -1 if PAGE_READ was unset.
1976      */
1977     if (unlikely(tlbe->addr_read == -1)) {
1978         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
1979         /*
1980          * Since we don't support reads and writes to different
1981          * addresses, and we do have the proper page loaded for
1982          * write, this shouldn't ever return.  But just in case,
1983          * handle via stop-the-world.
1984          */
1985         goto stop_the_world;
1986     }
1987     /* Collect tlb flags for read. */
1988     tlb_addr |= tlbe->addr_read;
1989 
1990     /* Notice an IO access or a needs-MMU-lookup access */
1991     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
1992         /* There's really nothing that can be done to
1993            support this apart from stop-the-world.  */
1994         goto stop_the_world;
1995     }
1996 
1997     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1998     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1999 
2000     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
2001         notdirty_write(cpu, addr, size, full, retaddr);
2002     }
2003 
2004     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
2005         int wp_flags = 0;
2006 
2007         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
2008             wp_flags |= BP_MEM_WRITE;
2009         }
2010         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
2011             wp_flags |= BP_MEM_READ;
2012         }
2013         if (wp_flags) {
2014             cpu_check_watchpoint(cpu, addr, size,
2015                                  full->attrs, wp_flags, retaddr);
2016         }
2017     }
2018 
2019     return hostaddr;
2020 
2021  stop_the_world:
2022     cpu_loop_exit_atomic(cpu, retaddr);
2023 }
2024 
2025 /*
2026  * Load Helpers
2027  *
2028  * We support two different access types. SOFTMMU_CODE_ACCESS is
2029  * specifically for reading instructions from system memory. It is
2030  * called by the translation loop and in some helpers where the code
2031  * is disassembled. It shouldn't be called directly by guest code.
2032  *
2033  * For the benefit of TCG generated code, we want to avoid the
2034  * complication of ABI-specific return type promotion and always
2035  * return a value extended to the register size of the host. This is
2036  * tcg_target_long, except in the case of a 32-bit host and 64-bit
2037  * data, and for that we always have uint64_t.
2038  *
2039  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2040  */
2041 
2042 /**
2043  * do_ld_mmio_beN:
2044  * @cpu: generic cpu state
2045  * @full: page parameters
2046  * @ret_be: accumulated data
2047  * @addr: virtual address
2048  * @size: number of bytes
2049  * @mmu_idx: virtual address context
2050  * @ra: return address into tcg generated code, or 0
2051  * Context: BQL held
2052  *
2053  * Load @size bytes from @addr, which is memory-mapped i/o.
2054  * The bytes are concatenated in big-endian order with @ret_be.
2055  */
2056 static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2057                                 uint64_t ret_be, vaddr addr, int size,
2058                                 int mmu_idx, MMUAccessType type, uintptr_t ra,
2059                                 MemoryRegion *mr, hwaddr mr_offset)
2060 {
2061     do {
2062         MemOp this_mop;
2063         unsigned this_size;
2064         uint64_t val;
2065         MemTxResult r;
2066 
2067         /* Read aligned pieces up to 8 bytes. */
2068         this_mop = ctz32(size | (int)addr | 8);
2069         this_size = 1 << this_mop;
2070         this_mop |= MO_BE;
2071 
2072         r = memory_region_dispatch_read(mr, mr_offset, &val,
2073                                         this_mop, full->attrs);
2074         if (unlikely(r != MEMTX_OK)) {
2075             io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
2076         }
2077         if (this_size == 8) {
2078             return val;
2079         }
2080 
2081         ret_be = (ret_be << (this_size * 8)) | val;
2082         addr += this_size;
2083         mr_offset += this_size;
2084         size -= this_size;
2085     } while (size);
2086 
2087     return ret_be;
2088 }
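/*
 * Worked example of the decomposition above: a 6-byte MMIO read starting
 * 2 bytes into an 8-byte-aligned region.
 *     1st pass: ctz32(6 | 2 | 8) = 1  ->  2-byte access
 *     2nd pass: ctz32(4 | 4 | 8) = 2  ->  4-byte access
 * Each piece is naturally aligned and never wider than 8 bytes.
 */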
2089 
2090 static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2091                                uint64_t ret_be, vaddr addr, int size,
2092                                int mmu_idx, MMUAccessType type, uintptr_t ra)
2093 {
2094     MemoryRegionSection *section;
2095     MemoryRegion *mr;
2096     hwaddr mr_offset;
2097     MemTxAttrs attrs;
2098 
2099     tcg_debug_assert(size > 0 && size <= 8);
2100 
2101     attrs = full->attrs;
2102     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2103     mr = section->mr;
2104 
2105     BQL_LOCK_GUARD();
2106     return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
2107                            type, ra, mr, mr_offset);
2108 }
2109 
2110 static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2111                                uint64_t ret_be, vaddr addr, int size,
2112                                int mmu_idx, uintptr_t ra)
2113 {
2114     MemoryRegionSection *section;
2115     MemoryRegion *mr;
2116     hwaddr mr_offset;
2117     MemTxAttrs attrs;
2118     uint64_t a, b;
2119 
2120     tcg_debug_assert(size > 8 && size <= 16);
2121 
2122     attrs = full->attrs;
2123     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2124     mr = section->mr;
2125 
2126     BQL_LOCK_GUARD();
2127     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
2128                         MMU_DATA_LOAD, ra, mr, mr_offset);
2129     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
2130                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
2131     return int128_make128(b, a);
2132 }
2133 
2134 /**
2135  * do_ld_bytes_beN
2136  * @p: translation parameters
2137  * @ret_be: accumulated data
2138  *
2139  * Load @p->size bytes from @p->haddr, which is RAM.
2140  * The bytes are concatenated in big-endian order with @ret_be.
2141  */
2142 static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
2143 {
2144     uint8_t *haddr = p->haddr;
2145     int i, size = p->size;
2146 
2147     for (i = 0; i < size; i++) {
2148         ret_be = (ret_be << 8) | haddr[i];
2149     }
2150     return ret_be;
2151 }
2152 
2153 /**
2154  * do_ld_parts_beN
2155  * @p: translation parameters
2156  * @ret_be: accumulated data
2157  *
2158  * As do_ld_bytes_beN, but atomically on each aligned part.
2159  */
2160 static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2161 {
2162     void *haddr = p->haddr;
2163     int size = p->size;
2164 
2165     do {
2166         uint64_t x;
2167         int n;
2168 
2169         /*
2170          * Find minimum of alignment and size.
2171          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2172          * would have only checked the low bits of addr|size once at the start,
2173          * but is just as easy.
2174          */
2175         switch (((uintptr_t)haddr | size) & 7) {
2176         case 4:
2177             x = cpu_to_be32(load_atomic4(haddr));
2178             ret_be = (ret_be << 32) | x;
2179             n = 4;
2180             break;
2181         case 2:
2182         case 6:
2183             x = cpu_to_be16(load_atomic2(haddr));
2184             ret_be = (ret_be << 16) | x;
2185             n = 2;
2186             break;
2187         default:
2188             x = *(uint8_t *)haddr;
2189             ret_be = (ret_be << 8) | x;
2190             n = 1;
2191             break;
2192         case 0:
2193             g_assert_not_reached();
2194         }
2195         haddr += n;
2196         size -= n;
2197     } while (size != 0);
2198     return ret_be;
2199 }
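/*
 * Worked example for do_ld_parts_beN(): haddr == 2 (mod 8) and size == 6.
 *     1st pass: (2 | 6) & 7 == 6  ->  atomic 2-byte load
 *     2nd pass: (4 | 4) & 7 == 4  ->  atomic 4-byte load
 * so a misaligned 6-byte load is performed as two naturally aligned,
 * individually atomic pieces.
 */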
2200 
2201 /**
2202  * do_ld_whole_be4
2203  * @p: translation parameters
2204  * @ret_be: accumulated data
2205  *
2206  * As do_ld_bytes_beN, but with one atomic load.
2207  * Four aligned bytes are guaranteed to cover the load.
2208  */
2209 static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2210 {
2211     int o = p->addr & 3;
2212     uint32_t x = load_atomic4(p->haddr - o);
2213 
2214     x = cpu_to_be32(x);
2215     x <<= o * 8;
2216     x >>= (4 - p->size) * 8;
2217     return (ret_be << (p->size * 8)) | x;
2218 }
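/*
 * Worked example for do_ld_whole_be4(): p->addr & 3 == 1, p->size == 2,
 * and the aligned word holds bytes b0 b1 b2 b3 in memory order.  After
 * cpu_to_be32, x == b0b1b2b3; "<< 8" drops b0, ">> 16" keeps only b1b2,
 * and the return merges those two bytes into the low end of the
 * big-endian accumulator @ret_be.
 */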
2219 
2220 /**
2221  * do_ld_whole_be8
2222  * @p: translation parameters
2223  * @ret_be: accumulated data
2224  *
2225  * As do_ld_bytes_beN, but with one atomic load.
2226  * Eight aligned bytes are guaranteed to cover the load.
2227  */
2228 static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2229                                 MMULookupPageData *p, uint64_t ret_be)
2230 {
2231     int o = p->addr & 7;
2232     uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
2233 
2234     x = cpu_to_be64(x);
2235     x <<= o * 8;
2236     x >>= (8 - p->size) * 8;
2237     return (ret_be << (p->size * 8)) | x;
2238 }
2239 
2240 /**
2241  * do_ld_whole_be16
2242  * @p: translation parameters
2243  * @ret_be: accumulated data
2244  *
2245  * As do_ld_bytes_beN, but with one atomic load.
2246  * 16 aligned bytes are guaranteed to cover the load.
2247  */
2248 static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
2249                                MMULookupPageData *p, uint64_t ret_be)
2250 {
2251     int o = p->addr & 15;
2252     Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
2253     int size = p->size;
2254 
2255     if (!HOST_BIG_ENDIAN) {
2256         y = bswap128(y);
2257     }
2258     y = int128_lshift(y, o * 8);
2259     y = int128_urshift(y, (16 - size) * 8);
2260     x = int128_make64(ret_be);
2261     x = int128_lshift(x, size * 8);
2262     return int128_or(x, y);
2263 }
2264 
2265 /*
2266  * Wrapper for the above.
2267  */
2268 static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2269                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2270                           MemOp mop, uintptr_t ra)
2271 {
2272     MemOp atom;
2273     unsigned tmp, half_size;
2274 
2275     if (unlikely(p->flags & TLB_MMIO)) {
2276         return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
2277                               mmu_idx, type, ra);
2278     }
2279 
2280     /*
2281      * It is a given that we cross a page and therefore there is no
2282      * atomicity for the load as a whole, but subobjects may need attention.
2283      */
2284     atom = mop & MO_ATOM_MASK;
2285     switch (atom) {
2286     case MO_ATOM_SUBALIGN:
2287         return do_ld_parts_beN(p, ret_be);
2288 
2289     case MO_ATOM_IFALIGN_PAIR:
2290     case MO_ATOM_WITHIN16_PAIR:
2291         tmp = mop & MO_SIZE;
2292         tmp = tmp ? tmp - 1 : 0;
2293         half_size = 1 << tmp;
2294         if (atom == MO_ATOM_IFALIGN_PAIR
2295             ? p->size == half_size
2296             : p->size >= half_size) {
2297             if (!HAVE_al8_fast && p->size < 4) {
2298                 return do_ld_whole_be4(p, ret_be);
2299             } else {
2300                 return do_ld_whole_be8(cpu, ra, p, ret_be);
2301             }
2302         }
2303         /* fall through */
2304 
2305     case MO_ATOM_IFALIGN:
2306     case MO_ATOM_WITHIN16:
2307     case MO_ATOM_NONE:
2308         return do_ld_bytes_beN(p, ret_be);
2309 
2310     default:
2311         g_assert_not_reached();
2312     }
2313 }
2314 
2315 /*
2316  * Wrapper for the above, for 8 < size < 16.
2317  */
2318 static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
2319                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
2320 {
2321     int size = p->size;
2322     uint64_t b;
2323     MemOp atom;
2324 
2325     if (unlikely(p->flags & TLB_MMIO)) {
2326         return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
2327     }
2328 
2329     /*
2330      * It is a given that we cross a page and therefore there is no
2331      * atomicity for the load as a whole, but subobjects may need attention.
2332      */
2333     atom = mop & MO_ATOM_MASK;
2334     switch (atom) {
2335     case MO_ATOM_SUBALIGN:
2336         p->size = size - 8;
2337         a = do_ld_parts_beN(p, a);
2338         p->haddr += size - 8;
2339         p->size = 8;
2340         b = do_ld_parts_beN(p, 0);
2341         break;
2342 
2343     case MO_ATOM_WITHIN16_PAIR:
2344         /* Since size > 8, this is the half that must be atomic. */
2345         return do_ld_whole_be16(cpu, ra, p, a);
2346 
2347     case MO_ATOM_IFALIGN_PAIR:
2348         /*
2349          * Since size > 8, both halves are misaligned,
2350          * and so neither is atomic.
2351          */
2352     case MO_ATOM_IFALIGN:
2353     case MO_ATOM_WITHIN16:
2354     case MO_ATOM_NONE:
2355         p->size = size - 8;
2356         a = do_ld_bytes_beN(p, a);
2357         b = ldq_be_p(p->haddr + size - 8);
2358         break;
2359 
2360     default:
2361         g_assert_not_reached();
2362     }
2363 
2364     return int128_make128(b, a);
2365 }
2366 
2367 static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2368                        MMUAccessType type, uintptr_t ra)
2369 {
2370     if (unlikely(p->flags & TLB_MMIO)) {
2371         return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
2372     } else {
2373         return *(uint8_t *)p->haddr;
2374     }
2375 }
2376 
2377 static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2378                         MMUAccessType type, MemOp memop, uintptr_t ra)
2379 {
2380     uint16_t ret;
2381 
2382     if (unlikely(p->flags & TLB_MMIO)) {
2383         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2384         if ((memop & MO_BSWAP) == MO_LE) {
2385             ret = bswap16(ret);
2386         }
2387     } else {
2388         /* Perform the load host endian, then swap if necessary. */
2389         ret = load_atom_2(cpu, ra, p->haddr, memop);
2390         if (memop & MO_BSWAP) {
2391             ret = bswap16(ret);
2392         }
2393     }
2394     return ret;
2395 }
2396 
2397 static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2398                         MMUAccessType type, MemOp memop, uintptr_t ra)
2399 {
2400     uint32_t ret;
2401 
2402     if (unlikely(p->flags & TLB_MMIO)) {
2403         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2404         if ((memop & MO_BSWAP) == MO_LE) {
2405             ret = bswap32(ret);
2406         }
2407     } else {
2408         /* Perform the load host endian. */
2409         ret = load_atom_4(cpu, ra, p->haddr, memop);
2410         if (memop & MO_BSWAP) {
2411             ret = bswap32(ret);
2412         }
2413     }
2414     return ret;
2415 }
2416 
2417 static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2418                         MMUAccessType type, MemOp memop, uintptr_t ra)
2419 {
2420     uint64_t ret;
2421 
2422     if (unlikely(p->flags & TLB_MMIO)) {
2423         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2424         if ((memop & MO_BSWAP) == MO_LE) {
2425             ret = bswap64(ret);
2426         }
2427     } else {
2428         /* Perform the load host endian. */
2429         ret = load_atom_8(cpu, ra, p->haddr, memop);
2430         if (memop & MO_BSWAP) {
2431             ret = bswap64(ret);
2432         }
2433     }
2434     return ret;
2435 }
2436 
2437 static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2438                           uintptr_t ra, MMUAccessType access_type)
2439 {
2440     MMULookupLocals l;
2441     bool crosspage;
2442 
2443     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2444     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2445     tcg_debug_assert(!crosspage);
2446 
2447     return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2448 }
2449 
2450 static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2451                            uintptr_t ra, MMUAccessType access_type)
2452 {
2453     MMULookupLocals l;
2454     bool crosspage;
2455     uint16_t ret;
2456     uint8_t a, b;
2457 
2458     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2459     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2460     if (likely(!crosspage)) {
2461         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2462     }
2463 
2464     a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2465     b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
2466 
2467     if ((l.memop & MO_BSWAP) == MO_LE) {
2468         ret = a | (b << 8);
2469     } else {
2470         ret = b | (a << 8);
2471     }
2472     return ret;
2473 }
2474 
2475 static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2476                            uintptr_t ra, MMUAccessType access_type)
2477 {
2478     MMULookupLocals l;
2479     bool crosspage;
2480     uint32_t ret;
2481 
2482     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2483     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2484     if (likely(!crosspage)) {
2485         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2486     }
2487 
2488     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2489     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2490     if ((l.memop & MO_BSWAP) == MO_LE) {
2491         ret = bswap32(ret);
2492     }
2493     return ret;
2494 }
2495 
2496 static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2497                            uintptr_t ra, MMUAccessType access_type)
2498 {
2499     MMULookupLocals l;
2500     bool crosspage;
2501     uint64_t ret;
2502 
2503     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2504     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2505     if (likely(!crosspage)) {
2506         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2507     }
2508 
2509     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2510     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2511     if ((l.memop & MO_BSWAP) == MO_LE) {
2512         ret = bswap64(ret);
2513     }
2514     return ret;
2515 }
2516 
2517 static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
2518                           MemOpIdx oi, uintptr_t ra)
2519 {
2520     MMULookupLocals l;
2521     bool crosspage;
2522     uint64_t a, b;
2523     Int128 ret;
2524     int first;
2525 
2526     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2527     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
2528     if (likely(!crosspage)) {
2529         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2530             ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
2531                                    l.mmu_idx, ra);
2532             if ((l.memop & MO_BSWAP) == MO_LE) {
2533                 ret = bswap128(ret);
2534             }
2535         } else {
2536             /* Perform the load host endian. */
2537             ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
2538             if (l.memop & MO_BSWAP) {
2539                 ret = bswap128(ret);
2540             }
2541         }
2542         return ret;
2543     }
2544 
2545     first = l.page[0].size;
2546     if (first == 8) {
2547         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
2548 
2549         a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2550         b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2551         if ((mop8 & MO_BSWAP) == MO_LE) {
2552             ret = int128_make128(a, b);
2553         } else {
2554             ret = int128_make128(b, a);
2555         }
2556         return ret;
2557     }
2558 
2559     if (first < 8) {
2560         a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
2561                       MMU_DATA_LOAD, l.memop, ra);
2562         ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
2563     } else {
2564         ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
2565         b = int128_getlo(ret);
2566         ret = int128_lshift(ret, l.page[1].size * 8);
2567         a = int128_gethi(ret);
2568         b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
2569                       MMU_DATA_LOAD, l.memop, ra);
2570         ret = int128_make128(b, a);
2571     }
2572     if ((l.memop & MO_BSWAP) == MO_LE) {
2573         ret = bswap128(ret);
2574     }
2575     return ret;
2576 }
2577 
2578 /*
2579  * Store Helpers
2580  */
2581 
2582 /**
2583  * do_st_mmio_leN:
2584  * @cpu: generic cpu state
2585  * @full: page parameters
2586  * @val_le: data to store
2587  * @addr: virtual address
2588  * @size: number of bytes
2589  * @mmu_idx: virtual address context
2590  * @ra: return address into tcg generated code, or 0
2591  * Context: BQL held
2592  *
2593  * Store @size bytes at @addr, which is memory-mapped i/o.
2594  * The bytes to store are extracted in little-endian order from @val_le;
2595  * return the bytes of @val_le beyond @size that have not been stored.
2596  */
2597 static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2598                                 uint64_t val_le, vaddr addr, int size,
2599                                 int mmu_idx, uintptr_t ra,
2600                                 MemoryRegion *mr, hwaddr mr_offset)
2601 {
2602     do {
2603         MemOp this_mop;
2604         unsigned this_size;
2605         MemTxResult r;
2606 
2607         /* Store aligned pieces up to 8 bytes. */
2608         this_mop = ctz32(size | (int)addr | 8);
2609         this_size = 1 << this_mop;
2610         this_mop |= MO_LE;
2611 
2612         r = memory_region_dispatch_write(mr, mr_offset, val_le,
2613                                          this_mop, full->attrs);
2614         if (unlikely(r != MEMTX_OK)) {
2615             io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
2616                       mmu_idx, r, ra);
2617         }
2618         if (this_size == 8) {
2619             return 0;
2620         }
2621 
2622         val_le >>= this_size * 8;
2623         addr += this_size;
2624         mr_offset += this_size;
2625         size -= this_size;
2626     } while (size);
2627 
2628     return val_le;
2629 }
2630 
2631 static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2632                                uint64_t val_le, vaddr addr, int size,
2633                                int mmu_idx, uintptr_t ra)
2634 {
2635     MemoryRegionSection *section;
2636     hwaddr mr_offset;
2637     MemoryRegion *mr;
2638     MemTxAttrs attrs;
2639 
2640     tcg_debug_assert(size > 0 && size <= 8);
2641 
2642     attrs = full->attrs;
2643     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2644     mr = section->mr;
2645 
2646     BQL_LOCK_GUARD();
2647     return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
2648                            ra, mr, mr_offset);
2649 }
2650 
2651 static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2652                                  Int128 val_le, vaddr addr, int size,
2653                                  int mmu_idx, uintptr_t ra)
2654 {
2655     MemoryRegionSection *section;
2656     MemoryRegion *mr;
2657     hwaddr mr_offset;
2658     MemTxAttrs attrs;
2659 
2660     tcg_debug_assert(size > 8 && size <= 16);
2661 
2662     attrs = full->attrs;
2663     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2664     mr = section->mr;
2665 
2666     BQL_LOCK_GUARD();
2667     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
2668                     mmu_idx, ra, mr, mr_offset);
2669     return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
2670                            size - 8, mmu_idx, ra, mr, mr_offset + 8);
2671 }
2672 
2673 /*
2674  * Wrapper for the above.
2675  */
2676 static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
2677                           uint64_t val_le, int mmu_idx,
2678                           MemOp mop, uintptr_t ra)
2679 {
2680     MemOp atom;
2681     unsigned tmp, half_size;
2682 
2683     if (unlikely(p->flags & TLB_MMIO)) {
2684         return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
2685                               p->size, mmu_idx, ra);
2686     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2687         return val_le >> (p->size * 8);
2688     }
2689 
2690     /*
2691      * It is a given that we cross a page and therefore there is no atomicity
2692      * for the store as a whole, but subobjects may need attention.
2693      */
2694     atom = mop & MO_ATOM_MASK;
2695     switch (atom) {
2696     case MO_ATOM_SUBALIGN:
2697         return store_parts_leN(p->haddr, p->size, val_le);
2698 
2699     case MO_ATOM_IFALIGN_PAIR:
2700     case MO_ATOM_WITHIN16_PAIR:
2701         tmp = mop & MO_SIZE;
2702         tmp = tmp ? tmp - 1 : 0;
2703         half_size = 1 << tmp;
2704         if (atom == MO_ATOM_IFALIGN_PAIR
2705             ? p->size == half_size
2706             : p->size >= half_size) {
2707             if (!HAVE_al8_fast && p->size <= 4) {
2708                 return store_whole_le4(p->haddr, p->size, val_le);
2709             } else if (HAVE_al8) {
2710                 return store_whole_le8(p->haddr, p->size, val_le);
2711             } else {
2712                 cpu_loop_exit_atomic(cpu, ra);
2713             }
2714         }
2715         /* fall through */
2716 
2717     case MO_ATOM_IFALIGN:
2718     case MO_ATOM_WITHIN16:
2719     case MO_ATOM_NONE:
2720         return store_bytes_leN(p->haddr, p->size, val_le);
2721 
2722     default:
2723         g_assert_not_reached();
2724     }
2725 }
2726 
2727 /*
2728  * Wrapper for the above, for 8 < size < 16.
2729  */
2730 static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
2731                             Int128 val_le, int mmu_idx,
2732                             MemOp mop, uintptr_t ra)
2733 {
2734     int size = p->size;
2735     MemOp atom;
2736 
2737     if (unlikely(p->flags & TLB_MMIO)) {
2738         return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
2739                                 size, mmu_idx, ra);
2740     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2741         return int128_gethi(val_le) >> ((size - 8) * 8);
2742     }
2743 
2744     /*
2745      * It is a given that we cross a page and therefore there is no atomicity
2746      * for the store as a whole, but subobjects may need attention.
2747      */
2748     atom = mop & MO_ATOM_MASK;
2749     switch (atom) {
2750     case MO_ATOM_SUBALIGN:
2751         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
2752         return store_parts_leN(p->haddr + 8, p->size - 8,
2753                                int128_gethi(val_le));
2754 
2755     case MO_ATOM_WITHIN16_PAIR:
2756         /* Since size > 8, this is the half that must be atomic. */
2757         if (!HAVE_CMPXCHG128) {
2758             cpu_loop_exit_atomic(cpu, ra);
2759         }
2760         return store_whole_le16(p->haddr, p->size, val_le);
2761 
2762     case MO_ATOM_IFALIGN_PAIR:
2763         /*
2764          * Since size > 8, both halves are misaligned,
2765          * and so neither is atomic.
2766          */
2767     case MO_ATOM_IFALIGN:
2768     case MO_ATOM_WITHIN16:
2769     case MO_ATOM_NONE:
2770         stq_le_p(p->haddr, int128_getlo(val_le));
2771         return store_bytes_leN(p->haddr + 8, p->size - 8,
2772                                int128_gethi(val_le));
2773 
2774     default:
2775         g_assert_not_reached();
2776     }
2777 }
2778 
2779 static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
2780                     int mmu_idx, uintptr_t ra)
2781 {
2782     if (unlikely(p->flags & TLB_MMIO)) {
2783         do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
2784     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2785         /* nothing */
2786     } else {
2787         *(uint8_t *)p->haddr = val;
2788     }
2789 }
2790 
2791 static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
2792                     int mmu_idx, MemOp memop, uintptr_t ra)
2793 {
2794     if (unlikely(p->flags & TLB_MMIO)) {
2795         if ((memop & MO_BSWAP) != MO_LE) {
2796             val = bswap16(val);
2797         }
2798         do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
2799     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2800         /* nothing */
2801     } else {
2802         /* Swap to host endian if necessary, then store. */
2803         if (memop & MO_BSWAP) {
2804             val = bswap16(val);
2805         }
2806         store_atom_2(cpu, ra, p->haddr, memop, val);
2807     }
2808 }
2809 
2810 static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
2811                     int mmu_idx, MemOp memop, uintptr_t ra)
2812 {
2813     if (unlikely(p->flags & TLB_MMIO)) {
2814         if ((memop & MO_BSWAP) != MO_LE) {
2815             val = bswap32(val);
2816         }
2817         do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
2818     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2819         /* nothing */
2820     } else {
2821         /* Swap to host endian if necessary, then store. */
2822         if (memop & MO_BSWAP) {
2823             val = bswap32(val);
2824         }
2825         store_atom_4(cpu, ra, p->haddr, memop, val);
2826     }
2827 }
2828 
2829 static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
2830                     int mmu_idx, MemOp memop, uintptr_t ra)
2831 {
2832     if (unlikely(p->flags & TLB_MMIO)) {
2833         if ((memop & MO_BSWAP) != MO_LE) {
2834             val = bswap64(val);
2835         }
2836         do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
2837     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2838         /* nothing */
2839     } else {
2840         /* Swap to host endian if necessary, then store. */
2841         if (memop & MO_BSWAP) {
2842             val = bswap64(val);
2843         }
2844         store_atom_8(cpu, ra, p->haddr, memop, val);
2845     }
2846 }
2847 
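/*
 * Store entry points for the sized helpers.  Each orders the store
 * against earlier accesses as the guest memory model requires
 * (cpu_req_mo), translates the address with mmu_lookup(), and performs
 * the access on a single page when possible.  A page-crossing access
 * (impossible for the 1-byte case) is split and written with the
 * per-page helpers above.
 */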
2848 static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
2849                        MemOpIdx oi, uintptr_t ra)
2850 {
2851     MMULookupLocals l;
2852     bool crosspage;
2853 
2854     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2855     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2856     tcg_debug_assert(!crosspage);
2857 
2858     do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
2859 }
2860 
2861 static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
2862                        MemOpIdx oi, uintptr_t ra)
2863 {
2864     MMULookupLocals l;
2865     bool crosspage;
2866     uint8_t a, b;
2867 
2868     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2869     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2870     if (likely(!crosspage)) {
2871         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2872         return;
2873     }
2874 
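    /*
     * The store crosses a page boundary: split the value into its two
     * bytes in guest memory order, so each byte lands on its own page.
     */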
2875     if ((l.memop & MO_BSWAP) == MO_LE) {
2876         a = val, b = val >> 8;
2877     } else {
2878         b = val, a = val >> 8;
2879     }
2880     do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
2881     do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
2882 }
2883 
2884 static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
2885                        MemOpIdx oi, uintptr_t ra)
2886 {
2887     MMULookupLocals l;
2888     bool crosspage;
2889 
2890     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2891     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2892     if (likely(!crosspage)) {
2893         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2894         return;
2895     }
2896 
2897     /* Swap to little endian for simplicity, then store by bytes. */
2898     if ((l.memop & MO_BSWAP) != MO_LE) {
2899         val = bswap32(val);
2900     }
2901     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2902     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2903 }
2904 
2905 static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
2906                        MemOpIdx oi, uintptr_t ra)
2907 {
2908     MMULookupLocals l;
2909     bool crosspage;
2910 
2911     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2912     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2913     if (likely(!crosspage)) {
2914         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2915         return;
2916     }
2917 
2918     /* Swap to little endian for simplicity, then store by bytes. */
2919     if ((l.memop & MO_BSWAP) != MO_LE) {
2920         val = bswap64(val);
2921     }
2922     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2923     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2924 }
2925 
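/*
 * 16-byte store.  The non-crossing case mirrors do_st_8 above, using
 * store_atom_16() for plain RAM.  A page-crossing store is split at
 * the boundary: an even 8/8 split reuses do_st_8 for each half, while
 * the uneven cases normalise the value to little-endian and use the
 * leN helpers, with do_st16_leN covering whichever page holds more
 * than eight bytes.
 */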
2926 static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
2927                         MemOpIdx oi, uintptr_t ra)
2928 {
2929     MMULookupLocals l;
2930     bool crosspage;
2931     uint64_t a, b;
2932     int first;
2933 
2934     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2935     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2936     if (likely(!crosspage)) {
2937         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2938             if ((l.memop & MO_BSWAP) != MO_LE) {
2939                 val = bswap128(val);
2940             }
2941             do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
2942         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
2943             /* nothing */
2944         } else {
2945             /* Swap to host endian if necessary, then store. */
2946             if (l.memop & MO_BSWAP) {
2947                 val = bswap128(val);
2948             }
2949             store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
2950         }
2951         return;
2952     }
2953 
2954     first = l.page[0].size;
2955     if (first == 8) {
2956         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
2957 
2958         if (l.memop & MO_BSWAP) {
2959             val = bswap128(val);
2960         }
2961         if (HOST_BIG_ENDIAN) {
2962             b = int128_getlo(val), a = int128_gethi(val);
2963         } else {
2964             a = int128_getlo(val), b = int128_gethi(val);
2965         }
2966         do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
2967         do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
2968         return;
2969     }
2970 
2971     if ((l.memop & MO_BSWAP) != MO_LE) {
2972         val = bswap128(val);
2973     }
2974     if (first < 8) {
2975         do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
2976         val = int128_urshift(val, first * 8);
2977         do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2978     } else {
2979         b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2980         do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
2981     }
2982 }
2983 
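/*
 * ldst_common.c.inc is shared with the user-mode path; it builds the
 * public cpu_ldXX/cpu_stXX accessors and the TCG helper entry points
 * on top of the do_ld*_mmu and do_st*_mmu routines above.
 */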
2984 #include "ldst_common.c.inc"
2985 
2986 /*
2987  * First set of functions passes in OI and RETADDR.
2988  * This makes them callable from other helpers.
2989  */
2990 
2991 #define ATOMIC_NAME(X) \
2992     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2993 
2994 #define ATOMIC_MMU_CLEANUP
2995 
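/*
 * ATOMIC_NAME pastes together the operation, the size suffix and the
 * endianness chosen by atomic_template.h, so that e.g. a 4-byte
 * little-endian compare-and-swap is exported as
 * cpu_atomic_cmpxchgl_le_mmu().  ATOMIC_MMU_CLEANUP expands to nothing
 * here, as the softmmu lookup requires no post-access cleanup.
 */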
2996 #include "atomic_common.c.inc"
2997 
2998 #define DATA_SIZE 1
2999 #include "atomic_template.h"
3000 
3001 #define DATA_SIZE 2
3002 #include "atomic_template.h"
3003 
3004 #define DATA_SIZE 4
3005 #include "atomic_template.h"
3006 
3007 #ifdef CONFIG_ATOMIC64
3008 #define DATA_SIZE 8
3009 #include "atomic_template.h"
3010 #endif
3011 
3012 #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
3013 #define DATA_SIZE 16
3014 #include "atomic_template.h"
3015 #endif
3016 
3017 /* Code access functions.  */
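/*
 * These fetch through MMU_INST_FETCH with the mmu index returned by
 * cpu_mmu_index(cs, true), so they use the same translation the CPU
 * would use to execute from the address.  A typical, purely
 * illustrative use in a target translator is reading the next
 * instruction word:
 *
 *     uint32_t insn = cpu_ldl_code(env, dc->base.pc_next);
 *
 * where dc->base.pc_next stands for the translator's current guest PC.
 */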
3018 
3019 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3020 {
3021     CPUState *cs = env_cpu(env);
3022     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
3023     return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3024 }
3025 
3026 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
3027 {
3028     CPUState *cs = env_cpu(env);
3029     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
3030     return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3031 }
3032 
3033 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
3034 {
3035     CPUState *cs = env_cpu(env);
3036     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
3037     return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3038 }
3039 
3040 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3041 {
3042     CPUState *cs = env_cpu(env);
3043     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
3044     return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3045 }
3046 
3047 uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
3048                          MemOpIdx oi, uintptr_t retaddr)
3049 {
3050     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3051 }
3052 
3053 uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
3054                           MemOpIdx oi, uintptr_t retaddr)
3055 {
3056     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3057 }
3058 
3059 uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
3060                           MemOpIdx oi, uintptr_t retaddr)
3061 {
3062     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3063 }
3064 
3065 uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
3066                           MemOpIdx oi, uintptr_t retaddr)
3067 {
3068     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3069 }
3070