xref: /qemu/target/arm/tcg/mte_helper.c (revision cc37d98b)
/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"


static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}
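
/*
 * Worked example (illustrative values, not from the architecture
 * pseudocode): with exclude == 0x0005 (tags 0 and 2 excluded) and
 * tag == 0, offset == 0 returns 1, the first non-excluded tag at or
 * above the start.  With offset == 2 the loop advances through two
 * non-excluded tags: 0 -> 1 (first), 2 is skipped, -> 3 (second), so
 * the result is 3.  When every tag is excluded (0xffff), tag 0 is
 * returned, mirroring the architected ChooseNonExcludedTag behaviour.
 */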

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe.  The page is expected to be
     * valid.  Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host RAM, there is no tag storage: access unchecked.
     * This is probably a guest OS bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
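    /*
     * Note: -(ptr | TARGET_PAGE_MASK) is the number of bytes from ptr
     * to the end of its page.  E.g. with 4KiB pages, ptr == 0x1ff8
     * gives (0x1ff8 | ~0xfff) == ...fff8, whose negation is 8.
     */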
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}
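
/*
 * Worked example of the tag layout described above (illustrative
 * addresses): with LOG2_TAG_GRANULE == 4, the granules at 0x1000 and
 * 0x1010 share the tag byte at index (0x1000 >> 5) == 0x80 in tag
 * storage; 0x1000's tag sits in bits [3:0] and 0x1010's in bits [7:4].
 * Each tag byte thus covers 2 * TAG_GRANULE == 32 bytes of normal memory.
 */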

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
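
/*
 * One NextRandomTagBit step, worked through (illustrative seed): for
 * seed == 0x0001, the feedback bit is seed[5] ^ seed[3] ^ seed[2] ^
 * seed[0] == 1, so the 16-bit LFSR shifts to (1 << 15) | (0x0001 >> 1)
 * == 0x8000; that feedback bit becomes one bit of the 4-bit tag
 * offset fed to choose_nonexcluded_tag().
 */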

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}
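
/*
 * Nibble selection, worked through (illustrative addresses): bit
 * LOG2_TAG_GRANULE of ptr says which granule of the pair is addressed.
 * For ptr == 0x1010 that bit is 1, so ofs == 4 and the tag is read
 * from bits [7:4] of the tag byte; for ptr == 0x1000, from bits [3:0].
 * store_tag1() below deposits into the same nibble.
 */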

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}
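
/*
 * The compare-and-swap loop above is needed because two vCPUs may race
 * on the two nibbles of one tag byte: a plain read-modify-write of the
 * byte could lose the neighbouring granule's tag.  On cmpxchg failure,
 * the returned value is the byte's current contents, so the loop
 * re-deposits the nibble into fresh data rather than re-reading.
 */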

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
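
/*
 * Worked example (illustrative addresses, TAG_GRANULE == 16): ST2G at
 * ptr == 0x1000 covers granules 0x1000/0x1010, which share one tag
 * byte, so both nibbles are written with a single byte store of
 * tag | (tag << 4).  ST2G at ptr == 0x1010 covers 0x1010/0x1020, which
 * straddle two tag bytes: the odd nibble of the first byte and the
 * even nibble of the second, hence the two store1() calls with the
 * synthetic granule offsets TAG_GRANULE and 0.
 */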

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}
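
/*
 * Size arithmetic for LDGM/STGM with GMID_EL1.BS == 6: LDGM_STGM_SIZE
 * is 4 << 6 == 256 bytes, i.e. 16 granules of 16 bytes.  The 16
 * four-bit tags pack into 256 / (2 * TAG_GRANULE) == 8 tag bytes,
 * which is exactly the single 64-bit load/store used above and below.
 */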

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
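
/*
 * Worked sizes (typical dcz_blocksize == 4, i.e. a 64-byte block):
 * log2_dcz_bytes == 6, so dcz_bytes == 64 and tag_bytes ==
 * 1 << (6 - 5) == 2.  Multiplying the 4-bit tag by 0x11 replicates it
 * into both nibbles of each tag byte, so the memset tags all four
 * granules of the zeroed block at once.
 */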

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure.  */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}
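
/*
 * Summary of the dispatch above: for accesses from EL0 regimes the
 * controlling field is the regime SCTLR's TCF0 (bits [39:38]);
 * otherwise it is TCF (bits [41:40]).  Value 0 ignores the failure,
 * 1 reports synchronously, 2 accumulates asynchronously in TFSR_ELx,
 * and 3 is asymmetric: asynchronous for stores, synchronous for loads.
 */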

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at the odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags, which the survey
 * above suggests is uncommon.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely(diff & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_odd:
        /* Test odd tag. */
        if (unlikely(diff & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
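
/*
 * Worked example (illustrative bytes): checkN(mem, 0, 3, 4) with
 * mem == { 0x33, 0x73 } replicates cmp to 0x33.  Byte 0 XORs to 0,
 * so both tags match (n == 2).  Byte 1 XORs to 0x40: the even nibble
 * is clear (n == 3) but the odd nibble is not, so checkN returns
 * 3 < 4, i.e. the fourth granule failed.
 */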

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}
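
/*
 * Worked bounds example (illustrative, 4KiB pages): a 16-byte access
 * at ptr == 0x1ff8 has ptr_last == 0x2007, so tag_first == 0x1ff0,
 * tag_last == 0x2000 and tag_count == 2, and the access crosses into
 * the next page.  The first page contributes c == (0x2000 - 0x1ff0) /
 * TAG_GRANULE == 1 tag, checked odd-first since ptr & TAG_GRANULE is
 * set; the remaining tag comes from mem2 on the second page.
 */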

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 2;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}
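
/*
 * Worked fail-path example (illustrative, zva_blocksize 64): with
 * ptr_tag == 3 replicated to 0x3333 and memory tags {3, 3, 7, 3},
 * mem_tag == 0x3733 and the XOR is 0x0400.  ctz64(0x0400) == 10 and
 * 10 >> 2 == nibble 2, so the reported fault address is
 * align_ptr + 2 * TAG_GRANULE, the start of the mismatched granule.
 */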
909