/*
 *  AArch64 translation, common definitions.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TARGET_ARM_TRANSLATE_A64_H
#define TARGET_ARM_TRANSLATE_A64_H

TCGv_i64 cpu_reg(DisasContext *s, int reg);
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg);
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf);
TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf);
void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr);
bool sve_access_check(DisasContext *s);
bool sme_enabled_check(DisasContext *s);
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned);

/* This function corresponds to CheckStreamingSVEEnabled. */
static inline bool sme_sm_enabled_check(DisasContext *s)
{
    return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK);
}

/* This function corresponds to CheckSMEAndZAEnabled. */
static inline bool sme_za_enabled_check(DisasContext *s)
{
    return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK);
}

/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */
static inline bool sme_smza_enabled_check(DisasContext *s)
{
    return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
}
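
/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * upstream header): the access checks above are intended as guards at the
 * start of a translation function.  When a check fails it has already
 * raised the appropriate exception, so the caller simply stops emitting
 * code for the instruction.  The shape is roughly:
 *
 *     if (!sme_smza_enabled_check(s)) {
 *         return true;    (the check already generated the exception)
 *     }
 *     (emit the TCG ops for the instruction here)
 *     return true;
 *
 * The surrounding trans_* function and its return convention are
 * assumptions about the callers in translate-sve.c / translate-sme.c,
 * not definitions made by this file.
 */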

TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, MemOp memop);
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int total_size, MemOp memop);

/* By the time we try to access an FP register we should already have
 * done the necessary access check, so assert that
 * (a) we did the check and
 * (b) we didn't then just plough ahead anyway if it failed.
 * Print the instruction pattern in the abort message so we can figure
 * out what we need to fix if a user encounters this problem in the wild.
 */
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
    }
#endif
}
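
/*
 * Illustrative note (editorial addition): the check referred to above is
 * the one performed by the FP access-check helper in translate-a64.c,
 * which is expected to set s->fp_access_checked when it runs.  A caller
 * therefore looks roughly like:
 *
 *     if (!fp_access_check(s)) {
 *         return true;    (access trapped; nothing more to translate)
 *     }
 *     ... then use vec_reg_offset()/vec_full_reg_offset() below ...
 *
 * The helper name fp_access_check() is an assumption about
 * translate-a64.c, not something defined in this header.
 */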

/* Return the offset into CPUARMState of an element of specified
 * size, 'element' places in from the least significant end of
 * the FP/vector register Qn.
 */
static inline int vec_reg_offset(DisasContext *s, int regno,
                                 int element, MemOp size)
{
    int element_size = 1 << size;
    int offs = element * element_size;
#if HOST_BIG_ENDIAN
    /* This is complicated slightly because vfp.zregs[n].d[0] is
     * still the lowest and vfp.zregs[n].d[31] the highest of the
     * 256 byte vector, even on big endian systems.
     *
     * Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     *
     * For 16 byte elements, the two 8 byte halves will not form a
     * host int128 if the host is bigendian, since they're in the
     * wrong order.  However the only 16 byte operation we have is
     * a move, so we can ignore this for the moment.  More complicated
     * operations will have to special case loading and storing from
     * the zregs array.
     */
    if (element_size < 8) {
        offs ^= 8 - element_size;
    }
#endif
    offs += offsetof(CPUARMState, vfp.zregs[regno]);
    assert_fp_access_checked(s);
    return offs;
}
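
/*
 * Worked example (editorial addition): with MO_16 elements, element 3 of
 * Qn starts at little-endian byte offset 3 * 2 = 6.  On a big-endian host
 * the XOR above gives 6 ^ (8 - 2) = 0, because the two most significant
 * bytes of d[0] (which hold guest bytes 6 and 7) are stored first in host
 * memory.  Likewise a single-byte element 0 maps to 0 ^ 7 = 7, the host
 * offset of the least significant byte of d[0].
 */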

/* Return the offset into CPUARMState of the "whole" vector register Qn.  */
static inline int vec_full_reg_offset(DisasContext *s, int regno)
{
    assert_fp_access_checked(s);
    return offsetof(CPUARMState, vfp.zregs[regno]);
}

/* Return a newly allocated pointer to the vector register.  */
static inline TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, tcg_env, vec_full_reg_offset(s, regno));
    return ret;
}
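
/*
 * Illustrative use (editorial addition; gen_helper_example_op is a
 * hypothetical helper name and rd/rn stand for register numbers): a
 * pointer created this way is typically passed to an out-of-line helper
 * that operates on the whole register in CPUARMState, e.g.
 *
 *     TCGv_ptr pd = vec_full_reg_ptr(s, rd);
 *     TCGv_ptr pn = vec_full_reg_ptr(s, rn);
 *     gen_helper_example_op(pd, pn, tcg_constant_i32(vec_full_reg_size(s)));
 */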

/* Return the byte size of the "whole" vector register, VL / 8.  */
static inline int vec_full_reg_size(DisasContext *s)
{
    return s->vl;
}

/* Return the byte size of the streaming vector register, SVL / 8. */
static inline int streaming_vec_reg_size(DisasContext *s)
{
    return s->svl;
}

/*
 * Return the offset into CPUARMState of the predicate register Pn.
 * Note for this purpose, FFR is P16.
 */
static inline int pred_full_reg_offset(DisasContext *s, int regno)
{
    return offsetof(CPUARMState, vfp.pregs[regno]);
}

/* Return the byte size of the whole predicate register, VL / 64.  */
static inline int pred_full_reg_size(DisasContext *s)
{
    return s->vl >> 3;
}

/* Return the byte size of the streaming predicate register, SVL / 64.  */
static inline int streaming_pred_reg_size(DisasContext *s)
{
    return s->svl >> 3;
}
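
/*
 * Worked example (editorial addition): with a vector length of 256 bits,
 * s->vl is 32 bytes, so vec_full_reg_size() returns 32 and
 * pred_full_reg_size() returns 32 / 8 = 4 bytes (one predicate bit per
 * vector byte).  The streaming variants are identical except that they
 * use the streaming vector length s->svl instead.
 */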

/*
 * Round up the size of a register to a size allowed by
 * the tcg vector infrastructure.  Any operation which uses this
 * size may assume that the bits above pred_full_reg_size are zero,
 * and must leave them the same way.
 *
 * Note that this is not needed for the vector registers as they
 * are always properly sized for tcg vectors.
 */
static inline int size_for_gvec(int size)
{
    if (size <= 8) {
        return 8;
    } else {
        return QEMU_ALIGN_UP(size, 16);
    }
}

static inline int pred_gvec_reg_size(DisasContext *s)
{
    return size_for_gvec(pred_full_reg_size(s));
}
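
/*
 * Worked example (editorial addition): with a 256-bit vector length,
 * pred_full_reg_size() is 4, so pred_gvec_reg_size() rounds up to the
 * 8-byte minimum.  With a 1536-bit vector length the predicate register
 * is 24 bytes, which QEMU_ALIGN_UP(24, 16) rounds up to 32; operations
 * using that size rely on bytes 24..31 staying zero.
 */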

/* Return a newly allocated pointer to the predicate register.  */
static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, tcg_env, pred_full_reg_offset(s, regno));
    return ret;
}

bool disas_sve(DisasContext *, uint32_t);
bool disas_sme(DisasContext *, uint32_t);

void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, int64_t shift,
                  uint32_t opr_sz, uint32_t max_sz);

void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);

#endif /* TARGET_ARM_TRANSLATE_A64_H */