/*
 * AArch64 specific prctl functions for linux-user
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H

#include "target/arm/cpu-features.h"

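/*
 * Vector lengths are managed internally in quadwords (VQ, 128-bit units),
 * while the prctl ABI deals in bytes, hence the VL = VQ * 16 conversions
 * below.  For example, a CPU running with 256-bit SVE vectors reports
 * VL = 32.
 */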
static abi_long do_prctl_sve_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sve, cpu)) {
        /* PSTATE.SM is always unset on syscall entry. */
        return sve_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_get_vl do_prctl_sve_get_vl

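/*
 * PR_SVE_SET_VL: the requested length is a hint; the value actually
 * programmed is limited to what the CPU supports, and the new VL in
 * bytes is returned.  A rough guest-side sketch (the exact result
 * depends on the vector lengths the CPU model implements):
 *
 *     int vl = prctl(PR_SVE_SET_VL, 48);
 *     // On a CPU supporting 128/256/512-bit vectors, vl == 32:
 *     // the request is rounded down to the largest supported length.
 */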
static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the current architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sve, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        uint32_t vq, old_vq;

        /* PSTATE.SM is always unset on syscall entry. */
        old_vq = sve_vq(env);

        /*
         * Bound the value of arg2, so that we know that it fits into
         * the 4-bit field in ZCR_EL1.  Rely on the hflags rebuild to
         * sort out the length supported by the cpu.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, ARM_MAX_VQ);
        env->vfp.zcr_el[1] = vq - 1;
        arm_rebuild_hflags(env);

        vq = sve_vq(env);
        if (vq < old_vq) {
            aarch64_sve_narrow_vq(env, vq);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_set_vl do_prctl_sve_set_vl

static abi_long do_prctl_sme_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sme, cpu)) {
        return sme_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_get_vl do_prctl_sme_get_vl

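/*
 * PR_SME_SET_VL: as for PR_SVE_SET_VL above, but for the streaming
 * vector length (SVL).  Architecturally SVL is a power of two, so a
 * request of e.g. 48 bytes would typically be granted as 32 on a CPU
 * implementing 16/32/64-byte streaming vector lengths.  Any change to
 * SVL discards ZA storage, mirroring the kernel's behaviour.
 */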
static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        int vq, old_vq;

        old_vq = sme_vq(env);

        /*
         * Bound the value of vq, so that we know that it fits into
         * the 4-bit field in SMCR_EL1.  Because PSTATE.SM is cleared
         * on syscall entry, we are not modifying the current SVE
         * vector length.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, 16);
        env->vfp.smcr_el[1] =
            FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);

        /* Delay rebuilding hflags until we know if ZA must change. */
        vq = sve_vqm1_for_el_sm(env, 0, true) + 1;

        if (vq != old_vq) {
            /*
             * PSTATE.ZA state is cleared on any change to SVL.
             * PSTATE.SM was cleared on syscall entry, so the current SVE
             * vector length is unchanged, but hflags must be rebuilt to
             * reflect the change to PSTATE.ZA.
             */
            env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
            arm_rebuild_hflags(env);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_set_vl do_prctl_sme_set_vl

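/*
 * PR_PAC_RESET_KEYS: arg2 selects which pointer-authentication keys to
 * regenerate; 0 selects all of them.  For instance (a sketch of the
 * guest-visible ABI), prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0) refreshes
 * all five keys, while passing PR_PAC_APGAKEY refreshes only the
 * generic key.
 */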
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
{
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_pauth, cpu)) {
        int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
                   PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
        int ret = 0;
        Error *err = NULL;

        if (arg2 == 0) {
            arg2 = all;
        } else if (arg2 & ~all) {
            return -TARGET_EINVAL;
        }
        if (arg2 & PR_PAC_APIAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apia,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APIBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apib,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apda,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apdb,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APGAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apga,
                                        sizeof(ARMPACKey), &err);
        }
        if (ret != 0) {
            /*
             * Some unknown failure in the crypto.  The best
             * we can do is log it and fail the syscall.
             * The real syscall cannot fail this way.
             */
            qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s\n",
                          error_get_pretty(err));
            error_free(err);
            return -TARGET_EIO;
        }
        return 0;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_reset_keys do_prctl_reset_keys

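/*
 * PR_SET_TAGGED_ADDR_CTRL: besides the tagged-address enable bit, MTE
 * adds a tag-check-fault mode (mapped to SCTLR_EL1.TCF0) and an
 * "allowed tags" include mask.  GCR_EL1.Exclude is the inverse of that
 * include mask: for example, an include mask of 0x0003 (allow IRG to
 * generate tags 0 and 1) is stored as GCR_EL1.Exclude = 0xfffc.
 */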
static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
{
    abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_mte, cpu)) {
        valid_mask |= PR_MTE_TCF_MASK;
        valid_mask |= PR_MTE_TAG_MASK;
    }

    if (arg2 & ~valid_mask) {
        return -TARGET_EINVAL;
    }
    env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;

    if (cpu_isar_feature(aa64_mte, cpu)) {
        switch (arg2 & PR_MTE_TCF_MASK) {
        case PR_MTE_TCF_NONE:
        case PR_MTE_TCF_SYNC:
        case PR_MTE_TCF_ASYNC:
            break;
        default:
            return -TARGET_EINVAL;
        }

        /*
         * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
         * Note that the syscall values are consistent with hw.
         */
        env->cp15.sctlr_el[1] =
            deposit64(env->cp15.sctlr_el[1], 38, 2, arg2 >> PR_MTE_TCF_SHIFT);

        /*
         * Write PR_MTE_TAG to GCR_EL1[Exclude].
         * Note that the syscall uses an include mask,
         * and hardware uses an exclude mask -- invert.
         */
        env->cp15.gcr_el1 =
            deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
        arm_rebuild_hflags(env);
    }
    return 0;
}
#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl

static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    abi_long ret = 0;

    if (env->tagged_addr_enable) {
        ret |= PR_TAGGED_ADDR_ENABLE;
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        /* See do_prctl_set_tagged_addr_ctrl. */
        ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
        ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
    }
    return ret;
}
#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl

#endif /* AARCH64_TARGET_PRCTL_H */