xref: /freebsd/sys/arm64/arm64/ptrauth.c (revision f126890a)
1 /*-
2  * Copyright (c) 2021 The FreeBSD Foundation
3  *
4  * This software was developed by Andrew Turner under sponsorship from
5  * the FreeBSD Foundation.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * This manages pointer authentication. As it needs to enable the use of
31  * pointer authentication and change the keys we must built this with
32  * pointer authentication disabled.
33  */
34 #ifdef __ARM_FEATURE_PAC_DEFAULT
35 #error Must be built with pointer authentication disabled
36 #endif
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/libkern.h>
41 #include <sys/proc.h>
42 #include <sys/reboot.h>
43 
44 #include <machine/armreg.h>
45 #include <machine/cpu.h>
46 #include <machine/reg.h>
47 #include <machine/vmparam.h>
48 
49 #define	SCTLR_PTRAUTH	(SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)
50 
51 static bool __read_mostly enable_ptrauth = false;
52 
53 /* Functions called from assembly. */
54 void ptrauth_start(void);
55 struct thread *ptrauth_switch(struct thread *);
56 void ptrauth_exit_el0(struct thread *);
57 void ptrauth_enter_el0(struct thread *);
58 
59 static bool
60 ptrauth_disable(void)
61 {
62 	const char *family, *maker, *product;
63 
64 	family = kern_getenv("smbios.system.family");
65 	maker = kern_getenv("smbios.system.maker");
66 	product = kern_getenv("smbios.system.product");
67 	if (family == NULL || maker == NULL || product == NULL)
68 		return (false);
69 
70 	/*
71 	 * The Dev Kit appears to be configured to trap upon access to PAC
72 	 * registers, but the kernel boots at EL1 and so we have no way to
73 	 * inspect or change this configuration.  As a workaround, simply
74 	 * disable PAC on this platform.
75 	 */
76 	if (strcmp(maker, "Microsoft Corporation") == 0 &&
77 	    strcmp(family, "Surface") == 0 &&
78 	    strcmp(product, "Windows Dev Kit 2023") == 0)
79 		return (true);
80 
81 	return (false);
82 }
83 
84 void
85 ptrauth_init(void)
86 {
87 	uint64_t isar1;
88 	int pac_enable;
89 
90 	/*
91 	 * Allow the sysadmin to disable pointer authentication globally,
92 	 * e.g. on broken hardware.
93 	 */
94 	pac_enable = 1;
95 	TUNABLE_INT_FETCH("hw.pac.enable", &pac_enable);
96 	if (!pac_enable) {
97 		if (boothowto & RB_VERBOSE)
98 			printf("Pointer authentication is disabled\n");
99 		return;
100 	}
101 
102 	if (!get_kernel_reg(ID_AA64ISAR1_EL1, &isar1))
103 		return;
104 
105 	if (ptrauth_disable())
106 		return;
107 
108 	/*
109 	 * This assumes if there is pointer authentication on the boot CPU
110 	 * it will also be available on any non-boot CPUs. If this is ever
111 	 * not the case we will have to add a quirk.
112 	 */
113 	if (ID_AA64ISAR1_APA_VAL(isar1) > 0 ||
114 	    ID_AA64ISAR1_API_VAL(isar1) > 0) {
115 		enable_ptrauth = true;
116 		elf64_addr_mask.code |= PAC_ADDR_MASK;
117 		elf64_addr_mask.data |= PAC_ADDR_MASK;
118 	}
119 }
120 
121 /* Copy the keys when forking a new process */
122 void
123 ptrauth_fork(struct thread *new_td, struct thread *orig_td)
124 {
125 	if (!enable_ptrauth)
126 		return;
127 
128 	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
129 	    sizeof(new_td->td_md.md_ptrauth_user));
130 }
131 
132 /* Generate new userspace keys when executing a new process */
133 void
134 ptrauth_exec(struct thread *td)
135 {
136 	if (!enable_ptrauth)
137 		return;
138 
139 	arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
140 	    0);
141 }
142 
143 /*
144  * Copy the user keys when creating a new userspace thread until it's clear
145  * how the ABI expects the various keys to be assigned.
146  */
147 void
148 ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
149 {
150 	if (!enable_ptrauth)
151 		return;
152 
153 	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
154 	    sizeof(new_td->td_md.md_ptrauth_user));
155 }
156 
157 /* Generate new kernel keys when executing a new kernel thread */
158 void
159 ptrauth_thread_alloc(struct thread *td)
160 {
161 	if (!enable_ptrauth)
162 		return;
163 
164 	arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
165 	    0);
166 }
167 
168 /*
169  * Load the userspace keys. We can't use WRITE_SPECIALREG as we need
170  * to set the architecture extension.
171  */
172 #define	LOAD_KEY(space, name)					\
173 __asm __volatile(						\
174 	".arch_extension pauth			\n"		\
175 	"msr	"#name"keylo_el1, %0		\n"		\
176 	"msr	"#name"keyhi_el1, %1		\n"		\
177 	".arch_extension nopauth		\n"		\
178 	:: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo),	\
179 	   "r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))
180 
/*
 * Initialise thread0's kernel pointer authentication key and load it
 * into the instruction A-key registers. Runs during early boot, before
 * ptrauth_start() turns the SCTLR enable bits on.
 */
void
ptrauth_thread0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* TODO: Generate a random number here */
	/* Thread0 currently uses an all-zero key rather than a random one. */
	memset(&td->td_md.md_ptrauth_kern, 0,
	    sizeof(td->td_md.md_ptrauth_kern));
	LOAD_KEY(kern, apia);
	/*
	 * No isb as this is called before ptrauth_start so can rely on
	 * the instruction barrier there.
	 */
}
196 
197 /*
198  * Enable pointer authentication. After this point userspace and the kernel
199  * can sign return addresses, etc. based on their keys
200  *
201  * This assumes either all or no CPUs have pointer authentication support,
202  * and, if supported, all CPUs have the same algorithm.
203  */
204 void
205 ptrauth_start(void)
206 {
207 	uint64_t sctlr;
208 
209 	if (!enable_ptrauth)
210 		return;
211 
212 	/* Enable pointer authentication */
213 	sctlr = READ_SPECIALREG(sctlr_el1);
214 	sctlr |= SCTLR_PTRAUTH;
215 	WRITE_SPECIALREG(sctlr_el1, sctlr);
216 	isb();
217 }
218 
219 #ifdef SMP
220 void
221 ptrauth_mp_start(uint64_t cpu)
222 {
223 	struct ptrauth_key start_key;
224 	uint64_t sctlr;
225 
226 	if (!enable_ptrauth)
227 		return;
228 
229 	/*
230 	 * We need a key until we call sched_throw, however we don't have
231 	 * a thread until then. Create a key just for use within
232 	 * init_secondary and whatever it calls. As init_secondary never
233 	 * returns it is safe to do so from within it.
234 	 *
235 	 * As it's only used for a short length of time just use the cpu
236 	 * as the key.
237 	 */
238 	start_key.pa_key_lo = cpu;
239 	start_key.pa_key_hi = ~cpu;
240 
241 	__asm __volatile(
242 	    ".arch_extension pauth		\n"
243 	    "msr	apiakeylo_el1, %0	\n"
244 	    "msr	apiakeyhi_el1, %1	\n"
245 	    ".arch_extension nopauth		\n"
246 	    :: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));
247 
248 	/* Enable pointer authentication */
249 	sctlr = READ_SPECIALREG(sctlr_el1);
250 	sctlr |= SCTLR_PTRAUTH;
251 	WRITE_SPECIALREG(sctlr_el1, sctlr);
252 	isb();
253 }
254 #endif
255 
256 struct thread *
257 ptrauth_switch(struct thread *td)
258 {
259 	if (enable_ptrauth) {
260 		LOAD_KEY(kern, apia);
261 		isb();
262 	}
263 
264 	return (td);
265 }
266 
/* Called when we are exiting userspace and entering the kernel */
void
ptrauth_exit_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* Swap the instruction A-key from the user key to the kernel key. */
	LOAD_KEY(kern, apia);
	/* Synchronise before any kernel code relies on the new key. */
	isb();
}
277 
/* Called when we are about to exit the kernel and enter userspace */
void
ptrauth_enter_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* Install all five of the thread's userspace keys. */
	LOAD_KEY(user, apia);
	LOAD_KEY(user, apib);
	LOAD_KEY(user, apda);
	LOAD_KEY(user, apdb);
	LOAD_KEY(user, apga);
	/*
	 * No isb as this is called from the exception handler so can rely
	 * on the eret instruction to be the needed context synchronizing event.
	 */
}
295