xref: /freebsd/sys/arm64/arm64/ptrauth.c (revision 9768746b)
1 /*-
2  * Copyright (c) 2021 The FreeBSD Foundation
3  *
4  * This software was developed by Andrew Turner under sponsorship from
5  * the FreeBSD Foundation.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * This manages pointer authentication. As it needs to enable the use of
 * pointer authentication and change the keys we must build this with
32  * pointer authentication disabled.
33  */
34 #ifdef __ARM_FEATURE_PAC_DEFAULT
35 #error Must be built with pointer authentication disabled
36 #endif
37 
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include <sys/param.h>
42 #include <sys/kernel.h>
43 #include <sys/libkern.h>
44 #include <sys/proc.h>
45 #include <sys/reboot.h>
46 
47 #include <machine/armreg.h>
48 #include <machine/cpu.h>
49 #include <machine/reg.h>
50 #include <machine/vmparam.h>
51 
52 #define	SCTLR_PTRAUTH	(SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)
53 
54 static bool __read_mostly enable_ptrauth = false;
55 
56 /* Functions called from assembly. */
57 void ptrauth_start(void);
58 struct thread *ptrauth_switch(struct thread *);
59 void ptrauth_exit_el0(struct thread *);
60 void ptrauth_enter_el0(struct thread *);
61 
/*
 * Probe for pointer authentication support on the boot CPU and, if present
 * (and not disabled by the sysadmin), arm the rest of this file by setting
 * enable_ptrauth. Every other ptrauth_* function is a no-op until this runs.
 */
void
ptrauth_init(void)
{
	uint64_t isar1;
	int pac_enable;

	/*
	 * Allow the sysadmin to disable pointer authentication globally,
	 * e.g. on broken hardware.
	 */
	pac_enable = 1;
	TUNABLE_INT_FETCH("hw.pac.enable", &pac_enable);
	if (!pac_enable) {
		if (boothowto & RB_VERBOSE)
			printf("Pointer authentication is disabled\n");
		return;
	}

	get_kernel_reg(ID_AA64ISAR1_EL1, &isar1);

	/*
	 * This assumes if there is pointer authentication on the boot CPU
	 * it will also be available on any non-boot CPUs. If this is ever
	 * not the case we will have to add a quirk.
	 */
	if (ID_AA64ISAR1_APA_VAL(isar1) > 0 ||
	    ID_AA64ISAR1_API_VAL(isar1) > 0) {
		/* Either APA (architected) or API (impl-defined) PAC exists. */
		enable_ptrauth = true;
		/*
		 * Widen the ELF address masks to include the PAC bits —
		 * presumably so tools consuming them can strip signatures
		 * from user pointers; confirm against the mask's consumers.
		 */
		elf64_addr_mask.code |= PAC_ADDR_MASK;
		elf64_addr_mask.data |= PAC_ADDR_MASK;
	}
}
94 
95 /* Copy the keys when forking a new process */
96 void
97 ptrauth_fork(struct thread *new_td, struct thread *orig_td)
98 {
99 	if (!enable_ptrauth)
100 		return;
101 
102 	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
103 	    sizeof(new_td->td_md.md_ptrauth_user));
104 }
105 
106 /* Generate new userspace keys when executing a new process */
107 void
108 ptrauth_exec(struct thread *td)
109 {
110 	if (!enable_ptrauth)
111 		return;
112 
113 	arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
114 	    0);
115 }
116 
117 /*
118  * Copy the user keys when creating a new userspace thread until it's clear
119  * how the ABI expects the various keys to be assigned.
120  */
121 void
122 ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
123 {
124 	if (!enable_ptrauth)
125 		return;
126 
127 	memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
128 	    sizeof(new_td->td_md.md_ptrauth_user));
129 }
130 
131 /* Generate new kernel keys when executing a new kernel thread */
132 void
133 ptrauth_thread_alloc(struct thread *td)
134 {
135 	if (!enable_ptrauth)
136 		return;
137 
138 	arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
139 	    0);
140 }
141 
/*
 * Load one key-register pair (<name>keylo_el1 / <name>keyhi_el1) from the
 * named key store ("kern" or "user") of a thread pointer 'td' that must be
 * in scope at the expansion site. We can't use WRITE_SPECIALREG as we need
 * to set the architecture extension so the assembler accepts the pauth
 * register names. NOTE: no isb here — each caller provides (or documents
 * why it can omit) the context synchronization.
 */
#define	LOAD_KEY(space, name)					\
__asm __volatile(						\
	".arch_extension pauth			\n"		\
	"msr	"#name"keylo_el1, %0		\n"		\
	"msr	"#name"keyhi_el1, %1		\n"		\
	".arch_extension nopauth		\n"		\
	:: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo),	\
	   "r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))
154 
/*
 * Set up thread0's kernel key and install it in the IA key registers.
 * The key is currently all-zero (see TODO); random generation may not be
 * available this early in boot.
 */
void
ptrauth_thread0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* TODO: Generate a random number here */
	memset(&td->td_md.md_ptrauth_kern, 0,
	    sizeof(td->td_md.md_ptrauth_kern));
	LOAD_KEY(kern, apia);
	/*
	 * No isb as this is called before ptrauth_start so can rely on
	 * the instruction barrier there.
	 */
}
170 
171 /*
172  * Enable pointer authentication. After this point userspace and the kernel
173  * can sign return addresses, etc. based on their keys
174  *
175  * This assumes either all or no CPUs have pointer authentication support,
176  * and, if supported, all CPUs have the same algorithm.
177  */
178 void
179 ptrauth_start(void)
180 {
181 	uint64_t sctlr;
182 
183 	if (!enable_ptrauth)
184 		return;
185 
186 	/* Enable pointer authentication */
187 	sctlr = READ_SPECIALREG(sctlr_el1);
188 	sctlr |= SCTLR_PTRAUTH;
189 	WRITE_SPECIALREG(sctlr_el1, sctlr);
190 	isb();
191 }
192 
#ifdef SMP
/*
 * Per-CPU pointer-authentication bring-up for secondary (non-boot) CPUs:
 * install a temporary IA key and enable PAC in SCTLR_EL1. The real
 * per-thread key is loaded later by ptrauth_switch().
 */
void
ptrauth_mp_start(uint64_t cpu)
{
	struct ptrauth_key start_key;
	uint64_t sctlr;

	if (!enable_ptrauth)
		return;

	/*
	 * We need a key until we call sched_throw, however we don't have
	 * a thread until then. Create a key just for use within
	 * init_secondary and whatever it calls. As init_secondary never
	 * returns it is safe to do so from within it.
	 *
	 * As it's only used for a short length of time just use the cpu
	 * as the key.
	 */
	start_key.pa_key_lo = cpu;
	start_key.pa_key_hi = ~cpu;

	/*
	 * Open-coded (not LOAD_KEY) because the key lives in a local
	 * struct, not in a thread's md_ptrauth_* storage.
	 */
	__asm __volatile(
	    ".arch_extension pauth		\n"
	    "msr	apiakeylo_el1, %0	\n"
	    "msr	apiakeyhi_el1, %1	\n"
	    ".arch_extension nopauth		\n"
	    :: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));

	/* Enable pointer authentication */
	sctlr = READ_SPECIALREG(sctlr_el1);
	sctlr |= SCTLR_PTRAUTH;
	WRITE_SPECIALREG(sctlr_el1, sctlr);
	isb();
}
#endif
229 
230 struct thread *
231 ptrauth_switch(struct thread *td)
232 {
233 	if (enable_ptrauth) {
234 		LOAD_KEY(kern, apia);
235 		isb();
236 	}
237 
238 	return (td);
239 }
240 
/* Called when we are exiting userspace and entering the kernel */
void
ptrauth_exit_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* Switch the IA key registers back to this thread's kernel key. */
	LOAD_KEY(kern, apia);
	isb();
}
251 
/* Called when we are about to exit the kernel and enter userspace */
void
ptrauth_enter_el0(struct thread *td)
{
	if (!enable_ptrauth)
		return;

	/* Install all five of the thread's user keys before the eret. */
	LOAD_KEY(user, apia);
	LOAD_KEY(user, apib);
	LOAD_KEY(user, apda);
	LOAD_KEY(user, apdb);
	LOAD_KEY(user, apga);
	/*
	 * No isb as this is called from the exception handler so can rely
	 * on the eret instruction to be the needed context synchronizing event.
	 */
}
269