/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * AArch64 exception entry/exit for the FreeBSD kernel.
 *
 * Register conventions used throughout this file:
 *   - While in the kernel, x18 holds the per-CPU data pointer (loaded from
 *     tpidr_el1 by save_registers_head); PC_* offsets index off it.
 *   - TF_* offsets (from assym.inc) index the struct trapframe built on the
 *     kernel stack by save_registers_head.
 */

#include <machine/asm.h>
#include <machine/armreg.h>
__FBSDID("$FreeBSD$");

#include "assym.inc"

	.text

/*
 * Build a trapframe on the kernel stack and save the interrupted context's
 * general-purpose registers, elr/spsr/esr, and stack pointer into it.
 *
 * \el is 1 for exceptions taken from the kernel (EL1) and 0 for exceptions
 * taken from userland (EL0).  On exit: x18 = per-CPU pointer (tpidr_el1),
 * x29 = address just above the trapframe (frame-pointer anchor for
 * unwinders), x10-x12 are clobbered.
 *
 * This is limited to 28 instructions as it's placed in the exception vector
 * slot that is 32 instructions long. We need one for the branch, and three
 * for the prologue.
 */
.macro save_registers_head el
.if \el == 1
	/*
	 * Exception from the kernel: remember the interrupted kernel sp in
	 * x18 (the interrupted x18 is saved below, and x18 is reloaded from
	 * tpidr_el1 at the end of this macro).
	 *
	 * NOTE(review): the extra 128-byte gap left between the interrupted
	 * sp and the trapframe is not explained here — presumably scratch
	 * space reserved below the interrupted frame; confirm against the
	 * commit history before changing.
	 */
	mov	x18, sp
	sub	sp, sp, #128
.endif
	sub	sp, sp, #(TF_SIZE)
	stp	x28, x29, [sp, #(TF_X + 28 * 8)]
	stp	x26, x27, [sp, #(TF_X + 26 * 8)]
	stp	x24, x25, [sp, #(TF_X + 24 * 8)]
	stp	x22, x23, [sp, #(TF_X + 22 * 8)]
	stp	x20, x21, [sp, #(TF_X + 20 * 8)]
	stp	x18, x19, [sp, #(TF_X + 18 * 8)]
	stp	x16, x17, [sp, #(TF_X + 16 * 8)]
	stp	x14, x15, [sp, #(TF_X + 14 * 8)]
	stp	x12, x13, [sp, #(TF_X + 12 * 8)]
	stp	x10, x11, [sp, #(TF_X + 10 * 8)]
	stp	x8, x9, [sp, #(TF_X + 8 * 8)]
	stp	x6, x7, [sp, #(TF_X + 6 * 8)]
	stp	x4, x5, [sp, #(TF_X + 4 * 8)]
	stp	x2, x3, [sp, #(TF_X + 2 * 8)]
	stp	x0, x1, [sp, #(TF_X + 0 * 8)]
	/* Capture the exception return state and syndrome */
	mrs	x10, elr_el1
	mrs	x11, spsr_el1
	mrs	x12, esr_el1
.if \el == 0
	/* Exception from userland: the interrupted sp is sp_el0 */
	mrs	x18, sp_el0
.endif
	str	x10, [sp, #(TF_ELR)]
	/* spsr and esr are stored as adjacent 32-bit values */
	stp	w11, w12, [sp, #(TF_SPSR)]
	/* x18 holds the interrupted sp (user or kernel) at this point */
	stp	x18, lr, [sp, #(TF_SP)]
	/* From here on x18 is the kernel's per-CPU data pointer */
	mrs	x18, tpidr_el1
	/* Anchor the frame-pointer chain just above the trapframe */
	add	x29, sp, #(TF_SIZE)
.endm

/*
 * Per-exception entry work, run after save_registers_head has built the
 * trapframe.  For EL0 exceptions this installs the per-thread SSP canary
 * (optional), applies the SSBD workaround, switches pointer-auth keys and
 * enters the debug monitor; it clobbers x0/x1 and lr (it makes calls).
 * Finally it unmasks debug/SError exceptions as appropriate for \el.
 */
.macro save_registers el
.if \el == 0
#if defined(PERTHREAD_SSP)
	/* Load the SSP canary to sp_el0 */
	ldr	x1, [x18, #(PC_CURTHREAD)]
	add	x1, x1, #(TD_MD_CANARY)
	msr	sp_el0, x1
#endif

	/*
	 * Apply the SSBD (CVE-2018-3639) workaround if needed.
	 * PC_SSBD holds a function pointer (NULL if no workaround); w0 = 1
	 * requests that the mitigation be enabled on kernel entry.
	 */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #1
	blr	x1
1:

	/* Switch from the userland to the kernel pointer-auth keys */
	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_exit_el0

	ldr	x0, [x18, #(PC_CURTHREAD)]
	bl	dbg_monitor_enter

	/* Unmask debug and SError exceptions */
	msr	daifclr, #(DAIF_D | DAIF_A)
.else
	/*
	 * Unmask debug and SError exceptions.
	 * For EL1, debug exceptions are conditionally unmasked in
	 * do_el1h_sync().
	 */
	msr	daifclr, #(DAIF_A)
.endif
.endm

/*
 * Undo save_registers/save_registers_head: tear down the EL0 entry state
 * (debug monitor, pointer-auth keys, SSBD workaround), then restore the
 * interrupted context from the trapframe, ready for the caller's ERET.
 */
.macro restore_registers el
	/*
	 * Mask all exceptions, x18 may change in the interrupt exception
	 * handler.
	 */
	msr	daifset, #(DAIF_ALL)
.if \el == 0
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	dbg_monitor_exit

	/* Switch back to the userland pointer-auth keys */
	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_enter_el0

	/* Remove the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #0
	blr	x1
1:
.endif
	/* x18 = interrupted sp, as stored by save_registers_head */
	ldp	x18, lr, [sp, #(TF_SP)]
	/*
	 * x10 = elr; x11 = the packed spsr/esr pair (spsr in the low word;
	 * NOTE(review): presumably the esr in the high word is ignored by
	 * the 32-bit-state write to spsr_el1 — confirm).
	 */
	ldp	x10, x11, [sp, #(TF_ELR)]
.if \el == 0
	msr	sp_el0, x18
.endif
	msr	spsr_el1, x11
	msr	elr_el1, x10
	ldp	x0, x1, [sp, #(TF_X + 0 * 8)]
	ldp	x2, x3, [sp, #(TF_X + 2 * 8)]
	ldp	x4, x5, [sp, #(TF_X + 4 * 8)]
	ldp	x6, x7, [sp, #(TF_X + 6 * 8)]
	ldp	x8, x9, [sp, #(TF_X + 8 * 8)]
	ldp	x10, x11, [sp, #(TF_X + 10 * 8)]
	ldp	x12, x13, [sp, #(TF_X + 12 * 8)]
	ldp	x14, x15, [sp, #(TF_X + 14 * 8)]
	ldp	x16, x17, [sp, #(TF_X + 16 * 8)]
.if \el == 0
	/*
	 * We only restore the callee saved registers when returning to
	 * userland as they may have been updated by a system call or signal.
	 */
	ldp	x18, x19, [sp, #(TF_X + 18 * 8)]
	ldp	x20, x21, [sp, #(TF_X + 20 * 8)]
	ldp	x22, x23, [sp, #(TF_X + 22 * 8)]
	ldp	x24, x25, [sp, #(TF_X + 24 * 8)]
	ldp	x26, x27, [sp, #(TF_X + 26 * 8)]
	ldp	x28, x29, [sp, #(TF_X + 28 * 8)]
.else
	/* EL1 kept its callee-saved registers; only the frame pointer */
	ldr	x29, [sp, #(TF_X + 29 * 8)]
.endif
.if \el == 0
	add	sp, sp, #(TF_SIZE)
.else
	/*
	 * Restore the interrupted kernel sp (loaded into x18 above from
	 * TF_SP), then reload the per-CPU pointer — the kernel still needs
	 * x18 valid after this macro.
	 */
	mov	sp, x18
	mrs	x18, tpidr_el1
.endif
.endm

/*
 * Run any pending ASTs (signals, preemption, ...) before returning to
 * userland.  Interrupts are masked across the TD_AST check so an AST posted
 * after the check cannot be missed; if the flag is set, interrupts are
 * restored, ast() is called with the trapframe, and the check repeats.
 * Clobbers x0, x1 and x19 (makes calls, so lr too).
 */
.macro do_ast
	mrs	x19, daif
	/* Make sure the IRQs are enabled before calling ast() */
	bic	x19, x19, #PSR_I
1:
	/*
	 * Mask interrupts while checking the ast pending flag
	 */
	msr	daifset, #(DAIF_INTR)

	/* Read the current thread AST mask */
	ldr	x1, [x18, #PC_CURTHREAD]	/* Load curthread */
	ldr	w1, [x1, #(TD_AST)]

	/* Check if we have a non-zero AST mask */
	cbz	w1, 2f

	/* Restore interrupts */
	msr	daif, x19

	/* handle the ast */
	mov	x0, sp
	bl	_C_LABEL(ast)

	/* Re-check for new ast scheduled */
	b	1b
2:
.endm

/* Synchronous exception (syscall, fault, ...) taken from the kernel */
ENTRY(handle_el1h_sync)
	save_registers 1
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	do_el1h_sync
	restore_registers 1
	ERET
END(handle_el1h_sync)

/* IRQ taken from the kernel */
ENTRY(handle_el1h_irq)
	save_registers 1
	mov	x0, sp
	bl	intr_irq_handler
	restore_registers 1
	ERET
END(handle_el1h_irq)

/* Synchronous exception (syscall, fault, ...) taken from userland */
ENTRY(handle_el0_sync)
	/*
	 * Read the fault address early. The current thread structure may
	 * be transiently unmapped if it is part of a memory range being
	 * promoted or demoted to/from a superpage. As this involves a
	 * break-before-make sequence there is a short period of time where
	 * an access will raise an exception. If this happens the fault
	 * address will be changed to the kernel address so a later read of
	 * far_el1 will give the wrong value.
	 *
	 * The earliest memory access that could trigger a fault is in a
	 * function called by the save_registers macro so this is the latest
	 * we can read the userspace value.
	 */
	mrs	x19, far_el1
	save_registers 0
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	str	x1, [x0, #TD_FRAME]
	mov	x2, x19
	bl	do_el0_sync
	do_ast
	restore_registers 0
	ERET
END(handle_el0_sync)

/* IRQ taken from userland */
ENTRY(handle_el0_irq)
	save_registers 0
	mov	x0, sp
	bl	intr_irq_handler
	do_ast
	restore_registers 0
	ERET
END(handle_el0_irq)

/*
 * SError (asynchronous abort) from either EL.  do_serror is not expected
 * to return; the branch loops in case it somehow does.
 */
ENTRY(handle_serror)
	save_registers 0
	mov	x0, sp
1:	bl	do_serror
	b	1b
END(handle_serror)

/* Vector slots that should never be taken (EL1t, FIQ, 32-bit EL0 FIQ) */
ENTRY(handle_empty_exception)
	save_registers 0
	mov	x0, sp
1:	bl	unhandled_exception
	b	1b
END(handle_empty_exception)

/*
 * One 128-byte (32-instruction) vector-table slot: save the trapframe
 * inline (see the instruction-count limit on save_registers_head) and
 * branch to the full handler.  The trailing barriers and brk should never
 * execute; they catch a runaway PC falling through the slot.
 */
.macro vector name, el
	.align 7
	save_registers_head \el
	b	handle_\name
	dsb	sy
	isb
	/* Break instruction to ensure we aren't executing code here. */
	brk	0x42
.endm

.macro vempty el
	vector empty_exception \el
.endm

	/* The vector table base (VBAR_EL1) must be 2 KiB aligned */
	.align 11
	.globl exception_vectors
exception_vectors:
	vempty 1		/* Synchronous EL1t */
	vempty 1		/* IRQ EL1t */
	vempty 1		/* FIQ EL1t */
	vempty 1		/* Error EL1t */

	vector el1h_sync 1	/* Synchronous EL1h */
	vector el1h_irq 1	/* IRQ EL1h */
	vempty 1		/* FIQ EL1h */
	vector serror 1		/* Error EL1h */

	vector el0_sync 0	/* Synchronous 64-bit EL0 */
	vector el0_irq 0	/* IRQ 64-bit EL0 */
	vempty 0		/* FIQ 64-bit EL0 */
	vector serror 0		/* Error 64-bit EL0 */

	vector el0_sync 0	/* Synchronous 32-bit EL0 */
	vector el0_irq 0	/* IRQ 32-bit EL0 */
	vempty 0		/* FIQ 32-bit EL0 */
	vector serror 0		/* Error 32-bit EL0 */