1/* $OpenBSD: locore0.S,v 1.10 2022/12/23 17:31:30 kettenis Exp $ */ 2/*- 3 * Copyright (c) 2012-2014 Andrew Turner 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 *
 * $FreeBSD: head/sys/arm64/arm64/locore.S 282867 2015-05-13 18:57:03Z zbb $
 */

#include "assym.h"
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/param.h>
#include <machine/pte.h>

/*
 * Memory-attribute index values passed (in x7) to
 * build_l2_block_pagetable below.
 * NOTE(review): these look like MAIR_EL1 attribute indices; confirm they
 * match the MAIR programming done by start_mmu (not in this file).
 */
#define DEVICE_MEM      0
#define NORMAL_UNCACHED 2
#define NORMAL_MEM      3

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

#define INIT_STACK_SIZE (PAGE_SIZE * 4)

        .text
        .globl _start
_start:
        /*
         * Stash the three bootloader arguments in high registers so they
         * survive the helper calls below; they are written into the
         * bootparams block in virtdone before entering C.
         */
        mov     x21, x0
        mov     x22, x1
        mov     x23, x2                 /* x2 = FDT pointer (stored as "fdt" below) */

        /* Drop to EL1 (helper defined elsewhere in the kernel) */
        bl      drop_to_el1

        /*
         * Disable the MMU. We may have entered the kernel with it on and
         * will need to update the tables later. If this has been set up
         * with anything other than a VA == PA map then this will fail,
         * but in this case the code to find where we are running from
         * would have also failed.
         */
        dsb     sy
        mrs     x2, sctlr_el1
        bic     x2, x2, SCTLR_M         /* clear the MMU enable bit */
        msr     sctlr_el1, x2
        isb

        /* Set the context id */
        msr     contextidr_el1, xzr

        /*
         * Get the virt -> phys offset (helper defined elsewhere).
         * Afterwards x29 holds the VA/PA delta and x28 the physical
         * load address -- see the register summary below.
         */
        bl      get_virt_delta

        /*
         * Store symbol value: adjust the bootloader-supplied
         * end-of-symbols pointer (x21) by the delta and record it in
         * the kernel variable "esym" (accessed via its physical address).
         */
        adr     x0, .Lesym
        ldr     x0, [x0]
        sub     x0, x0, x29             /* &esym, delta-adjusted */
        add     x21, x21, x29
        str     x21, [x0]

        /*
         * At this point:
         * x29 = PA - VA
         * x28 = Our physical load address
         * NOTE(review): the "sub ... // VA -> PA" comments in
         * create_pagetables imply the opposite sign (x29 = VA - PA);
         * the two comments cannot both be right -- the code is the
         * authority, the delta is simply applied consistently.
         */

        /* Create the page tables */
        bl      create_pagetables

        /*
         * At this point:
         * x27 = TTBR0 table
         * x26 = TTBR1 table
         */

        /* Enable the mmu (helper defined elsewhere) */
        bl      start_mmu

        /* Jump to the virtual address space */
        ldr     x15, .Lvirtdone
        br      x15

/* Virtual addresses of the kernel stack reserved elsewhere (see locore.S). */
.Linitstack:
        .xword  initstack
.Linitstack_end:
        .xword  initstack_end

virtdone:
        /* Set up the stack: sp = top of the initial kernel stack */
        adr     x25, .Linitstack_end
        ldr     x25, [x25]
        mov     sp, x25

        /*
         * Compute the space to reserve for proc0's trapframe,
         * adjusted by STACKALIGNBYTES (subtract-then-mask).
         */
        mov     x8, #TRAPFRAME_SIZEOF
        sub     x8, x8, (STACKALIGNBYTES)
        and     x8, x8, ~(STACKALIGNBYTES)

        // pass base of kernel stack as proc0
        adr     x25, .Linitstack
        ldr     x25, [x25]

        sub     sp, sp, x8              /* carve out the trapframe */

        /* Zero the BSS */
        ldr     x15, .Lbss
        ldr     x14, .Lend
1:
        str     xzr, [x15], #8
        cmp     x15, x14
        b.lo    1b

        /* Backup the module pointer */
        mov     x1, x0

        /* Make the page table base a virtual address */
        sub     x26, x26, x29

        /*
         * Reserve the bootparams block on the stack for initarm().
         * XXX - shouldn't this be 8 * 5 (struct grew from 4 -> 5)
         * NOTE(review): the stores below reach offset 48, i.e. 7 slots
         * (56 bytes); 64 * 4 = 256 bytes comfortably covers that.
         */
        sub     sp, sp, #(64 * 4)
        mov     x0, sp

        /* Negate the delta so it is VA -> PA */
        neg     x29, x29

        str     x1, [x0]                /* modulep */
        str     x26, [x0, 8]            /* kern_l1pt */
        str     x29, [x0, 16]           /* kern_delta */
        str     x25, [x0, 24]           /* kern_stack */
        str     x21, [x0, 32]           /* ? (x0 arg on boot) */
        str     x22, [x0, 40]           /* ? (x1 arg on boot) */
        str     x23, [x0, 48]           /* fdt (x2 arg on boot) */

        /* trace back starts here */
        mov     fp, #0
        /* Branch to C code */
        bl      initarm
        bl      main

        /* We should not get here */
        brk     0


        .align  3
.Lvirtdone:
        .quad   virtdone
.Lbss:
        .quad   __bss_start
.Lend:
        .quad   _end

/*
 * This builds the page tables containing the identity map, and the
 * initial kernel virtual map.
 *
 * It relies on:
 *  We were loaded into contiguous 64MB block of memory
 *  We were loaded to an address that is on a 2MiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * The page table for the identity map starts at L0 and maps the 64MB
 * block that the kernel was loaded into by the bootloader using
 * 2MB (L2) pages. These are loaded into TTBR0.
 *
 * The initial kernel page table starts at L1 and maps the 64MB block
 * that the kernel was initially loaded into by the bootloader using
 * 2MB (L2) pages. The first 2MB of this 64MB block is unused and
 * not mapped. These are loaded into TTBR1.
 *
 * The pages for the page tables are allocated aligned in .data
 * (see locore.S).
 *
 * Register contract:
 *  In:   x28 = physical load address, x29 = VA/PA delta
 *  Out:  x26 = TTBR1 root table (PA), x27 = TTBR0 root table (PA)
 *  Uses: x5 (saved link register), x6-x13 scratch
 */
.Lpagetable:
        .xword  pagetable
.Lpagetable_end:
        .xword  pagetable_end

.Lesym:
        .xword  esym

create_pagetables:
        /* Save the Link register (the bl calls below clobber x30) */
        mov     x5, x30

        /* Clean the page table */
        adr     x6, .Lpagetable
        ldr     x6, [x6]
        sub     x6, x6, x29             // VA -> PA
        mov     x26, x6                 /* x26 = base of the table region */
        adr     x27, .Lpagetable_end
        ldr     x27, [x27]
        sub     x27, x27, x29           // VA -> PA
1:
        /*
         * Zero 64 bytes per iteration.
         * NOTE(review): assumes pagetable_end - pagetable is a multiple
         * of 64 bytes -- confirm against the allocation in locore.S.
         */
        stp     xzr, xzr, [x6], #16
        stp     xzr, xzr, [x6], #16
        stp     xzr, xzr, [x6], #16
        stp     xzr, xzr, [x6], #16
        cmp     x6, x27
        b.lo    1b

        /*
         * Build the TTBR1 maps.
         */

        /* Create the kernel space L2 table */
        mov     x6, x26                 // pagetable_l2_ttbr1:
        mov     x7, #NORMAL_MEM
        add     x8, x28, x29            /* load address adjusted by the delta (kernel VA) */
        mov     x9, x28
        mov     x10, #31                // entries for 64MB - 2MB
        bl      build_l2_block_pagetable

        /* Move to the l1 table */
        add     x26, x26, #PAGE_SIZE * 2        // pagetable_l1_ttbr1:

        /* Link the l1 -> l2 table */
        mov     x9, x6                  /* x6 still holds the L2 table base */
        mov     x10, #2
        mov     x6, x26
        bl      link_l1_pagetable

        /*
         * Build the TTBR0 maps.
         */
        add     x27, x26, #PAGE_SIZE    // pagetable_l2_ttbr0:

        /* Create the kernel space L2 table (identity map: VA == PA == x28) */
        mov     x6, x27                 // pagetable_l2_ttbr0:
        mov     x7, #NORMAL_MEM
        mov     x8, x28
        mov     x9, x28
        mov     x10, #31                // entries for 64MB - 2MB
        bl      build_l2_block_pagetable

        /* Move to the l1 table */
        add     x27, x27, #PAGE_SIZE * 2        // pagetable_l1_ttbr0:

        /* Link the l1 -> l2 table */
        mov     x9, x6
        mov     x10, #2
        mov     x6, x27
        bl      link_l1_pagetable

        /* Move to the l0 table */
        add     x27, x27, #PAGE_SIZE * 2        // pagetable_l0_ttbr0:

        /* Link the l0 -> l1 table */
        mov     x9, x6
        mov     x10, #2
        mov     x6, x27
        bl      link_l0_pagetable

        /* Restore the Link register */
        mov     x30, x5
        ret

/*
 * Builds an L0 -> L1 table descriptor
 *
 * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
 * within it by link_l1_pagetable.
 *
 * x6  = L0 table
 * x8  = Virtual Address
 * x9  = L1 PA (trashed)
 * x10 = Entry count
 * x11, x12 and x13 are trashed
 *
 * Consecutive entries point at consecutive pages starting at x9.
 * NOTE(review): the index (x11) is incremented without wrap handling;
 * callers must not let x10 entries run past the end of the table.
 */
link_l0_pagetable:
        /*
         * Link an L0 -> L1 table entry.
         */
        /* Find the table index */
        lsr     x11, x8, #L0_SHIFT
        and     x11, x11, #Ln_ADDR_MASK

        /* Build the L0 block entry */
        mov     x12, #L0_TABLE

        /* Only use the output address bits */
        lsr     x9, x9, #PAGE_SHIFT
1:      orr     x13, x12, x9, lsl #PAGE_SHIFT

        /* Store the entry */
        str     x13, [x6, x11, lsl #3]

        sub     x10, x10, #1
        add     x11, x11, #1
        add     x9, x9, #1              /* next page (PA in page units) */
        cbnz    x10, 1b

        ret

/*
 * Builds an L1 -> L2 table descriptor
 *
 * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
 * within it by build_l2_block_pagetable.
 *
 * x6  = L1 table
 * x8  = Virtual Address
 * x9  = L2 PA (trashed)
 * x10 = Entry Count
 * x11, x12 and x13 are trashed
 *
 * Same structure as link_l0_pagetable, shifted one level down.
 */
link_l1_pagetable:
        /*
         * Link an L1 -> L2 table entry.
         */
        /* Find the table index */
        lsr     x11, x8, #L1_SHIFT
        and     x11, x11, #Ln_ADDR_MASK

        /* Build the L1 block entry */
        mov     x12, #L1_TABLE

        /* Only use the output address bits */
        lsr     x9, x9, #PAGE_SHIFT
1:      orr     x13, x12, x9, lsl #PAGE_SHIFT

        /* Store the entry */
        str     x13, [x6, x11, lsl #3]

        sub     x10, x10, #1
        add     x11, x11, #1
        add     x9, x9, #1              /* next page (PA in page units) */
        cbnz    x10, 1b

        ret

/*
 * Builds count 2 MiB block page table entries
 *
 * x6  = L2 table
 * x7  = Memory attribute index (DEVICE_MEM, NORMAL_UNCACHED or NORMAL_MEM;
 *       the callers in this file pass NORMAL_MEM)
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count
 * x11, x12 and x13 are trashed
 *
 * Each entry maps a 2MiB block: non-global, access-flag set, inner
 * shareable, execute-never for EL0 (ATTR_UXN).
 */
build_l2_block_pagetable:
        /*
         * Build the L2 table entry.
         */
        /* Find the table index */
        lsr     x11, x8, #L2_SHIFT
        and     x11, x11, #Ln_ADDR_MASK

        /* Build the L2 block entry (x7 shifted into the AttrIndx field) */
        lsl     x12, x7, #2
        orr     x12, x12, #L2_BLOCK
        orr     x12, x12, #(ATTR_nG | ATTR_AF | ATTR_SH(SH_INNER))
        orr     x12, x12, #ATTR_UXN

        /* Only use the output address bits */
        lsr     x9, x9, #L2_SHIFT

        /* Set the physical address for this virtual address */
1:      orr     x13, x12, x9, lsl #L2_SHIFT

        /* Store the entry */
        str     x13, [x6, x11, lsl #3]

        sub     x10, x10, #1
        add     x11, x11, #1
        add     x9, x9, #1              /* next 2MiB block (PA in block units) */
        cbnz    x10, 1b

        ret