/*-
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/amd64/mpboot.S 130224 2004-06-08 01:02:52Z peter $
 */

#include <machine/asmacros.h>		/* miscellaneous asm macros */
#include <machine/specialreg.h>

#include "assym.s"

	.data				/* So we can modify it */

	.p2align 4,0
	.globl	mptramp_start
mptramp_start:
	.code16
	/*
	 * The AP enters here in response to the startup IPI.
	 * We are in real mode. %cs is the only segment register set.
	 */
	cli				/* make sure no interrupts */
	mov	%cs, %ax		/* copy %cs to %ds. Remember these */
	mov	%ax, %ds		/* are offsets rather than selectors */
	mov	%ax, %ss

	/*
	 * Find relocation base and patch the gdt descriptor and ljmp targets.
	 */
	xorl	%ebx, %ebx
	mov	%cs, %bx
	sall	$4, %ebx		/* %ebx is now our relocation base */
	orl	%ebx, lgdt_desc-mptramp_start+2
	orl	%ebx, jmp_32-mptramp_start+2
	orl	%ebx, jmp_64-mptramp_start+1
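
	/*
	 * A note on the three patch offsets above, which follow from the
	 * layouts defined later in this file: lgdt_desc+2 skips the 16 bit
	 * limit word to reach the 32 bit base, jmp_32+2 skips the 0x66
	 * size prefix and the 0xea opcode, and jmp_64+1 skips the lone
	 * 0xea opcode. Each orl thus adds the relocation base directly
	 * into the 32 bit address field it points at.
	 */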

	/*
	 * Load the descriptor table pointer. We'll need it when running
	 * in 16 bit protected mode.
	 */
	lgdt	lgdt_desc-mptramp_start

	/* Enable protected mode */
	movl	$CR0_PE, %eax
	mov	%eax, %cr0

	/*
	 * Now execute a far jump to turn on protected mode. This
	 * causes the segment registers to turn into selectors and causes
	 * %cs to be loaded from the gdt.
	 *
	 * The following instruction is:
	 * ljmpl $bootcode-gdt, $protmode-mptramp_start
	 * but gas cannot assemble that. And besides, we patch the targets
	 * in early startup and it's a little clearer what we are patching.
	 */
jmp_32:
	.byte	0x66			/* size override to 32 bits */
	.byte	0xea			/* opcode for far jump */
	.long	protmode-mptramp_start	/* offset in segment */
	.word	bootcode-gdt		/* index in gdt for 32 bit code */

	/*
	 * At this point, we are running in 32 bit legacy protected mode.
	 */
	.code32
protmode:
	mov	$bootdata-gdt, %eax
	mov	%ax, %ds

	/* Turn on the PAE and PSE bits for when paging is enabled */
	mov	%cr4, %eax
	orl	$(CR4_PAE | CR4_PSE), %eax
	mov	%eax, %cr4

	/*
	 * Enable EFER.LME so that we get long mode when all the prereqs
	 * are in place. In this case, it turns on when CR0_PG is finally
	 * enabled. Pick up a few other EFER bits that we'll need while
	 * we're here.
	 */
	movl	$MSR_EFER, %ecx
	rdmsr
	orl	$EFER_LME | EFER_SCE, %eax
	wrmsr

	/*
	 * Point to the embedded page tables for startup. Note that this
	 * only gets accessed after we're actually in 64 bit mode, however
	 * we can only set the bottom 32 bits of %cr3 in this state. This
	 * means we are required to use a temporary page table that is below
	 * the 4GB limit. %ebx is still our relocation base. We could just
	 * subtract 3 * PAGE_SIZE, but that would be too easy.
	 */
	leal	mptramp_pagetables-mptramp_start(%ebx), %eax
	movl	(%eax), %eax
	mov	%eax, %cr3

	/*
	 * Finally, switch to long mode by enabling paging. We have
	 * to be very careful here because all the segmentation disappears
	 * out from underneath us. The spec says we can depend on the
	 * subsequent pipelined branch to execute, but *only if* everything
	 * is still identity mapped. If any mappings change, the pipeline
	 * will flush.
	 */
	mov	%cr0, %eax
	orl	$CR0_PG, %eax
	mov	%eax, %cr0

	/*
	 * At this point paging is enabled, and we are in "compatibility"
	 * mode. We do another far jump to reload %cs with the 64 bit
	 * selector. %cr3 points to a 4-level page table page.
	 * We cannot yet jump all the way to the kernel because we can only
	 * specify a 32 bit linear address. So, yet another trampoline.
	 *
	 * The following instruction is:
	 * ljmp $kernelcode-gdt, $tramp_64-mptramp_start
	 * but gas cannot assemble that. And besides, we patch the targets
	 * in early startup and it's a little clearer what we are patching.
	 */
jmp_64:
	.byte	0xea			/* opcode for far jump */
	.long	tramp_64-mptramp_start	/* offset in segment */
	.word	kernelcode-gdt		/* index in gdt for 64 bit code */

	/*
	 * Yeehar! We're running in 64 bit mode! We can mostly ignore our
	 * segment registers, and get on with it.
	 * Note that we are running at the correct virtual address, but with
	 * a 1:1 1GB mirrored mapping over the entire address space. We had
	 * better switch to a real %cr3 promptly so that we can get to the
	 * direct map space. Remember that jmp is relative and that we've
	 * been relocated, so use an indirect jump.
	 */
	.code64
tramp_64:
	movabsq	$entry_64, %rax		/* 64 bit immediate load */
	jmp	*%rax

	.p2align 4,0
gdt:
	/*
	 * All segment descriptor tables start with a null descriptor.
	 */
	.long	0x00000000
	.long	0x00000000

	/*
	 * This is the 64 bit long mode code descriptor. There is no
	 * 64 bit data descriptor.
	 */
kernelcode:
	.long	0x00000000
	.long	0x00209800
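
	/*
	 * Decoded in the same style as the boot descriptors below:
	 * %cs: -A, -R, -C, DPL=0, +P, +L, -D, -G
	 * In 64 bit mode the base, limit, D and G bits of %cs are
	 * ignored; the L bit is what selects long mode execution.
	 */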

	/*
	 * This is the descriptor for the 32 bit boot code.
	 * %cs: +A, +R, -C, DPL=0, +P, +D, +G
	 * Accessed, Readable, Present, 32 bit, 4G granularity
	 */
bootcode:
	.long	0x0000ffff
	.long	0x00cf9b00

	/*
	 * This is the descriptor for the 32 bit boot data.
	 * We load it into %ds and %ss. The bits for each selector
	 * are interpreted slightly differently.
	 * %ds: +A, +W, -E, DPL=0, +P, +D, +G
	 * %ss: +A, +W, -E, DPL=0, +P, +B, +G
	 * Accessed, Writeable, Expand up, Present, 32 bit, 4GB
	 * For %ds, +D means 'default operand size is 32 bit'.
	 * For %ss, +B means the stack register is %esp rather than %sp.
	 */
bootdata:
	.long	0x0000ffff
	.long	0x00cf9300

gdtend:

	/*
	 * The address of our page table pages that the boot code
	 * uses to trampoline up to kernel address space.
	 */
	.globl	mptramp_pagetables
mptramp_pagetables:
	.long	0

	/*
	 * The pseudo descriptor for lgdt to use.
	 */
lgdt_desc:
	.word	gdtend-gdt		/* Length */
	.long	gdt-mptramp_start	/* Offset plus %ds << 4 */

	.globl	mptramp_end
mptramp_end:

	/*
	 * From here on down is executed in the kernel .text section.
	 *
	 * Load a real %cr3 that has all the direct map stuff and switches
	 * off the 1GB replicated mirror. Load a stack pointer and jump
	 * into AP startup code in C.
	 */
	.text
	.code64
	.p2align 4,0
entry_64:
	movq	KPML4phys, %rax
	movq	%rax, %cr3
	movq	bootSTK, %rsp
	call	init_secondary
	/*
	 * Execute the context restore function for the idlethread, which
	 * has conveniently been set as curthread. Remember, %rax must
	 * contain the target thread and %rbx must contain the originating
	 * thread (which we just set the same, since we have no originating
	 * thread). BSP/AP synchronization occurs in ap_init(). We do
	 * not need to mess with the BGL for this because LWKT threads are
	 * self-contained on each cpu (or, at least, the idlethread is!).
	 */
	movq	PCPU(curthread), %rax
	movq	%rax, %rbx
	movq	TD_SP(%rax), %rsp
	ret