1 /*-
2  * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3  * Copyright (c) 2014 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 #define __ELF_WORD_SIZE 64
30 #include <sys/param.h>
31 #include <sys/exec.h>
32 #include <sys/linker.h>
33 #include <string.h>
34 #include <machine/elf.h>
35 #include <stand.h>
36 #include <vm/vm.h>
37 #include <vm/pmap.h>
38 
39 #include <efi.h>
40 #include <efilib.h>
41 
42 #include "bootstrap.h"
43 
44 #include "loader_efi.h"
45 
/*
 * Provided by bootinfo.c: lays down the module metadata in the staging
 * area, optionally calls ExitBootServices() (exit_bs), and returns the
 * physical addresses of the metadata (modulep) and end of the loaded
 * set (kernendp).
 */
extern int bi_load(char *args, vm_offset_t *modulep, vm_offset_t *kernendp,
    bool exit_bs);

/* Exec handlers for the two ELF64 flavors; defined below. */
static int	elf64_exec(struct preloaded_file *amp);
static int	elf64_obj_exec(struct preloaded_file *amp);
51 
/* Loader hooks for a fully linked (ET_EXEC/ET_DYN) ELF64 kernel. */
static struct file_format amd64_elf = {
	.l_load = elf64_loadfile,
	.l_exec = elf64_exec,
};
/* Loader hooks for relocatable-object (ET_REL) ELF64 images. */
static struct file_format amd64_elf_obj = {
	.l_load = elf64_obj_loadfile,
	.l_exec = elf64_obj_exec,
};
60 
/* Multiboot2 handlers, defined elsewhere in the loader. */
extern struct file_format multiboot2;
extern struct file_format multiboot2_obj;

/*
 * Probe order for kernel images: multiboot2 variants first, then the
 * native FreeBSD ELF64 handlers.  NULL-terminated.
 */
struct file_format *file_formats[] = {
	&multiboot2,
	&multiboot2_obj,
	&amd64_elf,
	&amd64_elf_obj,
	NULL
};
71 
/*
 * Page-table pages built by elf64_exec() for the trampoline's address
 * space.  PT4/PT3/PT2 are used for the copy_staging case (every 1G
 * aliases physical 0-1G); PT3_l/PT3_u and the PT2_* pages are used for
 * the non-copying case (1:1 low 4G plus the kernel map 2G below the
 * top of the address space).
 */
static pml4_entry_t *PT4;
static pdp_entry_t *PT3;
static pdp_entry_t *PT3_l, *PT3_u;
static pd_entry_t *PT2;
static pd_entry_t *PT2_l0, *PT2_l1, *PT2_l2, *PT2_l3, *PT2_u0, *PT2_u1;

/* Physical base of the staging area where the kernel was loaded. */
extern EFI_PHYSICAL_ADDRESS staging;

/*
 * Entry point of the relocated trampoline code; its real definition is
 * the assembly blob amd64_tramp copied into a fresh page below.
 */
static void (*trampoline)(uint64_t stack, void *copy_finish, uint64_t kernend,
    uint64_t modulep, pml4_entry_t *pagetable, uint64_t entry);

/* Assembly trampoline blob and its size, from amd64_tramp.S. */
extern uintptr_t amd64_tramp;
extern uint32_t amd64_tramp_size;
85 
86 /*
87  * There is an ELF kernel and one or more ELF modules loaded.
88  * We wish to start executing the kernel image, so make such
89  * preparations as are required, and do so.
90  */
91 static int
92 elf64_exec(struct preloaded_file *fp)
93 {
94 	struct file_metadata	*md;
95 	Elf_Ehdr 		*ehdr;
96 	vm_offset_t		modulep, kernend, trampcode, trampstack;
97 	int			err, i;
98 	bool			copy_auto;
99 
100 	copy_auto = copy_staging == COPY_STAGING_AUTO;
101 	if (copy_auto)
102 		copy_staging = fp->f_kernphys_relocatable ?
103 		    COPY_STAGING_DISABLE : COPY_STAGING_ENABLE;
104 
105 	if ((md = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL)
106 		return (EFTYPE);
107 	ehdr = (Elf_Ehdr *)&(md->md_data);
108 
109 	trampcode = copy_staging == COPY_STAGING_ENABLE ?
110 	    (vm_offset_t)0x0000000040000000 /* 1G */ :
111 	    (vm_offset_t)0x0000000100000000; /* 4G */;
112 	err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 1,
113 	    (EFI_PHYSICAL_ADDRESS *)&trampcode);
114 	if (EFI_ERROR(err)) {
115 		printf("Unable to allocate trampoline\n");
116 		if (copy_auto)
117 			copy_staging = COPY_STAGING_AUTO;
118 		return (ENOMEM);
119 	}
120 	bzero((void *)trampcode, EFI_PAGE_SIZE);
121 	trampstack = trampcode + EFI_PAGE_SIZE - 8;
122 	bcopy((void *)&amd64_tramp, (void *)trampcode, amd64_tramp_size);
123 	trampoline = (void *)trampcode;
124 
125 	if (copy_staging == COPY_STAGING_ENABLE) {
126 		PT4 = (pml4_entry_t *)0x0000000040000000; /* 1G */
127 		err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 3,
128 		    (EFI_PHYSICAL_ADDRESS *)&PT4);
129 		if (EFI_ERROR(err)) {
130 			printf("Unable to allocate trampoline page table\n");
131 			BS->FreePages(trampcode, 1);
132 			if (copy_auto)
133 				copy_staging = COPY_STAGING_AUTO;
134 			return (ENOMEM);
135 		}
136 		bzero(PT4, 3 * EFI_PAGE_SIZE);
137 		PT3 = &PT4[512];
138 		PT2 = &PT3[512];
139 
140 		/*
141 		 * This is kinda brutal, but every single 1GB VM
142 		 * memory segment points to the same first 1GB of
143 		 * physical memory.  But it is more than adequate.
144 		 */
145 		for (i = 0; i < NPTEPG; i++) {
146 			/*
147 			 * Each slot of the L4 pages points to the
148 			 * same L3 page.
149 			 */
150 			PT4[i] = (pml4_entry_t)PT3;
151 			PT4[i] |= PG_V | PG_RW;
152 
153 			/*
154 			 * Each slot of the L3 pages points to the
155 			 * same L2 page.
156 			 */
157 			PT3[i] = (pdp_entry_t)PT2;
158 			PT3[i] |= PG_V | PG_RW;
159 
160 			/*
161 			 * The L2 page slots are mapped with 2MB pages for 1GB.
162 			 */
163 			PT2[i] = (pd_entry_t)i * (2 * 1024 * 1024);
164 			PT2[i] |= PG_V | PG_RW | PG_PS;
165 		}
166 	} else {
167 		PT4 = (pml4_entry_t *)0x0000000100000000; /* 4G */
168 		err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 9,
169 		    (EFI_PHYSICAL_ADDRESS *)&PT4);
170 		if (EFI_ERROR(err)) {
171 			printf("Unable to allocate trampoline page table\n");
172 			BS->FreePages(trampcode, 9);
173 			if (copy_auto)
174 				copy_staging = COPY_STAGING_AUTO;
175 			return (ENOMEM);
176 		}
177 
178 		bzero(PT4, 9 * EFI_PAGE_SIZE);
179 
180 		PT3_l = &PT4[NPML4EPG * 1];
181 		PT3_u = &PT4[NPML4EPG * 2];
182 		PT2_l0 = &PT4[NPML4EPG * 3];
183 		PT2_l1 = &PT4[NPML4EPG * 4];
184 		PT2_l2 = &PT4[NPML4EPG * 5];
185 		PT2_l3 = &PT4[NPML4EPG * 6];
186 		PT2_u0 = &PT4[NPML4EPG * 7];
187 		PT2_u1 = &PT4[NPML4EPG * 8];
188 
189 		/* 1:1 mapping of lower 4G */
190 		PT4[0] = (pml4_entry_t)PT3_l | PG_V | PG_RW;
191 		PT3_l[0] = (pdp_entry_t)PT2_l0 | PG_V | PG_RW;
192 		PT3_l[1] = (pdp_entry_t)PT2_l1 | PG_V | PG_RW;
193 		PT3_l[2] = (pdp_entry_t)PT2_l2 | PG_V | PG_RW;
194 		PT3_l[3] = (pdp_entry_t)PT2_l3 | PG_V | PG_RW;
195 		for (i = 0; i < 4 * NPDEPG; i++) {
196 			PT2_l0[i] = ((pd_entry_t)i << PDRSHIFT) | PG_V |
197 			    PG_RW | PG_PS;
198 		}
199 
200 		/* mapping of kernel 2G below top */
201 		PT4[NPML4EPG - 1] = (pml4_entry_t)PT3_u | PG_V | PG_RW;
202 		PT3_u[NPDPEPG - 2] = (pdp_entry_t)PT2_u0 | PG_V | PG_RW;
203 		PT3_u[NPDPEPG - 1] = (pdp_entry_t)PT2_u1 | PG_V | PG_RW;
204 		/* compat mapping of phys @0 */
205 		PT2_u0[0] = PG_PS | PG_V | PG_RW;
206 		/* this maps past staging area */
207 		for (i = 1; i < 2 * NPDEPG; i++) {
208 			PT2_u0[i] = ((pd_entry_t)staging +
209 			    ((pd_entry_t)i - 1) * NBPDR) |
210 			    PG_V | PG_RW | PG_PS;
211 		}
212 	}
213 
214 	printf("staging %#lx (%scopying) tramp %p PT4 %p\n",
215 	    staging, copy_staging == COPY_STAGING_ENABLE ? "" : "not ",
216 	    trampoline, PT4);
217 	printf("Start @ 0x%lx ...\n", ehdr->e_entry);
218 
219 	efi_time_fini();
220 	err = bi_load(fp->f_args, &modulep, &kernend, true);
221 	if (err != 0) {
222 		efi_time_init();
223 		if (copy_auto)
224 			copy_staging = COPY_STAGING_AUTO;
225 		return (err);
226 	}
227 
228 	dev_cleanup();
229 
230 	trampoline(trampstack, copy_staging == COPY_STAGING_ENABLE ?
231 	    efi_copy_finish : efi_copy_finish_nop, kernend, modulep,
232 	    PT4, ehdr->e_entry);
233 
234 	panic("exec returned");
235 }
236 
237 static int
238 elf64_obj_exec(struct preloaded_file *fp)
239 {
240 
241 	return (EFTYPE);
242 }
243