xref: /freebsd/sys/arm64/arm64/efirt_machdep.c (revision 315ee00f)
/*-
 * Copyright (c) 2004 Marcel Moolenaar
 * Copyright (c) 2001 Doug Rabson
 * Copyright (c) 2016 The FreeBSD Foundation
 * Copyright (c) 2017 Andrew Turner
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/pte.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

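/*
 * State for the EFI runtime services 1:1 map: the VM object providing the
 * page-table pages, the next free page index within that object, the root
 * (L0) table, and the TTBR0 value used to activate the map.
 */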
static vm_object_t obj_1t1_pt;
static vm_pindex_t efi_1t1_idx;
static pd_entry_t *efi_l0;
static uint64_t efi_ttbr0;

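/*
 * Tear down the 1:1 map: drop the wirings on the page-table pages so that
 * deallocating the backing object frees them, then reset the module state.
 */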
void
efi_destroy_1t1_map(void)
{
	vm_page_t m;

	if (obj_1t1_pt != NULL) {
		VM_OBJECT_RLOCK(obj_1t1_pt);
		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
			m->ref_count = VPRC_OBJREF;
		vm_wire_sub(obj_1t1_pt->resident_page_count);
		VM_OBJECT_RUNLOCK(obj_1t1_pt);
		vm_object_deallocate(obj_1t1_pt);
	}

	obj_1t1_pt = NULL;
	efi_1t1_idx = 0;
	efi_l0 = NULL;
	efi_ttbr0 = 0;
}

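/*
 * Allocate the next page-table page: a wired, zero-filled page taken from
 * obj_1t1_pt at the next free index.
 */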
static vm_page_t
efi_1t1_page(void)
{

	return (vm_page_grab(obj_1t1_pt, efi_1t1_idx++, VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO));
}

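/*
 * Return a pointer to the L3 (leaf) entry for va in the EFI page table,
 * allocating any missing L0/L1/L2 table pages on the way down.
 */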
static pt_entry_t *
efi_1t1_l3(vm_offset_t va)
{
	pd_entry_t *l0, *l1, *l2;
	pt_entry_t *l3;
	vm_pindex_t l0_idx, l1_idx, l2_idx;
	vm_page_t m;
	vm_paddr_t mphys;

	l0_idx = pmap_l0_index(va);
	l0 = &efi_l0[l0_idx];
	if (*l0 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l0 = PHYS_TO_PTE(mphys) | L0_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l0);
	}

	l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l1_idx = pmap_l1_index(va);
	l1 += l1_idx;
	if (*l1 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l1 = PHYS_TO_PTE(mphys) | L1_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l1);
	}

	l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l2_idx = pmap_l2_index(va);
	l2 += l2_idx;
	if (*l2 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l2 = PHYS_TO_PTE(mphys) | L2_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l2);
	}

	l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	l3 += pmap_l3_index(va);
	KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
	    va, *l3));

	return (l3);
}

/*
 * Map a physical address from EFI runtime space into KVA space.  Returns 0 to
 * indicate a failed mapping so that the caller may handle the error.
 */
vm_offset_t
efi_phys_to_kva(vm_paddr_t paddr)
{
	vm_offset_t vaddr;

	if (PHYS_IN_DMAP(paddr)) {
		vaddr = PHYS_TO_DMAP(paddr);
		if (pmap_klookup(vaddr, NULL))
			return (vaddr);
	}

	/* TODO: Map memory not in the DMAP */

	return (0);
}

/*
 * Create the 1:1 virtual to physical map for EFI
 */
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *l3, l3_attr;
	vm_offset_t va;
	vm_page_t efi_l0_page;
	uint64_t idx;
	int i, mode;
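
	/*
	 * Create the VM object that provides the page-table pages for the
	 * 1:1 map, then allocate the root (L0) page and build the TTBR0
	 * value from it and the reserved EFI ASID.
	 */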
	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, L0_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES + L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES * Ln_ENTRIES,
	    VM_PROT_ALL, 0, NULL);
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_l0_page = efi_1t1_page();
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
	efi_ttbr0 = ASID_TO_OPERAND(ASID_RESERVED_FOR_EFI) |
	    VM_PAGE_TO_PHYS(efi_l0_page);

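	/*
	 * Walk the firmware memory map and build a 1:1 (VA == PA) mapping
	 * for every descriptor marked for runtime use, rejecting entries
	 * that are already mapped elsewhere, misaligned, or outside the
	 * mappable range.
	 */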
	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != 0 && p->md_virt != p->md_phys) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not mappable for RT: "
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
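		/*
		 * Choose the memory attribute from the descriptor's
		 * cacheability flags, falling back to Device memory.
		 */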
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else
			mode = VM_MEMATTR_DEVICE;

		if (bootverbose) {
			printf("MAP %lx mode %x pages %lu\n",
			    p->md_phys, mode, p->md_pages);
		}

		l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
		if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
			l3_attr |= ATTR_S1_XN;

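		/*
		 * Install one L3 entry per kernel page; idx advances by the
		 * number of EFI (4K) pages each kernel page covers.
		 */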
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages;
		    idx += (PAGE_SIZE / EFI_PAGE_SIZE), va += PAGE_SIZE) {
			l3 = efi_1t1_l3(va);
			*l3 = va | l3_attr;
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);
fail:
	efi_destroy_1t1_map();
	return (false);
}

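/*
 * Switch onto the EFI page table before a runtime services call.  The
 * caller must have entered a critical section.
 */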
int
efi_arch_enter(void)
{

	CRITICAL_ASSERT(curthread);

	/*
	 * Temporarily switch to EFI's page table.  However, we leave curpmap
	 * unchanged in order to prevent its ASID from being reclaimed before
	 * we switch back to its page table in efi_arch_leave().
	 */
	set_ttbr0(efi_ttbr0);
	if (PCPU_GET(bcast_tlbi_workaround) != 0)
		invalidate_local_icache();

	return (0);
}

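/*
 * Undo efi_arch_enter(): switch back to the kernel's page table after the
 * runtime services call returns.
 */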
void
efi_arch_leave(void)
{

	/*
	 * Restore the pcpu pointer. Some UEFI implementations trash it and
	 * we don't store it before calling into them. To fix this we need
	 * to restore it after returning to the kernel context. As reading
	 * curpmap will access x18, we need to restore it before loading
	 * the pmap pointer.
	 */
	__asm __volatile(
	    "mrs x18, tpidr_el1	\n"
	);
	set_ttbr0(pmap_to_ttbr0(PCPU_GET(curpmap)));
	if (PCPU_GET(bcast_tlbi_workaround) != 0)
		invalidate_local_icache();
}

int
efi_rt_arch_call(struct efirt_callinfo *ec)
{

	panic("not implemented");
}