xref: /freebsd/sys/arm64/arm64/efirt_machdep.c (revision 4f52dfbb)
/*-
 * Copyright (c) 2004 Marcel Moolenaar
 * Copyright (c) 2001 Doug Rabson
 * Copyright (c) 2016 The FreeBSD Foundation
 * Copyright (c) 2017 Andrew Turner
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/pte.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

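/*
 * State for the EFI 1:1 map: the VM object providing the page-table
 * pages, the L0 (root) table page and its DMAP alias, and the next
 * free page index within the object.
 */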
static vm_object_t obj_1t1_pt;
static vm_page_t efi_l0_page;
static pd_entry_t *efi_l0;
static vm_pindex_t efi_1t1_idx;

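/*
 * Tear down the 1:1 map.  Unwire the page-table pages so that freeing
 * the backing object releases them.
 */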
void
efi_destroy_1t1_map(void)
{
	vm_page_t m;

	if (obj_1t1_pt != NULL) {
		VM_OBJECT_RLOCK(obj_1t1_pt);
		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
			m->wire_count = 0;
		vm_wire_sub(obj_1t1_pt->resident_page_count);
		VM_OBJECT_RUNLOCK(obj_1t1_pt);
		vm_object_deallocate(obj_1t1_pt);
	}

	obj_1t1_pt = NULL;
	efi_l0 = NULL;
	efi_l0_page = NULL;
}

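/* Grab the next zero-filled, wired page for use as a page-table page. */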
static vm_page_t
efi_1t1_page(void)
{

	return (vm_page_grab(obj_1t1_pt, efi_1t1_idx++, VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO));
}

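/*
 * Walk the EFI page table for the given VA, allocating intermediate
 * table pages on demand, and return a pointer to the L3 (leaf) PTE.
 */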
static pt_entry_t *
efi_1t1_l3(vm_offset_t va)
{
	pd_entry_t *l0, *l1, *l2;
	pt_entry_t *l3;
	vm_pindex_t l0_idx, l1_idx, l2_idx;
	vm_page_t m;
	vm_paddr_t mphys;

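	/* L0: allocate an L1 table page on first use. */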
	l0_idx = pmap_l0_index(va);
	l0 = &efi_l0[l0_idx];
	if (*l0 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l0 = mphys | L0_TABLE;
	} else {
		mphys = *l0 & ~ATTR_MASK;
	}

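	/* L1: descend through the DMAP alias of the table just found. */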
	l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l1_idx = pmap_l1_index(va);
	l1 += l1_idx;
	if (*l1 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l1 = mphys | L1_TABLE;
	} else {
		mphys = *l1 & ~ATTR_MASK;
	}

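	/* L2: the last table level before the 4K leaf entries. */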
	l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l2_idx = pmap_l2_index(va);
	l2 += l2_idx;
	if (*l2 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l2 = mphys | L2_TABLE;
	} else {
		mphys = *l2 & ~ATTR_MASK;
	}

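	/* L3: the leaf PTE for va, which must not already be valid. */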
	l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	l3 += pmap_l3_index(va);
	KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
	    va, *l3));

	return (l3);
}

/*
 * Create the 1:1 virtual to physical map for EFI
 */
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *l3;
	vm_offset_t va;
	uint64_t idx;
	int i, mode;

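	/*
	 * Size the backing object to an upper bound on the number of
	 * page-table pages a fully populated four-level table can need.
	 */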
	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, L0_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES + L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES * Ln_ENTRIES,
	    VM_PROT_ALL, 0, NULL);
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_1t1_idx = 0;
	efi_l0_page = efi_1t1_page();
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
	bzero(efi_l0, L0_ENTRIES * sizeof(*efi_l0));

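	/*
	 * Walk the firmware memory map and build a 1:1 mapping for every
	 * descriptor that is needed at runtime, rejecting entries that
	 * cannot be mapped safely.
	 */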
	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != NULL && (uint64_t)p->md_virt != p->md_phys) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not mappable for RT: "
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else if ((p->md_attr & EFI_MD_ATTR_UC) != 0)
			mode = VM_MEMATTR_UNCACHEABLE;
		else {
			if (bootverbose)
				printf("EFI Runtime entry %d mapping "
				    "attributes unsupported\n", i);
			mode = VM_MEMATTR_UNCACHEABLE;
		}

		if (bootverbose)
			printf("MAP %lx mode %x pages %lu\n", p->md_phys,
			    mode, p->md_pages);
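		/* Install a writable leaf PTE for each page of the entry. */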
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
		    va += PAGE_SIZE) {
			l3 = efi_1t1_l3(va);
			*l3 = va | ATTR_DEFAULT | ATTR_IDX(mode) |
			    ATTR_AP(ATTR_AP_RW) | L3_PAGE;
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);
fail:
	efi_destroy_1t1_map();
	return (false);
}

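/*
 * Point ttbr0_el1 at the EFI L0 table and invalidate the stale TLB
 * entries so the 1:1 map is live for the runtime service call.
 */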
int
efi_arch_enter(void)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0	\n"
	    "dsb  ishst		\n"
	    "tlbi vmalle1is	\n"
	    "dsb  ish		\n"
	    "isb		\n"
	     : : "r"(VM_PAGE_TO_PHYS(efi_l0_page)));

	return (0);
}

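/*
 * Restore the current process's lower-half page table in ttbr0_el1
 * and invalidate the TLB again.
 */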
void
efi_arch_leave(void)
{
	struct thread *td;

	td = curthread;
	__asm __volatile(
	    "msr ttbr0_el1, %0	\n"
	    "dsb  ishst		\n"
	    "tlbi vmalle1is	\n"
	    "dsb  ish		\n"
	    "isb		\n"
	     : : "r"(td->td_proc->p_md.md_l0addr));
}