/*	$NetBSD: x86_xpmap.c,v 1.92 2022/08/20 23:48:51 riastradh Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.92 2022/08/20 23:48:51 riastradh Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>

#include <machine/gdt.h>
#include <machine/pmap_private.h>

#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#ifdef XENDEBUG
#define	__PRINTK(x) printk x
#else
#define	__PRINTK(x)
#endif

/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);

volatile shared_info_t *HYPERVISOR_shared_info __read_mostly;
unsigned long *xpmap_phys_to_machine_mapping __read_mostly;
kmutex_t pte_lock __cacheline_aligned;
vaddr_t xen_dummy_page;
pt_entry_t xpmap_pg_nx __read_mostly;

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];

void xen_failsafe_handler(void);

extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);

vaddr_t xen_locore(void);

/*
 * kcpuset internally uses an array of uint32_t while Xen uses an array of
 * u_long. Since we're little-endian, we can cast one to the other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;
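
/*
 * Illustrative use (this is exactly what xen_mcast_invlpg() and
 * xen_mcast_tlbflush() below do): export the kcpuset into the uint32_t
 * view, then hand the u_long view to Xen as the vcpumask:
 *
 *	xcpumask_t xcpumask;
 *	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
 *	set_xen_guest_handle(op.arg2.vcpumask, &xcpumask.xcpum_xm);
 */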

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif
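
	/*
	 * Both branches compute the size of the LDT in bytes: LDT descriptors
	 * are 8 bytes each, hence the << 3 on amd64 and
	 * sizeof(union descriptor) on i386.
	 */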

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		pmap_pte_clearbits(ptp, PTE_W);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

void
xpq_flush_queue(void)
{
	mmu_update_t *xpq_queue;
	int done = 0, ret;
	size_t xpq_idx;

	KASSERT(curcpu()->ci_ilevel >= IPL_VM || cold);

	xpq_idx = curcpu()->ci_xpq_idx;
	xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];

retry:
	ret = HYPERVISOR_mmu_update(xpq_queue, xpq_idx, &done, DOMID_SELF);

	if (ret < 0 && xpq_idx != 0) {
		printf("xpq_flush_queue: %zu entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, done, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (done != 0) {
			xpq_queue += done;
			xpq_idx -= done;
			done = 0;
			goto retry;
		}

		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	curcpu()->ci_xpq_idx = 0;
}

static inline void
xpq_increment_idx(void)
{
	KASSERT(curcpu()->ci_ilevel >= IPL_VM || cold);
	if (__predict_false(++curcpu()->ci_xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}
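
/*
 * Typical use of the update queue, for illustration (the MFNs and values
 * below are placeholders): callers batch updates and flush them with a
 * single hypercall, e.g.
 *
 *	s = splvm();
 *	xpq_queue_pte_update(ma, val);
 *	xpq_queue_pte_update(ma2, val2);
 *	xpq_flush_queue();
 *	splx(s);
 *
 * xpq_increment_idx() flushes automatically when the per-CPU queue is full.
 */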

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	size_t xpq_idx = curcpu()->ci_xpq_idx;

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	size_t xpq_idx = curcpu()->ci_xpq_idx;

	xpq_queue[xpq_idx].ptr = ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
}

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = lvl;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_UNPIN_TABLE;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_flush_cache(void)
{
	int s = splvm();

	xpq_flush_queue();

	asm("wbinvd":::"memory");
	splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	set_xen_guest_handle(op.arg2.vcpumask, &xcpumask.xcpum_xm);

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	set_xen_guest_handle(op.arg2.vcpumask, &xcpumask.xcpum_xm);

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_COPY_PAGE;
	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_pagezero(paddr_t pa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_CLEAR_PAGE;
	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom, u_int flags)
{
	mmu_update_t op;
	int ok;
	int err;

	xpq_flush_queue();

	op.ptr = ptr;
	if (flags & PMAP_MD_XEN_NOTR)
		op.ptr |= MMU_PT_UPDATE_NO_TRANSLATE;
	op.val = val;
	/*
	 * Here we return a negative error number as the Xen error to
	 * pmap_enter_ma(). Only calls from privcmd.c should end up here,
	 * and they can deal with it.
	 */
	if ((err = HYPERVISOR_mmu_update(&op, 1, &ok, dom)) < 0) {
		return err;
	}
	return 0;
}

#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

#ifdef __x86_64__
#define PDIRSZ	PTP_LEVELS
#else
/*
 * For PAE, we need an L3 page, a single contiguous L2 "superpage" of 4 pages
 * (all of them mapped by the L3 page), and a shadow page for L3[3].
 */
#define PDIRSZ	(1 + 4 + 1)
#endif

/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page
 * tables.
 *
 * Virtual address space of the kernel when leaving this function:
 * +--------------+------------------+-------------+------------+---------------
 * | KERNEL IMAGE | BOOTSTRAP TABLES | PROC0 UAREA | DUMMY PAGE | HYPER. SHARED
 * +--------------+------------------+-------------+------------+---------------
 *
 * ------+-----------------+-------------+
 *  INFO | EARLY ZERO PAGE | ISA I/O MEM |
 * ------+-----------------+-------------+
 *
 * DUMMY PAGE is either a PGD for amd64 or a GDT for i386.
 *
 * (HYPER. SHARED INFO + EARLY ZERO PAGE + ISA I/O MEM) have no physical
 * addresses preallocated.
 */
vaddr_t
xen_locore(void)
{
	size_t nL2, oldcount, mapsize;
	vaddr_t our_tables, xen_tables;
	u_int descs[4];

	xen_init_features();

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Set the NX/XD bit, if available. descs[3] = %edx. */
	x86_cpuid(0x80000001, descs);
	xpmap_pg_nx = (descs[3] & CPUID_NOX) ? PTE_NX : 0;

	/* Space after the Xen bootstrap tables should be free. */
	xen_tables = xen_start_info.pt_base;
	our_tables = xen_tables + (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = xen_tables - KERNTEXTOFF;

	/*
	 * After the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
	mapsize += PAGE_SIZE;
#endif
	mapsize += PAGE_SIZE;
	mapsize += PAGE_SIZE;
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	nL2 = TABLE_L2_ENTRIES;
#else
	nL2 = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
	while (KERNTEXTOFF + mapsize + (nL2 + PDIRSZ) * PAGE_SIZE >
	    KERNBASE + (nL2 << L2_SHIFT)) {
		nL2++;
	}
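
	/*
	 * The comparison above is: the highest VA we must map (KERNTEXTOFF +
	 * mapsize + the nL2 + PDIRSZ table pages themselves) vs. the highest
	 * VA that nL2 L2 entries can map from KERNBASE. Each extra L2 entry
	 * adds NBPD_L2 of coverage but only one more page to map, so this
	 * converges after a few iterations.
	 */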

#ifdef i386
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	nL2++;
	nkptp[1] = nL2;
#endif

	/*
	 * Install the bootstrap tables. They may need more L2 pages than the
	 * final tables will, since they are installed after (above) the final
	 * tables and must map themselves too.
	 */
	oldcount = nL2;

bootstrap_again:

	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables, so move the bootstrap tables further up if necessary.
	 */
	if (our_tables < xen_tables + ((nL2 + PDIRSZ) * PAGE_SIZE))
		our_tables = xen_tables + ((nL2 + PDIRSZ) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (our_tables + ((oldcount + PDIRSZ) * PAGE_SIZE) >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_tables, our_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(our_tables, xen_tables,
	    oldcount + PDIRSZ, nL2, true);

	/* Zero out PROC0 UAREA and DUMMY PAGE. */
	memset((void *)(xen_tables + ((nL2 + PDIRSZ) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (xen_tables + ((nL2 + PDIRSZ) * PAGE_SIZE));
}

/*
 * Build a new table and switch to it.
 * old_count is # of old tables (including L4, L3 and L2).
 * new_count is # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    size_t new_count, bool final)
{
	pd_entry_t *L4cpu, *L4, *L3, *L2, *pte;
	paddr_t addr;
	vaddr_t page, avail, map_end;
	int i;
	extern char __rodata_start;
	extern char __data_start;
	extern char __kernel_end;
	extern char *early_zerop; /* from pmap.c */
#ifdef i386
	extern union descriptor tmpgdt[];
#endif

	/*
	 * Layout of RW area after the kernel image:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + PDIRSZ entries)
	 * Extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only) / GDT page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     early_zerop
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + PDIRSZ) * PAGE_SIZE);
	if (final) {
		map_end += UPAGES * PAGE_SIZE;
		xen_dummy_page = (vaddr_t)map_end;
		map_end += PAGE_SIZE;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += PAGE_SIZE;
		early_zerop = (char *)map_end;
		map_end += PAGE_SIZE;
	}

	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final) {
		atdevbase = map_end;
#ifdef DOM0OPS
		if (xendomain_is_dom0()) {
			/* ISA I/O mem */
			map_end += IOM_SIZE;
		}
#endif
	}

	__PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	avail = new_pgd;

	/*
	 * Create our page tables.
	 */

#ifdef __x86_64__
	/* per-cpu L4 */
	L4cpu = (pd_entry_t *)avail;
	memset(L4cpu, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* pmap_kernel L4 */
	L4 = (pd_entry_t *)avail;
	memset(L4, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* L3 */
	L3 = (pd_entry_t *)avail;
	memset(L3, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* link L4->L3 */
	addr = ((u_long)L3) - KERNBASE;
	L4cpu[pl4_pi(KERNTEXTOFF)] = xpmap_ptom_masked(addr) | PTE_P | PTE_W;
	L4[pl4_pi(KERNTEXTOFF)] = xpmap_ptom_masked(addr) | PTE_P | PTE_W;

	/* L2 */
	L2 = (pd_entry_t *)avail;
	memset(L2, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* link L3->L2 */
	addr = ((u_long)L2) - KERNBASE;
	L3[pl3_pi(KERNTEXTOFF)] = xpmap_ptom_masked(addr) | PTE_P | PTE_W;
#else
	/* no L4 on i386PAE */
	__USE(L4cpu);
	__USE(L4);

	/* L3 */
	L3 = (pd_entry_t *)avail;
	memset(L3, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/*
	 * Our PAE-style level 2, 5 contiguous pages (4 L2 + 1 shadow).
	 *                  +-----------------+----------------+---------+
	 * Physical layout: | 3 * USERLAND L2 | L2 KERN SHADOW | L2 KERN |
	 *                  +-----------------+----------------+---------+
	 * However, we enter L3[3] into L2 KERN, and not L2 KERN SHADOW.
	 * This way, L2[L2_SLOT_KERN] always points to the shadow.
	 */
	L2 = (pd_entry_t *)avail;
	memset(L2, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;

	/*
	 * Link L2 pages in L3, with a special case for L2 KERN. Xen doesn't
	 * want RW permissions in L3 entries, it'll add them itself.
	 */
	addr = ((u_long)L2) - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		L3[i] = xpmap_ptom_masked(addr) | PTE_P;
	}
	addr += PAGE_SIZE;
	L3[3] = xpmap_ptom_masked(addr) | PTE_P;
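
	/*
	 * Note that the extra "addr += PAGE_SIZE" above skips the L2 KERN
	 * SHADOW page, so L3[3] points to L2 KERN, as described in the
	 * layout comment further up.
	 */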
#endif

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		memset(pte, 0, PAGE_SIZE);
		avail += PAGE_SIZE;

		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			}
#endif

			pte[pl1_pi(page)] |= PTE_P;
			if (page < (vaddr_t)&__rodata_start) {
				/* Map the kernel text RX. Nothing to do. */
			} else if (page >= (vaddr_t)&__rodata_start &&
			    page < (vaddr_t)&__data_start) {
				/* Map the kernel rodata R. */
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			} else if (page >= old_pgd &&
			    page < old_pgd + (old_count * PAGE_SIZE)) {
				/* Map the old page tables R. */
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + PDIRSZ) * PAGE_SIZE)) {
				/* Map the new page tables R. */
				pte[pl1_pi(page)] |= xpmap_pg_nx;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map bootstrap gdt R/O. Later, we will re-add
				 * this page to uvm after making it writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif
			} else if (page >= (vaddr_t)&__data_start &&
			    page < (vaddr_t)&__kernel_end) {
				/* Map the kernel data+bss RW. */
				pte[pl1_pi(page)] |= PTE_W | xpmap_pg_nx;
			} else {
				/* Map the page RW. */
				pte[pl1_pi(page)] |= PTE_W | xpmap_pg_nx;
			}

			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		L2[pl2_pi(cur_page)] = xpmap_ptom_masked(addr) | PTE_W | PTE_P;

		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}

	/* Install recursive page tables mapping */
#ifdef __x86_64__
	/* Recursive entry in pmap_kernel(). */
	L4[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)L4 - KERNBASE)
	    | PTE_P | xpmap_pg_nx;
	/* Recursive entry in higher-level per-cpu PD. */
	L4cpu[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)L4cpu - KERNBASE)
	    | PTE_P | xpmap_pg_nx;

	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)L2);
#else
	/* Copy L2 KERN into L2 KERN SHADOW, and reference the latter in cpu0. */
	memcpy(&L2[L2_SLOT_KERN + NPDPG], &L2[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &L2[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead, we enter
	 * the first 4 L2 pages, which includes the kernel's L2 shadow. But we
	 * have to enter the shadow after switching %cr3, or Xen will refcount
	 * some PTEs with the wrong type.
	 */
	addr = (u_long)L2 - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		L2[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PTE_P |
		    xpmap_pg_nx;
	}

	/* Mark tables RO, and pin L2 KERN SHADOW. */
	addr = (u_long)L2 - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)L2) + PAGE_SIZE * i);
	}
	if (final) {
		addr = (u_long)L2 - KERNBASE + 3 * PAGE_SIZE;
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#endif

	xen_bt_set_readonly((vaddr_t)L3);
#ifdef __x86_64__
	xen_bt_set_readonly((vaddr_t)L4cpu);
#endif

	/* Pin the PGD */
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef __x86_64__
	PDPpaddr = (u_long)L4 - KERNBASE;
#else
	PDPpaddr = (u_long)L2 - KERNBASE; /* PDP is the L2 with PAE */
#endif

	/* Switch to new tables */
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

	if (final) {
#ifdef __x86_64__
		/* Save the address of the real per-cpu L4 page. */
		cpu_info_primary.ci_kpm_pdir = L4cpu;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)L4cpu - KERNBASE);
#else
		/* Save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = L3;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* Now enter the kernel's PTE mappings */
		addr = (u_long)L2 - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&L2[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PTE_P);
		xpq_flush_queue();
#endif
	}
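
	/*
	 * In the PAE branch above, the entry written at L2[PDIR_SLOT_PTE + 3]
	 * points to the L2 KERN SHADOW page (offset 3 * PAGE_SIZE in the L2
	 * block); doing it only now, after the pt_switch, is the "enter the
	 * shadow after switching %cr3" rule mentioned earlier.
	 */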

	/*
	 * Now we can safely reclaim the space taken by the old tables.
	 */

	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));

	/* Mark old tables RW if used, unmap otherwise */
	page = old_pgd;
	addr = xpmap_mtop((paddr_t)L2[pl2_pi(page)] & PTE_4KFRAME);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		xpq_queue_pte_update(addr, *pte | PTE_W);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous so it's safe to just "++" here.
		 */
		pte++;
	}
	while (page < old_pgd + (old_count * PAGE_SIZE)) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		xpq_queue_pte_update(addr, 0);
		page += PAGE_SIZE;
		pte++;
	}
	xpq_flush_queue();
}

/*
 * Mark a page read-only, assuming vaddr = paddr + KERNBASE.
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PTE_P | xpmap_pg_nx;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}
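
/*
 * Note: unlike the xpq_queue_*() functions above, xen_bt_set_readonly()
 * issues a direct, synchronous HYPERVISOR_update_va_mapping() hypercall
 * and does not go through the per-CPU update queue.
 */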

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;

	int s = splvm();
	xpq_flush_queue();
	splx(s);
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
}
#endif /* __x86_64__ */