xref: /netbsd/sys/arch/xen/xen/hypervisor.c (revision cded5e95)
1 /* $NetBSD: hypervisor.c,v 1.96 2022/06/23 14:32:16 bouyer Exp $ */
2 
3 /*
4  * Copyright (c) 2005 Manuel Bouyer.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  */
27 
28 /*
29  *
30  * Copyright (c) 2004 Christian Limpach.
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
43  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
44  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
45  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
46  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
47  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
51  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52  */
53 
54 
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(0, "$NetBSD: hypervisor.c,v 1.96 2022/06/23 14:32:16 bouyer Exp $");
57 
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/device.h>
61 #include <sys/sysctl.h>
62 
63 #include "xenbus.h"
64 #include "xencons.h"
65 #include "isa.h"
66 #include "isadma.h"
67 #include "pci.h"
68 #include "acpica.h"
69 #include "kernfs.h"
70 
71 #include "opt_xen.h"
72 #include "opt_mpbios.h"
73 
74 #include <xen/xen.h>
75 #include <xen/hypervisor.h>
76 #include <xen/evtchn.h>
77 #include <xen/include/public/version.h>
78 #include <xen/include/public/vcpu.h>
79 #include <x86/pio.h>
80 #include <x86/machdep.h>
81 
82 #include <sys/cpu.h>
83 #include <sys/dirent.h>
84 #include <sys/stat.h>
85 #include <sys/tree.h>
86 #include <sys/vnode.h>
87 #include <miscfs/specfs/specdev.h>
88 #include <miscfs/kernfs/kernfs.h>
89 #include <xen/kernfs_machdep.h>
90 #include <dev/isa/isavar.h>
91 #include <xen/granttables.h>
92 #include <xen/vcpuvar.h>
93 #if NPCI > 0
94 #include <dev/pci/pcivar.h>
95 #if NACPICA > 0
96 #include <dev/acpi/acpivar.h>
97 #include <machine/mpconfig.h>
98 #include <xen/mpacpi.h>
99 #endif
100 #ifdef MPBIOS
101 #include <machine/mpbiosvar.h>
102 #endif
103 #endif /* NPCI */
104 
105 #if NXENBUS > 0
106 #include <xen/xenbus.h>
107 #endif
108 
109 #if NXENNET_HYPERVISOR > 0
110 #include <net/if.h>
111 #include <net/if_ether.h>
112 #include <net/if_media.h>
113 #include <xen/if_xennetvar.h>
114 #endif
115 
116 #if NXBD_HYPERVISOR > 0
117 #include <sys/buf.h>
118 #include <sys/disk.h>
119 #include <sys/bufq.h>
120 #include <dev/dkvar.h>
121 #include <xen/xbdvar.h>
122 #endif
123 
124 int	hypervisor_match(device_t, cfdata_t, void *);
125 void	hypervisor_attach(device_t, device_t, void *);
126 
127 CFATTACH_DECL_NEW(hypervisor, 0,
128     hypervisor_match, hypervisor_attach, NULL, NULL);
129 
130 static int hypervisor_print(void *, const char *);
131 
132 union hypervisor_attach_cookie {
133 	const char *hac_device;		/* first elem of all */
134 #if NXENCONS > 0
135 	struct xencons_attach_args hac_xencons;
136 #endif
137 #if NXENBUS > 0
138 	struct xenbus_attach_args hac_xenbus;
139 #endif
140 #if NXENNET_HYPERVISOR > 0
141 	struct xennet_attach_args hac_xennet;
142 #endif
143 #if NXBD_HYPERVISOR > 0
144 	struct xbd_attach_args hac_xbd;
145 #endif
146 #if NPCI > 0
147 	struct pcibus_attach_args hac_pba;
148 #if defined(DOM0OPS) && NISA > 0
149 	struct isabus_attach_args hac_iba;
150 #endif
151 #if NACPICA > 0
152 	struct acpibus_attach_args hac_acpi;
153 #endif
154 #endif /* NPCI */
155 	struct vcpu_attach_args hac_vcaa;
156 };
157 
158 /*
159  * This is set when the ISA bus is attached.  If it's not set by the
160  * time it's checked below, then mainbus attempts to attach an ISA bus.
161  */
162 #if defined(XENPV) && defined(DOM0OPS)
163 int     isa_has_been_seen;
164 #if NISA > 0
165 struct  x86_isa_chipset x86_isa_chipset;
166 #endif
167 #endif
168 
169 #if defined(XENPVHVM) || defined(XENPVH)
170 #include <xen/include/public/arch-x86/cpuid.h>
171 #include <xen/include/public/arch-x86/hvm/start_info.h>
172 #include <xen/include/public/hvm/hvm_op.h>
173 #include <xen/include/public/hvm/params.h>
174 
175 #include <x86/bootinfo.h>
176 
177 #define	IDTVEC(name)	__CONCAT(X, name)
178 typedef void (vector)(void);
179 extern vector IDTVEC(syscall);
180 extern vector IDTVEC(syscall32);
181 extern vector IDTVEC(osyscall);
182 extern vector *x86_exceptions[];
183 
184 extern vector IDTVEC(hypervisor_pvhvm_callback);
185 extern struct xenstore_domain_interface *xenstore_interface; /* XXX */
186 
187 volatile shared_info_t *HYPERVISOR_shared_info __read_mostly;
188 paddr_t HYPERVISOR_shared_info_pa;
189 union start_info_union start_info_union __aligned(PAGE_SIZE);
190 struct hvm_start_info *hvm_start_info;
191 
192 static int xen_hvm_vec = 0;
193 #endif
194 
195 int xen_version;
196 
197 /* power management, for save/restore */
198 static bool hypervisor_suspend(device_t, const pmf_qual_t *);
199 static bool hypervisor_resume(device_t, const pmf_qual_t *);
200 
201 /* from FreeBSD */
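/*
 * QEMU "unplug" protocol: reading XEN_MAGIC_IOPORT returns XMI_MAGIC when
 * the device model supports unplugging, and writing a mask of XMI_UNPLUG_*
 * values disconnects the matching emulated devices so the Xen PV drivers
 * can take over (see hypervisor_attach() below).
 */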
202 #define XEN_MAGIC_IOPORT 0x10
203 enum {
204 	XMI_MAGIC                        = 0x49d2,
205 	XMI_UNPLUG_IDE_DISKS             = 0x01,
206 	XMI_UNPLUG_NICS                  = 0x02,
207 	XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
208 };
209 
210 
211 #ifdef XENPVHVM
212 
213 bool xenhvm_use_percpu_callback = 0;
214 
215 static void
216 xen_init_hypercall_page(void)
217 {
218 	extern vaddr_t hypercall_page;
219 	u_int descs[4];
220 
221 	x86_cpuid(XEN_CPUID_LEAF(2), descs);
222 
223 	/*
224 	 * Given 32 bytes per hypercall stub and an optimistic estimate
225 	 * of 100 hypercalls (the current maximum is 55), there should be
226 	 * no reason to spill past a single hypercall page. That is what
227 	 * we allocate in locore.S anyway; make sure the allocation
228 	 * matches the registration below.
229 	 */
230 
231 	KASSERT(descs[0] == 1);
232 
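	/*
	 * The Xen CPUID leaf queried above returns the number of
	 * hypercall pages in descs[0] and, in descs[1], the index of the
	 * MSR used to register them: writing the guest physical address
	 * of hypercall_page to that MSR makes the hypervisor fill the
	 * page with hypercall stubs.
	 */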
233 	/* XXX: vtophys(&hypercall_page) */
234 	wrmsr(descs[1], (uintptr_t)&hypercall_page - KERNBASE);
235 }
236 
237 uint32_t hvm_start_paddr;
238 
239 void init_xen_early(void);
240 void
241 init_xen_early(void)
242 {
243 	const char *cmd_line;
244 	if (vm_guest != VM_GUEST_XENPVH)
245 		return;
246 	xen_init_hypercall_page();
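	/*
	 * hvm_start_paddr holds the physical address of the PVH
	 * start_info saved by the early boot code; this (and the shared
	 * info mapping below) relies on low physical memory still being
	 * mapped at KERNBASE this early in boot.
	 */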
247 	hvm_start_info = (void *)((uintptr_t)hvm_start_paddr + KERNBASE);
248 
249 	HYPERVISOR_shared_info = (void *)((uintptr_t)HYPERVISOR_shared_info_pa + KERNBASE);
250 	struct xen_add_to_physmap xmap = {
251 		.domid = DOMID_SELF,
252 		.space = XENMAPSPACE_shared_info,
253 		.idx = 0, /* Important - XEN checks for this */
254 		.gpfn = atop(HYPERVISOR_shared_info_pa)
255 	};
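	/*
	 * XENMAPSPACE_shared_info asks Xen to back the guest frame given
	 * in .gpfn with the domain's shared_info page, turning the page
	 * reserved for it into the live shared info structure.
	 */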
256 
257 	int err;
258 
259 	if ((err = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap)) < 0) {
260 		printk(
261 		    "Xen HVM: Unable to register HYPERVISOR_shared_info %d\n", err);
262 	}
263 	delay_func = x86_delay = xen_delay;
264 	x86_initclock_func = xen_initclocks;
265 	if (hvm_start_info->cmdline_paddr != 0) {
266 		cmd_line =
267 		    (void *)((uintptr_t)hvm_start_info->cmdline_paddr + KERNBASE);
268 		strlcpy(xen_start_info.cmd_line, cmd_line,
269 		    sizeof(xen_start_info.cmd_line));
270 	} else {
271 		xen_start_info.cmd_line[0] = '\0';
272 	}
273 	xen_start_info.flags = hvm_start_info->flags;
274 }
275 
276 
277 static bool
278 xen_check_hypervisordev(void)
279 {
280 	extern struct cfdata cfdata[];
281 	for (int i = 0; cfdata[i].cf_name != NULL; i++) {
282 		if (strcasecmp("hypervisor", cfdata[i].cf_name) == 0) {
283 			switch(cfdata[i].cf_fstate) {
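			/*
			 * These states mean the hypervisor device is still
			 * enabled in the kernel configuration; any other
			 * state (e.g. disabled via userconf) makes us skip
			 * the HVM setup.
			 */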
284 			case FSTATE_NOTFOUND:
285 			case FSTATE_FOUND:
286 			case FSTATE_STAR:
287 				return true;
288 			default:
289 				return false;
290 			}
291 		}
292 	}
293 	return false;
294 }
295 
296 static int
297 xen_hvm_init_late(void)
298 {
299 	struct idt_vec *iv = &(cpu_info_primary.ci_idtvec);
300 
301 	if (HYPERVISOR_xen_version(XENVER_version, NULL) < 0) {
302 		aprint_error("Xen HVM: hypercall page not working\n");
303 		return 0;
304 	}
305 	xen_init_features();
306 
307 	/* Initialize various boot-time data structures preset by Xen: */
308 	/* the xenstore shared page address and its event channel. */
309 	struct xen_hvm_param xen_hvm_param;
310 
311 	xen_hvm_param.domid = DOMID_SELF;
312 	xen_hvm_param.index = HVM_PARAM_STORE_PFN;
313 
314 	if (HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) {
315 		aprint_error(
316 		    "Xen HVM: Unable to obtain xenstore page address\n");
317 		return 0;
318 	}
319 
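	/*
	 * HVM_PARAM_STORE_PFN is the guest frame of the xenstore ring;
	 * map it at the preallocated xenstore_interface VA so the xenbus
	 * code can use it just as it does under PV.
	 */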
320 	/* Re-use PV field */
321 	xen_start_info.store_mfn = xen_hvm_param.value;
322 
323 	pmap_kenter_pa((vaddr_t) xenstore_interface, ptoa(xen_start_info.store_mfn),
324 	    VM_PROT_READ|VM_PROT_WRITE, 0);
325 
326 	xen_hvm_param.domid = DOMID_SELF;
327 	xen_hvm_param.index = HVM_PARAM_STORE_EVTCHN;
328 
329 	if (HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) {
330 		aprint_error(
331 		    "Xen HVM: Unable to obtain xenstore event channel\n");
332 		return 0;
333 	}
334 
335 	xen_start_info.store_evtchn = xen_hvm_param.value;
336 
337 	/*
338 	 * Register the event callback first; here's why:
339 	 * http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=7b5b8ca7dffde866d851f0b87b994e0b13e5b867
340 	 */
341 
342 	/*
343 	 * Check for XENFEAT_hvm_callback_vector. Can't proceed
344 	 * without it.
345 	 */
346 	if (!xen_feature(XENFEAT_hvm_callback_vector)) {
347 		aprint_error("Xen HVM: XENFEAT_hvm_callback_vector "
348 		    "not available, cannot proceed\n");
349 		return 0;
350 	}
351 
352 	/*
353 	 * Prepare the callback vector.
354 	 * We don't really care where it is, as long as it's free.
355 	 */
356 	xen_hvm_vec = idt_vec_alloc(iv, 129, 255);
357 	idt_vec_set(iv, xen_hvm_vec, &IDTVEC(hypervisor_pvhvm_callback));
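	/* The vector itself is handed to Xen later, in xen_hvm_init_cpu(). */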
358 
359 	events_default_setup();
360 	return 1;
361 }
362 
363 int
364 xen_hvm_init(void)
365 {
366 	/*
367 	 * We need to set up the HVM interfaces early, so that we can
368 	 * properly set up the CPUs later (in particular, each CPU needs
369 	 * to run x86_cpuid() locally to get its vcpuid).
370 	 *
371 	 * For PVH, part of it has already been done.
372 	 */
373 	if (vm_guest == VM_GUEST_XENPVH) {
374 		if (xen_hvm_init_late() == 0) {
375 			panic("hvm_init failed");
376 		}
377 		return 1;
378 	}
379 
380 	if (vm_guest != VM_GUEST_XENHVM)
381 		return 0;
382 
383 	/* check if hypervisor was disabled with userconf */
384 	if (!xen_check_hypervisordev())
385 		return 0;
386 
387 	aprint_normal("Identified Xen guest in HVM mode.\n");
388 
389 	xen_init_hypercall_page();
390 
391 	/* HYPERVISOR_shared_info */
392 	struct xen_add_to_physmap xmap = {
393 		.domid = DOMID_SELF,
394 		.space = XENMAPSPACE_shared_info,
395 		.idx = 0, /* Important - XEN checks for this */
396 		.gpfn = atop(HYPERVISOR_shared_info_pa)
397 	};
398 
399 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0) {
400 		aprint_error(
401 		    "Xen HVM: Unable to register HYPERVISOR_shared_info\n");
402 		return 0;
403 	}
404 
405 	/* HYPERVISOR_shared_info va,pa has been allocated in pmap_bootstrap() */
406 	pmap_kenter_pa((vaddr_t) HYPERVISOR_shared_info,
407 	    HYPERVISOR_shared_info_pa, VM_PROT_READ|VM_PROT_WRITE, 0);
408 
409 	if (xen_hvm_init_late() == 0)
410 		return 0;
411 
412 	struct xen_hvm_param xen_hvm_param;
413 	xen_hvm_param.domid = DOMID_SELF;
414 	xen_hvm_param.index = HVM_PARAM_CONSOLE_PFN;
415 
416 	if (HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) {
417 		aprint_debug(
418 		    "Xen HVM: Unable to obtain xencons page address\n");
419 		xen_start_info.console.domU.mfn = 0;
420 		xen_start_info.console.domU.evtchn = -1;
421 		xencons_interface = 0;
422 	} else {
423 		/* Re-use PV field */
424 		xen_start_info.console.domU.mfn = xen_hvm_param.value;
425 
426 		pmap_kenter_pa((vaddr_t) xencons_interface,
427 		    ptoa(xen_start_info.console.domU.mfn),
428 		    VM_PROT_READ|VM_PROT_WRITE, 0);
429 
430 		xen_hvm_param.domid = DOMID_SELF;
431 		xen_hvm_param.index = HVM_PARAM_CONSOLE_EVTCHN;
432 
433 		if (HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) {
434 			aprint_error(
435 			   "Xen HVM: Unable to obtain xencons event channel\n");
436 			return 0;
437 		}
438 
439 		xen_start_info.console.domU.evtchn = xen_hvm_param.value;
440 	}
441 
442 	/*
443 	 * PR port-amd64/55543
444 	 * workaround for Amazon's Xen 4.2: it looks like the Xen clock is not
445 	 * fully functional here. This version also doesn't support
446 	 * HVM_PARAM_CONSOLE_PFN.
447 	 */
448 	if (xencons_interface != 0) {
449 		delay_func = x86_delay = xen_delay;
450 		x86_initclock_func = xen_initclocks;
451 	}
452 
453 	vm_guest = VM_GUEST_XENPVHVM; /* Be more specific */
454 	return 1;
455 }
456 
457 int
458 xen_hvm_init_cpu(struct cpu_info *ci)
459 {
460 	u_int32_t descs[4];
461 	struct xen_hvm_param xen_hvm_param;
462 	int error;
463 	static bool again = 0;
464 
465 	if (!vm_guest_is_xenpvh_or_pvhvm())
466 		return 0;
467 
468 	KASSERT(ci == curcpu());
469 
470 	descs[0] = 0;
471 	x86_cpuid(XEN_CPUID_LEAF(4), descs);
472 	if (descs[0] & XEN_HVM_CPUID_VCPU_ID_PRESENT) {
473 		ci->ci_vcpuid = descs[1];
474 	} else {
475 		aprint_debug_dev(ci->ci_dev,
476 		    "Xen HVM: can't get VCPU id, falling back to ci_acpiid\n");
477 		ci->ci_vcpuid = ci->ci_acpiid;
478 	}
479 
480 	xen_map_vcpu(ci);
481 
482 	/* Register event callback handler. */
483 
484 	xen_hvm_param.domid = DOMID_SELF;
485 	xen_hvm_param.index = HVM_PARAM_CALLBACK_IRQ;
486 
487 	/* val[63:56] = 2, val[7:0] = vec */
488 	xen_hvm_param.value = ((int64_t)0x2 << 56) | xen_hvm_vec;
489 
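	/*
	 * Two callback mechanisms exist: HVMOP_set_evtchn_upcall_vector
	 * installs a per-vcpu upcall vector, while HVM_PARAM_CALLBACK_IRQ
	 * (delivery type 2 in the top byte, vector number in the low byte)
	 * installs a single domain-wide vector. Prefer the former, falling
	 * back to the latter if the hypervisor lacks it.
	 */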
490 	/* First try to set up a per-cpu vector. */
491 	if (!again || xenhvm_use_percpu_callback) {
492 		struct xen_hvm_evtchn_upcall_vector xen_hvm_uvec;
493 		xen_hvm_uvec.vcpu = ci->ci_vcpuid;
494 		xen_hvm_uvec.vector = xen_hvm_vec;
495 
496 		xenhvm_use_percpu_callback = 1;
497 		error = HYPERVISOR_hvm_op(
498 		    HVMOP_set_evtchn_upcall_vector, &xen_hvm_uvec);
499 		if (error < 0) {
500 			aprint_error_dev(ci->ci_dev,
501 			    "failed to set event upcall vector: %d\n", error);
502 			if (again)
503 				panic("event upcall vector");
504 			aprint_error_dev(ci->ci_dev,
505 			    "falling back to global vector\n");
506 			xenhvm_use_percpu_callback = 0;
507 		} else {
508 			/*
509 			 * From FreeBSD:
510 			 * Trick the toolstack into thinking we are enlightened.
511 			 */
512 			xen_hvm_param.value = 1;
513 			aprint_verbose_dev(ci->ci_dev,
514 			    "using event upcall vector: %d\n", xen_hvm_vec);
515 		}
516 	}
517 
518 	if (again)
519 		return 1;
520 
521 	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xen_hvm_param) < 0) {
522 		aprint_error_dev(ci->ci_dev,
523 		    "Xen HVM: Unable to register event callback vector\n");
524 		vm_guest = VM_GUEST_XENHVM;
525 		return 0;
526 	}
527 	again = 1;
528 	return 1;
529 }
530 
531 #endif /* XENPVHVM */
532 
533 /*
534  * Probe for the hypervisor; always succeeds.
535  */
536 int
537 hypervisor_match(device_t parent, cfdata_t match, void *aux)
538 {
539 	struct hypervisor_attach_args *haa = aux;
540 
541 	/* Attach path sanity check */
542 	if (strncmp(haa->haa_busname, "hypervisor", sizeof("hypervisor")) != 0)
543 		return 0;
544 
545 
546 #ifdef XENPVHVM
547 	if (!vm_guest_is_xenpvh_or_pvhvm())
548 		return 0;
549 #endif
550 	/* If we got here, it must mean we matched */
551 	return 1;
552 }
553 
554 #if defined(MULTIPROCESSOR) && defined(XENPV)
555 static int
556 hypervisor_vcpu_print(void *aux, const char *parent)
557 {
558 	/* Unconfigured cpus are ignored quietly. */
559 	return (QUIET);
560 }
561 #endif /* MULTIPROCESSOR && XENPV */
562 
563 /*
564  * Attach the hypervisor.
565  */
566 void
567 hypervisor_attach(device_t parent, device_t self, void *aux)
568 {
569 
570 #if NPCI >0
571 #ifdef PCI_BUS_FIXUP
572 	int pci_maxbus = 0;
573 #endif
574 #endif /* NPCI */
575 	union hypervisor_attach_cookie hac;
576 	char xen_extra_version[XEN_EXTRAVERSION_LEN];
577 	static char xen_version_string[20];
578 	int rc;
579 	const struct sysctlnode *node = NULL;
580 
581 #ifdef XENPVHVM
582 	if (vm_guest == VM_GUEST_XENPVHVM) {
583 		/* disable emulated devices */
584 		if (inw(XEN_MAGIC_IOPORT) == XMI_MAGIC) {
585 			outw(XEN_MAGIC_IOPORT,
586 			    XMI_UNPLUG_IDE_DISKS | XMI_UNPLUG_NICS);
587 		} else {
588 			aprint_error_dev(self,
589 			    "Unable to disable emulated devices\n");
590 		}
591 	}
592 #endif /* XENPVHVM */
593 	xenkernfs_init();
594 
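	/* XENVER_version packs the Xen release as (major << 16) | minor. */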
595 	xen_version = HYPERVISOR_xen_version(XENVER_version, NULL);
596 	memset(xen_extra_version, 0, sizeof(xen_extra_version));
597 	HYPERVISOR_xen_version(XENVER_extraversion, xen_extra_version);
598 	rc = snprintf(xen_version_string, 20, "%d.%d%s", XEN_MAJOR(xen_version),
599 		XEN_MINOR(xen_version), xen_extra_version);
600 	aprint_normal(": Xen version %s\n", xen_version_string);
601 	if (rc >= 20)
602 		aprint_debug(": xen_version_string truncated\n");
603 
604 	sysctl_createv(NULL, 0, NULL, &node, 0,
605 	    CTLTYPE_NODE, "xen",
606 	    SYSCTL_DESCR("Xen top level node"),
607 	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_CREATE, CTL_EOL);
608 
609 	if (node != NULL) {
610 		sysctl_createv(NULL, 0, &node, NULL, CTLFLAG_READONLY,
611 		    CTLTYPE_STRING, "version",
612 		    SYSCTL_DESCR("Xen hypervisor version"),
613 		    NULL, 0, xen_version_string, 0, CTL_CREATE, CTL_EOL);
614 	}
615 
616 	aprint_verbose_dev(self, "features: ");
617 #define XEN_TST_F(n) \
618 	if (xen_feature(XENFEAT_##n)) \
619 		aprint_verbose(" %s", #n);
620 
621 	XEN_TST_F(writable_page_tables);
622 	XEN_TST_F(writable_descriptor_tables);
623 	XEN_TST_F(auto_translated_physmap);
624 	XEN_TST_F(supervisor_mode_kernel);
625 	XEN_TST_F(pae_pgdir_above_4gb);
626 	XEN_TST_F(mmu_pt_update_preserve_ad);
627 	XEN_TST_F(highmem_assist);
628 	XEN_TST_F(gnttab_map_avail_bits);
629 	XEN_TST_F(hvm_callback_vector);
630 	XEN_TST_F(hvm_safe_pvclock);
631 	XEN_TST_F(hvm_pirqs);
632 #undef XEN_TST_F
633 	aprint_verbose("\n");
634 
635 	xengnt_init();
636 	events_init();
637 
638 #ifdef XENPV
639 	memset(&hac, 0, sizeof(hac));
640 	hac.hac_vcaa.vcaa_name = "vcpu";
641 	hac.hac_vcaa.vcaa_caa.cpu_number = 0;
642 	hac.hac_vcaa.vcaa_caa.cpu_role = CPU_ROLE_BP;
643 	hac.hac_vcaa.vcaa_caa.cpu_func = NULL; /* See xen/x86/cpu.c:vcpu_attach() */
644 	config_found(self, &hac.hac_vcaa, hypervisor_print,
645 	    CFARGS(.iattr = "xendevbus"));
646 
647 #ifdef MULTIPROCESSOR
648 
649 	/*
650 	 * The xenstore contains the configured number of vcpus.
651 	 * The xenstore, however, is not accessible until much later in
652 	 * the boot sequence. We therefore brute-force check for
653 	 * allocated vcpus (see cpu.c:vcpu_match()) by iterating up to
654 	 * the maximum number of CPUs supported by NetBSD MP.
655 	 */
656 	cpuid_t vcpuid;
657 
658 	for (vcpuid = 1; vcpuid < maxcpus; vcpuid++) {
659 		memset(&hac, 0, sizeof(hac));
660 		hac.hac_vcaa.vcaa_name = "vcpu";
661 		hac.hac_vcaa.vcaa_caa.cpu_number = vcpuid;
662 		hac.hac_vcaa.vcaa_caa.cpu_role = CPU_ROLE_AP;
663 		hac.hac_vcaa.vcaa_caa.cpu_func = NULL; /* See xen/x86/cpu.c:vcpu_attach() */
664 		if (NULL == config_found(self, &hac.hac_vcaa,
665 					 hypervisor_vcpu_print,
666 					 CFARGS(.iattr = "xendevbus"))) {
667 			break;
668 		}
669 	}
670 
671 #endif /* MULTIPROCESSOR */
672 #endif /* XENPV */
673 
674 #if NXENBUS > 0
675 	extern struct x86_bus_dma_tag xenbus_bus_dma_tag;
676 	memset(&hac, 0, sizeof(hac));
677 	hac.hac_xenbus.xa_device = "xenbus";
678 	hac.hac_xenbus.xa_dmat = &xenbus_bus_dma_tag;
679 	config_found(self, &hac.hac_xenbus, hypervisor_print,
680 	    CFARGS(.iattr = "xendevbus"));
681 #endif
682 #if NXENCONS > 0
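	/*
	 * xen_hvm_init() leaves xencons_interface at 0 when the hypervisor
	 * doesn't provide HVM_PARAM_CONSOLE_PFN, in which case there is no
	 * PV console to attach in PVHVM mode.
	 */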
683 	if (xencons_interface != 0 || vm_guest != VM_GUEST_XENPVHVM) {
684 		memset(&hac, 0, sizeof(hac));
685 		hac.hac_xencons.xa_device = "xencons";
686 		config_found(self, &hac.hac_xencons, hypervisor_print,
687 		    CFARGS(.iattr = "xendevbus"));
688 	}
689 #endif
690 
691 #if defined(DOM0OPS)
692 #if defined(XENPV)
693 #if NISADMA > 0 && NACPICA > 0
694 	/*
695 	 * ACPI needs ISA DMA initialized before it starts probing.
696 	 */
697 	isa_dmainit(&x86_isa_chipset, x86_bus_space_io, &isa_bus_dma_tag,
698 	    self);
699 #endif
700 
701 #if NPCI > 0
702 #if NACPICA > 0
703 	if (acpi_present) {
704 		memset(&hac, 0, sizeof(hac));
705 		hac.hac_acpi.aa_iot = x86_bus_space_io;
706 		hac.hac_acpi.aa_memt = x86_bus_space_mem;
707 		hac.hac_acpi.aa_pc = NULL;
708 		hac.hac_acpi.aa_pciflags =
709 			PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY |
710 			PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY |
711 			PCI_FLAGS_MWI_OKAY;
712 		hac.hac_acpi.aa_ic = &x86_isa_chipset;
713 		hac.hac_acpi.aa_dmat = &pci_bus_dma_tag;
714 #ifdef _LP64
715 		hac.hac_acpi.aa_dmat64 = &pci_bus_dma64_tag;
716 #else
717 		hac.hac_acpi.aa_dmat64 = NULL;
718 #endif /* _LP64 */
719 		config_found(self, &hac.hac_acpi, NULL,
720 		    CFARGS(.iattr = "acpibus"));
721 	}
722 #endif /* NACPICA */
723 	memset(&hac, 0, sizeof(hac));
724 	hac.hac_pba.pba_iot = x86_bus_space_io;
725 	hac.hac_pba.pba_memt = x86_bus_space_mem;
726 	hac.hac_pba.pba_dmat = &pci_bus_dma_tag;
727 #ifdef _LP64
728 	hac.hac_pba.pba_dmat64 = &pci_bus_dma64_tag;
729 #else
730 	hac.hac_pba.pba_dmat64 = NULL;
731 #endif /* _LP64 */
732 	hac.hac_pba.pba_flags = PCI_FLAGS_MEM_OKAY | PCI_FLAGS_IO_OKAY;
733 	hac.hac_pba.pba_bridgetag = NULL;
734 	hac.hac_pba.pba_bus = 0;
735 #if NACPICA > 0 && defined(ACPI_SCANPCI)
736 	if (mpacpi_active)
737 		mp_pci_scan(self, &hac.hac_pba, pcibusprint);
738 	else
739 #endif
740 #if defined(MPBIOS) && defined(MPBIOS_SCANPCI)
741 	if (mpbios_scanned != 0)
742 		mp_pci_scan(self, &hac.hac_pba, pcibusprint);
743 	else
744 #endif
745 	config_found(self, &hac.hac_pba, pcibusprint,
746 	    CFARGS(.iattr = "pcibus"));
747 #if NACPICA > 0
748 	if (mp_verbose)
749 		acpi_pci_link_state();
750 #endif
751 #if NISA > 0
752 	if (isa_has_been_seen == 0) {
753 		memset(&hac, 0, sizeof(hac));
754 		hac.hac_iba._iba_busname = "isa";
755 		hac.hac_iba.iba_iot = x86_bus_space_io;
756 		hac.hac_iba.iba_memt = x86_bus_space_mem;
757 		hac.hac_iba.iba_dmat = &isa_bus_dma_tag;
758 		hac.hac_iba.iba_ic = NULL; /* No isa DMA yet */
759 		config_found(self, &hac.hac_iba, isabusprint,
760 		    CFARGS(.iattr = "isabus"));
761 	}
762 #endif /* NISA */
763 #endif /* NPCI */
764 #endif /* XENPV */
765 
766 	if (xendomain_is_privileged()) {
767 		xenprivcmd_init();
768 	}
769 #endif /* DOM0OPS */
770 
771 	hypervisor_machdep_attach();
772 
773 	if (!pmf_device_register(self, hypervisor_suspend, hypervisor_resume))
774 		aprint_error_dev(self, "couldn't establish power handler\n");
775 
776 }
777 
778 static bool
779 hypervisor_suspend(device_t dev, const pmf_qual_t *qual)
780 {
781 #ifdef XENPV
782 	events_suspend();
783 	xengnt_suspend();
784 #endif
785 	return true;
786 }
787 
788 static bool
789 hypervisor_resume(device_t dev, const pmf_qual_t *qual)
790 {
791 #ifdef XENPV
792 	hypervisor_machdep_resume();
793 
794 	xengnt_resume();
795 	events_resume();
796 #endif
797 	return true;
798 }
799 
800 static int
801 hypervisor_print(void *aux, const char *parent)
802 {
803 	union hypervisor_attach_cookie *hac = aux;
804 
805 	if (parent)
806 		aprint_normal("%s at %s", hac->hac_device, parent);
807 	return (UNCONF);
808 }
809 
810 #define DIR_MODE	(S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
811 
812 kernfs_parentdir_t *kernxen_pkt;
813 
814 void
815 xenkernfs_init(void)
816 {
817 #if NKERNFS > 0
818 	kernfs_entry_t *dkt;
819 
820 	KERNFS_ALLOCENTRY(dkt, KM_SLEEP);
821 	KERNFS_INITENTRY(dkt, DT_DIR, "xen", NULL, KFSsubdir, VDIR, DIR_MODE);
822 	kernfs_addentry(NULL, dkt);
823 	kernxen_pkt = KERNFS_ENTOPARENTDIR(dkt);
824 #endif
825 }
826 
827 /*
828  * Set up Xen's vcpu_info. Requires ci_vcpuid to be initialized.
829  */
830 void
831 xen_map_vcpu(struct cpu_info *ci)
832 {
833 	int size;
834 	uintptr_t ptr;
835 	struct vcpu_register_vcpu_info vcpu_info_op;
836 	paddr_t ma;
837 	int ret;
838 
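	/*
	 * The shared info page only provides vcpu_info slots for the
	 * first XEN_LEGACY_MAX_VCPUS vcpus; any vcpu beyond that must
	 * register its own vcpu_info area below.
	 */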
839 	if (ci->ci_vcpuid < XEN_LEGACY_MAX_VCPUS) {
840 		ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[ci->ci_vcpuid];
841 		return;
842 	}
843 
844 	/*
845 	 * We need to register it via VCPUOP_register_vcpu_info. The
846 	 * structure must not cross a page boundary; aligning to the
847 	 * smallest power-of-2 size which can contain vcpu_info ensures
848 	 * this. Also make sure it's cache-line aligned, for performance.
849 	 */
850 	size = CACHE_LINE_SIZE;
851 	while (size < sizeof(struct vcpu_info)) {
852 		size = size << 1;
853 	}
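	/*
	 * Over-allocate by size - 1 bytes so that rounding the start of
	 * the allocation up to the computed power-of-two alignment still
	 * leaves room for the whole vcpu_info.
	 */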
854 	ptr = (uintptr_t)uvm_km_alloc(kernel_map,
855 		    sizeof(struct vcpu_info) + size - 1, 0,
856 		    UVM_KMF_WIRED|UVM_KMF_ZERO);
857 	ptr = roundup2(ptr, size);
858 	ci->ci_vcpu = (struct vcpu_info *)ptr;
859 
860 	pmap_extract_ma(pmap_kernel(), (ptr & ~PAGE_MASK), &ma);
861 	vcpu_info_op.mfn = ma >> PAGE_SHIFT;
862 	vcpu_info_op.offset = (ptr & PAGE_MASK);
863 	vcpu_info_op.rsvd = 0;
864 
865 	ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info,
866 	    ci->ci_vcpuid, &vcpu_info_op);
867 	if (ret) {
868 		panic("VCPUOP_register_vcpu_info for %d failed: %d",
869 		    ci->ci_vcpuid, ret);
870 	}
871 }
872