1 /*
2  * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
26 #include <stdint.h>
27 #include <stdio.h>
28 #include <errno.h>
29 #include <ipxe/malloc.h>
30 #include <ipxe/pci.h>
31 #include <ipxe/cpuid.h>
32 #include <ipxe/msr.h>
33 #include <ipxe/xen.h>
34 #include <ipxe/xenver.h>
35 #include <ipxe/xenmem.h>
36 #include <ipxe/xenstore.h>
37 #include <ipxe/xenbus.h>
38 #include <ipxe/xengrant.h>
39 #include "hvm.h"
40 
41 /** @file
42  *
43  * Xen HVM driver
44  *
45  */
46 
47 /**
48  * Get CPUID base
49  *
50  * @v hvm		HVM device
51  * @ret rc		Return status code
52  */
hvm_cpuid_base(struct hvm_device * hvm)53 static int hvm_cpuid_base ( struct hvm_device *hvm ) {
54 	struct {
55 		uint32_t ebx;
56 		uint32_t ecx;
57 		uint32_t edx;
58 	} __attribute__ (( packed )) signature;
59 	uint32_t base;
60 	uint32_t version;
61 	uint32_t discard_eax;
62 	uint32_t discard_ebx;
63 	uint32_t discard_ecx;
64 	uint32_t discard_edx;
65 
66 	/* Scan for magic signature */
67 	for ( base = HVM_CPUID_MIN ; base <= HVM_CPUID_MAX ;
68 	      base += HVM_CPUID_STEP ) {
69 		cpuid ( base, 0, &discard_eax, &signature.ebx, &signature.ecx,
70 			&signature.edx );
71 		if ( memcmp ( &signature, HVM_CPUID_MAGIC,
72 			      sizeof ( signature ) ) == 0 ) {
73 			hvm->cpuid_base = base;
74 			cpuid ( ( base + HVM_CPUID_VERSION ), 0, &version,
75 				&discard_ebx, &discard_ecx, &discard_edx );
76 			DBGC2 ( hvm, "HVM using CPUID base %#08x (v%d.%d)\n",
77 				base, ( version >> 16 ), ( version & 0xffff ) );
78 			return 0;
79 		}
80 	}
81 
82 	DBGC ( hvm, "HVM could not find hypervisor\n" );
83 	return -ENODEV;
84 }
85 
86 /**
87  * Map hypercall page(s)
88  *
89  * @v hvm		HVM device
90  * @ret rc		Return status code
91  */
hvm_map_hypercall(struct hvm_device * hvm)92 static int hvm_map_hypercall ( struct hvm_device *hvm ) {
93 	uint32_t pages;
94 	uint32_t msr;
95 	uint32_t discard_ecx;
96 	uint32_t discard_edx;
97 	physaddr_t hypercall_phys;
98 	uint32_t version;
99 	static xen_extraversion_t extraversion;
100 	int xenrc;
101 	int rc;
102 
103 	/* Get number of hypercall pages and MSR to use */
104 	cpuid ( ( hvm->cpuid_base + HVM_CPUID_PAGES ), 0, &pages, &msr,
105 		&discard_ecx, &discard_edx );
106 
107 	/* Allocate pages */
108 	hvm->hypercall_len = ( pages * PAGE_SIZE );
109 	hvm->xen.hypercall = malloc_dma ( hvm->hypercall_len, PAGE_SIZE );
110 	if ( ! hvm->xen.hypercall ) {
111 		DBGC ( hvm, "HVM could not allocate %d hypercall page(s)\n",
112 		       pages );
113 		return -ENOMEM;
114 	}
115 	hypercall_phys = virt_to_phys ( hvm->xen.hypercall );
116 	DBGC2 ( hvm, "HVM hypercall page(s) at [%#08lx,%#08lx) via MSR %#08x\n",
117 		hypercall_phys, ( hypercall_phys + hvm->hypercall_len ), msr );
118 
119 	/* Write to MSR */
120 	wrmsr ( msr, hypercall_phys );
121 
122 	/* Check that hypercall mechanism is working */
123 	version = xenver_version ( &hvm->xen );
124 	if ( ( xenrc = xenver_extraversion ( &hvm->xen, &extraversion ) ) != 0){
125 		rc = -EXEN ( xenrc );
126 		DBGC ( hvm, "HVM could not get extraversion: %s\n",
127 		       strerror ( rc ) );
128 		return rc;
129 	}
130 	DBGC2 ( hvm, "HVM found Xen version %d.%d%s\n",
131 		( version >> 16 ), ( version & 0xffff ) , extraversion );
132 
133 	return 0;
134 }
135 
136 /**
137  * Unmap hypercall page(s)
138  *
139  * @v hvm		HVM device
140  */
hvm_unmap_hypercall(struct hvm_device * hvm)141 static void hvm_unmap_hypercall ( struct hvm_device *hvm ) {
142 
143 	/* Free pages */
144 	free_dma ( hvm->xen.hypercall, hvm->hypercall_len );
145 }
146 
147 /**
148  * Allocate and map MMIO space
149  *
150  * @v hvm		HVM device
151  * @v space		Source mapping space
152  * @v len		Length (must be a multiple of PAGE_SIZE)
153  * @ret mmio		MMIO space address, or NULL on error
154  */
hvm_ioremap(struct hvm_device * hvm,unsigned int space,size_t len)155 static void * hvm_ioremap ( struct hvm_device *hvm, unsigned int space,
156 			    size_t len ) {
157 	struct xen_add_to_physmap add;
158 	struct xen_remove_from_physmap remove;
159 	unsigned int pages = ( len / PAGE_SIZE );
160 	physaddr_t mmio_phys;
161 	unsigned int i;
162 	void *mmio;
163 	int xenrc;
164 	int rc;
165 
166 	/* Sanity check */
167 	assert ( ( len % PAGE_SIZE ) == 0 );
168 
169 	/* Check for available space */
170 	if ( ( hvm->mmio_offset + len ) > hvm->mmio_len ) {
171 		DBGC ( hvm, "HVM could not allocate %zd bytes of MMIO space "
172 		       "(%zd of %zd remaining)\n", len,
173 		       ( hvm->mmio_len - hvm->mmio_offset ), hvm->mmio_len );
174 		goto err_no_space;
175 	}
176 
177 	/* Map this space */
178 	mmio = ioremap ( ( hvm->mmio + hvm->mmio_offset ), len );
179 	if ( ! mmio ) {
180 		DBGC ( hvm, "HVM could not map MMIO space [%08lx,%08lx)\n",
181 		       ( hvm->mmio + hvm->mmio_offset ),
182 		       ( hvm->mmio + hvm->mmio_offset + len ) );
183 		goto err_ioremap;
184 	}
185 	mmio_phys = virt_to_phys ( mmio );
186 
187 	/* Add to physical address space */
188 	for ( i = 0 ; i < pages ; i++ ) {
189 		add.domid = DOMID_SELF;
190 		add.idx = i;
191 		add.space = space;
192 		add.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
193 		if ( ( xenrc = xenmem_add_to_physmap ( &hvm->xen, &add ) ) !=0){
194 			rc = -EXEN ( xenrc );
195 			DBGC ( hvm, "HVM could not add space %d idx %d at "
196 			       "[%08lx,%08lx): %s\n", space, i,
197 			       ( mmio_phys + ( i * PAGE_SIZE ) ),
198 			       ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ),
199 			       strerror ( rc ) );
200 			goto err_add_to_physmap;
201 		}
202 	}
203 
204 	/* Update offset */
205 	hvm->mmio_offset += len;
206 
207 	return mmio;
208 
209 	i = pages;
210  err_add_to_physmap:
211 	for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
212 		remove.domid = DOMID_SELF;
213 		add.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
214 		xenmem_remove_from_physmap ( &hvm->xen, &remove );
215 	}
216 	iounmap ( mmio );
217  err_ioremap:
218  err_no_space:
219 	return NULL;
220 }
221 
222 /**
223  * Unmap MMIO space
224  *
225  * @v hvm		HVM device
226  * @v mmio		MMIO space address
227  * @v len		Length (must be a multiple of PAGE_SIZE)
228  */
hvm_iounmap(struct hvm_device * hvm,void * mmio,size_t len)229 static void hvm_iounmap ( struct hvm_device *hvm, void *mmio, size_t len ) {
230 	struct xen_remove_from_physmap remove;
231 	physaddr_t mmio_phys = virt_to_phys ( mmio );
232 	unsigned int pages = ( len / PAGE_SIZE );
233 	unsigned int i;
234 	int xenrc;
235 	int rc;
236 
237 	/* Unmap this space */
238 	iounmap ( mmio );
239 
240 	/* Remove from physical address space */
241 	for ( i = 0 ; i < pages ; i++ ) {
242 		remove.domid = DOMID_SELF;
243 		remove.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
244 		if ( ( xenrc = xenmem_remove_from_physmap ( &hvm->xen,
245 							    &remove ) ) != 0 ) {
246 			rc = -EXEN ( xenrc );
247 			DBGC ( hvm, "HVM could not remove space [%08lx,%08lx): "
248 			       "%s\n", ( mmio_phys + ( i * PAGE_SIZE ) ),
249 			       ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ),
250 			       strerror ( rc ) );
251 			/* Nothing we can do about this */
252 		}
253 	}
254 }
255 
256 /**
257  * Map shared info page
258  *
259  * @v hvm		HVM device
260  * @ret rc		Return status code
261  */
hvm_map_shared_info(struct hvm_device * hvm)262 static int hvm_map_shared_info ( struct hvm_device *hvm ) {
263 	physaddr_t shared_info_phys;
264 	int rc;
265 
266 	/* Map shared info page */
267 	hvm->xen.shared = hvm_ioremap ( hvm, XENMAPSPACE_shared_info,
268 					PAGE_SIZE );
269 	if ( ! hvm->xen.shared ) {
270 		rc = -ENOMEM;
271 		goto err_alloc;
272 	}
273 	shared_info_phys = virt_to_phys ( hvm->xen.shared );
274 	DBGC2 ( hvm, "HVM shared info page at [%#08lx,%#08lx)\n",
275 		shared_info_phys, ( shared_info_phys + PAGE_SIZE ) );
276 
277 	/* Sanity check */
278 	DBGC2 ( hvm, "HVM wallclock time is %d\n",
279 		readl ( &hvm->xen.shared->wc_sec ) );
280 
281 	return 0;
282 
283 	hvm_iounmap ( hvm, hvm->xen.shared, PAGE_SIZE );
284  err_alloc:
285 	return rc;
286 }
287 
288 /**
289  * Unmap shared info page
290  *
291  * @v hvm		HVM device
292  */
hvm_unmap_shared_info(struct hvm_device * hvm)293 static void hvm_unmap_shared_info ( struct hvm_device *hvm ) {
294 
295 	/* Unmap shared info page */
296 	hvm_iounmap ( hvm, hvm->xen.shared, PAGE_SIZE );
297 }
298 
299 /**
300  * Map grant table
301  *
302  * @v hvm		HVM device
303  * @ret rc		Return status code
304  */
hvm_map_grant(struct hvm_device * hvm)305 static int hvm_map_grant ( struct hvm_device *hvm ) {
306 	physaddr_t grant_phys;
307 	int rc;
308 
309 	/* Initialise grant table */
310 	if ( ( rc = xengrant_init ( &hvm->xen ) ) != 0 ) {
311 		DBGC ( hvm, "HVM could not initialise grant table: %s\n",
312 		       strerror ( rc ) );
313 		return rc;
314 	}
315 
316 	/* Map grant table */
317 	hvm->xen.grant.table = hvm_ioremap ( hvm, XENMAPSPACE_grant_table,
318 					     hvm->xen.grant.len );
319 	if ( ! hvm->xen.grant.table )
320 		return -ENODEV;
321 
322 	grant_phys = virt_to_phys ( hvm->xen.grant.table );
323 	DBGC2 ( hvm, "HVM mapped grant table at [%08lx,%08lx)\n",
324 		grant_phys, ( grant_phys + hvm->xen.grant.len ) );
325 	return 0;
326 }
327 
328 /**
329  * Unmap grant table
330  *
331  * @v hvm		HVM device
332  */
hvm_unmap_grant(struct hvm_device * hvm)333 static void hvm_unmap_grant ( struct hvm_device *hvm ) {
334 
335 	/* Unmap grant table */
336 	hvm_iounmap ( hvm, hvm->xen.grant.table, hvm->xen.grant.len );
337 }
338 
339 /**
340  * Map XenStore
341  *
342  * @v hvm		HVM device
343  * @ret rc		Return status code
344  */
hvm_map_xenstore(struct hvm_device * hvm)345 static int hvm_map_xenstore ( struct hvm_device *hvm ) {
346 	uint64_t xenstore_evtchn;
347 	uint64_t xenstore_pfn;
348 	physaddr_t xenstore_phys;
349 	char *name;
350 	int xenrc;
351 	int rc;
352 
353 	/* Get XenStore event channel */
354 	if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_EVTCHN,
355 					   &xenstore_evtchn ) ) != 0 ) {
356 		rc = -EXEN ( xenrc );
357 		DBGC ( hvm, "HVM could not get XenStore event channel: %s\n",
358 		       strerror ( rc ) );
359 		return rc;
360 	}
361 	hvm->xen.store.port = xenstore_evtchn;
362 
363 	/* Get XenStore PFN */
364 	if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_PFN,
365 					   &xenstore_pfn ) ) != 0 ) {
366 		rc = -EXEN ( xenrc );
367 		DBGC ( hvm, "HVM could not get XenStore PFN: %s\n",
368 		       strerror ( rc ) );
369 		return rc;
370 	}
371 	xenstore_phys = ( xenstore_pfn * PAGE_SIZE );
372 
373 	/* Map XenStore */
374 	hvm->xen.store.intf = ioremap ( xenstore_phys, PAGE_SIZE );
375 	if ( ! hvm->xen.store.intf ) {
376 		DBGC ( hvm, "HVM could not map XenStore at [%08lx,%08lx)\n",
377 		       xenstore_phys, ( xenstore_phys + PAGE_SIZE ) );
378 		return -ENODEV;
379 	}
380 	DBGC2 ( hvm, "HVM mapped XenStore at [%08lx,%08lx) with event port "
381 		"%d\n", xenstore_phys, ( xenstore_phys + PAGE_SIZE ),
382 		hvm->xen.store.port );
383 
384 	/* Check that XenStore is working */
385 	if ( ( rc = xenstore_read ( &hvm->xen, &name, "name", NULL ) ) != 0 ) {
386 		DBGC ( hvm, "HVM could not read domain name: %s\n",
387 		       strerror ( rc ) );
388 		return rc;
389 	}
390 	DBGC2 ( hvm, "HVM running in domain \"%s\"\n", name );
391 	free ( name );
392 
393 	return 0;
394 }
395 
396 /**
397  * Unmap XenStore
398  *
399  * @v hvm		HVM device
400  */
hvm_unmap_xenstore(struct hvm_device * hvm)401 static void hvm_unmap_xenstore ( struct hvm_device *hvm ) {
402 
403 	/* Unmap XenStore */
404 	iounmap ( hvm->xen.store.intf );
405 }
406 
/**
 * Probe PCI device
 *
 * Attaches to the Xen hypervisor via the platform PCI device and
 * probes the Xen bus for paravirtualised devices.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int hvm_probe ( struct pci_device *pci ) {
	struct hvm_device *hvm;
	int rc;

	/* Allocate and initialise structure */
	hvm = zalloc ( sizeof ( *hvm ) );
	if ( ! hvm ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	/* Record the device's MMIO window; hvm_ioremap() carves
	 * mapping space out of this region.
	 */
	hvm->mmio = pci_bar_start ( pci, HVM_MMIO_BAR );
	hvm->mmio_len = pci_bar_size ( pci, HVM_MMIO_BAR );
	DBGC2 ( hvm, "HVM has MMIO space [%08lx,%08lx)\n",
		hvm->mmio, ( hvm->mmio + hvm->mmio_len ) );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Attach to hypervisor.  Each step depends on the previous one
	 * (CPUID base -> hypercall page -> shared info -> grant table
	 * -> XenStore), so the order is significant.
	 */
	if ( ( rc = hvm_cpuid_base ( hvm ) ) != 0 )
		goto err_cpuid_base;
	if ( ( rc = hvm_map_hypercall ( hvm ) ) != 0 )
		goto err_map_hypercall;
	if ( ( rc = hvm_map_shared_info ( hvm ) ) != 0 )
		goto err_map_shared_info;
	if ( ( rc = hvm_map_grant ( hvm ) ) != 0 )
		goto err_map_grant;
	if ( ( rc = hvm_map_xenstore ( hvm ) ) != 0 )
		goto err_map_xenstore;

	/* Probe Xen devices */
	if ( ( rc = xenbus_probe ( &hvm->xen, &pci->dev ) ) != 0 ) {
		DBGC ( hvm, "HVM could not probe Xen bus: %s\n",
		       strerror ( rc ) );
		goto err_xenbus_probe;
	}

	pci_set_drvdata ( pci, hvm );
	return 0;

	/* Unreachable on success: documents the full teardown order,
	 * entered only via the error labels below.
	 */
	xenbus_remove ( &hvm->xen, &pci->dev );
 err_xenbus_probe:
	hvm_unmap_xenstore ( hvm );
 err_map_xenstore:
	hvm_unmap_grant ( hvm );
 err_map_grant:
	hvm_unmap_shared_info ( hvm );
 err_map_shared_info:
	hvm_unmap_hypercall ( hvm );
 err_map_hypercall:
 err_cpuid_base:
	free ( hvm );
 err_alloc:
	return rc;
}
468 
/**
 * Remove PCI device
 *
 * Tears down the Xen attachment in strict reverse order of
 * hvm_probe().
 *
 * @v pci		PCI device
 */
static void hvm_remove ( struct pci_device *pci ) {
	struct hvm_device *hvm = pci_get_drvdata ( pci );

	xenbus_remove ( &hvm->xen, &pci->dev );
	hvm_unmap_xenstore ( hvm );
	hvm_unmap_grant ( hvm );
	hvm_unmap_shared_info ( hvm );
	hvm_unmap_hypercall ( hvm );
	free ( hvm );
}
484 
/** PCI device IDs
 *
 * NOTE(review): presumably the Xen platform PCI device IDs — confirm
 * against the Xen platform device specification.
 */
static struct pci_device_id hvm_ids[] = {
	PCI_ROM ( 0x5853, 0x0001, "hvm", "hvm", 0 ),
	PCI_ROM ( 0x5853, 0x0002, "hvm2", "hvm2", 0 ),
};
490 
/** PCI driver for the Xen HVM platform device */
struct pci_driver hvm_driver __pci_driver = {
	.ids = hvm_ids,
	.id_count = ( sizeof ( hvm_ids ) / sizeof ( hvm_ids[0] ) ),
	.probe = hvm_probe,
	.remove = hvm_remove,
};
498 
/* Drag in objects via hvm_driver */
REQUIRING_SYMBOL ( hvm_driver );

/* Drag in netfront driver (Xen paravirtualised network interface) */
REQUIRE_OBJECT ( netfront );
504