/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_VMM_INSTRUCTION_EMUL_H_
#define	_VMM_INSTRUCTION_EMUL_H_

/*
 * Callback functions to read and write memory regions.
 */
typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
				 uint64_t *rval, int rsize, void *arg);

typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
				  uint64_t wval, int wsize, void *arg);
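
/*
 * Illustrative sketch only (the structure, its layout and the function
 * names below are hypothetical, not part of this API): a pair of
 * callbacks backing a trivial MMIO region with a single 64-bit register.
 *
 *	struct mmio_example {
 *		uint64_t	data;
 *	};
 *
 *	static int
 *	example_mem_read(void *vm, int cpuid, uint64_t gpa, uint64_t *rval,
 *	    int rsize, void *arg)
 *	{
 *		struct mmio_example *sc = arg;
 *
 *		*rval = sc->data;
 *		return (0);
 *	}
 *
 *	static int
 *	example_mem_write(void *vm, int cpuid, uint64_t gpa, uint64_t wval,
 *	    int wsize, void *arg)
 *	{
 *		struct mmio_example *sc = arg;
 *
 *		sc->data = wval;
 *		return (0);
 *	}
 */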

/*
 * Emulate the decoded 'vie' instruction.
 *
 * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
 * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
 * callback functions.
 *
 * 'void *vm' should be 'struct vm *' when called from kernel context and
 * 'struct vmctx *' when called from user context.
 */
int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
			    mem_region_read_t mrr, mem_region_write_t mrw,
			    void *mrarg);
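
/*
 * Illustrative sketch only: dispatching a decoded instruction to the
 * hypothetical callbacks sketched above. A non-zero return value is
 * treated here as an error to propagate; the surrounding error handling
 * is an assumption, not a contract.
 *
 *	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie,
 *	    example_mem_read, example_mem_write, sc);
 *	if (error != 0)
 *		return (error);
 */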

int vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size);

/*
 * Returns 1 if an alignment check exception should be injected and 0 otherwise.
 */
int vie_alignment_check(int cpl, int operand_size, uint64_t cr0,
    uint64_t rflags, uint64_t gla);
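
/*
 * Illustrative sketch of the architectural #AC condition this helper
 * models (the macro names CR0_AM and PSL_AC are the usual FreeBSD
 * spellings, not guaranteed by this header): an alignment check fault
 * is only considered at CPL 3 with CR0.AM and RFLAGS.AC both set, and
 * fires when 'gla' is not aligned to 'operand_size'.
 *
 *	if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rflags & PSL_AC) == 0)
 *		return (0);
 *	return ((gla & (operand_size - 1)) != 0);
 */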

/* Returns 1 if the 'gla' is not canonical and 0 otherwise. */
int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla);
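
/*
 * Illustrative sketch: in 64-bit mode a linear address is canonical when
 * bits 63:47 are a sign-extension of bit 47, so a check along the lines
 * below suffices; other CPU modes impose no canonical requirement.
 * (CPU_MODE_64BIT is assumed to be the 64-bit member of enum vm_cpu_mode.)
 *
 *	uint64_t mask = ~((1UL << 47) - 1);
 *
 *	if (cpu_mode != CPU_MODE_64BIT)
 *		return (0);
 *	return ((gla & mask) != 0 && (gla & mask) != mask);
 */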

uint64_t vie_size2mask(int size);
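
/*
 * For example, vie_size2mask(1) is expected to yield 0xff, vie_size2mask(2)
 * 0xffff, vie_size2mask(4) 0xffffffff and vie_size2mask(8) a mask covering
 * all 64 bits, i.e. a mask spanning 'size' bytes of the operand.
 */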

int vie_calculate_gla(enum vm_cpu_mode cpu_mode, int addrsize,
    enum vm_reg_name seg, struct seg_desc *desc, uint64_t off, uint64_t *gla);

#ifdef _KERNEL
/*
 * APIs to fetch and decode the instruction from the nested page fault handler.
 *
 * 'vie' must be initialized before calling 'vmm_fetch_instruction()'.
 */
int vmm_fetch_instruction(struct vm *vm, int cpuid,
			  struct vm_guest_paging *guest_paging,
			  uint64_t rip, int inst_length, struct vie *vie);

/*
 * Translate the guest linear address 'gla' to a guest physical address.
 *
 * Returns 0 on success and '*gpa' contains the result of the translation.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 */
int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa);
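
/*
 * Illustrative sketch of consuming the three-way return value: a return
 * of 1 means an exception was injected into the guest (so the caller
 * would typically just resume the guest), while -1 is an outright
 * failure. 'prot' and the recovery actions shown are assumptions, not
 * part of the interface.
 *
 *	error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
 *	if (error == 1)
 *		return (0);
 *	if (error == -1)
 *		return (EFAULT);
 *	(on error == 0, '*gpa' holds the translated address)
 */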

void vie_init(struct vie *vie);

/*
 * Decode the instruction fetched into 'vie' so it can be emulated.
 *
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault. It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
 *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason pass
 * in VIE_INVALID_GLA instead.
 */
#define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
			   enum vm_cpu_mode cpu_mode, struct vie *vie);
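
/*
 * Illustrative sketch of how the _KERNEL-only pieces fit together when
 * handling a nested page fault; the local variables and error handling
 * are examples only.
 *
 *	vie_init(&vie);
 *	error = vmm_fetch_instruction(vm, vcpuid, &paging, rip,
 *	    inst_length, &vie);
 *	if (error == 0)
 *		error = vmm_decode_instruction(vm, vcpuid, gla, cpu_mode,
 *		    &vie);
 *	if (error == 0)
 *		error = vmm_emulate_instruction(vm, vcpuid, gpa, &vie,
 *		    mrr, mrw, mrarg);
 */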
#endif	/* _KERNEL */

#endif	/* _VMM_INSTRUCTION_EMUL_H_ */