/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_VMM_INSTRUCTION_EMUL_H_
#define	_VMM_INSTRUCTION_EMUL_H_

/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. Their contents are exposed here only
 * because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
	uint16_t	op_flags;
};

#define	VIE_INST_SIZE	15
struct vie {
	uint8_t		inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t		num_valid;		/* size of the instruction */
	uint8_t		num_processed;

	uint8_t		rex_w:1,		/* REX prefix */
			rex_r:1,
			rex_x:1,
			rex_b:1,
			rex_present:1;

	uint8_t		mod:2,			/* ModRM byte */
			reg:4,
			rm:4;

	uint8_t		ss:2,			/* SIB byte */
			index:4,
			base:4;

	uint8_t		disp_bytes;
	uint8_t		imm_bytes;

	uint8_t		scale;
	int		base_register;		/* VM_REG_GUEST_xyz */
	int		index_register;		/* VM_REG_GUEST_xyz */

	int64_t		displacement;		/* optional addr displacement */
	int64_t		immediate;		/* optional immediate operand */

	uint8_t		decoded;	/* set to 1 if successfully decoded */

	struct vie_op	op;			/* opcode description */
};

/*
 * Callback functions to read and write memory regions.
 */
typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
				 uint64_t *rval, int rsize, void *arg);

typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
				  uint64_t wval, int wsize, void *arg);
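
/*
 * Illustrative sketch only (not part of this header): a minimal callback
 * pair for a hypothetical single-register MMIO device.  The structure and
 * function names, and the zero-on-success return convention, are
 * assumptions made for the example:
 *
 *	struct dummy_dev {
 *		uint64_t	reg;		// hypothetical device state
 *	};
 *
 *	static int
 *	dummy_read(void *vm, int cpuid, uint64_t gpa, uint64_t *rval,
 *	    int rsize, void *arg)
 *	{
 *		struct dummy_dev *dev = arg;
 *
 *		*rval = dev->reg;		// return the register contents
 *		return (0);
 *	}
 *
 *	static int
 *	dummy_write(void *vm, int cpuid, uint64_t gpa, uint64_t wval,
 *	    int wsize, void *arg)
 *	{
 *		struct dummy_dev *dev = arg;
 *
 *		dev->reg = wval;		// latch the written value
 *		return (0);
 *	}
 */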

/*
 * Emulate the decoded 'vie' instruction.
 *
 * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
 * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
 * callback functions.
 *
 * 'void *vm' should be 'struct vm *' when called from kernel context and
 * 'struct vmctx *' when called from user context.
 */
int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
			    mem_region_read_t mrr, mem_region_write_t mrw,
			    void *mrarg);
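
/*
 * A hedged usage sketch, assuming a 'vie' that has already been fetched and
 * decoded, the hypothetical dummy_read/dummy_write callbacks sketched above
 * with 'dev' pointing at their device state, and that a return value of 0
 * means the instruction was emulated successfully:
 *
 *	error = vmm_emulate_instruction(vm, cpuid, gpa, vie,
 *	    dummy_read, dummy_write, dev);
 *	if (error != 0)
 *		return (error);		// could not emulate the access
 */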

#ifdef _KERNEL
/*
 * APIs to fetch and decode the instruction from the nested page fault
 * handler.
 */
int vmm_fetch_instruction(struct vm *vm, int cpuid,
			  uint64_t rip, int inst_length, uint64_t cr3,
			  struct vie *vie);

/*
 * Decode the instruction fetched into 'vie' so it can be emulated.
 *
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault. It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
 *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason, pass
 * in VIE_INVALID_GLA instead.
 */
#define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
int vmm_decode_instruction(struct vm *vm, int cpuid,
			   uint64_t gla, struct vie *vie);
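
/*
 * A minimal sketch of the fetch/decode sequence as a nested page fault
 * handler might perform it before calling vmm_emulate_instruction().  The
 * surrounding variables (vm, cpuid, rip, inst_length, cr3, gla) and the
 * zero-on-success error handling are assumptions for the example:
 *
 *	struct vie vie;
 *	int error;
 *
 *	error = vmm_fetch_instruction(vm, cpuid, rip, inst_length, cr3, &vie);
 *	if (error == 0) {
 *		// pass VIE_INVALID_GLA when the hardware assist does not
 *		// report a guest linear address
 *		error = vmm_decode_instruction(vm, cpuid, gla, &vie);
 *	}
 */
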
#endif	/* _KERNEL */

#endif	/* _VMM_INSTRUCTION_EMUL_H_ */