/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _VMM_ARM64_H_
#define _VMM_ARM64_H_

#include <machine/reg.h>
#include <machine/hypervisor.h>
#include <machine/pcpu.h>

#include "mmu.h"
#include "io/vgic_v3.h"
#include "io/vtimer.h"

struct vgic_v3;
struct vgic_v3_cpu;

/*
 * Per-vCPU hypervisor state.
 */
struct hypctx {
	struct trapframe tf;

	/*
	 * EL1 control registers.
	 */
	uint64_t	elr_el1;	/* Exception Link Register */
	uint64_t	sp_el0;		/* Stack pointer */
	uint64_t	tpidr_el0;	/* EL0 Software ID Register */
	uint64_t	tpidrro_el0;	/* Read-only Thread ID Register */
	uint64_t	tpidr_el1;	/* EL1 Software ID Register */
	uint64_t	vbar_el1;	/* Vector Base Address Register */

	uint64_t	actlr_el1;	/* Auxiliary Control Register */
	uint64_t	afsr0_el1;	/* Auxiliary Fault Status Register 0 */
	uint64_t	afsr1_el1;	/* Auxiliary Fault Status Register 1 */
	uint64_t	amair_el1;	/* Auxiliary Memory Attribute Indirection Register */
	uint64_t	contextidr_el1;	/* Current Process Identifier */
	uint64_t	cpacr_el1;	/* Architectural Feature Access Control Register */
	uint64_t	csselr_el1;	/* Cache Size Selection Register */
	uint64_t	esr_el1;	/* Exception Syndrome Register */
	uint64_t	far_el1;	/* Fault Address Register */
	uint64_t	mair_el1;	/* Memory Attribute Indirection Register */
	uint64_t	mdccint_el1;	/* Monitor DCC Interrupt Enable Register */
	uint64_t	mdscr_el1;	/* Monitor Debug System Control Register */
	uint64_t	par_el1;	/* Physical Address Register */
	uint64_t	sctlr_el1;	/* System Control Register */
	uint64_t	tcr_el1;	/* Translation Control Register */
	uint64_t	tcr2_el1;	/* Translation Control Register 2 */
	uint64_t	ttbr0_el1;	/* Translation Table Base Register 0 */
	uint64_t	ttbr1_el1;	/* Translation Table Base Register 1 */
	uint64_t	spsr_el1;	/* Saved Program Status Register */

	/* Performance Monitors (PMU) registers */
	uint64_t	pmcr_el0;	/* Performance Monitors Control Register */
	uint64_t	pmccntr_el0;	/* Cycle Count Register */
	uint64_t	pmccfiltr_el0;	/* Cycle Count Filter Register */
	uint64_t	pmcntenset_el0;	/* Count Enable Set Register */
	uint64_t	pmintenset_el1;	/* Interrupt Enable Set Register */
	uint64_t	pmovsset_el0;	/* Overflow Flag Status Set Register */
	uint64_t	pmselr_el0;	/* Event Counter Selection Register */
	uint64_t	pmuserenr_el0;	/* User Enable Register */
	uint64_t	pmevcntr_el0[31];	/* Event Count Registers */
	uint64_t	pmevtyper_el0[31];	/* Event Type Registers */

	uint64_t	dbgbcr_el1[16];	/* Debug Breakpoint Control Registers */
	uint64_t	dbgbvr_el1[16];	/* Debug Breakpoint Value Registers */
	uint64_t	dbgwcr_el1[16];	/* Debug Watchpoint Control Registers */
	uint64_t	dbgwvr_el1[16];	/* Debug Watchpoint Value Registers */

	/* EL2 control registers */
	uint64_t	cptr_el2;	/* Architectural Feature Trap Register */
	uint64_t	hcr_el2;	/* Hypervisor Configuration Register */
	uint64_t	mdcr_el2;	/* Monitor Debug Configuration Register */
	uint64_t	vpidr_el2;	/* Virtualization Processor ID Register */
	uint64_t	vmpidr_el2;	/* Virtualization Multiprocessor ID Register */
	uint64_t	el2_addr;	/* The address of this in el2 space */
	struct hyp	*hyp;
	struct vcpu	*vcpu;

	/* Fault information recorded on guest exit for the exit handlers. */
	struct {
		uint64_t	far_el2;	/* Fault Address Register */
		uint64_t	hpfar_el2;	/* Hypervisor IPA Fault Address Register */
	} exit_info;

	struct vtimer_cpu	vtimer_cpu;

	uint64_t		setcaps;	/* Currently enabled capabilities. */

	/* vCPU state used to handle guest debugging. */
	uint64_t		debug_spsr;		/* Saved guest SPSR */
	uint64_t		debug_mdscr;		/* Saved guest MDSCR */

	struct vgic_v3_regs	vgic_v3_regs;
	struct vgic_v3_cpu	*vgic_cpu;
	bool			has_exception;
};

/*
 * Per-VM hypervisor state.
 */
struct hyp {
	struct vm	*vm;
	struct vtimer	vtimer;
	uint64_t	vmid_generation;
	uint64_t	vttbr_el2;	/* Virtualization Translation Table Base Register */
	uint64_t	el2_addr;	/* The address of this in el2 space */
	bool		vgic_attached;
	struct vgic_v3	*vgic;
	struct hypctx	*ctx[];		/* Per-vCPU state, one entry per vCPU */
};

#define	DEFINE_VMMOPS_IFUNC(ret_type, opname, args)			\
	ret_type vmmops_##opname args;

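/*
 * Each DEFINE_VMMOPS_IFUNC() invocation below declares the prototype of one
 * vmm backend operation.  For example,
 * DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
 * expands to:
 *
 *	int vmmops_getreg(void *vcpui, int num, uint64_t *retval);
 */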
DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
DEFINE_VMMOPS_IFUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault))
DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
    struct vm_eventinfo *info))
DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
    int vcpu_id))
DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
DEFINE_VMMOPS_IFUNC(int, exception, (void *vcpui, uint64_t esr, uint64_t far))
DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
    vm_offset_t max))
DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
#ifdef notyet
#ifdef BHYVE_SNAPSHOT
DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta))
DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
    struct vm_snapshot_meta *meta))
DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
#endif
#endif

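/*
 * Call from the host kernel into the EL2 vmm code.  The first argument
 * selects the EL2 operation to run; any remaining arguments are passed
 * through to it.
 */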
uint64_t	vmm_call_hyp(uint64_t, ...);

/* Debug printf; compiled out by default, flip the #if 0 to enable it. */
#if 0
#define	eprintf(fmt, ...)	printf("%s:%d " fmt, __func__, __LINE__, ##__VA_ARGS__)
#else
#define	eprintf(fmt, ...)	do {} while (0)
#endif

struct hypctx *arm64_get_active_vcpu(void);
void raise_data_insn_abort(struct hypctx *, uint64_t, bool, int);

#endif /* !_VMM_ARM64_H_ */