/*
 * Copyright (c) 2021 Maxime Villard, m00nbsd.net
 * Copyright (c) 2021 The DragonFly Project.
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Aaron LI <aly@aaronly.me>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NVMM_OS_H_
#define _NVMM_OS_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#if defined(__NetBSD__)
#include <sys/cpu.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>
#elif defined(__DragonFly__)
#include <sys/lock.h>
#include <sys/malloc.h> /* contigmalloc, contigfree */
#include <sys/proc.h> /* LWP_MP_URETMASK */
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h> /* KERN_SUCCESS, etc. */
#include <vm/pmap.h> /* pmap_ept_transform, pmap_npt_transform */
#include <machine/cpu.h> /* hvm_break_wanted */
#include <machine/cpufunc.h> /* ffsl, ffs, etc. */
#endif

/* Types. */
#if defined(__NetBSD__)
typedef struct vmspace		os_vmspace_t;
typedef struct uvm_object	os_vmobj_t;
typedef krwlock_t		os_rwl_t;
typedef kmutex_t		os_mtx_t;
#elif defined(__DragonFly__)
typedef struct vmspace		os_vmspace_t;
typedef struct vm_object	os_vmobj_t;
typedef struct lock		os_rwl_t;
typedef struct lock		os_mtx_t;
/* A few standard types. */
typedef vm_offset_t		vaddr_t;
typedef vm_offset_t		voff_t;
typedef vm_size_t		vsize_t;
typedef vm_paddr_t		paddr_t;
#endif

/* Attributes. */
#if defined(__DragonFly__)
#define __cacheline_aligned	__cachealign
#define __diagused		__debugvar
#endif

/* Macros. */
#if defined(__DragonFly__)
#define __arraycount(__x)	(sizeof(__x) / sizeof(__x[0]))
#define __insn_barrier()	__asm __volatile("":::"memory")
#endif

/* Bitops. */
#if defined(__NetBSD__)
#include <sys/bitops.h>
#elif defined(__DragonFly__)
#include <sys/bitops.h>
#ifdef __x86_64__
#undef  __BIT
#define __BIT(__n)		__BIT64(__n)
#undef  __BITS
#define __BITS(__m, __n)	__BITS64(__m, __n)
#endif /* __x86_64__ */
#endif
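
/*
 * For example (an illustrative sketch, not part of this header's API):
 *	__BIT(3)	== 0x08
 *	__BITS(0, 3)	== 0x0f	(inclusive mask from bit 0 to bit 3)
 * On x86_64 the 64-bit variants are forced so that wide masks such as
 * __BIT(63) are computed at 64-bit width.
 */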

/* Maps. */
#if defined(__NetBSD__) || defined(__DragonFly__)
#define os_kernel_map		kernel_map
#define os_curproc_map		(&curproc->p_vmspace->vm_map)
#endif

/* R/W locks. */
#if defined(__NetBSD__)
#define os_rwl_init(lock)	rw_init(lock)
#define os_rwl_destroy(lock)	rw_destroy(lock)
#define os_rwl_rlock(lock)	rw_enter(lock, RW_READER)
#define os_rwl_wlock(lock)	rw_enter(lock, RW_WRITER)
#define os_rwl_unlock(lock)	rw_exit(lock)
#define os_rwl_wheld(lock)	rw_write_held(lock)
#elif defined(__DragonFly__)
#define os_rwl_init(lock)	lockinit(lock, "nvmmrw", 0, 0)
#define os_rwl_destroy(lock)	lockuninit(lock)
#define os_rwl_rlock(lock)	lockmgr(lock, LK_SHARED)
#define os_rwl_wlock(lock)	lockmgr(lock, LK_EXCLUSIVE)
#define os_rwl_unlock(lock)	lockmgr(lock, LK_RELEASE)
#define os_rwl_wheld(lock)	(lockstatus(lock, curthread) == LK_EXCLUSIVE)
#endif
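
/*
 * Usage sketch (illustrative only):
 *
 *	os_rwl_t lock;
 *
 *	os_rwl_init(&lock);
 *	os_rwl_wlock(&lock);
 *	OS_ASSERT(os_rwl_wheld(&lock));
 *	os_rwl_unlock(&lock);
 *	os_rwl_destroy(&lock);
 */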

/* Mutexes. */
#if defined(__NetBSD__)
#define os_mtx_init(lock)	mutex_init(lock, MUTEX_DEFAULT, IPL_NONE)
#define os_mtx_destroy(lock)	mutex_destroy(lock)
#define os_mtx_lock(lock)	mutex_enter(lock)
#define os_mtx_unlock(lock)	mutex_exit(lock)
#define os_mtx_owned(lock)	mutex_owned(lock)
#elif defined(__DragonFly__)
#define os_mtx_init(lock)	lockinit(lock, "nvmmmtx", 0, 0)
#define os_mtx_destroy(lock)	lockuninit(lock)
#define os_mtx_lock(lock)	lockmgr(lock, LK_EXCLUSIVE)
#define os_mtx_unlock(lock)	lockmgr(lock, LK_RELEASE)
#define os_mtx_owned(lock)	(lockstatus(lock, curthread) == LK_EXCLUSIVE)
#endif

/* Malloc. */
#if defined(__NetBSD__)
#include <sys/kmem.h>
#define os_mem_alloc(size)	kmem_alloc(size, KM_SLEEP)
#define os_mem_zalloc(size)	kmem_zalloc(size, KM_SLEEP)
#define os_mem_free(ptr, size)	kmem_free(ptr, size)
#elif defined(__DragonFly__)
#include <sys/malloc.h>
MALLOC_DECLARE(M_NVMM);
#define os_mem_alloc(size)	kmalloc(size, M_NVMM, M_WAITOK)
#define os_mem_zalloc(size)	kmalloc(size, M_NVMM, M_WAITOK | M_ZERO)
#define os_mem_free(ptr, size)	kfree(ptr, M_NVMM)
#endif
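
/*
 * Note: os_mem_free() takes the allocation size because NetBSD's
 * kmem_free() requires it; DragonFly's kfree() ignores it.  A sketch:
 *
 *	struct foo *p = os_mem_zalloc(sizeof(*p));
 *	...
 *	os_mem_free(p, sizeof(*p));
 */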

/* Printf. */
#if defined(__NetBSD__)
#define os_printf		printf
#elif defined(__DragonFly__)
#define os_printf		kprintf
#endif

/* Atomics. */
#if defined(__NetBSD__)
#include <sys/atomic.h>
#define os_atomic_inc_uint(x)	atomic_inc_uint(x)
#define os_atomic_dec_uint(x)	atomic_dec_uint(x)
#define os_atomic_load_uint(x)	atomic_load_relaxed(x)
#define os_atomic_inc_64(x)	atomic_inc_64(x)
#elif defined(__DragonFly__)
#include <machine/atomic.h>
#define os_atomic_inc_uint(x)	atomic_add_int(x, 1)
#define os_atomic_dec_uint(x)	atomic_subtract_int(x, 1)
#define os_atomic_load_uint(x)	atomic_load_acq_int(x)
#define os_atomic_inc_64(x)	atomic_add_64(x, 1)
#endif

/* Pmap. */
#if defined(__NetBSD__)
extern bool pmap_ept_has_ad;
#define os_vmspace_pmap(vm)	((vm)->vm_map.pmap)
#define os_vmspace_pdirpa(vm)	((vm)->vm_map.pmap->pm_pdirpa[0])
#define os_pmap_mach(pm)	((pm)->pm_data)
#elif defined(__DragonFly__)
#define os_vmspace_pmap(vm)	vmspace_pmap(vm)
#define os_vmspace_pdirpa(vm)	(vtophys(vmspace_pmap(vm)->pm_pml4))
#endif

/* CPU. */
#if defined(__NetBSD__)
#include <sys/cpu.h>
typedef struct cpu_info		os_cpu_t;
#define OS_MAXCPUS		MAXCPUS
#define OS_CPU_FOREACH(cpu)	for (CPU_INFO_FOREACH(, cpu))
#define os_cpu_number(cpu)	cpu_index(cpu)
#define os_curcpu()		curcpu()
#define os_curcpu_number()	cpu_number()
#define os_curcpu_tss_sel()	curcpu()->ci_tss_sel
#define os_curcpu_tss()		curcpu()->ci_tss
#define os_curcpu_gdt()		curcpu()->ci_gdt
#define os_curcpu_idt()		curcpu()->ci_idtvec.iv_idt
#elif defined(__DragonFly__)
#include <sys/globaldata.h>
#include <machine/segments.h>
typedef struct globaldata	os_cpu_t;
#define OS_MAXCPUS		SMP_MAXCPU
#define OS_CPU_FOREACH(cpu)	\
	for (int idx = 0; idx < ncpus && (cpu = globaldata_find(idx)); idx++)
#define os_cpu_number(cpu)	(cpu)->gd_cpuid
#define os_curcpu()		mycpu
#define os_curcpu_number()	mycpuid
#define os_curcpu_tss_sel()	GSEL(GPROC0_SEL, SEL_KPL)
#define os_curcpu_tss()		&mycpu->gd_prvspace->common_tss
#define os_curcpu_gdt()		mdcpu->gd_gdt
#define os_curcpu_idt()		r_idt_arr[mycpuid].rd_base
#endif
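
/*
 * Example iteration (illustrative only):
 *
 *	os_cpu_t *cpu;
 *
 *	OS_CPU_FOREACH(cpu) {
 *		os_printf("cpu%d\n", os_cpu_number(cpu));
 *	}
 */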

/* Cpusets. */
#if defined(__NetBSD__)
#include <sys/kcpuset.h>
typedef kcpuset_t		os_cpuset_t;
#define os_cpuset_init(s)	kcpuset_create(s, true)
#define os_cpuset_destroy(s)	kcpuset_destroy(s)
#define os_cpuset_isset(s, c)	kcpuset_isset(s, c)
#define os_cpuset_clear(s, c)	kcpuset_clear(s, c)
#define os_cpuset_setrunning(s)	kcpuset_copy(s, kcpuset_running)
#elif defined(__DragonFly__)
#include <sys/cpumask.h>
#include <machine/smp.h> /* smp_active_mask */
typedef cpumask_t		os_cpuset_t;
#define os_cpuset_init(s)	\
	({ *(s) = kmalloc(sizeof(cpumask_t), M_NVMM, M_WAITOK | M_ZERO); })
#define os_cpuset_destroy(s)	kfree((s), M_NVMM)
#define os_cpuset_isset(s, c)	CPUMASK_TESTBIT(*(s), c)
#define os_cpuset_clear(s, c)	ATOMIC_CPUMASK_NANDBIT(*(s), c)
#define os_cpuset_setrunning(s)	ATOMIC_CPUMASK_ORMASK(*(s), smp_active_mask)
#endif
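
/*
 * Lifecycle sketch (illustrative only).  Note that os_cpuset_init()
 * takes a pointer to the set pointer, while the other macros take the
 * set pointer itself:
 *
 *	os_cpuset_t *set;
 *
 *	os_cpuset_init(&set);
 *	os_cpuset_setrunning(set);
 *	if (os_cpuset_isset(set, os_curcpu_number()))
 *		os_cpuset_clear(set, os_curcpu_number());
 *	os_cpuset_destroy(set);
 */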

/* Preemption. */
#if defined(__NetBSD__)
#define os_preempt_disable()	kpreempt_disable()
#define os_preempt_enable()	kpreempt_enable()
#define os_preempt_disabled()	kpreempt_disabled()
#elif defined(__DragonFly__)
/*
 * In DragonFly, a thread in kernel mode will not be preemptively migrated
 * to another CPU or preemptively switched to another normal kernel thread,
 * but it can be preemptively switched to an interrupt thread (which switches
 * back to the kernel thread it preempted the instant it is done or blocks).
 *
 * However, we still need a critical section here to prevent even this
 * interrupt-thread preemption, so that interrupt threads are never exposed
 * to guest DB and FP register state.  We operate under the assumption that
 * the hard interrupt code won't mess with this state.
 */
#define os_preempt_disable()	crit_enter()
#define os_preempt_enable()	crit_exit()
#define os_preempt_disabled()	(curthread->td_critcount != 0)
#endif
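
/*
 * Typical pattern around code that handles guest register state
 * (a sketch, not taken verbatim from the NVMM backends):
 *
 *	os_preempt_disable();
 *	... load guest DB/FP state, run the guest ...
 *	os_preempt_enable();
 */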

/* Asserts. */
#if defined(__NetBSD__)
#define OS_ASSERT		KASSERT
#elif defined(__DragonFly__)
#define OS_ASSERT		KKASSERT
#endif

/* Misc. */
#if defined(__DragonFly__)
/* NOTE: Only exact for powers of two, since ffs() finds the lowest set bit. */
#define ilog2(n)		((sizeof(n) > 4 ? ffsl(n) : ffs(n)) - 1)
#define uimin(a, b)		((u_int)(a) < (u_int)(b) ? (u_int)(a) : (u_int)(b))
#endif

/* -------------------------------------------------------------------------- */

os_vmspace_t *	os_vmspace_create(vaddr_t, vaddr_t);
void		os_vmspace_destroy(os_vmspace_t *);
int		os_vmspace_fault(os_vmspace_t *, vaddr_t, vm_prot_t);

os_vmobj_t *	os_vmobj_create(voff_t);
void		os_vmobj_ref(os_vmobj_t *);
void		os_vmobj_rel(os_vmobj_t *);

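/*
 * Map a vmobj into a map.  The unnamed parameters are assumed (an editorial
 * reading of the matching implementation, not spelled out in this header)
 * to be: address, size, object, offset, then wired/fixed/shared flags and
 * the prot/maxprot pair.
 */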
int		os_vmobj_map(struct vm_map *, vaddr_t *, vsize_t, os_vmobj_t *,
		    voff_t, bool, bool, bool, int, int);
void		os_vmobj_unmap(struct vm_map *map, vaddr_t, vaddr_t, bool);

void *		os_pagemem_zalloc(size_t);
void		os_pagemem_free(void *, size_t);

paddr_t		os_pa_zalloc(void);
void		os_pa_free(paddr_t);

int		os_contigpa_zalloc(paddr_t *, vaddr_t *, size_t);
void		os_contigpa_free(paddr_t, vaddr_t, size_t);

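/*
 * Check whether the caller should break out and return to userland: either
 * the host wants the CPU back, or the thread has pending userland work.
 * Cheap enough to be called between guest runs.
 */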
static inline bool
os_return_needed(void)
{
#if defined(__NetBSD__)
	if (preempt_needed()) {
		return true;
	}
	if (curlwp->l_flag & LW_USERRET) {
		return true;
	}
	return false;
#elif defined(__DragonFly__)
	if (__predict_false(hvm_break_wanted())) {
		return true;
	}
	if (__predict_false(curthread->td_lwp->lwp_mpflags & LWP_MP_URETMASK)) {
		return true;
	}
	return false;
#endif
}

/* -------------------------------------------------------------------------- */

/* IPIs. */

#if defined(__NetBSD__)

#include <sys/xcall.h>
#define OS_IPI_FUNC(func)	void func(void *arg, void *unused)

static inline void
os_ipi_unicast(os_cpu_t *cpu, void (*func)(void *, void *), void *arg)
{
	xc_wait(xc_unicast(XC_HIGHPRI, func, arg, NULL, cpu));
}

static inline void
os_ipi_broadcast(void (*func)(void *, void *), void *arg)
{
	xc_wait(xc_broadcast(0, func, arg, NULL));
}

static inline void
os_ipi_kickall(void)
{
	/*
	 * XXX: this is probably too expensive. NetBSD should have a dummy
	 * interrupt handler that just IRETs without doing anything.
	 */
	pmap_tlb_shootdown(pmap_kernel(), -1, PTE_G, TLBSHOOT_NVMM);
}

#elif defined(__DragonFly__)

#include <sys/thread2.h>
#define OS_IPI_FUNC(func)	void func(void *arg)

static inline void
os_ipi_unicast(os_cpu_t *cpu, void (*func)(void *), void *arg)
{
	int seq;

	seq = lwkt_send_ipiq(cpu, func, arg);
	lwkt_wait_ipiq(cpu, seq);
}

static inline void
os_ipi_broadcast(void (*func)(void *), void *arg)
{
	cpumask_t mask;
	int i;

	for (i = 0; i < ncpus; i++) {
		CPUMASK_ASSBIT(mask, i);
		lwkt_cpusync_simple(mask, func, arg);
	}
}

/*
 * On DragonFly, no need to bind the thread, because any normal kernel
 * thread will not migrate to another CPU or be preempted (except by an
 * interrupt thread).
 */
#define curlwp_bind()		((int)0)
#define curlwp_bindx(bound)	/* nothing */

#endif /* __NetBSD__ */
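
/*
 * Usage sketch (illustrative only; the handler name is hypothetical).
 * OS_IPI_FUNC() hides the differing handler signatures, so handlers are
 * portable across both platforms:
 *
 *	OS_IPI_FUNC(nvmm_example_ipi)
 *	{
 *		struct example *ex = arg;
 *		...
 *	}
 *
 *	os_cpu_t *cpu = ...;
 *	os_ipi_unicast(cpu, nvmm_example_ipi, ex);
 *	os_ipi_broadcast(nvmm_example_ipi, ex);
 */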

#endif /* _NVMM_OS_H_ */