/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 The FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/time.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/bus.h>
#include <machine/machdep.h>
#include <machine/vmm.h>
#include <machine/armreg.h>

#include <arm64/vmm/arm64.h>

#include "vgic.h"
#include "vtimer.h"

#define	RES1		0xffffffffffffffffUL

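/*
 * A timer can raise its interrupt only when it is enabled and its interrupt
 * is not masked.
 */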
#define timer_enabled(ctl)	\
    (!((ctl) & CNTP_CTL_IMASK) && ((ctl) & CNTP_CTL_ENABLE))

static uint64_t cnthctl_el2_reg;
static uint32_t tmr_frq;

#define timer_condition_met(ctl)	((ctl) & CNTP_CTL_ISTATUS)

static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);

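/*
 * Host-side interrupt handler for the virtual timer. It runs when the guest's
 * virtual timer asserts its interrupt; if the timer really is enabled,
 * unmasked and expired, the corresponding IRQ is injected into the virtual
 * GIC.
 */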
static int
vtimer_virtual_timer_intr(void *arg)
{
	struct hypctx *hypctx;
	uint64_t cntpct_el0;
	uint32_t cntv_ctl;

	hypctx = arm64_get_active_vcpu();
	cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);

	if (!hypctx) {
		/* vm_destroy() was called. */
		eprintf("No active vcpu\n");
		cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
		goto out;
	}
	if (!timer_enabled(cntv_ctl)) {
		eprintf("Timer not enabled\n");
		goto out;
	}
	if (!timer_condition_met(cntv_ctl)) {
		eprintf("Timer condition not met\n");
		goto out;
	}

	cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
	    hypctx->hyp->vtimer.cntvoff_el2;
	if (hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 < cntpct_el0)
		vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    GT_VIRT_IRQ, true);

	cntv_ctl = hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0;

out:
	/*
	 * Disable the timer interrupt. This prevents the interrupt from being
	 * reasserted as soon as we exit the handler, which would leave us
	 * stuck in an infinite loop.
	 *
	 * This is safe to do because the guest disables the timer and then
	 * re-enables it as part of its interrupt handling routine.
	 */
	cntv_ctl &= ~CNTP_CTL_ENABLE;
	WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);

	return (FILTER_HANDLED);
}

int
vtimer_init(uint64_t cnthctl_el2)
{
	cnthctl_el2_reg = cnthctl_el2;
	/*
	 * The guest *MUST* use the same timer frequency as the host. The
	 * register CNTFRQ_EL0 is accessible to the guest and a different value
	 * in the guest dts file might have unforeseen consequences.
	 */
	tmr_frq = READ_SPECIALREG(cntfrq_el0);

	return (0);
}

void
vtimer_vminit(struct hyp *hyp)
{
	uint64_t now;

	/*
	 * Configure the Counter-timer Hypervisor Control Register for the VM.
	 *
	 * CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0 from EL1
	 * CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
	 */
	hyp->vtimer.cnthctl_el2 = cnthctl_el2_reg & ~CNTHCTL_EL1PCEN;
	hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCTEN;

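	/*
	 * Record the current physical count as the virtual offset so that the
	 * guest's virtual counter (CNTVCT = CNTPCT - CNTVOFF) starts close to
	 * zero.
	 */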
	now = READ_SPECIALREG(cntpct_el0);
	hyp->vtimer.cntvoff_el2 = now;

	return;
}

void
vtimer_cpuinit(struct hypctx *hypctx)
{
	struct vtimer_cpu *vtimer_cpu;

	vtimer_cpu = &hypctx->vtimer_cpu;
	/*
	 * Configure physical timer interrupts for the VCPU.
	 *
	 * CNTP_CTL_IMASK: mask interrupts
	 * ~CNTP_CTL_ENABLE: disable the timer
	 */
	vtimer_cpu->phys_timer.cntx_ctl_el0 = CNTP_CTL_IMASK & ~CNTP_CTL_ENABLE;

	mtx_init(&vtimer_cpu->phys_timer.mtx, "vtimer phys callout mutex", NULL,
	    MTX_DEF);
	callout_init_mtx(&vtimer_cpu->phys_timer.callout,
	    &vtimer_cpu->phys_timer.mtx, 0);
	vtimer_cpu->phys_timer.irqid = GT_PHYS_NS_IRQ;

	mtx_init(&vtimer_cpu->virt_timer.mtx, "vtimer virt callout mutex", NULL,
	    MTX_DEF);
	callout_init_mtx(&vtimer_cpu->virt_timer.callout,
	    &vtimer_cpu->virt_timer.mtx, 0);
	vtimer_cpu->virt_timer.irqid = GT_VIRT_IRQ;
}

void
vtimer_cpucleanup(struct hypctx *hypctx)
{
	struct vtimer_cpu *vtimer_cpu;

	vtimer_cpu = &hypctx->vtimer_cpu;
	callout_drain(&vtimer_cpu->phys_timer.callout);
	callout_drain(&vtimer_cpu->virt_timer.callout);
	mtx_destroy(&vtimer_cpu->phys_timer.mtx);
	mtx_destroy(&vtimer_cpu->virt_timer.mtx);
}

void
vtimer_vmcleanup(struct hyp *hyp)
{
	struct hypctx *hypctx;
	uint32_t cntv_ctl;

	hypctx = arm64_get_active_vcpu();
	if (!hypctx) {
		/* The active VM was destroyed, stop the timer. */
		cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
		cntv_ctl &= ~CNTP_CTL_ENABLE;
		WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);
	}
}

void
vtimer_cleanup(void)
{
}

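/*
 * Reconcile the emulated virtual timer with the virtual GIC: assert the timer
 * IRQ if the timer has already expired, deassert it if the timer is disabled,
 * and otherwise schedule a callout for the future expiry.
 */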
void
vtimer_sync_hwstate(struct hypctx *hypctx)
{
	struct vtimer_timer *timer;
	uint64_t cntpct_el0;

	timer = &hypctx->vtimer_cpu.virt_timer;
	cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
	    hypctx->hyp->vtimer.cntvoff_el2;
	if (!timer_enabled(timer->cntx_ctl_el0)) {
		vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    timer->irqid, false);
	} else if (timer->cntx_cval_el0 < cntpct_el0) {
		vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    timer->irqid, true);
	} else {
		vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    timer->irqid, false);
		vtimer_schedule_irq(hypctx, false);
	}
}

static void
vtimer_inject_irq_callout_phys(void *context)
{
	struct hypctx *hypctx;

	hypctx = context;
	vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
	    hypctx->vtimer_cpu.phys_timer.irqid, true);
}

static void
vtimer_inject_irq_callout_virt(void *context)
{
	struct hypctx *hypctx;

	hypctx = context;
	vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
	    hypctx->vtimer_cpu.virt_timer.irqid, true);
}

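/*
 * Arm a callout to inject the timer interrupt when the selected (physical or
 * virtual) timer expires. The remaining ticks are converted to sbintime using
 * the host timer frequency; if the compare value is already in the past, the
 * interrupt is injected immediately.
 */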
static void
vtimer_schedule_irq(struct hypctx *hypctx, bool phys)
{
	sbintime_t time;
	struct vtimer_timer *timer;
	uint64_t cntpct_el0;
	uint64_t diff;

	if (phys)
		timer = &hypctx->vtimer_cpu.phys_timer;
	else
		timer = &hypctx->vtimer_cpu.virt_timer;
	cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
	    hypctx->hyp->vtimer.cntvoff_el2;
	if (timer->cntx_cval_el0 < cntpct_el0) {
		/* Timer set in the past, trigger interrupt */
		vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    timer->irqid, true);
	} else {
		diff = timer->cntx_cval_el0 - cntpct_el0;
		time = diff * SBT_1S / tmr_frq;
		if (phys)
			callout_reset_sbt(&timer->callout, time, 0,
			    vtimer_inject_irq_callout_phys, hypctx, 0);
		else
			callout_reset_sbt(&timer->callout, time, 0,
			    vtimer_inject_irq_callout_virt, hypctx, 0);
	}
}

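/*
 * Cancel a pending physical timer injection: drain the callout and lower the
 * timer interrupt in the virtual GIC.
 */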
static void
vtimer_remove_irq(struct hypctx *hypctx, struct vcpu *vcpu)
{
	struct vtimer_cpu *vtimer_cpu;
	struct vtimer_timer *timer;

	vtimer_cpu = &hypctx->vtimer_cpu;
	timer = &vtimer_cpu->phys_timer;

	callout_drain(&timer->callout);
	/*
	 * The interrupt needs to be deactivated here regardless of whether the
	 * callout function has been executed. The timer interrupt can be
	 * masked with the CNTP_CTL_EL0.IMASK bit instead of reading the IAR
	 * register. Masking the interrupt doesn't remove it from the list
	 * registers.
	 */
	vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(vcpu), timer->irqid, false);
}

/*
 * Timer emulation functions.
 *
 * The guest should use the virtual timer, but some software, e.g. u-boot,
 * uses the physical timer. Emulate the physical timer in software for the
 * guest.
 *
 * Adjust for cntvoff_el2 so the physical and virtual timers show similar
 * times. This simplifies interrupt handling in the virtual timer as the
 * adjustment will have already happened.
 */

int
vtimer_phys_ctl_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx;
	struct vtimer_cpu *vtimer_cpu;
	uint64_t cntpct_el0;

	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vtimer_cpu = &hypctx->vtimer_cpu;

	cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
	if (vtimer_cpu->phys_timer.cntx_cval_el0 < cntpct_el0)
		/* Timer condition met */
		*rval = vtimer_cpu->phys_timer.cntx_ctl_el0 | CNTP_CTL_ISTATUS;
	else
		*rval = vtimer_cpu->phys_timer.cntx_ctl_el0 & ~CNTP_CTL_ISTATUS;

	return (0);
}

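/*
 * Emulated CNTP_CTL_EL0 write. When the timer transitions from disabled to
 * enabled, schedule the expiry callout; when it is disabled, cancel any
 * pending interrupt injection.
 */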
int
vtimer_phys_ctl_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
	struct hypctx *hypctx;
	struct vtimer_cpu *vtimer_cpu;
	uint64_t ctl_el0;
	bool timer_toggled_on;

	hypctx = vcpu_get_cookie(vcpu);
	vtimer_cpu = &hypctx->vtimer_cpu;

	timer_toggled_on = false;
	ctl_el0 = vtimer_cpu->phys_timer.cntx_ctl_el0;

	if (!timer_enabled(ctl_el0) && timer_enabled(wval))
		timer_toggled_on = true;
	else if (timer_enabled(ctl_el0) && !timer_enabled(wval))
		vtimer_remove_irq(hypctx, vcpu);

	vtimer_cpu->phys_timer.cntx_ctl_el0 = wval;

	if (timer_toggled_on)
		vtimer_schedule_irq(hypctx, true);

	return (0);
}

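/*
 * Emulated CNTPCT_EL0 read: return the physical count adjusted by the VM's
 * virtual offset so physical and virtual time stay aligned for the guest.
 */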
int
vtimer_phys_cnt_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
	struct vm *vm;
	struct hyp *hyp;

	vm = vcpu_vm(vcpu);
	hyp = vm_get_cookie(vm);
	*rval = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
	return (0);
}

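/* Writes to the emulated CNTPCT_EL0 are ignored; the counter is read-only. */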
int
vtimer_phys_cnt_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
	return (0);
}

int
vtimer_phys_cval_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
	struct hypctx *hypctx;
	struct vtimer_cpu *vtimer_cpu;

	hypctx = vcpu_get_cookie(vcpu);
	vtimer_cpu = &hypctx->vtimer_cpu;

	*rval = vtimer_cpu->phys_timer.cntx_cval_el0;

	return (0);
}

int
vtimer_phys_cval_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
	struct hypctx *hypctx;
	struct vtimer_cpu *vtimer_cpu;

	hypctx = vcpu_get_cookie(vcpu);
	vtimer_cpu = &hypctx->vtimer_cpu;

	vtimer_cpu->phys_timer.cntx_cval_el0 = wval;

	vtimer_remove_irq(hypctx, vcpu);
	if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) {
		vtimer_schedule_irq(hypctx, true);
	}

	return (0);
}

int
vtimer_phys_tval_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx;
	struct vtimer_cpu *vtimer_cpu;
	uint32_t cntpct_el0;

	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vtimer_cpu = &hypctx->vtimer_cpu;

	if (!(vtimer_cpu->phys_timer.cntx_ctl_el0 & CNTP_CTL_ENABLE)) {
		/*
		 * ARMv8 Architecture Manual, p. D7-2702: the result of reading
		 * TVAL when the timer is disabled is UNKNOWN. I have chosen to
		 * return the maximum value possible on 32 bits which means the
		 * timer will fire very far into the future.
		 */
		*rval = (uint32_t)RES1;
	} else {
		cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
		    hyp->vtimer.cntvoff_el2;
		*rval = vtimer_cpu->phys_timer.cntx_cval_el0 - cntpct_el0;
	}

	return (0);
}

int
vtimer_phys_tval_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx;
	struct vtimer_cpu *vtimer_cpu;
	uint64_t cntpct_el0;

	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vtimer_cpu = &hypctx->vtimer_cpu;

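	/*
	 * TVAL is a signed 32-bit down-counter: writing it sets
	 * CVAL = current (offset-adjusted) count + sign-extended TVAL.
	 */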
	cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
	vtimer_cpu->phys_timer.cntx_cval_el0 = (int32_t)wval + cntpct_el0;

	vtimer_remove_irq(hypctx, vcpu);
	if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) {
		vtimer_schedule_irq(hypctx, true);
	}

	return (0);
}

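/*
 * newbus glue: the virtual timer attaches as a child of the generic timer
 * and installs vtimer_virtual_timer_intr as the filter for its interrupt.
 */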
struct vtimer_softc {
	struct resource *res;
	void *ihl;
	int rid;
};

static int
vtimer_probe(device_t dev)
{
	device_set_desc(dev, "Virtual timer");
	return (BUS_PROBE_DEFAULT);
}

static int
vtimer_attach(device_t dev)
{
	struct vtimer_softc *sc;

	sc = device_get_softc(dev);

	sc->rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->rid, RF_ACTIVE);
	if (sc->res == NULL)
		return (ENXIO);

	bus_setup_intr(dev, sc->res, INTR_TYPE_CLK, vtimer_virtual_timer_intr,
	    NULL, NULL, &sc->ihl);

	return (0);
}

static device_method_t vtimer_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vtimer_probe),
	DEVMETHOD(device_attach,	vtimer_attach),

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(vtimer, vtimer_driver, vtimer_methods,
    sizeof(struct vtimer_softc));

DRIVER_MODULE(vtimer, generic_timer, vtimer_driver, 0, 0);