xref: /dragonfly/sys/platform/pc64/isa/clock.c (revision cac12823)
1c8fe38aeSMatthew Dillon /*-
2c8fe38aeSMatthew Dillon  * Copyright (c) 1990 The Regents of the University of California.
3*cac12823SMatthew Dillon  * Copyright (c) 2008-2021 The DragonFly Project.  All rights reserved.
4c8fe38aeSMatthew Dillon  *
5c8fe38aeSMatthew Dillon  * This code is derived from software contributed to Berkeley by
6c8fe38aeSMatthew Dillon  * William Jolitz and Don Ahn.
7c8fe38aeSMatthew Dillon  *
8*cac12823SMatthew Dillon  * This code is derived from software contributed to The DragonFly Project
9*cac12823SMatthew Dillon  * by Matthew Dillon <dillon@backplane.com>
10*cac12823SMatthew Dillon  *
11c8fe38aeSMatthew Dillon  * Redistribution and use in source and binary forms, with or without
12c8fe38aeSMatthew Dillon  * modification, are permitted provided that the following conditions
13c8fe38aeSMatthew Dillon  * are met:
14c8fe38aeSMatthew Dillon  * 1. Redistributions of source code must retain the above copyright
15c8fe38aeSMatthew Dillon  *    notice, this list of conditions and the following disclaimer.
16c8fe38aeSMatthew Dillon  * 2. Redistributions in binary form must reproduce the above copyright
17c8fe38aeSMatthew Dillon  *    notice, this list of conditions and the following disclaimer in the
18c8fe38aeSMatthew Dillon  *    documentation and/or other materials provided with the distribution.
192c64e990Szrj  * 3. Neither the name of the University nor the names of its contributors
20c8fe38aeSMatthew Dillon  *    may be used to endorse or promote products derived from this software
21c8fe38aeSMatthew Dillon  *    without specific prior written permission.
22c8fe38aeSMatthew Dillon  *
23c8fe38aeSMatthew Dillon  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24c8fe38aeSMatthew Dillon  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25c8fe38aeSMatthew Dillon  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26c8fe38aeSMatthew Dillon  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27c8fe38aeSMatthew Dillon  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28c8fe38aeSMatthew Dillon  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29c8fe38aeSMatthew Dillon  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30c8fe38aeSMatthew Dillon  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31c8fe38aeSMatthew Dillon  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32c8fe38aeSMatthew Dillon  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33c8fe38aeSMatthew Dillon  * SUCH DAMAGE.
34c8fe38aeSMatthew Dillon  *
35c8fe38aeSMatthew Dillon  *	from: @(#)clock.c	7.2 (Berkeley) 5/12/91
36c8fe38aeSMatthew Dillon  * $FreeBSD: src/sys/i386/isa/clock.c,v 1.149.2.6 2002/11/02 04:41:50 iwasaki Exp $
37c8fe38aeSMatthew Dillon  */
38c8fe38aeSMatthew Dillon 
39c8fe38aeSMatthew Dillon /*
40c8fe38aeSMatthew Dillon  * Routines to handle clock hardware.
41c8fe38aeSMatthew Dillon  */
42c8fe38aeSMatthew Dillon 
43c8fe38aeSMatthew Dillon /*
44c8fe38aeSMatthew Dillon  * inittodr, settodr and support routines written
45c8fe38aeSMatthew Dillon  * by Christoph Robitschko <chmr@edvz.tu-graz.ac.at>
46c8fe38aeSMatthew Dillon  *
47c8fe38aeSMatthew Dillon  * reintroduced and updated by Chris Stenton <chris@gnome.co.uk> 8/10/94
48c8fe38aeSMatthew Dillon  */
49c8fe38aeSMatthew Dillon 
5040672791SSepherosa Ziehau #if 0
5140672791SSepherosa Ziehau #include "opt_clock.h"
5240672791SSepherosa Ziehau #endif
53c8fe38aeSMatthew Dillon 
54c8fe38aeSMatthew Dillon #include <sys/param.h>
55c8fe38aeSMatthew Dillon #include <sys/systm.h>
56c8fe38aeSMatthew Dillon #include <sys/eventhandler.h>
57c8fe38aeSMatthew Dillon #include <sys/time.h>
58c8fe38aeSMatthew Dillon #include <sys/kernel.h>
59c8fe38aeSMatthew Dillon #include <sys/bus.h>
60c8fe38aeSMatthew Dillon #include <sys/sysctl.h>
61c8fe38aeSMatthew Dillon #include <sys/cons.h>
62ce7866b8SMatthew Dillon #include <sys/kbio.h>
63c8fe38aeSMatthew Dillon #include <sys/systimer.h>
64c8fe38aeSMatthew Dillon #include <sys/globaldata.h>
65c8fe38aeSMatthew Dillon #include <sys/machintr.h>
661b505979SSepherosa Ziehau #include <sys/interrupt.h>
67c8fe38aeSMatthew Dillon 
68ce7866b8SMatthew Dillon #include <sys/thread2.h>
69ce7866b8SMatthew Dillon 
70c8fe38aeSMatthew Dillon #include <machine/clock.h>
71c8fe38aeSMatthew Dillon #include <machine/cputypes.h>
72c8fe38aeSMatthew Dillon #include <machine/frame.h>
73c8fe38aeSMatthew Dillon #include <machine/ipl.h>
74c8fe38aeSMatthew Dillon #include <machine/limits.h>
75c8fe38aeSMatthew Dillon #include <machine/md_var.h>
76c8fe38aeSMatthew Dillon #include <machine/psl.h>
77c8fe38aeSMatthew Dillon #include <machine/segments.h>
78c8fe38aeSMatthew Dillon #include <machine/smp.h>
79c8fe38aeSMatthew Dillon #include <machine/specialreg.h>
8057a9c56bSSepherosa Ziehau #include <machine/intr_machdep.h>
81c8fe38aeSMatthew Dillon 
82ed4d621dSSepherosa Ziehau #include <machine_base/apic/ioapic.h>
836b809ec7SSepherosa Ziehau #include <machine_base/apic/ioapic_abi.h>
84c8fe38aeSMatthew Dillon #include <machine_base/icu/icu.h>
850855a2afSJordan Gordeev #include <bus/isa/isa.h>
86c8fe38aeSMatthew Dillon #include <bus/isa/rtc.h>
87c8fe38aeSMatthew Dillon #include <machine_base/isa/timerreg.h>
88c8fe38aeSMatthew Dillon 
891a3a6ceeSImre Vadász SET_DECLARE(timecounter_init_set, const timecounter_init_t);
901a3a6ceeSImre Vadász TIMECOUNTER_INIT(placeholder, NULL);
911a3a6ceeSImre Vadász 
92c8fe38aeSMatthew Dillon static void i8254_restore(void);
93c8fe38aeSMatthew Dillon static void resettodr_on_shutdown(void *arg __unused);
94c8fe38aeSMatthew Dillon 
95c8fe38aeSMatthew Dillon /*
96c8fe38aeSMatthew Dillon  * 32-bit time_t's can't reach leap years before 1904 or after 2036, so we
97c8fe38aeSMatthew Dillon  * can use a simple formula for leap years.
98c8fe38aeSMatthew Dillon  */
99c8fe38aeSMatthew Dillon #define	LEAPYEAR(y) ((u_int)(y) % 4 == 0)
100c8fe38aeSMatthew Dillon #define DAYSPERYEAR   (31+28+31+30+31+30+31+31+30+31+30+31)
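/*
 * Within that window the plain modulo-4 test agrees with the full Gregorian
 * rule because 2000 is divisible by 400; e.g. LEAPYEAR(1996) and
 * LEAPYEAR(2000) are both true while LEAPYEAR(1999) is not.
 */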
101c8fe38aeSMatthew Dillon 
102c8fe38aeSMatthew Dillon #ifndef TIMER_FREQ
103c8fe38aeSMatthew Dillon #define TIMER_FREQ   1193182
104c8fe38aeSMatthew Dillon #endif
105c8fe38aeSMatthew Dillon 
106c8fe38aeSMatthew Dillon static uint8_t i8254_walltimer_sel;
107c8fe38aeSMatthew Dillon static uint16_t i8254_walltimer_cntr;
108db2ec6f8SSascha Wildner static int timer0_running;
109c8fe38aeSMatthew Dillon 
110c8fe38aeSMatthew Dillon int	adjkerntz;		/* local offset from GMT in seconds */
111c8fe38aeSMatthew Dillon int	disable_rtc_set;	/* disable resettodr() if != 0 */
112c8fe38aeSMatthew Dillon int	tsc_present;
1135a81b19fSSepherosa Ziehau int	tsc_invariant;
114dda44f1eSSepherosa Ziehau int	tsc_mpsync;
115c8fe38aeSMatthew Dillon int	wall_cmos_clock;	/* wall CMOS clock assumed if != 0 */
1165b49787bSMatthew Dillon tsc_uclock_t tsc_frequency;
1175b49787bSMatthew Dillon tsc_uclock_t tsc_oneus_approx;	/* always at least 1, approx only */
1185b49787bSMatthew Dillon 
119c8fe38aeSMatthew Dillon enum tstate { RELEASED, ACQUIRED };
120db2ec6f8SSascha Wildner static enum tstate timer0_state;
121db2ec6f8SSascha Wildner static enum tstate timer1_state;
122db2ec6f8SSascha Wildner static enum tstate timer2_state;
123c8fe38aeSMatthew Dillon 
1241a3a6ceeSImre Vadász int	i8254_cputimer_disable;	/* No need to initialize i8254 cputimer. */
1251a3a6ceeSImre Vadász 
126c8fe38aeSMatthew Dillon static	int	beeping = 0;
127c8fe38aeSMatthew Dillon static	const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
128c8fe38aeSMatthew Dillon static	u_char	rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
129c8fe38aeSMatthew Dillon static	u_char	rtc_statusb = RTCSB_24HR | RTCSB_PINTR;
130c8fe38aeSMatthew Dillon static  int	rtc_loaded;
131c8fe38aeSMatthew Dillon 
1328fbc264dSMatthew Dillon static	sysclock_t i8254_cputimer_div;
133c8fe38aeSMatthew Dillon 
13440672791SSepherosa Ziehau static int i8254_nointr;
1354d517764SSepherosa Ziehau static int i8254_intr_disable = 1;
13640672791SSepherosa Ziehau TUNABLE_INT("hw.i8254.intr_disable", &i8254_intr_disable);
13740672791SSepherosa Ziehau 
138242fd95fSImre Vadász static int calibrate_timers_with_rtc = 0;
139242fd95fSImre Vadász TUNABLE_INT("hw.calibrate_timers_with_rtc", &calibrate_timers_with_rtc);
140242fd95fSImre Vadász 
1414098a6e5SImre Vadász static int calibrate_tsc_fast = 1;
1424098a6e5SImre Vadász TUNABLE_INT("hw.calibrate_tsc_fast", &calibrate_tsc_fast);
1434098a6e5SImre Vadász 
1444098a6e5SImre Vadász static int calibrate_test;
1454098a6e5SImre Vadász TUNABLE_INT("hw.tsc_calibrate_test", &calibrate_test);
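/*
 * The knobs above are boot-time tunables fetched with TUNABLE_INT(), so
 * they are set from the loader rather than adjusted at run time.  A hedged
 * example of how they might appear in /boot/loader.conf (values are purely
 * illustrative, not recommendations):
 *
 *	hw.i8254.intr_disable="0"		# keep the i8254 interrupt timer
 *	hw.calibrate_timers_with_rtc="1"	# enable RTC-based calibration
 *	hw.calibrate_tsc_fast="1"		# two-point fast TSC calibration
 *	hw.tsc_calibrate_test="0"		# no calibration self-test output
 */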
1464098a6e5SImre Vadász 
147c8fe38aeSMatthew Dillon static struct callout sysbeepstop_ch;
148c8fe38aeSMatthew Dillon 
149c8fe38aeSMatthew Dillon static sysclock_t i8254_cputimer_count(void);
150c8fe38aeSMatthew Dillon static void i8254_cputimer_construct(struct cputimer *cputimer, sysclock_t last);
151c8fe38aeSMatthew Dillon static void i8254_cputimer_destruct(struct cputimer *cputimer);
152c8fe38aeSMatthew Dillon 
153c8fe38aeSMatthew Dillon static struct cputimer	i8254_cputimer = {
1540087561dSSepherosa Ziehau     .next		= SLIST_ENTRY_INITIALIZER,
1550087561dSSepherosa Ziehau     .name		= "i8254",
1560087561dSSepherosa Ziehau     .pri		= CPUTIMER_PRI_8254,
1570087561dSSepherosa Ziehau     .type		= 0,	/* determined later */
1580087561dSSepherosa Ziehau     .count		= i8254_cputimer_count,
1590087561dSSepherosa Ziehau     .fromhz		= cputimer_default_fromhz,
1600087561dSSepherosa Ziehau     .fromus		= cputimer_default_fromus,
1610087561dSSepherosa Ziehau     .construct		= i8254_cputimer_construct,
1620087561dSSepherosa Ziehau     .destruct		= i8254_cputimer_destruct,
1630087561dSSepherosa Ziehau     .freq		= TIMER_FREQ
164c8fe38aeSMatthew Dillon };
165c8fe38aeSMatthew Dillon 
16640672791SSepherosa Ziehau static void i8254_intr_reload(struct cputimer_intr *, sysclock_t);
16740672791SSepherosa Ziehau static void i8254_intr_config(struct cputimer_intr *, const struct cputimer *);
16840672791SSepherosa Ziehau static void i8254_intr_initclock(struct cputimer_intr *, boolean_t);
16940672791SSepherosa Ziehau 
17040672791SSepherosa Ziehau static struct cputimer_intr i8254_cputimer_intr = {
17140672791SSepherosa Ziehau     .freq = TIMER_FREQ,
17240672791SSepherosa Ziehau     .reload = i8254_intr_reload,
17340672791SSepherosa Ziehau     .enable = cputimer_intr_default_enable,
17440672791SSepherosa Ziehau     .config = i8254_intr_config,
17540672791SSepherosa Ziehau     .restart = cputimer_intr_default_restart,
17640672791SSepherosa Ziehau     .pmfixup = cputimer_intr_default_pmfixup,
17740672791SSepherosa Ziehau     .initclock = i8254_intr_initclock,
17842098fc3SSepherosa Ziehau     .pcpuhand = NULL,
17940672791SSepherosa Ziehau     .next = SLIST_ENTRY_INITIALIZER,
18040672791SSepherosa Ziehau     .name = "i8254",
18140672791SSepherosa Ziehau     .type = CPUTIMER_INTR_8254,
18240672791SSepherosa Ziehau     .prio = CPUTIMER_INTR_PRIO_8254,
18342098fc3SSepherosa Ziehau     .caps = CPUTIMER_INTR_CAP_PS,
18442098fc3SSepherosa Ziehau     .priv = NULL
18540672791SSepherosa Ziehau };
18640672791SSepherosa Ziehau 
187c8fe38aeSMatthew Dillon /*
1881eb5a42bSMatthew Dillon  * Use this to lwkt_switch() when the scheduler clock is not
1891eb5a42bSMatthew Dillon  * yet running, otherwise lwkt_switch() won't do anything.
1901eb5a42bSMatthew Dillon  * XXX needs cleaning up in lwkt_thread.c
1911eb5a42bSMatthew Dillon  */
1921eb5a42bSMatthew Dillon static void
1931eb5a42bSMatthew Dillon lwkt_force_switch(void)
1941eb5a42bSMatthew Dillon {
1951eb5a42bSMatthew Dillon 	crit_enter();
1961eb5a42bSMatthew Dillon 	lwkt_schedulerclock(curthread);
1971eb5a42bSMatthew Dillon 	crit_exit();
1981eb5a42bSMatthew Dillon 	lwkt_switch();
1991eb5a42bSMatthew Dillon }
2001eb5a42bSMatthew Dillon 
2011eb5a42bSMatthew Dillon /*
202c8fe38aeSMatthew Dillon  * timer0 clock interrupt.  Timer0 is in one-shot mode and has stopped
203c8fe38aeSMatthew Dillon  * counting as of this interrupt.  We use timer1 in free-running mode (not
204c8fe38aeSMatthew Dillon  * generating any interrupts) as our main counter.  Each cpu has timeouts
205c8fe38aeSMatthew Dillon  * pending.
206c8fe38aeSMatthew Dillon  *
207c8fe38aeSMatthew Dillon  * This code is INTR_MPSAFE and may be called without the BGL held.
208c8fe38aeSMatthew Dillon  */
209c8fe38aeSMatthew Dillon static void
210c8fe38aeSMatthew Dillon clkintr(void *dummy, void *frame_arg)
211c8fe38aeSMatthew Dillon {
212c8fe38aeSMatthew Dillon 	static sysclock_t sysclock_count;	/* NOTE! Must be static */
213c8fe38aeSMatthew Dillon 	struct globaldata *gd = mycpu;
214c8fe38aeSMatthew Dillon 	struct globaldata *gscan;
215c8fe38aeSMatthew Dillon 	int n;
216c8fe38aeSMatthew Dillon 
217c8fe38aeSMatthew Dillon 	/*
218c8fe38aeSMatthew Dillon 	 * SWSTROBE mode is a one-shot, the timer is no longer running
219c8fe38aeSMatthew Dillon 	 */
220c8fe38aeSMatthew Dillon 	timer0_running = 0;
221c8fe38aeSMatthew Dillon 
222c8fe38aeSMatthew Dillon 	/*
223c8fe38aeSMatthew Dillon 	 * XXX the dispatcher needs work.  right now we call systimer_intr()
224c8fe38aeSMatthew Dillon 	 * directly or via IPI for any cpu with systimers queued, which is
225c8fe38aeSMatthew Dillon 	 * usually *ALL* of them.  We need to use the LAPIC timer for this.
226c8fe38aeSMatthew Dillon 	 */
227c8fe38aeSMatthew Dillon 	sysclock_count = sys_cputimer->count();
228c8fe38aeSMatthew Dillon 	for (n = 0; n < ncpus; ++n) {
229c8fe38aeSMatthew Dillon 	    gscan = globaldata_find(n);
230c8fe38aeSMatthew Dillon 	    if (TAILQ_FIRST(&gscan->gd_systimerq) == NULL)
231c8fe38aeSMatthew Dillon 		continue;
232c8fe38aeSMatthew Dillon 	    if (gscan != gd) {
233c8fe38aeSMatthew Dillon 		lwkt_send_ipiq3(gscan, (ipifunc3_t)systimer_intr,
23496d52ac8SSepherosa Ziehau 				&sysclock_count, 1);
235c8fe38aeSMatthew Dillon 	    } else {
236c8fe38aeSMatthew Dillon 		systimer_intr(&sysclock_count, 0, frame_arg);
237c8fe38aeSMatthew Dillon 	    }
238c8fe38aeSMatthew Dillon 	}
239c8fe38aeSMatthew Dillon }
240c8fe38aeSMatthew Dillon 
241c8fe38aeSMatthew Dillon 
242c8fe38aeSMatthew Dillon /*
243c8fe38aeSMatthew Dillon  * NOTE! not MP safe.
244c8fe38aeSMatthew Dillon  */
245c8fe38aeSMatthew Dillon int
246c8fe38aeSMatthew Dillon acquire_timer2(int mode)
247c8fe38aeSMatthew Dillon {
248c8fe38aeSMatthew Dillon 	if (timer2_state != RELEASED)
249c8fe38aeSMatthew Dillon 		return (-1);
250c8fe38aeSMatthew Dillon 	timer2_state = ACQUIRED;
251c8fe38aeSMatthew Dillon 
252c8fe38aeSMatthew Dillon 	/*
253c8fe38aeSMatthew Dillon 	 * This access to the timer registers is as atomic as possible
254c8fe38aeSMatthew Dillon 	 * because it is a single instruction.  We could do better if we
255c8fe38aeSMatthew Dillon 	 * knew the rate.
256c8fe38aeSMatthew Dillon 	 */
257c8fe38aeSMatthew Dillon 	outb(TIMER_MODE, TIMER_SEL2 | (mode & 0x3f));
258c8fe38aeSMatthew Dillon 	return (0);
259c8fe38aeSMatthew Dillon }
260c8fe38aeSMatthew Dillon 
261c8fe38aeSMatthew Dillon int
262c8fe38aeSMatthew Dillon release_timer2(void)
263c8fe38aeSMatthew Dillon {
264c8fe38aeSMatthew Dillon 	if (timer2_state != ACQUIRED)
265c8fe38aeSMatthew Dillon 		return (-1);
266c8fe38aeSMatthew Dillon 	outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT);
267c8fe38aeSMatthew Dillon 	timer2_state = RELEASED;
268c8fe38aeSMatthew Dillon 	return (0);
269c8fe38aeSMatthew Dillon }
270c8fe38aeSMatthew Dillon 
271c8fe38aeSMatthew Dillon #include "opt_ddb.h"
272c8fe38aeSMatthew Dillon #ifdef DDB
273c8fe38aeSMatthew Dillon #include <ddb/ddb.h>
274c8fe38aeSMatthew Dillon 
275c8fe38aeSMatthew Dillon DB_SHOW_COMMAND(rtc, rtc)
276c8fe38aeSMatthew Dillon {
277c8fe38aeSMatthew Dillon 	kprintf("%02x/%02x/%02x %02x:%02x:%02x, A = %02x, B = %02x, C = %02x\n",
278c8fe38aeSMatthew Dillon 	       rtcin(RTC_YEAR), rtcin(RTC_MONTH), rtcin(RTC_DAY),
279c8fe38aeSMatthew Dillon 	       rtcin(RTC_HRS), rtcin(RTC_MIN), rtcin(RTC_SEC),
280c8fe38aeSMatthew Dillon 	       rtcin(RTC_STATUSA), rtcin(RTC_STATUSB), rtcin(RTC_INTR));
281c8fe38aeSMatthew Dillon }
282c8fe38aeSMatthew Dillon #endif /* DDB */
283c8fe38aeSMatthew Dillon 
284c8fe38aeSMatthew Dillon /*
285c8fe38aeSMatthew Dillon  * Return the current cpu timer count as a 32 bit integer.
286c8fe38aeSMatthew Dillon  */
287c8fe38aeSMatthew Dillon static
288c8fe38aeSMatthew Dillon sysclock_t
289c8fe38aeSMatthew Dillon i8254_cputimer_count(void)
290c8fe38aeSMatthew Dillon {
291e28c8ef4SSascha Wildner 	static uint16_t cputimer_last;
292e28c8ef4SSascha Wildner 	uint16_t count;
293c8fe38aeSMatthew Dillon 	sysclock_t ret;
294c8fe38aeSMatthew Dillon 
295c8fe38aeSMatthew Dillon 	clock_lock();
296c8fe38aeSMatthew Dillon 	outb(TIMER_MODE, i8254_walltimer_sel | TIMER_LATCH);
297e28c8ef4SSascha Wildner 	count = (uint8_t)inb(i8254_walltimer_cntr);	/* get countdown */
298e28c8ef4SSascha Wildner 	count |= ((uint8_t)inb(i8254_walltimer_cntr) << 8);
299c8fe38aeSMatthew Dillon 	count = -count;					/* -> countup */
300c8fe38aeSMatthew Dillon 	if (count < cputimer_last)			/* rollover */
3018fbc264dSMatthew Dillon 		i8254_cputimer.base += 0x00010000U;
302c8fe38aeSMatthew Dillon 	ret = i8254_cputimer.base | count;
303c8fe38aeSMatthew Dillon 	cputimer_last = count;
304c8fe38aeSMatthew Dillon 	clock_unlock();
3058fbc264dSMatthew Dillon 
306c8fe38aeSMatthew Dillon 	return(ret);
307c8fe38aeSMatthew Dillon }
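/*
 * Worked example of the conversion above (illustrative numbers): the 8254
 * counts down, so a latched value of 0xFFF0 negates to count = 0x0010 in
 * 16-bit arithmetic, i.e. 16 ticks into the current 2^16-tick window.  When
 * the new count-up value is smaller than the previous one the hardware has
 * wrapped, and 0x10000 is folded into i8254_cputimer.base so that
 * base | count keeps increasing monotonically.
 */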
308c8fe38aeSMatthew Dillon 
309c8fe38aeSMatthew Dillon /*
310c8fe38aeSMatthew Dillon  * This function is called whenever the system timebase changes, allowing
311c8fe38aeSMatthew Dillon  * us to calculate what is needed to convert a system timebase tick
312c8fe38aeSMatthew Dillon  * into an 8254 tick for the interrupt timer.  If we can convert to a
313c8fe38aeSMatthew Dillon  * simple shift, multiplication, or division, we do so.  Otherwise 64
314c8fe38aeSMatthew Dillon  * bit arithmetic is required every time the interrupt timer is reloaded.
315c8fe38aeSMatthew Dillon  */
31640672791SSepherosa Ziehau static void
31740672791SSepherosa Ziehau i8254_intr_config(struct cputimer_intr *cti, const struct cputimer *timer)
318c8fe38aeSMatthew Dillon {
3198fbc264dSMatthew Dillon     sysclock_t freq;
3208fbc264dSMatthew Dillon     sysclock_t div;
321c8fe38aeSMatthew Dillon 
322c8fe38aeSMatthew Dillon     /*
323c8fe38aeSMatthew Dillon      * Will a simple divide do the trick?
324c8fe38aeSMatthew Dillon      */
32540672791SSepherosa Ziehau     div = (timer->freq + (cti->freq / 2)) / cti->freq;
32640672791SSepherosa Ziehau     freq = cti->freq * div;
327c8fe38aeSMatthew Dillon 
328c8fe38aeSMatthew Dillon     if (freq >= timer->freq - 1 && freq <= timer->freq + 1)
329c8fe38aeSMatthew Dillon 	i8254_cputimer_div = div;
330c8fe38aeSMatthew Dillon     else
331c8fe38aeSMatthew Dillon 	i8254_cputimer_div = 0;
332c8fe38aeSMatthew Dillon }
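/*
 * Example of the check above (illustrative numbers): with an 8254 system
 * timebase (timer->freq == 1193182 == cti->freq), div computes to 1 and the
 * cheap divide path is used on every reload.  With a 14318180 Hz timebase
 * (a common ACPI/HPET base frequency, assumed here only for illustration),
 * div would be 12, but 12 * 1193182 = 14318184 misses 14318180 by more than
 * one count, so i8254_cputimer_div stays 0 and i8254_intr_reload() falls
 * back to the 64-bit muldivu64() conversion.
 */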
333c8fe38aeSMatthew Dillon 
334c8fe38aeSMatthew Dillon /*
335c8fe38aeSMatthew Dillon  * Reload for the next timeout.  It is possible for the reload value
336c8fe38aeSMatthew Dillon  * to be 0 or negative, indicating that an immediate timer interrupt
337c8fe38aeSMatthew Dillon  * is desired.  For now make the minimum 2 ticks.
338c8fe38aeSMatthew Dillon  *
339c8fe38aeSMatthew Dillon  * We may have to convert from the system timebase to the 8254 timebase.
340c8fe38aeSMatthew Dillon  */
341c5b8324cSSepherosa Ziehau static void
34240672791SSepherosa Ziehau i8254_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
343c8fe38aeSMatthew Dillon {
344e28c8ef4SSascha Wildner     uint16_t count;
345c8fe38aeSMatthew Dillon 
346feadd4aeSMatthew Dillon     if ((ssysclock_t)reload < 0)
347feadd4aeSMatthew Dillon 	    reload = 1;
348c8fe38aeSMatthew Dillon     if (i8254_cputimer_div)
349c8fe38aeSMatthew Dillon 	reload /= i8254_cputimer_div;
350c8fe38aeSMatthew Dillon     else
3518fbc264dSMatthew Dillon 	reload = muldivu64(reload, cti->freq, sys_cputimer->freq);
352c8fe38aeSMatthew Dillon 
3538fbc264dSMatthew Dillon     if (reload < 2)
3548fbc264dSMatthew Dillon 	reload = 2;		/* minimum count */
3558fbc264dSMatthew Dillon     if (reload > 0xFFFF)
3568fbc264dSMatthew Dillon 	reload = 0xFFFF;	/* almost full count (0 is full count) */
357c8fe38aeSMatthew Dillon 
358c8fe38aeSMatthew Dillon     clock_lock();
359c8fe38aeSMatthew Dillon     if (timer0_running) {
360c8fe38aeSMatthew Dillon 	outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);	/* count-down timer */
361e28c8ef4SSascha Wildner 	count = (uint8_t)inb(TIMER_CNTR0);		/* lsb */
362e28c8ef4SSascha Wildner 	count |= ((uint8_t)inb(TIMER_CNTR0) << 8);	/* msb */
363c8fe38aeSMatthew Dillon 	if (reload < count) {
364c8fe38aeSMatthew Dillon 	    outb(TIMER_MODE, TIMER_SEL0 | TIMER_SWSTROBE | TIMER_16BIT);
365e28c8ef4SSascha Wildner 	    outb(TIMER_CNTR0, (uint8_t)reload); 	/* lsb */
366e28c8ef4SSascha Wildner 	    outb(TIMER_CNTR0, (uint8_t)(reload >> 8));	/* msb */
367c8fe38aeSMatthew Dillon 	}
368c8fe38aeSMatthew Dillon     } else {
369c8fe38aeSMatthew Dillon 	timer0_running = 1;
370c8fe38aeSMatthew Dillon 	outb(TIMER_MODE, TIMER_SEL0 | TIMER_SWSTROBE | TIMER_16BIT);
371e28c8ef4SSascha Wildner 	outb(TIMER_CNTR0, (uint8_t)reload); 		/* lsb */
372e28c8ef4SSascha Wildner 	outb(TIMER_CNTR0, (uint8_t)(reload >> 8));	/* msb */
373c8fe38aeSMatthew Dillon     }
374c8fe38aeSMatthew Dillon     clock_unlock();
375c8fe38aeSMatthew Dillon }
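/*
 * Illustrative numbers for the conversion above: a 1 ms timeout while the
 * system timebase runs at 14318180 Hz (an assumed ACPI/HPET-class frequency)
 * arrives as roughly 14318 timebase ticks; muldivu64(14318, 1193182,
 * 14318180) turns that into about 1193 ticks of the 1.193182 MHz 8254,
 * which is then clamped to the [2, 0xFFFF] range the 16-bit one-shot can
 * actually hold.
 */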
376c8fe38aeSMatthew Dillon 
377c8fe38aeSMatthew Dillon /*
378c8fe38aeSMatthew Dillon  * DELAY(usec)	     - Spin for the specified number of microseconds.
379c8fe38aeSMatthew Dillon  * DRIVERSLEEP(usec) - Spin for the specified number of microseconds,
380c8fe38aeSMatthew Dillon  *		       but do a thread switch in the loop
381c8fe38aeSMatthew Dillon  *
382c8fe38aeSMatthew Dillon  * Relies on timer 1 counting down from (cputimer_freq / hz)
383c8fe38aeSMatthew Dillon  * Note: timer had better have been programmed before this is first used!
384c8fe38aeSMatthew Dillon  */
385c8fe38aeSMatthew Dillon static void
386c8fe38aeSMatthew Dillon DODELAY(int n, int doswitch)
387c8fe38aeSMatthew Dillon {
3888a224941SSepherosa Ziehau 	ssysclock_t delta, ticks_left;
3898a224941SSepherosa Ziehau 	sysclock_t prev_tick, tick;
390c8fe38aeSMatthew Dillon 
391c8fe38aeSMatthew Dillon #ifdef DELAYDEBUG
392c8fe38aeSMatthew Dillon 	int getit_calls = 1;
393c8fe38aeSMatthew Dillon 	int n1;
394c8fe38aeSMatthew Dillon 	static int state = 0;
395c8fe38aeSMatthew Dillon 
396c8fe38aeSMatthew Dillon 	if (state == 0) {
397c8fe38aeSMatthew Dillon 		state = 1;
398c8fe38aeSMatthew Dillon 		for (n1 = 1; n1 <= 10000000; n1 *= 10)
399c8fe38aeSMatthew Dillon 			DELAY(n1);
400c8fe38aeSMatthew Dillon 		state = 2;
401c8fe38aeSMatthew Dillon 	}
402c8fe38aeSMatthew Dillon 	if (state == 1)
403c8fe38aeSMatthew Dillon 		kprintf("DELAY(%d)...", n);
404c8fe38aeSMatthew Dillon #endif
405c8fe38aeSMatthew Dillon 	/*
406c8fe38aeSMatthew Dillon 	 * Guard against the timer being uninitialized if we are called
407c8fe38aeSMatthew Dillon 	 * early for console i/o.
408c8fe38aeSMatthew Dillon 	 */
4091a3a6ceeSImre Vadász 	if (timer0_state == RELEASED && i8254_cputimer_disable == 0)
410c8fe38aeSMatthew Dillon 		i8254_restore();
411c8fe38aeSMatthew Dillon 
412c8fe38aeSMatthew Dillon 	/*
413c8fe38aeSMatthew Dillon 	 * Read the counter first, so that the rest of the setup overhead is
414c8fe38aeSMatthew Dillon 	 * counted.  Then calculate the number of hardware timer ticks
415c8fe38aeSMatthew Dillon 	 * required, rounding up to be sure we delay at least the requested
416c8fe38aeSMatthew Dillon 	 * number of microseconds.
417c8fe38aeSMatthew Dillon 	 */
418c8fe38aeSMatthew Dillon 	prev_tick = sys_cputimer->count();
4198fbc264dSMatthew Dillon 	ticks_left = muldivu64(n, sys_cputimer->freq + 999999, 1000000);
420c8fe38aeSMatthew Dillon 
421c8fe38aeSMatthew Dillon 	/*
422c8fe38aeSMatthew Dillon 	 * Loop until done.
423c8fe38aeSMatthew Dillon 	 */
424c8fe38aeSMatthew Dillon 	while (ticks_left > 0) {
425c8fe38aeSMatthew Dillon 		tick = sys_cputimer->count();
426c8fe38aeSMatthew Dillon #ifdef DELAYDEBUG
427c8fe38aeSMatthew Dillon 		++getit_calls;
428c8fe38aeSMatthew Dillon #endif
429c8fe38aeSMatthew Dillon 		delta = tick - prev_tick;
430c8fe38aeSMatthew Dillon 		prev_tick = tick;
431c8fe38aeSMatthew Dillon 		if (delta < 0)
432c8fe38aeSMatthew Dillon 			delta = 0;
433c8fe38aeSMatthew Dillon 		ticks_left -= delta;
434c8fe38aeSMatthew Dillon 		if (doswitch && ticks_left > 0)
435c8fe38aeSMatthew Dillon 			lwkt_switch();
436c5724852SMatthew Dillon 		cpu_pause();
437c8fe38aeSMatthew Dillon 	}
438c8fe38aeSMatthew Dillon #ifdef DELAYDEBUG
439c8fe38aeSMatthew Dillon 	if (state == 1)
440c8fe38aeSMatthew Dillon 		kprintf(" %d calls to getit() at %d usec each\n",
441c8fe38aeSMatthew Dillon 		       getit_calls, (n + 5) / getit_calls);
442c8fe38aeSMatthew Dillon #endif
443c8fe38aeSMatthew Dillon }
444c8fe38aeSMatthew Dillon 
44577912481SMatthew Dillon /*
44677912481SMatthew Dillon  * DELAY() never switches.
44777912481SMatthew Dillon  */
448c8fe38aeSMatthew Dillon void
449c8fe38aeSMatthew Dillon DELAY(int n)
450c8fe38aeSMatthew Dillon {
451c8fe38aeSMatthew Dillon 	DODELAY(n, 0);
452c8fe38aeSMatthew Dillon }
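/*
 * A hedged usage sketch for DELAY(): a short busy-wait between device
 * register accesses where sleeping is not possible (early console or probe
 * code).  The MY_DEV_* macros and the 10 us settle time are hypothetical.
 */
#if 0
	outb(MY_DEV_RESET_PORT, 1);			/* strobe reset */
	DELAY(10);					/* let hardware settle */
	while ((inb(MY_DEV_STATUS_PORT) & MY_DEV_READY) == 0)
		DELAY(1);				/* poll in 1 us steps */
#endif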
453c8fe38aeSMatthew Dillon 
45477912481SMatthew Dillon /*
455d8129ed3SMatthew Dillon  * Returns non-zero if the specified time period has elapsed.  Call
456d8129ed3SMatthew Dillon  * first with the structure zeroed and tdd->us set to the timeout in us.
457d8129ed3SMatthew Dillon  */
458d8129ed3SMatthew Dillon int
459d8129ed3SMatthew Dillon CHECKTIMEOUT(TOTALDELAY *tdd)
460d8129ed3SMatthew Dillon {
461d8129ed3SMatthew Dillon 	sysclock_t delta;
462d8129ed3SMatthew Dillon 	int us;
463d8129ed3SMatthew Dillon 
464d8129ed3SMatthew Dillon 	if (tdd->started == 0) {
4651a3a6ceeSImre Vadász 		if (timer0_state == RELEASED && i8254_cputimer_disable == 0)
466d8129ed3SMatthew Dillon 			i8254_restore();
467d8129ed3SMatthew Dillon 		tdd->last_clock = sys_cputimer->count();
468d8129ed3SMatthew Dillon 		tdd->started = 1;
469d8129ed3SMatthew Dillon 		return(0);
470d8129ed3SMatthew Dillon 	}
471d8129ed3SMatthew Dillon 	delta = sys_cputimer->count() - tdd->last_clock;
4728fbc264dSMatthew Dillon 	us = muldivu64(delta, 1000000, sys_cputimer->freq);
4738fbc264dSMatthew Dillon 	tdd->last_clock += muldivu64(us, sys_cputimer->freq, 1000000);
474d8129ed3SMatthew Dillon 	tdd->us -= us;
4758fbc264dSMatthew Dillon 
476d8129ed3SMatthew Dillon 	return (tdd->us < 0);
477d8129ed3SMatthew Dillon }
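/*
 * A hedged usage sketch for CHECKTIMEOUT()/TOTALDELAY; device_ready() is a
 * hypothetical predicate standing in for whatever condition is polled on.
 */
#if 0
	TOTALDELAY td;

	bzero(&td, sizeof(td));
	td.us = 50000;				/* microsecond budget */
	while (!device_ready()) {
		if (CHECKTIMEOUT(&td))
			break;			/* budget exhausted */
		DELAY(10);
	}
#endif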
478d8129ed3SMatthew Dillon 
479d8129ed3SMatthew Dillon 
480d8129ed3SMatthew Dillon /*
48177912481SMatthew Dillon  * DRIVERSLEEP() does not switch if called with a spinlock held or
48277912481SMatthew Dillon  * from a hard interrupt.
48377912481SMatthew Dillon  */
484c8fe38aeSMatthew Dillon void
485c8fe38aeSMatthew Dillon DRIVERSLEEP(int usec)
486c8fe38aeSMatthew Dillon {
487c8fe38aeSMatthew Dillon 	globaldata_t gd = mycpu;
488c8fe38aeSMatthew Dillon 
4890846e4ceSMatthew Dillon 	if (gd->gd_intr_nesting_level || gd->gd_spinlocks) {
490c8fe38aeSMatthew Dillon 		DODELAY(usec, 0);
491c8fe38aeSMatthew Dillon 	} else {
492c8fe38aeSMatthew Dillon 		DODELAY(usec, 1);
493c8fe38aeSMatthew Dillon 	}
494c8fe38aeSMatthew Dillon }
495c8fe38aeSMatthew Dillon 
496c8fe38aeSMatthew Dillon static void
497c8fe38aeSMatthew Dillon sysbeepstop(void *chan)
498c8fe38aeSMatthew Dillon {
499c8fe38aeSMatthew Dillon 	outb(IO_PPI, inb(IO_PPI)&0xFC);	/* disable counter2 output to speaker */
500c8fe38aeSMatthew Dillon 	beeping = 0;
501c8fe38aeSMatthew Dillon 	release_timer2();
502c8fe38aeSMatthew Dillon }
503c8fe38aeSMatthew Dillon 
504c8fe38aeSMatthew Dillon int
505c8fe38aeSMatthew Dillon sysbeep(int pitch, int period)
506c8fe38aeSMatthew Dillon {
507c8fe38aeSMatthew Dillon 	if (acquire_timer2(TIMER_SQWAVE|TIMER_16BIT))
508c8fe38aeSMatthew Dillon 		return(-1);
5097caeaffeSMatthew Dillon 	if (sysbeep_enable == 0)
5107caeaffeSMatthew Dillon 		return(-1);
511c8fe38aeSMatthew Dillon 	/*
512c8fe38aeSMatthew Dillon 	 * Nobody else is using timer2, we do not need the clock lock
513c8fe38aeSMatthew Dillon 	 */
514c8fe38aeSMatthew Dillon 	outb(TIMER_CNTR2, pitch);
515c8fe38aeSMatthew Dillon 	outb(TIMER_CNTR2, (pitch>>8));
516c8fe38aeSMatthew Dillon 	if (!beeping) {
517c8fe38aeSMatthew Dillon 		/* enable counter2 output to speaker */
518c8fe38aeSMatthew Dillon 		outb(IO_PPI, inb(IO_PPI) | 3);
519c8fe38aeSMatthew Dillon 		beeping = period;
520c8fe38aeSMatthew Dillon 		callout_reset(&sysbeepstop_ch, period, sysbeepstop, NULL);
521c8fe38aeSMatthew Dillon 	}
522c8fe38aeSMatthew Dillon 	return (0);
523c8fe38aeSMatthew Dillon }
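/*
 * Example call (illustrative): pitch is the raw i8254 divisor for counter 2
 * and period is in callout ticks, so
 *
 *	sysbeep(TIMER_FREQ / 440, hz / 4);
 *
 * would request a ~440 Hz tone for roughly a quarter second, provided
 * timer2 is free and sysbeep_enable is set.
 */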
524c8fe38aeSMatthew Dillon 
525c8fe38aeSMatthew Dillon /*
526c8fe38aeSMatthew Dillon  * RTC support routines
527c8fe38aeSMatthew Dillon  */
528c8fe38aeSMatthew Dillon 
529c8fe38aeSMatthew Dillon int
530c8fe38aeSMatthew Dillon rtcin(int reg)
531c8fe38aeSMatthew Dillon {
532c8fe38aeSMatthew Dillon 	u_char val;
533c8fe38aeSMatthew Dillon 
534c8fe38aeSMatthew Dillon 	crit_enter();
535c8fe38aeSMatthew Dillon 	outb(IO_RTC, reg);
536c8fe38aeSMatthew Dillon 	inb(0x84);
537c8fe38aeSMatthew Dillon 	val = inb(IO_RTC + 1);
538c8fe38aeSMatthew Dillon 	inb(0x84);
539c8fe38aeSMatthew Dillon 	crit_exit();
540c8fe38aeSMatthew Dillon 	return (val);
541c8fe38aeSMatthew Dillon }
542c8fe38aeSMatthew Dillon 
543c8fe38aeSMatthew Dillon static __inline void
544c8fe38aeSMatthew Dillon writertc(u_char reg, u_char val)
545c8fe38aeSMatthew Dillon {
546c8fe38aeSMatthew Dillon 	crit_enter();
547c8fe38aeSMatthew Dillon 	inb(0x84);
548c8fe38aeSMatthew Dillon 	outb(IO_RTC, reg);
549c8fe38aeSMatthew Dillon 	inb(0x84);
550c8fe38aeSMatthew Dillon 	outb(IO_RTC + 1, val);
551c8fe38aeSMatthew Dillon 	inb(0x84);		/* XXX work around wrong order in rtcin() */
552c8fe38aeSMatthew Dillon 	crit_exit();
553c8fe38aeSMatthew Dillon }
554c8fe38aeSMatthew Dillon 
555c8fe38aeSMatthew Dillon static __inline int
556c8fe38aeSMatthew Dillon readrtc(int port)
557c8fe38aeSMatthew Dillon {
558c8fe38aeSMatthew Dillon 	return(bcd2bin(rtcin(port)));
559c8fe38aeSMatthew Dillon }
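/*
 * With the control-register setup used in this file the clock keeps its
 * time fields in BCD, so readrtc() is simply rtcin() plus bcd2bin(): a raw
 * rtcin(RTC_SEC) of 0x42, for example, becomes the binary value 42.
 */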
560c8fe38aeSMatthew Dillon 
561c8fe38aeSMatthew Dillon static u_int
562c8fe38aeSMatthew Dillon calibrate_clocks(void)
563c8fe38aeSMatthew Dillon {
5645b49787bSMatthew Dillon 	tsc_uclock_t old_tsc;
5658fbc264dSMatthew Dillon 	sysclock_t tot_count;
5668a224941SSepherosa Ziehau 	sysclock_t count, prev_count;
567c8fe38aeSMatthew Dillon 	int sec, start_sec, timeout;
568c8fe38aeSMatthew Dillon 
569c8fe38aeSMatthew Dillon 	if (bootverbose)
5705a81b19fSSepherosa Ziehau 	        kprintf("Calibrating clock(s) ...\n");
571c8fe38aeSMatthew Dillon 	if (!(rtcin(RTC_STATUSD) & RTCSD_PWR))
572c8fe38aeSMatthew Dillon 		goto fail;
573c8fe38aeSMatthew Dillon 	timeout = 100000000;
574c8fe38aeSMatthew Dillon 
575c8fe38aeSMatthew Dillon 	/* Read the mc146818A seconds counter. */
576c8fe38aeSMatthew Dillon 	for (;;) {
577c8fe38aeSMatthew Dillon 		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP)) {
578c8fe38aeSMatthew Dillon 			sec = rtcin(RTC_SEC);
579c8fe38aeSMatthew Dillon 			break;
580c8fe38aeSMatthew Dillon 		}
581c8fe38aeSMatthew Dillon 		if (--timeout == 0)
582c8fe38aeSMatthew Dillon 			goto fail;
583c8fe38aeSMatthew Dillon 	}
584c8fe38aeSMatthew Dillon 
585c8fe38aeSMatthew Dillon 	/* Wait for the mc146818A seconds counter to change. */
586c8fe38aeSMatthew Dillon 	start_sec = sec;
587c8fe38aeSMatthew Dillon 	for (;;) {
588c8fe38aeSMatthew Dillon 		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP)) {
589c8fe38aeSMatthew Dillon 			sec = rtcin(RTC_SEC);
590c8fe38aeSMatthew Dillon 			if (sec != start_sec)
591c8fe38aeSMatthew Dillon 				break;
592c8fe38aeSMatthew Dillon 		}
593c8fe38aeSMatthew Dillon 		if (--timeout == 0)
594c8fe38aeSMatthew Dillon 			goto fail;
595c8fe38aeSMatthew Dillon 	}
596c8fe38aeSMatthew Dillon 
597c8fe38aeSMatthew Dillon 	/* Start keeping track of the i8254 counter. */
598c8fe38aeSMatthew Dillon 	prev_count = sys_cputimer->count();
599c8fe38aeSMatthew Dillon 	tot_count = 0;
600c8fe38aeSMatthew Dillon 
601c8fe38aeSMatthew Dillon 	if (tsc_present)
602c8fe38aeSMatthew Dillon 		old_tsc = rdtsc();
603c8fe38aeSMatthew Dillon 	else
604c8fe38aeSMatthew Dillon 		old_tsc = 0;		/* shut up gcc */
605c8fe38aeSMatthew Dillon 
606c8fe38aeSMatthew Dillon 	/*
607c8fe38aeSMatthew Dillon 	 * Wait for the mc146818A seconds counter to change.  Read the i8254
608c8fe38aeSMatthew Dillon 	 * counter for each iteration since this is convenient and only
609c8fe38aeSMatthew Dillon 	 * costs a few usec of inaccuracy. The timing of the final reads
610c8fe38aeSMatthew Dillon 	 * of the counters almost matches the timing of the initial reads,
611c8fe38aeSMatthew Dillon 	 * so the main cause of inaccuracy is the varying latency from
612c8fe38aeSMatthew Dillon 	 * inside getit() or rtcin(RTC_STATUSA) to the beginning of the
613c8fe38aeSMatthew Dillon 	 * rtcin(RTC_SEC) that returns a changed seconds count.  The
614c8fe38aeSMatthew Dillon 	 * maximum inaccuracy from this cause is < 10 usec on 486's.
615c8fe38aeSMatthew Dillon 	 */
616c8fe38aeSMatthew Dillon 	start_sec = sec;
617c8fe38aeSMatthew Dillon 	for (;;) {
618c8fe38aeSMatthew Dillon 		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP))
619c8fe38aeSMatthew Dillon 			sec = rtcin(RTC_SEC);
620c8fe38aeSMatthew Dillon 		count = sys_cputimer->count();
6218fbc264dSMatthew Dillon 		tot_count += (sysclock_t)(count - prev_count);
622c8fe38aeSMatthew Dillon 		prev_count = count;
623c8fe38aeSMatthew Dillon 		if (sec != start_sec)
624c8fe38aeSMatthew Dillon 			break;
625c8fe38aeSMatthew Dillon 		if (--timeout == 0)
626c8fe38aeSMatthew Dillon 			goto fail;
627c8fe38aeSMatthew Dillon 	}
628c8fe38aeSMatthew Dillon 
629c8fe38aeSMatthew Dillon 	/*
630c8fe38aeSMatthew Dillon 	 * Read the cpu cycle counter.  The timing considerations are
631c8fe38aeSMatthew Dillon 	 * similar to those for the i8254 clock.
632c8fe38aeSMatthew Dillon 	 */
633c8fe38aeSMatthew Dillon 	if (tsc_present) {
634c8fe38aeSMatthew Dillon 		tsc_frequency = rdtsc() - old_tsc;
635632f4575SSepherosa Ziehau 		if (bootverbose) {
636632f4575SSepherosa Ziehau 			kprintf("TSC clock: %jd Hz (Method A)\n",
637632f4575SSepherosa Ziehau 			    (intmax_t)tsc_frequency);
638632f4575SSepherosa Ziehau 		}
639c8fe38aeSMatthew Dillon 	}
6405b49787bSMatthew Dillon 	tsc_oneus_approx = ((tsc_frequency|1) + 999999) / 1000000;
641c8fe38aeSMatthew Dillon 
6428fbc264dSMatthew Dillon 	kprintf("i8254 clock: %lu Hz\n", tot_count);
643c8fe38aeSMatthew Dillon 	return (tot_count);
644c8fe38aeSMatthew Dillon 
645c8fe38aeSMatthew Dillon fail:
6468fbc264dSMatthew Dillon 	kprintf("failed, using default i8254 clock of %lu Hz\n",
647c8fe38aeSMatthew Dillon 		i8254_cputimer.freq);
648c8fe38aeSMatthew Dillon 	return (i8254_cputimer.freq);
649c8fe38aeSMatthew Dillon }
650c8fe38aeSMatthew Dillon 
651c8fe38aeSMatthew Dillon static void
652c8fe38aeSMatthew Dillon i8254_restore(void)
653c8fe38aeSMatthew Dillon {
654c8fe38aeSMatthew Dillon 	timer0_state = ACQUIRED;
655c8fe38aeSMatthew Dillon 
656c8fe38aeSMatthew Dillon 	clock_lock();
657c8fe38aeSMatthew Dillon 
658c8fe38aeSMatthew Dillon 	/*
659c8fe38aeSMatthew Dillon 	 * Timer0 is our fine-grained variable clock interrupt
660c8fe38aeSMatthew Dillon 	 */
661c8fe38aeSMatthew Dillon 	outb(TIMER_MODE, TIMER_SEL0 | TIMER_SWSTROBE | TIMER_16BIT);
662c8fe38aeSMatthew Dillon 	outb(TIMER_CNTR0, 2);	/* lsb */
663c8fe38aeSMatthew Dillon 	outb(TIMER_CNTR0, 0);	/* msb */
664c8fe38aeSMatthew Dillon 	clock_unlock();
665c8fe38aeSMatthew Dillon 
66640672791SSepherosa Ziehau 	if (!i8254_nointr) {
66740672791SSepherosa Ziehau 		cputimer_intr_register(&i8254_cputimer_intr);
66840672791SSepherosa Ziehau 		cputimer_intr_select(&i8254_cputimer_intr, 0);
66940672791SSepherosa Ziehau 	}
67040672791SSepherosa Ziehau 
671c8fe38aeSMatthew Dillon 	/*
672c8fe38aeSMatthew Dillon 	 * Timer1 or timer2 is our free-running clock, but only if another
673c8fe38aeSMatthew Dillon 	 * has not been selected.
674c8fe38aeSMatthew Dillon 	 */
675c8fe38aeSMatthew Dillon 	cputimer_register(&i8254_cputimer);
676c8fe38aeSMatthew Dillon 	cputimer_select(&i8254_cputimer, 0);
677c8fe38aeSMatthew Dillon }
678c8fe38aeSMatthew Dillon 
679c8fe38aeSMatthew Dillon static void
680c8fe38aeSMatthew Dillon i8254_cputimer_construct(struct cputimer *timer, sysclock_t oldclock)
681c8fe38aeSMatthew Dillon {
682c8fe38aeSMatthew Dillon  	int which;
683c8fe38aeSMatthew Dillon 
684c8fe38aeSMatthew Dillon 	/*
685c8fe38aeSMatthew Dillon 	 * Should we use timer 1 or timer 2 ?
686c8fe38aeSMatthew Dillon 	 */
687c8fe38aeSMatthew Dillon 	which = 0;
688c8fe38aeSMatthew Dillon 	TUNABLE_INT_FETCH("hw.i8254.walltimer", &which);
689c8fe38aeSMatthew Dillon 	if (which != 1 && which != 2)
690c8fe38aeSMatthew Dillon 		which = 2;
691c8fe38aeSMatthew Dillon 
692c8fe38aeSMatthew Dillon 	switch(which) {
693c8fe38aeSMatthew Dillon 	case 1:
694c8fe38aeSMatthew Dillon 		timer->name = "i8254_timer1";
695c8fe38aeSMatthew Dillon 		timer->type = CPUTIMER_8254_SEL1;
696c8fe38aeSMatthew Dillon 		i8254_walltimer_sel = TIMER_SEL1;
697c8fe38aeSMatthew Dillon 		i8254_walltimer_cntr = TIMER_CNTR1;
698c8fe38aeSMatthew Dillon 		timer1_state = ACQUIRED;
699c8fe38aeSMatthew Dillon 		break;
700c8fe38aeSMatthew Dillon 	case 2:
701c8fe38aeSMatthew Dillon 		timer->name = "i8254_timer2";
702c8fe38aeSMatthew Dillon 		timer->type = CPUTIMER_8254_SEL2;
703c8fe38aeSMatthew Dillon 		i8254_walltimer_sel = TIMER_SEL2;
704c8fe38aeSMatthew Dillon 		i8254_walltimer_cntr = TIMER_CNTR2;
705c8fe38aeSMatthew Dillon 		timer2_state = ACQUIRED;
706c8fe38aeSMatthew Dillon 		break;
707c8fe38aeSMatthew Dillon 	}
708c8fe38aeSMatthew Dillon 
7098fbc264dSMatthew Dillon 	timer->base = (oldclock + 0xFFFF) & 0xFFFFFFFFFFFF0000LU;
710c8fe38aeSMatthew Dillon 
711c8fe38aeSMatthew Dillon 	clock_lock();
712c8fe38aeSMatthew Dillon 	outb(TIMER_MODE, i8254_walltimer_sel | TIMER_RATEGEN | TIMER_16BIT);
713c8fe38aeSMatthew Dillon 	outb(i8254_walltimer_cntr, 0);	/* lsb */
714c8fe38aeSMatthew Dillon 	outb(i8254_walltimer_cntr, 0);	/* msb */
715c8fe38aeSMatthew Dillon 	outb(IO_PPI, inb(IO_PPI) | 1);	/* bit 0: enable gate, bit 1: spkr */
716c8fe38aeSMatthew Dillon 	clock_unlock();
717c8fe38aeSMatthew Dillon }
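/*
 * The counter selection above is driven by the hw.i8254.walltimer loader
 * tunable; for example, setting
 *
 *	hw.i8254.walltimer="1"
 *
 * in /boot/loader.conf selects counter 1, while any other value falls back
 * to counter 2 (the default; note that acquire_timer2() and hence sysbeep()
 * will fail while counter 2 is in use as the walltimer).
 */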
718c8fe38aeSMatthew Dillon 
719c8fe38aeSMatthew Dillon static void
720c8fe38aeSMatthew Dillon i8254_cputimer_destruct(struct cputimer *timer)
721c8fe38aeSMatthew Dillon {
722c8fe38aeSMatthew Dillon 	switch(timer->type) {
723c8fe38aeSMatthew Dillon 	case CPUTIMER_8254_SEL1:
724c8fe38aeSMatthew Dillon 	    timer1_state = RELEASED;
725c8fe38aeSMatthew Dillon 	    break;
726c8fe38aeSMatthew Dillon 	case CPUTIMER_8254_SEL2:
727c8fe38aeSMatthew Dillon 	    timer2_state = RELEASED;
728c8fe38aeSMatthew Dillon 	    break;
729c8fe38aeSMatthew Dillon 	default:
730c8fe38aeSMatthew Dillon 	    break;
731c8fe38aeSMatthew Dillon 	}
732c8fe38aeSMatthew Dillon 	timer->type = 0;
733c8fe38aeSMatthew Dillon }
734c8fe38aeSMatthew Dillon 
735c8fe38aeSMatthew Dillon static void
736c8fe38aeSMatthew Dillon rtc_restore(void)
737c8fe38aeSMatthew Dillon {
738c8fe38aeSMatthew Dillon 	/* Restore all of the RTC's "status" (actually, control) registers. */
739c8fe38aeSMatthew Dillon 	writertc(RTC_STATUSB, RTCSB_24HR);
740c8fe38aeSMatthew Dillon 	writertc(RTC_STATUSA, rtc_statusa);
741c8fe38aeSMatthew Dillon 	writertc(RTC_STATUSB, rtc_statusb);
742c8fe38aeSMatthew Dillon }
743c8fe38aeSMatthew Dillon 
744c8fe38aeSMatthew Dillon /*
745c8fe38aeSMatthew Dillon  * Restore all the timers.
746c8fe38aeSMatthew Dillon  *
747c8fe38aeSMatthew Dillon  * This function is called to resynchronize our core timekeeping after a
748c8fe38aeSMatthew Dillon  * long halt, e.g. from apm_default_resume() and friends.  It is also
749c8fe38aeSMatthew Dillon  * called if after a BIOS call we have detected munging of the 8254.
750c8fe38aeSMatthew Dillon  * It is necessary because cputimer_count() counter's delta may have grown
751c8fe38aeSMatthew Dillon  * too large for nanouptime() and friends to handle, or (in the case of 8254
752c8fe38aeSMatthew Dillon  * munging) might cause the SYSTIMER code to prematurely trigger.
753c8fe38aeSMatthew Dillon  */
754c8fe38aeSMatthew Dillon void
755c8fe38aeSMatthew Dillon timer_restore(void)
756c8fe38aeSMatthew Dillon {
757c8fe38aeSMatthew Dillon 	crit_enter();
7581a3a6ceeSImre Vadász 	if (i8254_cputimer_disable == 0)
759c8fe38aeSMatthew Dillon 		i8254_restore();	/* restore timer_freq and hz */
760c8fe38aeSMatthew Dillon 	rtc_restore();			/* reenable RTC interrupts */
761c8fe38aeSMatthew Dillon 	crit_exit();
762c8fe38aeSMatthew Dillon }
763c8fe38aeSMatthew Dillon 
7644098a6e5SImre Vadász #define MAX_MEASURE_RETRIES	100
7654098a6e5SImre Vadász 
7664098a6e5SImre Vadász static u_int64_t
7674098a6e5SImre Vadász do_measure(u_int64_t timer_latency, u_int64_t *latency, sysclock_t *time,
7684098a6e5SImre Vadász     int *retries)
7694098a6e5SImre Vadász {
7704098a6e5SImre Vadász 	u_int64_t tsc1, tsc2;
7714098a6e5SImre Vadász 	u_int64_t threshold;
7724098a6e5SImre Vadász 	sysclock_t val;
7734098a6e5SImre Vadász 	int cnt = 0;
7744098a6e5SImre Vadász 
7754098a6e5SImre Vadász 	do {
7764098a6e5SImre Vadász 		if (cnt > MAX_MEASURE_RETRIES/2)
7774098a6e5SImre Vadász 			threshold = timer_latency << 1;
7784098a6e5SImre Vadász 		else
7794098a6e5SImre Vadász 			threshold = timer_latency + (timer_latency >> 2);
7804098a6e5SImre Vadász 
7814098a6e5SImre Vadász 		cnt++;
7824098a6e5SImre Vadász 		tsc1 = rdtsc_ordered();
7834098a6e5SImre Vadász 		val = sys_cputimer->count();
7844098a6e5SImre Vadász 		tsc2 = rdtsc_ordered();
7854098a6e5SImre Vadász 	} while (timer_latency > 0 && cnt < MAX_MEASURE_RETRIES &&
7864098a6e5SImre Vadász 	    tsc2 - tsc1 > threshold);
7874098a6e5SImre Vadász 
7884098a6e5SImre Vadász 	*retries = cnt - 1;
7894098a6e5SImre Vadász 	*latency = tsc2 - tsc1;
7904098a6e5SImre Vadász 	*time = val;
7914098a6e5SImre Vadász 	return tsc1;
7924098a6e5SImre Vadász }
7934098a6e5SImre Vadász 
7944098a6e5SImre Vadász static u_int64_t
7954098a6e5SImre Vadász do_calibrate_cputimer(u_int usecs, u_int64_t timer_latency)
7964098a6e5SImre Vadász {
7974098a6e5SImre Vadász 	if (calibrate_tsc_fast) {
7984098a6e5SImre Vadász 		u_int64_t old_tsc1, start_lat1, new_tsc1, end_lat1;
7994098a6e5SImre Vadász 		u_int64_t old_tsc2, start_lat2, new_tsc2, end_lat2;
8004098a6e5SImre Vadász 		u_int64_t freq1, freq2;
8014098a6e5SImre Vadász 		sysclock_t start1, end1, start2, end2;
8024098a6e5SImre Vadász 		int retries1, retries2, retries3, retries4;
8034098a6e5SImre Vadász 
8044098a6e5SImre Vadász 		DELAY(1000);
8054098a6e5SImre Vadász 		old_tsc1 = do_measure(timer_latency, &start_lat1, &start1,
8064098a6e5SImre Vadász 		    &retries1);
8074098a6e5SImre Vadász 		DELAY(20000);
8084098a6e5SImre Vadász 		old_tsc2 = do_measure(timer_latency, &start_lat2, &start2,
8094098a6e5SImre Vadász 		    &retries2);
8104098a6e5SImre Vadász 		DELAY(usecs);
8114098a6e5SImre Vadász 		new_tsc1 = do_measure(timer_latency, &end_lat1, &end1,
8124098a6e5SImre Vadász 		    &retries3);
8134098a6e5SImre Vadász 		DELAY(20000);
8144098a6e5SImre Vadász 		new_tsc2 = do_measure(timer_latency, &end_lat2, &end2,
8154098a6e5SImre Vadász 		    &retries4);
8164098a6e5SImre Vadász 
8174098a6e5SImre Vadász 		old_tsc1 += start_lat1;
8184098a6e5SImre Vadász 		old_tsc2 += start_lat2;
8194098a6e5SImre Vadász 		freq1 = (new_tsc1 - old_tsc1) + (start_lat1 + end_lat1) / 2;
8204098a6e5SImre Vadász 		freq2 = (new_tsc2 - old_tsc2) + (start_lat2 + end_lat2) / 2;
8214098a6e5SImre Vadász 		end1 -= start1;
8224098a6e5SImre Vadász 		end2 -= start2;
8234098a6e5SImre Vadász 		/* This should in practice be safe from overflows. */
8248fbc264dSMatthew Dillon 		freq1 = muldivu64(freq1, sys_cputimer->freq, end1);
8258fbc264dSMatthew Dillon 		freq2 = muldivu64(freq2, sys_cputimer->freq, end2);
8264098a6e5SImre Vadász 		if (calibrate_test && (retries1 > 0 || retries2 > 0)) {
8274098a6e5SImre Vadász 			kprintf("%s: retries: %d, %d, %d, %d\n",
8284098a6e5SImre Vadász 			    __func__, retries1, retries2, retries3, retries4);
8294098a6e5SImre Vadász 		}
8304098a6e5SImre Vadász 		if (calibrate_test) {
8314098a6e5SImre Vadász 			kprintf("%s: freq1=%ju freq2=%ju avg=%ju\n",
8324098a6e5SImre Vadász 			    __func__, freq1, freq2, (freq1 + freq2) / 2);
8334098a6e5SImre Vadász 		}
8344098a6e5SImre Vadász 		return (freq1 + freq2) / 2;
8354098a6e5SImre Vadász 	} else {
8364098a6e5SImre Vadász 		u_int64_t old_tsc, new_tsc;
8374098a6e5SImre Vadász 		u_int64_t freq;
8384098a6e5SImre Vadász 
8394098a6e5SImre Vadász 		old_tsc = rdtsc_ordered();
8404098a6e5SImre Vadász 		DELAY(usecs);
8414098a6e5SImre Vadász 		new_tsc = rdtsc();
8424098a6e5SImre Vadász 		freq = new_tsc - old_tsc;
8434098a6e5SImre Vadász 		/* This should in practice be safe from overflows. */
8444098a6e5SImre Vadász 		freq = (freq * 1000 * 1000) / usecs;
8454098a6e5SImre Vadász 		return freq;
8464098a6e5SImre Vadász 	}
8474098a6e5SImre Vadász }
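/*
 * The fast path above boils down to
 *
 *	tsc_freq ~= (tsc_end - tsc_start) * sys_cputimer->freq /
 *		    (timer_end - timer_start)
 *
 * e.g. (illustrative numbers) 600,000,000 TSC ticks measured across 238,636
 * ticks of the 1,193,182 Hz i8254 (~200 ms) works out to roughly 3.0 GHz.
 * The measurement is taken twice, 20 ms apart, and the two results are
 * averaged to smooth out the measured timer-read latency.
 */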
8484098a6e5SImre Vadász 
849c8fe38aeSMatthew Dillon /*
850c8fe38aeSMatthew Dillon  * Initialize 8254 timer 0 early so that it can be used in DELAY().
851c8fe38aeSMatthew Dillon  */
852c8fe38aeSMatthew Dillon void
853c8fe38aeSMatthew Dillon startrtclock(void)
854c8fe38aeSMatthew Dillon {
8551a3a6ceeSImre Vadász 	const timecounter_init_t **list;
8568fbc264dSMatthew Dillon 	sysclock_t delta, freq;
857c8fe38aeSMatthew Dillon 
8581a3a6ceeSImre Vadász 	callout_init_mp(&sysbeepstop_ch);
8591a3a6ceeSImre Vadász 
860c8fe38aeSMatthew Dillon 	/*
861c8fe38aeSMatthew Dillon 	 * Can we use the TSC?
8621997b4c2SMatthew Dillon 	 *
8631997b4c2SMatthew Dillon 	 * NOTE: If running under qemu, probably a good idea to force the
8641997b4c2SMatthew Dillon 	 *	 TSC because we are not likely to detect it as being
8651997b4c2SMatthew Dillon 	 *	 invariant or mpsyncd if you don't.  This will greatly
8661997b4c2SMatthew Dillon 	 *	 reduce SMP contention.
867c8fe38aeSMatthew Dillon 	 */
8685a81b19fSSepherosa Ziehau 	if (cpu_feature & CPUID_TSC) {
869c8fe38aeSMatthew Dillon 		tsc_present = 1;
8701997b4c2SMatthew Dillon 		TUNABLE_INT_FETCH("hw.tsc_cputimer_force", &tsc_invariant);
8711997b4c2SMatthew Dillon 
8725a81b19fSSepherosa Ziehau 		if ((cpu_vendor_id == CPU_VENDOR_INTEL ||
8735a81b19fSSepherosa Ziehau 		     cpu_vendor_id == CPU_VENDOR_AMD) &&
8745a81b19fSSepherosa Ziehau 		    cpu_exthigh >= 0x80000007) {
8755a81b19fSSepherosa Ziehau 			u_int regs[4];
8765a81b19fSSepherosa Ziehau 
8775a81b19fSSepherosa Ziehau 			do_cpuid(0x80000007, regs);
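			/* CPUID 0x80000007: EDX bit 8 = invariant TSC */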
8785a81b19fSSepherosa Ziehau 			if (regs[3] & 0x100)
8795a81b19fSSepherosa Ziehau 				tsc_invariant = 1;
8805a81b19fSSepherosa Ziehau 		}
8815a81b19fSSepherosa Ziehau 	} else {
882c8fe38aeSMatthew Dillon 		tsc_present = 0;
8835a81b19fSSepherosa Ziehau 	}
884c8fe38aeSMatthew Dillon 
885c8fe38aeSMatthew Dillon 	/*
886c8fe38aeSMatthew Dillon 	 * Initial RTC state, don't do anything unexpected
887c8fe38aeSMatthew Dillon 	 */
888c8fe38aeSMatthew Dillon 	writertc(RTC_STATUSA, rtc_statusa);
889c8fe38aeSMatthew Dillon 	writertc(RTC_STATUSB, RTCSB_24HR);
890c8fe38aeSMatthew Dillon 
8911a3a6ceeSImre Vadász 	SET_FOREACH(list, timecounter_init_set) {
8921a3a6ceeSImre Vadász 		if ((*list)->configure != NULL)
8931a3a6ceeSImre Vadász 			(*list)->configure();
8941a3a6ceeSImre Vadász 	}
8951a3a6ceeSImre Vadász 
8961a3a6ceeSImre Vadász 	/*
8971a3a6ceeSImre Vadász 	 * If tsc_frequency is already initialized now, and a flag is set
8981a3a6ceeSImre Vadász 	 * that i8254 timer is unneeded, we are done.
8991a3a6ceeSImre Vadász 	 */
9001a3a6ceeSImre Vadász 	if (tsc_frequency != 0 && i8254_cputimer_disable != 0)
9011a3a6ceeSImre Vadász 		goto done;
9021a3a6ceeSImre Vadász 
903c8fe38aeSMatthew Dillon 	/*
904c8fe38aeSMatthew Dillon 	 * Set the 8254 timer0 in TIMER_SWSTROBE mode and cause it to
905c8fe38aeSMatthew Dillon 	 * generate an interrupt, which we will ignore for now.
906c8fe38aeSMatthew Dillon 	 *
907c8fe38aeSMatthew Dillon 	 * Set the 8254 timer1 in TIMER_RATEGEN mode and load 0x0000
908c8fe38aeSMatthew Dillon 	 * (so it counts a full 2^16 and repeats).  We will use this timer
909c8fe38aeSMatthew Dillon 	 * for our counting.
910c8fe38aeSMatthew Dillon 	 */
9111a3a6ceeSImre Vadász 	if (i8254_cputimer_disable == 0)
912c8fe38aeSMatthew Dillon 		i8254_restore();
913242fd95fSImre Vadász 
9141a3a6ceeSImre Vadász 	kprintf("Using cputimer %s for TSC calibration\n", sys_cputimer->name);
9151a3a6ceeSImre Vadász 
916242fd95fSImre Vadász 	/*
917242fd95fSImre Vadász 	 * When booting without verbose messages, it's pointless to run the
918242fd95fSImre Vadász 	 * calibrate_clocks() calibration code when we don't use the
919242fd95fSImre Vadász 	 * results in any way. With bootverbose, we are at least printing
920242fd95fSImre Vadász 	 * this information to the kernel log.
921242fd95fSImre Vadász 	 */
9221a3a6ceeSImre Vadász 	if (i8254_cputimer_disable != 0 ||
9231a3a6ceeSImre Vadász 	    (calibrate_timers_with_rtc == 0 && !bootverbose)) {
924242fd95fSImre Vadász 		goto skip_rtc_based;
9251a3a6ceeSImre Vadász 	}
926242fd95fSImre Vadász 
927c8fe38aeSMatthew Dillon 	freq = calibrate_clocks();
928c8fe38aeSMatthew Dillon #ifdef CLK_CALIBRATION_LOOP
929c8fe38aeSMatthew Dillon 	if (bootverbose) {
930ce7866b8SMatthew Dillon 		int c;
931ce7866b8SMatthew Dillon 
932ce7866b8SMatthew Dillon 		cnpoll(TRUE);
933ce7866b8SMatthew Dillon 		kprintf("Press a key on the console to "
934ce7866b8SMatthew Dillon 			"abort clock calibration\n");
935ce7866b8SMatthew Dillon 		while ((c = cncheckc()) == -1 || c == NOKEY)
936c8fe38aeSMatthew Dillon 			calibrate_clocks();
937ce7866b8SMatthew Dillon 		cnpoll(FALSE);
938c8fe38aeSMatthew Dillon 	}
939c8fe38aeSMatthew Dillon #endif
940c8fe38aeSMatthew Dillon 
941c8fe38aeSMatthew Dillon 	/*
942c8fe38aeSMatthew Dillon 	 * Use the calibrated i8254 frequency if it seems reasonable.
943c8fe38aeSMatthew Dillon 	 * Otherwise use the default, and don't use the calibrated i586
944c8fe38aeSMatthew Dillon 	 * frequency.
945c8fe38aeSMatthew Dillon 	 */
946c8fe38aeSMatthew Dillon 	delta = freq > i8254_cputimer.freq ?
947c8fe38aeSMatthew Dillon 		freq - i8254_cputimer.freq : i8254_cputimer.freq - freq;
948c8fe38aeSMatthew Dillon 	if (delta < i8254_cputimer.freq / 100) {
949242fd95fSImre Vadász 		if (calibrate_timers_with_rtc == 0) {
950c8fe38aeSMatthew Dillon 			kprintf(
951242fd95fSImre Vadász "hw.calibrate_timers_with_rtc not set - using default i8254 frequency\n");
952c8fe38aeSMatthew Dillon 			freq = i8254_cputimer.freq;
953242fd95fSImre Vadász 		}
95440672791SSepherosa Ziehau 		/*
95540672791SSepherosa Ziehau 		 * NOTE:
95640672791SSepherosa Ziehau 		 * Interrupt timer's freq must be adjusted
95740672791SSepherosa Ziehau 		 * before we change the cputimer's frequency.
95840672791SSepherosa Ziehau 		 */
95940672791SSepherosa Ziehau 		i8254_cputimer_intr.freq = freq;
960c8fe38aeSMatthew Dillon 		cputimer_set_frequency(&i8254_cputimer, freq);
961c8fe38aeSMatthew Dillon 	} else {
962c8fe38aeSMatthew Dillon 		if (bootverbose)
9638fbc264dSMatthew Dillon 			kprintf("%lu Hz differs from default of %lu Hz "
9645b49787bSMatthew Dillon 				"by more than 1%%\n",
965c8fe38aeSMatthew Dillon 			        freq, i8254_cputimer.freq);
966c8fe38aeSMatthew Dillon 		tsc_frequency = 0;
967c8fe38aeSMatthew Dillon 	}
968c8fe38aeSMatthew Dillon 
969242fd95fSImre Vadász 	if (tsc_frequency != 0 && calibrate_timers_with_rtc == 0) {
9705b49787bSMatthew Dillon 		kprintf("hw.calibrate_timers_with_rtc not "
9715b49787bSMatthew Dillon 			"set - using old calibration method\n");
972c8fe38aeSMatthew Dillon 		tsc_frequency = 0;
973c8fe38aeSMatthew Dillon 	}
974242fd95fSImre Vadász 
975242fd95fSImre Vadász skip_rtc_based:
976c8fe38aeSMatthew Dillon 	if (tsc_present && tsc_frequency == 0) {
9771a3a6ceeSImre Vadász 		u_int cnt;
9784098a6e5SImre Vadász 		u_int64_t cputime_latency_tsc = 0, max = 0, min = 0;
9794098a6e5SImre Vadász 		int i;
9801a3a6ceeSImre Vadász 
9814098a6e5SImre Vadász 		for (i = 0; i < 10; i++) {
9824098a6e5SImre Vadász 			/* Warm up */
9834098a6e5SImre Vadász 			(void)sys_cputimer->count();
9844098a6e5SImre Vadász 		}
9854098a6e5SImre Vadász 		for (i = 0; i < 100; i++) {
9864098a6e5SImre Vadász 			u_int64_t old_tsc, new_tsc;
9874098a6e5SImre Vadász 
9884098a6e5SImre Vadász 			old_tsc = rdtsc_ordered();
9894098a6e5SImre Vadász 			(void)sys_cputimer->count();
9904098a6e5SImre Vadász 			new_tsc = rdtsc_ordered();
9914098a6e5SImre Vadász 			cputime_latency_tsc += (new_tsc - old_tsc);
9924098a6e5SImre Vadász 			if (max < (new_tsc - old_tsc))
9934098a6e5SImre Vadász 				max = new_tsc - old_tsc;
9944098a6e5SImre Vadász 			if (min == 0 || min > (new_tsc - old_tsc))
9954098a6e5SImre Vadász 				min = new_tsc - old_tsc;
9964098a6e5SImre Vadász 		}
9974098a6e5SImre Vadász 		cputime_latency_tsc /= 100;
9984098a6e5SImre Vadász 		kprintf(
9994098a6e5SImre Vadász 		    "Timer latency (in TSC ticks): %lu min=%lu max=%lu\n",
10004098a6e5SImre Vadász 		    cputime_latency_tsc, min, max);
10014098a6e5SImre Vadász 		/* XXX Instead of this, properly filter out outliers. */
10024098a6e5SImre Vadász 		cputime_latency_tsc = min;
10034098a6e5SImre Vadász 
10044098a6e5SImre Vadász 		if (calibrate_test > 0) {
10054098a6e5SImre Vadász 			u_int64_t values[20], avg = 0;
10064098a6e5SImre Vadász 			for (i = 1; i <= 20; i++) {
10074098a6e5SImre Vadász 				u_int64_t freq;
10084098a6e5SImre Vadász 
10094098a6e5SImre Vadász 				freq = do_calibrate_cputimer(i * 100 * 1000,
10104098a6e5SImre Vadász 				    cputime_latency_tsc);
10114098a6e5SImre Vadász 				values[i - 1] = freq;
10124098a6e5SImre Vadász 			}
10134098a6e5SImre Vadász 			/* Compute an average TSC for the 1s to 2s delays. */
10144098a6e5SImre Vadász 			for (i = 10; i < 20; i++)
10154098a6e5SImre Vadász 				avg += values[i];
10164098a6e5SImre Vadász 			avg /= 10;
10174098a6e5SImre Vadász 			for (i = 0; i < 20; i++) {
10184098a6e5SImre Vadász 				kprintf("%ums: %lu (Diff from average: %ld)\n",
10194098a6e5SImre Vadász 				    (i + 1) * 100, values[i],
10204098a6e5SImre Vadász 				    (int64_t)(values[i] - avg));
10214098a6e5SImre Vadász 			}
10224098a6e5SImre Vadász 		}
10234098a6e5SImre Vadász 
10244098a6e5SImre Vadász 		if (calibrate_tsc_fast > 0) {
10254098a6e5SImre Vadász 			/* HPET would typically be >10MHz */
10261a3a6ceeSImre Vadász 			if (sys_cputimer->freq >= 10000000)
10271a3a6ceeSImre Vadász 				cnt = 200000;
10281a3a6ceeSImre Vadász 			else
10294098a6e5SImre Vadász 				cnt = 500000;
10304098a6e5SImre Vadász 		} else {
10311a3a6ceeSImre Vadász 			cnt = 1000000;
10324098a6e5SImre Vadász 		}
10331a3a6ceeSImre Vadász 
10344098a6e5SImre Vadász 		tsc_frequency = do_calibrate_cputimer(cnt, cputime_latency_tsc);
1035242fd95fSImre Vadász 		if (bootverbose && calibrate_timers_with_rtc) {
1036632f4575SSepherosa Ziehau 			kprintf("TSC clock: %jd Hz (Method B)\n",
1037632f4575SSepherosa Ziehau 			    (intmax_t)tsc_frequency);
1038c8fe38aeSMatthew Dillon 		}
1039c8fe38aeSMatthew Dillon 	}
1040c8fe38aeSMatthew Dillon 
10411a3a6ceeSImre Vadász done:
1042632f4575SSepherosa Ziehau 	if (tsc_present) {
1043632f4575SSepherosa Ziehau 		kprintf("TSC%s clock: %jd Hz\n",
1044632f4575SSepherosa Ziehau 		    tsc_invariant ? " invariant" : "",
1045632f4575SSepherosa Ziehau 		    (intmax_t)tsc_frequency);
1046632f4575SSepherosa Ziehau 	}
10475b49787bSMatthew Dillon 	tsc_oneus_approx = ((tsc_frequency|1) + 999999) / 1000000;
1048632f4575SSepherosa Ziehau 
10495b49787bSMatthew Dillon 	EVENTHANDLER_REGISTER(shutdown_post_sync, resettodr_on_shutdown,
10505b49787bSMatthew Dillon 			      NULL, SHUTDOWN_PRI_LAST);
1051c8fe38aeSMatthew Dillon }
1052c8fe38aeSMatthew Dillon 
1053c8fe38aeSMatthew Dillon /*
1054c8fe38aeSMatthew Dillon  * Sync the time of day back to the RTC on shutdown, but only if
1055c8fe38aeSMatthew Dillon  * we have already loaded it and have not crashed.
1056c8fe38aeSMatthew Dillon  */
1057c8fe38aeSMatthew Dillon static void
1058c8fe38aeSMatthew Dillon resettodr_on_shutdown(void *arg __unused)
1059c8fe38aeSMatthew Dillon {
1060c8fe38aeSMatthew Dillon  	if (rtc_loaded && panicstr == NULL) {
1061c8fe38aeSMatthew Dillon 		resettodr();
1062c8fe38aeSMatthew Dillon 	}
1063c8fe38aeSMatthew Dillon }
1064c8fe38aeSMatthew Dillon 
1065c8fe38aeSMatthew Dillon /*
1066c8fe38aeSMatthew Dillon  * Initialize the time of day register, based on the given time base, which
1067c8fe38aeSMatthew Dillon  * comes, for example, from a filesystem.
1068c8fe38aeSMatthew Dillon  */
1069c8fe38aeSMatthew Dillon void
1070c8fe38aeSMatthew Dillon inittodr(time_t base)
1071c8fe38aeSMatthew Dillon {
1072c8fe38aeSMatthew Dillon 	unsigned long	sec, days;
1073c8fe38aeSMatthew Dillon 	int		year, month;
1074c8fe38aeSMatthew Dillon 	int		y, m;
1075c8fe38aeSMatthew Dillon 	struct timespec ts;
1076c8fe38aeSMatthew Dillon 
1077c8fe38aeSMatthew Dillon 	if (base) {
1078c8fe38aeSMatthew Dillon 		ts.tv_sec = base;
1079c8fe38aeSMatthew Dillon 		ts.tv_nsec = 0;
1080c8fe38aeSMatthew Dillon 		set_timeofday(&ts);
1081c8fe38aeSMatthew Dillon 	}
1082c8fe38aeSMatthew Dillon 
1083c8fe38aeSMatthew Dillon 	/* Check whether an RTC is present and the time is valid */
1084c8fe38aeSMatthew Dillon 	if (!(rtcin(RTC_STATUSD) & RTCSD_PWR))
1085c8fe38aeSMatthew Dillon 		goto wrong_time;
1086c8fe38aeSMatthew Dillon 
1087c8fe38aeSMatthew Dillon 	/* wait for time update to complete */
1088c8fe38aeSMatthew Dillon 	/* If RTCSA_TUP is zero, we have at least 244us before next update */
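	/*
	 * The critical section is dropped and re-entered on each iteration
	 * so pending interrupts can run while we poll; once RTCSA_TUP reads
	 * clear we remain inside the critical section for the reads below.
	 */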
1089c8fe38aeSMatthew Dillon 	crit_enter();
1090c8fe38aeSMatthew Dillon 	while (rtcin(RTC_STATUSA) & RTCSA_TUP) {
1091c8fe38aeSMatthew Dillon 		crit_exit();
1092c8fe38aeSMatthew Dillon 		crit_enter();
1093c8fe38aeSMatthew Dillon 	}
1094c8fe38aeSMatthew Dillon 
1095c8fe38aeSMatthew Dillon 	days = 0;
1096c8fe38aeSMatthew Dillon #ifdef USE_RTC_CENTURY
1097c8fe38aeSMatthew Dillon 	year = readrtc(RTC_YEAR) + readrtc(RTC_CENTURY) * 100;
1098c8fe38aeSMatthew Dillon #else
1099c8fe38aeSMatthew Dillon 	year = readrtc(RTC_YEAR) + 1900;
1100c8fe38aeSMatthew Dillon 	if (year < 1970)
1101c8fe38aeSMatthew Dillon 		year += 100;
1102c8fe38aeSMatthew Dillon #endif
1103c8fe38aeSMatthew Dillon 	if (year < 1970) {
1104c8fe38aeSMatthew Dillon 		crit_exit();
1105c8fe38aeSMatthew Dillon 		goto wrong_time;
1106c8fe38aeSMatthew Dillon 	}
1107c8fe38aeSMatthew Dillon 	month = readrtc(RTC_MONTH);
1108c8fe38aeSMatthew Dillon 	for (m = 1; m < month; m++)
1109c8fe38aeSMatthew Dillon 		days += daysinmonth[m-1];
1110c8fe38aeSMatthew Dillon 	if ((month > 2) && LEAPYEAR(year))
1111c8fe38aeSMatthew Dillon 		days ++;
1112c8fe38aeSMatthew Dillon 	days += readrtc(RTC_DAY) - 1;
1113c8fe38aeSMatthew Dillon 	for (y = 1970; y < year; y++)
1114c8fe38aeSMatthew Dillon 		days += DAYSPERYEAR + LEAPYEAR(y);
1115c8fe38aeSMatthew Dillon 	sec = ((( days * 24 +
1116c8fe38aeSMatthew Dillon 		  readrtc(RTC_HRS)) * 60 +
1117c8fe38aeSMatthew Dillon 		  readrtc(RTC_MIN)) * 60 +
1118c8fe38aeSMatthew Dillon 		  readrtc(RTC_SEC));
1119c8fe38aeSMatthew Dillon 	/* sec now contains the number of seconds since Jan 1 1970,
1120c8fe38aeSMatthew Dillon 	   in the local time zone */
1121c8fe38aeSMatthew Dillon 
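	/*
	 * Convert local time to UTC: tz_minuteswest is minutes west of
	 * Greenwich (e.g. 300, i.e. 18000 seconds, for US Eastern Standard
	 * Time); adjkerntz applies a further correction when the CMOS clock
	 * is kept in wall time.
	 */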
1122c8fe38aeSMatthew Dillon 	sec += tz.tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
1123c8fe38aeSMatthew Dillon 
1124cec73927SMatthew Dillon 	y = (int)(time_second - sec);
1125c8fe38aeSMatthew Dillon 	if (y <= -2 || y >= 2) {
1126c8fe38aeSMatthew Dillon 		/* badly off, adjust it */
1127c8fe38aeSMatthew Dillon 		ts.tv_sec = sec;
1128c8fe38aeSMatthew Dillon 		ts.tv_nsec = 0;
1129c8fe38aeSMatthew Dillon 		set_timeofday(&ts);
1130c8fe38aeSMatthew Dillon 	}
1131c8fe38aeSMatthew Dillon 	rtc_loaded = 1;
1132c8fe38aeSMatthew Dillon 	crit_exit();
1133c8fe38aeSMatthew Dillon 	return;
1134c8fe38aeSMatthew Dillon 
1135c8fe38aeSMatthew Dillon wrong_time:
1136c8fe38aeSMatthew Dillon 	kprintf("Invalid time in real time clock.\n");
1137c8fe38aeSMatthew Dillon 	kprintf("Check and reset the date immediately!\n");
1138c8fe38aeSMatthew Dillon }
1139c8fe38aeSMatthew Dillon 
1140c8fe38aeSMatthew Dillon /*
1141c8fe38aeSMatthew Dillon  * Write system time back to RTC
1142c8fe38aeSMatthew Dillon  */
1143c8fe38aeSMatthew Dillon void
1144c8fe38aeSMatthew Dillon resettodr(void)
1145c8fe38aeSMatthew Dillon {
1146c8fe38aeSMatthew Dillon 	struct timeval tv;
1147c8fe38aeSMatthew Dillon 	unsigned long tm;
1148c8fe38aeSMatthew Dillon 	int m;
1149c8fe38aeSMatthew Dillon 	int y;
1150c8fe38aeSMatthew Dillon 
1151c8fe38aeSMatthew Dillon 	if (disable_rtc_set)
1152c8fe38aeSMatthew Dillon 		return;
1153c8fe38aeSMatthew Dillon 
1154c8fe38aeSMatthew Dillon 	microtime(&tv);
1155c8fe38aeSMatthew Dillon 	tm = tv.tv_sec;
1156c8fe38aeSMatthew Dillon 
1157c8fe38aeSMatthew Dillon 	crit_enter();
1158c8fe38aeSMatthew Dillon 	/* Disable RTC updates and interrupts. */
1159c8fe38aeSMatthew Dillon 	writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR);
1160c8fe38aeSMatthew Dillon 
1161c8fe38aeSMatthew Dillon 	/* Calculate local time to put in RTC */
1162c8fe38aeSMatthew Dillon 
1163c8fe38aeSMatthew Dillon 	tm -= tz.tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
1164c8fe38aeSMatthew Dillon 
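	/*
	 * The RTC date/time registers are kept in BCD (binary data mode is
	 * never enabled in rtc_statusb), hence the bin2bcd() conversions;
	 * bin2bcd(59) == 0x59, for example.
	 */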
1165c8fe38aeSMatthew Dillon 	writertc(RTC_SEC, bin2bcd(tm%60)); tm /= 60;	/* Write back Seconds */
1166c8fe38aeSMatthew Dillon 	writertc(RTC_MIN, bin2bcd(tm%60)); tm /= 60;	/* Write back Minutes */
1167c8fe38aeSMatthew Dillon 	writertc(RTC_HRS, bin2bcd(tm%24)); tm /= 24;	/* Write back Hours   */
1168c8fe38aeSMatthew Dillon 
1169c8fe38aeSMatthew Dillon 	/* We have now the days since 01-01-1970 in tm */
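	/* 1970-01-01 was a Thursday, so (tm + 4) % 7 yields Sunday == 0 */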
1170c8fe38aeSMatthew Dillon 	writertc(RTC_WDAY, (tm+4)%7);			/* Write back Weekday */
1171c8fe38aeSMatthew Dillon 	for (y = 1970, m = DAYSPERYEAR + LEAPYEAR(y);
1172c8fe38aeSMatthew Dillon 	     tm >= m;
1173c8fe38aeSMatthew Dillon 	     y++,      m = DAYSPERYEAR + LEAPYEAR(y))
1174c8fe38aeSMatthew Dillon 	     tm -= m;
1175c8fe38aeSMatthew Dillon 
1176c8fe38aeSMatthew Dillon 	/* Now we have the years in y and the day-of-the-year in tm */
1177c8fe38aeSMatthew Dillon 	writertc(RTC_YEAR, bin2bcd(y%100));		/* Write back Year    */
1178c8fe38aeSMatthew Dillon #ifdef USE_RTC_CENTURY
1179c8fe38aeSMatthew Dillon 	writertc(RTC_CENTURY, bin2bcd(y/100));		/* ... and Century    */
1180c8fe38aeSMatthew Dillon #endif
1181c8fe38aeSMatthew Dillon 	for (m = 0; ; m++) {
1182c8fe38aeSMatthew Dillon 		int ml;
1183c8fe38aeSMatthew Dillon 
1184c8fe38aeSMatthew Dillon 		ml = daysinmonth[m];
1185c8fe38aeSMatthew Dillon 		if (m == 1 && LEAPYEAR(y))
1186c8fe38aeSMatthew Dillon 			ml++;
1187c8fe38aeSMatthew Dillon 		if (tm < ml)
1188c8fe38aeSMatthew Dillon 			break;
1189c8fe38aeSMatthew Dillon 		tm -= ml;
1190c8fe38aeSMatthew Dillon 	}
1191c8fe38aeSMatthew Dillon 
1192c8fe38aeSMatthew Dillon 	writertc(RTC_MONTH, bin2bcd(m + 1));            /* Write back Month   */
1193c8fe38aeSMatthew Dillon 	writertc(RTC_DAY, bin2bcd(tm + 1));             /* Write back Month Day */
1194c8fe38aeSMatthew Dillon 
1195c8fe38aeSMatthew Dillon 	/* Reenable RTC updates and interrupts. */
1196c8fe38aeSMatthew Dillon 	writertc(RTC_STATUSB, rtc_statusb);
1197c8fe38aeSMatthew Dillon 	crit_exit();
1198c8fe38aeSMatthew Dillon }
1199c8fe38aeSMatthew Dillon 
12006b809ec7SSepherosa Ziehau static int
12016b809ec7SSepherosa Ziehau i8254_ioapic_trial(int irq, struct cputimer_intr *cti)
12026b809ec7SSepherosa Ziehau {
12036b809ec7SSepherosa Ziehau 	sysclock_t base;
12046b809ec7SSepherosa Ziehau 	long lastcnt;
12056b809ec7SSepherosa Ziehau 
12066b809ec7SSepherosa Ziehau 	/*
12076b809ec7SSepherosa Ziehau 	 * Following code assumes the 8254 is the cpu timer,
12086b809ec7SSepherosa Ziehau 	 * so make sure it is.
12096b809ec7SSepherosa Ziehau 	 */
121063823918SMatthew Dillon 	/*KKASSERT(sys_cputimer == &i8254_cputimer); (tested by CuteLarva) */
12116b809ec7SSepherosa Ziehau 	KKASSERT(cti == &i8254_cputimer_intr);
12126b809ec7SSepherosa Ziehau 
1213c83c147eSSepherosa Ziehau 	lastcnt = get_interrupt_counter(irq, mycpuid);
12146b809ec7SSepherosa Ziehau 
12156b809ec7SSepherosa Ziehau 	/*
12166b809ec7SSepherosa Ziehau 	 * Force an 8254 Timer0 interrupt and wait 1/100s for
12176b809ec7SSepherosa Ziehau 	 * it to happen, then see if we got it.
12186b809ec7SSepherosa Ziehau 	 */
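	/*
	 * fromus(2) converts 2 microseconds into cputimer counts for the
	 * reload; the poll below then spins for sys_cputimer->freq / 100
	 * counts, i.e. 10ms.
	 */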
12198fbc264dSMatthew Dillon 	kprintf("IOAPIC: testing 8254 interrupt delivery...");
12206b809ec7SSepherosa Ziehau 
12218fbc264dSMatthew Dillon 	i8254_intr_reload(cti, sys_cputimer->fromus(2));
12226b809ec7SSepherosa Ziehau 	base = sys_cputimer->count();
12236b809ec7SSepherosa Ziehau 	while (sys_cputimer->count() - base < sys_cputimer->freq / 100)
12246b809ec7SSepherosa Ziehau 		; /* nothing */
12256b809ec7SSepherosa Ziehau 
12268fbc264dSMatthew Dillon 	if (get_interrupt_counter(irq, mycpuid) - lastcnt == 0) {
12278fbc264dSMatthew Dillon 		kprintf(" failed\n");
12286b809ec7SSepherosa Ziehau 		return ENOENT;
12298fbc264dSMatthew Dillon 	} else {
12308fbc264dSMatthew Dillon 		kprintf(" success\n");
12318fbc264dSMatthew Dillon 	}
12326b809ec7SSepherosa Ziehau 	return 0;
12336b809ec7SSepherosa Ziehau }
12346b809ec7SSepherosa Ziehau 
1235adc34348SSepherosa Ziehau /*
1236adc34348SSepherosa Ziehau  * Start both clocks running.  DragonFly note: the stat clock is no longer
1237adc34348SSepherosa Ziehau  * used.  Instead, 8254 based systimers are used for all major clock
1238d426f67aSSepherosa Ziehau  * interrupts.
1239adc34348SSepherosa Ziehau  */
124040672791SSepherosa Ziehau static void
1241adc34348SSepherosa Ziehau i8254_intr_initclock(struct cputimer_intr *cti, boolean_t selected)
1242c8fe38aeSMatthew Dillon {
1243c79ae131SSascha Wildner 	void *clkdesc = NULL;
12446b809ec7SSepherosa Ziehau 	int irq = 0, mixed_mode = 0, error;
1245adc34348SSepherosa Ziehau 
12466355d931SSepherosa Ziehau 	KKASSERT(mycpuid == 0);
1247c8fe38aeSMatthew Dillon 
12486b809ec7SSepherosa Ziehau 	if (!selected && i8254_intr_disable)
12496b809ec7SSepherosa Ziehau 		goto nointr;
1250adc34348SSepherosa Ziehau 
1251c8fe38aeSMatthew Dillon 	/*
1252c8fe38aeSMatthew Dillon 	 * The stat interrupt mask is different without the
1253c8fe38aeSMatthew Dillon 	 * statistics clock.  Also, don't set the interrupt
1254c8fe38aeSMatthew Dillon 	 * flag which would normally cause the RTC to generate
1255c8fe38aeSMatthew Dillon 	 * interrupts.
1256c8fe38aeSMatthew Dillon 	 */
1257c8fe38aeSMatthew Dillon 	rtc_statusb = RTCSB_24HR;
1258c8fe38aeSMatthew Dillon 
1259da23a592SMatthew Dillon 	/* Finish initializing 8254 timer 0. */
1260f45bfca0SSepherosa Ziehau 	if (ioapic_enable) {
126186d692feSSepherosa Ziehau 		irq = machintr_legacy_intr_find(0, INTR_TRIGGER_EDGE,
12626b809ec7SSepherosa Ziehau 			INTR_POLARITY_HIGH);
12636b809ec7SSepherosa Ziehau 		if (irq < 0) {
12646b809ec7SSepherosa Ziehau mixed_mode_setup:
1265027bbbfeSSepherosa Ziehau 			error = ioapic_conf_legacy_extint(0);
12666b809ec7SSepherosa Ziehau 			if (!error) {
126786d692feSSepherosa Ziehau 				irq = machintr_legacy_intr_find(0,
1268027bbbfeSSepherosa Ziehau 				    INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
12696b809ec7SSepherosa Ziehau 				if (irq < 0)
12706b809ec7SSepherosa Ziehau 					error = ENOENT;
12716b809ec7SSepherosa Ziehau 			}
12726b809ec7SSepherosa Ziehau 
12736b809ec7SSepherosa Ziehau 			if (error) {
12746b809ec7SSepherosa Ziehau 				if (!selected) {
12756b809ec7SSepherosa Ziehau 					kprintf("IOAPIC: setup mixed mode for "
12766b809ec7SSepherosa Ziehau 						"irq 0 failed: %d\n", error);
12776b809ec7SSepherosa Ziehau 					goto nointr;
12786b809ec7SSepherosa Ziehau 				} else {
12796b809ec7SSepherosa Ziehau 					panic("IOAPIC: setup mixed mode for "
12806b809ec7SSepherosa Ziehau 					      "irq 0 failed: %d\n", error);
12816b809ec7SSepherosa Ziehau 				}
12826b809ec7SSepherosa Ziehau 			}
12836b809ec7SSepherosa Ziehau 			mixed_mode = 1;
12846b809ec7SSepherosa Ziehau 		}
12856b809ec7SSepherosa Ziehau 		clkdesc = register_int(irq, clkintr, NULL, "clk",
12866b809ec7SSepherosa Ziehau 				       NULL,
12876b809ec7SSepherosa Ziehau 				       INTR_EXCL | INTR_CLOCK |
12886b809ec7SSepherosa Ziehau 				       INTR_NOPOLL | INTR_MPSAFE |
12896355d931SSepherosa Ziehau 				       INTR_NOENTROPY, 0);
1290faaf4131SMichael Neumann 	} else {
1291adc34348SSepherosa Ziehau 		register_int(0, clkintr, NULL, "clk", NULL,
1292f8a09be1SMatthew Dillon 			     INTR_EXCL | INTR_CLOCK |
1293adc34348SSepherosa Ziehau 			     INTR_NOPOLL | INTR_MPSAFE |
12946355d931SSepherosa Ziehau 			     INTR_NOENTROPY, 0);
1295faaf4131SMichael Neumann 	}
1296adc34348SSepherosa Ziehau 
1297adc34348SSepherosa Ziehau 	/* Initialize RTC. */
1298adc34348SSepherosa Ziehau 	writertc(RTC_STATUSA, rtc_statusa);
1299adc34348SSepherosa Ziehau 	writertc(RTC_STATUSB, RTCSB_24HR);
1300adc34348SSepherosa Ziehau 
1301f45bfca0SSepherosa Ziehau 	if (ioapic_enable) {
13026b809ec7SSepherosa Ziehau 		error = i8254_ioapic_trial(irq, cti);
13036b809ec7SSepherosa Ziehau 		if (error) {
13046b809ec7SSepherosa Ziehau 			if (mixed_mode) {
13056b809ec7SSepherosa Ziehau 				if (!selected) {
13066b809ec7SSepherosa Ziehau 					kprintf("IOAPIC: mixed mode for irq %d "
13077a603b36SSepherosa Ziehau 						"trial failed: %d\n",
13087a603b36SSepherosa Ziehau 						irq, error);
13096b809ec7SSepherosa Ziehau 					goto nointr;
13106b809ec7SSepherosa Ziehau 				} else {
13116b809ec7SSepherosa Ziehau 					panic("IOAPIC: mixed mode for irq %d "
13126b809ec7SSepherosa Ziehau 					      "trial failed: %d\n", irq, error);
13136b809ec7SSepherosa Ziehau 				}
13146b809ec7SSepherosa Ziehau 			} else {
13156b809ec7SSepherosa Ziehau 				kprintf("IOAPIC: warning 8254 is not connected "
13166b809ec7SSepherosa Ziehau 					"to the correct pin, try mixed mode\n");
13176355d931SSepherosa Ziehau 				unregister_int(clkdesc, 0);
13186b809ec7SSepherosa Ziehau 				goto mixed_mode_setup;
13196b809ec7SSepherosa Ziehau 			}
13206b809ec7SSepherosa Ziehau 		}
1321faaf4131SMichael Neumann 	}
13226b809ec7SSepherosa Ziehau 	return;
13236b809ec7SSepherosa Ziehau 
13246b809ec7SSepherosa Ziehau nointr:
13256b809ec7SSepherosa Ziehau 	i8254_nointr = 1; /* don't try to register again */
13266b809ec7SSepherosa Ziehau 	cputimer_intr_deregister(cti);
1327c8fe38aeSMatthew Dillon }
1328c8fe38aeSMatthew Dillon 
1329c8fe38aeSMatthew Dillon void
1330c8fe38aeSMatthew Dillon setstatclockrate(int newhz)
1331c8fe38aeSMatthew Dillon {
1332c8fe38aeSMatthew Dillon 	if (newhz == RTC_PROFRATE)
1333c8fe38aeSMatthew Dillon 		rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
1334c8fe38aeSMatthew Dillon 	else
1335c8fe38aeSMatthew Dillon 		rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
1336c8fe38aeSMatthew Dillon 	writertc(RTC_STATUSA, rtc_statusa);
1337c8fe38aeSMatthew Dillon }
1338c8fe38aeSMatthew Dillon 
1339c8fe38aeSMatthew Dillon #if 0
1340c8fe38aeSMatthew Dillon static unsigned
1341c8fe38aeSMatthew Dillon tsc_get_timecount(struct timecounter *tc)
1342c8fe38aeSMatthew Dillon {
1343c8fe38aeSMatthew Dillon 	return (rdtsc());
1344c8fe38aeSMatthew Dillon }
1345c8fe38aeSMatthew Dillon #endif
1346c8fe38aeSMatthew Dillon 
1347c8fe38aeSMatthew Dillon #ifdef KERN_TIMESTAMP
1348c8fe38aeSMatthew Dillon #define KERN_TIMESTAMP_SIZE 16384
1349c8fe38aeSMatthew Dillon static u_long tsc[KERN_TIMESTAMP_SIZE] ;
1350c8fe38aeSMatthew Dillon SYSCTL_OPAQUE(_debug, OID_AUTO, timestamp, CTLFLAG_RD, tsc,
1351c8fe38aeSMatthew Dillon 	sizeof(tsc), "LU", "Kernel timestamps");
1352c8fe38aeSMatthew Dillon void
1353c8fe38aeSMatthew Dillon _TSTMP(u_int32_t x)
1354c8fe38aeSMatthew Dillon {
1355c8fe38aeSMatthew Dillon 	static int i;
1356c8fe38aeSMatthew Dillon 
1357c8fe38aeSMatthew Dillon 	tsc[i] = (u_int32_t)rdtsc();
1358c8fe38aeSMatthew Dillon 	tsc[i+1] = x;
1359c8fe38aeSMatthew Dillon 	i = i + 2;
1360c8fe38aeSMatthew Dillon 	if (i >= KERN_TIMESTAMP_SIZE)
1361c8fe38aeSMatthew Dillon 		i = 0;
1362c8fe38aeSMatthew Dillon 	tsc[i] = 0; /* mark last entry */
1363c8fe38aeSMatthew Dillon }
1364c8fe38aeSMatthew Dillon #endif /* KERN_TIMESTAMP */
1365c8fe38aeSMatthew Dillon 
1366c8fe38aeSMatthew Dillon /*
1367c8fe38aeSMatthew Dillon  * Sysctl handler: report the current 8254 count and TSC value as hex.
1368c8fe38aeSMatthew Dillon  */
1369c8fe38aeSMatthew Dillon 
1370c8fe38aeSMatthew Dillon static int
1371c8fe38aeSMatthew Dillon hw_i8254_timestamp(SYSCTL_HANDLER_ARGS)
1372c8fe38aeSMatthew Dillon {
1373c8fe38aeSMatthew Dillon     sysclock_t count;
1374e28c8ef4SSascha Wildner     uint64_t tscval;
1375c8fe38aeSMatthew Dillon     char buf[32];
1376c8fe38aeSMatthew Dillon 
1377c8fe38aeSMatthew Dillon     crit_enter();
1378c8fe38aeSMatthew Dillon     if (sys_cputimer == &i8254_cputimer)
1379c8fe38aeSMatthew Dillon 	count = sys_cputimer->count();
1380c8fe38aeSMatthew Dillon     else
1381c8fe38aeSMatthew Dillon 	count = 0;
1382c8fe38aeSMatthew Dillon     if (tsc_present)
1383c8fe38aeSMatthew Dillon 	tscval = rdtsc();
1384c8fe38aeSMatthew Dillon     else
1385c8fe38aeSMatthew Dillon 	tscval = 0;
1386c8fe38aeSMatthew Dillon     crit_exit();
13878fbc264dSMatthew Dillon     ksnprintf(buf, sizeof(buf), "%016lx %016lx", count, tscval);
1388c8fe38aeSMatthew Dillon     return(SYSCTL_OUT(req, buf, strlen(buf) + 1));
1389c8fe38aeSMatthew Dillon }
1390c8fe38aeSMatthew Dillon 
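/*
 * Handshake used by the AP concurrency test below: each AP bumps
 * tsc_ready_cnt when parked, waits for tsc_command to be set, then
 * repeatedly stores its TSC into tsc_saved[cpu].v until some cpu
 * finishes and bumps tsc_done_cnt, which stops the others.
 */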
13911eb5a42bSMatthew Dillon struct tsc_mpsync_info {
13921eb5a42bSMatthew Dillon 	volatile int		tsc_ready_cnt;
139300ec69c7SSepherosa Ziehau 	volatile int		tsc_done_cnt;
13941eb5a42bSMatthew Dillon 	volatile int		tsc_command;
13951eb5a42bSMatthew Dillon 	volatile int		unused01[5];
13961eb5a42bSMatthew Dillon 	struct {
13971eb5a42bSMatthew Dillon 		uint64_t	v;
13981eb5a42bSMatthew Dillon 		uint64_t	unused02;
13991eb5a42bSMatthew Dillon 	} tsc_saved[MAXCPU];
14001eb5a42bSMatthew Dillon } __cachealign;
1401dda44f1eSSepherosa Ziehau 
14021eb5a42bSMatthew Dillon #if 0
1403dda44f1eSSepherosa Ziehau static void
14041eb5a42bSMatthew Dillon tsc_mpsync_test_loop(struct tsc_mpsync_thr *arg)
140500ec69c7SSepherosa Ziehau {
140600ec69c7SSepherosa Ziehau 	struct globaldata *gd = mycpu;
14075b49787bSMatthew Dillon 	tsc_uclock_t test_end, test_begin;
140800ec69c7SSepherosa Ziehau 	u_int i;
140900ec69c7SSepherosa Ziehau 
141000ec69c7SSepherosa Ziehau 	if (bootverbose) {
141100ec69c7SSepherosa Ziehau 		kprintf("cpu%d: TSC testing MP synchronization ...\n",
141200ec69c7SSepherosa Ziehau 		    gd->gd_cpuid);
141300ec69c7SSepherosa Ziehau 	}
141400ec69c7SSepherosa Ziehau 
1415ea9728caSSepherosa Ziehau 	test_begin = rdtsc_ordered();
141600ec69c7SSepherosa Ziehau 	/* Run test for 100ms */
141700ec69c7SSepherosa Ziehau 	test_end = test_begin + (tsc_frequency / 10);
141800ec69c7SSepherosa Ziehau 
141900ec69c7SSepherosa Ziehau 	arg->tsc_mpsync = 1;
142000ec69c7SSepherosa Ziehau 	arg->tsc_target = test_begin;
142100ec69c7SSepherosa Ziehau 
142200ec69c7SSepherosa Ziehau #define TSC_TEST_TRYMAX		1000000	/* Make sure we could stop */
142300ec69c7SSepherosa Ziehau #define TSC_TEST_TRYMIN		50000
142400ec69c7SSepherosa Ziehau 
142500ec69c7SSepherosa Ziehau 	for (i = 0; i < TSC_TEST_TRYMAX; ++i) {
142600ec69c7SSepherosa Ziehau 		struct lwkt_cpusync cs;
142700ec69c7SSepherosa Ziehau 
142800ec69c7SSepherosa Ziehau 		crit_enter();
142900ec69c7SSepherosa Ziehau 		lwkt_cpusync_init(&cs, gd->gd_other_cpus,
143000ec69c7SSepherosa Ziehau 		    tsc_mpsync_test_remote, arg);
143100ec69c7SSepherosa Ziehau 		lwkt_cpusync_interlock(&cs);
14323a80fe2bSSepherosa Ziehau 		cpu_pause();
1433ea9728caSSepherosa Ziehau 		arg->tsc_target = rdtsc_ordered();
143400ec69c7SSepherosa Ziehau 		cpu_mfence();
143500ec69c7SSepherosa Ziehau 		lwkt_cpusync_deinterlock(&cs);
143600ec69c7SSepherosa Ziehau 		crit_exit();
14373a80fe2bSSepherosa Ziehau 		cpu_pause();
143800ec69c7SSepherosa Ziehau 
143900ec69c7SSepherosa Ziehau 		if (!arg->tsc_mpsync) {
144000ec69c7SSepherosa Ziehau 			kprintf("cpu%d: TSC is not MP synchronized @%u\n",
144100ec69c7SSepherosa Ziehau 			    gd->gd_cpuid, i);
144200ec69c7SSepherosa Ziehau 			break;
144300ec69c7SSepherosa Ziehau 		}
144400ec69c7SSepherosa Ziehau 		if (arg->tsc_target > test_end && i >= TSC_TEST_TRYMIN)
144500ec69c7SSepherosa Ziehau 			break;
144600ec69c7SSepherosa Ziehau 	}
144700ec69c7SSepherosa Ziehau 
144800ec69c7SSepherosa Ziehau #undef TSC_TEST_TRYMIN
144900ec69c7SSepherosa Ziehau #undef TSC_TEST_TRYMAX
145000ec69c7SSepherosa Ziehau 
145100ec69c7SSepherosa Ziehau 	if (arg->tsc_target == test_begin) {
145200ec69c7SSepherosa Ziehau 		kprintf("cpu%d: TSC does not tick?!\n", gd->gd_cpuid);
145300ec69c7SSepherosa Ziehau 		/* XXX disable TSC? */
145400ec69c7SSepherosa Ziehau 		tsc_invariant = 0;
145500ec69c7SSepherosa Ziehau 		arg->tsc_mpsync = 0;
145600ec69c7SSepherosa Ziehau 		return;
145700ec69c7SSepherosa Ziehau 	}
145800ec69c7SSepherosa Ziehau 
145900ec69c7SSepherosa Ziehau 	if (arg->tsc_mpsync && bootverbose) {
146000ec69c7SSepherosa Ziehau 		kprintf("cpu%d: TSC is MP synchronized after %u tries\n",
146100ec69c7SSepherosa Ziehau 		    gd->gd_cpuid, i);
146200ec69c7SSepherosa Ziehau 	}
146300ec69c7SSepherosa Ziehau }
146400ec69c7SSepherosa Ziehau 
14651eb5a42bSMatthew Dillon #endif
146600ec69c7SSepherosa Ziehau 
14671eb5a42bSMatthew Dillon #define TSC_TEST_COUNT		50000
14681eb5a42bSMatthew Dillon 
14691eb5a42bSMatthew Dillon static void
14701eb5a42bSMatthew Dillon tsc_mpsync_ap_thread(void *xinfo)
14711eb5a42bSMatthew Dillon {
14721eb5a42bSMatthew Dillon 	struct tsc_mpsync_info *info = xinfo;
14731eb5a42bSMatthew Dillon 	int cpu = mycpuid;
14741eb5a42bSMatthew Dillon 	int i;
14751eb5a42bSMatthew Dillon 
14761eb5a42bSMatthew Dillon 	/*
14771eb5a42bSMatthew Dillon 	 * Tell main loop that we are ready and wait for initiation
14781eb5a42bSMatthew Dillon 	 */
14791eb5a42bSMatthew Dillon 	atomic_add_int(&info->tsc_ready_cnt, 1);
14801eb5a42bSMatthew Dillon 	while (info->tsc_command == 0) {
14811eb5a42bSMatthew Dillon 		lwkt_force_switch();
148200ec69c7SSepherosa Ziehau 	}
14831eb5a42bSMatthew Dillon 
14841eb5a42bSMatthew Dillon 	/*
14851eb5a42bSMatthew Dillon 	 * Run the test for TSC_TEST_COUNT loops or until tsc_done_cnt != 0
14861eb5a42bSMatthew Dillon 	 * (another cpu has finished its test), then increment tsc_done_cnt.
14871eb5a42bSMatthew Dillon 	 */
14881eb5a42bSMatthew Dillon 	crit_enter();
14891eb5a42bSMatthew Dillon 	for (i = 0; i < TSC_TEST_COUNT && info->tsc_done_cnt == 0; ++i) {
14901eb5a42bSMatthew Dillon 		info->tsc_saved[cpu].v = rdtsc_ordered();
14911eb5a42bSMatthew Dillon 	}
14921eb5a42bSMatthew Dillon 	crit_exit();
14931eb5a42bSMatthew Dillon 	atomic_add_int(&info->tsc_done_cnt, 1);
149400ec69c7SSepherosa Ziehau 
149500ec69c7SSepherosa Ziehau 	lwkt_exit();
1496dda44f1eSSepherosa Ziehau }
1497dda44f1eSSepherosa Ziehau 
1498dda44f1eSSepherosa Ziehau static void
1499dda44f1eSSepherosa Ziehau tsc_mpsync_test(void)
1500dda44f1eSSepherosa Ziehau {
1501*cac12823SMatthew Dillon 	enum { TSCOK, TSCNEG, TSCSPAN } error = TSCOK;
15021eb5a42bSMatthew Dillon 	int cpu;
15031eb5a42bSMatthew Dillon 	int try;
1504dda44f1eSSepherosa Ziehau 
1505dda44f1eSSepherosa Ziehau 	if (!tsc_invariant) {
1506dda44f1eSSepherosa Ziehau 		/* Not even invariant TSC */
1507*cac12823SMatthew Dillon 		kprintf("TSC is not invariant, "
1508*cac12823SMatthew Dillon 			"no further tests will be performed\n");
1509dda44f1eSSepherosa Ziehau 		return;
1510dda44f1eSSepherosa Ziehau 	}
1511dda44f1eSSepherosa Ziehau 
1512dda44f1eSSepherosa Ziehau 	if (ncpus == 1) {
1513dda44f1eSSepherosa Ziehau 		/* Only one CPU */
1514dda44f1eSSepherosa Ziehau 		tsc_mpsync = 1;
1515dda44f1eSSepherosa Ziehau 		return;
1516dda44f1eSSepherosa Ziehau 	}
1517dda44f1eSSepherosa Ziehau 
15181997b4c2SMatthew Dillon 	/*
15191997b4c2SMatthew Dillon 	 * Forcing can be used with qemu to reduce contention
15201997b4c2SMatthew Dillon 	 */
15211997b4c2SMatthew Dillon 	TUNABLE_INT_FETCH("hw.tsc_cputimer_force", &tsc_mpsync);
152279c04d9cSMatthew Dillon 
152379c04d9cSMatthew Dillon 	if (tsc_mpsync == 0) {
152479c04d9cSMatthew Dillon 		switch (cpu_vendor_id) {
152579c04d9cSMatthew Dillon 		case CPU_VENDOR_INTEL:
152679c04d9cSMatthew Dillon 			/*
152779c04d9cSMatthew Dillon 			 * Intel probably works
152879c04d9cSMatthew Dillon 			 */
152979c04d9cSMatthew Dillon 			break;
153033bb59d9SSepherosa Ziehau 
153179c04d9cSMatthew Dillon 		case CPU_VENDOR_AMD:
153279c04d9cSMatthew Dillon 			/*
15338a93c79fSImre Vadász 			 * For AMD 15h and 16h (i.e. The Bulldozer and Jaguar
15348a93c79fSImre Vadász 			 * architectures) we have to watch out for
15358a93c79fSImre Vadász 			 * Erratum 778:
15368a93c79fSImre Vadász 			 *     "Processor Core Time Stamp Counters May
15378a93c79fSImre Vadász 			 *      Experience Drift"
15388a93c79fSImre Vadász 			 * This erratum is only listed for cpus in Family 15h
15398a93c79fSImre Vadász 			 * with Model < 30h and Family 16h with Model < 30h.
15408a93c79fSImre Vadász 			 *
15418a93c79fSImre Vadász 			 * AMD < Bulldozer probably doesn't work
154279c04d9cSMatthew Dillon 			 */
15438a93c79fSImre Vadász 			if (CPUID_TO_FAMILY(cpu_id) == 0x15 ||
15448a93c79fSImre Vadász 			    CPUID_TO_FAMILY(cpu_id) == 0x16) {
15458a93c79fSImre Vadász 				if (CPUID_TO_MODEL(cpu_id) < 0x30)
154679c04d9cSMatthew Dillon 					return;
15478a93c79fSImre Vadász 			} else if (CPUID_TO_FAMILY(cpu_id) < 0x17) {
15488a93c79fSImre Vadász 				return;
15498a93c79fSImre Vadász 			}
155079c04d9cSMatthew Dillon 			break;
155133bb59d9SSepherosa Ziehau 
155279c04d9cSMatthew Dillon 		default:
155379c04d9cSMatthew Dillon 			/* probably won't work */
15541997b4c2SMatthew Dillon 			return;
15551997b4c2SMatthew Dillon 		}
155633bb59d9SSepherosa Ziehau 	} else if (tsc_mpsync < 0) {
155733bb59d9SSepherosa Ziehau 		kprintf("TSC MP synchronization test is disabled\n");
155833bb59d9SSepherosa Ziehau 		tsc_mpsync = 0;
155933bb59d9SSepherosa Ziehau 		return;
1560dda44f1eSSepherosa Ziehau 	}
1561dda44f1eSSepherosa Ziehau 
156279c04d9cSMatthew Dillon 	/*
15631eb5a42bSMatthew Dillon 	 * Test even if forced to 1 above.  If forced, we will use the TSC
15641eb5a42bSMatthew Dillon 	 * even if the test fails.  (set forced to -1 to disable entirely).
156579c04d9cSMatthew Dillon 	 */
1566dda44f1eSSepherosa Ziehau 	kprintf("TSC testing MP synchronization ...\n");
1567*cac12823SMatthew Dillon 	kprintf("TSC testing MP: NOTE! CPU pwrsave will inflate latencies!\n");
156800ec69c7SSepherosa Ziehau 
156900ec69c7SSepherosa Ziehau 	/*
1570*cac12823SMatthew Dillon 	 * Test that the TSC is monotonically increasing across CPU
1571*cac12823SMatthew Dillon 	 * switches.  Otherwise time will get really messed up if the
1572*cac12823SMatthew Dillon 	 * TSC is selected as the timebase.
1573*cac12823SMatthew Dillon 	 *
1574*cac12823SMatthew Dillon 	 * Test 4 times
157500ec69c7SSepherosa Ziehau 	 */
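	/*
	 * Each hop below migrates the current thread with lwkt_migratecpu()
	 * and reads the TSC; a negative delta means the destination cpu's
	 * TSC is behind the source's.  Deltas are reported in nanoseconds
	 * (ticks * 1000000000 / tsc_frequency) by the kprintf below.
	 */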
1576*cac12823SMatthew Dillon 	for (try = 0; tsc_frequency && try < 4; ++try) {
1577*cac12823SMatthew Dillon 		tsc_uclock_t last;
1578*cac12823SMatthew Dillon 		tsc_uclock_t next;
1579*cac12823SMatthew Dillon 		tsc_sclock_t delta;
1580*cac12823SMatthew Dillon 		tsc_sclock_t lo_delta = 0x7FFFFFFFFFFFFFFFLL;
1581*cac12823SMatthew Dillon 		tsc_sclock_t hi_delta = -0x7FFFFFFFFFFFFFFFLL;
1582*cac12823SMatthew Dillon 
1583*cac12823SMatthew Dillon 		last = rdtsc();
1584*cac12823SMatthew Dillon 		for (cpu = 0; cpu < ncpus; ++cpu) {
1585*cac12823SMatthew Dillon 			lwkt_migratecpu(cpu);
1586*cac12823SMatthew Dillon 			next = rdtsc();
1587*cac12823SMatthew Dillon 			if (cpu == 0) {
1588*cac12823SMatthew Dillon 				last = next;
1589*cac12823SMatthew Dillon 				continue;
1590*cac12823SMatthew Dillon 			}
1591*cac12823SMatthew Dillon 
1592*cac12823SMatthew Dillon 			delta = next - last;
1593*cac12823SMatthew Dillon 			if (delta < 0) {
1594*cac12823SMatthew Dillon 				kprintf("TSC cpu-delta NEGATIVE: "
1595*cac12823SMatthew Dillon 					"cpu %d to %d (%ld)\n",
1596*cac12823SMatthew Dillon 					cpu - 1, cpu, delta);
1597*cac12823SMatthew Dillon 				error = TSCNEG;
1598*cac12823SMatthew Dillon 			}
1599*cac12823SMatthew Dillon 			if (lo_delta > delta)
1600*cac12823SMatthew Dillon 				lo_delta = delta;
1601*cac12823SMatthew Dillon 			if (hi_delta < delta)
1602*cac12823SMatthew Dillon 				hi_delta = delta;
1603*cac12823SMatthew Dillon 			last = next;
1604*cac12823SMatthew Dillon 		}
1605*cac12823SMatthew Dillon 		last = rdtsc();
1606*cac12823SMatthew Dillon 		for (cpu = ncpus - 2; cpu >= 0; --cpu) {
1607*cac12823SMatthew Dillon 			lwkt_migratecpu(cpu);
1608*cac12823SMatthew Dillon 			next = rdtsc();
1609*cac12823SMatthew Dillon 			delta = next - last;
1610*cac12823SMatthew Dillon 			if (delta <= 0) {
1611*cac12823SMatthew Dillon 				kprintf("TSC cpu-delta WAS NEGATIVE! "
1612*cac12823SMatthew Dillon 					"cpu %d to %d (%ld)\n",
1613*cac12823SMatthew Dillon 					cpu + 1, cpu, delta);
1614*cac12823SMatthew Dillon 				error = TSCNEG;
1615*cac12823SMatthew Dillon 			}
1616*cac12823SMatthew Dillon 			if (lo_delta > delta)
1617*cac12823SMatthew Dillon 				lo_delta = delta;
1618*cac12823SMatthew Dillon 			if (hi_delta < delta)
1619*cac12823SMatthew Dillon 				hi_delta = delta;
1620*cac12823SMatthew Dillon 			last = next;
1621*cac12823SMatthew Dillon 		}
1622*cac12823SMatthew Dillon 		kprintf("TSC cpu-delta test complete, %ldnS to %ldnS ",
1623*cac12823SMatthew Dillon 			muldivu64(lo_delta, 1000000000, tsc_frequency),
1624*cac12823SMatthew Dillon 			muldivu64(hi_delta, 1000000000, tsc_frequency));
1625*cac12823SMatthew Dillon 		if (error != TSCOK) {
1626*cac12823SMatthew Dillon 			kprintf("FAILURE\n");
1627*cac12823SMatthew Dillon 			break;
1628*cac12823SMatthew Dillon 		}
1629*cac12823SMatthew Dillon 		kprintf("SUCCESS\n");
1630*cac12823SMatthew Dillon 	}
1631*cac12823SMatthew Dillon 
1632*cac12823SMatthew Dillon 	/*
1633*cac12823SMatthew Dillon 	 * Test TSC MP synchronization on APs.
1634*cac12823SMatthew Dillon 	 *
1635*cac12823SMatthew Dillon 	 * Test 4 times.
1636*cac12823SMatthew Dillon 	 */
1637*cac12823SMatthew Dillon 	for (try = 0; tsc_frequency && try < 4; ++try) {
16381eb5a42bSMatthew Dillon 		struct tsc_mpsync_info info;
16391eb5a42bSMatthew Dillon 		uint64_t last;
1640*cac12823SMatthew Dillon 		int64_t xworst;
16411eb5a42bSMatthew Dillon 		int64_t xdelta;
16421eb5a42bSMatthew Dillon 		int64_t delta;
164300ec69c7SSepherosa Ziehau 
16441eb5a42bSMatthew Dillon 		bzero(&info, sizeof(info));
164500ec69c7SSepherosa Ziehau 
164600ec69c7SSepherosa Ziehau 		for (cpu = 0; cpu < ncpus; ++cpu) {
16471eb5a42bSMatthew Dillon 			thread_t td;
16481eb5a42bSMatthew Dillon 			lwkt_create(tsc_mpsync_ap_thread, &info, &td,
16491eb5a42bSMatthew Dillon 				    NULL, TDF_NOSTART, cpu,
16501eb5a42bSMatthew Dillon 				    "tsc mpsync %d", cpu);
16511eb5a42bSMatthew Dillon 			lwkt_setpri_initial(td, curthread->td_pri);
16521eb5a42bSMatthew Dillon 			lwkt_schedule(td);
16531eb5a42bSMatthew Dillon 		}
16541eb5a42bSMatthew Dillon 		while (info.tsc_ready_cnt != ncpus)
16551eb5a42bSMatthew Dillon 			lwkt_force_switch();
165600ec69c7SSepherosa Ziehau 
16571eb5a42bSMatthew Dillon 		/*
16581eb5a42bSMatthew Dillon 		 * All threads are ready, start the test and wait for
16591eb5a42bSMatthew Dillon 		 * completion.
16601eb5a42bSMatthew Dillon 		 */
16611eb5a42bSMatthew Dillon 		info.tsc_command = 1;
16621eb5a42bSMatthew Dillon 		while (info.tsc_done_cnt != ncpus)
16631eb5a42bSMatthew Dillon 			lwkt_force_switch();
16641eb5a42bSMatthew Dillon 
16651eb5a42bSMatthew Dillon 		/*
16661eb5a42bSMatthew Dillon 		 * Process results
16671eb5a42bSMatthew Dillon 		 */
16681eb5a42bSMatthew Dillon 		last = info.tsc_saved[0].v;
16691eb5a42bSMatthew Dillon 		delta = 0;
1670*cac12823SMatthew Dillon 		xworst = 0;
16711eb5a42bSMatthew Dillon 		for (cpu = 0; cpu < ncpus; ++cpu) {
16721eb5a42bSMatthew Dillon 			xdelta = (int64_t)(info.tsc_saved[cpu].v - last);
16731eb5a42bSMatthew Dillon 			last = info.tsc_saved[cpu].v;
16741eb5a42bSMatthew Dillon 			if (xdelta < 0)
16751eb5a42bSMatthew Dillon 				xdelta = -xdelta;
1676*cac12823SMatthew Dillon 			if (xworst < xdelta)
1677*cac12823SMatthew Dillon 				xworst = xdelta;
16781eb5a42bSMatthew Dillon 			delta += xdelta;
16791eb5a42bSMatthew Dillon 
168000ec69c7SSepherosa Ziehau 		}
168100ec69c7SSepherosa Ziehau 
16821eb5a42bSMatthew Dillon 		/*
1683*cac12823SMatthew Dillon 		 * Result from this attempt.  Break out if we succeed,
1684*cac12823SMatthew Dillon 		 * otherwise try again (up to 4 times).  This might be
1685*cac12823SMatthew Dillon 		 * running in a VM so we need to be robust.
16861eb5a42bSMatthew Dillon 		 */
1687*cac12823SMatthew Dillon 		kprintf("TSC cpu concurrency test complete, worst=%ldns, "
1688*cac12823SMatthew Dillon 			"avg=%ldns ",
1689*cac12823SMatthew Dillon 			muldivu64(xworst, 1000000000, tsc_frequency),
1690*cac12823SMatthew Dillon 			muldivu64(delta / ncpus, 1000000000, tsc_frequency));
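		/*
		 * tsc_frequency / 100 ticks corresponds to 10ms and
		 * tsc_frequency / 100000 to 10us.  Only an average spread
		 * under ~10us passes (SUCCESS, breaking out); a spread over
		 * ~10ms additionally reports FAILURE, and non-passing
		 * attempts are retried, up to 4 in total.
		 */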
1691*cac12823SMatthew Dillon 		if (delta / ncpus > tsc_frequency / 100) {
1692*cac12823SMatthew Dillon 			kprintf("FAILURE\n");
1693*cac12823SMatthew Dillon 		}
16941eb5a42bSMatthew Dillon 		if (delta / ncpus < tsc_frequency / 100000) {
1695*cac12823SMatthew Dillon 			kprintf("SUCCESS\n");
1696*cac12823SMatthew Dillon 			if (error == TSCOK)
1697dda44f1eSSepherosa Ziehau 				tsc_mpsync = 1;
16981eb5a42bSMatthew Dillon 			break;
16991eb5a42bSMatthew Dillon 		}
1700*cac12823SMatthew Dillon 		kprintf("INDETERMINATE\n");
1701dda44f1eSSepherosa Ziehau 	}
1702dda44f1eSSepherosa Ziehau 
170300ec69c7SSepherosa Ziehau 	if (tsc_mpsync)
170400ec69c7SSepherosa Ziehau 		kprintf("TSC is MP synchronized\n");
170500ec69c7SSepherosa Ziehau 	else
170600ec69c7SSepherosa Ziehau 		kprintf("TSC is not MP synchronized\n");
1707dda44f1eSSepherosa Ziehau }
1708dda44f1eSSepherosa Ziehau SYSINIT(tsc_mpsync, SI_BOOT2_FINISH_SMP, SI_ORDER_ANY, tsc_mpsync_test, NULL);
1709dda44f1eSSepherosa Ziehau 
1710db2ec6f8SSascha Wildner static SYSCTL_NODE(_hw, OID_AUTO, i8254, CTLFLAG_RW, 0, "I8254");
1711c8fe38aeSMatthew Dillon SYSCTL_UINT(_hw_i8254, OID_AUTO, freq, CTLFLAG_RD, &i8254_cputimer.freq, 0,
1712c8fe38aeSMatthew Dillon 	    "frequency");
1713c8fe38aeSMatthew Dillon SYSCTL_PROC(_hw_i8254, OID_AUTO, timestamp, CTLTYPE_STRING|CTLFLAG_RD,
1714c8fe38aeSMatthew Dillon 	    0, 0, hw_i8254_timestamp, "A", "");
1715c8fe38aeSMatthew Dillon 
1716c8fe38aeSMatthew Dillon SYSCTL_INT(_hw, OID_AUTO, tsc_present, CTLFLAG_RD,
1717c8fe38aeSMatthew Dillon 	    &tsc_present, 0, "TSC Available");
17185a81b19fSSepherosa Ziehau SYSCTL_INT(_hw, OID_AUTO, tsc_invariant, CTLFLAG_RD,
17195a81b19fSSepherosa Ziehau 	    &tsc_invariant, 0, "Invariant TSC");
1720dda44f1eSSepherosa Ziehau SYSCTL_INT(_hw, OID_AUTO, tsc_mpsync, CTLFLAG_RD,
1721dda44f1eSSepherosa Ziehau 	    &tsc_mpsync, 0, "TSC is synchronized across CPUs");
1722c8fe38aeSMatthew Dillon SYSCTL_QUAD(_hw, OID_AUTO, tsc_frequency, CTLFLAG_RD,
1723c8fe38aeSMatthew Dillon 	    &tsc_frequency, 0, "TSC Frequency");
1724