xref: /openbsd/sys/arch/hppa/dev/clock.c (revision 097a140d)
1 /*	$OpenBSD: clock.c,v 1.32 2021/02/23 04:44:30 cheloha Exp $	*/
2 
3 /*
4  * Copyright (c) 1998-2003 Michael Shalayeff
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/timetc.h>
33 
34 #include <dev/clock_subr.h>
35 
36 #include <machine/pdc.h>
37 #include <machine/iomod.h>
38 #include <machine/psl.h>
39 #include <machine/intr.h>
40 #include <machine/reg.h>
41 #include <machine/cpufunc.h>
42 #include <machine/autoconf.h>
43 
/* Number of ITMR ticks per hardclock(9) tick (ITMR frequency / hz). */
u_long	cpu_hzticks;

int	cpu_hardclock(void *);
u_int	itmr_get_timecount(struct timecounter *);

/* The free-running CPU interval timer (CR_ITMR) exported as a timecounter. */
struct timecounter itmr_timecounter = {
	.tc_get_timecount = itmr_get_timecount,
	.tc_poll_pps = NULL,
	.tc_counter_mask = 0xffffffff,
	.tc_frequency = 0,	/* filled in by cpu_initclocks() */
	.tc_name = "itmr",
	.tc_quality = 0,
	.tc_priv = NULL,
	.tc_user = 0,
};

/* Time-of-day clock backed by PDC firmware calls (PDC_TOD). */
extern todr_chip_handle_t todr_handle;
struct todr_chip_handle pdc_todr;
62 
63 int
64 pdc_gettime(struct todr_chip_handle *handle, struct timeval *tv)
65 {
66 	struct pdc_tod tod PDC_ALIGNMENT;
67 	int error;
68 
69 	if ((error = pdc_call((iodcio_t)pdc, 1, PDC_TOD, PDC_TOD_READ,
70 	    &tod, 0, 0, 0, 0, 0))) {
71 		printf("clock: failed to fetch (%d)\n", error);
72 		return EIO;
73 	}
74 
75 	tv->tv_sec = tod.sec;
76 	tv->tv_usec = tod.usec;
77 	return 0;
78 }
79 
80 int
81 pdc_settime(struct todr_chip_handle *handle, struct timeval *tv)
82 {
83 	int error;
84 
85 	if ((error = pdc_call((iodcio_t)pdc, 1, PDC_TOD, PDC_TOD_WRITE,
86 	    tv->tv_sec, tv->tv_usec))) {
87 		printf("clock: failed to save (%d)\n", error);
88 		return EIO;
89 	}
90 
91 	return 0;
92 }
93 
94 void
95 cpu_initclocks(void)
96 {
97 	struct cpu_info *ci = curcpu();
98 	u_long __itmr;
99 
100 	pdc_todr.todr_gettime = pdc_gettime;
101 	pdc_todr.todr_settime = pdc_settime;
102 	todr_handle = &pdc_todr;
103 
104 	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
105 
106 	itmr_timecounter.tc_frequency = PAGE0->mem_10msec * 100;
107 	tc_init(&itmr_timecounter);
108 
109 	mfctl(CR_ITMR, __itmr);
110 	ci->ci_itmr = __itmr;
111 	__itmr += cpu_hzticks;
112 	mtctl(__itmr, CR_ITMR);
113 }
114 
/*
 * Interval-timer interrupt handler.  Calls hardclock() once for
 * every cpu_hzticks period that has elapsed since the last service,
 * then reprograms CR_ITMR for the next tick, taking care that the
 * newly programmed value still lies in the future.  Returns 1 to
 * indicate the interrupt was handled.
 */
int
cpu_hardclock(void *v)
{
	struct cpu_info *ci = curcpu();
	u_long __itmr, delta, eta;
	int wrap;
	register_t eiem;

	/*
	 * Invoke hardclock as many times as there have been cpu_hzticks
	 * ticks since the last interrupt.
	 */
	for (;;) {
		mfctl(CR_ITMR, __itmr);
		delta = __itmr - ci->ci_itmr;	/* unsigned: wrap-safe */
		if (delta >= cpu_hzticks) {
			hardclock(v);
			ci->ci_itmr += cpu_hzticks;
		} else
			break;
	}

	/*
	 * Program the next clock interrupt, making sure it will
	 * indeed happen in the future. This is done with interrupts
	 * disabled to avoid a possible race.
	 */
	eta = ci->ci_itmr + cpu_hzticks;
	wrap = eta < ci->ci_itmr;	/* watch out for a wraparound */
	/* Save the external interrupt mask (cr15/eiem), then mask all. */
	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
	__asm volatile("mtctl	%r0, %cr15");
	mtctl(eta, CR_ITMR);
	mfctl(CR_ITMR, __itmr);
	/*
	 * If we were close enough to the next tick interrupt
	 * value, by the time we have programmed itmr, it might
	 * have passed the value, which would cause a complete
	 * cycle until the next interrupt occurs. On slow
	 * models, this would be a disaster (a complete cycle
	 * taking over two minutes on a 715/33).
	 *
	 * We expect that it will only be necessary to postpone
	 * the interrupt once. Thus, there are two cases:
	 * - We are expecting a wraparound: eta < cpu_itmr.
	 *   itmr is on track if either >= cpu_itmr or < eta.
	 * - We are not wrapping: eta > cpu_itmr.
	 *   itmr is on track if >= cpu_itmr and < eta (we need
	 *   to keep the >= cpu_itmr test because itmr might wrap
	 *   before eta does).
	 */
	if ((wrap && !(eta > __itmr || __itmr >= ci->ci_itmr)) ||
	    (!wrap && !(eta > __itmr && __itmr >= ci->ci_itmr))) {
		/* eta already passed; push the interrupt one tick out. */
		eta += cpu_hzticks;
		mtctl(eta, CR_ITMR);
	}
	/* Restore the saved external interrupt mask. */
	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));

	return (1);
}
174 
/*
 * Change the statistics clock rate.  The interval timer is the only
 * clock source here, so there is nothing to adjust.
 */
void
setstatclockrate(int newhz)
{
}
180 
181 u_int
182 itmr_get_timecount(struct timecounter *tc)
183 {
184 	u_long __itmr;
185 
186 	mfctl(CR_ITMR, __itmr);
187 	return (__itmr);
188 }
189