xref: /netbsd/sys/arch/powerpc/powerpc/clock.c (revision 6550d01e)
1 /*	$NetBSD: clock.c,v 1.10 2011/01/18 01:02:55 matt Exp $	*/
2 /*      $OpenBSD: clock.c,v 1.3 1997/10/13 13:42:53 pefo Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6  * Copyright (C) 1995, 1996 TooLs GmbH.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by TooLs GmbH.
20  * 4. The name of TooLs GmbH may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.10 2011/01/18 01:02:55 matt Exp $");
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/systm.h>
41 #include <sys/device.h>
42 #include <sys/timetc.h>
43 
44 #include <uvm/uvm_extern.h>
45 
46 #include <powerpc/spr.h>
47 #if defined (PPC_OEA) || defined(PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
48 #include <powerpc/oea/spr.h>
49 #elif defined (PPC_BOOKE)
50 #include <powerpc/booke/spr.h>
51 #elif defined (PPC_IBM4XX)
52 #include <powerpc/ibm4xx/spr.h>
53 #else
54 #error unknown powerpc variant
55 #endif
56 
void decr_intr(struct clockframe *);
void init_powerpc_tc(void);
static u_int get_powerpc_timecount(struct timecounter *);

uint32_t ticks_per_sec;		/* timebase frequency; see init_powerpc_tc() */
uint32_t ns_per_tick;		/* nanoseconds per timebase tick, used by delay() */
uint32_t ticks_per_intr = 0;	/* decrementer reload value; 0 = clock not yet initialized */
64 
65 static struct timecounter powerpc_timecounter = {
66 	get_powerpc_timecount,	/* get_timecount */
67 	0,			/* no poll_pps */
68 	0x7fffffff,		/* counter_mask */
69 	0,			/* frequency */
70 	"mftb",			/* name */
71 	100,			/* quality */
72 	NULL,			/* tc_priv */
73 	NULL			/* tc_next */
74 };
75 
76 /*
77  * Start the real-time and statistics clocks. Leave stathz 0 since there
78  * are no other timers available.
79  */
void
cpu_initclocks(void)
{
	struct cpu_info * const ci = curcpu();
	uint32_t msr;

	/* Decrementer reload value giving hz interrupts per second. */
	ticks_per_intr = ticks_per_sec / hz;
	cpu_timebase = ticks_per_sec;
#ifdef PPC_OEA601
	/* The MPC601 has no timebase; read its RTC low register instead. */
	if ((mfpvr() >> 16) == MPC601)
		__asm volatile
		    ("mfspr %0,%1" : "=r"(ci->ci_lasttb) : "n"(SPR_RTCL_R));
	else
#endif
		__asm volatile ("mftb %0" : "=r"(ci->ci_lasttb));
	/* Arm the decrementer for the first clock tick. */
	__asm volatile ("mtdec %0" :: "r"(ticks_per_intr));
	init_powerpc_tc();

	/*
	 * Now allow all hardware interrupts including hardclock(9).
	 */
	__asm volatile ("mfmsr %0; ori %0,%0,%1; mtmsr %0"
	    : "=r"(msr) : "K"(PSL_EE|PSL_RI));
}
104 
105 /*
106  * We assume newhz is either stathz or profhz, and that neither will
107  * change after being set up above.  Could recalculate intervals here
108  * but that would be a drag.
109  */
void
setstatclockrate(int newhz)
{

	/*
	 * The decrementer is the only timer on this hardware, so the
	 * statistics clock rate cannot be changed independently.
	 */
}
116 
/*
 * Decrementer interrupt handler.  Reslews the decrementer to keep the
 * tick rate steady, then runs hardclock() once per elapsed tick unless
 * clock interrupts were blocked, in which case the ticks are deferred
 * to ci_tickspending.
 */
void
decr_intr(struct clockframe *cfp)
{
	struct cpu_info * const ci = curcpu();
	int msr;
	int pri;
	u_long tb;
	long ticks;
	int nticks;

	/* Check whether we are initialized */
	if (!ticks_per_intr)
		return;

	/*
	 * Based on the actual time delay since the last decrementer reload,
	 * we arrange for earlier interrupt next time.
	 */
	__asm ("mfdec %0" : "=r"(ticks));
	/* One elapsed tick per full reload interval the decrementer ran past. */
	for (nticks = 0; ticks < 0; nticks++)
		ticks += ticks_per_intr;
	__asm volatile ("mtdec %0" :: "r"(ticks));

	ci->ci_data.cpu_nintr++;
	ci->ci_ev_clock.ev_count++;

	pri = splclock();
	if (pri & (1 << SPL_CLOCK)) {
		/* Clock interrupts were blocked at the old IPL; defer them. */
		ci->ci_tickspending += nticks;
	} else {
		/* Account for any ticks deferred by earlier interrupts. */
		nticks += ci->ci_tickspending;
		ci->ci_tickspending = 0;

		/*
		 * lasttb is used during microtime. Set it to the virtual
		 * start of this tick interval.
		 */
#ifdef PPC_OEA601
		/* MPC601 reads its RTC low register instead of the timebase. */
		if ((mfpvr() >> 16) == MPC601)
			__asm volatile
			    ("mfspr %0,%1" : "=r"(tb) : "n"(SPR_RTCL_R));
		else
#endif
			__asm volatile ("mftb %0" : "=r"(tb));

		ci->ci_lasttb = tb + ticks - ticks_per_intr;

		/*
		 * Reenable interrupts
		 */
		__asm volatile ("mfmsr %0; ori %0, %0, %1; mtmsr %0"
			      : "=r"(msr) : "K"(PSL_EE));

		/*
		 * Do standard timer interrupt stuff.
		 * Do softclock stuff only on the last iteration.
		 */
		while (--nticks > 0)
			hardclock(cfp);
		hardclock(cfp);
	}
	splx(pri);
}
180 
181 /*
182  * Wait for about n microseconds (at least!).
183  */
void
delay(unsigned int n)
{
	u_quad_t tb;
	u_long tbh, tbl, scratch;

#ifdef PPC_OEA601
	if ((mfpvr() >> 16) == MPC601) {
		u_int32_t rtc[2];

		/*
		 * MPC601: compute the target RTC value as
		 * rtc[0] = seconds (RTCU), rtc[1] = nanoseconds (RTCL),
		 * then spin until the RTC reaches it.
		 */
		mfrtc(rtc);
		while (n >= 1000000) {
			rtc[0]++;
			n -= 1000000;
		}
		rtc[1] += (n * 1000);	/* n < 1000000 here, so no overflow */
		if (rtc[1] >= 1000000000) {
			rtc[0]++;
			rtc[1] -= 1000000000;
		}
		/* Spin: loop while RTCU < target, or RTCU == target and RTCL < target. */
		__asm volatile ("1: mfspr %0,%3; cmplw %0,%1; blt 1b; bgt 2f;"
		    "mfspr %0,%4; cmplw %0,%2; blt 1b; 2:"
		    : "=&r"(scratch)
		    : "r"(rtc[0]), "r"(rtc[1]), "n"(SPR_RTCU_R), "n"(SPR_RTCL_R)
		    : "cr0");
	} else
#endif
	{
		/*
		 * Generic: compute the 64-bit target timebase value,
		 * rounding the tick count up so we wait at least n us.
		 * NOTE(review): n * 1000 is 32-bit arithmetic and would
		 * wrap for n > ~4.29e6 us — presumably callers never pass
		 * such large delays; confirm if that assumption changes.
		 */
		tb = mftb();
		tb += (n * 1000 + ns_per_tick - 1) / ns_per_tick;
		tbh = tb >> 32;
		tbl = tb;
		/* Spin: loop while TBU < target high, or TBU == high and TBL < low. */
		__asm volatile ("1: mftbu %0; cmplw %0,%1; blt 1b; bgt 2f;"
			      "mftb %0; cmplw %0,%2; blt 1b; 2:"
			      : "=&r"(scratch) : "r"(tbh), "r"(tbl)
			      : "cr0");
	}
}
222 
/*
 * Timecounter read method: return the low 32 bits of the timebase
 * (or the MPC601 RTC low register), with external interrupts disabled
 * around the read so the value and the caller's use of it are not
 * split by a decrementer interrupt.
 */
static u_int
get_powerpc_timecount(struct timecounter *tc)
{
	u_long tb;
	int msr, scratch;

	/*
	 * Clear PSL_EE in the MSR, saving the old MSR for restore below.
	 * NOTE(review): "andi." uses a 16-bit immediate mask, so this also
	 * clears the upper MSR bits in the temporary written by mtmsr —
	 * presumably none of those bits are set in this context; confirm.
	 */
	__asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
		      : "=r"(msr), "=r"(scratch) : "K"((u_short)~PSL_EE));
#ifdef PPC_OEA601
	/* MPC601 has no timebase; read its RTC low register. */
	if ((mfpvr() >> 16) == MPC601)
		__asm volatile ("mfspr %0,%1" : "=r"(tb) : "n"(SPR_RTCL_R));
	else
#endif
		__asm volatile ("mftb %0" : "=r"(tb));
	mtmsr(msr);	/* restore original MSR (re-enables interrupts) */

	return tb;
}
241 
242 void
243 init_powerpc_tc(void)
244 {
245 	/* from machdep initialization */
246 	powerpc_timecounter.tc_frequency = ticks_per_sec;
247 	tc_init(&powerpc_timecounter);
248 }
249