xref: /original-bsd/sys/hp300/hp300/clock.c (revision ba762ddc)
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: clock.c 1.17 89/11/30$
 *
 *	@(#)clock.c	7.5 (Berkeley) 04/20/91
 */

#include "param.h"
#include "kernel.h"
#include "../dev/hilreg.h"
#include "clockreg.h"

#include "vm/vm.h"
#include "../include/psl.h"
#include "../include/cpu.h"

#if defined(GPROF) && defined(PROFTIMER)
#include "sys/gprof.h"
#endif

int    clkstd[1];

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
struct bbc_tm *gmt_to_bbc();
u_char bbc_registers[13];
u_char write_bbc_reg(), read_bbc_reg();
struct hil_dev *bbcaddr = NULL;

/*
 * Machine-dependent clock routines.
 *
 * Startrtclock restarts the real-time clock, which provides
 * hardclock interrupts to kern_clock.c.
 *
 * Inittodr initializes the time of day hardware which provides
 * date functions.
 *
 * Resettodr restores the time of day hardware after a time change.
 *
 * A note on the real-time clock:
 * We actually load the clock with CLK_INTERVAL-1 instead of CLK_INTERVAL.
 * This is because the counter decrements to zero after N+1 enabled clock
 * periods where N is the value loaded into the counter.
 */
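
/*
 * For example, loading a count of 9 would produce an interrupt every
 * 10 enabled clock periods, so loading CLK_INTERVAL-1 gives exactly
 * CLK_INTERVAL periods per hardclock interrupt.
 */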

/*
 * Start the real-time clock.
 */
startrtclock()
{
	register struct clkreg *clk;

	clkstd[0] = IOV(0x5F8000);
	clk = (struct clkreg *) clkstd[0];

	clk->clk_cr2 = CLK_CR1;
	clk->clk_cr1 = CLK_RESET;
	clk->clk_cr2 = CLK_CR3;
	clk->clk_cr3 = 0;
	clk->clk_msb1 = (CLK_INTERVAL-1) >> 8 & 0xFF;
	clk->clk_lsb1 = (CLK_INTERVAL-1) & 0xFF;
	clk->clk_msb2 = 0;
	clk->clk_lsb2 = 0;
	clk->clk_msb3 = 0;
	clk->clk_lsb3 = 0;
	clk->clk_cr2 = CLK_CR1;
	clk->clk_cr1 = CLK_IENAB;
}

/*
 * Returns number of usec since last recorded clock "tick"
 * (i.e. clock interrupt).
 */
clkread()
{
	register struct clkreg *clk = (struct clkreg *) clkstd[0];
	register int high, low;

	high = clk->clk_msb1;
	low = clk->clk_lsb1;
	if (high != clk->clk_msb1)
		high = clk->clk_msb1;

	high = (CLK_INTERVAL-1) - ((high << 8) | low);
	/*
	 * Pending interrupt indicates that the counter has wrapped
	 * since we went to splhigh().  Need to compensate.
	 */
	if (clk->clk_sr & CLK_INT1)
		high += CLK_INTERVAL;
	return((high * tick) / CLK_INTERVAL);
}
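
#ifdef notdef
/*
 * Example only, not compiled (the function name is illustrative): how a
 * caller might combine the global "time" maintained by kern_clock.c with
 * clkread() to produce a microsecond-resolution timestamp.  This is a
 * sketch of the usual microtime-style usage, not part of the driver.
 */
clkread_example(tvp)
	struct timeval *tvp;
{
	int s = splhigh();

	*tvp = time;
	tvp->tv_usec += clkread();
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	splx(s);
}
#endif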

#include "clock.h"
#if NCLOCK > 0
/*
 * /dev/clock: mappable high resolution timer.
 *
 * This code implements a 32-bit recycling counter (with a 4 usec period)
 * using timers 2 & 3 on the 6840 clock chip.  The counter can be mapped
 * RO into a user's address space to achieve low overhead (no system calls),
 * high-precision timing.
 *
 * Note that timer 3 is also used for the high precision profiling timer
 * (PROFTIMER code below).  Care should be taken when both uses are
 * configured as only a token effort is made to avoid conflicting use.
 */
#include "sys/proc.h"
#include "sys/ioctl.h"
#include "sys/malloc.h"
#include "clockioctl.h"
#include "vm/vm_param.h"
#include "vm/vm_pager.h"
#include "vm/vm_prot.h"
#include "sys/specdev.h"
#include "sys/vnode.h"
#include "sys/mman.h"

int clockon = 0;		/* non-zero if high-res timer enabled */
#ifdef PROFTIMER
int  profprocs = 0;		/* # of procs using profiling timer */
#endif
#ifdef DEBUG
int clockdebug = 0;
#endif

/*ARGSUSED*/
clockopen(dev, flags)
	dev_t dev;
{
#ifdef PROFTIMER
#ifdef GPROF
	/*
	 * Kernel profiling enabled, give up.
	 */
	if (profiling)
		return(EBUSY);
#endif
	/*
	 * If any user processes are profiling, give up.
	 */
	if (profprocs)
		return(EBUSY);
#endif
	if (!clockon) {
		startclock();
		clockon++;
	}
	return(0);
}

/*ARGSUSED*/
clockclose(dev, flags)
	dev_t dev;
{
	(void) clockunmmap(dev, (caddr_t)0, curproc);	/* XXX */
	stopclock();
	clockon = 0;
	return(0);
}

/*ARGSUSED*/
clockioctl(dev, cmd, data, flag, p)
	dev_t dev;
	caddr_t data;
	struct proc *p;
{
	int error = 0;

	switch (cmd) {

	case CLOCKMAP:
		error = clockmmap(dev, (caddr_t *)data, p);
		break;

	case CLOCKUNMAP:
		error = clockunmmap(dev, *(caddr_t *)data, p);
		break;

	case CLOCKGETRES:
		*(int *)data = CLK_RESOLUTION;
		break;

	default:
		error = EINVAL;
		break;
	}
	return(error);
}
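
/*
 * Example usage from a user program (a sketch only, not part of the
 * driver; the device node name is assumed, the ioctl names come from
 * clockioctl.h):
 *
 *	int fd = open("/dev/clock", O_RDONLY);
 *	caddr_t addr = 0;
 *	int res;
 *
 *	ioctl(fd, CLOCKGETRES, &res);		get counter resolution
 *	ioctl(fd, CLOCKMAP, &addr);		map the counter read-only
 *	... sample the free-running counter through addr ...
 *	ioctl(fd, CLOCKUNMAP, &addr);		unmap when done
 */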

/*ARGSUSED*/
clockmap(dev, off, prot)
	dev_t dev;
{
	return((off + (IOBASE+CLKBASE+CLKSR-1)) >> PGSHIFT);
}

clockmmap(dev, addrp, p)
	dev_t dev;
	caddr_t *addrp;
	struct proc *p;
{
	int error;
	struct vnode vn;
	struct specinfo si;
	int flags;

	flags = MAP_FILE|MAP_SHARED;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (caddr_t)0x1000000;	/* XXX */
	vn.v_type = VCHR;			/* XXX */
	vn.v_specinfo = &si;			/* XXX */
	vn.v_rdev = dev;			/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)addrp,
			PAGE_SIZE, VM_PROT_ALL, flags, (caddr_t)&vn, 0);
	return(error);
}

clockunmmap(dev, addr, p)
	dev_t dev;
	caddr_t addr;
	struct proc *p;
{
	int rv;

	if (addr == 0)
		return(EINVAL);		/* XXX: how do we deal with this? */
	rv = vm_deallocate(&p->p_vmspace->vm_map, (vm_offset_t)addr, PAGE_SIZE);
	return(rv == KERN_SUCCESS ? 0 : EINVAL);
}

startclock()
{
	register struct clkreg *clk = (struct clkreg *)clkstd[0];

	clk->clk_msb2 = -1; clk->clk_lsb2 = -1;
	clk->clk_msb3 = -1; clk->clk_lsb3 = -1;

	clk->clk_cr2 = CLK_CR3;
	clk->clk_cr3 = CLK_OENAB|CLK_8BIT;
	clk->clk_cr2 = CLK_CR1;
	clk->clk_cr1 = CLK_IENAB;
}

stopclock()
{
	register struct clkreg *clk = (struct clkreg *)clkstd[0];

	clk->clk_cr2 = CLK_CR3;
	clk->clk_cr3 = 0;
	clk->clk_cr2 = CLK_CR1;
	clk->clk_cr1 = CLK_IENAB;
}
#endif

#ifdef PROFTIMER
/*
 * This code allows the hp300 kernel to use one of the extra timers on
 * the clock chip for profiling, instead of the regular system timer.
 * The advantage of this is that the profiling timer can be turned up to
 * a higher interrupt rate, giving finer resolution timing.  The profclock
 * routine is called from the lev6intr in locore, and is a specialized
 * routine that calls addupc.  The overhead then is far less than if
 * hardclock/softclock were called.  Further, the context switch code in
 * locore has been changed to turn the profile clock on/off when switching
 * into/out of a process that is profiling (startprofclock/stopprofclock).
 * This reduces the impact of the profiling clock on other users, and may
 * increase the accuracy of the profiling.
 */
int  profint   = PRF_INTERVAL;	/* Clock ticks between interrupts */
int  profscale = 0;		/* Scale factor from sys clock to prof clock */
char profon    = 0;		/* Is profiling clock on? */

/* profon values - do not change, locore.s assumes these values */
#define PRF_NONE	0x00
#define	PRF_USER	0x01
#define	PRF_KERNEL	0x80


#ifdef notcalled
initprofclock()
{
#if NCLOCK > 0
	struct proc *p = curproc;		/* XXX */
	/*
	 * If the high-res timer is running, force profiling off.
	 * Unfortunately, this gets reflected back to the user not as
	 * an error but as a lack of results.
	 */
	if (clockon) {
		p->p_stats->p_prof.pr_scale = 0;
		return;
	}
	/*
	 * Keep track of the number of user processes that are profiling
	 * by checking the scale value.
	 *
	 * XXX: this all assumes that the profiling code is well behaved;
	 * i.e. profil() is called once per process with pcscale non-zero
	 * to turn it on, and once with pcscale zero to turn it off.
	 * Also assumes you don't do any forks or execs.  Oh well, there
	 * is always adb...
	 */
	if (p->p_stats->p_prof.pr_scale)
		profprocs++;
	else
		profprocs--;
#endif
	/*
	 * The profile interrupt interval must be an even divisor
	 * of the CLK_INTERVAL so that scaling from a system clock
	 * tick to a profile clock tick is possible using integer math.
	 */
	if (profint > CLK_INTERVAL || (CLK_INTERVAL % profint) != 0)
		profint = CLK_INTERVAL;
	profscale = CLK_INTERVAL / profint;
}
#endif
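
/*
 * For example, if CLK_INTERVAL were 2500 and profint 250 (hypothetical
 * numbers), profscale would be 10: ten profile-clock ticks per system
 * clock tick.  An interval that does not divide CLK_INTERVAL evenly is
 * rejected by the check in initprofclock() above and replaced with
 * CLK_INTERVAL itself, giving a profscale of 1.
 */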

startprofclock()
{
	register struct clkreg *clk = (struct clkreg *)clkstd[0];

	clk->clk_msb3 = (profint-1) >> 8 & 0xFF;
	clk->clk_lsb3 = (profint-1) & 0xFF;

	clk->clk_cr2 = CLK_CR3;
	clk->clk_cr3 = CLK_IENAB;
}

stopprofclock()
{
	register struct clkreg *clk = (struct clkreg *)clkstd[0];

	clk->clk_cr2 = CLK_CR3;
	clk->clk_cr3 = 0;
}

#ifdef GPROF
/*
 * profclock() is expanded in line in lev6intr() unless profiling the kernel.
 * Assumes it is called with clock interrupts blocked.
 */
profclock(pc, ps)
	caddr_t pc;
	int ps;
{
	/*
	 * Came from user mode.
	 * If this process is being profiled record the tick.
	 */
	if (USERMODE(ps)) {
		if (curproc->p_stats->p_prof.pr_scale)
			addupc(pc, &curproc->p_stats->p_prof, 1);
	}
	/*
	 * Came from kernel (supervisor) mode.
	 * If we are profiling the kernel, record the tick.
	 */
	else if (profiling < 2) {
		register int s = pc - s_lowpc;

		if (s < s_textsize)
			kcount[s / (HISTFRACTION * sizeof (*kcount))]++;
	}
	/*
	 * Kernel profiling was on but has been disabled.
	 * Mark as no longer profiling kernel and if all profiling done,
	 * disable the clock.
	 */
	if (profiling && (profon & PRF_KERNEL)) {
		profon &= ~PRF_KERNEL;
		if (profon == PRF_NONE)
			stopprofclock();
	}
}
#endif
#endif

/*
 * Initialize the time of day register, using the given time base
 * (e.g. the time from a filesystem).
 */
inittodr(base)
	time_t base;
{
	u_long timbuf = base;	/* assume no battery clock exists */
	static int bbcinited = 0;

	/* XXX */
	if (!bbcinited) {
		if (badbaddr(&BBCADDR->hil_stat))
			printf("WARNING: no battery clock\n");
		else
			bbcaddr = BBCADDR;
		bbcinited = 1;
	}

	/*
	 * bbc_to_gmt converts and stores the gmt in timbuf.
	 * If an error is detected in bbc_to_gmt, or if the filesystem
	 * time is more recent than the gmt time in the clock,
	 * then use the filesystem time and warn the user.
	 */
	if (!bbc_to_gmt(&timbuf) || timbuf < base) {
		printf("WARNING: bad date in battery clock\n");
		timbuf = base;
	}
	if (base < 5*SECYR) {
		printf("WARNING: preposterous time in file system");
		timbuf = 6*SECYR + 186*SECDAY + SECDAY/2;
		printf(" -- CHECK AND RESET THE DATE!\n");
	}

	/* Battery clock does not store usec's, so forget about it. */
	time.tv_sec = timbuf;
}

resettodr()
{
	register int i;
	register struct bbc_tm *tmptr;

	tmptr = gmt_to_bbc(time.tv_sec);

	decimal_to_bbc(0, 1,  tmptr->tm_sec);
	decimal_to_bbc(2, 3,  tmptr->tm_min);
	decimal_to_bbc(4, 5,  tmptr->tm_hour);
	decimal_to_bbc(7, 8,  tmptr->tm_mday);
	decimal_to_bbc(9, 10, tmptr->tm_mon);
	decimal_to_bbc(11, 12, tmptr->tm_year);

	/*
	 * Some bogusness to deal with seemingly broken hardware:
	 * the tens-of-hours register also carries extra bits (the 8
	 * added below), so it is written by hand.
	 */
	bbc_registers[5] = ((tmptr->tm_hour / 10) & 0x03) + 8;

	write_bbc_reg(15, 13);	/* reset prescaler */

	for (i = 0; i <= NUM_BBC_REGS; i++)
		if (bbc_registers[i] != write_bbc_reg(i, bbc_registers[i])) {
			printf("Cannot set battery backed clock\n");
			break;
		}
}
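
/*
 * decimal_to_bbc() and bbc_to_decimal() are macros from clockreg.h (not
 * shown here); since the battery clock stores one decimal digit per
 * register, they presumably just split a value into its units and tens
 * digits and reassemble them.  For example, a tm_min of 37 ends up as a
 * 7 and a 3 in the two registers named in the call above.
 */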

struct bbc_tm *
gmt_to_bbc(tim)
	long tim;
{
	register int i;
	register long hms, day;
	static struct bbc_tm rt;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	rt.tm_hour = hms / 3600;
	rt.tm_min  = (hms % 3600) / 60;
	rt.tm_sec  = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME - 1900; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	rt.tm_year = i;

	/* Number of months in days left */
	if (leapyear(rt.tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	rt.tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	rt.tm_mday = day + 1;

	return(&rt);
}

bbc_to_gmt(timbuf)
	u_long *timbuf;
{
	register int i;
	register u_long tmp;
	int year, month, day, hour, min, sec;

	read_bbc();

	sec = bbc_to_decimal(1, 0);
	min = bbc_to_decimal(3, 2);

	/*
	 * Hours are handled differently: the tens-of-hours register
	 * also carries extra bits (see resettodr()), so mask them off.
	 */
	hour  = ((bbc_registers[5] & 0x03) * 10) + bbc_registers[4];
	day   = bbc_to_decimal(8, 7);
	month = bbc_to_decimal(10, 9);
	year  = bbc_to_decimal(12, 11) + 1900;

	range_test(hour, 0, 23);
	range_test(day, 1, 31);
	range_test(month, 1, 12);
	range_test(year, STARTOFTIME, 2000);

	tmp = 0;

	for (i = STARTOFTIME; i < year; i++)
		tmp += days_in_year(i);
	if (leapyear(year) && month > FEBRUARY)
		tmp++;

	for (i = 1; i < month; i++)
		tmp += days_in_month(i);

	tmp += (day - 1);
	tmp = ((tmp * 24 + hour) * 60 + min) * 60 + sec;

	*timbuf = tmp;
	return(1);
}
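
/*
 * Worked example (assuming STARTOFTIME were 1970): for a battery clock
 * reading of 1991-04-20 00:00:00, the year loop adds 21*365 + 5 leap
 * days = 7670 days, the month loop adds 31 + 28 + 31 = 90, and day-1
 * adds 19, for 7779 days total; 7779 * 86400 = 672105600 seconds, the
 * expected GMT value for that date.
 */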

read_bbc()
{
	register int i, read_okay;

	read_okay = 0;
	while (!read_okay) {
		read_okay = 1;
		for (i = 0; i <= NUM_BBC_REGS; i++)
			bbc_registers[i] = read_bbc_reg(i);
		for (i = 0; i <= NUM_BBC_REGS; i++)
			if (bbc_registers[i] != read_bbc_reg(i))
				read_okay = 0;
	}
}

u_char
read_bbc_reg(reg)
	int reg;
{
	u_char data = reg;

	if (bbcaddr) {
		send_hil_cmd(bbcaddr, BBC_SET_REG, &data, 1, NULL);
		send_hil_cmd(bbcaddr, BBC_READ_REG, NULL, 0, &data);
	}
	return(data);
}

u_char
write_bbc_reg(reg, data)
	u_int data;
{
	u_char tmp;

	tmp = (u_char) ((data << HIL_SSHIFT) | reg);

	if (bbcaddr) {
		send_hil_cmd(bbcaddr, BBC_SET_REG, &tmp, 1, NULL);
		send_hil_cmd(bbcaddr, BBC_WRITE_REG, NULL, 0, NULL);
		send_hil_cmd(bbcaddr, BBC_READ_REG, NULL, 0, &tmp);
	}
	return(tmp);
}
578