xref: /minix/minix/kernel/arch/i386/arch_clock.c (revision b89261ba)

/* i386-specific clock functions. */

#include <machine/ports.h>
#include <minix/portio.h>

#include "kernel/kernel.h"

#include "kernel/clock.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include "glo.h"
#include "kernel/profile.h"

#include <sys/sched.h> /* for CP_*, CPUSTATES */
#if CPUSTATES != MINIX_CPUSTATES
/* If this breaks, the code in this file may have to be adapted accordingly. */
#error "MINIX_CPUSTATES value is out of sync with NetBSD's!"
#endif

#ifdef USE_APIC
#include "apic.h"
#endif

#include "kernel/spinlock.h"

#ifdef CONFIG_SMP
#include "kernel/smp.h"
#endif

#define CLOCK_ACK_BIT	0x80	/* PS/2 clock interrupt acknowledge bit */

/* Clock parameters. */
#define COUNTER_FREQ (2*TIMER_FREQ) /* counter frequency using square wave */
#define LATCH_COUNT	0x00	/* cc00xxxx, c = channel, x = any */
#define SQUARE_WAVE	0x36	/* ccaammmb, a = access, m = mode, b = BCD */
				/*   11x11, 11 = LSB then MSB, x11 = sq wave */
#define TIMER_FREQ  1193182	/* clock frequency for timer in PC and AT */
#define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter */
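/*
 * Worked example of the divisor arithmetic: with the 1193182 Hz PIT input
 * clock and a 60 Hz tick rate, TIMER_COUNT(60) = 1193182/60 = 19886, the
 * 16-bit count loaded into channel 0 by init_8253A_timer() below.
 */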

static irq_hook_t pic_timer_hook;		/* interrupt handler hook */

static unsigned probe_ticks;			/* ticks seen by the calibration probe */
static u64_t tsc0, tsc1;			/* TSC samples bracketing the probe window */
#define PROBE_TICKS	(system_hz / 10)

static unsigned tsc_per_ms[CONFIG_MAX_CPUS];	/* TSC cycles per millisecond */
static unsigned tsc_per_tick[CONFIG_MAX_CPUS];	/* TSC cycles per clock tick */
static uint64_t tsc_per_state[CONFIG_MAX_CPUS][CPUSTATES]; /* cycles per CPU state */

/*===========================================================================*
 *				init_8253A_timer			     *
 *===========================================================================*/
int init_8253A_timer(const unsigned freq)
{
	/* Initialize channel 0 of the 8253A timer to the given frequency,
	 * e.g. 60 Hz. The CLOCK interrupt handler is registered separately,
	 * in register_local_timer_handler().
	 */
	outb(TIMER_MODE, SQUARE_WAVE);	/* run continuously */
	outb(TIMER0, (TIMER_COUNT(freq) & 0xff)); /* timer low byte */
	outb(TIMER0, TIMER_COUNT(freq) >> 8); /* timer high byte */

	return OK;
}

/*===========================================================================*
 *				stop_8253A_timer			     *
 *===========================================================================*/
void stop_8253A_timer(void)
{
	/* Reset the clock to the BIOS rate. (For rebooting.)
	 * A zero count is interpreted as 65536, giving the BIOS default
	 * rate of about 18.2 Hz.
	 */
	outb(TIMER_MODE, 0x36);
	outb(TIMER0, 0);
	outb(TIMER0, 0);
}

void arch_timer_int_handler(void)
{
}

static int calib_cpu_handler(irq_hook_t * UNUSED(hook))
{
	u64_t tsc;

	probe_ticks++;
	read_tsc_64(&tsc);

	if (probe_ticks == 1) {
		tsc0 = tsc;
	}
	else if (probe_ticks == PROBE_TICKS) {
		tsc1 = tsc;
	}

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	return 1;
}

static void estimate_cpu_freq(void)
{
	u64_t tsc_delta;
	u64_t cpu_freq;

	irq_hook_t calib_cpu;

	/* set the probe, we use the legacy timer, IRQ 0 */
	put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	/* enable interrupts so the PIC timer ticks are delivered */
	intr_enable();

	/* loop for some time to get a sample */
	while(probe_ticks < PROBE_TICKS) {
		intr_enable();
	}

	intr_disable();
	/* just in case we are in an SMP single cpu fallback mode */
	BKL_LOCK();

	/* remove the probe */
	rm_irq_handler(&calib_cpu);

	tsc_delta = tsc1 - tsc0;

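	/*
	 * tsc0 is sampled on the first tick and tsc1 on tick PROBE_TICKS, so
	 * the delta spans PROBE_TICKS - 1 full tick intervals. Example (with
	 * assumed round numbers): at system_hz = 60, PROBE_TICKS = 6; if the
	 * five intervals took 166,665,000 cycles, one tick is ~33,333,000
	 * cycles and cpu_freq = 33,333,000 * 60, i.e. a ~2 GHz CPU.
	 */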
	cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz;
	cpu_set_freq(cpuid, cpu_freq);
	cpu_info[cpuid].freq = (unsigned long)(cpu_freq / 1000000);
	BOOT_VERBOSE(cpu_print_freq(cpuid));
}

int init_local_timer(unsigned freq)
{
#ifdef USE_APIC
	/* if we know the address, the LAPIC is enabled and we should use it */
	if (lapic_addr) {
		unsigned cpu = cpuid;
		tsc_per_ms[cpu] = (unsigned)(cpu_get_freq(cpu) / 1000);
		tsc_per_tick[cpu] = (unsigned)(cpu_get_freq(cpu) / system_hz);
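		/*
		 * The argument 1000000/system_hz is one tick expressed in
		 * microseconds, e.g. 16666 us at 60 Hz; that is the one-shot
		 * period programmed into the LAPIC timer below.
		 */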
		lapic_set_timer_one_shot(1000000 / system_hz);
	} else {
		DEBUGBASIC(("Initializing legacy i8253 timer\n"));
#else
	{
#endif
		init_8253A_timer(freq);
		estimate_cpu_freq();
		/* always only 1 cpu in the system */
		tsc_per_ms[0] = (unsigned)(cpu_get_freq(0) / 1000);
		tsc_per_tick[0] = (unsigned)(cpu_get_freq(0) / system_hz);
	}

	return 0;
}

void stop_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}

void restart_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}

int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
	if (lapic_addr) {
		/* Using the APIC; the timer is configured in apic_idt_init() */
		BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
	} else
#endif
	{
		/* Using the PIC; initialize the CLOCK interrupt hook. */
		pic_timer_hook.proc_nr_e = NONE;
		pic_timer_hook.irq = CLOCK_IRQ;

		put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
	}

	return 0;
}

void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}

void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
	unsigned int cpu, tpt, counter;
#ifdef CONFIG_SMP
	int must_bkl_unlock = 0;

	cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = tsc - *__tsc_ctr_switch;
		kernel_ticks[cpu] = kernel_ticks[cpu] + tmp;
		p->p_cycles = p->p_cycles + tmp;
		must_bkl_unlock = 1;
	} else {
		u64_t bkl_tsc;
		atomic_t succ;

		read_tsc_64(&bkl_tsc);
		/* sampling the lock value before acquiring it only gives a
		 * good estimate of whether the acquisition was uncontended */
		succ = big_kernel_lock.val;

		BKL_LOCK();

		read_tsc_64(&tsc);

		bkl_ticks[cpu] = bkl_ticks[cpu] + tsc - bkl_tsc;
		bkl_tries[cpu]++;
		bkl_succ[cpu] += (succ == 0); /* lock was free when sampled */

		p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;

#ifdef CONFIG_SMP
		/*
		 * Since at the time we got a scheduling IPI we might have been
		 * waiting for the BKL already, we may miss it due to a similar
		 * IPI to the cpu which is already waiting for us to handle its
		 * request. This results in a livelock of these two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
	cpu = 0;
#endif

	tsc_delta = tsc - *__tsc_ctr_switch;

	if (kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			kbill_ipc->p_kipc_cycles + tsc_delta;
		kbill_ipc = NULL;
	}

	if (kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			kbill_kcall->p_kcall_cycles + tsc_delta;
		kbill_kcall = NULL;
	}

	/*
	 * Perform CPU average accounting here, rather than in the generic
	 * clock handler.  Doing it here offers two advantages: 1) we can
	 * account for time spent in the kernel, and 2) we properly account for
	 * CPU time spent by a process that has a lot of short-lasting activity
	 * such that it spends serious CPU time but never actually runs when a
	 * clock tick triggers.  Note that clock speed inaccuracy requires that
	 * the code below is a loop, but in the vast majority of cases the loop
	 * will not be executed more than once, and often it will be skipped
	 * altogether.
	 */
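	/*
	 * Example with assumed numbers: at 2 GHz and system_hz = 60,
	 * tsc_per_tick is ~33.3M cycles. A process that accumulates 40M
	 * cycles in short bursts crosses the threshold once, gets one
	 * cpuavg_increment() call, and carries the remaining ~6.7M cycles
	 * over in p_tick_cycles.
	 */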
	tpt = tsc_per_tick[cpu];

	p->p_tick_cycles += tsc_delta;
	while (tpt > 0 && p->p_tick_cycles >= tpt) {
		p->p_tick_cycles -= tpt;

		/*
		 * The process has spent roughly a whole clock tick worth of
		 * CPU cycles.  Update its per-process CPU utilization counter.
		 * Some of the cycles may actually have been spent in a
		 * previous second, but that is not a problem.
		 */
		cpuavg_increment(&p->p_cpuavg, kclockinfo.uptime, system_hz);
	}

	/*
	 * Deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks, except for global accounting purposes.
	 */
	if (p->p_endpoint >= 0) {
		/* On MINIX3, the "system" counter covers system processes. */
		if (p->p_priv != priv_addr(USER_PRIV_ID))
			counter = CP_SYS;
		else if (p->p_misc_flags & MF_NICED)
			counter = CP_NICE;
		else
			counter = CP_USER;

#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left)
			p->p_cpu_time_left = p->p_cpu_time_left - tsc_delta;
		else
			p->p_cpu_time_left = 0;
#endif
	} else {
		/* On MINIX3, the "interrupts" counter covers the kernel. */
		if (p->p_endpoint == IDLE)
			counter = CP_IDLE;
		else
			counter = CP_INTR;
	}

	tsc_per_state[cpu][counter] += tsc_delta;

	*__tsc_ctr_switch = tsc;

#ifdef CONFIG_SMP
	if (must_bkl_unlock) {
		BKL_UNLOCK();
	}
#endif
}

void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}

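/*
 * TSC <-> milliseconds conversions, based on the per-CPU calibration above.
 * Example with an assumed 2 GHz CPU: tsc_per_ms = 2,000,000, so
 * ms_2_cpu_time(5) = 10,000,000 cycles and cpu_time_2_ms(10,000,000) = 5.
 */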
u64_t ms_2_cpu_time(unsigned ms)
{
	return (u64_t)tsc_per_ms[cpuid] * ms;
}

unsigned cpu_time_2_ms(u64_t cpu_time)
{
	return (unsigned)(cpu_time / tsc_per_ms[cpuid]);
}

short cpu_load(void)
{
	u64_t current_tsc, *current_idle;
	u64_t tsc_delta, idle_delta, busy;
	struct proc *idle;
	short load;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	u64_t *last_tsc, *last_idle;

	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);

	idle = get_cpu_var_ptr(cpu, idle_proc);
	read_tsc_64(&current_tsc);
	current_idle = &idle->p_cycles; /* the idle process's cycle counter */

	/* calculate load since last cpu_load invocation */
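	/*
	 * load = 100 * (total - idle) / total. Example with assumed numbers:
	 * if 80M cycles elapsed since the last call and the idle process
	 * consumed 20M of them, busy = 60M and load = 60M * 100 / 80M = 75.
	 */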
	if (*last_tsc) {
		tsc_delta = current_tsc - *last_tsc;
		idle_delta = *current_idle - *last_idle;

		busy = tsc_delta - idle_delta;
		busy = busy * 100;
		load = ex64lo(busy / tsc_delta);

		if (load > 100)
			load = 100;
	} else
		load = 0;

	*last_tsc = current_tsc;
	*last_idle = *current_idle;
	return load;
}

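/*
 * Busy-wait for approximately the given number of milliseconds by spinning
 * until the TSC reaches a target computed from the calibrated cycle rate.
 */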
void busy_delay_ms(int ms)
{
	u64_t cycles = ms_2_cpu_time(ms), tsc_start, tsc, tsc_end;
	read_tsc_64(&tsc_start);
	tsc_end = tsc_start + cycles;
	do { read_tsc_64(&tsc); } while(tsc < tsc_end);
	return;
}

/*
 * Return the number of clock ticks spent in each of a predefined number of
 * CPU states.
 */
void
get_cpu_ticks(unsigned int cpu, uint64_t ticks[CPUSTATES])
{
	int i;

	/* TODO: make this inter-CPU safe! */
	for (i = 0; i < CPUSTATES; i++)
		ticks[i] = tsc_per_state[cpu][i] / tsc_per_tick[cpu];
}