xref: /minix/minix/kernel/arch/i386/arch_clock.c (revision fb4fbf7a)
/* i386-specific clock functions. */

#include <machine/ports.h>
#include <minix/portio.h>

#include "kernel/kernel.h"

#include "kernel/clock.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include "kernel/glo.h"
#include "kernel/profile.h"

#include <sys/sched.h> /* for CP_*, CPUSTATES */
#if CPUSTATES != MINIX_CPUSTATES
/* If this breaks, the code in this file may have to be adapted accordingly. */
#error "MINIX_CPUSTATES value is out of sync with NetBSD's!"
#endif

#ifdef USE_APIC
#include "apic.h"
#endif

#include "kernel/spinlock.h"

#ifdef CONFIG_SMP
#include "kernel/smp.h"
#endif
#define CLOCK_ACK_BIT	0x80	/* PS/2 clock interrupt acknowledge bit */

/* Clock parameters. */
#define COUNTER_FREQ (2*TIMER_FREQ)	/* counter frequency using square wave */
#define LATCH_COUNT	0x00	/* cc00xxxx, c = channel, x = any */
#define SQUARE_WAVE	0x36	/* ccaammmb, c = channel, a = access, m = mode,
				 * b = BCD; 0x36 = 00 11 011 0: channel 0,
				 * LSB then MSB, mode 3 (square wave), binary
				 */
#define TIMER_FREQ	1193182	/* clock frequency for timer in PC and AT */
#define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter */
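
/*
 * Worked example: with freq = 60 Hz, TIMER_COUNT(60) = 1193182 / 60 = 19886
 * (0x4DAE).  The PIT then divides its 1.19318 MHz input clock by 19886,
 * producing ticks at 1193182 / 19886, roughly 60.001 Hz.  Since the reload
 * value is a 16-bit count, the requested frequency must stay above
 * TIMER_FREQ / 65536, about 18.2 Hz, for TIMER_COUNT(freq) to fit.
 */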

static irq_hook_t pic_timer_hook;		/* interrupt handler hook */

static unsigned probe_ticks;
static u64_t tsc0, tsc1;
#define PROBE_TICKS	(system_hz / 10)

static unsigned tsc_per_ms[CONFIG_MAX_CPUS];
static unsigned tsc_per_tick[CONFIG_MAX_CPUS];
static uint64_t tsc_per_state[CONFIG_MAX_CPUS][CPUSTATES];

/*===========================================================================*
 *				init_8253A_timer			     *
 *===========================================================================*/
int init_8253A_timer(const unsigned freq)
{
	/* Initialize channel 0 of the 8253A timer to, e.g., 60 Hz,
	 * and register the CLOCK task's interrupt handler to be run
	 * on every clock tick.
	 */
	outb(TIMER_MODE, SQUARE_WAVE);  /* run continuously */
	outb(TIMER0, (TIMER_COUNT(freq) & 0xff)); /* timer low byte */
	outb(TIMER0, TIMER_COUNT(freq) >> 8); /* timer high byte */

	return OK;
}

/*===========================================================================*
 *				stop_8253A_timer			     *
 *===========================================================================*/
void stop_8253A_timer(void)
{
	/* Reset the clock to the BIOS rate. (For rebooting.) */
	outb(TIMER_MODE, 0x36);
	outb(TIMER0, 0);
	outb(TIMER0, 0);
}
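
/*
 * Writing a zero reload value is what restores the BIOS rate: the 8253A
 * treats a count of 0 as 65536, so the channel divides 1193182 Hz by 65536
 * and ticks at roughly 18.2 Hz, the rate the BIOS programs at boot.
 */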

void arch_timer_int_handler(void)
{
}

static int calib_cpu_handler(irq_hook_t * UNUSED(hook))
{
	u64_t tsc;

	probe_ticks++;
	read_tsc_64(&tsc);

	if (probe_ticks == 1) {
		tsc0 = tsc;
	}
	else if (probe_ticks == PROBE_TICKS) {
		tsc1 = tsc;
	}

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	return 1;
}
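
/*
 * The calibration handler samples the TSC on the first and on the
 * PROBE_TICKS-th clock tick, so the two samples span PROBE_TICKS - 1 full
 * tick intervals; estimate_cpu_freq() below divides by that interval count.
 */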

static void estimate_cpu_freq(void)
{
	u64_t tsc_delta;
	u64_t cpu_freq;

	irq_hook_t calib_cpu;

	/* set the probe; we use the legacy timer, IRQ 0 */
	put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	/* set the PIC timer to get some time */
	intr_enable();

	/* loop for some time to get a sample */
	while(probe_ticks < PROBE_TICKS) {
		intr_enable();
	}

	intr_disable();
	/* just in case we are in an SMP single cpu fallback mode */
	BKL_LOCK();

	/* remove the probe */
	rm_irq_handler(&calib_cpu);

	tsc_delta = tsc1 - tsc0;

	cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz;
	cpu_set_freq(cpuid, cpu_freq);
	cpu_info[cpuid].freq = (unsigned long)(cpu_freq / 1000000);
	BOOT_VERBOSE(cpu_print_freq(cpuid));
}
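
/*
 * Example of the estimate with assumed numbers: if system_hz = 100, then
 * PROBE_TICKS = 10 and the probe spans 9 tick intervals.  A measured
 * tsc_delta of 18,000,000 cycles gives 18,000,000 / 9 = 2,000,000 cycles
 * per tick, and 2,000,000 * 100 = 200,000,000, i.e. a 200 MHz TSC.
 */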

int init_local_timer(unsigned freq)
{
#ifdef USE_APIC
	/* if we know the address, lapic is enabled and we should use it */
	if (lapic_addr) {
		unsigned cpu = cpuid;
		tsc_per_ms[cpu] = (unsigned)(cpu_get_freq(cpu) / 1000);
		tsc_per_tick[cpu] = (unsigned)(cpu_get_freq(cpu) / system_hz);
		lapic_set_timer_one_shot(1000000 / system_hz);
	} else {
		DEBUGBASIC(("Initializing legacy i8253 timer\n"));
#else
	{
#endif
		init_8253A_timer(freq);
		estimate_cpu_freq();
		/* always only 1 cpu in the system */
		tsc_per_ms[0] = (unsigned)(cpu_get_freq(0) / 1000);
		tsc_per_tick[0] = (unsigned)(cpu_get_freq(0) / system_hz);
	}

	return 0;
}
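
/*
 * Both paths fill in the per-CPU conversion tables from the calibrated CPU
 * frequency: tsc_per_ms is TSC cycles per millisecond (freq / 1000) and
 * tsc_per_tick is TSC cycles per clock tick (freq / system_hz).  On the
 * LAPIC path the one-shot is programmed with the tick period expressed in
 * microseconds, 1000000 / system_hz.
 */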

void stop_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}

void restart_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}

int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
	if (lapic_addr) {
		/* Using APIC; it is configured in apic_idt_init() */
		BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
	} else
#endif
	{
		/* Using PIC; initialize the CLOCK's interrupt hook. */
		pic_timer_hook.proc_nr_e = NONE;
		pic_timer_hook.irq = CLOCK_IRQ;

		put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
	}

	return 0;
}
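
/*
 * Setting proc_nr_e to NONE marks the hook as kernel-owned: no process
 * endpoint is notified for this IRQ, and every tick is handled directly by
 * the registered handler.  On the APIC path no hook is needed because the
 * LAPIC timer interrupt gate is set up separately in apic_idt_init().
 */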

void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}
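
/*
 * tsc_ctr_switch holds the TSC value recorded at the last context switch.
 * context_stop() below charges the cycles accumulated since that snapshot
 * to whatever was running (a process, the kernel, or idle) and then resets
 * the snapshot, so every TSC cycle is accounted to exactly one consumer.
 */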

void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
	unsigned int cpu, tpt, counter;
#ifdef CONFIG_SMP
	int must_bkl_unlock = 0;

	cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = tsc - *__tsc_ctr_switch;
		kernel_ticks[cpu] = kernel_ticks[cpu] + tmp;
		p->p_cycles = p->p_cycles + tmp;
		must_bkl_unlock = 1;
	} else {
		u64_t bkl_tsc;
		atomic_t succ;

		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;

		BKL_LOCK();

		read_tsc_64(&tsc);

		bkl_ticks[cpu] = bkl_ticks[cpu] + tsc - bkl_tsc;
		bkl_tries[cpu]++;
		bkl_succ[cpu] += !(!(succ == 0)); /* lock appeared free beforehand */

		p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;

#ifdef CONFIG_SMP
		/*
		 * Since at the time we got a scheduling IPI we might have been
		 * waiting for the BKL already, we may miss it due to a similar
		 * IPI sent to the cpu that is already waiting for us to handle
		 * its own. This results in a live-lock of these two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
	cpu = 0;
#endif

	tsc_delta = tsc - *__tsc_ctr_switch;

	if (kbill_ipc) {
		kbill_ipc->p_kipc_cycles += tsc_delta;
		kbill_ipc = NULL;
	}

	if (kbill_kcall) {
		kbill_kcall->p_kcall_cycles += tsc_delta;
		kbill_kcall = NULL;
	}

	/*
	 * Perform CPU average accounting here, rather than in the generic
	 * clock handler.  Doing it here offers two advantages: 1) we can
	 * account for time spent in the kernel, and 2) we properly account for
	 * CPU time spent by a process that has a lot of short-lasting activity
	 * such that it spends serious CPU time but never actually runs when a
	 * clock tick triggers.  Note that clock speed inaccuracy requires that
	 * the code below is a loop, but in the vast majority of cases it will
	 * not iterate more than once, and it will often be skipped altogether.
	 */
	tpt = tsc_per_tick[cpu];

	p->p_tick_cycles += tsc_delta;
	while (tpt > 0 && p->p_tick_cycles >= tpt) {
		p->p_tick_cycles -= tpt;

		/*
		 * The process has spent roughly a whole clock tick worth of
		 * CPU cycles.  Update its per-process CPU utilization counter.
		 * Some of the cycles may actually have been spent in a
		 * previous second, but that is not a problem.
		 */
		cpuavg_increment(&p->p_cpuavg, kclockinfo.uptime, system_hz);
	}

	/*
	 * Deduct the just-consumed CPU cycles from the CPU time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks, except for global accounting purposes.
	 */
	if (p->p_endpoint >= 0) {
		/* On MINIX3, the "system" counter covers system processes. */
		if (p->p_priv != priv_addr(USER_PRIV_ID))
			counter = CP_SYS;
		else if (p->p_misc_flags & MF_NICED)
			counter = CP_NICE;
		else
			counter = CP_USER;

#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left) {
			p->p_cpu_time_left -= tsc_delta;
		} else {
			p->p_cpu_time_left = 0;
		}
#endif
	} else {
		/* On MINIX3, the "interrupts" counter covers the kernel. */
		if (p->p_endpoint == IDLE)
			counter = CP_IDLE;
		else
			counter = CP_INTR;
	}

	tsc_per_state[cpu][counter] += tsc_delta;

	*__tsc_ctr_switch = tsc;

#ifdef CONFIG_SMP
	if(must_bkl_unlock) {
		BKL_UNLOCK();
	}
#endif
}
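
/*
 * Example of the per-tick accounting above, with an assumed tsc_per_tick of
 * 2,000,000: a process that runs in many short bursts of 50,000 cycles has
 * them added to p_tick_cycles on every context_stop().  After 40 such
 * bursts the counter reaches one tick's worth, cpuavg_increment() is called
 * once, and the remainder carries over; the bursts are never lost just
 * because no clock tick happened to fire while the process was running.
 */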

void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}

u64_t ms_2_cpu_time(unsigned ms)
{
	return (u64_t)tsc_per_ms[cpuid] * ms;
}

unsigned cpu_time_2_ms(u64_t cpu_time)
{
	return (unsigned)(cpu_time / tsc_per_ms[cpuid]);
}
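
/*
 * The two conversions are inverses up to integer truncation.  With an
 * assumed tsc_per_ms of 200,000 (a 200 MHz TSC), ms_2_cpu_time(10) yields
 * 2,000,000 cycles and cpu_time_2_ms(2,000,000) yields 10 ms back; a value
 * like 2,100,000 cycles also maps to 10 ms, since the division truncates.
 */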

short cpu_load(void)
{
	u64_t current_tsc, *current_idle;
	u64_t tsc_delta, idle_delta, busy;
	struct proc *idle;
	short load;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	u64_t *last_tsc, *last_idle;

	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);

	idle = get_cpu_var_ptr(cpu, idle_proc);
	read_tsc_64(&current_tsc);
	current_idle = &idle->p_cycles; /* ptr to idle proc */

	/* calculate load since last cpu_load invocation */
	if (*last_tsc) {
		tsc_delta = current_tsc - *last_tsc;
		idle_delta = *current_idle - *last_idle;

		busy = tsc_delta - idle_delta;
		busy = busy * 100;
		load = ex64lo(busy / tsc_delta);

		if (load > 100)
			load = 100;
	} else
		load = 0;

	*last_tsc = current_tsc;
	*last_idle = *current_idle;
	return load;
}
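
/*
 * The load figure is simply the non-idle share of the TSC cycles elapsed
 * since the previous call: load = 100 * (tsc_delta - idle_delta) /
 * tsc_delta.  For instance, 80,000,000 elapsed cycles of which 20,000,000
 * were spent in the idle process give 100 * 60,000,000 / 80,000,000 = 75.
 * The first call returns 0 because no earlier snapshot exists yet.
 */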

void busy_delay_ms(int ms)
{
	u64_t cycles = ms_2_cpu_time(ms), tsc0, tsc, tsc1;
	read_tsc_64(&tsc0);
	tsc1 = tsc0 + cycles;
	do { read_tsc_64(&tsc); } while(tsc < tsc1);
	return;
}
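
/*
 * busy_delay_ms() spins rather than sleeps: it computes the target TSC
 * value up front and polls the counter until it passes.  Note that the
 * local tsc0/tsc1 variables shadow the file-scope calibration variables of
 * the same names; the two uses are unrelated.
 */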

/*
 * Return the number of clock ticks spent in each of a predefined number of
 * CPU states.
 */
void
get_cpu_ticks(unsigned int cpu, uint64_t ticks[CPUSTATES])
{
	int i;

	/* TODO: make this inter-CPU safe! */
	for (i = 0; i < CPUSTATES; i++)
		ticks[i] = tsc_per_state[cpu][i] / tsc_per_tick[cpu];
}