
/* i386-specific clock functions. */

#include <machine/ports.h>
#include <minix/portio.h>

#include "kernel/kernel.h"

#include "kernel/clock.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include "glo.h"
#include "kernel/profile.h"


#ifdef USE_APIC
#include "apic.h"
#endif

#include "kernel/spinlock.h"

#ifdef CONFIG_SMP
#include "kernel/smp.h"
#endif

#define CLOCK_ACK_BIT   0x80    /* PS/2 clock interrupt acknowledge bit */

/* Clock parameters. */
#define COUNTER_FREQ (2*TIMER_FREQ) /* counter frequency using square wave */
#define LATCH_COUNT     0x00    /* cc00xxxx, c = channel, x = any */
#define SQUARE_WAVE     0x36    /* ccaammmb, a = access, m = mode, b = BCD */
                                /*   11x11, 11 = LSB then MSB, x11 = sq wave */
#define TIMER_FREQ  1193182     /* clock frequency for timer in PC and AT */
#define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter */
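/* For example, TIMER_COUNT(60) = 1193182/60 = 19886 (integer division), so a
 * reload value of 19886 makes channel 0 interrupt at roughly 60 Hz.
 */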

static irq_hook_t pic_timer_hook;		/* interrupt handler hook */

static unsigned probe_ticks;
static u64_t tsc0, tsc1;
#define PROBE_TICKS	(system_hz / 10)
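/* E.g., with system_hz = 60 this is 6 ticks, i.e. about 100 ms of sampling. */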

static unsigned tsc_per_ms[CONFIG_MAX_CPUS];

/*===========================================================================*
 *				init_8253A_timer			     *
 *===========================================================================*/
int init_8253A_timer(const unsigned freq)
{
	/* Initialize channel 0 of the 8253A timer to the requested
	 * frequency, e.g. 60 Hz. The CLOCK task's interrupt handler is
	 * registered separately, in register_local_timer_handler().
	 */
	outb(TIMER_MODE, SQUARE_WAVE);  /* run continuously */
	outb(TIMER0, (TIMER_COUNT(freq) & 0xff)); /* timer low byte */
	outb(TIMER0, TIMER_COUNT(freq) >> 8); /* timer high byte */

	return OK;
}

/*===========================================================================*
 *				stop_8253A_timer			     *
 *===========================================================================*/
void stop_8253A_timer(void)
{
	/* Reset the clock to the BIOS rate. (For rebooting.) A zero count
	 * selects the maximum divisor, 65536, i.e. the default ~18.2 Hz.
	 */
	outb(TIMER_MODE, SQUARE_WAVE);
	outb(TIMER0, 0);
	outb(TIMER0, 0);
}

void arch_timer_int_handler(void)
{
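	/* Intentionally empty: no i386-specific work is needed per tick. */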
}

static int calib_cpu_handler(irq_hook_t * UNUSED(hook))
{
	u64_t tsc;

	probe_ticks++;
	read_tsc_64(&tsc);

	if (probe_ticks == 1) {
		tsc0 = tsc;
	}
	else if (probe_ticks == PROBE_TICKS) {
		tsc1 = tsc;
	}

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	return 1;	/* reenable the IRQ */
}

static void estimate_cpu_freq(void)
{
	u64_t tsc_delta;
	u64_t cpu_freq;

	irq_hook_t calib_cpu;

	/* set the probe; we use the legacy timer, IRQ 0 */
	put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	/* enable interrupts so the PIC timer ticks can arrive */
	intr_enable();

	/* loop for some time to get a sample */
	while(probe_ticks < PROBE_TICKS) {
		intr_enable();
	}

	intr_disable();
	/* just in case we are in an SMP single cpu fallback mode */
	BKL_LOCK();

	/* remove the probe */
	rm_irq_handler(&calib_cpu);

	tsc_delta = tsc1 - tsc0;

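	/*
	 * tsc0 and tsc1 were taken (PROBE_TICKS - 1) timer ticks apart, so
	 * tsc_delta / (PROBE_TICKS - 1) is the TSC increment per tick and
	 * multiplying by system_hz (ticks per second) yields cycles per second.
	 */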
	cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz;
	cpu_set_freq(cpuid, cpu_freq);
	cpu_info[cpuid].freq = (unsigned long)(cpu_freq / 1000000);
	BOOT_VERBOSE(cpu_print_freq(cpuid));
}

int init_local_timer(unsigned freq)
{
#ifdef USE_APIC
	/* if we know the address, lapic is enabled and we should use it */
	if (lapic_addr) {
		unsigned cpu = cpuid;
		tsc_per_ms[cpu] = (unsigned long)(cpu_get_freq(cpu) / 1000);
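		/* program one clock tick, expressed in microseconds */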
		lapic_set_timer_one_shot(1000000 / system_hz);
	} else {
		DEBUGBASIC(("Initializing legacy i8253 timer\n"));
#else
	{
#endif
		init_8253A_timer(freq);
		estimate_cpu_freq();
		/* always only 1 cpu in the system */
		tsc_per_ms[0] = (unsigned long)(cpu_get_freq(0) / 1000);
	}

	return 0;
}

void stop_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}

void restart_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}

int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
	if (lapic_addr) {
		/* Using APIC; it is configured in apic_idt_init(). */
		BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
	} else
#endif
	{
		/* Using PIC; initialize the CLOCK task's interrupt hook. */
		pic_timer_hook.proc_nr_e = NONE;
		pic_timer_hook.irq = CLOCK_IRQ;

		put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
	}

	return 0;
}

void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

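	/* Record the current TSC as the baseline that context_stop() will
	 * measure the first accounting interval against.
	 */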
	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}

void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
	int must_bkl_unlock = 0;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = tsc - *__tsc_ctr_switch;
		kernel_ticks[cpu] = kernel_ticks[cpu] + tmp;
		p->p_cycles = p->p_cycles + tmp;
		must_bkl_unlock = 1;
	} else {
		u64_t bkl_tsc;
		atomic_t succ;

		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;

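		/*
		 * BKL statistics: bkl_ticks accumulates the cycles spent
		 * acquiring the lock, bkl_tries counts acquisitions, and
		 * bkl_succ counts those where the lock was sampled free
		 * just beforehand.
		 */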
		BKL_LOCK();

		read_tsc_64(&tsc);

		bkl_ticks[cpu] = bkl_ticks[cpu] + tsc - bkl_tsc;
		bkl_tries[cpu]++;
		bkl_succ[cpu] += (succ == 0);

		p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;

#ifdef CONFIG_SMP
		/*
		 * Since at the time we got a scheduling IPI we might have been
		 * waiting for the BKL already, we may miss it due to a similar
		 * IPI to the cpu which is already waiting for us to handle its
		 * request. This results in a live-lock of these two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
#endif

	tsc_delta = tsc - *__tsc_ctr_switch;

	if (kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			kbill_ipc->p_kipc_cycles + tsc_delta;
		kbill_ipc = NULL;
	}

	if (kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			kbill_kcall->p_kcall_cycles + tsc_delta;
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just-consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = p->p_cpu_time_left - tsc_delta;
		else {
			p->p_cpu_time_left = 0;
		}
#endif
	}

	*__tsc_ctr_switch = tsc;

#ifdef CONFIG_SMP
	if(must_bkl_unlock) {
		BKL_UNLOCK();
	}
#endif
}

void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}

u64_t ms_2_cpu_time(unsigned ms)
{
	return (u64_t)tsc_per_ms[cpuid] * ms;
}

unsigned cpu_time_2_ms(u64_t cpu_time)
{
	return (unsigned)(cpu_time / tsc_per_ms[cpuid]);
}

short cpu_load(void)
{
	u64_t current_tsc, *current_idle;
	u64_t tsc_delta, idle_delta, busy;
	struct proc *idle;
	short load;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	u64_t *last_tsc, *last_idle;

	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);

	idle = get_cpu_var_ptr(cpu, idle_proc);
	read_tsc_64(&current_tsc);
	current_idle = &idle->p_cycles; /* ptr to idle proc's cycle count */

	/* calculate load since the last cpu_load invocation */
	if (*last_tsc) {
		tsc_delta = current_tsc - *last_tsc;
		idle_delta = *current_idle - *last_idle;

		busy = tsc_delta - idle_delta;
		busy = busy * 100;
		load = ex64lo(busy / tsc_delta);

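		/* Accounting skew can push the estimate past 100%; clamp it. */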
		if (load > 100)
			load = 100;
	} else
		load = 0;

	*last_tsc = current_tsc;
	*last_idle = *current_idle;
	return load;
}

void busy_delay_ms(int ms)
{
	/* Spin until the TSC reaches the target value. The locals are named
	 * so as not to shadow the file-scope tsc0/tsc1 calibration variables.
	 */
	u64_t cycles = ms_2_cpu_time(ms), tsc_start, tsc, tsc_end;
	read_tsc_64(&tsc_start);
	tsc_end = tsc_start + cycles;
	do { read_tsc_64(&tsc); } while(tsc < tsc_end);
}
386