xref: /minix/minix/kernel/arch/earm/arch_clock.c (revision f12160c1)
1 /* ARM-specific clock functions. */
2 
3 #include "kernel/kernel.h"
4 
5 #include "kernel/clock.h"
6 #include "kernel/interrupt.h"
7 #include <minix/u64.h>
8 #include <minix/board.h>
9 #include "kernel/glo.h"
10 #include "kernel/profile.h"
11 
12 #include <sys/sched.h> /* for CP_*, CPUSTATES */
13 #if CPUSTATES != MINIX_CPUSTATES
14 /* If this breaks, the code in this file may have to be adapted accordingly. */
15 #error "MINIX_CPUSTATES value is out of sync with NetBSD's!"
16 #endif
17 
18 #include "kernel/spinlock.h"
19 
20 #ifdef CONFIG_SMP
21 #include "kernel/smp.h"
22 #error CONFIG_SMP is unsupported on ARM
23 #endif
24 
25 #include "bsp_timer.h"
26 #include "bsp_intr.h"
27 
28 static unsigned tsc_per_ms[CONFIG_MAX_CPUS];
29 static unsigned tsc_per_tick[CONFIG_MAX_CPUS];
30 static uint64_t tsc_per_state[CONFIG_MAX_CPUS][CPUSTATES];
31 
init_local_timer(unsigned freq)32 int init_local_timer(unsigned freq)
33 {
34 	bsp_timer_init(freq);
35 
36 	if (BOARD_IS_BBXM(machine.board_id)) {
37 		tsc_per_ms[0] = 16250;
38 	} else if (BOARD_IS_BB(machine.board_id)) {
39 		tsc_per_ms[0] = 15000;
40 	} else {
41 		panic("Can not do the clock setup. machine (0x%08x) is unknown\n",machine.board_id);
42 	};
43 
44 	tsc_per_tick[0] = tsc_per_ms[0] * 1000 / system_hz;
45 
46 	return 0;
47 }
48 
/* Stop the local (per-CPU) timer by delegating to the board support code. */
void stop_local_timer(void)
{
	bsp_timer_stop();
}
53 
/* ARM-specific timer interrupt handler: forward to the BSP's handler. */
void arch_timer_int_handler(void)
{
	bsp_timer_int_handler();
}
58 
/*
 * Initialize per-CPU cycle accounting: record the current TSC value as the
 * context-switch baseline and clear the load-tracking snapshots used by
 * cpu_load().
 */
void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	/* Baseline TSC value; context_stop() measures deltas against this. */
	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	/* Zero means "no sample taken yet" to cpu_load() below. */
	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}
70 
context_stop(struct proc * p)71 void context_stop(struct proc * p)
72 {
73 	u64_t tsc, tsc_delta;
74 	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
75 	unsigned int cpu, tpt, counter;
76 
77 #ifdef CONFIG_SMP
78 #error CONFIG_SMP is unsupported on ARM
79 #else
80 	read_tsc_64(&tsc);
81 	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
82 	cpu = 0;
83 #endif
84 
85 	tsc_delta = tsc - *__tsc_ctr_switch;
86 
87 	if (kbill_ipc) {
88 		kbill_ipc->p_kipc_cycles += tsc_delta;
89 		kbill_ipc = NULL;
90 	}
91 
92 	if (kbill_kcall) {
93 		kbill_kcall->p_kcall_cycles += tsc_delta;
94 		kbill_kcall = NULL;
95 	}
96 
97 	/*
98 	 * Perform CPU average accounting here, rather than in the generic
99 	 * clock handler.  Doing it here offers two advantages: 1) we can
100 	 * account for time spent in the kernel, and 2) we properly account for
101 	 * CPU time spent by a process that has a lot of short-lasting activity
102 	 * such that it spends serious CPU time but never actually runs when a
103 	 * clock tick triggers.  Note that clock speed inaccuracy requires that
104 	 * the code below is a loop, but the loop will in by far most cases not
105 	 * be executed more than once, and often be skipped at all.
106 	 */
107 	tpt = tsc_per_tick[cpu];
108 
109 	p->p_tick_cycles += tsc_delta;
110 	while (tpt > 0 && p->p_tick_cycles >= tpt) {
111 		p->p_tick_cycles -= tpt;
112 
113 		/*
114 		 * The process has spent roughly a whole clock tick worth of
115 		 * CPU cycles.  Update its per-process CPU utilization counter.
116 		 * Some of the cycles may actually have been spent in a
117 		 * previous second, but that is not a problem.
118 		 */
119 		cpuavg_increment(&p->p_cpuavg, kclockinfo.uptime, system_hz);
120 	}
121 
122 	/*
123 	 * deduct the just consumed cpu cycles from the cpu time left for this
124 	 * process during its current quantum. Skip IDLE and other pseudo kernel
125 	 * tasks, except for global accounting purposes.
126 	 */
127 	if (p->p_endpoint >= 0) {
128 		/* On MINIX3, the "system" counter covers system processes. */
129 		if (p->p_priv != priv_addr(USER_PRIV_ID))
130 			counter = CP_SYS;
131 		else if (p->p_misc_flags & MF_NICED)
132 			counter = CP_NICE;
133 		else
134 			counter = CP_USER;
135 
136 #if DEBUG_RACE
137 		p->p_cpu_time_left = 0;
138 #else
139 		if (tsc_delta < p->p_cpu_time_left) {
140 			p->p_cpu_time_left -= tsc_delta;
141 		} else {
142 			p->p_cpu_time_left = 0;
143 		}
144 #endif
145 	} else {
146 		/* On MINIX3, the "interrupts" counter covers the kernel. */
147 		if (p->p_endpoint == IDLE)
148 			counter = CP_IDLE;
149 		else
150 			counter = CP_INTR;
151 	}
152 
153 	tsc_per_state[cpu][counter] += tsc_delta;
154 
155 	*__tsc_ctr_switch = tsc;
156 }
157 
/*
 * Called when the CPU stops being idle: account the idle process's cycles
 * and, if the CPU was indeed marked idle, restart the local timer.
 */
void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	/* Snapshot and clear the idle flag before accounting. */
	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	/* Tell the statistical profiler that idle time was interrupted. */
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}
177 
/*
 * Restart the local timer.  Intentionally a no-op on ARM.
 * NOTE(review): presumably the BSP timer is free-running/periodic and needs
 * no re-arming here -- confirm against the BSP timer implementation.
 */
void restart_local_timer(void)
{
}
181 
/*
 * Register 'handler' as the interrupt handler for the local timer,
 * delegating to the board support code.  Returns the BSP's status code.
 */
int register_local_timer_handler(const irq_handler_t handler)
{
	return bsp_register_timer_handler(handler);
}
186 
/* Convert milliseconds to CPU (TSC) cycles using this CPU's calibration. */
u64_t ms_2_cpu_time(unsigned ms)
{
	return (u64_t)tsc_per_ms[cpuid] * ms;
}
191 
/*
 * Convert a CPU (TSC) cycle count to milliseconds using this CPU's
 * calibration.  The result is truncated to 'unsigned'; callers must not
 * pass durations whose millisecond value exceeds UINT_MAX.
 */
unsigned cpu_time_2_ms(u64_t cpu_time)
{
	/* Cast matches the declared return type (was '(unsigned long)'). */
	return (unsigned)(cpu_time / tsc_per_ms[cpuid]);
}
196 
cpu_load(void)197 short cpu_load(void)
198 {
199 	u64_t current_tsc, *current_idle;
200 	u64_t tsc_delta, idle_delta, busy;
201 	struct proc *idle;
202 	short load;
203 #ifdef CONFIG_SMP
204 	unsigned cpu = cpuid;
205 #endif
206 
207 	u64_t *last_tsc, *last_idle;
208 
209 	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
210 	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);
211 
212 	idle = get_cpu_var_ptr(cpu, idle_proc);;
213 	read_tsc_64(&current_tsc);
214 	current_idle = &idle->p_cycles; /* ptr to idle proc */
215 
216 	/* calculate load since last cpu_load invocation */
217 	if (*last_tsc) {
218 		tsc_delta = current_tsc - *last_tsc;
219 		idle_delta = *current_idle - *last_idle;
220 
221 		busy = tsc_delta - idle_delta;
222 		busy = busy * 100;
223 		load = ex64lo(busy / tsc_delta);
224 
225 		if (load > 100)
226 			load = 100;
227 	} else
228 		load = 0;
229 
230 	*last_tsc = current_tsc;
231 	*last_idle = *current_idle;
232 	return load;
233 }
234 
235 /*
236  * Return the number of clock ticks spent in each of a predefined number of
237  * CPU states.
238  */
239 void
get_cpu_ticks(unsigned int cpu,uint64_t ticks[CPUSTATES])240 get_cpu_ticks(unsigned int cpu, uint64_t ticks[CPUSTATES])
241 {
242 	int i;
243 
244 	/* TODO: make this inter-CPU safe! */
245 	for (i = 0; i < CPUSTATES; i++)
246 		ticks[i] = tsc_per_state[cpu][i] / tsc_per_tick[cpu];
247 }
248