/* /minix/minix/kernel/smp.c (revision 83133719) */
#include <assert.h>

#include "smp.h"
#include "interrupt.h"
#include "clock.h"

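/*
 * basic CPU topology information discovered at boot time: the number of
 * cpus, the number of hyperthreads per core and the id of the bootstrap
 * processor
 */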
unsigned ncpus;
unsigned ht_per_core;
unsigned bsp_cpu_id;

struct cpu cpus[CONFIG_MAX_CPUS];

/* info passed to another cpu along with a sched ipi */
struct sched_ipi_data {
	volatile u32_t	flags;
	volatile u32_t	data;
};

static struct sched_ipi_data  sched_ipi_data[CONFIG_MAX_CPUS];

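/*
 * flag bits for sched_ipi_data.flags; they tell the target cpu which actions
 * to take on the process passed in sched_ipi_data.data
 */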
#define SCHED_IPI_STOP_PROC	1
#define SCHED_IPI_VM_INHIBIT	2
#define SCHED_IPI_SAVE_CTX	4

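/* number of application processors (APs) that have finished booting */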
static volatile unsigned ap_cpus_booted;

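/*
 * big_kernel_lock serializes execution inside the kernel; boot_lock is used
 * while the APs are brought up
 */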
SPINLOCK_DEFINE(big_kernel_lock)
SPINLOCK_DEFINE(boot_lock)

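/*
 * Called on the bootstrap processor once the APs have been started. It
 * releases the BKL so the APs can enter kernel mode, waits until all of them
 * have reported in and then takes the BKL again.
 */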
void wait_for_APs_to_finish_booting(void)
{
	unsigned n = 0;
	int i;

	/* check how many cpus are actually alive */
	for (i = 0 ; i < ncpus ; i++) {
		if (cpu_test_flag(i, CPU_IS_READY))
			n++;
	}
	if (n != ncpus)
		printf("WARNING only %u out of %u cpus booted\n", n, ncpus);

	/* we must let the other CPUs run in kernel mode first */
	BKL_UNLOCK();
	while (ap_cpus_booted != (n - 1))
		arch_pause();
	/* now take the lock again before we continue execution */
	BKL_LOCK();
}

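/* called by each AP when it has finished booting; cpu is currently unused */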
void ap_boot_finished(unsigned cpu)
{
	ap_cpus_booted++;
}

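/* handle a halt IPI: acknowledge it, stop the local timer and halt this cpu */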
void smp_ipi_halt_handler(void)
{
	ipi_ack();
	stop_local_timer();
	arch_smp_halt_cpu();
}

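/* ask another cpu to reschedule by sending it a scheduling IPI */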
void smp_schedule(unsigned cpu)
{
	arch_send_smp_schedule_ipi(cpu);
}

void smp_sched_handler(void);

/*
 * tell another cpu about a task to do and return only after that cpu has
 * acknowledged that the task is finished. If another cpu already has a
 * pending request for the same target cpu, wait for that request to finish
 * first.
 */
static void smp_schedule_sync(struct proc * p, unsigned task)
{
	unsigned cpu = p->p_cpu;
	unsigned mycpu = cpuid;

	assert(cpu != mycpu);
	/*
	 * if some other cpu made a request to the same cpu, wait until it is
	 * done before proceeding
	 */
	if (sched_ipi_data[cpu].flags != 0) {
		BKL_UNLOCK();
		while (sched_ipi_data[cpu].flags != 0) {
			if (sched_ipi_data[mycpu].flags) {
				BKL_LOCK();
				smp_sched_handler();
				BKL_UNLOCK();
			}
		}
		BKL_LOCK();
	}

	sched_ipi_data[cpu].data = (u32_t) p;
	sched_ipi_data[cpu].flags |= task;
	__insn_barrier();
	arch_send_smp_schedule_ipi(cpu);

	/*
	 * wait until the destination cpu finishes its job; while spinning,
	 * keep servicing requests aimed at this cpu to avoid a deadlock when
	 * two cpus send requests to each other at the same time
	 */
	BKL_UNLOCK();
	while (sched_ipi_data[cpu].flags != 0) {
		if (sched_ipi_data[mycpu].flags) {
			BKL_LOCK();
			smp_sched_handler();
			BKL_UNLOCK();
		}
	}
	BKL_LOCK();
}

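/*
 * Stop a process. If it is runnable it may be executing on its cpu, so that
 * cpu is asked to stop it synchronously; otherwise RTS_PROC_STOP is set
 * directly.
 */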
void smp_schedule_stop_proc(struct proc * p)
{
	if (proc_is_runnable(p))
		smp_schedule_sync(p, SCHED_IPI_STOP_PROC);
	else
		RTS_SET(p, RTS_PROC_STOP);
	assert(RTS_ISSET(p, RTS_PROC_STOP));
}

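/*
 * Keep a process from running while VM is updating its page tables: same
 * scheme as smp_schedule_stop_proc(), but with RTS_VMINHIBIT.
 */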
void smp_schedule_vminhibit(struct proc * p)
{
	if (proc_is_runnable(p))
		smp_schedule_sync(p, SCHED_IPI_VM_INHIBIT);
	else
		RTS_SET(p, RTS_VMINHIBIT);
	assert(RTS_ISSET(p, RTS_VMINHIBIT));
}

void smp_schedule_stop_proc_save_ctx(struct proc * p)
{
	/*
	 * stop the process and force its complete context to be saved (i.e.
	 * including FPU state and such)
	 */
	smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
	assert(RTS_ISSET(p, RTS_PROC_STOP));
}

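/*
 * Migrate a process to dest_cpu: stop it with its full context saved, switch
 * its cpu assignment and let it run again.
 */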
void smp_schedule_migrate_proc(struct proc * p, unsigned dest_cpu)
{
	/*
	 * stop the process and force its complete context to be saved (i.e.
	 * including FPU state and such)
	 */
	smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
	assert(RTS_ISSET(p, RTS_PROC_STOP));

	/* assign the new cpu and let the process run again */
	p->p_cpu = dest_cpu;
	RTS_UNSET(p, RTS_PROC_STOP);
}

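/*
 * Runs on the cpu that received a scheduling IPI: carry out the task(s)
 * requested in sched_ipi_data for this cpu and clear the flags so that the
 * requesting cpu can continue.
 */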
void smp_sched_handler(void)
{
	unsigned flgs;
	unsigned cpu = cpuid;

	flgs = sched_ipi_data[cpu].flags;

	if (flgs) {
		struct proc * p;
		p = (struct proc *)sched_ipi_data[cpu].data;

		if (flgs & SCHED_IPI_STOP_PROC) {
			RTS_SET(p, RTS_PROC_STOP);
		}
		if (flgs & SCHED_IPI_SAVE_CTX) {
			/*
			 * the rest of the context has been saved already,
			 * only the FPU state remains
			 */
			if (proc_used_fpu(p) &&
					get_cpulocal_var(fpu_owner) == p) {
				disable_fpu_exception();
				save_local_fpu(p, FALSE /*retain*/);
				/* we're preparing to migrate somewhere else */
				release_fpu(p);
			}
		}
		if (flgs & SCHED_IPI_VM_INHIBIT) {
			RTS_SET(p, RTS_VMINHIBIT);
		}
	}

	__insn_barrier();
	/* let the requesting cpu know that this task is finished */
	sched_ipi_data[cpu].flags = 0;
}

/*
 * This function is always called after smp_sched_handler() has already run.
 * Its only purpose is to acknowledge the IPI and to preempt the current
 * process if the CPU was not idle.
 */
void smp_ipi_sched_handler(void)
{
	struct proc * curr;

	ipi_ack();

	curr = get_cpulocal_var(proc_ptr);
	if (curr->p_endpoint != IDLE) {
		RTS_SET(curr, RTS_PREEMPTED);
	}
}