/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 Google, Inc
 *
 * Taken from coreboot file of the same name
 */

#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <errno.h>
#include <asm/atomic.h>
#include <asm/cache.h>

struct udevice;
enum {
	/* Indicates that the function should run on all CPUs */
	MP_SELECT_ALL	= -1,

	/* Run only on the boot CPU (BSP) */
	MP_SELECT_BSP	= -2,

	/* Run only on the non-boot CPUs (APs) */
	MP_SELECT_APS	= -3,
};

typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);

/*
 * A struct mp_flight_record details one step in a sequence of calls for the
 * APs to perform, with the BSP coordinating the sequencing. Each flight
 * record either makes every AP wait at a barrier before calling the callback,
 * or lets the APs call it without waiting. Either way, every AP increments
 * the cpus_entered field when it reaches the record. When the BSP observes
 * that cpus_entered matches the number of APs, it calls bsp_call with
 * bsp_arg and, upon returning, releases the barrier, allowing the APs to
 * make further progress. See the example flight plan below the MP_FR_*()
 * macros.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback is simply not called.
 *
 * @barrier: Ensures that the BSP and APs don't run the flight record at the
 *	same time
 * @cpus_entered: Counts the number of APs that have run this record
 * @ap_call: Function for the APs to call
 * @ap_arg: Argument to pass to @ap_call
 * @bsp_call: Function for the BSP to call
 * @bsp_arg: Argument to pass to @bsp_call
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	void *ap_arg;
	mp_callback_t bsp_call;
	void *bsp_arg;
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{ \
		.barrier = ATOMIC_INIT(barrier_), \
		.cpus_entered = ATOMIC_INIT(0), \
		.ap_call = ap_func_, \
		.ap_arg = ap_arg_, \
		.bsp_call = bsp_func_, \
		.bsp_arg = bsp_arg_, \
	}

#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)
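
/*
 * Example flight plan (an illustrative sketch, not part of this API): the
 * APs first run a hypothetical ap_load_ucode() callback in parallel without
 * waiting, then block at a barrier while the BSP runs a hypothetical
 * bsp_do_mtrrs(); both callbacks must have the mp_callback_t signature:
 *
 *	static struct mp_flight_record mp_steps[] = {
 *		MP_FR_NOBLOCK_APS(ap_load_ucode, NULL, NULL, NULL),
 *		MP_FR_BLOCK_APS(NULL, NULL, bsp_do_mtrrs, NULL),
 *	};
 */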

/*
 * mp_init() sets up the SIPI vector and brings up the APs, executing each
 * flight record in the flight plan in turn. Note that the MP infrastructure
 * uses the SMM default area without saving it. It is up to the chipset or
 * mainboard to either reserve this area in the e820 map, or to save the
 * region prior to calling mp_init() and restore it after mp_init() returns.
 *
 * At the time mp_init() is called the MTRR MSRs are mirrored into the APs,
 * then caching is enabled, before running the flight plan.
 *
 * MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of CPU numbers and APIC IDs is not deterministic.
 *    Therefore one cannot rely on this ordering, or on the order of devices
 *    in the device tree, unless the chipset or mainboard knows the APIC IDs
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
int mp_init(void);

/**
 * x86_mp_init() - Set up additional CPUs
 *
 * @return 0 on success, -ve on error
 */
int x86_mp_init(void);

/**
 * mp_run_func() - Function to call on the AP
 *
 * @arg: Argument to pass
 */
typedef void (*mp_run_func)(void *arg);

#if CONFIG_IS_ENABLED(SMP) && !CONFIG_IS_ENABLED(X86_64)
/**
 * mp_run_on_cpus() - Run a function on one or all CPUs
 *
 * This does not return until all CPUs have completed the work.
 *
 * Running on anything other than the boot CPU is only supported if
 * CONFIG_SMP_AP_WORK is enabled.
 *
 * @cpu_select: CPU to run on (its dev_seq() value), or MP_SELECT_ALL for
 *	all, or MP_SELECT_BSP for the BSP
 * @func: Function to run
 * @arg: Argument to pass to the function
 * @return 0 on success, -ve on error
 */
int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg);
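
/*
 * Example (an illustrative sketch): run a hypothetical say_hello() function,
 * matching the mp_run_func signature, on every CPU and wait for all of them
 * to finish:
 *
 *	static void say_hello(void *arg)
 *	{
 *		puts("hello\n");
 *	}
 *
 *	ret = mp_run_on_cpus(MP_SELECT_ALL, say_hello, NULL);
 *	if (ret)
 *		printf("MP work failed: %d\n", ret);
 */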

/**
 * mp_park_aps() - Park the APs ready for the OS
 *
 * This halts all CPUs except the main one, ready for the OS to use them.
 *
 * @return 0 if OK, -ve on error
 */
int mp_park_aps(void);

/**
 * mp_first_cpu() - Get the first CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. Call this function first,
 * then call mp_next_cpu() repeatedly (with the same @cpu_select) until it
 * returns -EFBIG. See the example loop below mp_next_cpu().
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @return first CPU number to run on (e.g. 0), or -EFBIG if the selection
 *	matches no CPUs
 */
int mp_first_cpu(int cpu_select);

/**
 * mp_next_cpu() - Get the next CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. After first calling
 * mp_first_cpu() once, call this function repeatedly until it returns
 * -EFBIG.
 *
 * The value of @cpu_select must be the same for all calls and must match
 * the value passed to mp_first_cpu(), otherwise the behaviour is undefined.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @prev_cpu: Previous value returned by mp_first_cpu()/mp_next_cpu()
 * @return next CPU number to run on (e.g. 0), or -EFBIG when the selection
 *	is exhausted
 */
int mp_next_cpu(int cpu_select, int prev_cpu);
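
/*
 * Example (an illustrative sketch): iterate over all CPUs in a selection,
 * following the protocol described above:
 *
 *	int cpu;
 *
 *	for (cpu = mp_first_cpu(cpu_select); cpu != -EFBIG;
 *	     cpu = mp_next_cpu(cpu_select, cpu))
 *		printf("selected CPU %d\n", cpu);
 */
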
#else
static inline int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
{
	/* There is only one CPU, so just call the function here */
	func(arg);

	return 0;
}

static inline int mp_park_aps(void)
{
	/* No APs to park */

	return 0;
}

static inline int mp_first_cpu(int cpu_select)
{
	/* Only the BSP exists, so an APs-only selection matches no CPUs */
	return cpu_select == MP_SELECT_APS ? -EFBIG : MP_SELECT_BSP;
}

static inline int mp_next_cpu(int cpu_select, int prev_cpu)
{
	/*
	 * There is only one CPU, and mp_first_cpu() has already returned
	 * it, so the iteration is done
	 */
	return -EFBIG;
}

#endif

#endif /* _X86_MP_H_ */