/*
 * Copyright (c) 2015 Google, Inc
 *
 * SPDX-License-Identifier:	GPL-2.0
 *
 * Taken from coreboot file of the same name
 */

#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <asm/atomic.h>

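/*
 * Callback run on an AP or the BSP while a flight record is executed; @cpu
 * identifies the CPU device and @arg is the ap_arg/bsp_arg supplied in the
 * record (see struct mp_flight_record below)
 */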
typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);

/*
 * An mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier that each AP waits on before calling its callback, or
 * allows the APs to perform the callback without waiting. In either case,
 * each AP that enters a record increments its cpus_entered field. When the
 * BSP observes that cpus_entered matches the number of APs, bsp_call is
 * called with bsp_arg and, upon returning, releases the barrier, allowing
 * the APs to make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In that case the callback
 * is simply not called.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	void *ap_arg;
	mp_callback_t bsp_call;
	void *bsp_arg;
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{							\
		.barrier = ATOMIC_INIT(barrier_),		\
		.cpus_entered = ATOMIC_INIT(0),			\
		.ap_call = ap_func_,				\
		.ap_arg = ap_arg_,				\
		.bsp_call = bsp_func_,				\
		.bsp_arg = bsp_arg_,				\
	}

#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)
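
/*
 * Example flight plan (an illustrative sketch only; ap_load_microcode() and
 * bsp_gather_info() are hypothetical functions with the mp_callback_t
 * signature):
 *
 *	static struct mp_flight_record flight_plan[] = {
 *		MP_FR_BLOCK_APS(ap_load_microcode, NULL, NULL, NULL),
 *		MP_FR_NOBLOCK_APS(NULL, NULL, bsp_gather_info, NULL),
 *	};
 *
 * In the first record each AP waits at the barrier; once the BSP has seen
 * every AP enter, it releases the barrier and each AP runs
 * ap_load_microcode(). In the second record the APs do not wait, and the
 * BSP runs bsp_gather_info() once every AP has entered the record.
 */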

/*
 * The mp_params structure provides the arguments to the mp subsystem
 * for bringing up APs.
 *
 * At present this is overkill for U-Boot, but it may make it easier to add
 * SMM support.
 */
struct mp_params {
	int num_cpus; /* Total number of CPUs, including the BSP */
	int parallel_microcode_load;
	const void *microcode_pointer;
	/* Flight plan for APs and BSP */
	struct mp_flight_record *flight_plan;
	int num_records;
};
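
/*
 * A minimal sketch of filling in mp_params, assuming flight_plan[] is an
 * array of flight records (as in the example above) and num_cpus_total has
 * already been determined by the caller:
 *
 *	struct mp_params mp_params;
 *
 *	mp_params.num_cpus = num_cpus_total;
 *	mp_params.parallel_microcode_load = 0;
 *	mp_params.microcode_pointer = NULL;
 *	mp_params.flight_plan = &flight_plan[0];
 *	mp_params.num_records = ARRAY_SIZE(flight_plan);
 */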

/*
 * mp_init() will set up the SIPI vector and bring up the APs according to
 * mp_params. Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses the SMM default area without saving it. It
 * is up to the chipset or mainboard either to reserve this area in the e820
 * map or to save this region prior to calling mp_init() and restore it after
 * mp_init() returns.
 *
 * At the time mp_init() is called, the MTRR MSRs are mirrored into the APs,
 * then caching is enabled, before the flight plan is run.
 *
 * MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of CPU numbers and APIC IDs is not deterministic.
 *    Therefore, one cannot rely on this property or on the order of devices
 *    in the device tree unless the chipset or mainboard knows the APIC IDs
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
int mp_init(struct mp_params *params);
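
/*
 * A typical call sequence, continuing the mp_params sketch above:
 *
 *	int ret;
 *
 *	ret = mp_init(&mp_params);
 *	if (ret)
 *		printf("Warning: MP init failed: %d\n", ret);
 */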

/* Probes the CPU device */
int mp_init_cpu(struct udevice *cpu, void *unused);

#endif /* _X86_MP_H_ */