/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_atomic.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>

static unsigned long hart_data_offset;

/** Per hart specific data to manage state transition **/
struct sbi_hsm_data {
	atomic_t state;
};

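/**
 * Translate an internal hart state (SBI_HART_xyz) into the SBI HSM
 * status value (SBI_HSM_HART_STATUS_xyz) reported to S-mode callers
 * @param state the internal hart state
 * @return the matching SBI_HSM_HART_STATUS_xyz value and SBI_EINVAL
 * for an unknown state
 */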
int sbi_hsm_hart_state_to_status(int state)
{
	int ret;

	switch (state) {
	case SBI_HART_STOPPED:
		ret = SBI_HSM_HART_STATUS_STOPPED;
		break;
	case SBI_HART_STOPPING:
		ret = SBI_HSM_HART_STATUS_STOP_PENDING;
		break;
	case SBI_HART_STARTING:
		ret = SBI_HSM_HART_STATUS_START_PENDING;
		break;
	case SBI_HART_STARTED:
		ret = SBI_HSM_HART_STATUS_STARTED;
		break;
	default:
		ret = SBI_EINVAL;
	}

	return ret;
}

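/**
 * Get the current state of the given HART
 * @param hartid the HART id
 * @return the SBI_HART_xyz state of the HART and SBI_HART_UNKNOWN if
 * no scratch space is associated with the HART id
 */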
int sbi_hsm_hart_get_state(u32 hartid)
{
	struct sbi_hsm_data *hdata;
	struct sbi_scratch *scratch;

	scratch = sbi_hartid_to_scratch(hartid);
	if (!scratch)
		return SBI_HART_UNKNOWN;

	hdata = sbi_scratch_offset_ptr(scratch, hart_data_offset);

	return atomic_read(&hdata->state);
}

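/**
 * Check whether the given HART is in the STARTED state
 * @param hartid the HART id
 * @return TRUE if the HART is started and FALSE otherwise
 */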
bool sbi_hsm_hart_started(u32 hartid)
{
	if (sbi_hsm_hart_get_state(hartid) == SBI_HART_STARTED)
		return TRUE;
	else
		return FALSE;
}

/**
 * Get ulong HART mask for given HART base ID
 * @param hbase the HART base ID
 * @param out_hmask the output ulong HART mask
 * @return 0 on success and SBI_Exxx (< 0) on failure
 * Note: the output HART mask will be set to zero on failure as well.
 */
int sbi_hsm_hart_started_mask(ulong hbase, ulong *out_hmask)
{
	ulong i;
	ulong hcount = sbi_scratch_last_hartid() + 1;

	*out_hmask = 0;
	if (hcount <= hbase)
		return SBI_EINVAL;
	/* Cap the reported range to BITS_PER_LONG HARTs starting at hbase */
	if (BITS_PER_LONG < (hcount - hbase))
		hcount = hbase + BITS_PER_LONG;

	for (i = hbase; i < hcount; i++) {
		if (sbi_hsm_hart_get_state(i) == SBI_HART_STARTED)
			*out_hmask |= 1UL << (i - hbase);
	}

	return 0;
}

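/**
 * Mark the calling HART as STARTED just before it jumps to the next
 * booting stage; the HART hangs if it was not in the STARTING state
 * @param scratch the scratch space of the calling HART
 * @param hartid the HART id of the calling HART
 */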
void sbi_hsm_prepare_next_jump(struct sbi_scratch *scratch, u32 hartid)
{
	u32 oldstate;
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	oldstate = atomic_cmpxchg(&hdata->state, SBI_HART_STARTING,
				  SBI_HART_STARTED);
	if (oldstate != SBI_HART_STARTING)
		sbi_hart_hang();
}

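/*
 * Park a warm-booting HART until some other HART asks it to start.
 *
 * Only the machine software interrupt is kept enabled so that the WFI
 * loop can be woken by the IPI sent from sbi_hsm_hart_start(); the
 * saved MIE state is then restored and the pending IPI is cleared.
 */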
static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
	unsigned long saved_mie;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* Save MIE CSR */
	saved_mie = csr_read(CSR_MIE);

	/* Set MSIE bit to receive IPI */
	csr_set(CSR_MIE, MIP_MSIP);

	/* Wait for the state transition requested by sbi_hsm_hart_start() */
	while (atomic_read(&hdata->state) != SBI_HART_STARTING) {
		wfi();
	}

	/* Restore MIE CSR */
	csr_write(CSR_MIE, saved_mie);

	/* Clear current HART IPI */
	sbi_platform_ipi_clear(plat, hartid);
}

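/**
 * Initialize the hart state management subsystem
 *
 * For cold boot, allocate per-HART state data in scratch space and mark
 * the boot HART as STARTING and every other HART as STOPPED. For warm
 * boot, wait until some other HART requests this HART to start.
 *
 * @param scratch the scratch space of the calling HART
 * @param hartid the HART id of the calling HART
 * @param cold_boot whether this is a cold boot or a warm boot
 * @return 0 on success and SBI_ENOMEM on failure
 */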
int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
{
	u32 i;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;

	if (cold_boot) {
		hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata),
							    "HART_DATA");
		if (!hart_data_offset)
			return SBI_ENOMEM;

		/* Initialize hart state data for every hart */
		for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
			rscratch = sbi_hartid_to_scratch(i);
			if (!rscratch)
				continue;

			hdata = sbi_scratch_offset_ptr(rscratch,
						       hart_data_offset);
			ATOMIC_INIT(&hdata->state,
				    (i == hartid) ?
				    SBI_HART_STARTING : SBI_HART_STOPPED);
		}
	} else {
		sbi_hsm_hart_wait(scratch, hartid);
	}

	return 0;
}

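/**
 * Transition the calling HART from STOPPING to STOPPED and park it
 *
 * If the platform supports hart hotplug, the HART is handed over to
 * sbi_platform_hart_stop(); otherwise it re-enters the warm-boot entry
 * point and waits there for the next start request. This function does
 * not return; any failure ends in sbi_hart_hang().
 *
 * @param scratch the scratch space of the calling HART
 */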
void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch)
{
	u32 hstate;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);
	void (*jump_warmboot)(void) = (void (*)(void))scratch->warmboot_addr;

	hstate = atomic_cmpxchg(&hdata->state, SBI_HART_STOPPING,
				SBI_HART_STOPPED);
	if (hstate != SBI_HART_STOPPING)
		goto fail_exit;

	if (sbi_platform_has_hart_hotplug(plat)) {
		sbi_platform_hart_stop(plat);
		/* It should never reach here */
		goto fail_exit;
	}

	/**
	 * As the platform lacks hotplug support, directly jump to the
	 * warmboot entry and wait for interrupts there. We do it
	 * preemptively in order to preserve the hart states and reuse
	 * the code path for hotplug.
	 */
	jump_warmboot();

fail_exit:
	/* It should never reach here */
	sbi_printf("ERR: Failed to stop hart [%u]\n", current_hartid());
	sbi_hart_hang();
}

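/**
 * Start (power-up) the given HART
 *
 * Moves the target HART from STOPPED to STARTING, records the start
 * address and the opaque parameter in its scratch space (next_addr and
 * next_arg1), and then either uses the platform hart start hook or
 * sends an IPI to wake the HART out of sbi_hsm_hart_wait().
 *
 * @param scratch the scratch space of the calling HART
 * @param hartid the HART id of the HART to start
 * @param saddr the physical address where the HART will start executing
 * @param priv an opaque parameter handed to the started HART via next_arg1
 * @return 0 on success and SBI_Exxx (< 0) on failure
 */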
int sbi_hsm_hart_start(struct sbi_scratch *scratch, u32 hartid,
		       ulong saddr, ulong priv)
{
	int rc;
	unsigned long init_count;
	unsigned int hstate;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	rscratch = sbi_hartid_to_scratch(hartid);
	if (!rscratch)
		return SBI_EINVAL;
	hdata = sbi_scratch_offset_ptr(rscratch, hart_data_offset);
	hstate = atomic_cmpxchg(&hdata->state, SBI_HART_STOPPED,
				SBI_HART_STARTING);
	if (hstate == SBI_HART_STARTED)
		return SBI_EALREADY;

	/**
	 * If the hart is already transitioning to the start or stop state,
	 * another start call is considered an invalid request.
	 */
	if (hstate != SBI_HART_STOPPED)
		return SBI_EINVAL;

	rc = sbi_hart_pmp_check_addr(scratch, saddr, PMP_X);
	if (rc)
		return rc;
	/* TODO: We also need to check saddr for a valid physical address. */

	init_count = sbi_init_count(hartid);
	rscratch->next_arg1 = priv;
	rscratch->next_addr = saddr;

	if (sbi_platform_has_hart_hotplug(plat) ||
	    (sbi_platform_has_hart_secondary_boot(plat) && !init_count)) {
		return sbi_platform_hart_start(plat, hartid,
					       scratch->warmboot_addr);
	} else {
		sbi_platform_ipi_send(plat, hartid);
	}

	return 0;
}

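/**
 * Stop the calling HART
 *
 * Moves the calling HART from STARTED to STOPPING. When exitnow is
 * true, sbi_exit() is called immediately and this function does not
 * return.
 *
 * @param scratch the scratch space of the calling HART
 * @param exitnow whether to call sbi_exit() right away
 * @return 0 on success and SBI_Exxx (< 0) on failure
 */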
int sbi_hsm_hart_stop(struct sbi_scratch *scratch, bool exitnow)
{
	int oldstate;
	u32 hartid = current_hartid();
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	if (!sbi_hsm_hart_started(hartid))
		return SBI_EINVAL;

	oldstate = atomic_cmpxchg(&hdata->state, SBI_HART_STARTED,
				  SBI_HART_STOPPING);
	if (oldstate != SBI_HART_STARTED) {
		sbi_printf("%s: ERR: The hart is in an invalid state [%u]\n",
			   __func__, oldstate);
		return SBI_EDENIED;
	}

	if (exitnow)
		sbi_exit(scratch);

	return 0;
}