1 /*
2 * Copyright (c) 2017 - 2020, Broadcom
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <errno.h>
9
10 #include <arch_helpers.h>
11 #include <common/debug.h>
12 #include <drivers/arm/ccn.h>
13 #include <lib/bakery_lock.h>
14 #include <lib/mmio.h>
15 #include <lib/psci/psci.h>
16 #include <lib/spinlock.h>
17
18 #include <brcm_scpi.h>
19 #include <chimp.h>
20 #include <cmn_plat_util.h>
21 #include <plat_brcm.h>
22 #include <platform_def.h>
23 #include <sr_utils.h>
24
25 #include "m0_cfg.h"
26
27
28 #define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
29 #define CLUSTER_PWR_STATE(state) \
30 ((state)->pwr_domain_state[MPIDR_AFFLVL1])
31 #define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL2])
32
33 #define VENDOR_RST_TYPE_SHIFT 4
34
35 #if HW_ASSISTED_COHERENCY
36 /*
37 * On systems where participant CPUs are cache-coherent, we can use spinlocks
38 * instead of bakery locks.
39 */
40 spinlock_t event_lock;
41 #define event_lock_get(_lock) spin_lock(&_lock)
42 #define event_lock_release(_lock) spin_unlock(&_lock)
43
44 #else
45 /*
46 * Use bakery locks for state coordination as not all participants are
47 * cache coherent now.
48 */
49 DEFINE_BAKERY_LOCK(event_lock);
50 #define event_lock_get(_lock) bakery_lock_get(&_lock)
51 #define event_lock_release(_lock) bakery_lock_release(&_lock)
52 #endif
53
/*
 * PSCI CPU_ON handler: power up the core identified by 'mpidr'.
 *
 * The SCP powers up any parent (cluster/system) domains on its own, so
 * the request only needs to ask for the level-0 (core) domain.
 */
static int brcm_pwr_domain_on(u_register_t mpidr)
{
	scpi_set_brcm_power_state(mpidr, scpi_power_on, scpi_power_on,
				  scpi_power_on);

	return PSCI_E_SUCCESS;
}
65
/*******************************************************************************
 * Handler called when a power level has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from. This handler would never be invoked with
 * the system power domain uninitialized as either the primary would have taken
 * care of it as part of cold boot or the first core awakened from system
 * suspend would have already initialized it.
 ******************************************************************************/
static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());

	/* Assert that the system power domain need not be initialized */
	assert(SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_RUN);

	/* This handler only runs for a core that was fully OFF */
	assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);

	/*
	 * Perform the common cluster specific operations i.e enable coherency
	 * if this cluster was off.  Coherency must be restored before the GIC
	 * interfaces below are touched.
	 */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF) {
		INFO("Cluster #%lu entering to snoop/dvm domain\n", cluster_id);
		ccn_enter_snoop_dvm_domain(1 << cluster_id);
	}

	/* Program the gic per-cpu distributor or re-distributor interface */
	plat_brcm_gic_pcpu_init();

	/* Enable the gic cpu interface */
	plat_brcm_gic_cpuif_enable();
}
98
brcm_power_down_common(void)99 static void brcm_power_down_common(void)
100 {
101 unsigned int standbywfil2, standbywfi;
102 uint64_t mpidr = read_mpidr_el1();
103
104 switch (MPIDR_AFFLVL1_VAL(mpidr)) {
105 case 0x0:
106 standbywfi = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFI;
107 standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFIL2;
108 break;
109 case 0x1:
110 standbywfi = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFI;
111 standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFIL2;
112 break;
113 case 0x2:
114 standbywfi = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFI;
115 standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFIL2;
116 break;
117 case 0x3:
118 standbywfi = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFI;
119 standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFIL2;
120 break;
121 default:
122 ERROR("Invalid cluster #%llx\n", MPIDR_AFFLVL1_VAL(mpidr));
123 return;
124 }
125 /* Clear the WFI status bit */
126 event_lock_get(event_lock);
127 mmio_setbits_32(CDRU_PROC_EVENT_CLEAR,
128 (1 << (standbywfi + MPIDR_AFFLVL0_VAL(mpidr))) |
129 (1 << standbywfil2));
130 event_lock_release(event_lock);
131 }
132
133 /*
134 * Helper function to inform power down state to SCP.
135 */
brcm_scp_suspend(const psci_power_state_t * target_state)136 static void brcm_scp_suspend(const psci_power_state_t *target_state)
137 {
138 uint32_t cluster_state = scpi_power_on;
139 uint32_t system_state = scpi_power_on;
140
141 /* Check if power down at system power domain level is requested */
142 if (SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
143 system_state = scpi_power_retention;
144
145 /* Check if Cluster is to be turned off */
146 if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
147 cluster_state = scpi_power_off;
148
149 /*
150 * Ask the SCP to power down the appropriate components depending upon
151 * their state.
152 */
153 scpi_set_brcm_power_state(read_mpidr_el1(),
154 scpi_power_off,
155 cluster_state,
156 system_state);
157 }
158
/*
 * Helper function to turn off a CPU power domain and its parent power domains
 * if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
 * call the suspend helper here.
 */
static void brcm_scp_off(const psci_power_state_t *target_state)
{
	brcm_scp_suspend(target_state);
}
168
/*
 * PSCI CPU_OFF handler.  The steps below are order-sensitive: interrupts
 * are masked first so nothing can wake the core mid-sequence, coherency
 * is dropped before the WFI-status bits are cleared, and the SCP request
 * goes out last.
 */
static void brcm_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
	/* Prevent interrupts from spuriously waking up this cpu */
	plat_brcm_gic_cpuif_disable();

	/* Turn redistributor off */
	plat_brcm_gic_redistif_off();

	/* If Cluster is to be turned off, disable coherency */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
		ccn_exit_snoop_dvm_domain(1 << cluster_id);

	/* Clear stale standby-WFI event status before informing the SCP */
	brcm_power_down_common();

	brcm_scp_off(target_state);
}
188
189 /*******************************************************************************
190 * Handler called when the CPU power domain is about to enter standby.
191 ******************************************************************************/
brcm_cpu_standby(plat_local_state_t cpu_state)192 static void brcm_cpu_standby(plat_local_state_t cpu_state)
193 {
194 unsigned int scr;
195
196 assert(cpu_state == PLAT_LOCAL_STATE_RET);
197
198 scr = read_scr_el3();
199 /*
200 * Enable the Non secure interrupt to wake the CPU.
201 * In GICv3 affinity routing mode, the non secure group1 interrupts use
202 * the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ.
203 * Enabling both the bits works for both GICv2 mode and GICv3 affinity
204 * routing mode.
205 */
206 write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
207 isb();
208 dsb();
209 wfi();
210
211 /*
212 * Restore SCR to the original value, synchronisation of scr_el3 is
213 * done by eret while el3_exit to save some execution cycles.
214 */
215 write_scr_el3(scr);
216 }
217
/*
 * Helper function to shutdown the system via SCPI.
 *
 * Never returns: after the SCP request the core sits in WFI waiting to
 * be powered off; falling through WFI is treated as a fatal error.
 */
static void __dead2 brcm_scp_sys_shutdown(void)
{
	/*
	 * Disable GIC CPU interface to prevent pending interrupt
	 * from waking up the AP from WFI.
	 */
	plat_brcm_gic_cpuif_disable();

	/* Flush and invalidate data cache */
	dcsw_op_all(DCCISW);

	/* Bring Cluster out of coherency domain as its going to die */
	plat_brcm_interconnect_exit_coherency();

	/* Clear stale standby-WFI event status before the SCP request */
	brcm_power_down_common();

	/* Send the power down request to the SCP */
	scpi_sys_power_state(scpi_system_shutdown);

	wfi();
	/* Should have been powered off in WFI above */
	ERROR("BRCM System Off: operation not handled.\n");
	panic();
}
244
/*
 * Helper function to reset the system
 *
 * Never returns: after the SCP request the core sits in WFI waiting for
 * the reset to take effect; falling through WFI is a fatal error.
 */
static void __dead2 brcm_scp_sys_reset(unsigned int reset_type)
{
	/*
	 * Disable GIC CPU interface to prevent pending interrupt
	 * from waking up the AP from WFI.
	 */
	plat_brcm_gic_cpuif_disable();

	/* Flush and invalidate data cache */
	dcsw_op_all(DCCISW);

	/* Bring Cluster out of coherency domain as its going to die */
	plat_brcm_interconnect_exit_coherency();

	/* Clear stale standby-WFI event status before the SCP request */
	brcm_power_down_common();

	/* Send the system reset request to the SCP
	 *
	 * As per PSCI spec system power state could be
	 * 0-> Shutdown
	 * 1-> Reboot- Board level Reset
	 * 2-> Reset - SoC level Reset
	 *
	 * Spec allocates 8 bits, 2 nibble, for this. One nibble is sufficient
	 * for sending the state hence We are utilizing 2nd nibble for vendor
	 * define reset type.
	 */
	scpi_sys_power_state((reset_type << VENDOR_RST_TYPE_SHIFT) |
			     scpi_system_reboot);

	wfi();
	/* Should have been reset in WFI above */
	ERROR("BRCM System Reset: operation not handled.\n");
	panic();
}
282
brcm_system_reset(void)283 static void __dead2 brcm_system_reset(void)
284 {
285 unsigned int reset_type;
286
287 if (bcm_chimp_is_nic_mode())
288 reset_type = SOFT_RESET_L3;
289 else
290 reset_type = SOFT_SYS_RESET_L1;
291
292 brcm_scp_sys_reset(reset_type);
293 }
294
/*
 * PSCI SYSTEM_RESET2 handler.
 *
 * For a vendor-defined reset the cookie selects the next boot source;
 * the architectural variant only supports a warm (L3) reset.
 */
static int brcm_system_reset2(int is_vendor, int reset_type,
			      u_register_t cookie)
{
	if (is_vendor) {
		uint32_t boot_source = (uint32_t)cookie & BOOT_SOURCE_MASK;

		brcm_stingray_set_straps(boot_source);
	} else {
		/* Architectural warm boot: only warm reset is supported */
		reset_type = SOFT_RESET_L3;
	}

	brcm_scp_sys_reset(reset_type);

	/*
	 * Unreachable: brcm_scp_sys_reset() is __dead2 and never returns,
	 * but the PSCI hook signature requires a return value.
	 */
	return 0;
}
316
brcm_validate_ns_entrypoint(uintptr_t entrypoint)317 static int brcm_validate_ns_entrypoint(uintptr_t entrypoint)
318 {
319 /*
320 * Check if the non secure entrypoint lies within the non
321 * secure DRAM.
322 */
323 if ((entrypoint >= BRCM_NS_DRAM1_BASE) &&
324 (entrypoint < (BRCM_NS_DRAM1_BASE + BRCM_NS_DRAM1_SIZE)))
325 return PSCI_E_SUCCESS;
326 #ifdef __aarch64__
327 if ((entrypoint >= BRCM_DRAM2_BASE) &&
328 (entrypoint < (BRCM_DRAM2_BASE + BRCM_DRAM2_SIZE)))
329 return PSCI_E_SUCCESS;
330
331 if ((entrypoint >= BRCM_DRAM3_BASE) &&
332 (entrypoint < (BRCM_DRAM3_BASE + BRCM_DRAM3_SIZE)))
333 return PSCI_E_SUCCESS;
334 #endif
335
336 return PSCI_E_INVALID_ADDRESS;
337 }
338
339 /*******************************************************************************
340 * ARM standard platform handler called to check the validity of the power state
341 * parameter.
342 ******************************************************************************/
brcm_validate_power_state(unsigned int power_state,psci_power_state_t * req_state)343 static int brcm_validate_power_state(unsigned int power_state,
344 psci_power_state_t *req_state)
345 {
346 int pstate = psci_get_pstate_type(power_state);
347 int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
348 int i;
349
350 assert(req_state);
351
352 if (pwr_lvl > PLAT_MAX_PWR_LVL)
353 return PSCI_E_INVALID_PARAMS;
354
355 /* Sanity check the requested state */
356 if (pstate == PSTATE_TYPE_STANDBY) {
357 /*
358 * It's possible to enter standby only on power level 0
359 * Ignore any other power level.
360 */
361 if (pwr_lvl != MPIDR_AFFLVL0)
362 return PSCI_E_INVALID_PARAMS;
363
364 req_state->pwr_domain_state[MPIDR_AFFLVL0] =
365 PLAT_LOCAL_STATE_RET;
366 } else {
367 for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
368 req_state->pwr_domain_state[i] =
369 PLAT_LOCAL_STATE_OFF;
370 }
371
372 /*
373 * We expect the 'state id' to be zero.
374 */
375 if (psci_get_pstate_id(power_state))
376 return PSCI_E_INVALID_PARAMS;
377
378 return PSCI_E_SUCCESS;
379 }
380
/*******************************************************************************
 * Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
 * platform will take care of registering the handlers with PSCI.
 ******************************************************************************/
plat_psci_ops_t plat_brcm_psci_pm_ops = {
	.pwr_domain_on		= brcm_pwr_domain_on,
	.pwr_domain_on_finish	= brcm_pwr_domain_on_finish,
	.pwr_domain_off		= brcm_pwr_domain_off,
	.cpu_standby		= brcm_cpu_standby,
	.system_off		= brcm_scp_sys_shutdown,
	.system_reset		= brcm_system_reset,
	.system_reset2		= brcm_system_reset2,
	.validate_ns_entrypoint = brcm_validate_ns_entrypoint,
	.validate_power_state	= brcm_validate_power_state,
};
396
/*
 * Register the platform PSCI handlers and record the secure warm-boot
 * entrypoint in the CRMU mailbox so the M0/SCP can release secondary
 * cores to it (written to the rvbar field of the M0 config layout).
 */
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const struct plat_psci_ops **psci_ops)
{
	*psci_ops = &plat_brcm_psci_pm_ops;

	/* Setup mailbox with entry point. */
	mmio_write_64(CRMU_CFG_BASE + offsetof(M0CFG, core_cfg.rvbar),
		      sec_entrypoint);

	return 0;
}
408