/*
 * Copyright (c) 2019-2020, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* common headers */
#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <errno.h>

/* mediatek platform specific headers */
#include <platform_def.h>
#include <scu.h>
#include <mt_gic_v3.h>
#include <mtk_mcdi.h>
#include <mtk_plat_common.h>
#include <mtgpio.h>
#include <mtspmc.h>
#include <plat_dcm.h>
#include <plat_debug.h>
#include <plat_params.h>
#include <plat_private.h>
#include <power_tracer.h>
#include <pmic.h>
#include <spm.h>
#include <spm_suspend.h>
#include <sspm.h>
#include <rtc.h>

/* Local power state for power domains in Run state. */
#define MTK_LOCAL_STATE_RUN	0
/* Local power state for retention. */
#define MTK_LOCAL_STATE_RET	1
/* Local power state for OFF/power-down. */
#define MTK_LOCAL_STATE_OFF	2

#if PSCI_EXTENDED_STATE_ID
/*
 * Macros used to parse state information from State-ID if it is using the
 * recommended encoding for State-ID.
 */
#define MTK_LOCAL_PSTATE_WIDTH		4
#define MTK_LOCAL_PSTATE_MASK		((1 << MTK_LOCAL_PSTATE_WIDTH) - 1)

/* Macros to construct the composite power state */

/* Make composite power state parameter till power level 0 */

#define mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
	(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))

#else /* !PSCI_EXTENDED_STATE_ID */

#define mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
		(((lvl0_state) << PSTATE_ID_SHIFT) | \
		((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
		((type) << PSTATE_TYPE_SHIFT))

#endif /* PSCI_EXTENDED_STATE_ID */

/* Make composite power state parameter till power level 1 */
#define mtk_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
		(((lvl1_state) << MTK_LOCAL_PSTATE_WIDTH) | \
		mtk_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))

/* Make composite power state parameter till power level 2 */
#define mtk_make_pwrstate_lvl2( \
		lvl2_state, lvl1_state, lvl0_state, pwr_lvl, type) \
		(((lvl2_state) << (MTK_LOCAL_PSTATE_WIDTH * 2)) | \
		mtk_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type))

#define MTK_PWR_LVL0	0
#define MTK_PWR_LVL1	1
#define MTK_PWR_LVL2	2

/* Macros to read the MTK power domain state */
#define MTK_CORE_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL0]
#define MTK_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[MTK_PWR_LVL1]
#define MTK_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > MTK_PWR_LVL1) ? \
			(state)->pwr_domain_state[MTK_PWR_LVL2] : 0)

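/*
 * With the recommended (extended) State-ID encoding, each power level owns a
 * MTK_LOCAL_PSTATE_WIDTH-bit (4-bit) field of the State-ID, level 0 in the
 * least significant field. For example, State-ID 0x022 encodes core OFF (2)
 * at level 0, cluster OFF (2) at level 1 and the system domain in RUN (0) at
 * level 2.
 */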
#if PSCI_EXTENDED_STATE_ID
/*
 *  The table storing the valid idle power states. Ensure that the
 *  array entries are populated in ascending order of state-id to
 *  enable us to use binary search during power state validation.
 *  The table must be terminated by a NULL entry.
 */
const unsigned int mtk_pm_idle_states[] = {
	/* State-id - 0x001 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_RET, MTK_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_RUN,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_RUN, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > MTK_PWR_LVL1
	/* State-id - 0x222 */
	mtk_make_pwrstate_lvl2(MTK_LOCAL_STATE_OFF, MTK_LOCAL_STATE_OFF,
		MTK_LOCAL_STATE_OFF, MTK_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif

#define CPU_IDX(cluster, cpu)		((cluster << 2) + cpu)
#define ON	true
#define OFF	false

/* Pause MCDI during CPU hotplug */
static bool HP_SSPM_PAUSE;
/* CPU hotplug controlled by SSPM */
static bool HP_SSPM_CTRL = true;
/* Power off the cluster when its last CPU is hotplugged off */
static bool HP_CLUSTER_OFF = true;
/* Power off the cluster when its last CPU enters MCDI power-down */
static bool MCDI_C2 = true;
/* Enable MCDI */
static bool MCDI_SSPM = true;

static uintptr_t secure_entrypoint;

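/*
 * Program the L2 de-select setting for cluster 1 (MP1) through MCUCFG; the
 * value written is a platform-specific tuning constant. Only called from
 * plat_cluster_pwron_common() for cluster 1.
 */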
static void mp1_L2_desel_config(void)
{
	mmio_write_64(MCUCFG_BASE + 0x2200, 0x2092c820);

	dsb();
}

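/*
 * Return true if the given CPU is the only one currently powered on in its
 * cluster, judged from the power-status register read below. The register
 * address and the per-CPU status bit layout are platform specific.
 */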
static bool clst_single_pwr(int cluster, int cpu)
{
	uint32_t cpu_mask[2] = {0x00001e00, 0x000f0000};
	uint32_t cpu_pwr_bit[] = {9, 10, 11, 12, 16, 17, 18, 19};
	int my_idx = (cluster << 2) + cpu;
	uint32_t pwr_stat = mmio_read_32(0x10006180);

	return !(pwr_stat & (cpu_mask[cluster] & ~BIT(cpu_pwr_bit[my_idx])));
}

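/*
 * Return true if the given CPU is the only one in its cluster that is still
 * marked available in the MCDI available-CPU mask.
 */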
static bool clst_single_on(int cluster, int cpu)
{
	uint32_t cpu_mask[2] = {0x0f, 0xf0};
	int my_idx = (cluster << 2) + cpu;
	uint32_t on_stat = mcdi_avail_cpu_mask_read();

	return !(on_stat & (cpu_mask[cluster] & ~BIT(my_idx)));
}

static void plat_cpu_pwrdwn_common(void)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	mt_gic_rdistif_save();
	mt_gic_cpuif_disable();
}

static void plat_cpu_pwron_common(void)
{
	/* Enable the gic cpu interface */
	mt_gic_cpuif_enable();
	mt_gic_rdistif_init();
	mt_gic_rdistif_restore();
}

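/*
 * Quiesce a cluster before it is powered down: enable GIC sync DCM for
 * cluster 1, then take the cluster out of CCI coherency and disable its SCU.
 */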
static void plat_cluster_pwrdwn_common(uint64_t mpidr, int cluster)
{
	if (cluster > 0)
		mt_gic_sync_dcm_enable();

	/* Disable coherency */
	plat_mtk_cci_disable();
	disable_scu(mpidr);
}

static void plat_cluster_pwron_common(uint64_t mpidr, int cluster)
{
	if (cluster > 0) {
		l2c_parity_check_setup();
		circular_buffer_setup();
		mp1_L2_desel_config();
		mt_gic_sync_dcm_disable();
	}

	/* Enable coherency */
	enable_scu(mpidr);
	plat_mtk_cci_enable();
	/* Enable big core dcm */
	plat_dcm_restore_cluster_on(mpidr);
	/* Enable rgu dcm */
	plat_dcm_rgu_enable();
}

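/*
 * PSCI cpu_standby handler: route physical IRQ/FIQ to EL3 so that a pending
 * interrupt takes the core out of wfi, execute wfi, then restore SCR_EL3 so
 * the interrupt is handled at the lower exception levels as usual.
 */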
static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	u_register_t scr;

	scr = read_scr_el3();
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);

	isb();
	dsb();
	wfi();

	write_scr_el3(scr);
}

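/*
 * MCDI/hotplug control helpers. Depending on the HP_SSPM_CTRL, HP_SSPM_PAUSE
 * and MCDI_SSPM flags above, CPU and cluster power transitions are either
 * delegated to the SSPM/MCDI firmware or driven directly through the SPM.
 */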
static void mcdi_ctrl_before_hotplug_on(int cluster, int cpu)
{
	if (!HP_SSPM_CTRL && HP_SSPM_PAUSE && MCDI_SSPM) {
		mcdi_pause_clr(cluster, CPU_IDX(cluster, cpu), OFF);
		mcdi_pause_set(cluster, CPU_IDX(cluster, cpu), ON);
	}
}

static void mcdi_ctrl_before_hotplug_off(int cluster, int cpu, bool cluster_off)
{
	if (!HP_SSPM_CTRL && HP_SSPM_PAUSE && MCDI_SSPM)
		mcdi_pause_set(cluster_off ? cluster : -1,
				CPU_IDX(cluster, cpu), OFF);
}

static void mcdi_ctrl_cluster_cpu_off(int cluster, int cpu, bool cluster_off)
{
	if (MCDI_SSPM) {
		sspm_set_bootaddr(secure_entrypoint);

		sspm_standbywfi_irq_enable(CPU_IDX(cluster, cpu));

		if (cluster_off)
			sspm_cluster_pwr_off_notify(cluster);
		else
			sspm_cluster_pwr_on_notify(cluster);
	}
}

static void mcdi_ctrl_suspend(void)
{
	if (MCDI_SSPM)
		mcdi_pause();
}

static void mcdi_ctrl_resume(void)
{
	if (MCDI_SSPM)
		mcdi_unpause();
}

static void hotplug_ctrl_cluster_on(int cluster, int cpu)
{
	if (HP_SSPM_CTRL && MCDI_SSPM) {
		mcdi_hotplug_clr(cluster, CPU_IDX(cluster, cpu), OFF);
		mcdi_hotplug_set(cluster, -1, ON);
		mcdi_hotplug_wait_ack(cluster, -1, ON);
	} else {
		/* power on cluster */
		if (!spm_get_cluster_powerstate(cluster))
			spm_poweron_cluster(cluster);
	}
}

static void hotplug_ctrl_cpu_on(int cluster, int cpu)
{
	if (HP_SSPM_CTRL && MCDI_SSPM)
		mcdi_hotplug_set(cluster, CPU_IDX(cluster, cpu), ON);
	else
		spm_poweron_cpu(cluster, cpu);
}

static void hotplug_ctrl_cpu_on_finish(int cluster, int cpu)
{
	spm_disable_cpu_auto_off(cluster, cpu);

	if (HP_SSPM_CTRL && MCDI_SSPM)
		mcdi_hotplug_clr(cluster, CPU_IDX(cluster, cpu), ON);
	else if (HP_SSPM_PAUSE && MCDI_SSPM)
		mcdi_pause_clr(cluster, CPU_IDX(cluster, cpu), ON);

	mcdi_avail_cpu_mask_set(BIT(CPU_IDX(cluster, cpu)));
}

static void hotplug_ctrl_cluster_cpu_off(int cluster, int cpu, bool cluster_off)
{
	mcdi_avail_cpu_mask_clr(BIT(CPU_IDX(cluster, cpu)));

	if (HP_SSPM_CTRL && MCDI_SSPM) {
		mcdi_hotplug_set(cluster_off ? cluster : -1,
				CPU_IDX(cluster, cpu), OFF);
	} else {
		spm_enable_cpu_auto_off(cluster, cpu);

		if (cluster_off)
			spm_enable_cluster_auto_off(cluster);

		spm_set_cpu_power_off(cluster, cpu);
	}
}

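/*
 * PSCI pwr_domain_on handler: bring the target cluster up if needed, and if
 * the cluster was previously off, set its CPUs to reset into AArch64 at the
 * warm-boot entry point, then power on the target CPU.
 */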
static int plat_mtk_power_domain_on(unsigned long mpidr)
{
	int cpu = MPIDR_AFFLVL0_VAL(mpidr);
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	int clst_pwr = spm_get_cluster_powerstate(cluster);
	unsigned int i;

	mcdi_ctrl_before_hotplug_on(cluster, cpu);
	hotplug_ctrl_cluster_on(cluster, cpu);

	if (clst_pwr == 0) {
		/* Init the cluster's CPUs to reset into AArch64 */
		for (i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER; i++) {
			mcucfg_init_archstate(cluster, i, 1);
			mcucfg_set_bootaddr(cluster, i, secure_entrypoint);
		}
	}

	hotplug_ctrl_cpu_on(cluster, cpu);

	return PSCI_E_SUCCESS;
}

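/*
 * PSCI pwr_domain_off handler: quiesce the GIC CPU interface so stray
 * interrupts cannot wake the core, power the cluster down too if this is its
 * last CPU and cluster off is allowed, then hand the CPU (and possibly the
 * cluster) to SSPM/SPM for power-down.
 */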
static void plat_mtk_power_domain_off(const psci_power_state_t *state)
{
	uint64_t mpidr = read_mpidr();
	int cpu = MPIDR_AFFLVL0_VAL(mpidr);
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	const plat_local_state_t *pds = state->pwr_domain_state;
	bool afflvl1 = (pds[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF);
	bool cluster_off = (HP_CLUSTER_OFF && afflvl1 &&
					clst_single_on(cluster, cpu));

	plat_cpu_pwrdwn_common();

	if (cluster_off)
		plat_cluster_pwrdwn_common(mpidr, cluster);

	mcdi_ctrl_before_hotplug_off(cluster, cpu, cluster_off);
	hotplug_ctrl_cluster_cpu_off(cluster, cpu, cluster_off);
}

static void plat_mtk_power_domain_on_finish(const psci_power_state_t *state)
{
	uint64_t mpidr = read_mpidr();
	int cpu = MPIDR_AFFLVL0_VAL(mpidr);
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	const plat_local_state_t *pds = state->pwr_domain_state;
	bool afflvl1 = (pds[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF);

	if (afflvl1)
		plat_cluster_pwron_common(mpidr, cluster);

	plat_cpu_pwron_common();

	hotplug_ctrl_cpu_on_finish(cluster, cpu);
}

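/*
 * PSCI pwr_domain_suspend handler. For a system-level suspend (power level 2
 * off) this pauses MCDI, notifies SSPM over IPI, runs the SPM suspend flow
 * and saves the GIC distributor context; otherwise it only hands the CPU
 * (and, if it is the last one powered, the cluster) off to MCDI.
 */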
static void plat_mtk_power_domain_suspend(const psci_power_state_t *state)
{
	uint64_t mpidr = read_mpidr();
	int cpu = MPIDR_AFFLVL0_VAL(mpidr);
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	const plat_local_state_t *pds = state->pwr_domain_state;
	bool afflvl1 = (pds[MPIDR_AFFLVL1] == MTK_LOCAL_STATE_OFF);
	bool afflvl2 = (pds[MPIDR_AFFLVL2] == MTK_LOCAL_STATE_OFF);
	bool cluster_off = MCDI_C2 && afflvl1 && clst_single_pwr(cluster, cpu);

	plat_cpu_pwrdwn_common();

	plat_dcm_mcsi_a_backup();

	if (cluster_off || afflvl2)
		plat_cluster_pwrdwn_common(mpidr, cluster);

	if (afflvl2) {
		spm_data_t spm_d = { .cmd = SPM_SUSPEND };
		uint32_t *d = (uint32_t *)&spm_d;
		uint32_t l = sizeof(spm_d) / sizeof(uint32_t);

		mcdi_ctrl_suspend();

		spm_set_bootaddr(secure_entrypoint);

		if (MCDI_SSPM)
			sspm_ipi_send_non_blocking(IPI_ID_SUSPEND, d);

		spm_system_suspend();

		if (MCDI_SSPM)
			while (sspm_ipi_recv_non_blocking(IPI_ID_SUSPEND, d, l))
				;

		mt_gic_distif_save();
	} else {
		mcdi_ctrl_cluster_cpu_off(cluster, cpu, cluster_off);
	}
}

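/*
 * PSCI pwr_domain_suspend_finish handler: on resume from system suspend,
 * re-initialize and restore the GIC, run the SPM resume flow with the
 * matching SSPM IPI handshake and unpause MCDI; on a shallower wakeup just
 * re-enable this CPU's GIC interface. The cluster and DCM state are restored
 * in both cases.
 */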
static void plat_mtk_power_domain_suspend_finish(const psci_power_state_t *state)
{
	uint64_t mpidr = read_mpidr();
	int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	const plat_local_state_t *pds = state->pwr_domain_state;
	bool afflvl2 = (pds[MPIDR_AFFLVL2] == MTK_LOCAL_STATE_OFF);

	if (afflvl2) {
		spm_data_t spm_d = { .cmd = SPM_RESUME };
		uint32_t *d = (uint32_t *)&spm_d;
		uint32_t l = sizeof(spm_d) / sizeof(uint32_t);

		mt_gic_init();
		mt_gic_distif_restore();
		mt_gic_rdistif_restore();

		mmio_write_32(EMI_WFIFO, 0xf);

		if (MCDI_SSPM)
			sspm_ipi_send_non_blocking(IPI_ID_SUSPEND, d);

		spm_system_suspend_finish();

		if (MCDI_SSPM)
			while (sspm_ipi_recv_non_blocking(IPI_ID_SUSPEND, d, l))
				;

		mcdi_ctrl_resume();
	} else {
		plat_cpu_pwron_common();
	}

	plat_cluster_pwron_common(mpidr, cluster);

	plat_dcm_mcsi_a_restore();
}

#if PSCI_EXTENDED_STATE_ID

static int plat_mtk_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	unsigned int state_id;
	int i;

	assert(req_state);

	if (!MCDI_SSPM)
		return PSCI_E_INVALID_PARAMS;

	/*
	 *  Currently we are using a linear search for finding the matching
	 *  entry in the idle power state array. This can be made a binary
	 *  search if the number of entries justifies the added complexity.
	 */
	for (i = 0; !!mtk_pm_idle_states[i]; i++) {
		if (power_state == mtk_pm_idle_states[i])
			break;
	}

	/* Return error if entry not found in the idle state array */
	if (!mtk_pm_idle_states[i])
		return PSCI_E_INVALID_PARAMS;

	i = 0;
	state_id = psci_get_pstate_id(power_state);

	/* Parse the State ID and populate the state info parameter */
	while (state_id) {
		req_state->pwr_domain_state[i++] = state_id &
						MTK_LOCAL_PSTATE_MASK;
		state_id >>= MTK_LOCAL_PSTATE_WIDTH;
	}

	return PSCI_E_SUCCESS;
}

#else /* if !PSCI_EXTENDED_STATE_ID */

static int plat_mtk_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	assert(req_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only on power level 0.
		 * Ignore any other power level.
		 */
		if (pwr_lvl != 0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MTK_PWR_LVL0] = MTK_LOCAL_STATE_RET;
	} else if (!MCDI_SSPM) {
		return PSCI_E_INVALID_PARAMS;
	} else {
		for (i = 0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
	}

	return PSCI_E_SUCCESS;
}

#endif /* PSCI_EXTENDED_STATE_ID */

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_mtk_system_off(void)
{
	INFO("MTK System Off\n");

	rtc_power_off_sequence();
	wk_pmic_enable_sdn_delay();
	pmic_power_off();

	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_mtk_system_reset(void)
{
	struct bl_aux_gpio_info *gpio_reset = plat_get_mtk_gpio_reset();

	INFO("MTK System Reset\n");

	mt_set_gpio_out(gpio_reset->index, gpio_reset->polarity);

	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

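/*
 * Report the power state used for PSCI SYSTEM_SUSPEND: every power level up
 * to PLAT_MAX_PWR_LVL is requested to be powered off.
 */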
static void plat_mtk_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	assert(PLAT_MAX_PWR_LVL >= 2);

	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = MTK_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * MTK platform PSCI operations, registered with the generic PSCI framework
 * through plat_setup_psci_ops() below.
 ******************************************************************************/
static const plat_psci_ops_t plat_plat_pm_ops = {
	.cpu_standby			= plat_cpu_standby,
	.pwr_domain_on			= plat_mtk_power_domain_on,
	.pwr_domain_on_finish		= plat_mtk_power_domain_on_finish,
	.pwr_domain_off			= plat_mtk_power_domain_off,
	.pwr_domain_suspend		= plat_mtk_power_domain_suspend,
	.pwr_domain_suspend_finish	= plat_mtk_power_domain_suspend_finish,
	.system_off			= plat_mtk_system_off,
	.system_reset			= plat_mtk_system_reset,
	.validate_power_state		= plat_mtk_validate_power_state,
	.get_sys_suspend_power_state	= plat_mtk_get_sys_suspend_power_state
};

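/*
 * Export the platform PSCI operations and record the warm-boot entry point.
 * Cluster 0's CPUs are set up here to reset into AArch64 at that entry point;
 * other clusters are configured when they are first powered on. If
 * check_mcdi_ctl_stat() fails, SSPM-assisted hotplug and MCDI are disabled
 * and the SPM is driven directly instead.
 */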
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	unsigned int i;

	*psci_ops = &plat_plat_pm_ops;
	secure_entrypoint = sec_entrypoint;

	/* Init cluster 0's CPUs to reset into AArch64 */
	for (i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER; i++) {
		mcucfg_init_archstate(0, i, 1);
		mcucfg_set_bootaddr(0, i, secure_entrypoint);
	}

	if (!check_mcdi_ctl_stat()) {
		HP_SSPM_CTRL = false;
		MCDI_SSPM = false;
	}

	return 0;
}