1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright 2016 Freescale Semiconductor, Inc.
4 * Author: Hongbo Zhang <hongbo.zhang@nxp.com>
 * This file implements the generic ARMv8 PSCI dispatcher, default
 * handlers and EL3 exception vectors
6 */
7
8#include <config.h>
9#include <linux/linkage.h>
10#include <asm/psci.h>
11#include <asm/secure.h>
12
/*
 * Default PSCI function: returns ARM_PSCI_RET_NI (-1, "Not Implemented").
 * The symbol is emitted weak, so a platform implements a PSCI function
 * simply by providing a strong definition of the same name in its own
 * secure code.
 */
#define PSCI_DEFAULT(__fn) \
	ENTRY(__fn); \
	mov	w0, #ARM_PSCI_RET_NI; \
	ret; \
	ENDPROC(__fn); \
	.weak __fn
20
/*
 * One entry of a PSCI dispatch table: a 64-bit function ID followed by
 * a 64-bit handler pointer (16 bytes per entry -- handle_psci steps
 * through the table in #16 increments).
 */
#define PSCI_TABLE(__id, __fn) \
	.quad __id; \
	.quad __fn
25
26.pushsection ._secure.text, "ax"
27
/*
 * 32-bit (SMC32) PSCI default handlers.
 *
 * Every handler below is weak and just returns ARM_PSCI_RET_NI, so a
 * platform enables a function by supplying a strong definition of the
 * same name.
 */
PSCI_DEFAULT(psci_version)
PSCI_DEFAULT(psci_cpu_suspend)
PSCI_DEFAULT(psci_cpu_off)
PSCI_DEFAULT(psci_cpu_on)
PSCI_DEFAULT(psci_affinity_info)
PSCI_DEFAULT(psci_migrate)
PSCI_DEFAULT(psci_migrate_info_type)
PSCI_DEFAULT(psci_migrate_info_up_cpu)
PSCI_DEFAULT(psci_system_off)
PSCI_DEFAULT(psci_system_reset)
PSCI_DEFAULT(psci_features)
PSCI_DEFAULT(psci_cpu_freeze)
PSCI_DEFAULT(psci_cpu_default_suspend)
PSCI_DEFAULT(psci_node_hw_state)
PSCI_DEFAULT(psci_system_suspend)
PSCI_DEFAULT(psci_set_suspend_mode)
/*
 * Was misspelled "psi_stat_residency"; renamed so it matches
 * psci_stat_residency_64 and so a platform's strong
 * psci_stat_residency override actually takes effect.
 */
PSCI_DEFAULT(psci_stat_residency)
PSCI_DEFAULT(psci_stat_count)

/*
 * Function-ID -> handler table for the SMC32 PSCI range.
 * Scanned linearly by handle_psci; the all-zero entry terminates it.
 */
.align 3
_psci_32_table:
PSCI_TABLE(ARM_PSCI_FN_CPU_SUSPEND, psci_cpu_suspend)
PSCI_TABLE(ARM_PSCI_FN_CPU_OFF, psci_cpu_off)
PSCI_TABLE(ARM_PSCI_FN_CPU_ON, psci_cpu_on)
PSCI_TABLE(ARM_PSCI_FN_MIGRATE, psci_migrate)
PSCI_TABLE(ARM_PSCI_0_2_FN_PSCI_VERSION, psci_version)
PSCI_TABLE(ARM_PSCI_0_2_FN_CPU_SUSPEND, psci_cpu_suspend)
PSCI_TABLE(ARM_PSCI_0_2_FN_CPU_OFF, psci_cpu_off)
PSCI_TABLE(ARM_PSCI_0_2_FN_CPU_ON, psci_cpu_on)
PSCI_TABLE(ARM_PSCI_0_2_FN_AFFINITY_INFO, psci_affinity_info)
PSCI_TABLE(ARM_PSCI_0_2_FN_MIGRATE, psci_migrate)
PSCI_TABLE(ARM_PSCI_0_2_FN_MIGRATE_INFO_TYPE, psci_migrate_info_type)
PSCI_TABLE(ARM_PSCI_0_2_FN_MIGRATE_INFO_UP_CPU, psci_migrate_info_up_cpu)
PSCI_TABLE(ARM_PSCI_0_2_FN_SYSTEM_OFF, psci_system_off)
PSCI_TABLE(ARM_PSCI_0_2_FN_SYSTEM_RESET, psci_system_reset)
PSCI_TABLE(ARM_PSCI_1_0_FN_PSCI_FEATURES, psci_features)
PSCI_TABLE(ARM_PSCI_1_0_FN_CPU_FREEZE, psci_cpu_freeze)
PSCI_TABLE(ARM_PSCI_1_0_FN_CPU_DEFAULT_SUSPEND, psci_cpu_default_suspend)
PSCI_TABLE(ARM_PSCI_1_0_FN_NODE_HW_STATE, psci_node_hw_state)
PSCI_TABLE(ARM_PSCI_1_0_FN_SYSTEM_SUSPEND, psci_system_suspend)
PSCI_TABLE(ARM_PSCI_1_0_FN_SET_SUSPEND_MODE, psci_set_suspend_mode)
PSCI_TABLE(ARM_PSCI_1_0_FN_STAT_RESIDENCY, psci_stat_residency)
PSCI_TABLE(ARM_PSCI_1_0_FN_STAT_COUNT, psci_stat_count)
PSCI_TABLE(0, 0)
73
/*
 * 64-bit (SMC64) PSCI default handlers -- weak, return ARM_PSCI_RET_NI,
 * overridable by the platform exactly like the 32-bit set.
 */
PSCI_DEFAULT(psci_cpu_suspend_64)
PSCI_DEFAULT(psci_cpu_on_64)
PSCI_DEFAULT(psci_affinity_info_64)
PSCI_DEFAULT(psci_migrate_64)
PSCI_DEFAULT(psci_migrate_info_up_cpu_64)
PSCI_DEFAULT(psci_cpu_default_suspend_64)
PSCI_DEFAULT(psci_node_hw_state_64)
PSCI_DEFAULT(psci_system_suspend_64)
PSCI_DEFAULT(psci_stat_residency_64)
PSCI_DEFAULT(psci_stat_count_64)

/*
 * Function-ID -> handler table for the SMC64 PSCI range.
 * Scanned linearly by handle_psci; the all-zero entry terminates it.
 */
.align 3
_psci_64_table:
PSCI_TABLE(ARM_PSCI_0_2_FN64_CPU_SUSPEND, psci_cpu_suspend_64)
PSCI_TABLE(ARM_PSCI_0_2_FN64_CPU_ON, psci_cpu_on_64)
PSCI_TABLE(ARM_PSCI_0_2_FN64_AFFINITY_INFO, psci_affinity_info_64)
PSCI_TABLE(ARM_PSCI_0_2_FN64_MIGRATE, psci_migrate_64)
PSCI_TABLE(ARM_PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, psci_migrate_info_up_cpu_64)
PSCI_TABLE(ARM_PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND, psci_cpu_default_suspend_64)
PSCI_TABLE(ARM_PSCI_1_0_FN64_NODE_HW_STATE, psci_node_hw_state_64)
PSCI_TABLE(ARM_PSCI_1_0_FN64_SYSTEM_SUSPEND, psci_system_suspend_64)
PSCI_TABLE(ARM_PSCI_1_0_FN64_STAT_RESIDENCY, psci_stat_residency_64)
PSCI_TABLE(ARM_PSCI_1_0_FN64_STAT_COUNT, psci_stat_count_64)
PSCI_TABLE(0, 0)
99
/*
 * Common entry for every call handled at EL3.
 *
 * Masks all DAIF exceptions (PSCI calls are Fast Calls and must run
 * atomically), then saves the caller's DAIF value, the SMCCC
 * callee-saved registers x18-x30 and ELR_EL3 on the current stack.
 * psci_return below unwinds in exactly the reverse order -- the two
 * macros must be kept in sync.
 */
.macro	psci_enter
	/* PSCI call is Fast Call(atomic), so mask DAIF */
	mrs	x15, DAIF
	stp	x15, xzr, [sp, #-16]!	/* xzr only pads the slot to 16 bytes */
	ldr	x15, =0x3C0		/* set DAIF bits D, A, I, F */
	msr	DAIF, x15
	/* SMC convention, x18 ~ x30 should be saved by callee */
	stp	x29, x30, [sp, #-16]!
	stp	x27, x28, [sp, #-16]!
	stp	x25, x26, [sp, #-16]!
	stp	x23, x24, [sp, #-16]!
	stp	x21, x22, [sp, #-16]!
	stp	x19, x20, [sp, #-16]!
	mrs	x15, elr_el3		/* return address of the SMC */
	stp	x18, x15, [sp, #-16]!
.endm
116
/*
 * Common exit path: restores everything psci_enter saved, in reverse
 * order, re-establishes the caller's DAIF state and returns from the
 * exception.  x0 must already hold the PSCI return value.
 */
.macro	psci_return
	/* restore registers */
	ldp	x18, x15, [sp], #16
	msr	elr_el3, x15
	ldp	x19, x20, [sp], #16
	ldp	x21, x22, [sp], #16
	ldp	x23, x24, [sp], #16
	ldp	x25, x26, [sp], #16
	ldp	x27, x28, [sp], #16
	ldp	x29, x30, [sp], #16
	/* restore DAIF */
	ldp	x15, xzr, [sp], #16	/* second word is the pad, discarded */
	msr	DAIF, x15
	eret
.endm
132
/*
 * Generic PSCI dispatcher.
 * In:  x0 = SMC function ID
 *      x9 = PSCI function-ID table base (caller must set this;
 *           16 bytes per PSCI_TABLE entry, all-zero entry terminates)
 *
 * NOTE: the numeric labels 2 and 3 below are also reached from
 * handle_svc via "2b"/"3b" backward references -- do not renumber or
 * remove them without fixing handle_svc.
 */
handle_psci:
	psci_enter
1:	ldr	x10, [x9]		/* Load PSCI function ID from table */
	cbz	x10, 3f			/* If reach the end, bail out */
	cmp	x10, x0
	b.eq	2f			/* PSCI function found */
	add x9, x9, #16			/* If not match, try next entry */
	b	1b

2:	ldr	x11, [x9, #8]		/* Load PSCI function pointer */
	blr	x11			/* Call PSCI function */
	psci_return

3:	mov	x0, #ARM_PSCI_RET_NI	/* ID not in table: Not Implemented */
	psci_return
149
/*
 * Handle SiP service functions defined in the SiP service function table.
 * Use DECLARE_SECURE_SVC(_name, _id, _fn) to add a platform specific SiP
 * service function into the table, which lives in the
 * '._secure_svc_tbl_entries' section, next to '._secure.text'.
 *
 * Branch-target map (easy to misread):
 *  - "2f" is the local label below: empty table, return the error code
 *    directly (no psci_enter was done, so a bare eret is correct);
 *  - "2b" resolves backwards into handle_psci's label 2, which reloads
 *    the handler from [x9, #8] and calls it, then does psci_return;
 *  - "3b" resolves backwards into handle_psci's label 3, returning
 *    ARM_PSCI_RET_NI via psci_return.
 *  Both backward paths balance the psci_enter performed here.
 */
handle_svc:
	adr	x9, __secure_svc_tbl_start
	adr	x10, __secure_svc_tbl_end
	subs	x12, x10, x9	/* Get number of bytes (entries) in table */
	b.eq	2f		/* Make sure SiP function table is not empty */
	psci_enter
1:	ldr x10, [x9]		/* Load SiP function ID */
	ldr x11, [x9, #8]	/* NOTE(review): handle_psci label 2 reloads the handler, so x11 looks unused here -- confirm */
	cmp	w10, w0		/* SiP IDs are compared as 32-bit values */
	b.eq	2b		/* Found: jump to handle_psci label 2 to call it */
	add x9, x9, #SECURE_SVC_TBL_OFFSET	/* Move to next entry */
	subs	x12, x12, #SECURE_SVC_TBL_OFFSET
	b.eq	3b		/* End of table: handle_psci label 3 returns NI */
	b	1b
2:	ldr	x0, =0xFFFFFFFF	/* Empty table: return error code */
	eret
173
/*
 * Dispatch a 32-bit (SMC32) call: IDs 0x84000000-0x8400001F are
 * standard PSCI, anything else goes to the SiP service handler.
 * NOTE(review): b.gt/b.lt are signed compares; every ID inside the
 * PSCI window has bit 31 set, and any ID with bit 31 clear routes to
 * handle_svc through the b.gt either way, so the routing matches the
 * unsigned check -- but b.hi/b.lo would state the intent more clearly.
 */
handle_smc32:
	/* SMC function ID  0x84000000-0x8400001F: 32 bits PSCI */
	ldr	w9, =0x8400001F
	cmp	w0, w9
	b.gt	handle_svc
	ldr	w9, =0x84000000
	cmp	w0, w9
	b.lt	handle_svc

	adr	x9, _psci_32_table	/* table base for handle_psci */
	b	handle_psci
185
/*
 * Main SMC dispatcher, entered from handle_sync for AArch64 callers.
 * Bit 30 of the SMC function ID selects the calling convention:
 * 0 = SMC32 (delegate to handle_smc32), 1 = SMC64.
 * 64-bit PSCI IDs occupy 0xC4000000-0xC400001F; everything else is
 * treated as a SiP service call.
 */
handle_smc64:
	/* check SMC32 or SMC64 calls */
	ubfx	x9, x0, #30, #1		/* extract the SMC64 convention bit */
	cbz	x9, handle_smc32

	/* SMC function ID 0xC4000000-0xC400001F: 64 bits PSCI */
	ldr	x9, =0xC400001F
	cmp	x0, x9
	b.gt	handle_svc
	ldr	x9, =0xC4000000
	cmp	x0, x9
	b.lt	handle_svc

	adr	x9, _psci_64_table	/* table base for handle_psci */
	b	handle_psci
201
/*
 * Compute a linear CPU index from MPIDR_EL1:
 *   id = Aff1 * CONFIG_ARMV8_PSCI_CPUS_PER_CLUSTER + Aff0
 * This assumes every cluster contains the same number of cores;
 * platforms with asymmetric clusters must override this weak function.
 * The AAPCS is honoured for the return value (x0) so C code can call
 * it too; in this PSCI environment x0 usually carries the SMC function
 * ID, so callers must preserve it themselves.
 * Clobbers: x9, x10 (x9 only when the Kconfig option is set).
 */
ENTRY(psci_get_cpu_id)
	mrs	x10, MPIDR_EL1
	ubfx	x0, x10, #0, #8		/* Aff0: core index within cluster */
#ifdef CONFIG_ARMV8_PSCI_CPUS_PER_CLUSTER
	ubfx	x9, x10, #8, #8		/* Aff1: cluster index */
	ldr	x10, =CONFIG_ARMV8_PSCI_CPUS_PER_CLUSTER
	madd	x0, x9, x10, x0		/* id = cluster * per_cluster + core */
#endif
	ret
ENDPROC(psci_get_cpu_id)
.weak psci_get_cpu_id
225
/*
 * In:  x0 = linear CPU ID
 * Out: x0 = top of that CPU's secure stack
 *      (stacks are carved downwards from __secure_stack_end, one
 *       1 << ARM_PSCI_STACK_SHIFT sized slot per CPU)
 * Clobbers: x9
 */
LENTRY(psci_get_cpu_stack_top)
	adr	x9, __secure_stack_end
	sub	x0, x9, x0, lsl #ARM_PSCI_STACK_SHIFT	/* end - id * stack size */
	ret
ENDPROC(psci_get_cpu_stack_top)
233
/* Catch-all for exception types this monitor does not handle. */
unhandled_exception:
	b	unhandled_exception	/* simply dead loop */
236
/*
 * Synchronous exception entry from a lower EL.  Sets up this CPU's
 * secure stack (selecting SP_EL3 via SPSel), then dispatches SMC
 * calls by the exception class field of ESR_EL3; anything that is
 * not an SMC parks the CPU.
 * x15/x14 temporarily hold the link register and the SMC function ID
 * because the two helper calls clobber x0 and x30.
 */
handle_sync:
	mov	x15, x30		/* save LR */
	mov	x14, x0			/* save SMC function ID */

	bl	psci_get_cpu_id		/* x0 = linear CPU ID */
	bl	psci_get_cpu_stack_top	/* x0 = this CPU's secure stack top */
	mov	x9, #1
	msr	spsel, x9		/* use SP_EL3 from here on */
	mov	sp, x0

	mov	x0, x14			/* restore SMC function ID */
	mov	x30, x15		/* restore LR */

	mrs	x9, esr_el3
	ubfx	x9, x9, #26, #6		/* extract exception class (EC) */
	cmp	x9, #0x13		/* EC 0x13: SMC from AArch32 */
	b.eq	handle_smc32
	cmp	x9, #0x17		/* EC 0x17: SMC from AArch64 */
	b.eq	handle_smc64

	b	unhandled_exception
258
#ifdef CONFIG_ARMV8_EA_EL3_FIRST
/*
 * Override this weak function if custom error handling is needed
 * for asynchronous aborts routed to EL3.
 */
ENTRY(plat_error_handler)
	ret
ENDPROC(plat_error_handler)
.weak plat_error_handler

/*
 * SError entry from a lower EL: set up this CPU's secure stack, call
 * the (weak) platform hook, then park the CPU -- an external abort
 * taken to EL3 is treated as fatal here.
 */
handle_error:
	bl	psci_get_cpu_id
	bl	psci_get_cpu_stack_top
	mov	x9, #1
	msr	spsel, x9		/* switch to SP_EL3 */
	mov	sp, x0

	bl	plat_error_handler	/* Platform specific error handling */
deadloop:
	b	deadloop		/* Never return */
#endif
280
	/*
	 * EL3 exception vector table.  VBAR_EL3 requires 2 KB (2^11)
	 * alignment, and each of the 16 entries is 128 bytes (2^7)
	 * apart -- hence the .align directives; do not change them.
	 * Only synchronous exceptions (SMCs) from lower ELs, and
	 * optionally SErrors, are handled.
	 */
	.align	11
	.globl	el3_exception_vectors
el3_exception_vectors:
	b	unhandled_exception	/* Sync, Current EL using SP0 */
	.align	7
	b	unhandled_exception	/* IRQ, Current EL using SP0 */
	.align	7
	b	unhandled_exception	/* FIQ, Current EL using SP0 */
	.align	7
	b	unhandled_exception	/* SError, Current EL using SP0 */
	.align	7
	b	unhandled_exception	/* Sync, Current EL using SPx */
	.align	7
	b	unhandled_exception	/* IRQ, Current EL using SPx */
	.align	7
	b	unhandled_exception	/* FIQ, Current EL using SPx */
	.align	7
	b	unhandled_exception	/* SError, Current EL using SPx */
	.align	7
	b	handle_sync		/* Sync, Lower EL using AArch64 */
	.align	7
	b	unhandled_exception	/* IRQ, Lower EL using AArch64 */
	.align	7
	b	unhandled_exception	/* FIQ, Lower EL using AArch64 */
	.align	7
#ifdef CONFIG_ARMV8_EA_EL3_FIRST
	b	handle_error		/* SError, Lower EL using AArch64 */
#else
	b	unhandled_exception	/* SError, Lower EL using AArch64 */
#endif
	.align	7
	b	unhandled_exception	/* Sync, Lower EL using AArch32 */
	.align	7
	b	unhandled_exception	/* IRQ, Lower EL using AArch32 */
	.align	7
	b	unhandled_exception	/* FIQ, Lower EL using AArch32 */
	.align	7
	b	unhandled_exception	/* SError, Lower EL using AArch32 */
319
/*
 * Install the EL3 exception vector table.  adr is PC-relative, so
 * this works wherever the secure section has been copied to at
 * runtime (do not replace with adrp/add, which assumes the link-time
 * page offset).
 */
ENTRY(psci_setup_vectors)
	adr	x0, el3_exception_vectors
	msr	vbar_el3, x0
	ret
ENDPROC(psci_setup_vectors)
325
/* Weak no-op hook: platforms override this for PSCI-specific init. */
ENTRY(psci_arch_init)
	ret
ENDPROC(psci_arch_init)
.weak psci_arch_init
330
331.popsection
332