/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 */
#include <linux/jump_label.h>
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>

	.section	".text"

#ifdef CONFIG_TRACEPOINTS

#ifndef CONFIG_JUMP_LABEL
	.data

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.8byte	0

	.section	".text"
#endif

/*
 * precall must preserve all registers.  use unused STK_PARAM()
 * areas to save snapshots and opcode.
 */
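/*
 * __trace_hcall_entry is called with the opcode still in r3 and with r4
 * pointing at the saved argument snapshot in the STK_PARAM() area.
 */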
#define HCALL_INST_PRECALL(FIRST_REG)				\
	mflr	r0;						\
	std	r3,STK_PARAM(R3)(r1);				\
	std	r4,STK_PARAM(R4)(r1);				\
	std	r5,STK_PARAM(R5)(r1);				\
	std	r6,STK_PARAM(R6)(r1);				\
	std	r7,STK_PARAM(R7)(r1);				\
	std	r8,STK_PARAM(R8)(r1);				\
	std	r9,STK_PARAM(R9)(r1);				\
	std	r10,STK_PARAM(R10)(r1);				\
	std	r0,16(r1);					\
	addi	r4,r1,STK_PARAM(FIRST_REG);			\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	__trace_hcall_entry;				\
	ld	r3,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1);	\
	ld	r4,STACK_FRAME_OVERHEAD+STK_PARAM(R4)(r1);	\
	ld	r5,STACK_FRAME_OVERHEAD+STK_PARAM(R5)(r1);	\
	ld	r6,STACK_FRAME_OVERHEAD+STK_PARAM(R6)(r1);	\
	ld	r7,STACK_FRAME_OVERHEAD+STK_PARAM(R7)(r1);	\
	ld	r8,STACK_FRAME_OVERHEAD+STK_PARAM(R8)(r1);	\
	ld	r9,STACK_FRAME_OVERHEAD+STK_PARAM(R9)(r1);	\
	ld	r10,STACK_FRAME_OVERHEAD+STK_PARAM(R10)(r1)

/*
 * postcall is performed immediately before function return which
 * allows liberal use of volatile registers.
 */
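/*
 * __trace_hcall_exit is called with r3 = opcode (saved by the precall),
 * r4 = hcall return value and r5 = return buffer (or 0), set up by the
 * POSTCALL wrappers below.
 */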
#define __HCALL_INST_POSTCALL					\
	ld	r0,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1);	\
	std	r3,STACK_FRAME_OVERHEAD+STK_PARAM(R3)(r1);	\
	mr	r4,r3;						\
	mr	r3,r0;						\
	bl	__trace_hcall_exit;				\
	ld	r0,STACK_FRAME_OVERHEAD+16(r1);			\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r3,STK_PARAM(R3)(r1);				\
	mtlr	r0

#define HCALL_INST_POSTCALL_NORETS				\
	li	r5,0;						\
	__HCALL_INST_POSTCALL

#define HCALL_INST_POSTCALL(BUFREG)				\
	mr	r5,BUFREG;					\
	__HCALL_INST_POSTCALL

#ifdef CONFIG_JUMP_LABEL
#define HCALL_BRANCH(LABEL)					\
	ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key)
#else

/*
 * We branch around this in early init (eg when populating the MMU
 * hashtable) by using an unconditional cpu feature.
 */
#define HCALL_BRANCH(LABEL)					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ;		\
	ld	r12,0(r12);					\
	std	r12,32(r1);					\
	cmpdi	r12,0;						\
	bne-	LABEL;						\
1:
#endif

#else
#define HCALL_INST_PRECALL(FIRST_ARG)
#define HCALL_INST_POSTCALL_NORETS
#define HCALL_INST_POSTCALL(BUFREG)
#define HCALL_BRANCH(LABEL)
#endif

_GLOBAL_TOC(plpar_hcall_norets_notrace)
	HMT_MEDIUM

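	/*
	 * Save the caller's CR in the CR save slot of the stack frame
	 * (8(r1) in the 64-bit ELF ABIs); it is restored after the
	 * hypervisor call, before returning.
	 */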
	mfcr	r0
	stw	r0,8(r1)
	HVSC				/* invoke the hypervisor */

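	/*
	 * The hypervisor call clobbers SRR0/SRR1, so clear the PACA's
	 * "SRR values are valid" flag; interrupt exit will reload them.
	 */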
	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

_GLOBAL_TOC(plpar_hcall_norets)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)
	HCALL_BRANCH(plpar_hcall_norets_trace)
	HVSC				/* invoke the hypervisor */

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
plpar_hcall_norets_trace:
	HCALL_INST_PRECALL(R4)
	HVSC
	HCALL_INST_POSTCALL_NORETS

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr
#endif

_GLOBAL_TOC(plpar_hcall)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_BRANCH(plpar_hcall_trace)

	std     r4,STK_PARAM(R4)(r1)     /* Save ret buffer */

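	/*
	 * The hypervisor expects the hcall arguments in r4 upwards, but
	 * r4 currently holds the return buffer pointer (saved above),
	 * so shift the C arguments down by one register.
	 */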
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
plpar_hcall_trace:
	HCALL_INST_PRECALL(R5)

	std	r4,STK_PARAM(R4)(r1)
	mr	r0,r4

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC

	ld	r12,STK_PARAM(R4)(r1)
	std	r4,0(r12)
	std	r5,8(r12)
	std	r6,16(r12)
	std	r7,24(r12)

	HCALL_INST_POSTCALL(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr
#endif

/*
 * plpar_hcall_raw can be called in real mode. kexec/kdump need some
 * hypervisor calls to be executed in real mode. So plpar_hcall_raw
 * does not access the per cpu hypervisor call statistics variables,
 * since these variables may not be present in the RMO region.
 */
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std     r4,STK_PARAM(R4)(r1)     /* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

_GLOBAL_TOC(plpar_hcall9)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_BRANCH(plpar_hcall9_trace)

	std     r4,STK_PARAM(R4)(r1)     /* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
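	/*
	 * hcall arguments 7-9 are the 9th-11th C arguments, which do not
	 * fit in r3-r10 and so arrive in the caller's parameter save
	 * area; load them from there.
	 */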
	ld	r10,STK_PARAM(R11)(r1)	 /* put arg7 in R10 */
	ld	r11,STK_PARAM(R12)(r1)	 /* put arg8 in R11 */
	ld	r12,STK_PARAM(R13)(r1)    /* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

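	/*
	 * r12 holds the ninth return value; stash it in r0 before r12
	 * is reused as the return buffer pointer.
	 */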
	mr	r0,r12
	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
plpar_hcall9_trace:
	HCALL_INST_PRECALL(R5)

	std	r4,STK_PARAM(R4)(r1)
	mr	r0,r4

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STACK_FRAME_OVERHEAD+STK_PARAM(R11)(r1)
	ld	r11,STACK_FRAME_OVERHEAD+STK_PARAM(R12)(r1)
	ld	r12,STACK_FRAME_OVERHEAD+STK_PARAM(R13)(r1)

	HVSC

	mr	r0,r12
	ld	r12,STACK_FRAME_OVERHEAD+STK_PARAM(R4)(r1)
	std	r4,0(r12)
	std	r5,8(r12)
	std	r6,16(r12)
	std	r7,24(r12)
	std	r8,32(r12)
	std	r9,40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0,64(r12)

	HCALL_INST_POSTCALL(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr
#endif

/* See plpar_hcall_raw to see why this is needed */
_GLOBAL(plpar_hcall9_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std     r4,STK_PARAM(R4)(r1)     /* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARAM(R11)(r1)	 /* put arg7 in R10 */
	ld	r11,STK_PARAM(R12)(r1)	 /* put arg8 in R11 */
	ld	r12,STK_PARAM(R13)(r1)    /* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */