xref: /freebsd/sys/arm/arm/swtch-v6.S (revision d0b2dbfa)
1/*	$NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $	*/
2
3/*-
4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *      This product includes software developed for the NetBSD Project by
20 *      Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37/*-
38 * Copyright (c) 1994-1998 Mark Brinicombe.
39 * Copyright (c) 1994 Brini.
40 * All rights reserved.
41 *
42 * This code is derived from software written for Brini by Mark Brinicombe
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 *    notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 *    notice, this list of conditions and the following disclaimer in the
51 *    documentation and/or other materials provided with the distribution.
52 * 3. All advertising materials mentioning features or use of this software
53 *    must display the following acknowledgement:
54 *	This product includes software developed by Brini.
55 * 4. The name of the company nor the name of the author may be used to
56 *    endorse or promote products derived from this software without specific
57 *    prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
60 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
61 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
62 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
63 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
64 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
65 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 *
71 * RiscBSD kernel project
72 *
73 * cpuswitch.S
74 *
75 * cpu switching functions
76 *
77 * Created      : 15/10/94
78 *
79 */
80
81#include "assym.inc"
82#include "opt_sched.h"
83
84#include <machine/asm.h>
85#include <machine/asmacros.h>
86#include <machine/armreg.h>
87#include <machine/sysreg.h>
88#include <machine/vfp.h>
/*
 * GET_PCPU(tmp, tmp2) - load the address of this CPU's pcpu structure
 * into 'tmp'; 'tmp2' is scratch (unused in the UP variant).
 *
 * SMP: tmp = __pcpu + (MPIDR & 0xf) * PCPU_SIZE.  Only the low four
 * affinity bits of MPIDR are used, so this lookup supports at most 16
 * CPUs, and it assumes CPU ids are dense in Aff0 — TODO confirm against
 * the platform's MPIDR layout.
 * UP: there is a single pcpu structure; just load its address.
 */
89#if defined(SMP)
90#define GET_PCPU(tmp, tmp2) \
91	mrc 	CP15_MPIDR(tmp);	\
92	and	tmp, tmp, #0xf;		\
93	ldr 	tmp2, .Lcurpcpu+4;	\
94	mul 	tmp, tmp, tmp2;		\
95	ldr	tmp2, .Lcurpcpu;	\
96	add	tmp, tmp, tmp2;
97#else
98
99#define GET_PCPU(tmp, tmp2) \
100	ldr	tmp, .Lcurpcpu
101#endif
102
103#ifdef VFP
104	.fpu vfp	/* allow VFP instructions */
105#endif
106
/*
 * Literal pool for the macro above: base of the pcpu array and the size
 * of one pcpu entry (.Lcurpcpu+4), plus the address of the scheduler's
 * blocked_lock.  Note the sw1 path below loads =blocked_lock through an
 * assembler-generated literal rather than .Lblocked_lock.
 */
107.Lcurpcpu:
108	.word	_C_LABEL(__pcpu)
109	.word	PCPU_SIZE
110.Lblocked_lock:
111	.word	_C_LABEL(blocked_lock)
112
/*
 * cpu_context_switch(new_ttb)
 *
 * Switch the MMU to a new translation table, going through the kernel
 * pmap's table as an intermediate step (rationale in the comment below),
 * then perform the TLB and branch-predictor maintenance the switch needs.
 *
 * In:  r0 = new TTBR0 value (new pmap's translation table)
 *      r8 = current pcpu pointer; the callers in this file establish it
 *           with GET_PCPU before calling (read here for PC_BP_HARDEN_KIND)
 * Clobbers: r0-r2, condition flags.
 */
113ENTRY(cpu_context_switch)
	/* Complete all outstanding memory accesses before retargeting TTBR0. */
114	DSB
115	/*
116	* We can directly switch between translation tables only when the
117	* size of the mapping for any given virtual address is the same
118	* in the old and new translation tables.
119	* Thus, we must switch to kernel pmap translation table as
120	* intermediate mapping because all sizes of these mappings are same
121	* (or unmapped). The same is true for switch from kernel pmap
122	* translation table to new pmap one.
123	*/
124	mov	r2, #(CPU_ASID_KERNEL)
125	ldr	r1, =(_C_LABEL(pmap_kern_ttb))
126	ldr	r1, [r1]
127	mcr	CP15_TTBR0(r1)		/* switch to kernel TTB */
	/* ISB so the new TTBR0 is seen by subsequent instruction fetches. */
128	ISB
129	mcr	CP15_TLBIASID(r2)	/* flush not global TLBs */
	/* Wait for the TLB invalidation to complete before the next switch. */
130	DSB
131	mcr	CP15_TTBR0(r0)		/* switch to new TTB */
132	ISB
133	/*
134	* We must flush not global TLBs again because PT2MAP mapping
135	* is different.
136	*/
137	mcr	CP15_TLBIASID(r2)	/* flush not global TLBs */
138	/*
139	* Flush entire Branch Target Cache because of the branch predictor
140	* is not architecturally invisible. See ARM Architecture Reference
141	* Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch
142	* predictors and Requirements for branch predictor maintenance
143	* operations sections.
144	*/
145	/*
146	 * Additionally, to mitigate mistrained branch predictor attack
147	 * we must invalidate it on affected CPUs. Unfortunately, BPIALL
148	 * is effectively NOP on Cortex-A15 so it needs special treatment.
149	 */
	/* Pick BPIALL or ICIALLU based on the per-cpu hardening kind. */
150	ldr	r0, [r8, #PC_BP_HARDEN_KIND]
151	cmp	r0, #PCPU_BP_HARDEN_KIND_ICIALLU
152	mcrne	CP15_BPIALL		/* Flush entire Branch Target Cache   */
153	mcreq	CP15_ICIALLU		/* This is the only way how to flush  */
154					/* Branch Target Cache on Cortex-A15. */
155	DSB
156	mov	pc, lr
157END(cpu_context_switch)
158
159/*
160 * cpu_throw(oldtd, newtd)
161 *
162 * Remove current thread state,	then select the	next thread to run
163 * and load its	state.
164 * r0 =	oldtd
165 * r1 =	newtd
166 */
167ENTRY(cpu_throw)
168	mov	r10, r0			/* r10 = oldtd */
169	mov	r11, r1			/* r11 = newtd */
170
171#ifdef VFP				/* This thread is dying, disable */
172	bl	_C_LABEL(vfp_discard)	/* VFP without preserving state. */
173#endif
174	GET_PCPU(r8, r9)		/* r8 = current pcpu */
175	ldr	r4, [r8, #PC_CPUID]	/* r4 = current cpu id */
176
	/* oldtd may legitimately be NULL (e.g. during early boot/AP start). */
177	cmp	r10, #0			/* old thread? */
178	beq	2f			/* no, skip */
179
180	/* Remove this CPU from the active list. */
181	ldr	r5, [r8, #PC_CURPMAP]
182	mov	r0, #(PM_ACTIVE)
183	add	r5, r0			/* r5 = old pm_active */
184
185	/* Compute position and mask. */
186#if _NCPUWORDS > 1
	/*
	 * Multi-word cpuset: (cpuid / 32) * 4 (lsr #3 then bic #3) is the
	 * byte offset of the 32-bit word holding this cpu's bit; the bit
	 * within that word is cpuid % 32.
	 */
187	lsr	r0, r4, #3
188	bic	r0, #3
189	add	r5, r0			/* r5 = position in old pm_active */
190	mov	r2, #1
191	and	r0, r4, #31
192	lsl	r2, r0			/* r2 = mask */
193#else
194	mov	r2, #1
195	lsl	r2, r4			/* r2 = mask */
196#endif
197	/* Clear cpu from old active list. */
198#ifdef SMP
	/* ldrex/strex retry loop: atomic read-modify-write of the word. */
1991:	ldrex	r0, [r5]
200	bic	r0, r2
201	strex	r1, r0, [r5]
202	teq	r1, #0
203	bne	1b
204#else
205	ldr	r0, [r5]
206	bic	r0, r2
207	str	r0, [r5]
208#endif
209
2102:
211#ifdef INVARIANTS
212	cmp	r11, #0			/* new thread? */
213	beq	badsw1			/* no, panic */
214#endif
215	ldr	r7, [r11, #(TD_PCB)]	/* r7 = new PCB */
216
217	/*
218	 * Registers at this point
219	 *   r4  = current cpu id
220	 *   r7  = new PCB
221	 *   r8  = current pcpu
222	 *   r11 = newtd
223	 */
224
225	/* MMU switch to new thread. */
226	ldr	r0, [r7, #(PCB_PAGEDIR)]
227#ifdef INVARIANTS
228	cmp	r0, #0			/* new thread? */
229	beq	badsw4			/* no, panic */
230#endif
	/* cpu_context_switch reads r8 (pcpu), which GET_PCPU set up above. */
231	bl	_C_LABEL(cpu_context_switch)
232
233	/*
234	 * Set new PMAP as current one.
235	 * Insert cpu to new active list.
236	 */
237
238	ldr	r6, [r11, #(TD_PROC)]	/* newtd->proc */
239	ldr	r6, [r6, #(P_VMSPACE)]	/* newtd->proc->vmspace */
240	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */
241	str	r6, [r8, #PC_CURPMAP]	/* store to curpmap */
242
243	mov	r0, #PM_ACTIVE
244	add	r6, r0			/* r6 = new pm_active */
245
246	/* compute position and mask */
247#if _NCPUWORDS > 1
	/* Same word/bit arithmetic as above, now against the new pm_active. */
248	lsr	r0, r4, #3
249	bic	r0, #3
250	add	r6, r0			/* r6 = position in new pm_active */
251	mov	r2, #1
252	and	r0, r4, #31
253	lsl	r2, r0			/* r2 = mask */
254#else
255	mov	r2, #1
256	lsl	r2, r4 			/* r2 = mask */
257#endif
258	/* Set cpu to new active list. */
259#ifdef SMP
2601:	ldrex	r0, [r6]
261	orr	r0, r2
262	strex	r1, r0, [r6]
263	teq	r1, #0
264	bne	1b
265#else
266	ldr	r0, [r6]
267	orr	r0, r2
268	str	r0, [r6]
269#endif
270	/*
271	 * Registers at this point.
272	 *   r7  = new PCB
273	 *   r8  = current pcpu
274	 *   r11 = newtd
275	 * They must match the ones in sw1 position !!!
276	 */
	/* DMB: publish the pmap/active-list updates before running newtd. */
277	DMB
278	b	sw1	/* share new thread init with cpu_switch() */
279END(cpu_throw)
280
281/*
282 * cpu_switch(oldtd, newtd, lock)
283 *
284 * Save the current thread state, then select the next thread to run
285 * and load its state.
286 * r0 = oldtd
287 * r1 = newtd
288 * r2 = lock (new lock for old thread)
289 */
290ENTRY(cpu_switch)
291	/* Interrupts are disabled. */
292#ifdef INVARIANTS
293	cmp	r0, #0			/* old thread? */
294	beq	badsw2			/* no, panic */
295#endif
296	/* Save all the registers in the old thread's pcb. */
297	ldr	r3, [r0, #(TD_PCB)]
298	add	r3, #(PCB_R4)
	/*
	 * Twelve words stored ascending from PCB_R4: r4-r12, sp, lr, pc.
	 * The switch-in path (ldmia at the end of sw1) reloads only eleven
	 * {r4-r12, sp, pc}, so the restored pc comes from the slot where lr
	 * is saved here: the switched-out thread later resumes at our
	 * caller, as if cpu_switch had simply returned.
	 */
299	stmia	r3, {r4-r12, sp, lr, pc}
	/* Save the userspace TLS pointer (thread ID register) as well. */
300	mrc	CP15_TPIDRURW(r4)
301	str	r4, [r3, #(PCB_TPIDRURW - PCB_R4)]
302
303#ifdef INVARIANTS
304	cmp	r1, #0			/* new thread? */
305	beq	badsw3			/* no, panic */
306#endif
307	/*
308	 * Save arguments. Note that we can now use r0-r14 until
309	 * it is time to restore them for the new thread. However,
310	 * some registers are not safe over function call.
311	 */
312	mov	r9, r2			/* r9 = lock */
313	mov	r10, r0			/* r10 = oldtd */
314	mov	r11, r1			/* r11 = newtd */
315
316	GET_PCPU(r8, r3)		/* r8 = current PCPU */
317	ldr	r7, [r11, #(TD_PCB)]	/* r7 = newtd->td_pcb */
318
319
320
321#ifdef VFP
	/* vfp_save_state(oldtd, oldtd->td_pcb): save outgoing VFP state. */
322	ldr	r3, [r10, #(TD_PCB)]
323	mov	r1, r3
324	mov	r0, r10
325	bl	_C_LABEL(vfp_save_state)
326#endif
327
328	/*
329	 * MMU switch. If we're switching to a thread with the same
330	 * address space as the outgoing one, we can skip the MMU switch.
331	 */
332	mrc	CP15_TTBR0(r1)		/* r1 = old TTB */
333	ldr	r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
334	cmp	r0, r1			/* Switching to the TTB? */
335	beq	sw0			/* same TTB, skip */
336
337#ifdef INVARIANTS
338	cmp	r0, #0			/* new thread? */
339	beq	badsw4			/* no, panic */
340#endif
341
	/* cpu_context_switch reads r8 (pcpu), set up by GET_PCPU above. */
342	bl	cpu_context_switch	/* new TTB as argument */
343
344	/*
345	 * Registers at this point
346	 *   r7  = new PCB
347	 *   r8  = current pcpu
348	 *   r9  = lock
349	 *   r10 = oldtd
350	 *   r11 = newtd
351	 */
352
353	/*
354	 * Set new PMAP as current one.
355	 * Update active list on PMAPs.
356	 */
357	ldr	r6, [r11, #TD_PROC]	/* newtd->proc */
358	ldr	r6, [r6, #P_VMSPACE]	/* newtd->proc->vmspace */
359	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */
360
361	ldr	r5, [r8, #PC_CURPMAP]	/* get old curpmap */
362	str	r6, [r8, #PC_CURPMAP]	/* and save new one */
363
364	mov	r0, #PM_ACTIVE
365	add	r5, r0			/* r5 = old pm_active */
366	add	r6, r0			/* r6 = new pm_active */
367
368	/* Compute position and mask. */
369	ldr	r4, [r8, #PC_CPUID]
370#if _NCPUWORDS > 1
	/*
	 * Multi-word cpuset: (cpuid / 32) * 4 (lsr #3 then bic #3) is the
	 * byte offset of the 32-bit word holding this cpu's bit; the bit
	 * within that word is cpuid % 32.
	 */
371	lsr	r0, r4, #3
372	bic	r0, #3
373	add	r5, r0			/* r5 = position in old pm_active */
374	add	r6, r0			/* r6 = position in new pm_active */
375	mov	r2, #1
376	and	r0, r4, #31
377	lsl	r2, r0			/* r2 = mask */
378#else
379	mov	r2, #1
380	lsl	r2, r4			/* r2 = mask */
381#endif
382	/* Clear cpu from old active list. */
383#ifdef SMP
	/* ldrex/strex retry loop: atomic read-modify-write of the word. */
3841:	ldrex	r0, [r5]
385	bic	r0, r2
386	strex	r1, r0, [r5]
387	teq	r1, #0
388	bne	1b
389#else
390	ldr	r0, [r5]
391	bic	r0, r2
392	str	r0, [r5]
393#endif
394	/* Set cpu to new active list. */
395#ifdef SMP
3961:	ldrex	r0, [r6]
397	orr	r0, r2
398	strex	r1, r0, [r6]
399	teq	r1, #0
400	bne	1b
401#else
402	ldr	r0, [r6]
403	orr	r0, r2
404	str	r0, [r6]
405#endif
406
407sw0:
408	/*
409	 * Registers at this point
410	 *   r7  = new PCB
411	 *   r8  = current pcpu
412	 *   r9  = lock
413	 *   r10 = oldtd
414	 *   r11 = newtd
415	 */
416
	/*
	 * Hand oldtd over to its new lock: an atomic store of r9 into
	 * oldtd->td_lock, bracketed by DMBs so all of our updates are
	 * visible before another CPU can pick the old thread up.
	 */
417	/* Change the old thread lock. */
418	add	r5, r10, #TD_LOCK
419	DMB
4201:	ldrex	r0, [r5]
421	strex	r1, r9, [r5]
422	teq	r1, #0
423	bne	1b
424	DMB
425
/* sw1: common switch-in path, also entered from cpu_throw. */
426sw1:
	/* Drop any exclusive monitor state left over from the loops above. */
427	clrex
428	/*
429	 * Registers at this point
430	 *   r7  = new PCB
431	 *   r8  = current pcpu
432	 *   r11 = newtd
433	 */
434
435#if defined(SMP) && defined(SCHED_ULE)
436	/*
437	 * 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE
438	 * QQQ: What does it mean in reality and why is it done?
439	 */
	/* Spin until newtd->td_lock is no longer blocked_lock, i.e. the
	 * CPU that switched newtd out has finished releasing it. */
440	ldr	r6, =blocked_lock
4411:
442	ldr	r3, [r11, #TD_LOCK]	/* atomic write regular read */
443	cmp	r3, r6
444	beq	1b
445#endif
446
447	/* We have a new curthread now so make a note it */
448	str	r11, [r8, #PC_CURTHREAD]
	/* Mirror curthread into TPIDRPRW (per-CPU thread ID register). */
449	mcr	CP15_TPIDRPRW(r11)
450
451	/* store pcb in per cpu structure */
452	str	r7, [r8, #PC_CURPCB]
453
454	/*
455	 * Restore all saved registers and return. Note that some saved
456	 * registers can be changed when either cpu_fork(), cpu_copy_thread(),
457	 * cpu_fork_kthread_handler(), or makectx() was called.
458	 *
459	 * The value of TPIDRURW is also written into TPIDRURO, as
460	 * userspace still uses TPIDRURO, modifying it through
461	 * sysarch(ARM_SET_TP, addr).
462	 */
463	ldr	r3, [r7, #PCB_TPIDRURW]
464	mcr	CP15_TPIDRURW(r3)	/* write tls thread reg 2 */
465	mcr	CP15_TPIDRURO(r3)	/* write tls thread reg 3 */
	/* Load r4-r12, sp and pc; pc comes from the saved-lr slot (see the
	 * note at the stmia above), returning to the new thread's caller. */
466	add	r3, r7, #PCB_R4
467	ldmia	r3, {r4-r12, sp, pc}
468
469#ifdef INVARIANTS
/* INVARIANTS-only panic stubs; panic() never returns, the 1b loop is a
 * belt-and-suspenders trap in case it somehow does. */
470badsw1:
471	ldr	r0, =sw1_panic_str
472	bl	_C_LABEL(panic)
4731:	nop
474	b	1b
475
476badsw2:
477	ldr	r0, =sw2_panic_str
478	bl	_C_LABEL(panic)
4791:	nop
480	b	1b
481
482badsw3:
483	ldr	r0, =sw3_panic_str
484	bl	_C_LABEL(panic)
4851:	nop
486	b	1b
487
488badsw4:
489	ldr	r0, =sw4_panic_str
490	bl	_C_LABEL(panic)
4911:	nop
492	b	1b
493
494sw1_panic_str:
495	.asciz	"cpu_throw: no newthread supplied.\n"
496sw2_panic_str:
497	.asciz	"cpu_switch: no curthread supplied.\n"
498sw3_panic_str:
499	.asciz	"cpu_switch: no newthread supplied.\n"
500sw4_panic_str:
501	.asciz	"cpu_switch: new pagedir is NULL.\n"
502#endif
503END(cpu_switch)
504