1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Copyright (C) 2013 Imagination Technologies
4 * Author: Paul Burton <paul.burton@mips.com>
5 */
6
7#include <asm/addrspace.h>
8#include <asm/asm.h>
9#include <asm/asm-offsets.h>
10#include <asm/asmmacro.h>
11#include <asm/cacheops.h>
12#include <asm/eva.h>
13#include <asm/mipsregs.h>
14#include <asm/mipsmtregs.h>
15#include <asm/pm.h>
16
17#define GCR_CPC_BASE_OFS	0x0088
18#define GCR_CL_COHERENCE_OFS	0x2008
19#define GCR_CL_ID_OFS		0x2028
20
21#define CPC_CL_VC_STOP_OFS	0x2020
22#define CPC_CL_VC_RUN_OFS	0x2028
23
24.extern mips_cm_base
25
26.set noreorder
27
28#ifdef CONFIG_64BIT
29# define STATUS_BITDEPS		ST0_KX
30#else
31# define STATUS_BITDEPS		0
32#endif
33
#ifdef CONFIG_MIPS_CPS_NS16550

/*
 * Dump the name of the taken exception to the debug UART before falling
 * through to the hang loop. The TEXT() macro emits the string at local
 * label 8, which the PTR_LA references via 8f; mips_cps_bev_dump is then
 * called with a0 = pointer to that string.
 */
#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

/* No debug UART configured: exception dumping is a no-op. */
#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */
47
	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 *
	 * Walks the Config register chain: bit 31 (the M bit) of each Config
	 * register indicates whether the next Config register is implemented,
	 * so bgez (sign bit clear) means the chain ends before Config3 and
	 * there can be no MT ASE. Finally tests Config3.MT. Clobbers \dest.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt		/* Config1.M clear: no Config2/3 */
	 mfc0	\dest, CP0_CONFIG, 2	/* (branch delay slot) */
	bgez	\dest, \nomt		/* Config2.M clear: no Config3 */
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt		/* Config3.MT clear: no MT ASE */
	 nop
	.endm
62
	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 *
	 * As with has_mt, each bgez checks the M (bit 31) continuation bit of
	 * the just-read Config register; the chain must extend to Config5,
	 * whose VP bit indicates virtual processor support. Clobbers \dest.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt		/* Config1.M clear: chain ends */
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt		/* Config2.M clear: chain ends */
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt		/* Config3.M clear: chain ends */
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt		/* Config4.M clear: no Config5 */
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt		/* Config5.VP clear: no VPs */
	 nop
	.endm
82
	/*
	 * Calculate an uncached address for the CM GCRs & place it in dest.
	 *
	 * CP0 CMGCRBase holds the physical base address of the Coherence
	 * Manager's Global Configuration Registers shifted right by 4 bits,
	 * hence the left shift; adding UNCAC_BASE yields an uncached virtual
	 * address. Clobbers $1 (at), hence the noat.
	 */
	.macro	cmgcrb	dest
	.set	push
	.set	noat
	MFC0	$1, CP0_CMGCRBASE
	PTR_SLL	$1, $1, 4		/* $1 = GCR physical base */
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1	/* dest = uncached GCR base */
	.set	pop
	.endm
93
/*
 * Entry point for cores/VPEs brought online via the CPS SMP implementation.
 * This code doubles as the BEV exception vector base, so it is aligned to
 * 4KB and the standard vector offsets (.org 0x200 etc. below) are laid out
 * relative to it.
 */
.balign 0x1000

LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	 nop

	/* This is an NMI: hand off to the NMI handler & never return here */
	PTR_LA	k0, nmi_handler
	jr	k0
	 nop

not_nmi:
	/* Setup Cause: use the special interrupt vector (Cause.IV) */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status: CP0 & CP1 usable, boot exception vectors, plus KX on 64-bit */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/*
	 * Skip cache & coherence setup if we're already coherent. s7 records
	 * whether we started coherent & is tested again after
	 * mips_cps_get_bootcfg below to skip core-level init.
	 */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain (0xff = all coherence domain bits set) */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb				/* clear the execution hazard */

	/* Set Kseg0 CCA to that in s0 (patched in at the .word above) */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7			/* set the K0 CCA field... */
	xori	t0, 0x7			/* ...then clear it */
	or	t0, t0, s0		/* insert the requested CCA */
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 so we run cached with the new CCA */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers (v0/v1/t9, see below) */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent (s7 set above) */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 * a0 = this core's struct core_boot_config (from v0)
	 * a1 = this VPE's ID (from t9)
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0			/* (branch delay slot) */

	/*
	 * Off we go! Load this VPE's entry PC, gp & sp from its struct
	 * vpe_boot_config (pointer in v1, from mips_cps_get_bootcfg).
	 */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_entry)
184
/* TLB refill exception vector (vector base + 0x200 with Status.BEV set) */
.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.			/* unexpected here: hang */
	 nop
	END(excep_tlbfill)
191
/* XTLB refill exception vector (vector base + 0x280, 64-bit refill) */
.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.			/* unexpected here: hang */
	 nop
	END(excep_xtlbfill)
198
/* Cache error exception vector (vector base + 0x300) */
.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.			/* unexpected here: hang */
	 nop
	END(excep_cache)
205
/* General exception vector (vector base + 0x380) */
.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.			/* unexpected here: hang */
	 nop
	END(excep_genex)
212
/* Interrupt exception vector (vector base + 0x400, used when Cause.IV set) */
.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.			/* unexpected here: hang */
	 nop
	END(excep_intex)
219
/* EJTAG debug exception vector (vector base + 0x480): defer to the handler */
.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)
226
/*
 * mips_cps_core_init() - perform core-level initialisation
 *
 * With CONFIG_MIPS_MT_SMP: restricts execution to a single TC on a single
 * VPE, then places every VPE in the core other than VPE0 into a halted,
 * non-active state with one exclusive TC bound to it (1:1 TC:VPE), ready
 * for mips_cps_boot_vpes to start later. Without MT support (or without
 * CONFIG_MIPS_MT_SMP) this is a no-op.
 *
 * Clobbers (MT case): t0, t1, ta1, ta3. Returns via ra.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE; if not, just return */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1			/* clear the instruction hazard */
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core (MVPConf0.PVPE + 1) */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1		/* ta3 = VPE count */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core, starting from VPE1 */
	li	ta1, 1

1:	/* Operate on the appropriate TC (TargTC = ta1) */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3		/* more VPEs remaining? */
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)
299
/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 *
 * Clobbers: t0, t1, t2, ta2, ta3. Returns via ra.
 */
LEAF(mips_cps_get_bootcfg)
	/*
	 * Calculate a pointer to this cores struct core_boot_config:
	 * v0 = mips_cps_core_bootcfg + (core ID * COREBOOTCFG_SIZE)
	 */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)	/* t0 = this core's ID from the CM */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)		/* t1 = *mips_cps_core_bootcfg */
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core (MVPConf0.PVPE + 1) */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/*
	 * Calculate a mask for the VPE ID from EBase.CPUNum:
	 * t1 = (1 << (32 - clz(vpe_count))) - 1, ie. the smallest all-ones
	 * mask wide enough to hold any valid VPE index.
	 */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1		/* t1 = floor(log2(vpe_count)) + 1 - 1 */
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1		/* t1 = VPE ID mask */

	/* Retrieve the VPE ID from EBase.CPUNum (CP0 register 15, select 1) */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/*
	 * Calculate a pointer to this VPEs struct vpe_boot_config:
	 * v1 = core_bootcfg->vpe_config + (VPE ID * VPEBOOTCFG_SIZE)
	 */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)
359
/*
 * mips_cps_boot_vpes() - start the VPs/VPEs of this core that should run
 *
 * a0: pointer to this core's struct core_boot_config
 * a1: the ID of the VPE executing this code
 *
 * Starts every VP (MIPSr6) or VPE (MT ASE) within the core whose bit is set
 * in the core's VPE mask, and stops/halts the calling VPE if its own bit is
 * clear. Returns via ra (unless this VPE was meant to be offline, in which
 * case it halts and never returns). Clobbers t0-t3, t8, ta1-ta3.
 */
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)	/* ta2 = mask of VPEs to run */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)	/* ta3 = vpe_config array */

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/*
	 * Find base address of CPC: read the CPC base from the GCRs, mask off
	 * the low (non-address) bits & convert to an uncached address.
	 */
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1			/* clear the instruction hazard */
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE; t8 keeps the full mask for the check below */
	move	t8, ta2
	li	ta1, 0			/* ta1 = current VPE index */

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC: set VPEControl.TargTC = ta1 */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC	/* set the TargTC field... */
	xori	t0, t0, VPECONTROL_TARGTC	/* ...then clear it */
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted (ie. it's already running) */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0    t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0    t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0    t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit so it may run */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE: shift the mask & advance the index */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state & re-enable other VPEs */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE (bit a1 of the saved mask) is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/*
	 * This VPE should be offline, halt the TC & spin (with a hazard
	 * barrier) until the halt takes effect.
	 */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:

#endif /* CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)
522
/*
 * mips_cps_cache_init() - initialise this core's L1 caches
 *
 * Invalidates the L1 I-cache & D-cache by writing zeroed tags to every
 * index, sizing each cache from the Config1 IL/IS/IA (resp. DL/DS/DA)
 * fields. A zero line-size field means the cache is absent & is skipped.
 *
 * Clobbers: a0, a1, v0, t0-t3. Returns via ra. Must run before entering
 * the coherent domain (see caller in mips_cps_core_entry).
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size: t0 = 2 << IL bytes; IL == 0 => no I-cache */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/*
	 * Detect I-cache size: sets/way = 32 << (IS + 1) = 64 << IS.
	 * NOTE(review): when IS == 7 the shift is skipped & t1 is left
	 * holding the raw field value 7 - confirm that encoding (32 sets
	 * per way on some cores) cannot occur on CPS systems.
	 */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1		/* t2 = associativity (IA + 1) */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total I-cache bytes */

	/* Index-store zeroed tags through every line of the I-cache */
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size: t0 = 2 << DL bytes; DL == 0 => no D-cache */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size: same scheme (and same IS/DS == 7 caveat) as above */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1		/* t2 = associativity (DA + 1) */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total D-cache bytes */

	/*
	 * Index-store zeroed tags through every line of the D-cache. Here a1
	 * is the address of the LAST line & the increment sits in the branch
	 * delay slot, so the loop still touches every line exactly once.
	 */
	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)
596
597#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
598
	/*
	 * Calculate a pointer to this CPUs struct mips_static_suspend_state
	 * & place it in dest:
	 *   dest = &cps_cpu_state + __per_cpu_offset[thread_info->cpu]
	 * NOTE(review): relies on gp holding the current thread_info pointer,
	 * as MIPS kernel-mode code conventionally arranges - confirm for all
	 * callers. Clobbers $1 (at), hence the noat.
	 */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)		/* $1 = this CPU's number */
	sll	$1, $1, LONGLOG		/* scale to a long-sized array index */
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)		/* $1 = __per_cpu_offset[cpu] */
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1
	.set	pop
	.endm
612
/*
 * mips_cps_pm_save() - save CPU state prior to a CPS power-management entry
 *
 * Saves registers via SUSPEND_SAVE_REGS, then static CPU state into this
 * CPU's struct mips_static_suspend_state (pointer computed into t1 by the
 * psstate macro). Returns by jumping to v0 rather than ra - NOTE(review):
 * presumably the caller places its continuation in v0 (see asm/pm.h for the
 * SUSPEND_* macro contract); confirm against callers.
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)
621
/*
 * mips_cps_pm_restore() - restore CPU state after a CPS power-management exit
 *
 * Restores the static CPU state saved by mips_cps_pm_save from this CPU's
 * struct mips_static_suspend_state (pointer computed into t1 by psstate),
 * then restores registers & returns via RESUME_RESTORE_REGS_RETURN (which
 * includes the return - see asm/pm.h).
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)
628
629#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
630