xref: /openbsd/sys/arch/m88k/m88k/m88k_machdep.c (revision ee4ffdb6)
1 /*	$OpenBSD: m88k_machdep.c,v 1.69 2018/10/22 17:31:24 krw Exp $	*/
2 /*
3  * Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr.
4  * Copyright (c) 1996 Nivas Madhur
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *      This product includes software developed by Nivas Madhur.
18  * 4. The name of the author may not be used to endorse or promote products
19  *    derived from this software without specific prior written permission
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  */
33 /*
34  * Mach Operating System
35  * Copyright (c) 1993-1991 Carnegie Mellon University
36  * Copyright (c) 1991 OMRON Corporation
37  * All Rights Reserved.
38  *
39  * Permission to use, copy, modify and distribute this software and its
40  * documentation is hereby granted, provided that both the copyright
41  * notice and this permission notice appear in all copies of the
42  * software, derivative works or modified versions, and any portions
43  * thereof, and that both notices appear in supporting documentation.
44  *
45  */
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52 #include <sys/msgbuf.h>
53 #include <sys/exec.h>
54 #include <sys/errno.h>
55 #include <sys/malloc.h>
56 #ifdef MULTIPROCESSOR
57 #include <sys/mplock.h>
58 #endif
59 
60 #include <machine/asm.h>
61 #include <machine/asm_macro.h>
62 #include <machine/atomic.h>
63 #include <machine/cmmu.h>
64 #include <machine/cpu.h>
65 #include <machine/reg.h>
66 #ifdef M88100
67 #include <machine/m88100.h>
68 #endif
69 
70 #include <uvm/uvm_extern.h>
71 
72 #ifdef DDB
73 #include <machine/db_machdep.h>
74 #include <ddb/db_extern.h>
75 #include <ddb/db_interface.h>
76 #endif /* DDB */
77 
/*
 * One exception vector entry: two 32-bit instruction slots.  Vectors are
 * filled with a nop + branch pair (the slot carrying the branch differs
 * between 88100 and 88110, see vector_init() below).
 */
typedef struct {
	u_int32_t word_one, word_two;
} m88k_exception_vector_area;

void	dumpconf(void);
void	dumpsys(void);
void	regdump(struct trapframe *f);
void	*vector_init(m88k_exception_vector_area *, u_int32_t *, int);
void	atomic_init(void);
87 
88 /*
89  * CMMU and CPU variables
90  */
91 
#ifdef MULTIPROCESSOR
/* NOTE(review): presumably the cpuid of the bootstrap processor — confirm. */
cpuid_t	master_cpu;
/* Spin lock serializing CMMU accesses across processors. */
__cpu_simple_lock_t cmmu_cpu_lock = __SIMPLELOCK_UNLOCKED;
#endif

/*
 * Per-cpu state.  On single-processor kernels cpu0 is marked alive and
 * primary from the start; on MP kernels the flags are set at spin-up.
 */
struct cpu_info m88k_cpus[MAX_CPUS] = {
#ifndef MULTIPROCESSOR
	{ .ci_flags = CIF_ALIVE | CIF_PRIMARY }
#endif
};
/* CMMU operations vector selected for this board/CPU combination. */
const struct cmmu_p *cmmu;

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int   safepri = IPL_NONE;
109 
110 /*
111  * Set registers on exec.
112  * Clear all except sp and pc.
113  */
void
setregs(p, pack, stack, retval)
	struct proc *p;			/* process being set up for exec */
	struct exec_package *pack;	/* exec image; ep_entry is the entry point */
	u_long stack;			/* initial user stack pointer */
	register_t retval[2];		/* values returned to the new image */
{
	struct trapframe *tf = (struct trapframe *)USER_REGS(p);

	/*
	 * Setup proper floating-point settings. This is necessary because
	 * we will return through the exception path, which only saves the
	 * integer registers, and not through cpu_switchto() (which saves
	 * fcr62 and fcr63 in the pcb).  This is safe to do here since the
	 * FPU is enabled and the kernel doesn't use it.
	 */
	__asm__ volatile ("fstcr %r0, %fcr0");
	__asm__ volatile ("fstcr %r0, %fcr62");
	__asm__ volatile ("fstcr %r0, %fcr63");

	/* Start from an all-zeroes frame; only the fields below matter. */
	bzero((caddr_t)tf, sizeof *tf);

#ifdef M88110
	if (CPU_IS88110) {
		/*
		 * user mode, interrupts enabled,
		 * graphics unit, fp enabled
		 */
		tf->tf_epsr = PSR_SFD;
	}
#endif
#ifdef M88100
	if (CPU_IS88100) {
		/*
		 * user mode, interrupts enabled,
		 * no graphics unit, fp enabled
		 */
		tf->tf_epsr = PSR_SFD | PSR_SFD2;
	}
#endif

	/*
	 * We want to start executing at pack->ep_entry. The way to
	 * do this is force the processor to fetch from ep_entry.
	 *
	 * However, since we will return through m{88100,88110}_syscall(),
	 * we need to setup registers so that the success return, when
	 * ``incrementing'' the instruction pointers, will cause the
	 * binary to start at the expected address.
	 *
	 * This relies on the fact that binaries start with
	 *
	 *	br.n	1f
	 *	 or	r2, r0, r30
	 * 1:
	 *
	 * So the first two instructions can be skipped.
	 */
#ifdef M88110
	if (CPU_IS88110) {
		/*
		 * m88110_syscall() will resume at exip + 8... which
		 * really is the first instruction we want to run.
		 */
		tf->tf_exip = pack->ep_entry & XIP_ADDR;
	}
#endif
#ifdef M88100
	if (CPU_IS88100) {
		/*
		 * m88100_syscall() will resume at sfip / sfip + 4...
		 */
		tf->tf_sfip = ((pack->ep_entry + 8) & FIP_ADDR) | FIP_V;

		/*
		 * ... unless we are starting init, in which case we
		 * won't be returning through the regular path, and
		 * need to explicitly set up nip and fip (note that
		 * 88110 do not need such a test).
		 * Note that this isn't 100% correct, as it mishandles
		 * a real execve() from userspace by process 1.  However
		 * our init will never do that, so it's okay.
		 */
		if (p->p_p->ps_pid == 1) {
			tf->tf_snip = tf->tf_sfip;
			tf->tf_sfip += 4;
		}
	}
#endif
	/* Pass the stack address in r2 (also the syscall return value). */
	tf->tf_r[2] = retval[0] = stack;
	tf->tf_r[31] = stack;		/* r31 is the user stack pointer */
	retval[1] = 0;
}
207 
/*
 * Copy a NUL-terminated string from fromaddr to toaddr, writing at most
 * maxlength bytes.  If lencopied is non-NULL, the number of bytes
 * actually copied (including the terminating NUL when it fits) is
 * stored through it.
 *
 * Returns 0 on success, or ENAMETOOLONG if the string (including its
 * terminator) does not fit in maxlength bytes.
 *
 * Note: the previous version performed arithmetic on a `const void *`,
 * which is a GNU C extension; typed byte pointers are standard C.
 */
int
copystr(const void *fromaddr, void *toaddr, size_t maxlength, size_t *lencopied)
{
	const unsigned char *from = fromaddr;
	unsigned char *to = toaddr;
	size_t tally = 0;

	while (maxlength-- != 0) {
		tally++;
		/* Copy one byte; stop once the terminator has been copied. */
		if ((*to++ = *from++) == '\0') {
			if (lencopied != NULL)
				*lencopied = tally;
			return (0);
		}
	}

	/* Ran out of room before seeing the terminator. */
	if (lencopied != NULL)
		*lencopied = tally;

	return (ENAMETOOLONG);
}
233 
234 #ifdef DDB
int longformat = 1;	/* DDB tunable: also dump FP and system registers */
/*
 * Dump the contents of a trapframe to the console, for DDB use.
 * The set of control registers printed depends on the CPU model.
 */
void
regdump(struct trapframe *f)
{
/* Shorthand for the saved general-purpose registers. */
#define R(i) f->tf_r[i]
	printf("R00-05: 0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx\n",
	       R(0),R(1),R(2),R(3),R(4),R(5));
	printf("R06-11: 0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx\n",
	       R(6),R(7),R(8),R(9),R(10),R(11));
	printf("R12-17: 0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx\n",
	       R(12),R(13),R(14),R(15),R(16),R(17));
	printf("R18-23: 0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx\n",
	       R(18),R(19),R(20),R(21),R(22),R(23));
	printf("R24-29: 0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx  0x%08lx\n",
	       R(24),R(25),R(26),R(27),R(28),R(29));
	printf("R30-31: 0x%08lx  0x%08lx\n",R(30),R(31));
#ifdef M88110
	if (CPU_IS88110) {
		/* 88110 exception instruction pointers */
		printf("exip %lx enip %lx\n", f->tf_exip, f->tf_enip);
	}
#endif
#ifdef M88100
	if (CPU_IS88100) {
		/* 88100 shadow instruction pointers */
		printf("sxip %lx snip %lx sfip %lx\n",
		    f->tf_sxip, f->tf_snip, f->tf_sfip);
	}
	if (CPU_IS88100 && ISSET(f->tf_dmt0, DMT_VALID)) {
		/* print dmt stuff for data access fault */
		printf("fault type %ld\n", (f->tf_dpfsr >> 16) & 0x7);
		dae_print((u_int *)f);
	}
	if (CPU_IS88100 && longformat != 0) {
		printf("fpsr %lx fpcr %lx epsr %lx ssbr %lx\n",
		    f->tf_fpsr, f->tf_fpcr, f->tf_epsr, f->tf_ssbr);
		printf("fpecr %lx fphs1 %lx fpls1 %lx fphs2 %lx fpls2 %lx\n",
		    f->tf_fpecr, f->tf_fphs1, f->tf_fpls1,
		    f->tf_fphs2, f->tf_fpls2);
		printf("fppt %lx fprh %lx fprl %lx fpit %lx\n",
		    f->tf_fppt, f->tf_fprh, f->tf_fprl, f->tf_fpit);
		printf("vector %ld mask %lx flags %lx scratch1 %lx cpu %p\n",
		    f->tf_vector, f->tf_mask, f->tf_flags,
		    f->tf_scratch1, f->tf_cpu);
	}
#endif
#ifdef M88110
	if (CPU_IS88110 && longformat != 0) {
		printf("fpsr %lx fpcr %lx fpecr %lx epsr %lx\n",
		    f->tf_fpsr, f->tf_fpcr, f->tf_fpecr, f->tf_epsr);
		printf("dsap %lx duap %lx dsr %lx dlar %lx dpar %lx\n",
		    f->tf_dsap, f->tf_duap, f->tf_dsr, f->tf_dlar, f->tf_dpar);
		printf("isap %lx iuap %lx isr %lx ilar %lx ipar %lx\n",
		    f->tf_isap, f->tf_iuap, f->tf_isr, f->tf_ilar, f->tf_ipar);
		printf("vector %ld mask %lx flags %lx scratch1 %lx cpu %p\n",
		    f->tf_vector, f->tf_mask, f->tf_flags,
		    f->tf_scratch1, f->tf_cpu);
	}
#endif
}
293 #endif	/* DDB */
294 
295 /*
296  * Set up the cpu_info pointer and the cpu number for the current processor.
297  */
298 struct cpu_info *
299 set_cpu_number(cpuid_t number)
300 {
301 	struct cpu_info *ci;
302 
303 #ifdef MULTIPROCESSOR
304 	ci = &m88k_cpus[number];
305 #else
306 	ci = &m88k_cpus[0];
307 #endif
308 	ci->ci_cpuid = number;
309 
310 	__asm__ volatile ("stcr %0, %%cr17" :: "r" (ci));
311 	flush_pipeline();
312 	return ci;
313 }
314 
315 /*
316  * Notify the current process (p) that it has a signal pending,
317  * process as soon as possible.
318  */
319 void
320 signotify(struct proc *p)
321 {
322 	aston(p);
323 	cpu_unidle(p->p_cpu);
324 }
325 
326 #ifdef MULTIPROCESSOR
327 void
328 cpu_unidle(struct cpu_info *ci)
329 {
330 	if (ci != curcpu())
331 		m88k_send_ipi(CI_IPI_NOTIFY, ci->ci_cpuid);
332 }
333 #endif
334 
335 /*
336  * Preempt the current process if in interrupt from user mode,
337  * or after the current trap/syscall if in system mode.
338  */
339 void
340 need_resched(struct cpu_info *ci)
341 {
342 	ci->ci_want_resched = 1;
343 
344 	/* There's a risk we'll be called before the idle threads start */
345 	if (ci->ci_curproc != NULL) {
346 		aston(ci->ci_curproc);
347 		if (ci != curcpu())
348 			cpu_unidle(ci);
349 	}
350 }
351 
352 /*
353  * Generic soft interrupt interface
354  */
355 
356 void	dosoftint(int);
357 int	softpending;
358 
359 void
360 dosoftint(int sir)
361 {
362 	int q, mask;
363 
364 #ifdef MULTIPROCESSOR
365 	__mp_lock(&kernel_lock);
366 #endif
367 
368 	for (q = SI_NQUEUES - 1, mask = 1 << (SI_NQUEUES - 1); mask != 0;
369 	    q--, mask >>= 1)
370 		if (mask & sir)
371 			softintr_dispatch(q);
372 
373 #ifdef MULTIPROCESSOR
374 	__mp_unlock(&kernel_lock);
375 #endif
376 }
377 
378 int
379 spl0()
380 {
381 	int sir;
382 	int s;
383 
384 	/*
385 	 * Try to avoid potentially expensive setipl calls if nothing
386 	 * seems to be pending.
387 	 */
388 	if ((sir = atomic_clear_int(&softpending)) != 0) {
389 		s = setipl(IPL_SOFTINT);
390 		dosoftint(sir);
391 		setipl(IPL_NONE);
392 	} else
393 		s = setipl(IPL_NONE);
394 
395 	return (s);
396 }
397 
#define EMPTY_BR	0xc0000000	/* empty "br" instruction */
#define NO_OP 		0xf4005800	/* "or r0, r0, r0" */

/*
 * Build a "br" instruction branching from FROM to TO: the low 26 bits of
 * the opcode carry the word (instruction) displacement, hence the >> 2.
 */
#define BRANCH(FROM, TO) \
	(EMPTY_BR | ((((vaddr_t)(TO) - (vaddr_t)(FROM)) >> 2) & 0x03ffffff))

/* On 88100, the branch lives in the second slot, after a nop. */
#define SET_VECTOR_88100(NUM, VALUE) \
	do { \
		vbr[NUM].word_one = NO_OP; \
		vbr[NUM].word_two = BRANCH(&vbr[NUM].word_two, VALUE); \
	} while (0)

/* On 88110, the branch lives in the first slot, followed by a nop. */
#define SET_VECTOR_88110(NUM, VALUE) \
	do { \
		vbr[NUM].word_one = BRANCH(&vbr[NUM].word_one, VALUE); \
		vbr[NUM].word_two = NO_OP; \
	} while (0)
415 
416 /*
417  * vector_init(vector, vector_init_list, bootstrap)
418  *
419  * This routine sets up the m88k vector table for the running processor,
420  * as well as the atomic operation routines for multiprocessor kernels.
421  * This is the first C code to run, before anything is initialized.
422  *
423  * I would add an extra four bytes to the exception vectors page pointed
424  * to by the vbr, since the 88100 may execute the first instruction of the
425  * next trap handler, as documented in its Errata. Processing trap #511
426  * would then fall into the next page, unless the address computation wraps,
427  * or software traps can not trigger the issue - the Errata does not provide
428  * more detail. And since the MVME BUG does not add an extra NOP after its
429  * VBR page, I'll assume this is safe for now -- miod
430  */
void *
vector_init(m88k_exception_vector_area *vbr, u_int32_t *vector_init_list,
    int bootstrap)
{
	u_int num;
	u_int32_t vec;

	switch (cputyp) {
	default:
#ifdef M88110
	case CPU_88110:
	    {
		extern void m88110_sigsys(void);
		extern void m88110_syscall_handler(void);
		extern void m88110_cache_flush_handler(void);
		extern void m88110_stepbpt(void);
		extern void m88110_userbpt(void);

		/* Install the platform handlers; the list is zero-terminated. */
		for (num = 0; (vec = vector_init_list[num]) != 0; num++)
			SET_VECTOOR_IGNORE(num, vec);

		if (bootstrap)
			SET_VECTOR_88110(0x03, vector_init_list[num + 1]);

		for (; num < 512; num++)
			SET_VECTOR_88110(num, m88110_sigsys);

		SET_VECTOR_88110(450, m88110_syscall_handler);
		SET_VECTOR_88110(451, m88110_cache_flush_handler);
		/*
		 * GCC will by default produce explicit trap 503
		 * for division by zero
		 */
		SET_VECTOR_88110(503, vector_init_list[8]);
		SET_VECTOR_88110(504, m88110_stepbpt);
		SET_VECTOR_88110(511, m88110_userbpt);
	    }
		break;
#endif
#ifdef M88100
	case CPU_88100:
	    {
		extern void sigsys(void);
		extern void syscall_handler(void);
		extern void cache_flush_handler(void);
		extern void stepbpt(void);
		extern void userbpt(void);

		/* Install the platform handlers; the list is zero-terminated. */
		for (num = 0; (vec = vector_init_list[num]) != 0; num++)
			SET_VECTOR_88100(num, vec);

		/*
		 * During bootstrap, slot 0x03 (data access) gets the entry
		 * stored immediately past the list terminator.
		 */
		if (bootstrap)
			SET_VECTOR_88100(0x03, vector_init_list[num + 1]);

		/* Point all remaining vectors at the catch-all handler. */
		for (; num < 512; num++)
			SET_VECTOR_88100(num, sigsys);

		SET_VECTOR_88100(450, syscall_handler);
		SET_VECTOR_88100(451, cache_flush_handler);
		/*
		 * GCC will by default produce explicit trap 503
		 * for division by zero
		 */
		SET_VECTOR_88100(503, vector_init_list[8]);
		SET_VECTOR_88100(504, stepbpt);
		SET_VECTOR_88100(511, userbpt);
	    }
		break;
#endif
	}

	return vbr;
}
504 
505 #ifdef MULTIPROCESSOR
506 /*
507  * void atomic_init(void);
508  *
509  * This routine sets up proper atomic operation code for SMP kernels
510  * with both 88100 and 88110 support compiled-in. This is crucial enough
511  * to have to be done as early as possible.
512  * This is among the first C code to run, before anything is initialized.
513  */
void
atomic_init()
{
#if defined(M88100) && defined(M88110)
	/*
	 * When both CPU types are compiled in, patch the atomic lock and
	 * unlock routines with the 88100-specific code if running on an
	 * 88100.  NOTE(review): this presumably means the generic
	 * __atomic_lock/__atomic_unlock bodies default to the 88110
	 * flavour, and that the 88100 replacements fit in the space they
	 * overwrite -- confirm against the locore sources.
	 */
	if (cputyp == CPU_88100) {
		extern uint32_t __atomic_lock[];
		extern uint32_t __atomic_lock_88100[], __atomic_lock_88100_end[];
		extern uint32_t __atomic_unlock[];
		extern uint32_t __atomic_unlock_88100[], __atomic_unlock_88100_end[];

		uint32_t *s, *e, *d;

		/* Copy the 88100 lock code over the default implementation. */
		d = __atomic_lock;
		s = __atomic_lock_88100;
		e = __atomic_lock_88100_end;
		while (s != e)
				*d++ = *s++;

		/* Likewise for the unlock code. */
		d = __atomic_unlock;
		s = __atomic_unlock_88100;
		e = __atomic_unlock_88100_end;
		while (s != e)
				*d++ = *s++;
	}
#endif	/* M88100 && M88110 */
}
540 #endif	/* MULTIPROCESSOR */
541 
542 #ifdef MULTIPROCESSOR
543 
544 /*
545  * This function is invoked when it turns out one secondary processor is
546  * not usable.
547  * Be sure to put the process currently running on it in the run queues,
548  * so that another processor can take care of it.
549  */
__dead void
cpu_emergency_disable()
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p = curproc;
	int s;
	extern void savectx(struct pcb *);

	/* If a real process was running here, hand it back to the scheduler. */
	if (p != NULL && p != spc->spc_idleproc) {
		/* Save its context so another cpu can resume it. */
		savectx(curpcb);

		/*
		 * The following is an inline yield(), without the call
		 * to mi_switch().
		 */
		SCHED_LOCK(s);
		p->p_priority = p->p_usrpri;
		p->p_stat = SRUN;
		setrunqueue(p);
		p->p_ru.ru_nvcsw++;
		SCHED_UNLOCK(s);
	}

	/* Mark this cpu dead and spin forever with interrupts disabled. */
	CLR(ci->ci_flags, CIF_ALIVE);
	set_psr(get_psr() | PSR_IND);
	splhigh();

	for (;;)
		continue;
	/* NOTREACHED */
}
582 
583 #endif	/* MULTIPROCESSOR */
584