/* $NetBSD: cpu.c,v 1.83 2019/12/03 15:20:59 riastradh Exp $ */

/*-
 * Copyright (c) 2007 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cpu.h"
#include "opt_hz.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.83 2019/12/03 15:20:59 riastradh Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/lwp.h>
#include <sys/cpu.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/mount.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/mainbus.h>
#include <machine/pcb.h>
#include <machine/machdep.h>
#include <machine/thunk.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#if __GNUC_PREREQ__(4,4)
#define cpu_unreachable()	__builtin_unreachable()
#else
#define cpu_unreachable()	do { thunk_abort(); } while (0)
#endif

static int	cpu_match(device_t, cfdata_t, void *);
static void	cpu_attach(device_t, device_t, void *);

/* XXX */
//extern void *_lwp_getprivate(void);
//extern int _lwp_setprivate(void *);


struct cpu_info cpu_info_primary = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
};

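/*
 * Per-CPU softc.  Besides the device handles it carries a spare ucontext
 * (sc_ucp) with its own page-sized stack; cpu_switchto() retargets this
 * context at cpu_switchto_atomic() so the actual curlwp/errno handoff runs
 * on a stack owned by neither the outgoing nor the incoming lwp.
 */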
typedef struct cpu_softc {
	device_t	sc_dev;
	struct cpu_info	*sc_ci;

	ucontext_t	sc_ucp;
	uint8_t		sc_ucp_stack[PAGE_SIZE];
} cpu_softc_t;


/* statics */
static struct pcb lwp0pcb;
static void *um_msgbuf;


/* attachment */
CFATTACH_DECL_NEW(cpu, sizeof(cpu_softc_t), cpu_match, cpu_attach, NULL, NULL);

static int
cpu_match(device_t parent, cfdata_t match, void *opaque)
{
	struct thunkbus_attach_args *taa = opaque;

	if (taa->taa_type != THUNKBUS_TYPE_CPU)
		return 0;

	return 1;
}

static void
cpu_attach(device_t parent, device_t self, void *opaque)
{
	cpu_softc_t *sc = device_private(self);

	aprint_naive("\n");
	aprint_normal("\n");

	cpu_info_primary.ci_dev = self;
	sc->sc_dev = self;
	sc->sc_ci = &cpu_info_primary;

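	/*
	 * Prime the switch context: give it its own small stack and have it
	 * block the signals that presumably serve as interrupt sources on
	 * this port (SIGALRM for the clock, SIGIO for devices, SIGINT and
	 * SIGTSTP for the console), so a switch cannot itself be interrupted.
	 */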
	thunk_getcontext(&sc->sc_ucp);
	sc->sc_ucp.uc_stack.ss_sp = sc->sc_ucp_stack;
	sc->sc_ucp.uc_stack.ss_size = PAGE_SIZE - sizeof(register_t);
	sc->sc_ucp.uc_flags = _UC_STACK | _UC_CPU | _UC_SIGMASK;
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGALRM);
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGIO);
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGINT);
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGTSTP);
}

void
cpu_configure(void)
{
	cpu_setmodel("virtual processor");
	if (config_rootfound("mainbus", NULL) == NULL)
		panic("configure: mainbus not configured");

	spl0();
}


/* main guts */
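/*
 * Halt or reboot the "machine".  On the usermode port a powerdown simply
 * exits the host process via thunk_exit(), a dump request turns into a
 * host abort, and the actual reboot is left to usermode_reboot().
 */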
void
cpu_reboot(int howto, char *bootstr)
{
	extern void usermode_reboot(void);

	if (cold)
		howto |= RB_HALT;

	if ((howto & RB_NOSYNC) == 0)
		vfs_shutdown();
	else
		suspendsched();

	doshutdownhooks();
	pmf_system_shutdown(boothowto);

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN)
		thunk_exit(0);

	splhigh();

	if (howto & RB_DUMP)
		thunk_abort();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n");

	usermode_reboot();

	/* NOTREACHED */
	cpu_unreachable();
}

void
cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
{
	aston(ci);
}

void
cpu_need_proftick(struct lwp *l)
{
}

int
cpu_lwp_setprivate(lwp_t *l, void *ptr)
{
	struct pcb *pcb = lwp_getpcb(l);

	/* set both ucontexts up for TLS just in case */
	pcb->pcb_ucp.uc_mcontext._mc_tlsbase =
		(uintptr_t) ptr;
	pcb->pcb_ucp.uc_flags |= _UC_TLSBASE;

	pcb->pcb_userret_ucp.uc_mcontext._mc_tlsbase =
		(uintptr_t) ptr;
	pcb->pcb_userret_ucp.uc_flags |= _UC_TLSBASE;

	return 0;
}

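/*
 * Final, uninterruptible leg of a context switch, run on the softc switch
 * stack (see cpu_switchto() below).  It stashes the outgoing lwp for the
 * caller, saves/restores the host errno in the pcbs, installs the new
 * curlwp and jumps into the new lwp's saved context; it never returns.
 */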
static
void
cpu_switchto_atomic(lwp_t *oldlwp, lwp_t *newlwp)
{
	struct pcb *oldpcb;
	struct pcb *newpcb;
	struct cpu_info *ci;
	int s;

	oldpcb = oldlwp ? lwp_getpcb(oldlwp) : NULL;
	newpcb = lwp_getpcb(newlwp);
	ci = curcpu();

	s = splhigh();

	ci->ci_stash = oldlwp;
	if (oldpcb)
		oldpcb->pcb_errno = thunk_geterrno();

	thunk_seterrno(newpcb->pcb_errno);
	curlwp = newlwp;

	splx(s);

	if (thunk_setcontext(&newpcb->pcb_ucp))
		panic("setcontext failed");

	/* not reached */
}


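/*
 * MI context switch hook.  The switch runs in two steps: point the softc
 * switch context at cpu_switchto_atomic(oldlwp, newlwp), then swapcontext()
 * away from the old lwp (or plain setcontext() when there is no old lwp).
 * When the old lwp is eventually switched back to, execution resumes after
 * the swapcontext() call and the previously running lwp is returned from
 * ci_stash.
 */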
lwp_t *
cpu_switchto(lwp_t *oldlwp, lwp_t *newlwp, bool returning)
{
	struct pcb *oldpcb = oldlwp ? lwp_getpcb(oldlwp) : NULL;
	struct pcb *newpcb = lwp_getpcb(newlwp);
	struct cpu_info *ci = curcpu();
	cpu_softc_t *sc = device_private(ci->ci_dev);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_switchto [%s,pid=%d,lid=%d] -> [%s,pid=%d,lid=%d]\n",
	    oldlwp ? oldlwp->l_name : "none",
	    oldlwp ? oldlwp->l_proc->p_pid : -1,
	    oldlwp ? oldlwp->l_lid : -1,
	    newlwp ? newlwp->l_name : "none",
	    newlwp ? newlwp->l_proc->p_pid : -1,
	    newlwp ? newlwp->l_lid : -1);
	if (oldpcb) {
		thunk_printf_debug("    oldpcb uc_link=%p, uc_stack.ss_sp=%p, "
		    "uc_stack.ss_size=%d, l_private %p, uc_mcontext._mc_tlsbase=%p(%s)\n",
		    oldpcb->pcb_ucp.uc_link,
		    oldpcb->pcb_ucp.uc_stack.ss_sp,
		    (int)oldpcb->pcb_ucp.uc_stack.ss_size,
		    (void *) oldlwp->l_private,
		    (void *) oldpcb->pcb_ucp.uc_mcontext._mc_tlsbase,
		    oldpcb->pcb_ucp.uc_flags & _UC_TLSBASE ? "ON" : "off");
	}
	if (newpcb) {
		thunk_printf_debug("    newpcb uc_link=%p, uc_stack.ss_sp=%p, "
		    "uc_stack.ss_size=%d, l_private %p, uc_mcontext._mc_tlsbase=%p(%s)\n",
		    newpcb->pcb_ucp.uc_link,
		    newpcb->pcb_ucp.uc_stack.ss_sp,
		    (int)newpcb->pcb_ucp.uc_stack.ss_size,
		    (void *) newlwp->l_private,
		    (void *) newpcb->pcb_ucp.uc_mcontext._mc_tlsbase,
		    newpcb->pcb_ucp.uc_flags & _UC_TLSBASE ? "ON" : "off");
	}
#endif /* CPU_DEBUG */

	/* create the atomic switcher on the softc switch stack */
	KASSERT(newlwp);
	KASSERT(sc);
	thunk_makecontext(&sc->sc_ucp, (void (*)(void)) cpu_switchto_atomic,
			2, oldlwp, newlwp, NULL, NULL);
	if (oldpcb) {
		thunk_swapcontext(&oldpcb->pcb_ucp, &sc->sc_ucp);
		/* returns here when we are switched back to */
	} else {
		thunk_setcontext(&sc->sc_ucp);
		/* never returns */
	}

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_switchto: returning %p (was %p)\n", ci->ci_stash, oldlwp);
#endif
	return ci->ci_stash;
}

void
cpu_dumpconf(void)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_dumpconf\n");
#endif
}

void
cpu_signotify(struct lwp *l)
{
}

void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	struct pcb *pcb = lwp_getpcb(l);
	ucontext_t *ucp = &pcb->pcb_userret_ucp;

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_getmcontext\n");
#endif
	memcpy(mcp, &ucp->uc_mcontext, sizeof(mcontext_t));

	/* report that we have the CPU, FPU and TLSBASE registers */
	mcp->_mc_tlsbase = (uintptr_t) l->l_private;
	*flags = _UC_CPU | _UC_FPU | _UC_TLSBASE;

	return;
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
	/*
	 * Can we check here, or should that be done in the target
	 * specific places?
	 */
	/* XXX NO CHECKING! XXX */
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_mcontext_validate\n");
#endif
	return 0;
}

int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct pcb *pcb = lwp_getpcb(l);
	ucontext_t *ucp = &pcb->pcb_userret_ucp;

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_setmcontext\n");
#endif
	if ((flags & _UC_CPU) != 0)
		memcpy(&ucp->uc_mcontext.__gregs, &mcp->__gregs, sizeof(__gregset_t));
	if ((flags & _UC_FPU) != 0)
		memcpy(&ucp->uc_mcontext.__fpregs, &mcp->__fpregs, sizeof(__fpregset_t));
	if ((flags & _UC_TLSBASE) != 0)
		lwp_setprivate(l, (void *) (uintptr_t) mcp->_mc_tlsbase);

#if 0
	/*
	 * XXX we ignore the set and clear stack requests since signals
	 * are handled slightly differently on this port.
	 */
	thunk_printf("%s: flags %x\n", __func__, flags);
	mutex_enter(l->l_proc->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(l->l_proc->p_lock);
#endif

	ucp->uc_flags |= (flags & (_UC_CPU | _UC_FPU | _UC_TLSBASE));

	return 0;
}

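/*
 * Idle loop body: unless a reschedule is already pending, wait in the host
 * via thunk_idle(), which presumably blocks until an "interrupt" (signal)
 * arrives.
 */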
void
cpu_idle(void)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_want_resched)
		return;

	thunk_idle();
}

void
cpu_lwp_free(struct lwp *l, int proc)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_free (dummy)\n");
#endif
}

void
cpu_lwp_free2(struct lwp *l)
{
	struct pcb *pcb = lwp_getpcb(l);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_free2\n");
#endif

	if (pcb == NULL)
		return;
	/* XXX nothing to do? */
}

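/*
 * First code run by a newly created lwp, entered through the context built
 * in cpu_lwp_fork() below.  It finishes MI setup with lwp_startup() and then
 * reuses the same ucontext to jump to the real entry point func(arg).
 */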
static void
cpu_lwp_trampoline(ucontext_t *ucp, void (*func)(void *), void *arg)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_trampoline called with func %p, arg %p\n", (void *) func, arg);
#endif
	/* init lwp */
	lwp_startup(curcpu()->ci_stash, curlwp);

	/* actual jump */
	thunk_makecontext(ucp, (void (*)(void)) func, 1, arg, NULL, NULL, NULL);
	thunk_setcontext(ucp);
}

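/*
 * MD part of fork: clone the parent's pcb, give the child its own system
 * stack and TLS base, and build a context that enters cpu_lwp_trampoline()
 * with uc_link pointing at the child's userret context.  A caller-supplied
 * user stack is not supported here.
 */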
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1 = lwp_getpcb(l1);
	struct pcb *pcb2 = lwp_getpcb(l2);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_fork [%s/%p] -> [%s/%p] stack=%p stacksize=%d\n",
	    l1 ? l1->l_name : "none", l1,
	    l2 ? l2->l_name : "none", l2,
	    stack, (int)stacksize);
#endif
	if (stack)
		panic("%s: stack passed, can't handle\n", __func__);

	/* copy the PCB and its switchframes from the parent */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

	/* refresh context, XXX needed? */
	if (thunk_getcontext(&pcb2->pcb_ucp))
		panic("getcontext failed");

	/* set up for TLS */
	pcb2->pcb_ucp.uc_mcontext._mc_tlsbase = (intptr_t) l2->l_private;
	pcb2->pcb_ucp.uc_flags |= _UC_TLSBASE;

	/* recalculate the system stack top */
	pcb2->sys_stack_top = pcb2->sys_stack + TRAPSTACKSIZE;

	/* give l2 its own stack */
	pcb2->pcb_ucp.uc_stack.ss_sp = pcb2->sys_stack;
	pcb2->pcb_ucp.uc_stack.ss_size = pcb2->sys_stack_top - pcb2->sys_stack;
	pcb2->pcb_ucp.uc_link = &pcb2->pcb_userret_ucp;

	thunk_sigemptyset(&pcb2->pcb_ucp.uc_sigmask);

	thunk_makecontext(&pcb2->pcb_ucp,
	    (void (*)(void)) cpu_lwp_trampoline,
	    3, &pcb2->pcb_ucp, func, arg, NULL);
}

void
cpu_initclocks(void)
{
	extern timer_t clock_timerid;

	thunk_timer_start(clock_timerid, HZ);
}

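/*
 * Early MD startup: allocate the kernel message buffer, carve out a small
 * submap for physio transfers, print the banner and initialise the pcb and
 * system stack of lwp0.
 */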
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	size_t msgbufsize = 32 * 1024;

	/* get ourselves a message buffer */
	um_msgbuf = kmem_zalloc(msgbufsize, KM_SLEEP);
	initmsgbuf(um_msgbuf, msgbufsize);

	/* allocate a submap for physio; is 1 MB enough? */
	minaddr = 0;
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   1024 * 1024, 0, false, NULL);

	/* say hi! */
	banner();

	/* init lwp0 */
	memset(&lwp0pcb, 0, sizeof(lwp0pcb));
	thunk_getcontext(&lwp0pcb.pcb_ucp);
	thunk_sigemptyset(&lwp0pcb.pcb_ucp.uc_sigmask);
	lwp0pcb.pcb_ucp.uc_flags = _UC_STACK | _UC_CPU | _UC_SIGMASK;

	uvm_lwp_setuarea(&lwp0, (vaddr_t) &lwp0pcb);
	memcpy(&lwp0pcb.pcb_userret_ucp, &lwp0pcb.pcb_ucp, sizeof(ucontext_t));

	/* set stack top */
	lwp0pcb.sys_stack_top = lwp0pcb.sys_stack + TRAPSTACKSIZE;
}

void
cpu_rootconf(void)
{
	extern char *usermode_root_device;
	device_t rdev;

	if (usermode_root_device != NULL) {
		rdev = device_find_by_xname(usermode_root_device);
	} else {
		rdev = device_find_by_xname("ld0");
		if (rdev == NULL)
			rdev = device_find_by_xname("md0");
	}

	aprint_normal("boot device: %s\n",
	    rdev ? device_xname(rdev) : "<unknown>");
	booted_device = rdev;
	rootconf();
}

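/*
 * Report whether we are running in interrupt context.  The loop rereads
 * ci_idepth until the lwp's context-switch count is stable, so we never
 * trust a cpu_info the lwp might have migrated away from mid-read.
 */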
bool
cpu_intr_p(void)
{
	uint64_t ncsw;
	int idepth;
	lwp_t *l;

	l = curlwp;
	do {
		ncsw = l->l_ncsw;
		__insn_barrier();
		idepth = l->l_cpu->ci_idepth;
		__insn_barrier();
	} while (__predict_false(ncsw != l->l_ncsw));

	return idepth >= 0;
}
545