1 /*	$NetBSD: cpu_subr.c,v 1.27 2016/07/11 16:15:36 matt Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas of 3am Software Foundry.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.27 2016/07/11 16:15:36 matt Exp $");
34 
35 #include "opt_ddb.h"
36 #include "opt_cputype.h"
37 #include "opt_modular.h"
38 #include "opt_multiprocessor.h"
39 
40 #include <sys/param.h>
41 #include <sys/cpu.h>
42 #include <sys/intr.h>
43 #include <sys/atomic.h>
44 #include <sys/device.h>
45 #include <sys/lwp.h>
46 #include <sys/proc.h>
47 #include <sys/ras.h>
48 #include <sys/module.h>
49 #include <sys/bitops.h>
50 #include <sys/idle.h>
51 #include <sys/xcall.h>
52 #include <sys/kernel.h>
53 #include <sys/ipi.h>
54 
55 #include <uvm/uvm.h>
56 
57 #include <mips/locore.h>
58 #include <mips/regnum.h>
59 #include <mips/pcb.h>
60 #include <mips/cache.h>
61 #include <mips/frame.h>
62 #include <mips/userret.h>
63 #include <mips/pte.h>
64 
65 #if defined(DDB) || defined(KGDB)
66 #ifdef DDB
67 #include <mips/db_machdep.h>
68 #include <ddb/db_command.h>
69 #include <ddb/db_output.h>
70 #endif
71 #endif
72 
73 #ifdef MIPS64_OCTEON
74 extern struct cpu_softc octeon_cpu0_softc;
75 #endif
76 
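/*
 * cpu_info for the boot CPU.  The __aligned() expression below rounds
 * sizeof(struct cpu_info) up to the next power of two, giving the structure
 * a power-of-two alignment at least as large as itself (presumably so that
 * low-level code can locate it with simple address arithmetic).
 */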
77 struct cpu_info cpu_info_store
78 #if defined(MULTIPROCESSOR) && !defined(MIPS64_OCTEON)
79 	__section(".data1")
80 	__aligned(1LU << ilog2((2*sizeof(struct cpu_info)-1)))
81 #endif
82     = {
83 	.ci_curlwp = &lwp0,
84 	.ci_tlb_info = &pmap_tlb0_info,
85 	.ci_pmap_kern_segtab = &pmap_kern_segtab,
86 	.ci_pmap_user_segtab = NULL,
87 #ifdef _LP64
88 	.ci_pmap_user_seg0tab = NULL,
89 #endif
90 	.ci_cpl = IPL_HIGH,
91 	.ci_tlb_slot = -1,
92 #ifdef MULTIPROCESSOR
93 	.ci_flags = CPUF_PRIMARY|CPUF_PRESENT|CPUF_RUNNING,
94 #endif
95 #ifdef MIPS64_OCTEON
96 	.ci_softc = &octeon_cpu0_softc,
97 #endif
98 };
99 
100 const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
101 	[PCU_FPU] = &mips_fpu_ops,
102 #if (MIPS32R2 + MIPS64R2) > 0
103 	[PCU_DSP] = &mips_dsp_ops,
104 #endif
105 };
106 
107 #ifdef MULTIPROCESSOR
108 struct cpu_info * cpuid_infos[MAXCPUS] = {
109 	[0] = &cpu_info_store,
110 };
111 
112 kcpuset_t *cpus_halted;
113 kcpuset_t *cpus_hatched;
114 kcpuset_t *cpus_paused;
115 kcpuset_t *cpus_resumed;
116 kcpuset_t *cpus_running;
117 
118 static void cpu_ipi_wait(const char *, const kcpuset_t *, const kcpuset_t *);
119 
120 struct cpu_info *
121 cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
122 	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
123 {
124 	KASSERT(cpu_id < MAXCPUS);
125 
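	/*
	 * On Octeon, each CPU owns the 4KB page holding its exception
	 * vectors.  cpu_info is placed at the very end of that page and, if
	 * no pmap_tlb_info was supplied, one is carved out immediately below
	 * it; the CTASSERT below checks that both fit behind the 0x280 bytes
	 * reserved for the vectors themselves.
	 */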
126 #ifdef MIPS64_OCTEON
127 	vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + 0x1000*cpu_id;
128 	__CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info) <= 0x1000 - 0x280);
129 
130 	struct cpu_info * const ci = ((struct cpu_info *)(exc_page + 0x1000)) - 1;
131 	memset((void *)exc_page, 0, PAGE_SIZE);
132 
133 	if (ti == NULL) {
134 		ti = ((struct pmap_tlb_info *)ci) - 1;
135 		pmap_tlb_info_init(ti);
136 	}
137 #else
138 	const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
139 	struct pglist pglist;
140 	int error;
141 
142 	/*
143 	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
144 	 * exception vectors and cpu_info for this cpu.
145 	 */
146 	error = uvm_pglistalloc(PAGE_SIZE,
147 	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
148 	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
149 	if (error)
150 		return NULL;
151 
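	/*
	 * Place the new cpu_info at the same offset within its page as
	 * cpu_info_store occupies within its page (presumably so that code
	 * which locates cpu_info by a fixed in-page offset behaves the same
	 * on every CPU).
	 */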
152 	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
153 	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
154 	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
155 	memset((void *)va, 0, PAGE_SIZE);
156 
157 	/*
158 	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
159 	 * to take care of that for him.  Since we have room left over in the
160 	 * page we just allocated, just use a piece of that for it.
161 	 */
162 	if (ti == NULL) {
163 		if (cpu_info_offset >= sizeof(*ti)) {
164 			ti = (void *) va;
165 		} else {
166 			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
167 			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
168 		}
169 		pmap_tlb_info_init(ti);
170 	}
171 
172 	/*
173 	 * Attach its TLB info (which must be direct-mapped)
174 	 */
175 #ifdef _LP64
176 	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
177 #else
178 	KASSERT(MIPS_KSEG0_P(ti));
179 #endif
180 #endif /* MIPS64_OCTEON */
181 
182 	KASSERT(cpu_id != 0);
183 	ci->ci_cpuid = cpu_id;
184 	ci->ci_data.cpu_package_id = cpu_package_id;
185 	ci->ci_data.cpu_core_id = cpu_core_id;
186 	ci->ci_data.cpu_smt_id = cpu_smt_id;
187 	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
188 	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
189 	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
190 	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
191 	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
192 	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;
193 
194 	pmap_md_alloc_ephemeral_address_space(ci);
195 
196 	mi_cpu_attach(ci);
197 
198 	pmap_tlb_info_attach(ti, ci);
199 
200 	return ci;
201 }
202 #endif /* MULTIPROCESSOR */
203 
204 static void
205 cpu_hwrena_setup(void)
206 {
207 #if (MIPS32R2 + MIPS64R2) > 0
208 	const int cp0flags = mips_options.mips_cpu->cpu_cp0flags;
209 	if ((cp0flags & MIPS_CP0FL_USE) == 0)
210 		return;
211 
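	/*
	 * HWRENA controls which hardware registers user mode may read with
	 * the RDHWR instruction; the bits below expose UserLocal, the cycle
	 * counter and its resolution, the SYNCI step size, and the CPU
	 * number.
	 */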
212 	if (cp0flags & MIPS_CP0FL_HWRENA) {
213 		mipsNN_cp0_hwrena_write(
214 		    MIPS_HWRENA_UL
215 		    |MIPS_HWRENA_CCRES
216 		    |MIPS_HWRENA_CC
217 		    |MIPS_HWRENA_SYNCI_STEP
218 		    |MIPS_HWRENA_CPUNUM);
219 		if (cp0flags & MIPS_CP0FL_USERLOCAL) {
220 			mipsNN_cp0_userlocal_write(curlwp->l_private);
221 		}
222 	}
223 #endif
224 }
225 
226 void
227 cpu_attach_common(device_t self, struct cpu_info *ci)
228 {
229 	const char * const xname = device_xname(self);
230 
231 	/*
232 	 * Cross link cpu_info and its device together
233 	 */
234 	ci->ci_dev = self;
235 	self->dv_private = ci;
236 	KASSERT(ci->ci_idepth == 0);
237 
238 	evcnt_attach_dynamic(&ci->ci_ev_count_compare,
239 		EVCNT_TYPE_INTR, NULL, xname,
240 		"int 5 (clock)");
241 	evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed,
242 		EVCNT_TYPE_INTR, NULL, xname,
243 		"int 5 (clock) missed");
244 	evcnt_attach_dynamic(&ci->ci_ev_fpu_loads,
245 		EVCNT_TYPE_MISC, NULL, xname,
246 		"fpu loads");
247 	evcnt_attach_dynamic(&ci->ci_ev_fpu_saves,
248 		EVCNT_TYPE_MISC, NULL, xname,
249 		"fpu saves");
250 	evcnt_attach_dynamic(&ci->ci_ev_dsp_loads,
251 		EVCNT_TYPE_MISC, NULL, xname,
252 		"dsp loads");
253 	evcnt_attach_dynamic(&ci->ci_ev_dsp_saves,
254 		EVCNT_TYPE_MISC, NULL, xname,
255 		"dsp saves");
256 	evcnt_attach_dynamic(&ci->ci_ev_tlbmisses,
257 		EVCNT_TYPE_TRAP, NULL, xname,
258 		"tlb misses");
259 
260 #ifdef MULTIPROCESSOR
261 	if (ci != &cpu_info_store) {
262 		/*
263 		 * Record this cpu_info in the cpuid_infos[] array.
264 		 */
265 		KASSERT(cpuid_infos[ci->ci_cpuid] == NULL);
266 		cpuid_infos[ci->ci_cpuid] = ci;
267 		membar_producer();
268 	}
269 	KASSERT(cpuid_infos[ci->ci_cpuid] != NULL);
270 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_activate_rqst,
271 	    EVCNT_TYPE_MISC, NULL, xname,
272 	    "syncicache activate request");
273 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_deferred_rqst,
274 	    EVCNT_TYPE_MISC, NULL, xname,
275 	    "syncicache deferred request");
276 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst,
277 	    EVCNT_TYPE_MISC, NULL, xname,
278 	    "syncicache ipi request");
279 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst,
280 	    EVCNT_TYPE_MISC, NULL, xname,
281 	    "syncicache onproc request");
282 
283 	/*
284 	 * Initialize IPI framework for this cpu instance
285 	 */
286 	ipi_init(ci);
287 #endif
288 }
289 
290 void
291 cpu_startup_common(void)
292 {
293 	vaddr_t minaddr, maxaddr;
294 	char pbuf[9];	/* "99999 MB" */
295 
296 	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
297 
298 #ifdef MULTIPROCESSOR
299 	kcpuset_create(&cpus_halted, true);
300 		KASSERT(cpus_halted != NULL);
301 	kcpuset_create(&cpus_hatched, true);
302 		KASSERT(cpus_hatched != NULL);
303 	kcpuset_create(&cpus_paused, true);
304 		KASSERT(cpus_paused != NULL);
305 	kcpuset_create(&cpus_resumed, true);
306 		KASSERT(cpus_resumed != NULL);
307 	kcpuset_create(&cpus_running, true);
308 		KASSERT(cpus_running != NULL);
309 	kcpuset_set(cpus_hatched, cpu_number());
310 	kcpuset_set(cpus_running, cpu_number());
311 #endif
312 
313 	cpu_hwrena_setup();
314 
315 	/*
316 	 * Good {morning,afternoon,evening,night}.
317 	 */
318 	printf("%s%s", copyright, version);
319 	printf("%s\n", cpu_getmodel());
320 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
321 	printf("total memory = %s\n", pbuf);
322 
323 	minaddr = 0;
324 	/*
325 	 * Allocate a submap for physio.
326 	 */
327 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
328 				    VM_PHYS_SIZE, 0, FALSE, NULL);
329 
330 	/*
331 	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
332 	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
333 	 * map those pages.)
334 	 */
335 
336 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
337 	printf("avail memory = %s\n", pbuf);
338 
339 #if defined(__mips_n32)
340 	module_machine = "mips-n32";
341 #endif
342 }
343 
344 void
345 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
346 {
347 	const struct trapframe *tf = l->l_md.md_utf;
348 	__greg_t *gr = mcp->__gregs;
349 	__greg_t ras_pc;
350 
351 	/* Save register context. Don't copy R0 - it is always 0 */
352 	memcpy(&gr[_REG_AT], &tf->tf_regs[_R_AST], sizeof(mips_reg_t) * 31);
353 
354 	gr[_REG_MDLO]  = tf->tf_regs[_R_MULLO];
355 	gr[_REG_MDHI]  = tf->tf_regs[_R_MULHI];
356 	gr[_REG_CAUSE] = tf->tf_regs[_R_CAUSE];
357 	gr[_REG_EPC]   = tf->tf_regs[_R_PC];
358 	gr[_REG_SR]    = tf->tf_regs[_R_SR];
359 	mcp->_mc_tlsbase = (intptr_t)l->l_private;
360 
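	/*
	 * If the PC lies inside a restartable atomic sequence, report the
	 * sequence's restart address instead of the interrupted PC.
	 */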
361 	if ((ras_pc = (intptr_t)ras_lookup(l->l_proc,
362 	    (void *) (intptr_t)gr[_REG_EPC])) != -1)
363 		gr[_REG_EPC] = ras_pc;
364 
365 	*flags |= _UC_CPU | _UC_TLSBASE;
366 
367 	/* Save floating point register context, if any. */
368 	KASSERT(l == curlwp);
369 	if (fpu_used_p()) {
370 		size_t fplen;
371 		/*
372 		 * If this process is the current FP owner, dump its
373 		 * context to the PCB first.
374 		 */
375 		fpu_save();
376 
377 		/*
378 		 * The PCB FP regs struct includes the FP CSR, so use the
379 		 * size of __fpregs.__fp_r when copying.
380 		 */
381 #if !defined(__mips_o32)
382 		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
383 #endif
384 			fplen = sizeof(struct fpreg);
385 #if !defined(__mips_o32)
386 		} else {
387 			fplen = sizeof(struct fpreg_oabi);
388 		}
389 #endif
390 		struct pcb * const pcb = lwp_getpcb(l);
391 		memcpy(&mcp->__fpregs, &pcb->pcb_fpregs, fplen);
392 		*flags |= _UC_FPU;
393 	}
394 }
395 
396 int
397 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
398 {
399 	/* XXX:  Do we validate the addresses?? */
400 	return 0;
401 }
402 
403 int
404 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
405 {
406 	struct trapframe *tf = l->l_md.md_utf;
407 	struct proc *p = l->l_proc;
408 	const __greg_t *gr = mcp->__gregs;
409 	int error;
410 
411 	/* Restore register context, if any. */
412 	if (flags & _UC_CPU) {
413 		error = cpu_mcontext_validate(l, mcp);
414 		if (error)
415 			return error;
416 
417 		/* Save register context. */
418 
419 #ifdef __mips_n32
420 		CTASSERT(_R_AST == _REG_AT);
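		/*
		 * An o32 process hands us 32-bit __gregs, so widen each
		 * register individually instead of using the bulk memcpy
		 * below.
		 */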
421 		if (__predict_false(p->p_md.md_abi == _MIPS_BSD_API_O32)) {
422 			const mcontext_o32_t *mcp32 = (const mcontext_o32_t *)mcp;
423 			const __greg32_t *gr32 = mcp32->__gregs;
424 			for (size_t i = _R_AST; i < 32; i++) {
425 				tf->tf_regs[i] = gr32[i];
426 			}
427 		} else
428 #endif
429 		memcpy(&tf->tf_regs[_R_AST], &gr[_REG_AT],
430 		       sizeof(mips_reg_t) * 31);
431 
432 		tf->tf_regs[_R_MULLO] = gr[_REG_MDLO];
433 		tf->tf_regs[_R_MULHI] = gr[_REG_MDHI];
434 		tf->tf_regs[_R_CAUSE] = gr[_REG_CAUSE];
435 		tf->tf_regs[_R_PC]    = gr[_REG_EPC];
436 		/* Do not restore SR. */
437 	}
438 
439 	/* Restore the private thread context */
440 	if (flags & _UC_TLSBASE) {
441 		lwp_setprivate(l, (void *)(intptr_t)mcp->_mc_tlsbase);
442 	}
443 
444 	/* Restore floating point register context, if any. */
445 	if (flags & _UC_FPU) {
446 		size_t fplen;
447 
448 		/* Disable the FPU contents. */
449 		fpu_discard();
450 
451 #if !defined(__mips_o32)
452 		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
453 #endif
454 			fplen = sizeof(struct fpreg);
455 #if !defined(__mips_o32)
456 		} else {
457 			fplen = sizeof(struct fpreg_oabi);
458 		}
459 #endif
460 		/*
461 		 * The PCB FP regs struct includes the FP CSR, so use the
462 		 * proper size of fpreg when copying.
463 		 */
464 		struct pcb * const pcb = lwp_getpcb(l);
465 		memcpy(&pcb->pcb_fpregs, &mcp->__fpregs, fplen);
466 	}
467 
468 	mutex_enter(p->p_lock);
469 	if (flags & _UC_SETSTACK)
470 		l->l_sigstk.ss_flags |= SS_ONSTACK;
471 	if (flags & _UC_CLRSTACK)
472 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
473 	mutex_exit(p->p_lock);
474 
475 	return (0);
476 }
477 
478 void
479 cpu_need_resched(struct cpu_info *ci, int flags)
480 {
481 	struct lwp * const l = ci->ci_data.cpu_onproc;
482 #ifdef MULTIPROCESSOR
483 	struct cpu_info * const cur_ci = curcpu();
484 #endif
485 
486 	KASSERT(kpreempt_disabled());
487 
488 	ci->ci_want_resched |= flags;
489 
490 	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
491 		/*
492 		 * No point doing anything, it will switch soon.
493 		 * Also here to prevent an assertion failure in
494 		 * kpreempt() due to preemption being set on a
495 		 * soft interrupt LWP.
496 		 */
497 		return;
498 	}
499 
500 	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
501 #ifdef MULTIPROCESSOR
502 		/*
503 		 * If the other CPU is idling, it must be waiting for an
504 		 * interrupt.  So give it one.
505 		 */
506 		if (__predict_false(ci != cur_ci))
507 			cpu_send_ipi(ci, IPI_NOP);
508 #endif
509 		return;
510 	}
511 
512 #ifdef MULTIPROCESSOR
513 	atomic_or_uint(&ci->ci_want_resched, flags);
514 #else
515 	ci->ci_want_resched |= flags;
516 #endif
517 
518 	if (flags & RESCHED_KPREEMPT) {
519 #ifdef __HAVE_PREEMPTION
520 		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
521 		if (ci == cur_ci) {
522 			softint_trigger(SOFTINT_KPREEMPT);
523 		} else {
524 			cpu_send_ipi(ci, IPI_KPREEMPT);
525 		}
526 #endif
527 		return;
528 	}
529 	l->l_md.md_astpending = 1;		/* force call to ast() */
530 #ifdef MULTIPROCESSOR
531 	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
532 		cpu_send_ipi(ci, IPI_AST);
533 	}
534 #endif
535 }
536 
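/*
 * Return the status-register bits that indicate the interrupted context was
 * user mode: the KSU field on MIPS3-style CPUs, the previous-KU bit on older
 * ones.
 */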
537 uint32_t
538 cpu_clkf_usermode_mask(void)
539 {
540 	return CPUISMIPS3 ? MIPS_SR_KSU_USER : MIPS_SR_KU_PREV;
541 }
542 
543 void
544 cpu_signotify(struct lwp *l)
545 {
546 	KASSERT(kpreempt_disabled());
547 #ifdef __HAVE_FAST_SOFTINTS
548 	KASSERT(lwp_locked(l, NULL));
549 #endif
550 	KASSERT(l->l_stat == LSONPROC || l->l_stat == LSRUN || l->l_stat == LSSTOP);
551 
552 	l->l_md.md_astpending = 1; 		/* force call to ast() */
553 }
554 
555 void
556 cpu_need_proftick(struct lwp *l)
557 {
558 	KASSERT(kpreempt_disabled());
559 	KASSERT(l->l_cpu == curcpu());
560 
561 	l->l_pflag |= LP_OWEUPC;
562 	l->l_md.md_astpending = 1;		/* force call to ast() */
563 }
564 
565 void
566 cpu_set_curpri(int pri)
567 {
568 	kpreempt_disable();
569 	curcpu()->ci_schedstate.spc_curpriority = pri;
570 	kpreempt_enable();
571 }
572 
573 
574 #ifdef __HAVE_PREEMPTION
575 bool
576 cpu_kpreempt_enter(uintptr_t where, int s)
577 {
578 	KASSERT(kpreempt_disabled());
579 
580 #if 0
581 	if (where == (intptr_t)-2) {
582 		KASSERT(curcpu()->ci_mtx_count == 0);
583 		/*
584 		 * We must be called via kern_intr (which already checks for
585 		 * IPL_NONE so of course we can be preempted).
586 		 */
587 		return true;
588 	}
589 	/*
590 	 * We are called from KPREEMPT_ENABLE().  If we are at IPL_NONE,
591 	 * of course we can be preempted.  If we aren't, ask for a
592 	 * softint so that kern_intr can call kpreempt.
593 	 */
594 	if (s == IPL_NONE) {
595 		KASSERT(curcpu()->ci_mtx_count == 0);
596 		return true;
597 	}
598 	softint_trigger(SOFTINT_KPREEMPT);
599 #endif
600 	return false;
601 }
602 
603 void
604 cpu_kpreempt_exit(uintptr_t where)
605 {
606 
607 	/* do nothing */
608 }
609 
610 /*
611  * Return true if preemption is disabled for MD reasons.  Must be called
612  * with preemption disabled, and thus is only for diagnostic checks.
613  */
614 bool
615 cpu_kpreempt_disabled(void)
616 {
617 	/*
618 	 * Any elevated IPL disables preemption.
619 	 */
620 	return curcpu()->ci_cpl > IPL_NONE;
621 }
622 #endif /* __HAVE_PREEMPTION */
623 
624 void
625 cpu_idle(void)
626 {
627 	void (*const mach_idle)(void) = mips_locoresw.lsw_cpu_idle;
628 	struct cpu_info * const ci = curcpu();
629 
630 	while (!ci->ci_want_resched) {
631 #ifdef __HAVE_FAST_SOFTINTS
632 		KASSERT(ci->ci_data.cpu_softints == 0);
633 #endif
634 		(*mach_idle)();
635 	}
636 }
637 
638 bool
639 cpu_intr_p(void)
640 {
641 	bool rv;
642 	kpreempt_disable();
643 	rv = (curcpu()->ci_idepth != 0);
644 	kpreempt_enable();
645 	return rv;
646 }
647 
648 #ifdef MULTIPROCESSOR
649 
650 void
651 cpu_broadcast_ipi(int tag)
652 {
653 	// No reason to remove ourselves since multicast_ipi will do that for us
654 	cpu_multicast_ipi(cpus_running, tag);
655 }
656 
657 void
658 cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
659 {
660 	struct cpu_info * const ci = curcpu();
661 	kcpuset_t *kcp2;
662 
663 	if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
664 		return;
665 
666 	kcpuset_clone(&kcp2, kcp);
667 	kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
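	/* kcpuset_ffs() returns a 1-based index (0 when the set is empty). */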
668 	for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
669 		kcpuset_clear(kcp2, --cii);
670 		(void)cpu_send_ipi(cpu_lookup(cii), tag);
671 	}
672 	kcpuset_destroy(kcp2);
673 }
674 
675 int
676 cpu_send_ipi(struct cpu_info *ci, int tag)
677 {
678 
679 	return (*mips_locoresw.lsw_send_ipi)(ci, tag);
680 }
681 
682 static void
683 cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
684 {
685 	bool done = false;
686 	kcpuset_t *kcp;
687 	kcpuset_create(&kcp, false);
688 
689 	/* Spin for a bounded number of iterations (scaled to the CPU frequency) rather than forever. */
690 
691 	for (u_long limit = curcpu()->ci_cpu_freq/10; !done && limit--; ) {
692 		kcpuset_copy(kcp, watchset);
693 		kcpuset_intersect(kcp, wanted);
694 		done = kcpuset_match(kcp, wanted);
695 	}
696 
697 	if (!done) {
698 		cpuid_t cii;
699 		kcpuset_copy(kcp, wanted);
700 		kcpuset_remove(kcp, watchset);
701 		if ((cii = kcpuset_ffs(kcp)) != 0) {
702 			printf("Failed to %s:", s);
703 			do {
704 				kcpuset_clear(kcp, --cii);
705 				printf(" cpu%lu", cii);
706 			} while ((cii = kcpuset_ffs(kcp)) != 0);
707 			printf("\n");
708 		}
709 	}
710 
711 	kcpuset_destroy(kcp);
712 }
713 
714 /*
715  * Halt this cpu
716  */
717 void
718 cpu_halt(void)
719 {
720 	cpuid_t cii = cpu_index(curcpu());
721 
722 	printf("cpu%lu: shutting down\n", cii);
723 	kcpuset_atomic_set(cpus_halted, cii);
724 	spl0();		/* allow interrupts e.g. further ipi ? */
725 	for (;;) ;	/* spin */
726 
727 	/* NOTREACHED */
728 }
729 
730 /*
731  * Halt all running cpus, excluding current cpu.
732  */
733 void
734 cpu_halt_others(void)
735 {
736 	kcpuset_t *kcp;
737 
738 	// If we are the only CPU running, there's nothing to do.
739 	if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset))
740 		return;
741 
742 	// Get all running CPUs
743 	kcpuset_clone(&kcp, cpus_running);
744 	// Remove ourself
745 	kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset);
746 	// Remove any halted CPUs
747 	kcpuset_remove(kcp, cpus_halted);
748 	// If there are CPUs left, send the IPIs
749 	if (!kcpuset_iszero(kcp)) {
750 		cpu_multicast_ipi(kcp, IPI_HALT);
751 		cpu_ipi_wait("halt", cpus_halted, kcp);
752 	}
753 	kcpuset_destroy(kcp);
754 
755 	/*
756 	 * TBD
757 	 * Depending on available firmware methods, other cpus will
758 	 * either shut down themselves, or spin and wait for us to
759 	 * stop them.
760 	 */
761 }
762 
763 /*
764  * Pause this cpu
765  */
766 void
767 cpu_pause(struct reg *regsp)
768 {
769 	int s = splhigh();
770 	cpuid_t cii = cpu_index(curcpu());
771 
772 	if (__predict_false(cold))
773 		return;
774 
775 	do {
776 		kcpuset_atomic_set(cpus_paused, cii);
777 		do {
778 			;
779 		} while (kcpuset_isset(cpus_paused, cii));
780 		kcpuset_atomic_set(cpus_resumed, cii);
781 #if defined(DDB)
782 		if (ddb_running_on_this_cpu_p())
783 			cpu_Debugger();
784 		if (ddb_running_on_any_cpu_p())
785 			continue;
786 #endif
787 	} while (false);
788 
789 	splx(s);
790 }
791 
792 /*
793  * Pause all running cpus, excluding current cpu.
794  */
795 void
796 cpu_pause_others(void)
797 {
798 	struct cpu_info * const ci = curcpu();
799 	kcpuset_t *kcp;
800 
801 	if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
802 		return;
803 
804 	kcpuset_clone(&kcp, cpus_running);
805 	kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset);
806 	kcpuset_remove(kcp, cpus_paused);
807 
808 	cpu_broadcast_ipi(IPI_SUSPEND);
809 	cpu_ipi_wait("pause", cpus_paused, kcp);
810 
811 	kcpuset_destroy(kcp);
812 }
813 
814 /*
815  * Resume a single cpu
816  */
817 void
818 cpu_resume(cpuid_t cii)
819 {
820 	kcpuset_t *kcp;
821 
822 	if (__predict_false(cold))
823 		return;
824 
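	/*
	 * Clear the resumed set and un-pause the target CPU; cpu_pause() on
	 * that CPU sets its bit in cpus_resumed once it leaves its spin loop,
	 * which is what cpu_ipi_wait() watches for below.
	 */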
825 	kcpuset_create(&kcp, true);
826 	kcpuset_set(kcp, cii);
827 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
828 	kcpuset_atomic_clear(cpus_paused, cii);
829 
830 	cpu_ipi_wait("resume", cpus_resumed, kcp);
831 
832 	kcpuset_destroy(kcp);
833 }
834 
835 /*
836  * Resume all paused cpus.
837  */
838 void
839 cpu_resume_others(void)
840 {
841 	kcpuset_t *kcp;
842 
843 	if (__predict_false(cold))
844 		return;
845 
846 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
847 	kcpuset_clone(&kcp, cpus_paused);
848 	kcpuset_atomicly_remove(cpus_paused, cpus_paused);
849 
850 	/* The paused CPUs wake up once their bits in cpus_paused are cleared. */
851 	cpu_ipi_wait("resume", cpus_resumed, kcp);
852 
853 	kcpuset_destroy(kcp);
854 }
855 
856 bool
857 cpu_is_paused(cpuid_t cii)
858 {
859 
860 	return !cold && kcpuset_isset(cpus_paused, cii);
861 }
862 
863 #ifdef DDB
864 void
865 cpu_debug_dump(void)
866 {
867 	CPU_INFO_ITERATOR cii;
868 	struct cpu_info *ci;
869 	char running, hatched, paused, resumed, halted;
870 
871 	db_printf("CPU CPUID STATE CPUINFO            CPL INT MTX IPIS\n");
872 	for (CPU_INFO_FOREACH(cii, ci)) {
873 		hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 'H' : '-');
874 		running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-');
875 		paused  = (kcpuset_isset(cpus_paused,  cpu_index(ci)) ? 'P' : '-');
876 		resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-');
877 		halted  = (kcpuset_isset(cpus_halted,  cpu_index(ci)) ? 'h' : '-');
878 		db_printf("%3d 0x%03lx %c%c%c%c%c %p "
879 			"%3d %3d %3d "
880 			"0x%02" PRIx64 "/0x%02" PRIx64 "\n",
881 			cpu_index(ci), ci->ci_cpuid,
882 			running, hatched, paused, resumed, halted,
883 			ci, ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
884 			ci->ci_active_ipis, ci->ci_request_ipis);
885 	}
886 }
887 #endif
888 
889 void
890 cpu_hatch(struct cpu_info *ci)
891 {
892 	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
893 
894 	/*
895 	 * Invalidate all the TLB entries (even wired ones) and then reserve
896 	 * space for the wired TLB entries.
897 	 */
898 	mips3_cp0_wired_write(0);
899 	tlb_invalidate_all();
900 	mips3_cp0_wired_write(ti->ti_wired);
901 
902 	/*
903 	 * Set up the HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
904 	 */
905 	cpu_hwrena_setup();
906 
907 	/*
908 	 * If we are using register zero relative addressing to access cpu_info
909 	 * in the exception vectors, enter that mapping into TLB now.
910 	 */
911 	if (ci->ci_tlb_slot >= 0) {
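		/*
		 * Wire a global mapping of the last page of the address space
		 * (VA -PAGE_SIZE) to the physical page holding this cpu_info.
		 * With an odd PGSHIFT both halves of the TLB pair cover the
		 * page; otherwise only the odd half is used.
		 */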
912 		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
913 		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
914 		const struct tlbmask tlbmask = {
915 			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
916 #if (PGSHIFT & 1)
917 			.tlb_lo0 = tlb_lo,
918 			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
919 #else
920 			.tlb_lo0 = 0,
921 			.tlb_lo1 = tlb_lo,
922 #endif
923 			.tlb_mask = -1,
924 		};
925 
926 		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
927 		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
928 	}
929 
930 	/*
931 	 * Flush the icache just to be sure.
932 	 */
933 	mips_icache_sync_all();
934 
935 	/*
936 	 * Let this CPU do its own initialization (for things that have to be
937 	 * done on the local CPU).
938 	 */
939 	(*mips_locoresw.lsw_cpu_init)(ci);
940 
941 	// Show this CPU as present.
942 	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);
943 
944 	/*
945 	 * Announce we are hatched
946 	 */
947 	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));
948 
949 	/*
950 	 * Now wait to be set free!
951 	 */
952 	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
953 		/* spin, spin, spin */
954 	}
955 
956 	/*
957 	 * initialize the MIPS count/compare clock
958 	 */
959 	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
960 	KASSERT(ci->ci_cycles_per_hz != 0);
961 	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
962 	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
963 	ci->ci_data.cpu_cc_skew = 0;
964 
965 	/*
966 	 * Let this CPU do its own post-running initialization
967 	 * (for things that have to be done on the local CPU).
968 	 */
969 	(*mips_locoresw.lsw_cpu_run)(ci);
970 
971 	/*
972 	 * Now turn on interrupts (and verify they are on).
973 	 */
974 	spl0();
975 	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
976 	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
977 
978 	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
979 	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));
980 
981 	/*
982 	 * And do a tail call to idle_loop
983 	 */
984 	idle_loop(NULL);
985 }
986 
987 void
988 cpu_boot_secondary_processors(void)
989 {
990 	CPU_INFO_ITERATOR cii;
991 	struct cpu_info *ci;
992 	for (CPU_INFO_FOREACH(cii, ci)) {
993 		if (CPU_IS_PRIMARY(ci))
994 			continue;
995 		KASSERT(ci->ci_data.cpu_idlelwp);
996 
997 		/*
998 		 * Skip this CPU if it didn't successfully hatch.
999 		 */
1000 		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
1001 			continue;
1002 
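		/*
		 * Record the boot CPU's current COUNT value as the clock
		 * skew; cpu_hatch() on the secondary uses it to seed its own
		 * count/compare clock before zeroing it.
		 */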
1003 		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
1004 		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
1005 		kcpuset_set(cpus_running, cpu_index(ci));
1006 		// Spin until the cpu calls idle_loop
1007 		for (u_int i = 0; i < 100; i++) {
1008 			if (kcpuset_isset(cpus_running, cpu_index(ci)))
1009 				break;
1010 			delay(1000);
1011 		}
1012 	}
1013 }
1014 
1015 void
1016 xc_send_ipi(struct cpu_info *ci)
1017 {
1018 
1019 	(*mips_locoresw.lsw_send_ipi)(ci, IPI_XCALL);
1020 }
1021 
1022 void
1023 cpu_ipi(struct cpu_info *ci)
1024 {
1025 	(*mips_locoresw.lsw_send_ipi)(ci, IPI_GENERIC);
1026 }
1027 
1028 #endif /* MULTIPROCESSOR */
1029 
1030 void
1031 cpu_offline_md(void)
1032 {
1033 
1034 	(*mips_locoresw.lsw_cpu_offline_md)();
1035 }
1036 
1037 #ifdef _LP64
1038 void
1039 cpu_vmspace_exec(lwp_t *l, vaddr_t start, vaddr_t end)
1040 {
1041 	/*
1042 	 * We need to turn UX on/off so that copyout/copyin will work,
1043 	 * well before setreg gets called.
1044 	 */
1045 	uint32_t sr = mips_cp0_status_read();
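	/*
	 * If the top of the new user address space does not fit in 32 bits,
	 * 64-bit user addressing (UX) must be enabled; otherwise turn it off.
	 */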
1046 	if (end != (uint32_t) end) {
1047 		mips_cp0_status_write(sr | MIPS3_SR_UX);
1048 	} else {
1049 		mips_cp0_status_write(sr & ~MIPS3_SR_UX);
1050 	}
1051 }
1052 #endif
1053 
1054 int
1055 cpu_lwp_setprivate(lwp_t *l, void *v)
1056 {
1057 #if (MIPS32R2 + MIPS64R2) > 0
1058 	if (l == curlwp
1059 	    && (mips_options.mips_cpu->cpu_cp0flags & MIPS_CP0FL_USERLOCAL)) {
1060 		mipsNN_cp0_userlocal_write(v);
1061 	}
1062 #endif
1063 	return 0;
1064 }
1065 
1066 
1067 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
1068 
1069 #if (CPUWATCH_MAX != 8)
1070 # error CPUWATCH_MAX
1071 #endif
1072 
1073 /*
1074  * cpuwatch_discover - determine how many COP0 watchpoints this CPU supports
1075  */
1076 u_int
1077 cpuwatch_discover(void)
1078 {
1079 	int i;
1080 
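	/*
	 * Bit 31 ('M') of each WatchHi register indicates that at least one
	 * more watch register pair follows it.
	 */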
1081 	for (i=0; i < CPUWATCH_MAX; i++) {
1082 		uint32_t watchhi = mipsNN_cp0_watchhi_read(i);
1083 		if ((watchhi & __BIT(31)) == 0)	/* test 'M' bit */
1084 			break;
1085 	}
1086 	return i + 1;
1087 }
1088 
1089 void
1090 cpuwatch_free(cpu_watchpoint_t *cwp)
1091 {
1092 #ifdef DIAGNOSTIC
1093 	struct cpu_info * const ci = curcpu();
1094 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1095 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1096 #endif
1097 	cwp->cw_mode = 0;
1098 	cwp->cw_asid = 0;
1099 	cwp->cw_addr = 0;
1100 	cpuwatch_clr(cwp);
1101 }
1102 
1103 /*
1104  * cpuwatch_alloc
1105  * 	find an empty slot
1106  *	no locking for the table since it is CPU private
1107  */
1108 cpu_watchpoint_t *
1109 cpuwatch_alloc(void)
1110 {
1111 	struct cpu_info * const ci = curcpu();
1112 	cpu_watchpoint_t *cwp;
1113 
1114 	for (int i=0; i < ci->ci_cpuwatch_count; i++) {
1115 		cwp = &ci->ci_cpuwatch_tab[i];
1116 		if ((cwp->cw_mode & CPUWATCH_RWX) == 0)
1117 			return cwp;
1118 	}
1119 	return NULL;
1120 }
1121 
1122 
1123 void
1124 cpuwatch_set_all(void)
1125 {
1126 	struct cpu_info * const ci = curcpu();
1127 	cpu_watchpoint_t *cwp;
1128 	int i;
1129 
1130 	for (i=0; i < ci->ci_cpuwatch_count; i++) {
1131 		cwp = &ci->ci_cpuwatch_tab[i];
1132 		if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
1133 			cpuwatch_set(cwp);
1134 	}
1135 }
1136 
1137 void
1138 cpuwatch_clr_all(void)
1139 {
1140 	struct cpu_info * const ci = curcpu();
1141 	cpu_watchpoint_t *cwp;
1142 	int i;
1143 
1144 	for (i=0; i < ci->ci_cpuwatch_count; i++) {
1145 		cwp = &ci->ci_cpuwatch_tab[i];
1146 		if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
1147 			cpuwatch_clr(cwp);
1148 	}
1149 }
1150 
1151 /*
1152  * cpuwatch_set - establish a MIPS COP0 watchpoint
1153  */
1154 void
1155 cpuwatch_set(cpu_watchpoint_t *cwp)
1156 {
1157 	struct cpu_info * const ci = curcpu();
1158 	uint32_t watchhi;
1159 	register_t watchlo;
1160 	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
1161 
1162 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1163 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1164 
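	/*
	 * WatchLo holds the virtual address to match plus the W/R/I enables
	 * in bits 0..2; WatchHi holds the global bit (30), the ASID (23:16)
	 * and the "don't care" address mask bits.
	 */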
1165 	watchlo = cwp->cw_addr;
1166 	if (cwp->cw_mode & CPUWATCH_WRITE)
1167 		watchlo |= __BIT(0);
1168 	if (cwp->cw_mode & CPUWATCH_READ)
1169 		watchlo |= __BIT(1);
1170 	if (cwp->cw_mode & CPUWATCH_EXEC)
1171 		watchlo |= __BIT(2);
1172 
1173 	if (cwp->cw_mode & CPUWATCH_ASID)
1174 		watchhi = cwp->cw_asid << 16;	/* addr qualified by asid */
1175 	else
1176 		watchhi = __BIT(30);		/* addr not qual. by asid (Global) */
1177 	if (cwp->cw_mode & CPUWATCH_MASK)
1178 		watchhi |= cwp->cw_mask;	/* set "dont care" addr match bits */
1179 
1180 	mipsNN_cp0_watchhi_write(cwnum, watchhi);
1181 	mipsNN_cp0_watchlo_write(cwnum, watchlo);
1182 }
1183 
1184 /*
1185  * cpuwatch_clr - disestablish a MIPS COP0 watchpoint
1186  */
1187 void
1188 cpuwatch_clr(cpu_watchpoint_t *cwp)
1189 {
1190 	struct cpu_info * const ci = curcpu();
1191 	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
1192 
1193 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1194 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1195 
1196 	mipsNN_cp0_watchhi_write(cwnum, 0);
1197 	mipsNN_cp0_watchlo_write(cwnum, 0);
1198 }
1199 
1200 #endif	/* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
1201