xref: /freebsd/sys/arm64/arm64/machdep.c (revision c697fb7f)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include "opt_acpi.h"
29 #include "opt_platform.h"
30 #include "opt_ddb.h"
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/buf.h>
38 #include <sys/bus.h>
39 #include <sys/cons.h>
40 #include <sys/cpu.h>
41 #include <sys/csan.h>
42 #include <sys/devmap.h>
43 #include <sys/efi.h>
44 #include <sys/exec.h>
45 #include <sys/imgact.h>
46 #include <sys/kdb.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/limits.h>
50 #include <sys/linker.h>
51 #include <sys/msgbuf.h>
52 #include <sys/pcpu.h>
53 #include <sys/proc.h>
54 #include <sys/ptrace.h>
55 #include <sys/reboot.h>
56 #include <sys/rwlock.h>
57 #include <sys/sched.h>
58 #include <sys/signalvar.h>
59 #include <sys/syscallsubr.h>
60 #include <sys/sysent.h>
61 #include <sys/sysproto.h>
62 #include <sys/ucontext.h>
63 #include <sys/vdso.h>
64 
65 #include <vm/vm.h>
66 #include <vm/vm_kern.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_page.h>
69 #include <vm/pmap.h>
70 #include <vm/vm_map.h>
71 #include <vm/vm_pager.h>
72 
73 #include <machine/armreg.h>
74 #include <machine/cpu.h>
75 #include <machine/debug_monitor.h>
76 #include <machine/kdb.h>
77 #include <machine/machdep.h>
78 #include <machine/metadata.h>
79 #include <machine/md_var.h>
80 #include <machine/pcb.h>
81 #include <machine/reg.h>
82 #include <machine/undefined.h>
83 #include <machine/vmparam.h>
84 
85 #include <arm/include/physmem.h>
86 
87 #ifdef VFP
88 #include <machine/vfp.h>
89 #endif
90 
91 #ifdef DEV_ACPI
92 #include <contrib/dev/acpica/include/acpi.h>
93 #include <machine/acpica_machdep.h>
94 #endif
95 
96 #ifdef FDT
97 #include <dev/fdt/fdt_common.h>
98 #include <dev/ofw/openfirm.h>
99 #endif
100 
101 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
102 static void set_fpcontext(struct thread *td, mcontext_t *mcp);
103 
104 enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;
105 
106 struct pcpu __pcpu[MAXCPU];
107 
108 static struct trapframe proc0_tf;
109 
110 int early_boot = 1;
111 int cold = 1;
112 static int boot_el;
113 
114 struct kva_md_info kmi;
115 
116 int64_t dczva_line_size;	/* The size of the cache line that dc zva zeroes */
117 int has_pan;
118 
119 /*
120  * Physical address of the EFI System Table. Stashed from the metadata hints
121  * passed into the kernel and used by the EFI code to call runtime services.
122  */
123 vm_paddr_t efi_systbl_phys;
124 
125 /* pagezero_* implementations are provided in support.S */
126 void pagezero_simple(void *);
127 void pagezero_cache(void *);
128 
129 /* pagezero_simple is default pagezero */
130 void (*pagezero)(void *p) = pagezero_simple;
131 
132 static void
133 pan_setup(void)
134 {
135 	uint64_t id_aa64mmfr1;
136 
137 	id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
138 	if (ID_AA64MMFR1_PAN_VAL(id_aa64mmfr1) != ID_AA64MMFR1_PAN_NONE)
139 		has_pan = 1;
140 }
141 
142 void
143 pan_enable(void)
144 {
145 
146 	/*
147 	 * The LLVM integrated assembler doesn't understand the PAN
148 	 * PSTATE field. Because of this we need to manually create
149 	 * the instruction in an asm block. This is equivalent to:
150 	 * msr pan, #1
151 	 *
152 	 * This sets the PAN bit, which stops the kernel from accessing
153 	 * memory that userspace can also access, unless the kernel uses
154 	 * the unprivileged (userspace) load/store instructions.
155 	 */
156 	if (has_pan) {
157 		WRITE_SPECIALREG(sctlr_el1,
158 		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
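		/*
		 * Note on the encoding: in MSR (immediate), op1 (bits
		 * [18:16]) = 0 and op2 (bits [7:5]) = 4 select the PAN
		 * field, and CRm (bits [11:8]) holds the immediate, so
		 * OR-ing 0x1 << 8 into 0xd500409f yields "msr pan, #1".
		 */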
159 		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
160 	}
161 }
162 
163 bool
164 has_hyp(void)
165 {
166 
167 	return (boot_el == 2);
168 }
169 
170 static void
171 cpu_startup(void *dummy)
172 {
173 
174 	undef_init();
175 	identify_cpu();
176 	install_cpu_errata();
177 
178 	vm_ksubmap_init(&kmi);
179 	bufinit();
180 	vm_pager_bufferinit();
181 }
182 
183 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
184 
185 int
186 cpu_idle_wakeup(int cpu)
187 {
188 
189 	return (0);
190 }
191 
192 int
193 fill_regs(struct thread *td, struct reg *regs)
194 {
195 	struct trapframe *frame;
196 
197 	frame = td->td_frame;
198 	regs->sp = frame->tf_sp;
199 	regs->lr = frame->tf_lr;
200 	regs->elr = frame->tf_elr;
201 	regs->spsr = frame->tf_spsr;
202 
203 	memcpy(regs->x, frame->tf_x, sizeof(regs->x));
204 
205 #ifdef COMPAT_FREEBSD32
206 	/*
207 	 * We may be called here for a 32-bit process if we're using a
208 	 * 64-bit debugger. If so, put PC and SPSR where it expects them.
209 	 */
210 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
211 		regs->x[15] = frame->tf_elr;
212 		regs->x[16] = frame->tf_spsr;
213 	}
214 #endif
215 	return (0);
216 }
217 
218 int
219 set_regs(struct thread *td, struct reg *regs)
220 {
221 	struct trapframe *frame;
222 
223 	frame = td->td_frame;
224 	frame->tf_sp = regs->sp;
225 	frame->tf_lr = regs->lr;
226 	frame->tf_elr = regs->elr;
227 	frame->tf_spsr &= ~PSR_FLAGS;
228 	frame->tf_spsr |= regs->spsr & PSR_FLAGS;
229 
230 	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
231 
232 #ifdef COMPAT_FREEBSD32
233 	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
234 		/*
235 		 * We may be called for a 32-bit process if we're using
236 		 * a 64-bit debugger. If so, get PC and SPSR from where
237 		 * it put them.
238 		 */
239 		frame->tf_elr = regs->x[15];
240 		frame->tf_spsr = regs->x[16] & PSR_FLAGS;
241 	}
242 #endif
243 	return (0);
244 }
245 
246 int
247 fill_fpregs(struct thread *td, struct fpreg *regs)
248 {
249 #ifdef VFP
250 	struct pcb *pcb;
251 
252 	pcb = td->td_pcb;
253 	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
254 		/*
255 		 * If we have just been running VFP instructions we will
256 		 * need to save the state to memcpy it below.
257 		 */
258 		if (td == curthread)
259 			vfp_save_state(td, pcb);
260 
261 		KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
262 		    ("Called fill_fpregs while the kernel is using the VFP"));
263 		memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
264 		    sizeof(regs->fp_q));
265 		regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
266 		regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
267 	} else
268 #endif
269 		memset(regs, 0, sizeof(*regs));
270 	return (0);
271 }
272 
273 int
274 set_fpregs(struct thread *td, struct fpreg *regs)
275 {
276 #ifdef VFP
277 	struct pcb *pcb;
278 
279 	pcb = td->td_pcb;
280 	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
281 	    ("Called set_fpregs while the kernel is using the VFP"));
282 	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
283 	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
284 	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
285 #endif
286 	return (0);
287 }
288 
289 int
290 fill_dbregs(struct thread *td, struct dbreg *regs)
291 {
292 	struct debug_monitor_state *monitor;
293 	int count, i;
294 	uint8_t debug_ver, nbkpts;
295 
296 	memset(regs, 0, sizeof(*regs));
297 
298 	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_DebugVer_SHIFT,
299 	    &debug_ver);
300 	extract_user_id_field(ID_AA64DFR0_EL1, ID_AA64DFR0_BRPs_SHIFT,
301 	    &nbkpts);
302 
303 	/*
304 	 * The BRPs field contains the number of breakpoints - 1. Armv8-A
305 	 * allows the hardware to provide 2-16 breakpoints, so this won't
306 	 * overflow an 8-bit value.
307 	 */
308 	count = nbkpts + 1;
309 
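	/*
	 * db_info packs the debug architecture version in bits [15:8]
	 * and the breakpoint count in bits [7:0].
	 */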
310 	regs->db_info = debug_ver;
311 	regs->db_info <<= 8;
312 	regs->db_info |= count;
313 
314 	monitor = &td->td_pcb->pcb_dbg_regs;
315 	if ((monitor->dbg_flags & DBGMON_ENABLED) != 0) {
316 		for (i = 0; i < count; i++) {
317 			regs->db_regs[i].dbr_addr = monitor->dbg_bvr[i];
318 			regs->db_regs[i].dbr_ctrl = monitor->dbg_bcr[i];
319 		}
320 	}
321 
322 	return (0);
323 }
324 
325 int
326 set_dbregs(struct thread *td, struct dbreg *regs)
327 {
328 	struct debug_monitor_state *monitor;
329 	int count;
330 	int i;
331 
332 	monitor = &td->td_pcb->pcb_dbg_regs;
333 	count = 0;
334 	monitor->dbg_enable_count = 0;
335 	for (i = 0; i < DBG_BRP_MAX; i++) {
336 		/* TODO: Check these values */
337 		monitor->dbg_bvr[i] = regs->db_regs[i].dbr_addr;
338 		monitor->dbg_bcr[i] = regs->db_regs[i].dbr_ctrl;
339 		if ((monitor->dbg_bcr[i] & 1) != 0)
340 			monitor->dbg_enable_count++;
341 	}
342 	if (monitor->dbg_enable_count > 0)
343 		monitor->dbg_flags |= DBGMON_ENABLED;
344 
345 	return (0);
346 }
347 
348 #ifdef COMPAT_FREEBSD32
349 int
350 fill_regs32(struct thread *td, struct reg32 *regs)
351 {
352 	int i;
353 	struct trapframe *tf;
354 
355 	tf = td->td_frame;
356 	for (i = 0; i < 13; i++)
357 		regs->r[i] = tf->tf_x[i];
358 	/* For arm32, SP is r13 and LR is r14 */
359 	regs->r_sp = tf->tf_x[13];
360 	regs->r_lr = tf->tf_x[14];
361 	regs->r_pc = tf->tf_elr;
362 	regs->r_cpsr = tf->tf_spsr;
363 
364 	return (0);
365 }
366 
367 int
368 set_regs32(struct thread *td, struct reg32 *regs)
369 {
370 	int i;
371 	struct trapframe *tf;
372 
373 	tf = td->td_frame;
374 	for (i = 0; i < 13; i++)
375 		tf->tf_x[i] = regs->r[i];
376 	/* For arm32, SP is r13 and LR is r14 */
377 	tf->tf_x[13] = regs->r_sp;
378 	tf->tf_x[14] = regs->r_lr;
379 	tf->tf_elr = regs->r_pc;
380 	tf->tf_spsr = regs->r_cpsr;
381 
382 
383 	return (0);
384 }
385 
386 int
387 fill_fpregs32(struct thread *td, struct fpreg32 *regs)
388 {
389 
390 	printf("ARM64TODO: fill_fpregs32");
391 	return (EDOOFUS);
392 }
393 
394 int
395 set_fpregs32(struct thread *td, struct fpreg32 *regs)
396 {
397 
398 	printf("ARM64TODO: set_fpregs32");
399 	return (EDOOFUS);
400 }
401 
402 int
403 fill_dbregs32(struct thread *td, struct dbreg32 *regs)
404 {
405 
406 	printf("ARM64TODO: fill_dbregs32");
407 	return (EDOOFUS);
408 }
409 
410 int
411 set_dbregs32(struct thread *td, struct dbreg32 *regs)
412 {
413 
414 	printf("ARM64TODO: set_dbregs32");
415 	return (EDOOFUS);
416 }
417 #endif
418 
419 int
420 ptrace_set_pc(struct thread *td, u_long addr)
421 {
422 
423 	td->td_frame->tf_elr = addr;
424 	return (0);
425 }
426 
427 int
428 ptrace_single_step(struct thread *td)
429 {
430 
431 	td->td_frame->tf_spsr |= PSR_SS;
432 	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
433 	return (0);
434 }
435 
436 int
437 ptrace_clear_single_step(struct thread *td)
438 {
439 
440 	td->td_frame->tf_spsr &= ~PSR_SS;
441 	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
442 	return (0);
443 }
444 
445 void
446 exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
447 {
448 	struct trapframe *tf = td->td_frame;
449 
450 	memset(tf, 0, sizeof(struct trapframe));
451 
452 	tf->tf_x[0] = stack;
453 	tf->tf_sp = STACKALIGN(stack);
454 	tf->tf_lr = imgp->entry_addr;
455 	tf->tf_elr = imgp->entry_addr;
456 }
457 
458 /* Sanity check these are the same size; they will be memcpy'd to and fro */
459 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
460     sizeof((struct gpregs *)0)->gp_x);
461 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
462     sizeof((struct reg *)0)->x);
463 
464 int
465 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
466 {
467 	struct trapframe *tf = td->td_frame;
468 
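	/*
	 * When asked to clear the return values, make the saved context
	 * look like a successful syscall return: x0 becomes 0 and the
	 * carry flag, which the syscall return path uses to flag an
	 * error, is cleared.
	 */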
469 	if (clear_ret & GET_MC_CLEAR_RET) {
470 		mcp->mc_gpregs.gp_x[0] = 0;
471 		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
472 	} else {
473 		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
474 		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
475 	}
476 
477 	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
478 	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
479 
480 	mcp->mc_gpregs.gp_sp = tf->tf_sp;
481 	mcp->mc_gpregs.gp_lr = tf->tf_lr;
482 	mcp->mc_gpregs.gp_elr = tf->tf_elr;
483 	get_fpcontext(td, mcp);
484 
485 	return (0);
486 }
487 
488 int
489 set_mcontext(struct thread *td, mcontext_t *mcp)
490 {
491 	struct trapframe *tf = td->td_frame;
492 	uint32_t spsr;
493 
494 	spsr = mcp->mc_gpregs.gp_spsr;
495 	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
496 	    (spsr & PSR_AARCH32) != 0 ||
497 	    (spsr & PSR_DAIF) != (td->td_frame->tf_spsr & PSR_DAIF))
498 		return (EINVAL);
499 
500 	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
501 
502 	tf->tf_sp = mcp->mc_gpregs.gp_sp;
503 	tf->tf_lr = mcp->mc_gpregs.gp_lr;
504 	tf->tf_elr = mcp->mc_gpregs.gp_elr;
505 	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
506 	set_fpcontext(td, mcp);
507 
508 	return (0);
509 }
510 
511 static void
512 get_fpcontext(struct thread *td, mcontext_t *mcp)
513 {
514 #ifdef VFP
515 	struct pcb *curpcb;
516 
517 	critical_enter();
518 
519 	curpcb = curthread->td_pcb;
520 
521 	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
522 		/*
523 		 * If we have just been running VFP instructions we will
524 		 * need to save the state to memcpy it below.
525 		 */
526 		vfp_save_state(td, curpcb);
527 
528 		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
529 		    ("Called get_fpcontext while the kernel is using the VFP"));
530 		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
531 		    ("Non-userspace FPU flags set in get_fpcontext"));
532 		memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
533 		    sizeof(mcp->mc_fpregs));
534 		mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
535 		mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
536 		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
537 		mcp->mc_flags |= _MC_FP_VALID;
538 	}
539 
540 	critical_exit();
541 #endif
542 }
543 
544 static void
545 set_fpcontext(struct thread *td, mcontext_t *mcp)
546 {
547 #ifdef VFP
548 	struct pcb *curpcb;
549 
550 	critical_enter();
551 
552 	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
553 		curpcb = curthread->td_pcb;
554 
555 		/*
556 		 * Discard any VFP state for the current thread; we
557 		 * are about to override it.
558 		 */
559 		vfp_discard(td);
560 
561 		KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
562 		    ("Called set_fpcontext while the kernel is using the VFP"));
563 		memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
564 		    sizeof(mcp->mc_fpregs));
565 		curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
566 		curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
567 		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
568 	}
569 
570 	critical_exit();
571 #endif
572 }
573 
574 void
575 cpu_idle(int busy)
576 {
577 
578 	spinlock_enter();
579 	if (!busy)
580 		cpu_idleclock();
581 	if (!sched_runnable())
582 		__asm __volatile(
583 		    "dsb sy \n"
584 		    "wfi    \n");
585 	if (!busy)
586 		cpu_activeclock();
587 	spinlock_exit();
588 }
589 
590 void
591 cpu_halt(void)
592 {
593 
594 	/* We should have shut down by now; if not, enter a low-power sleep */
595 	intr_disable();
596 	while (1) {
597 		__asm __volatile("wfi");
598 	}
599 }
600 
601 /*
602  * Flush the D-cache for non-DMA I/O so that the I-cache can
603  * be made coherent later.
604  */
605 void
606 cpu_flush_dcache(void *ptr, size_t len)
607 {
608 
609 	/* ARM64TODO TBD */
610 }
611 
612 /* Get current clock frequency for the given CPU ID. */
613 int
614 cpu_est_clockrate(int cpu_id, uint64_t *rate)
615 {
616 	struct pcpu *pc;
617 
618 	pc = pcpu_find(cpu_id);
619 	if (pc == NULL || rate == NULL)
620 		return (EINVAL);
621 
622 	if (pc->pc_clock == 0)
623 		return (EOPNOTSUPP);
624 
625 	*rate = pc->pc_clock;
626 	return (0);
627 }
628 
629 void
630 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
631 {
632 
633 	pcpu->pc_acpi_id = 0xffffffff;
634 }
635 
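/*
 * Spinlock sections nest: only the outermost spinlock_enter() saves the
 * DAIF state and disables interrupts, and spinlock_exit() only restores
 * that state once the per-thread count drops back to zero.
 */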
636 void
637 spinlock_enter(void)
638 {
639 	struct thread *td;
640 	register_t daif;
641 
642 	td = curthread;
643 	if (td->td_md.md_spinlock_count == 0) {
644 		daif = intr_disable();
645 		td->td_md.md_spinlock_count = 1;
646 		td->td_md.md_saved_daif = daif;
647 		critical_enter();
648 	} else
649 		td->td_md.md_spinlock_count++;
650 }
651 
652 void
653 spinlock_exit(void)
654 {
655 	struct thread *td;
656 	register_t daif;
657 
658 	td = curthread;
659 	daif = td->td_md.md_saved_daif;
660 	td->td_md.md_spinlock_count--;
661 	if (td->td_md.md_spinlock_count == 0) {
662 		critical_exit();
663 		intr_restore(daif);
664 	}
665 }
666 
667 #ifndef	_SYS_SYSPROTO_H_
668 struct sigreturn_args {
669 	ucontext_t *sigcntxp;
670 };
671 #endif
672 
673 int
674 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
675 {
676 	ucontext_t uc;
677 	int error;
678 
679 	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
680 		return (EFAULT);
681 
682 	error = set_mcontext(td, &uc.uc_mcontext);
683 	if (error != 0)
684 		return (error);
685 
686 	/* Restore signal mask. */
687 	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
688 
689 	return (EJUSTRETURN);
690 }
691 
692 /*
693  * Construct a PCB from a trapframe. This is called from kdb_trap() where
694  * we want to start a backtrace from the function that caused us to enter
695  * the debugger. We have the context in the trapframe, but base the trace
696  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
697  * enough for a backtrace.
698  */
699 void
700 makectx(struct trapframe *tf, struct pcb *pcb)
701 {
702 	int i;
703 
704 	for (i = 0; i < PCB_LR; i++)
705 		pcb->pcb_x[i] = tf->tf_x[i];
706 
707 	pcb->pcb_x[PCB_LR] = tf->tf_lr;
708 	pcb->pcb_pc = tf->tf_elr;
709 	pcb->pcb_sp = tf->tf_sp;
710 }
711 
712 void
713 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
714 {
715 	struct thread *td;
716 	struct proc *p;
717 	struct trapframe *tf;
718 	struct sigframe *fp, frame;
719 	struct sigacts *psp;
720 	struct sysentvec *sysent;
721 	int onstack, sig;
722 
723 	td = curthread;
724 	p = td->td_proc;
725 	PROC_LOCK_ASSERT(p, MA_OWNED);
726 
727 	sig = ksi->ksi_signo;
728 	psp = p->p_sigacts;
729 	mtx_assert(&psp->ps_mtx, MA_OWNED);
730 
731 	tf = td->td_frame;
732 	onstack = sigonstack(tf->tf_sp);
733 
734 	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
735 	    catcher, sig);
736 
737 	/* Allocate and validate space for the signal handler context. */
738 	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
739 	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
740 		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
741 		    td->td_sigstk.ss_size);
742 #if defined(COMPAT_43)
743 		td->td_sigstk.ss_flags |= SS_ONSTACK;
744 #endif
745 	} else {
746 		fp = (struct sigframe *)td->td_frame->tf_sp;
747 	}
748 
749 	/* Make room, keeping the stack aligned */
750 	fp--;
751 	fp = (struct sigframe *)STACKALIGN(fp);
752 
753 	/* Fill in the frame to copy out */
754 	bzero(&frame, sizeof(frame));
755 	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
756 	frame.sf_si = ksi->ksi_info;
757 	frame.sf_uc.uc_sigmask = *mask;
758 	frame.sf_uc.uc_stack = td->td_sigstk;
759 	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
760 	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
761 	mtx_unlock(&psp->ps_mtx);
762 	PROC_UNLOCK(td->td_proc);
763 
764 	/* Copy the sigframe out to the user's stack. */
765 	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
766 		/* Process has trashed its stack. Kill it. */
767 		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
768 		PROC_LOCK(p);
769 		sigexit(td, SIGILL);
770 	}
771 
772 	tf->tf_x[0] = sig;
773 	tf->tf_x[1] = (register_t)&fp->sf_si;
774 	tf->tf_x[2] = (register_t)&fp->sf_uc;
775 
776 	tf->tf_elr = (register_t)catcher;
777 	tf->tf_sp = (register_t)fp;
778 	sysent = p->p_sysent;
779 	if (sysent->sv_sigcode_base != 0)
780 		tf->tf_lr = (register_t)sysent->sv_sigcode_base;
781 	else
782 		tf->tf_lr = (register_t)(sysent->sv_psstrings -
783 		    *(sysent->sv_szsigcode));
784 
785 	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
786 	    tf->tf_sp);
787 
788 	PROC_LOCK(p);
789 	mtx_lock(&psp->ps_mtx);
790 }
791 
792 static void
793 init_proc0(vm_offset_t kstack)
794 {
795 	struct pcpu *pcpup = &__pcpu[0];
796 
797 	proc_linkup0(&proc0, &thread0);
798 	thread0.td_kstack = kstack;
799 	thread0.td_kstack_pages = KSTACK_PAGES;
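	/* The pcb is carved out of the top of the kernel stack. */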
800 	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
801 	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
802 	thread0.td_pcb->pcb_fpflags = 0;
803 	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
804 	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
805 	thread0.td_frame = &proc0_tf;
806 	pcpup->pc_curpcb = thread0.td_pcb;
807 }
808 
809 typedef struct {
810 	uint32_t type;
811 	uint64_t phys_start;
812 	uint64_t virt_start;
813 	uint64_t num_pages;
814 	uint64_t attr;
815 } EFI_MEMORY_DESCRIPTOR;
816 
817 typedef void (*efi_map_entry_cb)(struct efi_md *);
818 
819 static void
820 foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
821 {
822 	struct efi_md *map, *p;
823 	size_t efisz;
824 	int ndesc, i;
825 
826 	/*
827 	 * Memory map data provided by UEFI via the GetMemoryMap
828 	 * Boot Services API.
829 	 */
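	/*
	 * The descriptor array follows the header, rounded up to a
	 * 16-byte boundary.
	 */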
830 	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
831 	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
832 
833 	if (efihdr->descriptor_size == 0)
834 		return;
835 	ndesc = efihdr->memory_size / efihdr->descriptor_size;
836 
837 	for (i = 0, p = map; i < ndesc; i++,
838 	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
839 		cb(p);
840 	}
841 }
842 
843 static void
844 exclude_efi_map_entry(struct efi_md *p)
845 {
846 
847 	switch (p->md_type) {
848 	case EFI_MD_TYPE_CODE:
849 	case EFI_MD_TYPE_DATA:
850 	case EFI_MD_TYPE_BS_CODE:
851 	case EFI_MD_TYPE_BS_DATA:
852 	case EFI_MD_TYPE_FREE:
853 		/*
854 		 * We're allowed to use any entry with these types.
855 		 */
856 		break;
857 	default:
858 		arm_physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE,
859 		    EXFLAG_NOALLOC);
860 	}
861 }
862 
863 static void
864 exclude_efi_map_entries(struct efi_map_header *efihdr)
865 {
866 
867 	foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
868 }
869 
870 static void
871 add_efi_map_entry(struct efi_md *p)
872 {
873 
874 	switch (p->md_type) {
875 	case EFI_MD_TYPE_RT_DATA:
876 		/*
877 		 * Runtime data will be excluded after the DMAP
878 		 * region is created to stop it from being added
879 		 * to phys_avail.
880 		 */
881 	case EFI_MD_TYPE_CODE:
882 	case EFI_MD_TYPE_DATA:
883 	case EFI_MD_TYPE_BS_CODE:
884 	case EFI_MD_TYPE_BS_DATA:
885 	case EFI_MD_TYPE_FREE:
886 		/*
887 		 * We're allowed to use any entry with these types.
888 		 */
889 		arm_physmem_hardware_region(p->md_phys,
890 		    p->md_pages * PAGE_SIZE);
891 		break;
892 	}
893 }
894 
895 static void
896 add_efi_map_entries(struct efi_map_header *efihdr)
897 {
898 
899 	foreach_efi_map_entry(efihdr, add_efi_map_entry);
900 }
901 
902 static void
903 print_efi_map_entry(struct efi_md *p)
904 {
905 	const char *type;
906 	static const char *types[] = {
907 		"Reserved",
908 		"LoaderCode",
909 		"LoaderData",
910 		"BootServicesCode",
911 		"BootServicesData",
912 		"RuntimeServicesCode",
913 		"RuntimeServicesData",
914 		"ConventionalMemory",
915 		"UnusableMemory",
916 		"ACPIReclaimMemory",
917 		"ACPIMemoryNVS",
918 		"MemoryMappedIO",
919 		"MemoryMappedIOPortSpace",
920 		"PalCode",
921 		"PersistentMemory"
922 	};
923 
924 	if (p->md_type < nitems(types))
925 		type = types[p->md_type];
926 	else
927 		type = "<INVALID>";
928 	printf("%23s %012lx %12p %08lx ", type, p->md_phys,
929 	    p->md_virt, p->md_pages);
930 	if (p->md_attr & EFI_MD_ATTR_UC)
931 		printf("UC ");
932 	if (p->md_attr & EFI_MD_ATTR_WC)
933 		printf("WC ");
934 	if (p->md_attr & EFI_MD_ATTR_WT)
935 		printf("WT ");
936 	if (p->md_attr & EFI_MD_ATTR_WB)
937 		printf("WB ");
938 	if (p->md_attr & EFI_MD_ATTR_UCE)
939 		printf("UCE ");
940 	if (p->md_attr & EFI_MD_ATTR_WP)
941 		printf("WP ");
942 	if (p->md_attr & EFI_MD_ATTR_RP)
943 		printf("RP ");
944 	if (p->md_attr & EFI_MD_ATTR_XP)
945 		printf("XP ");
946 	if (p->md_attr & EFI_MD_ATTR_NV)
947 		printf("NV ");
948 	if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
949 		printf("MORE_RELIABLE ");
950 	if (p->md_attr & EFI_MD_ATTR_RO)
951 		printf("RO ");
952 	if (p->md_attr & EFI_MD_ATTR_RT)
953 		printf("RUNTIME");
954 	printf("\n");
955 }
956 
957 static void
958 print_efi_map_entries(struct efi_map_header *efihdr)
959 {
960 
961 	printf("%23s %12s %12s %8s %4s\n",
962 	    "Type", "Physical", "Virtual", "#Pages", "Attr");
963 	foreach_efi_map_entry(efihdr, print_efi_map_entry);
964 }
965 
966 #ifdef FDT
967 static void
968 try_load_dtb(caddr_t kmdp)
969 {
970 	vm_offset_t dtbp;
971 
972 	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
973 #if defined(FDT_DTB_STATIC)
974 	/*
975 	 * In case the device tree blob was not retrieved (from metadata) try
976 	 * to use the statically embedded one.
977 	 */
978 	if (dtbp == 0)
979 		dtbp = (vm_offset_t)&fdt_static_dtb;
980 #endif
981 
982 	if (dtbp == (vm_offset_t)NULL) {
983 		printf("ERROR loading DTB\n");
984 		return;
985 	}
986 
987 	if (OF_install(OFW_FDT, 0) == FALSE)
988 		panic("Cannot install FDT");
989 
990 	if (OF_init((void *)dtbp) != 0)
991 		panic("OF_init failed with the found device tree");
992 
993 	parse_fdt_bootargs();
994 }
995 #endif
996 
997 static bool
998 bus_probe(void)
999 {
1000 	bool has_acpi, has_fdt;
1001 	char *order, *env;
1002 
1003 	has_acpi = has_fdt = false;
1004 
1005 #ifdef FDT
1006 	has_fdt = (OF_peer(0) != 0);
1007 #endif
1008 #ifdef DEV_ACPI
1009 	has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
1010 #endif
1011 
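	/*
	 * The probe order can be overridden from the loader, e.g. by
	 * setting kern.cfg.order="acpi,fdt" in loader.conf; entries are
	 * tried in the order listed and the first supported one wins.
	 */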
1012 	env = kern_getenv("kern.cfg.order");
1013 	if (env != NULL) {
1014 		order = env;
1015 		while (order != NULL) {
1016 			if (has_acpi &&
1017 			    strncmp(order, "acpi", 4) == 0 &&
1018 			    (order[4] == ',' || order[4] == '\0')) {
1019 				arm64_bus_method = ARM64_BUS_ACPI;
1020 				break;
1021 			}
1022 			if (has_fdt &&
1023 			    strncmp(order, "fdt", 3) == 0 &&
1024 			    (order[3] == ',' || order[3] == '\0')) {
1025 				arm64_bus_method = ARM64_BUS_FDT;
1026 				break;
1027 			}
1028 			order = strchr(order, ',');
1029 		}
1030 		freeenv(env);
1031 
1032 		/* If we set the bus method it is valid */
1033 		if (arm64_bus_method != ARM64_BUS_NONE)
1034 			return (true);
1035 	}
1036 	/* If no order or an invalid order was set, use the default */
1037 	if (arm64_bus_method == ARM64_BUS_NONE) {
1038 		if (has_fdt)
1039 			arm64_bus_method = ARM64_BUS_FDT;
1040 		else if (has_acpi)
1041 			arm64_bus_method = ARM64_BUS_ACPI;
1042 	}
1043 
1044 	/*
1045 	 * If no option was set, the default is valid. Otherwise we have
1046 	 * picked one anyway so that cninit() works, and the caller will
1047 	 * panic to tell the user about the invalid bus setup.
1048 	 */
1049 	return (env == NULL);
1050 }
1051 
1052 static void
1053 cache_setup(void)
1054 {
1055 	int dczva_line_shift;
1056 	uint32_t dczid_el0;
1057 
1058 	identify_cache(READ_SPECIALREG(ctr_el0));
1059 
1060 	dczid_el0 = READ_SPECIALREG(dczid_el0);
1061 
1062 	/* Check if dc zva is not prohibited */
1063 	if (dczid_el0 & DCZID_DZP)
1064 		dczva_line_size = 0;
1065 	else {
1066 		/* The zero block is 2^BS words, i.e. sizeof(int) << BS bytes */
1067 		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
1068 		dczva_line_size = sizeof(int) << dczva_line_shift;
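		/* e.g. BS == 4 gives the common 64-byte zero block */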
1069 
1070 		/* Change pagezero function */
1071 		pagezero = pagezero_cache;
1072 	}
1073 }
1074 
1075 void
1076 initarm(struct arm64_bootparams *abp)
1077 {
1078 	struct efi_fb *efifb;
1079 	struct efi_map_header *efihdr;
1080 	struct pcpu *pcpup;
1081 	char *env;
1082 #ifdef FDT
1083 	struct mem_region mem_regions[FDT_MEM_REGIONS];
1084 	int mem_regions_sz;
1085 #endif
1086 	vm_offset_t lastaddr;
1087 	caddr_t kmdp;
1088 	bool valid;
1089 
1090 	boot_el = abp->boot_el;
1091 
1092 	/* Parse loader or FDT boot parameters. Determine last used address. */
1093 	lastaddr = parse_boot_param(abp);
1094 
1095 	/* Find the kernel address */
1096 	kmdp = preload_search_by_type("elf kernel");
1097 	if (kmdp == NULL)
1098 		kmdp = preload_search_by_type("elf64 kernel");
1099 
1100 	link_elf_ireloc(kmdp);
1101 	try_load_dtb(kmdp);
1102 
1103 	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);
1104 
1105 	/* Load the physical memory ranges */
1106 	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
1107 	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
1108 	if (efihdr != NULL)
1109 		add_efi_map_entries(efihdr);
1110 #ifdef FDT
1111 	else {
1112 		/* Grab physical memory region information from the device tree. */
1113 		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
1114 		    NULL) != 0)
1115 			panic("Cannot get physical memory regions");
1116 		arm_physmem_hardware_regions(mem_regions, mem_regions_sz);
1117 	}
1118 	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
1119 		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
1120 		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
1121 #endif
1122 
1123 	/* Exclude the EFI framebuffer from our view of physical memory. */
1124 	efifb = (struct efi_fb *)preload_search_info(kmdp,
1125 	    MODINFO_METADATA | MODINFOMD_EFI_FB);
1126 	if (efifb != NULL)
1127 		arm_physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
1128 		    EXFLAG_NOALLOC);
1129 
1130 	/* Set the pcpu data; this is needed by pmap_bootstrap */
1131 	pcpup = &__pcpu[0];
1132 	pcpu_init(pcpup, 0, sizeof(struct pcpu));
1133 
1134 	/*
1135 	 * Set the pcpu pointer with a backup in tpidr_el1 to be
1136 	 * loaded when entering the kernel from userland.
1137 	 */
1138 	__asm __volatile(
1139 	    "mov x18, %0 \n"
1140 	    "msr tpidr_el1, %0" :: "r"(pcpup));
1141 
1142 	PCPU_SET(curthread, &thread0);
1143 
1144 	/* Do basic tuning, hz etc */
1145 	init_param1();
1146 
1147 	cache_setup();
1148 	pan_setup();
1149 
1150 	/* Bootstrap enough of pmap to enter the kernel proper */
1151 	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
1152 	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
1153 	/* Exclude entries needed in the DMAP region, but not phys_avail */
1154 	if (efihdr != NULL)
1155 		exclude_efi_map_entries(efihdr);
1156 	arm_physmem_init_kernel_globals();
1157 
1158 	devmap_bootstrap(0, NULL);
1159 
1160 	valid = bus_probe();
1161 
1162 	cninit();
1163 
1164 	if (!valid)
1165 		panic("Invalid bus configuration: %s",
1166 		    kern_getenv("kern.cfg.order"));
1167 
1168 	init_proc0(abp->kern_stack);
1169 	msgbufinit(msgbufp, msgbufsize);
1170 	mutex_init();
1171 	init_param2(physmem);
1172 
1173 	dbg_init();
1174 	kdb_init();
1175 	pan_enable();
1176 
1177 	kcsan_cpu_init(0);
1178 
1179 	env = kern_getenv("kernelname");
1180 	if (env != NULL)
1181 		strlcpy(kernelname, env, sizeof(kernelname));
1182 
1183 	if (boothowto & RB_VERBOSE) {
1184 		print_efi_map_entries(efihdr);
1185 		arm_physmem_print_tables();
1186 	}
1187 
1188 	early_boot = 0;
1189 }
1190 
1191 void
1192 dbg_init(void)
1193 {
1194 
1195 	/* Clear OS lock */
1196 	WRITE_SPECIALREG(oslar_el1, 0);
1197 
1198 	/* This permits DDB to use debug registers for watchpoints. */
1199 	dbg_monitor_init();
1200 
1201 	/* TODO: Eventually will need to initialize debug registers here. */
1202 }
1203 
1204 #ifdef DDB
1205 #include <ddb/ddb.h>
1206 
1207 DB_SHOW_COMMAND(specialregs, db_show_spregs)
1208 {
1209 #define	PRINT_REG(reg)	\
1210     db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))
1211 
1212 	PRINT_REG(actlr_el1);
1213 	PRINT_REG(afsr0_el1);
1214 	PRINT_REG(afsr1_el1);
1215 	PRINT_REG(aidr_el1);
1216 	PRINT_REG(amair_el1);
1217 	PRINT_REG(ccsidr_el1);
1218 	PRINT_REG(clidr_el1);
1219 	PRINT_REG(contextidr_el1);
1220 	PRINT_REG(cpacr_el1);
1221 	PRINT_REG(csselr_el1);
1222 	PRINT_REG(ctr_el0);
1223 	PRINT_REG(currentel);
1224 	PRINT_REG(daif);
1225 	PRINT_REG(dczid_el0);
1226 	PRINT_REG(elr_el1);
1227 	PRINT_REG(esr_el1);
1228 	PRINT_REG(far_el1);
1229 #if 0
1230 	/* ARM64TODO: Enable VFP before reading floating-point registers */
1231 	PRINT_REG(fpcr);
1232 	PRINT_REG(fpsr);
1233 #endif
1234 	PRINT_REG(id_aa64afr0_el1);
1235 	PRINT_REG(id_aa64afr1_el1);
1236 	PRINT_REG(id_aa64dfr0_el1);
1237 	PRINT_REG(id_aa64dfr1_el1);
1238 	PRINT_REG(id_aa64isar0_el1);
1239 	PRINT_REG(id_aa64isar1_el1);
1240 	PRINT_REG(id_aa64pfr0_el1);
1241 	PRINT_REG(id_aa64pfr1_el1);
1242 	PRINT_REG(id_afr0_el1);
1243 	PRINT_REG(id_dfr0_el1);
1244 	PRINT_REG(id_isar0_el1);
1245 	PRINT_REG(id_isar1_el1);
1246 	PRINT_REG(id_isar2_el1);
1247 	PRINT_REG(id_isar3_el1);
1248 	PRINT_REG(id_isar4_el1);
1249 	PRINT_REG(id_isar5_el1);
1250 	PRINT_REG(id_mmfr0_el1);
1251 	PRINT_REG(id_mmfr1_el1);
1252 	PRINT_REG(id_mmfr2_el1);
1253 	PRINT_REG(id_mmfr3_el1);
1254 #if 0
1255 	/* Missing from llvm */
1256 	PRINT_REG(id_mmfr4_el1);
1257 #endif
1258 	PRINT_REG(id_pfr0_el1);
1259 	PRINT_REG(id_pfr1_el1);
1260 	PRINT_REG(isr_el1);
1261 	PRINT_REG(mair_el1);
1262 	PRINT_REG(midr_el1);
1263 	PRINT_REG(mpidr_el1);
1264 	PRINT_REG(mvfr0_el1);
1265 	PRINT_REG(mvfr1_el1);
1266 	PRINT_REG(mvfr2_el1);
1267 	PRINT_REG(revidr_el1);
1268 	PRINT_REG(sctlr_el1);
1269 	PRINT_REG(sp_el0);
1270 	PRINT_REG(spsel);
1271 	PRINT_REG(spsr_el1);
1272 	PRINT_REG(tcr_el1);
1273 	PRINT_REG(tpidr_el0);
1274 	PRINT_REG(tpidr_el1);
1275 	PRINT_REG(tpidrro_el0);
1276 	PRINT_REG(ttbr0_el1);
1277 	PRINT_REG(ttbr1_el1);
1278 	PRINT_REG(vbar_el1);
1279 #undef PRINT_REG
1280 }
1281 
1282 DB_SHOW_COMMAND(vtop, db_show_vtop)
1283 {
1284 	uint64_t phys;
1285 
1286 	if (have_addr) {
1287 		phys = arm64_address_translate_s1e1r(addr);
1288 		db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
1289 		phys = arm64_address_translate_s1e1w(addr);
1290 		db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
1291 		phys = arm64_address_translate_s1e0r(addr);
1292 		db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
1293 		phys = arm64_address_translate_s1e0w(addr);
1294 		db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
1295 	} else
1296 		db_printf("show vtop <virt_addr>\n");
1297 }
1298 #endif
1299