/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated	: 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

long realmem = 0;

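/*
 * Optional platform-supplied bulk copy/zero hooks.  A port (one with a
 * DMA engine, say) may point these at accelerated routines and advertise
 * the minimum request size worth offloading; generic code falls back to
 * the plain memcpy/bzero below those thresholds.  (Descriptive comment;
 * the exact consumers live in the MD support code.)
 */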
int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;
#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

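/*
 * Deliver a signal: build a sigframe (saved machine context plus siginfo)
 * on the user stack, or on the alternate signal stack if one is in use,
 * then rewrite the trapframe so the thread resumes in the handler with
 * (sig, &siginfo, &ucontext) in r0-r2 and the return path routed through
 * the signal trampoline.
 */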
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	/* Set ss_flags after the structure copy so it is not clobbered. */
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */

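/*
 * page0/page0_data form the template for the vector page: the exception
 * branch instructions followed by the literal pool they load from (both
 * defined in the MD assembly sources).
 */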
extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (unsigned int *)va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}

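/*
 * Late MD startup, run via the SYSINIT below: report memory, finish the
 * kernel VM submap setup, initialize the buffer cache, and wire up
 * thread0's PCB stack pointers and the user-readable thread-pointer page.
 */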
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif

	cpu_setup("");
	identify_arm_cpu();

	printf("real memory  = %ju (%ju MB)\n", (uintmax_t)ptoa(physmem),
	    (uintmax_t)ptoa(physmem) / 1048576);
	realmem = physmem;

	/*
	 * Display the RAM layout.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)ptoa(cnt.v_free_count),
	    (uintmax_t)ptoa(cnt.v_free_count) / 1048576);

	bufinit();
	vm_pager_bufferinit();
	pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
	    USPACE_UNDEF_STACK_TOP;
	pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	vector_page_setprot(VM_PROT_READ);
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
	pmap_postinit();
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

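/*
 * Idle the CPU between scheduler runs.  cpu_sleep() is the cpufunc hook
 * that drops the core into its low-power wait-for-interrupt state; the
 * argument is CPU-specific and unused (0) here.
 */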
void
cpu_idle(int busy)
{
	cpu_sleep(0);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

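/*
 * Register access for ptrace(2) and core dumps: translate between the
 * kernel trapframe layout and the exported struct reg.  The FP and debug
 * register sets are stubs on this port.
 */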
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
	bzero(regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}

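/*
 * Read or write one 32-bit word in the traced process's address space
 * via proc_rwmem(), used below to plant and remove breakpoints.
 */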
static int
ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t)v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;
	return (proc_rwmem(td->td_proc, &uio));
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t)&v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	return (proc_rwmem(td->td_proc, &uio));
}

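/*
 * ARM has no hardware single-step, so emulate it: save the instruction
 * following the current one and overwrite it with a breakpoint, to be
 * restored by ptrace_clear_single_step().  Note that this simple scheme
 * assumes straight-line execution; a branch at the current PC will not
 * hit the planted breakpoint.
 */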
int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error;

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);
	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
	    &td->td_md.md_ptrace_instr);
	if (error)
		goto out;
	error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
	    PTRACE_BREAKPOINT);
	if (error)
		td->td_md.md_ptrace_instr = 0;
	td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
out:
	PROC_LOCK(p);
	return (error);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	if (td->td_md.md_ptrace_instr) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}
	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_pc = addr;
	return (0);
}

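/* No machine-dependent per-CPU state to initialize on this port. */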
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

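/*
 * Spinlock enter/exit: disable IRQ and FIQ delivery around spinlock-held
 * sections, with a per-thread nesting count so that only the outermost
 * enter saves (and the outermost exit restores) the interrupt state.
 */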
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_cspr = disable_interrupts(I32_bit | F32_bit);
	td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(td->td_md.md_saved_cspr);
}

/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = entry;
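	/*
	 * Recognizable garbage in the SVC link register; any attempt to
	 * return through it should fault distinctively (assumed intent,
	 * the value is never a valid return address).
	 */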
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = entry;
	tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET)
		gr[_REG_R0] = 0;
	else
		gr[_REG_R0]   = tf->tf_r0;
	gr[_REG_R1]   = tf->tf_r1;
	gr[_REG_R2]   = tf->tf_r2;
	gr[_REG_R3]   = tf->tf_r3;
	gr[_REG_R4]   = tf->tf_r4;
	gr[_REG_R5]   = tf->tf_r5;
	gr[_REG_R6]   = tf->tf_r6;
	gr[_REG_R7]   = tf->tf_r7;
	gr[_REG_R8]   = tf->tf_r8;
	gr[_REG_R9]   = tf->tf_r9;
	gr[_REG_R10]  = tf->tf_r10;
	gr[_REG_R11]  = tf->tf_r11;
	gr[_REG_R12]  = tf->tf_r12;
	gr[_REG_SP]   = tf->tf_usr_sp;
	gr[_REG_LR]   = tf->tf_usr_lr;
	gr[_REG_PC]   = tf->tf_pc;
	gr[_REG_CPSR] = tf->tf_spsr;

	return (0);
}

/*
 * Set machine context.
 *
 * The register set, including the CPSR, is installed wholesale; callers
 * such as sigreturn() are expected to validate the PSR mode and
 * interrupt bits before letting a context reach here.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}

/*
 * MPSAFE
 */
int
sigreturn(struct thread *td, struct sigreturn_args /* {
	const struct __ucontext *sigcntxp;
} */ *uap)
{
	struct proc *p = td->td_proc;
	struct sigframe sf;
	struct trapframe *tf;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &sf, sizeof(sf)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = sf.sf_uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (I32_bit | F32_bit)) != 0)
		return (EINVAL);
	/* Restore register context. */
	tf = td->td_frame;
	set_mcontext(td, &sf.sf_uc.uc_mcontext);

	/* Restore signal mask. */
	PROC_LOCK(p);
	td->td_sigmask = sf.sf_uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->un_32.pcb32_r8 = tf->tf_r8;
	pcb->un_32.pcb32_r9 = tf->tf_r9;
	pcb->un_32.pcb32_r10 = tf->tf_r10;
	pcb->un_32.pcb32_r11 = tf->tf_r11;
	pcb->un_32.pcb32_r12 = tf->tf_r12;
	pcb->un_32.pcb32_pc = tf->tf_pc;
	pcb->un_32.pcb32_lr = tf->tf_usr_lr;
	pcb->un_32.pcb32_sp = tf->tf_usr_sp;
}

/*
 * Fake up a boot descriptor table for when the kernel was started
 * without loader(8)-supplied preload metadata.
 */
vm_offset_t
fake_preload_metadata(void)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

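	/*
	 * Each record is a (MODINFO_* tag, byte length, payload) triple,
	 * laid out the way the preload_* helpers in subr_module.c expect.
	 * "elf kernel" (11 bytes with its NUL) occupies three words, hence
	 * the i += 2 after each strcpy.
	 */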
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		ksym_start = zstart;
		ksym_end = zend;
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	return (lastaddr);
}
697