/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 OMRON Corporation.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 * from: hp300/hp300/vm_machdep.c	7.14 (Berkeley) 12/27/92
 *
 *	@(#)vm_machdep.c	7.4 (Berkeley) 01/03/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <luna68k/luna68k/pte.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

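	/*
	 * The child starts with copies of the parent's saved-register
	 * pointer and machine-dependent flags; any pending AST or HP-UX
	 * trace bit is cleared so it does not carry over into the child.
	 */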
	p2->p_md.md_regs = p1->p_md.md_regs;
	p2->p_md.md_flags = (p1->p_md.md_flags & ~(MDP_AST|MDP_HPUXTRACE));

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

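	/*
	 * Record the new address space's segment table pointer in the
	 * child's pcb; the final argument of 0 means it is not loaded
	 * into the MMU now, only when the child is switched to.
	 */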
	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call swtch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until swtch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

	(void) splimp();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	swtch_exit();
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

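	/*
	 * The machine-dependent core header is simply the entire u. area:
	 * the user structure, pcb, and kernel stack written out in one chunk.
	 */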
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL,
	    p));
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
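		/*
		 * Move the mapping one page at a time: copy the source
		 * PTE to the destination, invalidate the source, and
		 * flush both translations from the ATC.
		 */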
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
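	/*
	 * Build a valid PTE for each page, or'ing in the caller-supplied
	 * protection and cache-inhibit bits, then flush the entire ATC.
	 */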
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}

physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;
	TBIAS();
}

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t pa;

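	/*
	 * pmap_extract returns the physical address, or 0 if the
	 * kernel virtual address is not currently mapped.
	 */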
	pa = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return((int)pa);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock, the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
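	/*
	 * For each page backing the user buffer, look up its physical
	 * address in the requesting process' map and enter a wired,
	 * read/write mapping for it in phys_map.
	 */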
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
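	/*
	 * kmem_free_wakeup releases the KVA and wakes anyone sleeping
	 * in kmem_alloc_wait for space in phys_map; the buffer's
	 * original user address is then restored.
	 */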
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}