/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 OMRON Corporation.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 * OMRON: $Id: vm_machdep.c,v 1.2 92/06/14 06:24:23 moti Exp $
 *
 * from: hp300/hp300/vm_machdep.c	7.12 (Berkeley) 6/5/92
 *
 *	@(#)vm_machdep.c	7.1 (Berkeley) 06/15/92
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#include "buf.h"
#include "vnode.h"
#include "user.h"

#include "../include/cpu.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "pte.h"

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

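	/*
	 * Record the child's address space in its pcb so the context
	 * switch code can load it when the child first runs; the final
	 * argument of 0 indicates p2 is not the process currently
	 * running on the CPU, so the MMU is not reloaded here.
	 */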
	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from savectx.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call swtch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until swtch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

	(void) splimp();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	swtch_exit();
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
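	/*
	 * The machine-dependent core header is simply the entire u. area:
	 * the user structure followed by the kernel stack.
	 */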

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL,
	    p));
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
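	/*
	 * Move each PTE from the source slot to the destination slot,
	 * invalidate the source mapping, and flush both translations
	 * from the address translation cache.
	 */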
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
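	/*
	 * Build a valid PTE for each physical page in turn, then flush
	 * the address translation cache so the new mappings take effect.
	 */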
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}

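/*
 * Undo a mapping established by physaccess: invalidate the PTEs
 * covering `size' bytes of kernel VA space at `vaddr' and flush
 * the address translation cache.
 */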
physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;
	TBIAS();
}

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address.
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock, the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
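	/*
	 * For each page of the user buffer, look up its physical address
	 * in the process's pmap and enter a wired mapping for it at the
	 * corresponding kernel virtual address in phys_map.
	 */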
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}