/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.1 (Berkeley) 06/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <hp300/hp300/pte.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

	p2->p_md.md_regs = p1->p_md.md_regs;
	p2->p_md.md_flags = (p1->p_md.md_flags & ~(MDP_AST|MDP_HPUXTRACE));

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

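	/*
	 * Let the pmap module record the child's translation context
	 * in its pcb; the final argument of 0 says p2 is not the
	 * current process, so nothing is loaded into the MMU here and
	 * the context takes effect when the child is first switched in.
	 */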
	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call swtch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until swtch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

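	/*
	 * Block memory allocation (see the comment above) while we
	 * free the u. area we are still running on.
	 */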
	(void) splimp();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	swtch_exit();
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	int error;

#ifdef HPUXCOMPAT
	/*
	 * If we loaded from an HP-UX format binary file we dump enough
	 * of an HP-UX style user struct so that the HP-UX debuggers can
	 * grok it.
	 */
	if (p->p_md.md_flags & MDP_HPUX)
		return (hpuxdumpu(vp, cred));
#endif
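	/*
	 * Otherwise dump the entire u. area (user struct plus kernel
	 * stack, UPAGES pages) at the start of the core file.
	 */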
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL,
	    p));
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
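	/*
	 * For each page: copy the source PTE into the destination
	 * slot, invalidate the source PTE, and flush both the old
	 * and new virtual addresses from the translation buffer.
	 */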
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
	DCIS();
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
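	/*
	 * Build one PTE per page: valid bit, the caller's protection
	 * and cache-inhibit bits, and the physical page frame.
	 */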
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}

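/*
 * Undo a mapping established by physaccess: invalidate the PTEs
 * and flush the translation buffer.
 */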
physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;
	TBIAS();
}

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t pa;

	pa = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return((int)pa);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock, the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
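	/*
	 * For each page of the user buffer, look up its physical
	 * address in the process's pmap and enter a wired kernel
	 * mapping for it in phys_map.
	 */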
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
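	/*
	 * Release the kernel VA, waking up anyone sleeping in
	 * kmem_alloc_wait, and restore the buffer's original
	 * user address.
	 */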
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
277