/* xref: /minix/minix/servers/vm/pagefaults.c (revision 83133719) */

#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/vfsif.h>

#include <machine/vmparam.h>

#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <fcntl.h>
#include <signal.h>
#include <assert.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"
struct pf_state {
	endpoint_t ep;
	vir_bytes vaddr;
	u32_t err;
};

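/* State of an in-progress handle_memory_start() request, kept around while
 * the operation is suspended waiting for VFS. */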
struct hm_state {
	endpoint_t caller;	/* KERNEL or process? if NONE, no callback */
	endpoint_t requestor;	/* on behalf of whom? */
	int transid;		/* VFS transaction id if valid */
	struct vmproc *vmp;	/* target address space */
	vir_bytes mem, len;	/* memory range */
	int wrflag;		/* must the range be writable? */
	int valid;		/* sanity check */
	int vfs_avail;		/* may VFS be called to satisfy this range? */
#define VALID	0xc0ff1
};

static void handle_memory_continue(struct vmproc *vmp, message *m,
	void *arg, void *statearg);
static int handle_memory_step(struct hm_state *hmstate);
static void handle_memory_final(struct hm_state *state, int result);

/*===========================================================================*
 *				pf_errstr				     *
 *===========================================================================*/
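/* Render the page-fault error bits as readable text.  The result lives in a
 * static buffer that is overwritten by the next call. */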
char *pf_errstr(u32_t err)
{
	static char buf[100];

	sprintf(buf, "err 0x%lx ", (long)err);
	if(PFERR_NOPAGE(err)) strcat(buf, "nopage ");
	if(PFERR_PROT(err)) strcat(buf, "protection ");
	if(PFERR_WRITE(err)) strcat(buf, "write");
	if(PFERR_READ(err)) strcat(buf, "read");

	return buf;
}

static void pf_cont(struct vmproc *vmp, message *m, void *arg, void *statearg);

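/* Resolve a page fault for process 'ep' at virtual address 'addr'.  Invalid
 * or forbidden accesses get SIGSEGV and the fault is cleared.  On the first
 * attempt map_pf() may return SUSPEND after arranging a callback; 'retry' is
 * set on the second attempt, made from pf_cont(), which must complete without
 * suspending again.
 */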
static void handle_pagefault(endpoint_t ep, vir_bytes addr, u32_t err, int retry)
{
	struct vmproc *vmp;
	int s, result;
	struct vir_region *region;
	vir_bytes offset;
	int p, wr = PFERR_WRITE(err);
	int io = 0;

	if(vm_isokendpt(ep, &p) != OK)
		panic("handle_pagefault: endpoint wrong: %d", ep);

	vmp = &vmproc[p];
	assert(vmp->vm_flags & VMF_INUSE);

	/* See if address is valid at all. */
	if(!(region = map_lookup(vmp, addr, NULL))) {
		if(PFERR_PROT(err)) {
			printf("VM: pagefault: SIGSEGV %d protected addr 0x%lx; %s\n",
				ep, addr, pf_errstr(err));
		} else {
			assert(PFERR_NOPAGE(err));
			printf("VM: pagefault: SIGSEGV %d bad addr 0x%lx; %s\n",
					ep, addr, pf_errstr(err));
			sys_diagctl_stacktrace(ep);
		}
		if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("handle_pagefault: sys_vmctl failed: %d", s);
		return;
	}

	/* If the process was writing, check that the region is writable. */
	if(!(region->flags & VR_WRITABLE) && wr) {
		printf("VM: pagefault: SIGSEGV %d ro map 0x%lx %s\n",
				ep, addr, pf_errstr(err));
		if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("handle_pagefault: sys_vmctl failed: %d", s);
		return;
	}

	assert(addr >= region->vaddr);
	offset = addr - region->vaddr;

	/* Access is allowed; handle it. */
	if(retry) {
		result = map_pf(vmp, region, offset, wr, NULL, NULL, 0, &io);
		assert(result != SUSPEND);
	} else {
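		/* First attempt: save the fault parameters so that pf_cont()
		 * can retry once the callback handed to map_pf() fires. */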
		struct pf_state state;
		state.ep = ep;
		state.vaddr = addr;
		state.err = err;
		result = map_pf(vmp, region, offset, wr, pf_cont,
			&state, sizeof(state), &io);
	}
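	/* map_pf() reports through 'io' whether backing I/O was needed;
	 * account for the fault as major or minor accordingly. */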
	if (io)
		vmp->vm_major_page_fault++;
	else
		vmp->vm_minor_page_fault++;

	if(result == SUSPEND) {
		return;
	}

	if(result != OK) {
		printf("VM: pagefault: SIGSEGV %d pagefault not handled\n", ep);
		if((s=sys_kill(ep, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("handle_pagefault: sys_vmctl failed: %d", s);
		return;
	}

	pt_clearmapcache();

	/* Pagefault is handled, so now reactivate the process. */
	if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
		panic("handle_pagefault: sys_vmctl failed: %d", s);
}


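/* Continuation of a suspended page fault: retry it, unless the faulting
 * process has disappeared in the meantime. */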
static void pf_cont(struct vmproc *vmp, message *m,
	void *arg, void *statearg)
{
	struct pf_state *state = statearg;
	int p;
	if(vm_isokendpt(state->ep, &p) != OK) return;	/* signal */
	handle_pagefault(state->ep, state->vaddr, state->err, 1);
}
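/* Continuation of a suspended memory request: called when the reply to the
 * outstanding VFS request arrives.  Keep stepping through the range and
 * deliver the final result once it is fully handled or an error occurs. */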
static void handle_memory_continue(struct vmproc *vmp, message *m,
	void *arg, void *statearg)
{
	int r;
	struct hm_state *state = statearg;
	assert(state);
	assert(state->caller != NONE);
	assert(state->valid == VALID);

	if(m->VMV_RESULT != OK) {
		printf("VM: handle_memory_continue: vfs request failed\n");
		handle_memory_final(state, m->VMV_RESULT);
		return;
	}

	r = handle_memory_step(state);

	assert(state->valid == VALID);

	if(r == SUSPEND) {
		return;
	}

	assert(state->valid == VALID);

	handle_memory_final(state, r);
}
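/* Report the result of a memory request to its caller: the kernel is answered
 * with VMCTL_MEMREQ_REPLY, any other caller gets an asynchronous reply
 * message, tagged with the VFS transaction id if one was given. */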
static void handle_memory_final(struct hm_state *state, int result)
{
	int r;

	assert(state);
	assert(state->valid == VALID);

	if(state->caller == KERNEL) {
		if((r=sys_vmctl(state->requestor, VMCTL_MEMREQ_REPLY, result)) != OK)
			panic("handle_memory_final: sys_vmctl failed: %d", r);
	} else if(state->caller != NONE) {
		/* Send a reply msg */
		message msg;
		memset(&msg, 0, sizeof(msg));
		msg.m_type = result;

		if(IS_VFS_FS_TRANSID(state->transid)) {
			assert(state->caller == VFS_PROC_NR);
			/* If a transaction ID was set, put it back into the reply. */
			msg.m_type = TRNS_ADD_ID(msg.m_type, state->transid);
		}

		if(asynsend3(state->caller, &msg, 0) != OK) {
			panic("handle_memory_final: asynsend3 failed");
		}

		assert(state->valid == VALID);

		/* fail fast if anyone tries to access this state again */
		memset(state, 0, sizeof(*state));
	}
}

/*===========================================================================*
 *				do_pagefaults				     *
 *===========================================================================*/
void do_pagefaults(message *m)
{
	handle_pagefault(m->m_source, m->VPF_ADDR, m->VPF_FLAGS, 0);
}
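/* Handle a memory range in one go: there is no caller to suspend and VFS may
 * not be used, so the request cannot return SUSPEND. */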
int handle_memory_once(struct vmproc *vmp, vir_bytes mem, vir_bytes len,
	int wrflag)
{
	int r;
	r = handle_memory_start(vmp, mem, len, wrflag, NONE, NONE, 0, 0);
	assert(r != SUSPEND);
	return r;
}
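/* Start handling a memory range for 'vmp': page-align the range, then step
 * through it.  Returns SUSPEND if a VFS request had to be issued, in which
 * case handle_memory_continue() finishes the work and notifies the caller;
 * otherwise the caller (if any) is notified before returning. */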
int handle_memory_start(struct vmproc *vmp, vir_bytes mem, vir_bytes len,
	int wrflag, endpoint_t caller, endpoint_t requestor, int transid,
	int vfs_avail)
{
	int r;
	struct hm_state state;
	vir_bytes o;

	if((o = mem % PAGE_SIZE)) {
		mem -= o;
		len += o;
	}

	len = roundup(len, PAGE_SIZE);

	state.vmp = vmp;
	state.mem = mem;
	state.len = len;
	state.wrflag = wrflag;
	state.requestor = requestor;
	state.caller = caller;
	state.transid = transid;
	state.valid = VALID;
	state.vfs_avail = vfs_avail;

	r = handle_memory_step(&state);

	if(r == SUSPEND) {
		assert(caller != NONE);
		assert(vfs_avail);
	} else {
		handle_memory_final(&state, r);
	}

	return r;
}

/*===========================================================================*
 *				   do_memory				     *
 *===========================================================================*/
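/* Process the kernel's pending memory requests.  A VMPTYPE_CHECK request asks
 * VM to make a range of a process present; any other return value from
 * sys_vmctl_get_memreq() ends the loop. */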
void do_memory(void)
{
	endpoint_t who, who_s, requestor;
	vir_bytes mem, mem_s;
	vir_bytes len;
	int wrflag;

	while(1) {
		int p, r = OK;
		struct vmproc *vmp;

		r = sys_vmctl_get_memreq(&who, &mem, &len, &wrflag, &who_s,
			&mem_s, &requestor);

		switch(r) {
		case VMPTYPE_CHECK:
		{
			int transid = 0;
			int vfs_avail;

			if(vm_isokendpt(who, &p) != OK)
				panic("do_memory: bad endpoint: %d", who);
			vmp = &vmproc[p];

			assert(!IS_VFS_FS_TRANSID(transid));

			/* If the requestor is VFS itself, it is blocked and
			 * cannot be called to help satisfy this request. */
			if(requestor == VFS_PROC_NR) vfs_avail = 0;
			else vfs_avail = 1;

			handle_memory_start(vmp, mem, len, wrflag,
				KERNEL, requestor, transid, vfs_avail);

			break;
		}

		default:
			return;
		}
	}
}
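/* Walk the page-aligned range region by region and make its pages present.
 * Returns OK when the whole range has been handled, SUSPEND when a VFS
 * request was issued and handle_memory_continue() will take over, or an error
 * such as EFAULT for an invalid or read-only range. */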
static int handle_memory_step(struct hm_state *hmstate)
{
	struct vir_region *region;

	/* The memory range must already be page-aligned. */
	assert(hmstate);
	assert(hmstate->valid == VALID);
	assert(!(hmstate->mem % VM_PAGE_SIZE));
	assert(!(hmstate->len % VM_PAGE_SIZE));

	while(hmstate->len > 0) {
		int r;
		if(!(region = map_lookup(hmstate->vmp, hmstate->mem, NULL))) {
#if VERBOSE
			map_printmap(hmstate->vmp);
			printf("VM: do_memory: memory doesn't exist\n");
#endif
			return EFAULT;
		} else if(!(region->flags & VR_WRITABLE) && hmstate->wrflag) {
#if VERBOSE
			printf("VM: do_memory: write to unwritable map\n");
#endif
			return EFAULT;
		} else {
			vir_bytes offset, sublen;
			assert(region->vaddr <= hmstate->mem);
			assert(!(region->vaddr % VM_PAGE_SIZE));
			offset = hmstate->mem - region->vaddr;
			sublen = hmstate->len;
			if(offset + sublen > region->length)
				sublen = region->length - offset;

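			/* A file-mapped region may need VFS.  If VFS cannot be
			 * used, or there is no caller to suspend, handle this
			 * chunk without a callback (it must not suspend);
			 * otherwise pass handle_memory_continue() so the
			 * request can complete asynchronously. */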
			if((region->def_memtype == &mem_type_mappedfile &&
			  !hmstate->vfs_avail) || hmstate->caller == NONE) {
				r = map_handle_memory(hmstate->vmp, region, offset,
				   sublen, hmstate->wrflag, NULL, NULL, 0);
				assert(r != SUSPEND);
			} else {
				r = map_handle_memory(hmstate->vmp, region, offset,
				   sublen, hmstate->wrflag, handle_memory_continue,
				   hmstate, sizeof(*hmstate));
			}

			if(r != OK) return r;

			hmstate->len -= sublen;
			hmstate->mem += sublen;
		}
	}

	return OK;
}