/* Test for sys_vumap() - by D.C. van Moolenbroek */
#include <minix/drivers.h>
#include <minix/ds.h>
#include <sys/mman.h>
#include <machine/vmparam.h>
#include <assert.h>

#include "com.h"

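/* Description of one test buffer. The 'pages' and 'flags' fields are set by
 * the test cases; 'addr' and 'phys' receive the buffer's virtual and physical
 * base addresses once the buffer has been allocated.
 */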
struct buf {
	int pages;
	int flags;
	vir_bytes addr;
	phys_bytes phys;
};
#define BUF_PREALLOC	0x1	/* if set, immediately allocate the page */
#define BUF_ADJACENT	0x2	/* virtually contiguous with the last buffer */

static unsigned int count = 0, failures = 0;

static int success;
static char *fail_file;
static int fail_line;

static int relay;
static endpoint_t endpt;

static int verbose;

static enum {
	GE_NONE,		/* no exception */
	GE_REVOKED,		/* revoked grant */
	GE_INVALID		/* invalid grant */
} grant_exception = GE_NONE;

static int grant_access = 0;

#define expect(r)	expect_f((r), __FILE__, __LINE__)

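/* Allocate memory for the given buffer at its preset virtual address. For
 * preallocated buffers, also look up the physical address, and make sure it
 * does not equal 'next', retrying once and panicking if the buffer still ends
 * up at that physical address.
 */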
static void alloc_buf(struct buf *buf, phys_bytes next)
{
	void *tmp = NULL;
	vir_bytes addr;
	size_t len;
	int r, prealloc, flags;

	/* is_allocated() cannot handle buffers that are not physically
	 * contiguous, and we cannot guarantee physical contiguity if not
	 * preallocating.
	 */
	assert((buf->flags & BUF_PREALLOC) || buf->pages == 1);

	len = buf->pages * PAGE_SIZE;
	prealloc = (buf->flags & BUF_PREALLOC);
	flags = MAP_ANON | (prealloc ? (MAP_CONTIG | MAP_PREALLOC) : 0);

	if (prealloc) {
		/* Allocate a same-sized piece of memory elsewhere, to make it
		 * very unlikely that the actual piece of memory will end up
		 * being physically contiguous with the last piece.
		 */
		tmp = mmap((void *) (buf->addr + len + PAGE_SIZE), len,
			PROT_READ | PROT_WRITE, MAP_ANON | MAP_PREALLOC |
			MAP_CONTIG, -1, 0L);

		if (tmp == MAP_FAILED)
			panic("unable to allocate temporary buffer");
	}

	addr = (vir_bytes) mmap((void *) buf->addr, len,
		PROT_READ | PROT_WRITE, flags, -1, 0L);

	if (addr != buf->addr)
		panic("unable to allocate buffer (2)");

	if (!prealloc)
		return;

	if ((r = munmap(tmp, len)) != OK)
		panic("unable to unmap buffer (%d)", errno);

	if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
		panic("unable to get physical address of buffer (%d)", r);

	if (buf->phys != next)
		return;

	if (verbose)
		printf("WARNING: alloc noncontiguous range, second try\n");

	/* Can't remap this to elsewhere, so we run the risk of allocating the
	 * exact same physically contiguous page again. However, now that we've
	 * unmapped the temporary memory also, there's a small chance we'll end
	 * up with a different physical page this time. Who knows.
	 */
	munmap((void *) addr, len);

	addr = (vir_bytes) mmap((void *) buf->addr, len,
		PROT_READ | PROT_WRITE, flags, -1, 0L);

	if (addr != buf->addr)
		panic("unable to allocate buffer, second try");

	if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
		panic("unable to get physical address of buffer (%d)", r);

	/* Still the same page? Screw it. */
	if (buf->phys == next)
		panic("unable to allocate noncontiguous range");
}

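/* Allocate 'count' buffers as described by the given array, assigning each
 * buffer a virtual address and, for preallocated buffers, a physical address.
 */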
static void alloc_bufs(struct buf *buf, int count)
{
	static vir_bytes base = 0x80000000L;
	phys_bytes next;
	int i;

	/* Allocate the given memory in virtually contiguous blocks whenever
	 * each next buffer is requested to be adjacent. Insert a virtual gap
	 * after each such block. Make sure that each two adjacent buffers in a
	 * block are physically non-contiguous.
	 */
	for (i = 0; i < count; i++) {
		if (i > 0 && (buf[i].flags & BUF_ADJACENT)) {
			next = buf[i-1].phys + buf[i-1].pages * PAGE_SIZE;
		} else {
			base += PAGE_SIZE * 16;
			next = 0L;
		}

		buf[i].addr = base;

		alloc_buf(&buf[i], next);

		base += buf[i].pages * PAGE_SIZE;
	}

#if DEBUG
	for (i = 0; i < count; i++)
		printf("Buf %d: %d pages, flags %x, vir %08x, phys %08x\n", i,
			buf[i].pages, buf[i].flags, buf[i].addr, buf[i].phys);
#endif
}

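/* Free the memory of the given buffers, unmapping them page by page. */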
static void free_bufs(struct buf *buf, int count)
{
	int i, j, r;

	for (i = 0; i < count; i++) {
		for (j = 0; j < buf[i].pages; j++) {
			r = munmap((void *) (buf[i].addr + j * PAGE_SIZE),
				PAGE_SIZE);

			if (r != OK)
				panic("unable to unmap range (%d)", errno);
		}
	}
}

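/* Return whether the given virtual memory range is backed by actual memory,
 * storing its physical base address in 'phys' if it is.
 */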
static int is_allocated(vir_bytes addr, size_t bytes, phys_bytes *phys)
{
	int r;

	/* This will have to do for now. Of course, we could use sys_vumap with
	 * VUA_READ for this, but that would defeat the point of one test. It
	 * is still a decent alternative in case sys_umap's behavior ever
	 * changes, though.
	 */
	r = sys_umap(SELF, VM_D, addr, bytes, phys);

	return r == OK;
}

static int is_buf_allocated(struct buf *buf)
{
	return is_allocated(buf->addr, buf->pages * PAGE_SIZE, &buf->phys);
}

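/* Announce the start of a test group, if verbose output is enabled. */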
static void test_group(char *name)
{
	if (verbose)
		printf("Test group: %s (%s)\n",
			name, relay ? "relay" : "local");
}

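/* Implementation of the expect() macro: on the first failed expectation of a
 * test case, record the failure and the source location of the check.
 */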
static void expect_f(int res, char *file, int line)
{
	if (!res && success) {
		success = FALSE;
		fail_file = file;
		fail_line = line;
	}
}

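/* Conclude the current test case, printing and counting its result. */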
static void got_result(char *desc)
{
	count++;

	if (!success) {
		failures++;

		printf("#%02d: %-38s\t[FAIL]\n", count, desc);
		printf("- failure at %s:%d\n", fail_file, fail_line);
	} else {
		if (verbose)
			printf("#%02d: %-38s\t[PASS]\n", count, desc);
	}
}

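/* Perform the vumap call indirectly: grant the vectors to the vumaprelay
 * service and ask it to make the sys_vumap() call on our behalf. The grant
 * access rights and validity may be overridden for testing purposes, through
 * the grant_access and grant_exception variables.
 */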
static int relay_vumap(struct vumap_vir *vvec, int vcount, size_t offset,
	int access, struct vumap_phys *pvec, int *pcount)
{
	struct vumap_vir gvvec[MAPVEC_NR + 3];
	cp_grant_id_t vgrant, pgrant;
	message m;
	int i, r, gaccess;

	assert(vcount > 0 && vcount <= MAPVEC_NR + 3);
	assert(*pcount > 0 && *pcount <= MAPVEC_NR + 3);

	/* Allow grant access flags to be overridden for testing purposes. */
	if (!(gaccess = grant_access)) {
		if (access & VUA_READ) gaccess |= CPF_READ;
		if (access & VUA_WRITE) gaccess |= CPF_WRITE;
	}

	for (i = 0; i < vcount; i++) {
		gvvec[i].vv_grant = cpf_grant_direct(endpt, vvec[i].vv_addr,
			vvec[i].vv_size, gaccess);
		assert(gvvec[i].vv_grant != GRANT_INVALID);
		gvvec[i].vv_size = vvec[i].vv_size;
	}

	vgrant = cpf_grant_direct(endpt, (vir_bytes) gvvec,
		sizeof(gvvec[0]) * vcount, CPF_READ);
	assert(vgrant != GRANT_INVALID);

	pgrant = cpf_grant_direct(endpt, (vir_bytes) pvec,
		sizeof(pvec[0]) * *pcount, CPF_WRITE);
	assert(pgrant != GRANT_INVALID);

	/* This must be done after allocating all other grants. */
	if (grant_exception != GE_NONE) {
		cpf_revoke(gvvec[vcount - 1].vv_grant);
		if (grant_exception == GE_INVALID)
			gvvec[vcount - 1].vv_grant = GRANT_INVALID;
	}

	m.m_type = VTR_RELAY;
	m.VTR_VGRANT = vgrant;
	m.VTR_VCOUNT = vcount;
	m.VTR_OFFSET = offset;
	m.VTR_ACCESS = access;
	m.VTR_PGRANT = pgrant;
	m.VTR_PCOUNT = *pcount;

	r = ipc_sendrec(endpt, &m);

	cpf_revoke(pgrant);
	cpf_revoke(vgrant);

	for (i = 0; i < vcount - !!grant_exception; i++)
		cpf_revoke(gvvec[i].vv_grant);

	*pcount = m.VTR_PCOUNT;

	return (r != OK) ? r : m.m_type;
}

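/* Perform a vumap call, either directly or through the relay service. Reset
 * the test case state first, and upon failure, verify that the caller's
 * physical vector and count have been left untouched.
 */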
static int do_vumap(endpoint_t endpt, struct vumap_vir *vvec, int vcount,
	size_t offset, int access, struct vumap_phys *pvec, int *pcount)
{
	struct vumap_phys pv_backup[MAPVEC_NR + 3];
	int r, pc_backup, pv_test = FALSE;

	/* Make a copy of pvec and pcount for later. */
	pc_backup = *pcount;

	/* We cannot compare pvec contents before and after when relaying,
	 * since the original contents are not transferred.
	 */
	if (!relay && pvec != NULL && pc_backup >= 1 &&
			pc_backup <= MAPVEC_NR + 3) {
		pv_test = TRUE;
		memcpy(pv_backup, pvec, sizeof(*pvec) * pc_backup);
	}

	/* Reset the test result. */
	success = TRUE;

	/* Perform the vumap call, either directly or through a relay. */
	if (relay) {
		assert(endpt == SELF);
		r = relay_vumap(vvec, vcount, offset, access, pvec, pcount);
	} else {
		r = sys_vumap(endpt, vvec, vcount, offset, access, pvec,
			pcount);
	}

	/* Upon failure, pvec and pcount must be unchanged. */
	if (r != OK) {
		expect(pc_backup == *pcount);

		if (pv_test)
			expect(memcmp(pv_backup, pvec,
				sizeof(*pvec) * pc_backup) == 0);
	}

	return r;
}

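/* Test the basic functionality of sys_vumap(): single and multiple pages,
 * contiguous and noncontiguous ranges, and truncation of the result vector.
 */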
static void test_basics(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[4];
	struct buf buf[4];
	int r, pcount;

	test_group("basics");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;
	buf[2].pages = 1;
	buf[2].flags = BUF_PREALLOC;
	buf[3].pages = 1;
	buf[3].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, 4);

	/* Test single whole page. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("single whole page");

	/* Test single partial page. */
	vvec[0].vv_addr = buf[0].addr + 123;
	vvec[0].vv_size = PAGE_SIZE - 456;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + 123);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("single partial page");

	/* Test multiple contiguous whole pages. */
	vvec[0].vv_addr = buf[1].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("multiple contiguous whole pages");

	/* Test range in multiple contiguous pages. */
	vvec[0].vv_addr = buf[1].addr + 234;
	vvec[0].vv_size = PAGE_SIZE * 2 - 234;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys + 234);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("range in multiple contiguous pages");

	/* Test multiple noncontiguous whole pages. */
	vvec[0].vv_addr = buf[2].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE);

	got_result("multiple noncontiguous whole pages");

	/* Test range in multiple noncontiguous pages. */
	vvec[0].vv_addr = buf[2].addr + 1;
	vvec[0].vv_size = PAGE_SIZE * 2 - 2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + 1);
	expect(pvec[0].vp_size == PAGE_SIZE - 1);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - 1);

	got_result("range in multiple noncontiguous pages");

	/* Test single-input result truncation. */
	vvec[0].vv_addr = buf[2].addr + PAGE_SIZE / 2;
	vvec[0].vv_size = PAGE_SIZE;
	pvec[1].vp_addr = 0L;
	pvec[1].vp_size = 0;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[2].phys + PAGE_SIZE / 2);
	expect(pvec[0].vp_size == PAGE_SIZE / 2);
	expect(pvec[1].vp_addr == 0L);
	expect(pvec[1].vp_size == 0);

	got_result("single-input result truncation");

	/* Test multiple inputs, contiguous first. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[2].addr + PAGE_SIZE - 1;
	vvec[1].vv_size = 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 3);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[1].vp_addr == buf[2].phys + PAGE_SIZE - 1);
	expect(pvec[1].vp_size == 1);
	expect(pvec[2].vp_addr == buf[3].phys);
	expect(pvec[2].vp_size == 1);

	got_result("multiple inputs, contiguous first");

	/* Test multiple inputs, contiguous last. */
	vvec[0].vv_addr = buf[2].addr + 123;
	vvec[0].vv_size = PAGE_SIZE * 2 - 456;
	vvec[1].vv_addr = buf[1].addr + 234;
	vvec[1].vv_size = PAGE_SIZE * 2 - 345;
	pcount = 4;

	r = do_vumap(SELF, vvec, 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 3);
	expect(pvec[0].vp_addr == buf[2].phys + 123);
	expect(pvec[0].vp_size == PAGE_SIZE - 123);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - (456 - 123));
	expect(pvec[2].vp_addr == buf[1].phys + 234);
	expect(pvec[2].vp_size == vvec[1].vv_size);

	got_result("multiple inputs, contiguous last");

	/* Test multiple-inputs result truncation. */
	vvec[0].vv_addr = buf[2].addr + 2;
	vvec[0].vv_size = PAGE_SIZE * 2 - 3;
	vvec[1].vv_addr = buf[0].addr;
	vvec[1].vv_size = 135;
	pvec[2].vp_addr = 0xDEADBEEFL;
	pvec[2].vp_size = 1234;
	pcount = 2;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + 2);
	expect(pvec[0].vp_size == PAGE_SIZE - 2);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - 1);
	expect(pvec[2].vp_addr == 0xDEADBEEFL);
	expect(pvec[2].vp_size == 1234);

	got_result("multiple-inputs result truncation");

	free_bufs(buf, 4);
}

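/* Test sys_vumap() calls on invalid endpoints. */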
static void test_endpt(void)
{
	struct vumap_vir vvec[1];
	struct vumap_phys pvec[1];
	struct buf buf[1];
	int r, pcount;

	test_group("endpoint");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;

	alloc_bufs(buf, 1);

	/* Test NONE endpoint. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(NONE, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("NONE endpoint");

	/* Test ANY endpoint. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(ANY, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("ANY endpoint");

	free_bufs(buf, 1);
}

static void test_vector1(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[2];
	int r, pcount;

	test_group("vector, part 1");

	buf[0].pages = 2;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test zero virtual memory size. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = 0;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero virtual memory size");

	/* Test excessive virtual memory size. */
	vvec[1].vv_size = (vir_bytes) -1;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT || r == EPERM);

	got_result("excessive virtual memory size");

	/* Test invalid virtual memory. */
	vvec[1].vv_addr = 0L;
	vvec[1].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("invalid virtual memory");

	/* Test virtual memory overrun. */
	vvec[0].vv_size++;
	vvec[1].vv_addr = buf[1].addr;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("virtual memory overrun");

	free_bufs(buf, 2);
}

static void test_vector2(void)
{
	struct vumap_vir vvec[2], *vvecp;
	struct vumap_phys pvec[3], *pvecp;
	struct buf buf[2];
	phys_bytes dummy;
	int r, pcount;

	test_group("vector, part 2");

	buf[0].pages = 2;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test zero virtual count. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = PAGE_SIZE;
	pcount = 3;

	r = do_vumap(SELF, vvec, 0, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero virtual count");

	/* Test negative virtual count. */
	r = do_vumap(SELF, vvec, -1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative virtual count");

	/* Test zero physical count. */
	pcount = 0;

	r = do_vumap(SELF, vvec, 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero physical count");

	/* Test negative physical count. */
	pcount = -1;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative physical count");

	/* Test invalid virtual vector pointer. */
	pcount = 2;

	r = do_vumap(SELF, NULL, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("invalid virtual vector pointer");

	/* Test unallocated virtual vector. */
	vvecp = (struct vumap_vir *) mmap(NULL, PAGE_SIZE,
		PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);

	if (vvecp == MAP_FAILED)
		panic("unable to allocate virtual vector");

	r = do_vumap(SELF, vvecp, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_allocated((vir_bytes) vvecp, PAGE_SIZE, &dummy));

	got_result("unallocated virtual vector pointer");

	munmap((void *) vvecp, PAGE_SIZE);

	/* Test invalid physical vector pointer. */
	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, NULL, &pcount);

	expect(r == EFAULT);

	got_result("invalid physical vector pointer");

	/* Test unallocated physical vector. */
	pvecp = (struct vumap_phys *) mmap(NULL, PAGE_SIZE,
		PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);

	if (pvecp == MAP_FAILED)
		panic("unable to allocate physical vector");

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvecp, &pcount);

	expect(r == OK);
	expect(is_allocated((vir_bytes) pvecp, PAGE_SIZE, &dummy));
	expect(pcount == 2);
	expect(pvecp[0].vp_size == PAGE_SIZE * 2);
	expect(pvecp[0].vp_addr == buf[0].phys);
	expect(pvecp[1].vp_size == PAGE_SIZE);
	expect(pvecp[1].vp_addr == buf[1].phys);

	got_result("unallocated physical vector pointer");

	munmap((void *) pvecp, PAGE_SIZE);

	free_bufs(buf, 2);
}

static void test_grant(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[2];
	int r, pcount;

	test_group("grant");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test write-only access on read-only grant. */
	grant_access = CPF_READ; /* override */

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("write-only access on read-only grant");

	/* Test read-write access on read-only grant. */
	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-write access on read-only grant");

	/* Test read-only access on write-only grant. */
	grant_access = CPF_WRITE; /* override */

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-only access on write-only grant");

	/* Test read-write access on write grant. */
	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-write access on write-only grant");

	/* Test read-only access on read-write grant. */
	grant_access = CPF_READ | CPF_WRITE; /* override */

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[0].vp_addr == buf[0].phys);

	got_result("read-only access on read-write grant");

	grant_access = 0; /* reset */

	/* Test invalid grant. */
	grant_exception = GE_INVALID;

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = PAGE_SIZE * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("invalid grant");

	/* Test revoked grant. */
	grant_exception = GE_REVOKED;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EPERM);

	got_result("revoked grant");

	grant_exception = GE_NONE;

	free_bufs(buf, 2);
}

static void test_offset(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[4];
	size_t off, off2;
	int r, pcount;

	test_group("offsets");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;
	buf[2].pages = 1;
	buf[2].flags = BUF_PREALLOC;
	buf[3].pages = 1;
	buf[3].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, 4);

	/* Test offset into aligned page. */
	off = 123;
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off);
	expect(pvec[0].vp_size == vvec[0].vv_size - off);

	got_result("offset into aligned page");

	/* Test offset into unaligned page. */
	off2 = 456;
	assert(off + off2 < PAGE_SIZE);
	vvec[0].vv_addr = buf[0].addr + off;
	vvec[0].vv_size = PAGE_SIZE - off;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off + off2);
	expect(pvec[0].vp_size == vvec[0].vv_size - off2);

	got_result("offset into unaligned page");

	/* Test offset into contiguous page set. */
	off = 1234;
	off2 = 567;
	assert(off + off2 < PAGE_SIZE);
	vvec[0].vv_addr = buf[1].addr + off;
	vvec[0].vv_size = (PAGE_SIZE - off) * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys + off + off2);
	expect(pvec[0].vp_size == vvec[0].vv_size - off2);

	got_result("offset into contiguous page set");

	/* Test offset into noncontiguous page set. */
	vvec[0].vv_addr = buf[2].addr + off;
	vvec[0].vv_size = (PAGE_SIZE - off) * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + off + off2);
	expect(pvec[0].vp_size == PAGE_SIZE - off - off2);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - off);

	got_result("offset into noncontiguous page set");

	/* Test offset to last byte. */
	off = PAGE_SIZE - off2 - 1;
	vvec[0].vv_addr = buf[0].addr + off2;
	vvec[0].vv_size = PAGE_SIZE - off2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off + off2);
	expect(pvec[0].vp_size == 1);

	got_result("offset to last byte");

	/* Test offset at range end. */
	off = 234;
	vvec[0].vv_addr = buf[1].addr + off;
	vvec[0].vv_size = PAGE_SIZE - off * 2;
	vvec[1].vv_addr = vvec[0].vv_addr + vvec[0].vv_size;
	vvec[1].vv_size = off;

	r = do_vumap(SELF, vvec, 2, vvec[0].vv_size, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("offset at range end");

	/* Test offset beyond range end. */
	vvec[0].vv_addr = buf[1].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[1].addr + PAGE_SIZE;
	vvec[1].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 2, PAGE_SIZE + off, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("offset beyond range end");

	/* Test negative offset. */
	vvec[0].vv_addr = buf[1].addr + off + off2;
	vvec[0].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 1, (size_t) -1, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative offset");

	free_bufs(buf, 4);
}

static void test_access(void)
{
	struct vumap_vir vvec[3];
	struct vumap_phys pvec[4], *pvecp;
	struct buf buf[7];
	int i, r, pcount, pindex;

	test_group("access");

	buf[0].pages = 1;
	buf[0].flags = 0;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC | BUF_ADJACENT;
	buf[2].pages = 1;
	buf[2].flags = BUF_ADJACENT;

	alloc_bufs(buf, 3);

	/* Test no access flags. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, 0, pvec, &pcount);

	expect(r == EINVAL);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("no access flags");

	/* Test read-only access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("read-only access");

	/* Test read-write access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("read-write access");

	/* Test write-only access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	/* We don't control the physical addresses of the faulted-in pages, so
	 * they may or may not end up being contiguous with their neighbours.
	 */
	expect(pcount >= 1 && pcount <= 3);
	expect(is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(is_buf_allocated(&buf[2]));
	expect(pvec[0].vp_addr == buf[0].phys);
	switch (pcount) {
	case 1:
		expect(pvec[0].vp_size == PAGE_SIZE * 3);
		break;
	case 2:
		expect(pvec[0].vp_size + pvec[1].vp_size == PAGE_SIZE * 3);
		if (pvec[0].vp_size > PAGE_SIZE)
			expect(pvec[1].vp_addr == buf[2].phys);
		else
			expect(pvec[1].vp_addr == buf[1].phys);
		break;
	case 3:
		expect(pvec[0].vp_size == PAGE_SIZE);
		expect(pvec[1].vp_addr == buf[1].phys);
		expect(pvec[1].vp_size == PAGE_SIZE);
		expect(pvec[2].vp_addr == buf[2].phys);
		expect(pvec[2].vp_size == PAGE_SIZE);
		break;
	}

	got_result("write-only access");

	free_bufs(buf, 3);

	/* Test page faulting. */
	buf[0].pages = 1;
	buf[0].flags = 0;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC | BUF_ADJACENT;
	buf[2].pages = 1;
	buf[2].flags = 0;
	buf[3].pages = 2;
	buf[3].flags = BUF_PREALLOC;
	buf[4].pages = 1;
	buf[4].flags = BUF_ADJACENT;
	buf[5].pages = 1;
	buf[5].flags = BUF_ADJACENT;
	buf[6].pages = 1;
	buf[6].flags = 0;

	alloc_bufs(buf, 7);

	vvec[0].vv_addr = buf[0].addr + PAGE_SIZE - 1;
	vvec[0].vv_size = PAGE_SIZE - 1;
	vvec[1].vv_addr = buf[2].addr;
	vvec[1].vv_size = PAGE_SIZE;
	vvec[2].vv_addr = buf[3].addr + 123;
	vvec[2].vv_size = PAGE_SIZE * 4 - 456;
	pvecp = (struct vumap_phys *) buf[6].addr;
	pcount = 7;
	assert(sizeof(struct vumap_phys) * pcount <= PAGE_SIZE);

	r = do_vumap(SELF, vvec, 3, 0, VUA_WRITE, pvecp, &pcount);

	expect(r == OK);
	/* Same story but more possibilities. I hope I got this right. */
	expect(pcount >= 3 && pcount <= 6);
	for (i = 0; i < 7; i++)
		expect(is_buf_allocated(&buf[i]));
	expect(pvecp[0].vp_addr == buf[0].phys);
	if (pvecp[0].vp_size == 1) {
		expect(pvecp[1].vp_addr == buf[1].phys);
		expect(pvecp[1].vp_size == PAGE_SIZE - 2);
		pindex = 2;
	} else {
		expect(pvecp[0].vp_size == PAGE_SIZE - 1);
		pindex = 1;
	}
	expect(pvecp[pindex].vp_addr == buf[2].phys);
	expect(pvecp[pindex].vp_size == PAGE_SIZE);
	pindex++;
	expect(pvecp[pindex].vp_addr == buf[3].phys + 123);
	switch (pcount - pindex) {
	case 1:
		expect(pvecp[pindex].vp_size == PAGE_SIZE * 4 - 456);
		break;
	case 2:
		if (pvecp[pindex].vp_size > PAGE_SIZE * 2 - 123) {
			expect(pvecp[pindex].vp_size == PAGE_SIZE * 3 - 123);
			expect(pvecp[pindex + 1].vp_addr == buf[5].phys);
			expect(pvecp[pindex + 1].vp_size ==
				PAGE_SIZE - (456 - 123));
		} else {
			expect(pvecp[pindex].vp_size == PAGE_SIZE * 2 - 123);
			expect(pvecp[pindex + 1].vp_addr == buf[4].phys);
			expect(pvecp[pindex + 1].vp_size ==
				PAGE_SIZE * 2 - (456 - 123));
		}
		break;
	case 3:
		expect(pvecp[pindex].vp_size == PAGE_SIZE * 2 - 123);
		expect(pvecp[pindex + 1].vp_addr == buf[4].phys);
		expect(pvecp[pindex + 1].vp_size == PAGE_SIZE);
		expect(pvecp[pindex + 2].vp_addr == buf[5].phys);
		expect(pvecp[pindex + 2].vp_size == PAGE_SIZE - (456 - 123));
		break;
	default:
		expect(0);
	}

	got_result("page faulting");

	free_bufs(buf, 7);

	/* MISSING: tests to see whether a request with VUA_WRITE or
	 * (VUA_READ|VUA_WRITE) correctly gets an EFAULT for a read-only page.
	 * As of writing, support for such protection is missing from the
	 * system altogether.
	 */
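
	/* A rough sketch of what such a test might look like once read-only
	 * mappings are supported; it assumes (hypothetically) that a
	 * PROT_READ-only anonymous mapping would make sys_vumap() refuse
	 * write access to the page:
	 *
	 *	void *ro = mmap(NULL, PAGE_SIZE, PROT_READ,
	 *		MAP_ANON | MAP_PREALLOC, -1, 0L);
	 *	vvec[0].vv_addr = (vir_bytes) ro;
	 *	vvec[0].vv_size = PAGE_SIZE;
	 *	pcount = 1;
	 *	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);
	 *	expect(r == EFAULT);
	 */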
}

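/* Perform one physical-limit test case: map the given vectors and expect
 * exactly MAPVEC_NR resulting entries of one page each, matching 'buf'.
 */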
static void phys_limit(struct vumap_vir *vvec, int vcount,
	struct vumap_phys *pvec, int pcount, struct buf *buf, char *desc)
{
	int i, r;

	r = do_vumap(SELF, vvec, vcount, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result(desc);
}

static void test_limits(void)
{
	struct vumap_vir vvec[MAPVEC_NR + 3];
	struct vumap_phys pvec[MAPVEC_NR + 3];
	struct buf buf[MAPVEC_NR + 9];
	int i, r, vcount, pcount, nr_bufs;

	test_group("limits");

	/* Test large contiguous range. */
	buf[0].pages = MAPVEC_NR + 2;
	buf[0].flags = BUF_PREALLOC;

	alloc_bufs(buf, 1);

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = (MAPVEC_NR + 2) * PAGE_SIZE;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("large contiguous range");

	free_bufs(buf, 1);

	/* I'd like to test MAPVEC_NR contiguous ranges of MAPVEC_NR pages
	 * each, but chances are we don't have that much contiguous memory
	 * available at all. In fact, the previous test may already fail
	 * because of this..
	 */

	for (i = 0; i < MAPVEC_NR + 2; i++) {
		buf[i].pages = 1;
		buf[i].flags = BUF_PREALLOC;
	}
	buf[i].pages = 1;
	buf[i].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, MAPVEC_NR + 3);

	/* Test virtual limit, one below. */
	for (i = 0; i < MAPVEC_NR + 2; i++) {
		vvec[i].vv_addr = buf[i].addr;
		vvec[i].vv_size = PAGE_SIZE;
	}
	vvec[i - 1].vv_size += PAGE_SIZE;

	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR - 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, one below");

	/* Test virtual limit, exact match. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, exact match");

	/* Test virtual limit, one above. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR + 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, one above");

	/* Test virtual limit, two above. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR + 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, two above");

	/* Test physical limit, one below, aligned. */
	pcount = MAPVEC_NR - 1;

	r = do_vumap(SELF, vvec + 2, MAPVEC_NR, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i + 2].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("physical limit, one below, aligned");

	/* Test physical limit, one below, unaligned. */
	pcount = MAPVEC_NR - 1;

	r = do_vumap(SELF, vvec + 3, MAPVEC_NR, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i + 3].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("physical limit, one below, unaligned");

	free_bufs(buf, MAPVEC_NR + 3);

	nr_bufs = sizeof(buf) / sizeof(buf[0]);

	/* This ends up looking in our virtual address space as follows:
	 * [P] [P] [P] [PPP] [PPP] ...(MAPVEC_NR x [PPP])... [PPP]
	 * ..where P is a page, and the blocks are virtually contiguous.
	 */
	for (i = 0; i < nr_bufs; i += 3) {
		buf[i].pages = 1;
		buf[i].flags = BUF_PREALLOC;
		buf[i + 1].pages = 1;
		buf[i + 1].flags =
			BUF_PREALLOC | ((i >= 3) ? BUF_ADJACENT : 0);
		buf[i + 2].pages = 1;
		buf[i + 2].flags =
			BUF_PREALLOC | ((i >= 3) ? BUF_ADJACENT : 0);
	}

	alloc_bufs(buf, nr_bufs);

	for (i = 0; i < 3; i++) {
		vvec[i].vv_addr = buf[i].addr;
		vvec[i].vv_size = PAGE_SIZE;
	}
	for ( ; i < nr_bufs / 3 + 1; i++) {
		vvec[i].vv_addr = buf[(i - 2) * 3].addr;
		vvec[i].vv_size = PAGE_SIZE * 3;
	}
	vcount = i;

	/* Out of each of the following tests, one will be aligned (that is,
	 * the last pvec entry will be for the last page in a vvec entry) and
	 * two will be unaligned.
	 */

	/* Test physical limit, exact match. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR, buf,
		"physical limit, exact match, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR, buf + 1,
		"physical limit, exact match, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR, buf + 2,
		"physical limit, exact match, try 3");

	/* Test physical limit, one above. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR + 1, buf,
		"physical limit, one above, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR + 1, buf + 1,
		"physical limit, one above, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR + 1, buf + 2,
		"physical limit, one above, try 3");

	/* Test physical limit, two above. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR + 2, buf,
		"physical limit, two above, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR + 2, buf + 1,
		"physical limit, two above, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR + 2, buf + 2,
		"physical limit, two above, try 3");

	free_bufs(buf, nr_bufs);
}

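/* Run all test groups, either locally or through the relay service. */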
static void do_tests(int use_relay)
{
	relay = use_relay;

	test_basics();

	if (!relay) test_endpt();	/* local only */

	test_vector1();

	if (!relay) test_vector2();	/* local only */

	if (relay) test_grant();	/* remote only */

	test_offset();

	test_access();

	test_limits();
}

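/* Initialization: run the entire test set twice, first making the vumap calls
 * directly and then relaying them through the "vumaprelay" service. The
 * return value of this callback determines the result reported to RS.
 */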
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
	int r;

	verbose = (env_argc > 1 && !strcmp(env_argv[1], "-v"));

	if (verbose)
		printf("Starting sys_vumap test set\n");

	do_tests(FALSE /*use_relay*/);

	if ((r = ds_retrieve_label_endpt("vumaprelay", &endpt)) != OK)
		panic("unable to obtain endpoint for 'vumaprelay' (%d)", r);

	do_tests(TRUE /*use_relay*/);

	if (verbose)
		printf("Completed sys_vumap test set, %u/%u tests failed\n",
			failures, count);

	/* The returned code will determine the outcome of the RS call, and
	 * thus the entire test. The actual error code does not matter.
	 */
	return (failures) ? EINVAL : OK;
}

static void sef_local_startup(void)
{
	sef_setcb_init_fresh(sef_cb_init_fresh);

	sef_startup();
}

int main(int argc, char **argv)
{
	env_setargs(argc, argv);

	sef_local_startup();

	return 0;
}