xref: /minix/minix/tests/kernel/sys_vumap/vumaptest.c (revision 83133719)
1 /* Test for sys_vumap() - by D.C. van Moolenbroek */
2 #include <minix/drivers.h>
3 #include <minix/ds.h>
4 #include <sys/mman.h>
5 #include <assert.h>
6 
7 #include "com.h"
8 
9 struct buf {
10 	int pages;
11 	int flags;
12 	vir_bytes addr;
13 	phys_bytes phys;
14 };
15 #define BUF_PREALLOC	0x1	/* if set, immediately allocate the page */
16 #define BUF_ADJACENT	0x2	/* virtually contiguous with the last buffer */
17 
18 static unsigned int count = 0, failures = 0;
19 
20 static int success;
21 static char *fail_file;
22 static int fail_line;
23 
24 static int relay;
25 static endpoint_t endpt;
26 
27 static int verbose;
28 
29 static enum {
30 	GE_NONE,		/* no exception */
31 	GE_REVOKED,		/* revoked grant */
32 	GE_INVALID		/* invalid grant */
33 } grant_exception = GE_NONE;
34 
35 static int grant_access = 0;
36 
37 #define expect(r)	expect_f((r), __FILE__, __LINE__)
38 
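/* Allocate a buffer of buf->pages pages at the fixed virtual address
 * buf->addr. With BUF_PREALLOC, fault in the memory right away and store its
 * physical address in buf->phys; 'next' is the physical address that the new
 * buffer must not start at, so that it ends up physically noncontiguous with
 * the previously allocated buffer.
 */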
39 static void alloc_buf(struct buf *buf, phys_bytes next)
40 {
41 	void *tmp = NULL;
42 	vir_bytes addr;
43 	size_t len;
44 	int r, prealloc, flags;
45 
46 	/* is_allocated() cannot handle buffers that are not physically
47 	 * contiguous, and we cannot guarantee physical contiguity if not
48 	 * preallocating.
49 	 */
50 	assert((buf->flags & BUF_PREALLOC) || buf->pages == 1);
51 
52 	len = buf->pages * PAGE_SIZE;
53 	prealloc = (buf->flags & BUF_PREALLOC);
54 	flags = MAP_ANON | (prealloc ? (MAP_CONTIG | MAP_PREALLOC) : 0);
55 
56 	if (prealloc) {
57 		/* Allocate a same-sized piece of memory elsewhere, to make it
58 		 * very unlikely that the actual piece of memory will end up
59 		 * being physically contiguous with the last piece.
60 		 */
61 		tmp = mmap((void *) (buf->addr + len + PAGE_SIZE), len,
62 			PROT_READ | PROT_WRITE, MAP_ANON | MAP_PREALLOC |
63 			MAP_CONTIG, -1, 0L);
64 
65 		if (tmp == MAP_FAILED)
66 			panic("unable to allocate temporary buffer");
67 	}
68 
69 	addr = (vir_bytes) mmap((void *) buf->addr, len,
70 		PROT_READ | PROT_WRITE, flags, -1, 0L);
71 
72 	if (addr != buf->addr)
73 		panic("unable to allocate buffer (2)");
74 
75 	if (!prealloc)
76 		return;
77 
78 	if ((r = munmap(tmp, len)) != OK)
79 		panic("unable to unmap buffer (%d)", errno);
80 
81 	if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
82 		panic("unable to get physical address of buffer (%d)", r);
83 
84 	if (buf->phys != next)
85 		return;
86 
87 	if (verbose)
88 		printf("WARNING: allocating noncontiguous range, second try\n");
89 
90 	/* We can't remap this elsewhere, so we run the risk of allocating the
91 	 * exact same physically contiguous page again. However, now that we've
92 	 * also unmapped the temporary memory, there's a small chance we'll end
93 	 * up with a different physical page this time. Who knows.
94 	 */
95 	munmap((void *) addr, len);
96 
97 	addr = (vir_bytes) mmap((void *) buf->addr, len,
98 		PROT_READ | PROT_WRITE, flags, -1, 0L);
99 
100 	if (addr != buf->addr)
101 		panic("unable to allocate buffer, second try");
102 
103 	if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
104 		panic("unable to get physical address of buffer (%d)", r);
105 
106 	/* Still the same page? Screw it. */
107 	if (buf->phys == next)
108 		panic("unable to allocate noncontiguous range");
109 }
110 
111 static void alloc_bufs(struct buf *buf, int count)
112 {
113 	static vir_bytes base = 0x80000000L;
114 	phys_bytes next;
115 	int i;
116 
117 	/* Allocate the given memory in virtually contiguous blocks whenever
118 	 * the next buffer is requested to be adjacent. Insert a virtual gap
119 	 * after each such block. Make sure that each two adjacent buffers in a
120 	 * block are physically non-contiguous.
121 	 */
122 	for (i = 0; i < count; i++) {
123 		if (i > 0 && (buf[i].flags & BUF_ADJACENT)) {
124 			next = buf[i-1].phys + buf[i-1].pages * PAGE_SIZE;
125 		} else {
126 			base += PAGE_SIZE * 16;
127 			next = 0L;
128 		}
129 
130 		buf[i].addr = base;
131 
132 		alloc_buf(&buf[i], next);
133 
134 		base += buf[i].pages * PAGE_SIZE;
135 	}
136 
137 #if DEBUG
138 	for (i = 0; i < count; i++)
139 		printf("Buf %d: %d pages, flags %x, vir %08x, phys %08x\n", i,
140 			buf[i].pages, buf[i].flags, buf[i].addr, buf[i].phys);
141 #endif
142 }
143 
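/* Unmap all pages of the given set of buffers, one page at a time. */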
144 static void free_bufs(struct buf *buf, int count)
145 {
146 	int i, j, r;
147 
148 	for (i = 0; i < count; i++) {
149 		for (j = 0; j < buf[i].pages; j++) {
150 			r = munmap((void *) (buf[i].addr + j * PAGE_SIZE),
151 				PAGE_SIZE);
152 
153 			if (r != OK)
154 				panic("unable to unmap range (%d)", errno);
155 		}
156 	}
157 }
158 
159 static int is_allocated(vir_bytes addr, size_t bytes, phys_bytes *phys)
160 {
161 	int r;
162 
163 	/* This will have to do for now. Of course, we could use sys_vumap with
164 	 * VUA_READ for this, but that would defeat the point of one test. It
165 	 * is still a decent alternative in case sys_umap's behavior ever
166 	 * changes, though.
167 	 */
168 	r = sys_umap(SELF, VM_D, addr, bytes, phys);
169 
170 	return r == OK;
171 }
172 
173 static int is_buf_allocated(struct buf *buf)
174 {
175 	return is_allocated(buf->addr, buf->pages * PAGE_SIZE, &buf->phys);
176 }
177 
178 static void test_group(char *name)
179 {
180 	if (verbose)
181 		printf("Test group: %s (%s)\n",
182 			name, relay ? "relay" : "local");
183 }
184 
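/* Record a failed check; only the first failure of the current test is
 * remembered, along with the file and line of the failing expect().
 */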
185 static void expect_f(int res, char *file, int line)
186 {
187 	if (!res && success) {
188 		success = FALSE;
189 		fail_file = file;
190 		fail_line = line;
191 	}
192 }
193 
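/* Report the outcome of the current test, based on the success flag set by
 * expect_f(), and update the test and failure counters.
 */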
194 static void got_result(char *desc)
195 {
196 	count++;
197 
198 	if (!success) {
199 		failures++;
200 
201 		printf("#%02d: %-38s\t[FAIL]\n", count, desc);
202 		printf("- failure at %s:%d\n", fail_file, fail_line);
203 	} else {
204 		if (verbose)
205 			printf("#%02d: %-38s\t[PASS]\n", count, desc);
206 	}
207 }
208 
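/* Perform a vumap call through the "vumaprelay" service: wrap each virtual
 * range as well as the input and output vectors in memory grants, send a
 * VTR_RELAY request to the relay, which is expected to call sys_vumap() on
 * our behalf, and revoke the grants again afterwards.
 */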
209 static int relay_vumap(struct vumap_vir *vvec, int vcount, size_t offset,
210 	int access, struct vumap_phys *pvec, int *pcount)
211 {
212 	struct vumap_vir gvvec[MAPVEC_NR + 3];
213 	cp_grant_id_t vgrant, pgrant;
214 	message m;
215 	int i, r, gaccess;
216 
217 	assert(vcount > 0 && vcount <= MAPVEC_NR + 3);
218 	assert(*pcount > 0 && *pcount <= MAPVEC_NR + 3);
219 
220 	/* Allow grant access flags to be overridden for testing purposes. */
221 	if (!(gaccess = grant_access)) {
222 		if (access & VUA_READ) gaccess |= CPF_READ;
223 		if (access & VUA_WRITE) gaccess |= CPF_WRITE;
224 	}
225 
226 	for (i = 0; i < vcount; i++) {
227 		gvvec[i].vv_grant = cpf_grant_direct(endpt, vvec[i].vv_addr,
228 			vvec[i].vv_size, gaccess);
229 		assert(gvvec[i].vv_grant != GRANT_INVALID);
230 		gvvec[i].vv_size = vvec[i].vv_size;
231 	}
232 
233 	vgrant = cpf_grant_direct(endpt, (vir_bytes) gvvec,
234 		sizeof(gvvec[0]) * vcount, CPF_READ);
235 	assert(vgrant != GRANT_INVALID);
236 
237 	pgrant = cpf_grant_direct(endpt, (vir_bytes) pvec,
238 		sizeof(pvec[0]) * *pcount, CPF_WRITE);
239 	assert(pgrant != GRANT_INVALID);
240 
241 	/* This must be done after allocating all other grants. */
242 	if (grant_exception != GE_NONE) {
243 		cpf_revoke(gvvec[vcount - 1].vv_grant);
244 		if (grant_exception == GE_INVALID)
245 			gvvec[vcount - 1].vv_grant = GRANT_INVALID;
246 	}
247 
248 	m.m_type = VTR_RELAY;
249 	m.VTR_VGRANT = vgrant;
250 	m.VTR_VCOUNT = vcount;
251 	m.VTR_OFFSET = offset;
252 	m.VTR_ACCESS = access;
253 	m.VTR_PGRANT = pgrant;
254 	m.VTR_PCOUNT = *pcount;
255 
256 	r = ipc_sendrec(endpt, &m);
257 
258 	cpf_revoke(pgrant);
259 	cpf_revoke(vgrant);
260 
261 	for (i = 0; i < vcount - !!grant_exception; i++)
262 		cpf_revoke(gvvec[i].vv_grant);
263 
264 	*pcount = m.VTR_PCOUNT;
265 
266 	return (r != OK) ? r : m.m_type;
267 }
268 
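/* Central wrapper for all tests: reset the per-test success flag, perform
 * the vumap call either directly or through the relay, and check that pvec
 * and pcount are left untouched whenever the call fails.
 */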
269 static int do_vumap(endpoint_t endpt, struct vumap_vir *vvec, int vcount,
270 	size_t offset, int access, struct vumap_phys *pvec, int *pcount)
271 {
272 	struct vumap_phys pv_backup[MAPVEC_NR + 3];
273 	int r, pc_backup, pv_test = FALSE;
274 
275 	/* Make a copy of pvec and pcount for later. */
276 	pc_backup = *pcount;
277 
278 	/* We cannot compare pvec contents before and after when relaying,
279 	 * since the original contents are not transferred.
280 	 */
281 	if (!relay && pvec != NULL && pc_backup >= 1 &&
282 			pc_backup <= MAPVEC_NR + 3) {
283 		pv_test = TRUE;
284 		memcpy(pv_backup, pvec, sizeof(*pvec) * pc_backup);
285 	}
286 
287 	/* Reset the test result. */
288 	success = TRUE;
289 
290 	/* Perform the vumap call, either directly or through a relay. */
291 	if (relay) {
292 		assert(endpt == SELF);
293 		r = relay_vumap(vvec, vcount, offset, access, pvec, pcount);
294 	} else {
295 		r = sys_vumap(endpt, vvec, vcount, offset, access, pvec,
296 			pcount);
297 	}
298 
299 	/* Upon failure, pvec and pcount must be unchanged. */
300 	if (r != OK) {
301 		expect(pc_backup == *pcount);
302 
303 		if (pv_test)
304 			expect(memcmp(pv_backup, pvec,
305 				sizeof(*pvec) * pc_backup) == 0);
306 	}
307 
308 	return r;
309 }
310 
311 static void test_basics(void)
312 {
313 	struct vumap_vir vvec[2];
314 	struct vumap_phys pvec[4];
315 	struct buf buf[4];
316 	int r, pcount;
317 
318 	test_group("basics");
319 
320 	buf[0].pages = 1;
321 	buf[0].flags = BUF_PREALLOC;
322 	buf[1].pages = 2;
323 	buf[1].flags = BUF_PREALLOC;
324 	buf[2].pages = 1;
325 	buf[2].flags = BUF_PREALLOC;
326 	buf[3].pages = 1;
327 	buf[3].flags = BUF_PREALLOC | BUF_ADJACENT;
328 
329 	alloc_bufs(buf, 4);
330 
331 	/* Test single whole page. */
332 	vvec[0].vv_addr = buf[0].addr;
333 	vvec[0].vv_size = PAGE_SIZE;
334 	pcount = 1;
335 
336 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
337 
338 	expect(r == OK);
339 	expect(pcount == 1);
340 	expect(pvec[0].vp_addr == buf[0].phys);
341 	expect(pvec[0].vp_size == vvec[0].vv_size);
342 
343 	got_result("single whole page");
344 
345 	/* Test single partial page. */
346 	vvec[0].vv_addr = buf[0].addr + 123;
347 	vvec[0].vv_size = PAGE_SIZE - 456;
348 	pcount = 1;
349 
350 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
351 
352 	expect(r == OK);
353 	expect(pcount == 1);
354 	expect(pvec[0].vp_addr == buf[0].phys + 123);
355 	expect(pvec[0].vp_size == vvec[0].vv_size);
356 
357 	got_result("single partial page");
358 
359 	/* Test multiple contiguous whole pages. */
360 	vvec[0].vv_addr = buf[1].addr;
361 	vvec[0].vv_size = PAGE_SIZE * 2;
362 	pcount = 1;
363 
364 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
365 
366 	expect(r == OK);
367 	expect(pcount == 1);
368 	expect(pvec[0].vp_addr == buf[1].phys);
369 	expect(pvec[0].vp_size == vvec[0].vv_size);
370 
371 	got_result("multiple contiguous whole pages");
372 
373 	/* Test range in multiple contiguous pages. */
374 	vvec[0].vv_addr = buf[1].addr + 234;
375 	vvec[0].vv_size = PAGE_SIZE * 2 - 234;
376 	pcount = 2;
377 
378 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
379 
380 	expect(r == OK);
381 	expect(pcount == 1);
382 	expect(pvec[0].vp_addr == buf[1].phys + 234);
383 	expect(pvec[0].vp_size == vvec[0].vv_size);
384 
385 	got_result("range in multiple contiguous pages");
386 
387 	/* Test multiple noncontiguous whole pages. */
388 	vvec[0].vv_addr = buf[2].addr;
389 	vvec[0].vv_size = PAGE_SIZE * 2;
390 	pcount = 3;
391 
392 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
393 
394 	expect(r == OK);
395 	expect(pcount == 2);
396 	expect(pvec[0].vp_addr == buf[2].phys);
397 	expect(pvec[0].vp_size == PAGE_SIZE);
398 	expect(pvec[1].vp_addr == buf[3].phys);
399 	expect(pvec[1].vp_size == PAGE_SIZE);
400 
401 	got_result("multiple noncontiguous whole pages");
402 
403 	/* Test range in multiple noncontiguous pages. */
404 	vvec[0].vv_addr = buf[2].addr + 1;
405 	vvec[0].vv_size = PAGE_SIZE * 2 - 2;
406 	pcount = 2;
407 
408 	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);
409 
410 	expect(r == OK);
411 	expect(pcount == 2);
412 	expect(pvec[0].vp_addr == buf[2].phys + 1);
413 	expect(pvec[0].vp_size == PAGE_SIZE - 1);
414 	expect(pvec[1].vp_addr == buf[3].phys);
415 	expect(pvec[1].vp_size == PAGE_SIZE - 1);
416 
417 	got_result("range in multiple noncontiguous pages");
418 
419 	/* Test single-input result truncation. */
420 	vvec[0].vv_addr = buf[2].addr + PAGE_SIZE / 2;
421 	vvec[0].vv_size = PAGE_SIZE;
422 	pvec[1].vp_addr = 0L;
423 	pvec[1].vp_size = 0;
424 	pcount = 1;
425 
426 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
427 
428 	expect(r == OK);
429 	expect(pcount == 1);
430 	expect(pvec[0].vp_addr == buf[2].phys + PAGE_SIZE / 2);
431 	expect(pvec[0].vp_size == PAGE_SIZE / 2);
432 	expect(pvec[1].vp_addr == 0L);
433 	expect(pvec[1].vp_size == 0);
434 
435 	got_result("single-input result truncation");
436 
437 	/* Test multiple inputs, contiguous first. */
438 	vvec[0].vv_addr = buf[0].addr;
439 	vvec[0].vv_size = PAGE_SIZE;
440 	vvec[1].vv_addr = buf[2].addr + PAGE_SIZE - 1;
441 	vvec[1].vv_size = 2;
442 	pcount = 3;
443 
444 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
445 
446 	expect(r == OK);
447 	expect(pcount == 3);
448 	expect(pvec[0].vp_addr == buf[0].phys);
449 	expect(pvec[0].vp_size == PAGE_SIZE);
450 	expect(pvec[1].vp_addr == buf[2].phys + PAGE_SIZE - 1);
451 	expect(pvec[1].vp_size == 1);
452 	expect(pvec[2].vp_addr == buf[3].phys);
453 	expect(pvec[2].vp_size == 1);
454 
455 	got_result("multiple inputs, contiguous first");
456 
457 	/* Test multiple inputs, contiguous last. */
458 	vvec[0].vv_addr = buf[2].addr + 123;
459 	vvec[0].vv_size = PAGE_SIZE * 2 - 456;
460 	vvec[1].vv_addr = buf[1].addr + 234;
461 	vvec[1].vv_size = PAGE_SIZE * 2 - 345;
462 	pcount = 4;
463 
464 	r = do_vumap(SELF, vvec, 2, 0, VUA_WRITE, pvec, &pcount);
465 
466 	expect(r == OK);
467 	expect(pcount == 3);
468 	expect(pvec[0].vp_addr == buf[2].phys + 123);
469 	expect(pvec[0].vp_size == PAGE_SIZE - 123);
470 	expect(pvec[1].vp_addr == buf[3].phys);
471 	expect(pvec[1].vp_size == PAGE_SIZE - (456 - 123));
472 	expect(pvec[2].vp_addr == buf[1].phys + 234);
473 	expect(pvec[2].vp_size == vvec[1].vv_size);
474 
475 	got_result("multiple inputs, contiguous last");
476 
477 	/* Test multiple-inputs result truncation. */
478 	vvec[0].vv_addr = buf[2].addr + 2;
479 	vvec[0].vv_size = PAGE_SIZE * 2 - 3;
480 	vvec[1].vv_addr = buf[0].addr;
481 	vvec[1].vv_size = 135;
482 	pvec[2].vp_addr = 0xDEADBEEFL;
483 	pvec[2].vp_size = 1234;
484 	pcount = 2;
485 
486 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
487 
488 	expect(r == OK);
489 	expect(pcount == 2);
490 	expect(pvec[0].vp_addr == buf[2].phys + 2);
491 	expect(pvec[0].vp_size == PAGE_SIZE - 2);
492 	expect(pvec[1].vp_addr == buf[3].phys);
493 	expect(pvec[1].vp_size == PAGE_SIZE - 1);
494 	expect(pvec[2].vp_addr == 0xDEADBEEFL);
495 	expect(pvec[2].vp_size == 1234);
496 
497 	got_result("multiple-inputs result truncation");
498 
499 	free_bufs(buf, 4);
500 }
501 
502 static void test_endpt(void)
503 {
504 	struct vumap_vir vvec[1];
505 	struct vumap_phys pvec[1];
506 	struct buf buf[1];
507 	int r, pcount;
508 
509 	test_group("endpoint");
510 
511 	buf[0].pages = 1;
512 	buf[0].flags = BUF_PREALLOC;
513 
514 	alloc_bufs(buf, 1);
515 
516 	/* Test NONE endpoint. */
517 	vvec[0].vv_addr = buf[0].addr;
518 	vvec[0].vv_size = PAGE_SIZE;
519 	pcount = 1;
520 
521 	r = do_vumap(NONE, vvec, 1, 0, VUA_READ, pvec, &pcount);
522 
523 	expect(r == EINVAL);
524 
525 	got_result("NONE endpoint");
526 
527 	/* Test ANY endpoint. */
528 	vvec[0].vv_addr = buf[0].addr;
529 	vvec[0].vv_size = PAGE_SIZE;
530 	pcount = 1;
531 
532 	r = do_vumap(ANY, vvec, 1, 0, VUA_READ, pvec, &pcount);
533 
534 	expect(r == EINVAL);
535 
536 	got_result("ANY endpoint");
537 
538 	free_bufs(buf, 1);
539 }
540 
541 static void test_vector1(void)
542 {
543 	struct vumap_vir vvec[2];
544 	struct vumap_phys pvec[3];
545 	struct buf buf[2];
546 	int r, pcount;
547 
548 	test_group("vector, part 1");
549 
550 	buf[0].pages = 2;
551 	buf[0].flags = BUF_PREALLOC;
552 	buf[1].pages = 1;
553 	buf[1].flags = BUF_PREALLOC;
554 
555 	alloc_bufs(buf, 2);
556 
557 	/* Test zero virtual memory size. */
558 	vvec[0].vv_addr = buf[0].addr;
559 	vvec[0].vv_size = PAGE_SIZE * 2;
560 	vvec[1].vv_addr = buf[1].addr;
561 	vvec[1].vv_size = 0;
562 	pcount = 3;
563 
564 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
565 
566 	expect(r == EINVAL);
567 
568 	got_result("zero virtual memory size");
569 
570 	/* Test excessive virtual memory size. */
571 	vvec[1].vv_size = (vir_bytes) -1;
572 
573 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
574 
575 	expect(r == EFAULT || r == EPERM);
576 
577 	got_result("excessive virtual memory size");
578 
579 	/* Test invalid virtual memory. */
580 	vvec[1].vv_addr = 0L;
581 	vvec[1].vv_size = PAGE_SIZE;
582 
583 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
584 
585 	expect(r == EFAULT);
586 
587 	got_result("invalid virtual memory");
588 
589 	/* Test virtual memory overrun. */
590 	vvec[0].vv_size++;
591 	vvec[1].vv_addr = buf[1].addr;
592 
593 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
594 
595 	expect(r == EFAULT);
596 
597 	got_result("virtual memory overrun");
598 
599 	free_bufs(buf, 2);
600 }
601 
602 static void test_vector2(void)
603 {
604 	struct vumap_vir vvec[2], *vvecp;
605 	struct vumap_phys pvec[3], *pvecp;
606 	struct buf buf[2];
607 	phys_bytes dummy;
608 	int r, pcount;
609 
610 	test_group("vector, part 2");
611 
612 	buf[0].pages = 2;
613 	buf[0].flags = BUF_PREALLOC;
614 	buf[1].pages = 1;
615 	buf[1].flags = BUF_PREALLOC;
616 
617 	alloc_bufs(buf, 2);
618 
619 	/* Test zero virtual count. */
620 	vvec[0].vv_addr = buf[0].addr;
621 	vvec[0].vv_size = PAGE_SIZE * 2;
622 	vvec[1].vv_addr = buf[1].addr;
623 	vvec[1].vv_size = PAGE_SIZE;
624 	pcount = 3;
625 
626 	r = do_vumap(SELF, vvec, 0, 0, VUA_READ, pvec, &pcount);
627 
628 	expect(r == EINVAL);
629 
630 	got_result("zero virtual count");
631 
632 	/* Test negative virtual count. */
633 	r = do_vumap(SELF, vvec, -1, 0, VUA_WRITE, pvec, &pcount);
634 
635 	expect(r == EINVAL);
636 
637 	got_result("negative virtual count");
638 
639 	/* Test zero physical count. */
640 	pcount = 0;
641 
642 	r = do_vumap(SELF, vvec, 2, 0, VUA_WRITE, pvec, &pcount);
643 
644 	expect(r == EINVAL);
645 
646 	got_result("zero physical count");
647 
648 	/* Test negative physical count. */
649 	pcount = -1;
650 
651 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
652 
653 	expect(r == EINVAL);
654 
655 	got_result("negative physical count");
656 
657 	/* Test invalid virtual vector pointer. */
658 	pcount = 2;
659 
660 	r = do_vumap(SELF, NULL, 2, 0, VUA_READ, pvec, &pcount);
661 
662 	expect(r == EFAULT);
663 
664 	got_result("invalid virtual vector pointer");
665 
666 	/* Test unallocated virtual vector. */
667 	vvecp = (struct vumap_vir *) mmap(NULL, PAGE_SIZE,
668 		PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);
669 
670 	if (vvecp == MAP_FAILED)
671 		panic("unable to allocate virtual vector");
672 
673 	r = do_vumap(SELF, vvecp, 2, 0, VUA_READ, pvec, &pcount);
674 
675 	expect(r == EFAULT);
676 	expect(!is_allocated((vir_bytes) vvecp, PAGE_SIZE, &dummy));
677 
678 	got_result("unallocated virtual vector pointer");
679 
680 	munmap((void *) vvecp, PAGE_SIZE);
681 
682 	/* Test invalid physical vector pointer. */
683 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, NULL, &pcount);
684 
685 	expect(r == EFAULT);
686 
687 	got_result("invalid physical vector pointer");
688 
689 	/* Test unallocated physical vector. */
690 	pvecp = (struct vumap_phys *) mmap(NULL, PAGE_SIZE,
691 		PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);
692 
693 	if (pvecp == MAP_FAILED)
694 		panic("unable to allocate physical vector");
695 
696 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvecp, &pcount);
697 
698 	expect(r == OK);
699 	expect(is_allocated((vir_bytes) pvecp, PAGE_SIZE, &dummy));
700 	expect(pcount == 2);
701 	expect(pvecp[0].vp_size == PAGE_SIZE * 2);
702 	expect(pvecp[0].vp_addr == buf[0].phys);
703 	expect(pvecp[1].vp_size == PAGE_SIZE);
704 	expect(pvecp[1].vp_addr == buf[1].phys);
705 
706 	got_result("unallocated physical vector pointer");
707 
708 	munmap((void *) pvecp, PAGE_SIZE);
709 
710 	free_bufs(buf, 2);
711 }
712 
713 static void test_grant(void)
714 {
715 	struct vumap_vir vvec[2];
716 	struct vumap_phys pvec[3];
717 	struct buf buf[2];
718 	int r, pcount;
719 
720 	test_group("grant");
721 
722 	buf[0].pages = 1;
723 	buf[0].flags = BUF_PREALLOC;
724 	buf[1].pages = 2;
725 	buf[1].flags = BUF_PREALLOC;
726 
727 	alloc_bufs(buf, 2);
728 
729 	/* Test write-only access on read-only grant. */
730 	grant_access = CPF_READ; /* override */
731 
732 	vvec[0].vv_addr = buf[0].addr;
733 	vvec[0].vv_size = PAGE_SIZE;
734 	pcount = 1;
735 
736 	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);
737 
738 	expect(r == EPERM);
739 
740 	got_result("write-only access on read-only grant");
741 
742 	/* Test read-write access on read-only grant. */
743 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);
744 
745 	expect(r == EPERM);
746 
747 	got_result("read-write access on read-only grant");
748 
749 	/* Test read-only access on write-only grant. */
750 	grant_access = CPF_WRITE; /* override */
751 
752 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
753 
754 	expect(r == EPERM);
755 
756 	got_result("read-only access on write-only grant");
757 
758 	/* Test read-write access on write grant. */
759 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);
760 
761 	expect(r == EPERM);
762 
763 	got_result("read-write access on write-only grant");
764 
765 	/* Test read-only access on read-write grant. */
766 	grant_access = CPF_READ | CPF_WRITE; /* override */
767 
768 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
769 
770 	expect(r == OK);
771 	expect(pcount == 1);
772 	expect(pvec[0].vp_size == PAGE_SIZE);
773 	expect(pvec[0].vp_addr == buf[0].phys);
774 
775 	got_result("read-only access on read-write grant");
776 
777 	grant_access = 0; /* reset */
778 
779 	/* Test invalid grant. */
780 	grant_exception = GE_INVALID;
781 
782 	vvec[0].vv_addr = buf[0].addr;
783 	vvec[0].vv_size = PAGE_SIZE;
784 	vvec[1].vv_addr = buf[1].addr;
785 	vvec[1].vv_size = PAGE_SIZE * 2;
786 	pcount = 3;
787 
788 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
789 
790 	expect(r == EINVAL);
791 
792 	got_result("invalid grant");
793 
794 	/* Test revoked grant. */
795 	grant_exception = GE_REVOKED;
796 
797 	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);
798 
799 	expect(r == EPERM);
800 
801 	got_result("revoked grant");
802 
803 	grant_exception = GE_NONE;
804 
805 	free_bufs(buf, 2);
806 }
807 
808 static void test_offset(void)
809 {
810 	struct vumap_vir vvec[2];
811 	struct vumap_phys pvec[3];
812 	struct buf buf[4];
813 	size_t off, off2;
814 	int r, pcount;
815 
816 	test_group("offsets");
817 
818 	buf[0].pages = 1;
819 	buf[0].flags = BUF_PREALLOC;
820 	buf[1].pages = 2;
821 	buf[1].flags = BUF_PREALLOC;
822 	buf[2].pages = 1;
823 	buf[2].flags = BUF_PREALLOC;
824 	buf[3].pages = 1;
825 	buf[3].flags = BUF_PREALLOC | BUF_ADJACENT;
826 
827 	alloc_bufs(buf, 4);
828 
829 	/* Test offset into aligned page. */
830 	off = 123;
831 	vvec[0].vv_addr = buf[0].addr;
832 	vvec[0].vv_size = PAGE_SIZE;
833 	pcount = 2;
834 
835 	r = do_vumap(SELF, vvec, 1, off, VUA_READ, pvec, &pcount);
836 
837 	expect(r == OK);
838 	expect(pcount == 1);
839 	expect(pvec[0].vp_addr == buf[0].phys + off);
840 	expect(pvec[0].vp_size == vvec[0].vv_size - off);
841 
842 	got_result("offset into aligned page");
843 
844 	/* Test offset into unaligned page. */
845 	off2 = 456;
846 	assert(off + off2 < PAGE_SIZE);
847 	vvec[0].vv_addr = buf[0].addr + off;
848 	vvec[0].vv_size = PAGE_SIZE - off;
849 	pcount = 2;
850 
851 	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);
852 
853 	expect(r == OK);
854 	expect(pcount == 1);
855 	expect(pvec[0].vp_addr == buf[0].phys + off + off2);
856 	expect(pvec[0].vp_size == vvec[0].vv_size - off2);
857 
858 	got_result("offset into unaligned page");
859 
860 	/* Test offset into contiguous page set. */
861 	off = 1234;
862 	off2 = 567;
863 	assert(off + off2 < PAGE_SIZE);
864 	vvec[0].vv_addr = buf[1].addr + off;
865 	vvec[0].vv_size = (PAGE_SIZE - off) * 2;
866 	pcount = 3;
867 
868 	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);
869 
870 	expect(r == OK);
871 	expect(pcount == 1);
872 	expect(pvec[0].vp_addr == buf[1].phys + off + off2);
873 	expect(pvec[0].vp_size == vvec[0].vv_size - off2);
874 
875 	got_result("offset into contiguous page set");
876 
877 	/* Test offset into noncontiguous page set. */
878 	vvec[0].vv_addr = buf[2].addr + off;
879 	vvec[0].vv_size = (PAGE_SIZE - off) * 2;
880 	pcount = 3;
881 
882 	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);
883 
884 	expect(r == OK);
885 	expect(pcount == 2);
886 	expect(pvec[0].vp_addr == buf[2].phys + off + off2);
887 	expect(pvec[0].vp_size == PAGE_SIZE - off - off2);
888 	expect(pvec[1].vp_addr == buf[3].phys);
889 	expect(pvec[1].vp_size == PAGE_SIZE - off);
890 
891 	got_result("offset into noncontiguous page set");
892 
893 	/* Test offset to last byte. */
894 	off = PAGE_SIZE - off2 - 1;
895 	vvec[0].vv_addr = buf[0].addr + off2;
896 	vvec[0].vv_size = PAGE_SIZE - off2;
897 	pcount = 2;
898 
899 	r = do_vumap(SELF, vvec, 1, off, VUA_READ, pvec, &pcount);
900 
901 	expect(r == OK);
902 	expect(pcount == 1);
903 	expect(pvec[0].vp_addr == buf[0].phys + off + off2);
904 	expect(pvec[0].vp_size == 1);
905 
906 	got_result("offset to last byte");
907 
908 	/* Test offset at range end. */
909 	off = 234;
910 	vvec[0].vv_addr = buf[1].addr + off;
911 	vvec[0].vv_size = PAGE_SIZE - off * 2;
912 	vvec[1].vv_addr = vvec[0].vv_addr + vvec[0].vv_size;
913 	vvec[1].vv_size = off;
914 
915 	r = do_vumap(SELF, vvec, 2, vvec[0].vv_size, VUA_READ, pvec, &pcount);
916 
917 	expect(r == EINVAL);
918 
919 	got_result("offset at range end");
920 
921 	/* Test offset beyond range end. */
922 	vvec[0].vv_addr = buf[1].addr;
923 	vvec[0].vv_size = PAGE_SIZE;
924 	vvec[1].vv_addr = buf[1].addr + PAGE_SIZE;
925 	vvec[1].vv_size = PAGE_SIZE;
926 
927 	r = do_vumap(SELF, vvec, 2, PAGE_SIZE + off, VUA_READ, pvec, &pcount);
928 
929 	expect(r == EINVAL);
930 
931 	got_result("offset beyond range end");
932 
933 	/* Test negative offset. */
934 	vvec[0].vv_addr = buf[1].addr + off + off2;
935 	vvec[0].vv_size = PAGE_SIZE;
936 
937 	r = do_vumap(SELF, vvec, 1, (size_t) -1, VUA_READ, pvec, &pcount);
938 
939 	expect(r == EINVAL);
940 
941 	got_result("negative offset");
942 
943 	free_bufs(buf, 4);
944 }
945 
946 static void test_access(void)
947 {
948 	struct vumap_vir vvec[3];
949 	struct vumap_phys pvec[4], *pvecp;
950 	struct buf buf[7];
951 	int i, r, pcount, pindex;
952 
953 	test_group("access");
954 
955 	buf[0].pages = 1;
956 	buf[0].flags = 0;
957 	buf[1].pages = 1;
958 	buf[1].flags = BUF_PREALLOC | BUF_ADJACENT;
959 	buf[2].pages = 1;
960 	buf[2].flags = BUF_ADJACENT;
961 
962 	alloc_bufs(buf, 3);
963 
964 	/* Test no access flags. */
965 	vvec[0].vv_addr = buf[0].addr;
966 	vvec[0].vv_size = PAGE_SIZE * 3;
967 	pcount = 4;
968 
969 	r = do_vumap(SELF, vvec, 1, 0, 0, pvec, &pcount);
970 
971 	expect(r == EINVAL);
972 	expect(!is_buf_allocated(&buf[0]));
973 	expect(is_buf_allocated(&buf[1]));
974 	expect(!is_buf_allocated(&buf[2]));
975 
976 	got_result("no access flags");
977 
978 	/* Test read-only access. */
979 	vvec[0].vv_addr = buf[0].addr;
980 	vvec[0].vv_size = PAGE_SIZE * 3;
981 	pcount = 1;
982 
983 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
984 
985 	expect(r == EFAULT);
986 	expect(!is_buf_allocated(&buf[0]));
987 	expect(is_buf_allocated(&buf[1]));
988 	expect(!is_buf_allocated(&buf[2]));
989 
990 	got_result("read-only access");
991 
992 	/* Test read-write access. */
993 	vvec[0].vv_addr = buf[0].addr;
994 	vvec[0].vv_size = PAGE_SIZE * 3;
995 	pcount = 4;
996 
997 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);
998 
999 	expect(r == EFAULT);
1000 	expect(!is_buf_allocated(&buf[0]));
1001 	expect(is_buf_allocated(&buf[1]));
1002 	expect(!is_buf_allocated(&buf[2]));
1003 
1004 	got_result("read-write access");
1005 
1006 	/* Test write-only access. */
1007 	vvec[0].vv_addr = buf[0].addr;
1008 	vvec[0].vv_size = PAGE_SIZE * 3;
1009 	pcount = 4;
1010 
1011 	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);
1012 
1013 	expect(r == OK);
1014 	/* We don't control the physical addresses of the faulted-in pages, so
1015 	 * they may or may not end up being contiguous with their neighbours.
1016 	 */
1017 	expect(pcount >= 1 && pcount <= 3);
1018 	expect(is_buf_allocated(&buf[0]));
1019 	expect(is_buf_allocated(&buf[1]));
1020 	expect(is_buf_allocated(&buf[2]));
1021 	expect(pvec[0].vp_addr == buf[0].phys);
1022 	switch (pcount) {
1023 	case 1:
1024 		expect(pvec[0].vp_size == PAGE_SIZE * 3);
1025 		break;
1026 	case 2:
1027 		expect(pvec[0].vp_size + pvec[1].vp_size == PAGE_SIZE * 3);
1028 		if (pvec[0].vp_size > PAGE_SIZE)
1029 			expect(pvec[1].vp_addr == buf[2].phys);
1030 		else
1031 			expect(pvec[1].vp_addr == buf[1].phys);
1032 		break;
1033 	case 3:
1034 		expect(pvec[0].vp_size == PAGE_SIZE);
1035 		expect(pvec[1].vp_addr == buf[1].phys);
1036 		expect(pvec[1].vp_size == PAGE_SIZE);
1037 		expect(pvec[2].vp_addr == buf[2].phys);
1038 		expect(pvec[2].vp_size == PAGE_SIZE);
1039 		break;
1040 	}
1041 
1042 	got_result("write-only access");
1043 
1044 	free_bufs(buf, 3);
1045 
1046 	/* Test page faulting. */
1047 	buf[0].pages = 1;
1048 	buf[0].flags = 0;
1049 	buf[1].pages = 1;
1050 	buf[1].flags = BUF_PREALLOC | BUF_ADJACENT;
1051 	buf[2].pages = 1;
1052 	buf[2].flags = 0;
1053 	buf[3].pages = 2;
1054 	buf[3].flags = BUF_PREALLOC;
1055 	buf[4].pages = 1;
1056 	buf[4].flags = BUF_ADJACENT;
1057 	buf[5].pages = 1;
1058 	buf[5].flags = BUF_ADJACENT;
1059 	buf[6].pages = 1;
1060 	buf[6].flags = 0;
1061 
1062 	alloc_bufs(buf, 7);
1063 
1064 	vvec[0].vv_addr = buf[0].addr + PAGE_SIZE - 1;
1065 	vvec[0].vv_size = PAGE_SIZE - 1;
1066 	vvec[1].vv_addr = buf[2].addr;
1067 	vvec[1].vv_size = PAGE_SIZE;
1068 	vvec[2].vv_addr = buf[3].addr + 123;
1069 	vvec[2].vv_size = PAGE_SIZE * 4 - 456;
1070 	pvecp = (struct vumap_phys *) buf[6].addr;
1071 	pcount = 7;
1072 	assert(sizeof(struct vumap_phys) * pcount <= PAGE_SIZE);
1073 
1074 	r = do_vumap(SELF, vvec, 3, 0, VUA_WRITE, pvecp, &pcount);
1075 
1076 	expect(r == OK);
1077 	/* Same story but more possibilities. I hope I got this right. */
1078 	expect(pcount >= 3 && pcount <= 6);
1079 	for (i = 0; i < 7; i++)
1080 		expect(is_buf_allocated(&buf[i]));
1081 	expect(pvecp[0].vp_addr == buf[0].phys);
1082 	if (pvecp[0].vp_size == 1) {
1083 		expect(pvecp[1].vp_addr == buf[1].phys);
1084 		expect(pvecp[1].vp_size == PAGE_SIZE - 2);
1085 		pindex = 2;
1086 	} else {
1087 		expect(pvecp[0].vp_size == PAGE_SIZE - 1);
1088 		pindex = 1;
1089 	}
1090 	expect(pvecp[pindex].vp_addr == buf[2].phys);
1091 	expect(pvecp[pindex].vp_size == PAGE_SIZE);
1092 	pindex++;
1093 	expect(pvecp[pindex].vp_addr == buf[3].phys + 123);
1094 	switch (pcount - pindex) {
1095 	case 1:
1096 		expect(pvecp[pindex].vp_size == PAGE_SIZE * 4 - 456);
1097 		break;
1098 	case 2:
1099 		if (pvecp[pindex].vp_size > PAGE_SIZE * 2 - 123) {
1100 			expect(pvecp[pindex].vp_size == PAGE_SIZE * 3 - 123);
1101 			expect(pvecp[pindex + 1].vp_addr == buf[5].phys);
1102 			expect(pvecp[pindex + 1].vp_size ==
1103 				PAGE_SIZE - (456 - 123));
1104 		} else {
1105 			expect(pvecp[pindex].vp_size == PAGE_SIZE * 2 - 123);
1106 			expect(pvecp[pindex + 1].vp_addr == buf[4].phys);
1107 			expect(pvecp[pindex + 1].vp_size ==
1108 				PAGE_SIZE * 2 - (456 - 123));
1109 		}
1110 		break;
1111 	case 3:
1112 		expect(pvecp[pindex].vp_size == PAGE_SIZE * 2 - 123);
1113 		expect(pvecp[pindex + 1].vp_addr == buf[4].phys);
1114 		expect(pvecp[pindex + 1].vp_size == PAGE_SIZE);
1115 		expect(pvecp[pindex + 2].vp_addr == buf[5].phys);
1116 		expect(pvecp[pindex + 2].vp_size == PAGE_SIZE - (456 - 123));
1117 		break;
1118 	default:
1119 		expect(0);
1120 	}
1121 
1122 	got_result("page faulting");
1123 
1124 	free_bufs(buf, 7);
1125 
1126 	/* MISSING: tests to see whether a request with VUA_WRITE or
1127 	 * (VUA_READ|VUA_WRITE) correctly gets an EFAULT for a read-only page.
1128 	 * As of this writing, support for such protection is missing from the
1129 	 * system altogether.
1130 	 */
1131 }
1132 
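/* Helper for the physical limit tests: perform a vumap call that is expected
 * to fill exactly MAPVEC_NR single-page entries of the physical vector.
 */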
1133 static void phys_limit(struct vumap_vir *vvec, int vcount,
1134 	struct vumap_phys *pvec, int pcount, struct buf *buf, char *desc)
1135 {
1136 	int i, r;
1137 
1138 	r = do_vumap(SELF, vvec, vcount, 0, VUA_READ, pvec, &pcount);
1139 
1140 	expect(r == OK);
1141 	expect(pcount == MAPVEC_NR);
1142 	for (i = 0; i < MAPVEC_NR; i++) {
1143 		expect(pvec[i].vp_addr == buf[i].phys);
1144 		expect(pvec[i].vp_size == PAGE_SIZE);
1145 	}
1146 
1147 	got_result(desc);
1148 }
1149 
1150 static void test_limits(void)
1151 {
1152 	struct vumap_vir vvec[MAPVEC_NR + 3];
1153 	struct vumap_phys pvec[MAPVEC_NR + 3];
1154 	struct buf buf[MAPVEC_NR + 9];
1155 	int i, r, vcount, pcount, nr_bufs;
1156 
1157 	test_group("limits");
1158 
1159 	/* Test large contiguous range. */
1160 	buf[0].pages = MAPVEC_NR + 2;
1161 	buf[0].flags = BUF_PREALLOC;
1162 
1163 	alloc_bufs(buf, 1);
1164 
1165 	vvec[0].vv_addr = buf[0].addr;
1166 	vvec[0].vv_size = (MAPVEC_NR + 2) * PAGE_SIZE;
1167 	pcount = 2;
1168 
1169 	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);
1170 
1171 	expect(r == OK);
1172 	expect(pcount == 1);
1173 	expect(pvec[0].vp_addr == buf[0].phys);
1174 	expect(pvec[0].vp_size == vvec[0].vv_size);
1175 
1176 	got_result("large contiguous range");
1177 
1178 	free_bufs(buf, 1);
1179 
1180 	/* I'd like to test MAPVEC_NR contiguous ranges of MAPVEC_NR pages
1181 	 * each, but chances are we don't have that much contiguous memory
1182 	 * available at all. In fact, the previous test may already fail
1183 	 * because of this..
1184 	 * because of this.
1185 
1186 	for (i = 0; i < MAPVEC_NR + 2; i++) {
1187 		buf[i].pages = 1;
1188 		buf[i].flags = BUF_PREALLOC;
1189 	}
1190 	buf[i].pages = 1;
1191 	buf[i].flags = BUF_PREALLOC | BUF_ADJACENT;
1192 
1193 	alloc_bufs(buf, MAPVEC_NR + 3);
1194 
1195 	/* Test virtual limit, one below. */
1196 	for (i = 0; i < MAPVEC_NR + 2; i++) {
1197 		vvec[i].vv_addr = buf[i].addr;
1198 		vvec[i].vv_size = PAGE_SIZE;
1199 	}
1200 	vvec[i - 1].vv_size += PAGE_SIZE;
1201 
1202 	pcount = MAPVEC_NR + 3;
1203 
1204 	r = do_vumap(SELF, vvec, MAPVEC_NR - 1, 0, VUA_READ, pvec, &pcount);
1205 
1206 	expect(r == OK);
1207 	expect(pcount == MAPVEC_NR - 1);
1208 	for (i = 0; i < MAPVEC_NR - 1; i++) {
1209 		expect(pvec[i].vp_addr == buf[i].phys);
1210 		expect(pvec[i].vp_size == PAGE_SIZE);
1211 	}
1212 
1213 	got_result("virtual limit, one below");
1214 
1215 	/* Test virtual limit, exact match. */
1216 	pcount = MAPVEC_NR + 3;
1217 
1218 	r = do_vumap(SELF, vvec, MAPVEC_NR, 0, VUA_WRITE, pvec, &pcount);
1219 
1220 	expect(r == OK);
1221 	expect(pcount == MAPVEC_NR);
1222 	for (i = 0; i < MAPVEC_NR; i++) {
1223 		expect(pvec[i].vp_addr == buf[i].phys);
1224 		expect(pvec[i].vp_size == PAGE_SIZE);
1225 	}
1226 
1227 	got_result("virtual limit, exact match");
1228 
1229 	/* Test virtual limit, one above. */
1230 	pcount = MAPVEC_NR + 3;
1231 
1232 	r = do_vumap(SELF, vvec, MAPVEC_NR + 1, 0, VUA_READ, pvec, &pcount);
1233 
1234 	expect(r == OK);
1235 	expect(pcount == MAPVEC_NR);
1236 	for (i = 0; i < MAPVEC_NR; i++) {
1237 		expect(pvec[i].vp_addr == buf[i].phys);
1238 		expect(pvec[i].vp_size == PAGE_SIZE);
1239 	}
1240 
1241 	got_result("virtual limit, one above");
1242 
1243 	/* Test virtual limit, two above. */
1244 	pcount = MAPVEC_NR + 3;
1245 
1246 	r = do_vumap(SELF, vvec, MAPVEC_NR + 2, 0, VUA_WRITE, pvec, &pcount);
1247 
1248 	expect(r == OK);
1249 	expect(pcount == MAPVEC_NR);
1250 	for (i = 0; i < MAPVEC_NR; i++) {
1251 		expect(pvec[i].vp_addr == buf[i].phys);
1252 		expect(pvec[i].vp_size == PAGE_SIZE);
1253 	}
1254 
1255 	got_result("virtual limit, two above");
1256 
1257 	/* Test physical limit, one below, aligned. */
1258 	pcount = MAPVEC_NR - 1;
1259 
1260 	r = do_vumap(SELF, vvec + 2, MAPVEC_NR, 0, VUA_READ, pvec, &pcount);
1261 
1262 	expect(r == OK);
1263 	expect(pcount == MAPVEC_NR - 1);
1264 	for (i = 0; i < MAPVEC_NR - 1; i++) {
1265 		expect(pvec[i].vp_addr == buf[i + 2].phys);
1266 		expect(pvec[i].vp_size == PAGE_SIZE);
1267 	}
1268 
1269 	got_result("physical limit, one below, aligned");
1270 
1271 	/* Test physical limit, one below, unaligned. */
1272 	pcount = MAPVEC_NR - 1;
1273 
1274 	r = do_vumap(SELF, vvec + 3, MAPVEC_NR, 0, VUA_READ, pvec, &pcount);
1275 
1276 	expect(r == OK);
1277 	expect(pcount == MAPVEC_NR - 1);
1278 	for (i = 0; i < MAPVEC_NR - 1; i++) {
1279 		expect(pvec[i].vp_addr == buf[i + 3].phys);
1280 		expect(pvec[i].vp_size == PAGE_SIZE);
1281 	}
1282 
1283 	got_result("physical limit, one below, unaligned");
1284 
1285 	free_bufs(buf, MAPVEC_NR + 3);
1286 
1287 	nr_bufs = sizeof(buf) / sizeof(buf[0]);
1288 
1289 	/* This ends up looking in our virtual address space as follows:
1290 	 * [P] [P] [P] [PPP] [PPP] ...(MAPVEC_NR x [PPP])... [PPP]
1291 	 * ..where P is a page, and the blocks are virtually contiguous.
1292 	 */
1293 	for (i = 0; i < nr_bufs; i += 3) {
1294 		buf[i].pages = 1;
1295 		buf[i].flags = BUF_PREALLOC;
1296 		buf[i + 1].pages = 1;
1297 		buf[i + 1].flags =
1298 			BUF_PREALLOC | ((i >= 3) ? BUF_ADJACENT : 0);
1299 		buf[i + 2].pages = 1;
1300 		buf[i + 2].flags =
1301 			BUF_PREALLOC | ((i >= 3) ? BUF_ADJACENT : 0);
1302 	}
1303 
1304 	alloc_bufs(buf, nr_bufs);
1305 
1306 	for (i = 0; i < 3; i++) {
1307 		vvec[i].vv_addr = buf[i].addr;
1308 		vvec[i].vv_size = PAGE_SIZE;
1309 	}
1310 	for ( ; i < nr_bufs / 3 + 1; i++) {
1311 		vvec[i].vv_addr = buf[(i - 2) * 3].addr;
1312 		vvec[i].vv_size = PAGE_SIZE * 3;
1313 	}
1314 	vcount = i;
1315 
1316 	/* In each of the following groups of tests, one try will be aligned
1317 	 * (that is, the last pvec entry will be for the last page in a vvec
1318 	 * entry) and two will be unaligned.
1319 	 */
1320 
1321 	/* Test physical limit, exact match. */
1322 	phys_limit(vvec, vcount, pvec, MAPVEC_NR, buf,
1323 		"physical limit, exact match, try 1");
1324 	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR, buf + 1,
1325 		"physical limit, exact match, try 2");
1326 	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR, buf + 2,
1327 		"physical limit, exact match, try 3");
1328 
1329 	/* Test physical limit, one above. */
1330 	phys_limit(vvec, vcount, pvec, MAPVEC_NR + 1, buf,
1331 		"physical limit, one above, try 1");
1332 	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR + 1, buf + 1,
1333 		"physical limit, one above, try 2");
1334 	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR + 1, buf + 2,
1335 		"physical limit, one above, try 3");
1336 
1337 	/* Test physical limit, two above. */
1338 	phys_limit(vvec, vcount, pvec, MAPVEC_NR + 2, buf,
1339 		"physical limit, two above, try 1");
1340 	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR + 2, buf + 1,
1341 		"physical limit, two above, try 2");
1342 	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR + 2, buf + 2,
1343 		"physical limit, two above, try 3");
1344 
1345 	free_bufs(buf, nr_bufs);
1346 }
1347 
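/* Run the full set of tests, either locally or through the relay service. */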
1348 static void do_tests(int use_relay)
1349 {
1350 	relay = use_relay;
1351 
1352 	test_basics();
1353 
1354 	if (!relay) test_endpt();	/* local only */
1355 
1356 	test_vector1();
1357 
1358 	if (!relay) test_vector2();	/* local only */
1359 
1360 	if (relay) test_grant();	/* remote only */
1361 
1362 	test_offset();
1363 
1364 	test_access();
1365 
1366 	test_limits();
1367 }
1368 
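/* SEF initialization callback: run all tests locally first, then once more
 * through the "vumaprelay" service, and communicate the overall result by
 * means of the return code.
 */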
1369 static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
1370 {
1371 	int r;
1372 
1373 	verbose = (env_argc > 1 && !strcmp(env_argv[1], "-v"));
1374 
1375 	if (verbose)
1376 		printf("Starting sys_vumap test set\n");
1377 
1378 	do_tests(FALSE /*use_relay*/);
1379 
1380 	if ((r = ds_retrieve_label_endpt("vumaprelay", &endpt)) != OK)
1381 		panic("unable to obtain endpoint for 'vumaprelay' (%d)", r);
1382 
1383 	do_tests(TRUE /*use_relay*/);
1384 
1385 	if (verbose)
1386 		printf("Completed sys_vumap test set, %u/%u tests failed\n",
1387 			failures, count);
1388 
1389 	/* The returned code will determine the outcome of the RS call, and
1390 	 * thus the entire test. The actual error code does not matter.
1391 	 */
1392 	return (failures) ? EINVAL : OK;
1393 }
1394 
1395 static void sef_local_startup(void)
1396 {
1397 	sef_setcb_init_fresh(sef_cb_init_fresh);
1398 
1399 	sef_startup();
1400 }
1401 
1402 int main(int argc, char **argv)
1403 {
1404 	env_setargs(argc, argv);
1405 
1406 	sef_local_startup();
1407 
1408 	return 0;
1409 }
1410