/* Block Device Driver Test driver, by D.C. van Moolenbroek */
#include <stdlib.h>
#include <stdarg.h>
#include <minix/blockdriver.h>
#include <minix/drvlib.h>
#include <minix/ds.h>
#include <minix/optset.h>
#include <sys/ioc_disk.h>
#include <sys/mman.h>
#include <assert.h>

enum {
	RESULT_OK,			/* exactly as expected */
	RESULT_DEATH,			/* driver died */
	RESULT_COMMFAIL,		/* communication failed */
	RESULT_BADTYPE,			/* bad type in message */
	RESULT_BADID,			/* bad request ID in message */
	RESULT_BADSTATUS,		/* bad/unexpected status in message */
	RESULT_TRUNC,			/* request truncated unexpectedly */
	RESULT_CORRUPT,			/* buffer touched erroneously */
	RESULT_MISSING,			/* buffer left untouched erroneously */
	RESULT_OVERFLOW,		/* area around buffer touched */
	RESULT_BADVALUE			/* bad/unexpected return value */
};

typedef struct {
	int type;
	ssize_t value;
} result_t;

static char driver_label[32] = "";	/* driver DS label */
static devminor_t driver_minor = -1;	/* driver's partition minor to use */
static endpoint_t driver_endpt;	/* driver endpoint */

static int may_write = FALSE;		/* may we write to the device? */
static int sector_size = 512;		/* size of a single disk sector */
static int min_read = 512;		/* minimum total size of read req */
static int min_write = 0;		/* minimum total size of write req */
static int element_size = 512;		/* minimum I/O vector element size */
static int max_size = 131072;		/* maximum total size of any req */
/* Note that we do not test exceeding the max_size limit, so it is safe to set
 * it to a value lower than the driver supports.
 */

/* These settings are used for automated test runs. */
static int contig = TRUE;		/* allocate contiguous DMA memory? */
static int silent = FALSE;		/* do not produce console output? */

static struct part_geom part;		/* base and size of target partition */

#define NR_OPENED 10			/* maximum number of opened devices */
static dev_t opened[NR_OPENED];	/* list of currently opened devices */
static int nr_opened = 0;		/* current number of opened devices */

static int total_tests = 0;		/* total number of tests performed */
static int failed_tests = 0;		/* number of tests that failed */
static int failed_groups = 0;		/* nr of groups that had failures */
static int group_failure;		/* has this group had a failure yet? */
static int driver_deaths = 0;		/* number of restarts that we saw */

/* Options supported by this driver. */
static struct optset optset_table[] = {
	{ "label",	OPT_STRING,	driver_label,	sizeof(driver_label) },
	{ "minor",	OPT_INT,	&driver_minor,	10		     },
	{ "rw",		OPT_BOOL,	&may_write,	TRUE		     },
	{ "ro",		OPT_BOOL,	&may_write,	FALSE		     },
	{ "sector",	OPT_INT,	&sector_size,	10		     },
	{ "element",	OPT_INT,	&element_size,	10		     },
	{ "min_read",	OPT_INT,	&min_read,	10		     },
	{ "min_write",	OPT_INT,	&min_write,	10		     },
	{ "max",	OPT_INT,	&max_size,	10		     },
	{ "nocontig",	OPT_BOOL,	&contig,	FALSE		     },
	{ "silent",	OPT_BOOL,	&silent,	TRUE		     },
	{ NULL,		0,		NULL,		0		     }
};

static void output(char *fmt, ...)
{
	/* Print debugging information, unless configured to be silent.
	 */
	va_list argp;

	if (silent)
		return;

	va_start(argp, fmt);

	vprintf(fmt, argp);

	va_end(argp);
}

static void *alloc_dma_memory(size_t size)
{
	/* Allocate memory that may be used for direct DMA. For most drivers,
	 * this means that the memory has to be physically contiguous. For some
	 * drivers (e.g. VND) we allow non-contiguous allocation, because VM is
	 * currently flaky and does not always manage to provide contiguous
	 * memory even when it should, thus causing needless test failures.
	 */
	void *ptr;

	if (contig)
		ptr = alloc_contig(size, 0, NULL);
	else
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			MAP_PREALLOC | MAP_ANON, -1, 0);

	/* alloc_contig() reports failure as NULL, mmap() as MAP_FAILED. */
	if (ptr == NULL || ptr == MAP_FAILED)
		panic("unable to allocate %d bytes of memory", size);

	return ptr;
}

static void free_dma_memory(void *ptr, size_t size)
{
	/* Free memory previously allocated for direct DMA. */
	if (contig)
		free_contig(ptr, size);
	else
		munmap(ptr, size);
}

static int set_result(result_t *res, int type, ssize_t value)
{
	/* Set the result to the given result type and with the given optional
	 * extra value. Return the type.
	 */
	res->type = type;
	res->value = value;

	return type;
}

static int accept_result(result_t *res, int type, ssize_t value)
{
	/* If the result is of the given type and value, reset it to a success
	 * result. This allows for a logical OR on error codes. Return whether
	 * the result was indeed reset.
	 */

	if (res->type == type && res->value == value) {
		set_result(res, RESULT_OK, 0);

		return TRUE;
	}

	return FALSE;
}

static void got_result(result_t *res, char *desc)
{
	/* Process the result of a test. Keep statistics.
	 */
	static int i = 0;	/* test number, counted across all groups */

	total_tests++;
	if (res->type != RESULT_OK) {
		failed_tests++;

		if (group_failure == FALSE) {
			failed_groups++;
			group_failure = TRUE;
		}
	}

	output("#%02d: %-38s\t[%s]\n", ++i, desc,
		(res->type == RESULT_OK) ? "PASS" : "FAIL");

	switch (res->type) {
	case RESULT_DEATH:
		output("- driver died\n");
		break;
	case RESULT_COMMFAIL:
		output("- communication failed; ipc_sendrec returned %d\n",
			res->value);
		break;
	case RESULT_BADTYPE:
		output("- bad type %d in reply message\n", res->value);
		break;
	case RESULT_BADID:
		output("- mismatched ID %d in reply message\n", res->value);
		break;
	case RESULT_BADSTATUS:
		output("- bad or unexpected status %d in reply message\n",
			res->value);
		break;
	case RESULT_TRUNC:
		output("- result size not as expected (%u bytes left)\n",
			res->value);
		break;
	case RESULT_CORRUPT:
		output("- buffer has been modified erroneously\n");
		break;
	case RESULT_MISSING:
		output("- buffer has been left untouched erroneously\n");
		break;
	case RESULT_OVERFLOW:
		output("- area around target buffer modified\n");
		break;
	case RESULT_BADVALUE:
		output("- bad or unexpected return value %d from call\n",
			res->value);
		break;
	}
}

static void test_group(char *name, int exec)
{
	/* Start a new group of tests.
	 */

	output("Test group: %s%s\n", name, exec ? "" : " (skipping)");

	group_failure = FALSE;
}

static void reopen_device(dev_t minor)
{
	/* Reopen a device after we were notified that the driver has died.
	 * Explicitly ignore any errors here; this is a feeble attempt to get
	 * ourselves back into business again.
	 */
	message m;

	memset(&m, 0, sizeof(m));
	m.m_type = BDEV_OPEN;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.access = may_write ?
		(BDEV_R_BIT | BDEV_W_BIT) : BDEV_R_BIT;
	m.m_lbdev_lblockdriver_msg.id = 0;

	(void) ipc_sendrec(driver_endpt, &m);
}

static int sendrec_driver(message *m_ptr, ssize_t exp, result_t *res)
{
	/* Make a call to the driver, and perform basic checks on the return
	 * message. Fill in the result structure, wiping out what was in there
	 * before. If the driver dies in the process, attempt to recover but
	 * fail the request.
	 */
	message m_orig;
	endpoint_t last_endpt;
	int i, r;

	m_orig = *m_ptr;

	r = ipc_sendrec(driver_endpt, m_ptr);

	if (r == EDEADSRCDST) {
		/* The driver has died. Find its new endpoint, and reopen all
		 * devices that we opened earlier. Then return failure.
		 */
		output("WARNING: driver has died, attempting to proceed\n");

		driver_deaths++;

		/* Keep trying until we get a new endpoint. */
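		/* DS may keep returning the old, dead endpoint for a while,
		 * so wait until the label resolves to a different endpoint.
		 */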
		last_endpt = driver_endpt;
		for (;;) {
			r = ds_retrieve_label_endpt(driver_label,
				&driver_endpt);

			if (r == OK && last_endpt != driver_endpt)
				break;

			micro_delay(100000);
		}

		for (i = 0; i < nr_opened; i++)
			reopen_device(opened[i]);

		return set_result(res, RESULT_DEATH, 0);
	}

	if (r != OK)
		return set_result(res, RESULT_COMMFAIL, r);

	if (m_ptr->m_type != BDEV_REPLY)
		return set_result(res, RESULT_BADTYPE, m_ptr->m_type);

	if (m_ptr->m_lblockdriver_lbdev_reply.id !=
			m_orig.m_lbdev_lblockdriver_msg.id)
		return set_result(res, RESULT_BADID,
				m_ptr->m_lblockdriver_lbdev_reply.id);

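	/* Only the sign of the reply status is checked at this point; tests
	 * that expect an exact status value perform their own check later.
	 */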
	if ((exp < 0 && m_ptr->m_lblockdriver_lbdev_reply.status >= 0) ||
			(exp >= 0 &&
			 m_ptr->m_lblockdriver_lbdev_reply.status < 0))
		return set_result(res, RESULT_BADSTATUS,
				m_ptr->m_lblockdriver_lbdev_reply.status);

	return set_result(res, RESULT_OK, 0);
}

static void raw_xfer(dev_t minor, u64_t pos, iovec_s_t *iovec, int nr_req,
	int write, ssize_t exp, result_t *res)
{
	/* Perform a transfer with a safecopy iovec already supplied.
	 */
	cp_grant_id_t grant;
	message m;
	int r;

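	/* Never even attempt a write when writing is disallowed: a stray
	 * write could destroy data on the target device.
	 */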
	assert(nr_req <= NR_IOREQS);
	assert(!write || may_write);

	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) iovec,
			sizeof(*iovec) * nr_req, CPF_READ)) == GRANT_INVALID)
		panic("unable to allocate grant");

	memset(&m, 0, sizeof(m));
	m.m_type = write ? BDEV_SCATTER : BDEV_GATHER;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.pos = pos;
	m.m_lbdev_lblockdriver_msg.count = nr_req;
	m.m_lbdev_lblockdriver_msg.grant = grant;
	m.m_lbdev_lblockdriver_msg.id = lrand48();

	r = sendrec_driver(&m, exp, res);

	if (cpf_revoke(grant) != OK)
		panic("unable to revoke grant");

	if (r != RESULT_OK)
		return;

	if (m.m_lblockdriver_lbdev_reply.status == exp)
		return;

	if (exp < 0)
		set_result(res, RESULT_BADSTATUS,
			m.m_lblockdriver_lbdev_reply.status);
	else
		set_result(res, RESULT_TRUNC,
			exp - m.m_lblockdriver_lbdev_reply.status);
}

static void vir_xfer(dev_t minor, u64_t pos, iovec_t *iovec, int nr_req,
	int write, ssize_t exp, result_t *res)
{
	/* Perform a transfer, creating and revoking grants for the I/O vector.
	 */
	iovec_s_t iov_s[NR_IOREQS];
	int i;

	assert(nr_req <= NR_IOREQS);

	for (i = 0; i < nr_req; i++) {
		iov_s[i].iov_size = iovec[i].iov_size;

		if ((iov_s[i].iov_grant = cpf_grant_direct(driver_endpt,
			(vir_bytes) iovec[i].iov_addr, iovec[i].iov_size,
			write ? CPF_READ : CPF_WRITE)) == GRANT_INVALID)
			panic("unable to allocate grant");
	}

	raw_xfer(minor, pos, iov_s, nr_req, write, exp, res);

	for (i = 0; i < nr_req; i++) {
		iovec[i].iov_size = iov_s[i].iov_size;

		if (cpf_revoke(iov_s[i].iov_grant) != OK)
			panic("unable to revoke grant");
	}
}

static void simple_xfer(dev_t minor, u64_t pos, u8_t *buf, size_t size,
	int write, ssize_t exp, result_t *res)
{
	/* Perform a transfer involving a single buffer.
	 */
	iovec_t iov;

	iov.iov_addr = (vir_bytes) buf;
	iov.iov_size = size;

	vir_xfer(minor, pos, &iov, 1, write, exp, res);
}

static void alloc_buf_and_grant(u8_t **ptr, cp_grant_id_t *grant,
	size_t size, int perms)
{
	/* Allocate a buffer suitable for DMA (i.e. contiguous) and create a
	 * grant for it with the requested CPF_* grant permissions.
	 */

	*ptr = alloc_dma_memory(size);

	if ((*grant = cpf_grant_direct(driver_endpt, (vir_bytes) *ptr, size,
			perms)) == GRANT_INVALID)
		panic("unable to allocate grant");
}

static void free_buf_and_grant(u8_t *ptr, cp_grant_id_t grant, size_t size)
{
	/* Revoke a grant and free a buffer.
	 */

	cpf_revoke(grant);

	free_dma_memory(ptr, size);
}

static void bad_read1(void)
{
	/* Test various illegal read transfer requests, part 1.
	 */
	message mt, m;
	iovec_s_t iovt, iov;
	cp_grant_id_t grant, grant2, grant3;
	u8_t *buf_ptr;
	vir_bytes buf_size;
	result_t res;

	test_group("bad read requests, part one", TRUE);

#define BUF_SIZE	4096
	buf_size = BUF_SIZE;

	alloc_buf_and_grant(&buf_ptr, &grant2, buf_size, CPF_WRITE);

	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) &iov,
			sizeof(iov), CPF_READ)) == GRANT_INVALID)
		panic("unable to allocate grant");

	/* Initialize the defaults for some of the tests.
	 * This is a legitimate request for the first block of the partition.
	 */
	memset(&mt, 0, sizeof(mt));
	mt.m_type = BDEV_GATHER;
	mt.m_lbdev_lblockdriver_msg.minor = driver_minor;
	mt.m_lbdev_lblockdriver_msg.pos = 0LL;
	mt.m_lbdev_lblockdriver_msg.count = 1;
	mt.m_lbdev_lblockdriver_msg.grant = grant;
	mt.m_lbdev_lblockdriver_msg.id = lrand48();

	memset(&iovt, 0, sizeof(iovt));
	iovt.iov_grant = grant2;
	iovt.iov_size = buf_size;

	/* Test normal request. */
	m = mt;
	iov = iovt;

	sendrec_driver(&m, OK, &res);

	if (res.type == RESULT_OK &&
		m.m_lblockdriver_lbdev_reply.status != (ssize_t) iov.iov_size) {
		res.type = RESULT_TRUNC;
		res.value = m.m_lblockdriver_lbdev_reply.status;
	}

	got_result(&res, "normal request");

	/* Test zero iovec elements. */
	m = mt;
	iov = iovt;

	m.m_lbdev_lblockdriver_msg.count = 0;

	sendrec_driver(&m, EINVAL, &res);

	got_result(&res, "zero iovec elements");

	/* Test bad iovec grant. */
	m = mt;

	m.m_lbdev_lblockdriver_msg.grant = GRANT_INVALID;

	sendrec_driver(&m, EINVAL, &res);

	got_result(&res, "bad iovec grant");

	/* Test revoked iovec grant. */
	m = mt;
	iov = iovt;

	if ((grant3 = cpf_grant_direct(driver_endpt, (vir_bytes) &iov,
			sizeof(iov), CPF_READ)) == GRANT_INVALID)
		panic("unable to allocate grant");

	cpf_revoke(grant3);

	m.m_lbdev_lblockdriver_msg.grant = grant3;

	sendrec_driver(&m, EINVAL, &res);

	accept_result(&res, RESULT_BADSTATUS, EPERM);

	got_result(&res, "revoked iovec grant");

	/* Test normal request (final check). */
	m = mt;
	iov = iovt;

	sendrec_driver(&m, OK, &res);

	if (res.type == RESULT_OK &&
		m.m_lblockdriver_lbdev_reply.status != (ssize_t) iov.iov_size) {
		res.type = RESULT_TRUNC;
		res.value = m.m_lblockdriver_lbdev_reply.status;
	}

	got_result(&res, "normal request");

	/* Clean up. */
	free_buf_and_grant(buf_ptr, grant2, buf_size);

	cpf_revoke(grant);
}

static u32_t get_sum(u8_t *ptr, size_t size)
{
	/* Compute a checksum over the given buffer.
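	 * The xor-shift mixing makes the sum sensitive to byte order as well
	 * as to byte values, so reordered data is flagged as corruption too.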
	 */
	u32_t sum;

	for (sum = 0; size > 0; size--, ptr++)
		sum = sum ^ (sum << 5) ^ *ptr;

	return sum;
}

static u32_t fill_rand(u8_t *ptr, size_t size)
{
	/* Fill the given buffer with random data. Return a checksum over the
	 * resulting data.
	 */
	size_t i;

	for (i = 0; i < size; i++)
		ptr[i] = lrand48() % 256;

	return get_sum(ptr, size);
}

static void test_sum(u8_t *ptr, size_t size, u32_t sum, int should_match,
	result_t *res)
{
	/* If the test succeeded so far, check whether the given buffer does
	 * or does not match the given checksum, and adjust the test result
	 * accordingly.
	 */
	u32_t sum2;

	if (res->type != RESULT_OK)
		return;

	sum2 = get_sum(ptr, size);

	if ((sum == sum2) != should_match) {
		res->type = should_match ? RESULT_CORRUPT : RESULT_MISSING;
		res->value = 0;		/* not much that's useful here */
	}
}

static void bad_read2(void)
{
	/* Test various illegal read transfer requests, part 2.
	 *
	 * Consider allowing this test to be run twice, with different buffer
	 * sizes. It appears that we can make at_wini misbehave by making the
	 * size exceed the per-operation size (128KB ?). On the other hand, we
	 * then need to start checking partition sizes, possibly.
	 */
	u8_t *buf_ptr, *buf2_ptr, *buf3_ptr, c1, c2;
	size_t buf_size, buf2_size, buf3_size;
	cp_grant_id_t buf_grant, buf2_grant, buf3_grant, grant;
	u32_t buf_sum, buf2_sum, buf3_sum;
	iovec_s_t iov[3], iovt[3];
	result_t res;

	test_group("bad read requests, part two", TRUE);

	buf_size = buf2_size = buf3_size = BUF_SIZE;

	alloc_buf_and_grant(&buf_ptr, &buf_grant, buf_size, CPF_WRITE);
	alloc_buf_and_grant(&buf2_ptr, &buf2_grant, buf2_size, CPF_WRITE);
	alloc_buf_and_grant(&buf3_ptr, &buf3_grant, buf3_size, CPF_WRITE);

	iovt[0].iov_grant = buf_grant;
	iovt[0].iov_size = buf_size;
	iovt[1].iov_grant = buf2_grant;
	iovt[1].iov_size = buf2_size;
	iovt[2].iov_grant = buf3_grant;
	iovt[2].iov_size = buf3_size;

	/* Test normal vector request. */
	memcpy(iov, iovt, sizeof(iovt));

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE,
		buf_size + buf2_size + buf3_size, &res);

	test_sum(buf_ptr, buf_size, buf_sum, FALSE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, FALSE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, FALSE, &res);

	got_result(&res, "normal vector request");

	/* Test zero sized iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[1].iov_size = 0;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "zero size in iovec element");

	/* Test negative sized iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[1].iov_size = (vir_bytes) LONG_MAX + 1;

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "negative size in iovec element");

	/* Test iovec with negative total size. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[0].iov_size = LONG_MAX / 2 - 1;
	iov[1].iov_size = LONG_MAX / 2 - 1;

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "negative total size");

	/* Test iovec with wrapping total size. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[0].iov_size = LONG_MAX - 1;
	iov[1].iov_size = LONG_MAX - 1;

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "wrapping total size");

	/* Test word-unaligned iovec element size. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[1].iov_size--;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);
	c1 = buf2_ptr[buf2_size - 1];

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, BUF_SIZE * 3 - 1,
		&res);

	if (accept_result(&res, RESULT_BADSTATUS, EINVAL)) {
		/* Do not test the first buffer, as it may contain a partial
		 * result.
		 */
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);
	} else {
		test_sum(buf_ptr, buf_size, buf_sum, FALSE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, FALSE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, FALSE, &res);
		if (c1 != buf2_ptr[buf2_size - 1])
			set_result(&res, RESULT_CORRUPT, 0);
	}

	got_result(&res, "word-unaligned size in iovec element");

	/* Test invalid grant in iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[1].iov_grant = GRANT_INVALID;

	fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	/* Do not test the first buffer, as it may contain a partial result. */
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "invalid grant in iovec element");

	/* Test revoked grant in iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) buf2_ptr,
			buf2_size, CPF_WRITE)) == GRANT_INVALID)
		panic("unable to allocate grant");

	cpf_revoke(grant);

	iov[1].iov_grant = grant;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	accept_result(&res, RESULT_BADSTATUS, EPERM);

	/* Do not test the first buffer, as it may contain a partial result. */
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "revoked grant in iovec element");

	/* Test read-only grant in iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) buf2_ptr,
			buf2_size, CPF_READ)) == GRANT_INVALID)
		panic("unable to allocate grant");

	iov[1].iov_grant = grant;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	accept_result(&res, RESULT_BADSTATUS, EPERM);

	/* Do not test the first buffer, as it may contain a partial result. */
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "read-only grant in iovec element");

	cpf_revoke(grant);

	/* Test word-unaligned iovec element buffer. */
	memcpy(iov, iovt, sizeof(iovt));
	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) (buf2_ptr + 1),
			buf2_size - 2, CPF_WRITE)) == GRANT_INVALID)
		panic("unable to allocate grant");

	iov[1].iov_grant = grant;
	iov[1].iov_size = buf2_size - 2;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);
	c1 = buf2_ptr[0];
	c2 = buf2_ptr[buf2_size - 1];

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, BUF_SIZE * 3 - 2, &res);

	if (accept_result(&res, RESULT_BADSTATUS, EINVAL)) {
		/* Do not test the first buffer, as it may contain a partial
		 * result.
		 */
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);
	} else {
		test_sum(buf_ptr, buf_size, buf_sum, FALSE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, FALSE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, FALSE, &res);
		if (c1 != buf2_ptr[0] || c2 != buf2_ptr[buf2_size - 1])
			set_result(&res, RESULT_CORRUPT, 0);
	}

	got_result(&res, "word-unaligned buffer in iovec element");

	cpf_revoke(grant);

	/* Test word-unaligned position. */
	/* Only perform this test if the minimum read size is not 1, in which
	 * case it is safe to assume that the driver expects no position
	 * alignment either. These tests are indeed not exhaustive yet. For now
	 * we assume that if no alignment is required at all, the driver does
	 * not implement special logic to achieve this, so we don't need to
	 * test all possible positions and sizes either (yes, laziness..).
	 */
	if (min_read > 1) {
		memcpy(iov, iovt, sizeof(iovt));

		buf_sum = fill_rand(buf_ptr, buf_size);
		buf2_sum = fill_rand(buf2_ptr, buf2_size);
		buf3_sum = fill_rand(buf3_ptr, buf3_size);

		raw_xfer(driver_minor, 1ULL, iov, 3, FALSE, EINVAL, &res);

		test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

		got_result(&res, "word-unaligned position");
	}

	/* Test normal vector request (final check). */
	memcpy(iov, iovt, sizeof(iovt));

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE,
		buf_size + buf2_size + buf3_size, &res);

	test_sum(buf_ptr, buf_size, buf_sum, FALSE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, FALSE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, FALSE, &res);

	got_result(&res, "normal vector request");

	/* Clean up. */
	free_buf_and_grant(buf3_ptr, buf3_grant, buf3_size);
	free_buf_and_grant(buf2_ptr, buf2_grant, buf2_size);
	free_buf_and_grant(buf_ptr, buf_grant, buf_size);
}

static void bad_write(void)
{
	/* Test various illegal write transfer requests, if writing is allowed.
	 * If handled correctly, these requests will not actually write data.
	 * This part of the test set is in need of further expansion.
	 */
	u8_t *buf_ptr, *buf2_ptr, *buf3_ptr;
	size_t buf_size, buf2_size, buf3_size, sector_unalign;
	cp_grant_id_t buf_grant, buf2_grant, buf3_grant;
	cp_grant_id_t grant;
	u32_t buf_sum, buf2_sum, buf3_sum;
	iovec_s_t iov[3], iovt[3];
	result_t res;

	test_group("bad write requests", may_write);

	if (!may_write)
		return;

	buf_size = buf2_size = buf3_size = BUF_SIZE;

	alloc_buf_and_grant(&buf_ptr, &buf_grant, buf_size, CPF_READ);
	alloc_buf_and_grant(&buf2_ptr, &buf2_grant, buf2_size, CPF_READ);
	alloc_buf_and_grant(&buf3_ptr, &buf3_grant, buf3_size, CPF_READ);

	iovt[0].iov_grant = buf_grant;
	iovt[0].iov_size = buf_size;
	iovt[1].iov_grant = buf2_grant;
	iovt[1].iov_size = buf2_size;
	iovt[2].iov_grant = buf3_grant;
	iovt[2].iov_size = buf3_size;

	/* Only perform write alignment tests if writes require alignment. */
	if (min_write == 0)
		min_write = sector_size;

	if (min_write > 1) {
		/* If min_write is larger than 2, use 2 as sector-unaligned
		 * size, as word-unaligned values (e.g., 1) may be filtered out
		 * on another code path.
		 */
		sector_unalign = (min_write > 2) ? 2 : 1;

		/* Test sector-unaligned write position. */
		memcpy(iov, iovt, sizeof(iovt));

		buf_sum = fill_rand(buf_ptr, buf_size);
		buf2_sum = fill_rand(buf2_ptr, buf2_size);
		buf3_sum = fill_rand(buf3_ptr, buf3_size);

		raw_xfer(driver_minor, (u64_t)sector_unalign, iov, 3, TRUE,
			EINVAL, &res);

		test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

		got_result(&res, "sector-unaligned write position");

		/* Test sector-unaligned write size. */
		memcpy(iov, iovt, sizeof(iovt));
		iov[1].iov_size -= sector_unalign;

		buf_sum = fill_rand(buf_ptr, buf_size);
		buf2_sum = fill_rand(buf2_ptr, buf2_size);
		buf3_sum = fill_rand(buf3_ptr, buf3_size);

		raw_xfer(driver_minor, 0ULL, iov, 3, TRUE, EINVAL, &res);

		test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

		got_result(&res, "sector-unaligned write size");
	}

	/* Test write-only grant in iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) buf2_ptr,
			buf2_size, CPF_WRITE)) == GRANT_INVALID)
		panic("unable to allocate grant");

	iov[1].iov_grant = grant;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, TRUE, EINVAL, &res);

	accept_result(&res, RESULT_BADSTATUS, EPERM);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "write-only grant in iovec element");

	cpf_revoke(grant);

	/* Clean up. */
	free_buf_and_grant(buf3_ptr, buf3_grant, buf3_size);
	free_buf_and_grant(buf2_ptr, buf2_grant, buf2_size);
	free_buf_and_grant(buf_ptr, buf_grant, buf_size);
}

static void vector_and_large_sub(size_t small_size)
{
	/* Check whether large vectored requests, and large single requests,
	 * succeed.
	 */
	size_t large_size, buf_size, buf2_size;
	u8_t *buf_ptr, *buf2_ptr;
	iovec_t iovec[NR_IOREQS];
	u64_t base_pos;
	result_t res;
	int i;

	base_pos = (u64_t)sector_size;

	large_size = small_size * NR_IOREQS;

	buf_size = large_size + sizeof(u32_t) * 2;
	buf2_size = large_size + sizeof(u32_t) * (NR_IOREQS + 1);

	buf_ptr = alloc_dma_memory(buf_size);
	buf2_ptr = alloc_dma_memory(buf2_size);

	/* The first buffer has one large chunk with dword-sized guards on each
	 * side. LPTR(n) points to the start of the nth small data chunk within
	 * the large chunk. The second buffer contains several small chunks. It
	 * has dword-sized guards before each chunk and after the last chunk.
	 * SPTR(n) points to the start of the nth small chunk.
	 */
#define SPTR(n) (buf2_ptr + sizeof(u32_t) + (n) * (sizeof(u32_t) + small_size))
#define LPTR(n) (buf_ptr + sizeof(u32_t) + small_size * (n))

	/* Write one large chunk, if writing is allowed. */
	if (may_write) {
		fill_rand(buf_ptr, buf_size); /* don't need the checksum */

		iovec[0].iov_addr = (vir_bytes) (buf_ptr + sizeof(u32_t));
		iovec[0].iov_size = large_size;

		vir_xfer(driver_minor, base_pos, iovec, 1, TRUE, large_size,
			&res);

		got_result(&res, "large write");
	}

	/* Read back in many small chunks. If writing is not allowed, do not
	 * check checksums.
	 */
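	/* Set known guard values in front of each small chunk, and after the
	 * last chunk, so that writes outside the chunk boundaries can be
	 * detected below.
	 */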
	for (i = 0; i < NR_IOREQS; i++) {
		* (((u32_t *) SPTR(i)) - 1) = 0xDEADBEEFL + i;
		iovec[i].iov_addr = (vir_bytes) SPTR(i);
		iovec[i].iov_size = small_size;
	}
	* (((u32_t *) SPTR(i)) - 1) = 0xFEEDFACEL;

	vir_xfer(driver_minor, base_pos, iovec, NR_IOREQS, FALSE, large_size,
		&res);

	if (res.type == RESULT_OK) {
		for (i = 0; i < NR_IOREQS; i++) {
			if (* (((u32_t *) SPTR(i)) - 1) != 0xDEADBEEFL + i)
				set_result(&res, RESULT_OVERFLOW, 0);
		}
		if (* (((u32_t *) SPTR(i)) - 1) != 0xFEEDFACEL)
			set_result(&res, RESULT_OVERFLOW, 0);
	}

	if (res.type == RESULT_OK && may_write) {
		for (i = 0; i < NR_IOREQS; i++) {
			test_sum(SPTR(i), small_size,
				get_sum(LPTR(i), small_size), TRUE, &res);
		}
	}

	got_result(&res, "vectored read");

	/* Write new data in many small chunks, if writing is allowed. */
	if (may_write) {
		fill_rand(buf2_ptr, buf2_size); /* don't need the checksum */

		for (i = 0; i < NR_IOREQS; i++) {
			iovec[i].iov_addr = (vir_bytes) SPTR(i);
			iovec[i].iov_size = small_size;
		}

		vir_xfer(driver_minor, base_pos, iovec, NR_IOREQS, TRUE,
			large_size, &res);

		got_result(&res, "vectored write");
	}

	/* Read back in one large chunk. If writing is allowed, the checksums
	 * must match the last write; otherwise, they must match the last read.
	 * In both cases, the expected content is in the second buffer.
	 */

	* (u32_t *) buf_ptr = 0xCAFEBABEL;
	* (u32_t *) (buf_ptr + sizeof(u32_t) + large_size) = 0xDECAFBADL;

	iovec[0].iov_addr = (vir_bytes) (buf_ptr + sizeof(u32_t));
	iovec[0].iov_size = large_size;

	vir_xfer(driver_minor, base_pos, iovec, 1, FALSE, large_size, &res);

	if (res.type == RESULT_OK) {
		if (* (u32_t *) buf_ptr != 0xCAFEBABEL)
			set_result(&res, RESULT_OVERFLOW, 0);
		if (* (u32_t *) (buf_ptr + sizeof(u32_t) + large_size) !=
				0xDECAFBADL)
			set_result(&res, RESULT_OVERFLOW, 0);
	}

	if (res.type == RESULT_OK) {
		for (i = 0; i < NR_IOREQS; i++) {
			test_sum(SPTR(i), small_size,
				get_sum(LPTR(i), small_size), TRUE, &res);
		}
	}

	got_result(&res, "large read");

#undef LPTR
#undef SPTR

	/* Clean up. */
	free_dma_memory(buf2_ptr, buf2_size);
	free_dma_memory(buf_ptr, buf_size);
}

static void vector_and_large(void)
{
	/* Check whether large vectored requests, and large single requests,
	 * succeed. These are request patterns commonly used by MFS and the
	 * filter driver, respectively. We try the same test twice: once with
	 * a common block size, and once to push against the max request size.
	 */
	size_t max_block;

	/* Make sure that the maximum size does not exceed the target device
	 * size, minus the margins we need for testing here and there.
	 */
	if (max_size > part.size - sector_size * 4)
		max_size = part.size - sector_size * 4;

	/* Compute the largest sector multiple which, when multiplied by
	 * NR_IOREQS, is no more than the maximum transfer size. Note that if
	 * max_size is not a multiple of sector_size, we're not going up to the
	 * limit entirely this way.
	 */
	max_block = max_size / NR_IOREQS;
	max_block -= max_block % sector_size;

#define COMMON_BLOCK_SIZE	4096

	test_group("vector and large, common block", TRUE);

	vector_and_large_sub(COMMON_BLOCK_SIZE);

	if (max_block != COMMON_BLOCK_SIZE) {
		test_group("vector and large, large block", TRUE);

		vector_and_large_sub(max_block);
	}
}

static void open_device(dev_t minor)
{
	/* Open a partition or subpartition. Remember that it has been opened,
	 * so that we can reopen it later in the event of a driver crash.
	 */
	message m;
	result_t res;

	memset(&m, 0, sizeof(m));
	m.m_type = BDEV_OPEN;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.access = may_write ?
		(BDEV_R_BIT | BDEV_W_BIT) : BDEV_R_BIT;
	m.m_lbdev_lblockdriver_msg.id = lrand48();

	sendrec_driver(&m, OK, &res);

	/* We assume that this call is supposed to succeed. We pretend it
	 * always succeeds, so that close_device() won't get confused later.
	 */
	assert(nr_opened < NR_OPENED);
	opened[nr_opened++] = minor;

	got_result(&res, minor == driver_minor ? "opening the main partition" :
		"opening a subpartition");
}

static void close_device(dev_t minor)
{
	/* Close a partition or subpartition. Remove it from the list of opened
	 * devices.
	 */
	message m;
	result_t res;
	int i;

	memset(&m, 0, sizeof(m));
	m.m_type = BDEV_CLOSE;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.id = lrand48();

	sendrec_driver(&m, OK, &res);

	assert(nr_opened > 0);
	for (i = 0; i < nr_opened; i++) {
		if (opened[i] == minor) {
			opened[i] = opened[--nr_opened];
			break;
		}
	}

	got_result(&res, minor == driver_minor ? "closing the main partition" :
		"closing a subpartition");
}

static int vir_ioctl(dev_t minor, int req, void *ptr, ssize_t exp,
	result_t *res)
{
	/* Perform an I/O control request, using a local buffer.
	 */
	cp_grant_id_t grant;
	message m;
	int r, perm;

	assert(!_MINIX_IOCTL_BIG(req));	/* not supported */

	perm = 0;
	if (_MINIX_IOCTL_IOR(req)) perm |= CPF_WRITE;
	if (_MINIX_IOCTL_IOW(req)) perm |= CPF_READ;

	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) ptr,
			_MINIX_IOCTL_SIZE(req), perm)) == GRANT_INVALID)
		panic("unable to allocate grant");

	memset(&m, 0, sizeof(m));
	m.m_type = BDEV_IOCTL;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.request = req;
	m.m_lbdev_lblockdriver_msg.grant = grant;
	m.m_lbdev_lblockdriver_msg.user = NONE;
	m.m_lbdev_lblockdriver_msg.id = lrand48();

	r = sendrec_driver(&m, exp, res);

	if (cpf_revoke(grant) != OK)
		panic("unable to revoke grant");

	return r;
}

static void misc_ioctl(void)
{
	/* Test some ioctls.
	 */
	result_t res;
	int openct;

	test_group("test miscellaneous ioctls", TRUE);

	/* Retrieve the main partition's base and size. Save for later. */
	vir_ioctl(driver_minor, DIOCGETP, &part, OK, &res);

	got_result(&res, "ioctl to get partition");

	/* The other tests do not check whether there is sufficient room. */
	if (res.type == RESULT_OK && part.size < (u64_t)max_size * 2)
		output("WARNING: small partition, some tests may fail\n");

	/* Test retrieving global driver open count. */
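	/* Preset a poison value, so that we can tell whether the driver
	 * fails to fill in the count at all.
	 */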
	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	/* We assume that we're the only client to the driver right now. */
	if (res.type == RESULT_OK && openct != 1) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "ioctl to get open count");

	/* Test increasing and re-retrieving open count. */
	open_device(driver_minor);

	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	if (res.type == RESULT_OK && openct != 2) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "increased open count after opening");

	/* Test decreasing and re-retrieving open count. */
	close_device(driver_minor);

	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	if (res.type == RESULT_OK && openct != 1) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "decreased open count after closing");
}

static void read_limits(dev_t sub0_minor, dev_t sub1_minor, size_t sub_size)
{
	/* Test reads up to, across, and beyond partition limits.
	 */
	u8_t *buf_ptr;
	size_t buf_size;
	u32_t sum, sum2, sum3;
	result_t res;

	test_group("read around subpartition limits", TRUE);

	buf_size = sector_size * 3;
	buf_ptr = alloc_dma_memory(buf_size);

	/* Read one sector up to the partition limit. */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		sector_size, FALSE, sector_size, &res);

	sum = get_sum(buf_ptr, sector_size);

	got_result(&res, "one sector read up to partition end");

	/* Read three sectors up to the partition limit. */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - buf_size, buf_ptr, buf_size,
		FALSE, buf_size, &res);

	test_sum(buf_ptr + sector_size * 2, sector_size, sum, TRUE, &res);

	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	got_result(&res, "multisector read up to partition end");

	/* Read three sectors, two up to and one beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum3 = get_sum(buf_ptr + sector_size * 2, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size * 2, buf_ptr,
		buf_size, FALSE, sector_size * 2, &res);

	test_sum(buf_ptr, sector_size * 2, sum2, TRUE, &res);
	test_sum(buf_ptr + sector_size * 2, sector_size, sum3, TRUE, &res);

	got_result(&res, "read somewhat across partition end");

	/* Read three sectors, one up to and two beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		buf_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size * 2, sum2, TRUE, &res);

	got_result(&res, "read mostly across partition end");

	/* Read one sector starting at the partition end. */
	sum = fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size, buf_ptr, sector_size, FALSE,
		0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "one sector read at partition end");

	/* Read three sectors starting at the partition end. */
	simple_xfer(sub0_minor, (u64_t)sub_size, buf_ptr, buf_size, FALSE, 0,
		&res);

	test_sum(buf_ptr, buf_size, sum, TRUE, &res);

	got_result(&res, "multisector read at partition end");

	/* Read one sector beyond the partition end. */
	simple_xfer(sub0_minor, (u64_t)sub_size + sector_size, buf_ptr,
		buf_size, FALSE, 0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "single sector read beyond partition end");

	/* Read three sectors way beyond the partition end. */
	simple_xfer(sub0_minor, 0x1000000000000000ULL, buf_ptr, buf_size,
		FALSE, 0, &res);

	test_sum(buf_ptr, buf_size, sum, TRUE, &res);

	got_result(&res, "read way beyond partition end");

	/* Test negative offsets. This request should return EOF or fail; we
	 * assume that it returns EOF here (because that is what the AHCI
	 * driver does, to avoid producing errors for requests close to the
	 * 2^64 byte position limit [yes, this will indeed never happen
	 * anyway]). This is more or less a bad-requests test, but we cannot
	 * do it without setting up subpartitions first.
	 */
	simple_xfer(sub1_minor, 0xffffffffffffffffULL - sector_size + 1,
		buf_ptr, sector_size, FALSE, 0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "read with negative offset");

	/* Clean up. */
	free_dma_memory(buf_ptr, buf_size);
}

static void write_limits(dev_t sub0_minor, dev_t sub1_minor, size_t sub_size)
{
	/* Test writes up to, across, and beyond partition limits. Use the
	 * first given subpartition to test, and the second to make sure there
	 * are no overruns. The given size is the size of each of the
	 * subpartitions. Note that the need to verify the results through
	 * readback makes this more or less a superset of the read test.
	 */
	u8_t *buf_ptr;
	size_t buf_size;
	u32_t sum, sum2, sum3, sub1_sum;
	result_t res;

	test_group("write around subpartition limits", may_write);

	if (!may_write)
		return;

	buf_size = sector_size * 3;
	buf_ptr = alloc_dma_memory(buf_size);

	/* Write to the start of the second subpartition, so that we can
	 * reliably check whether the contents have changed later.
	 */
	sub1_sum = fill_rand(buf_ptr, buf_size);

	simple_xfer(sub1_minor, 0ULL, buf_ptr, buf_size, TRUE, buf_size, &res);

	got_result(&res, "write to second subpartition");

	/* Write one sector, up to the partition limit. */
	sum = fill_rand(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		sector_size, TRUE, sector_size, &res);

	got_result(&res, "write up to partition end");

	/* Read back to make sure the results have persisted. */
	fill_rand(buf_ptr, sector_size * 2);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size * 2, buf_ptr,
		sector_size * 2, FALSE, sector_size * 2, &res);

	test_sum(buf_ptr + sector_size, sector_size, sum, TRUE, &res);

	got_result(&res, "read up to partition end");

	/* Write three sectors, two up to and one beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum = get_sum(buf_ptr + sector_size, sector_size);
	sum3 = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size * 2, buf_ptr,
		buf_size, TRUE, sector_size * 2, &res);

	got_result(&res, "write somewhat across partition end");

	/* Read three sectors, one up to and two beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		buf_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size * 2, sum2, TRUE, &res);

	got_result(&res, "read mostly across partition end");

	/* Repeat this but with write and read start positions swapped. */
	fill_rand(buf_ptr, buf_size);
	sum = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		buf_size, TRUE, sector_size, &res);

	got_result(&res, "write mostly across partition end");

	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size * 2, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size * 2, buf_ptr,
		buf_size, FALSE, sector_size * 2, &res);

	test_sum(buf_ptr, sector_size, sum3, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size * 2, sector_size, sum2, TRUE, &res);

	got_result(&res, "read somewhat across partition end");

	/* Write one sector at the end of the partition. */
	fill_rand(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size, buf_ptr, sector_size, TRUE, 0,
		&res);

	got_result(&res, "write at partition end");

	/* Write one sector beyond the end of the partition. */
	simple_xfer(sub0_minor, (u64_t)sub_size + sector_size, buf_ptr,
		sector_size, TRUE, 0, &res);

	got_result(&res, "write beyond partition end");

	/* Read from the start of the second subpartition, and see if it
	 * matches what we wrote into it earlier.
	 */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub1_minor, 0ULL, buf_ptr, buf_size, FALSE, buf_size,
		&res);

	test_sum(buf_ptr, buf_size, sub1_sum, TRUE, &res);

	got_result(&res, "read from second subpartition");

	/* Test offset wrapping, but this time for writes. */
	fill_rand(buf_ptr, sector_size);

	simple_xfer(sub1_minor, 0xffffffffffffffffULL - sector_size + 1,
		buf_ptr, sector_size, TRUE, 0, &res);

	got_result(&res, "write with negative offset");

	/* If the last request erroneously succeeded, it would have overwritten
	 * the last sector of the first subpartition.
	 */
	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		sector_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);

	got_result(&res, "read up to partition end");

	/* Clean up. */
	free_dma_memory(buf_ptr, buf_size);
}

static void vir_limits(dev_t sub0_minor, dev_t sub1_minor, int part_secs)
{
	/* Create virtual, temporary subpartitions through the DIOCSETP ioctl,
	 * and perform tests on the resulting subpartitions.
	 */
	struct part_geom subpart, subpart2;
	size_t sub_size;
	result_t res;

	test_group("virtual subpartition limits", TRUE);

	/* Open the subpartitions. This is somewhat dodgy; we rely on the
	 * driver allowing this even if no subpartitions exist. We cannot do
	 * this test without doing a DIOCSETP on an open subdevice, though.
	 */
	open_device(sub0_minor);
	open_device(sub1_minor);

	sub_size = sector_size * part_secs;

	/* Set, and check, the size of the first subpartition. */
	subpart = part;
	subpart.size = (u64_t)sub_size;

	vir_ioctl(sub0_minor, DIOCSETP, &subpart, OK, &res);

	got_result(&res, "ioctl to set first subpartition");

	vir_ioctl(sub0_minor, DIOCGETP, &subpart2, OK, &res);

	if (res.type == RESULT_OK && (subpart.base != subpart2.base ||
			subpart.size != subpart2.size)) {
		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get first subpartition");

	/* Set, and check, the base and size of the second subpartition. */
	subpart = part;
	subpart.base += sub_size;
	subpart.size = (u64_t)sub_size;

	vir_ioctl(sub1_minor, DIOCSETP, &subpart, OK, &res);

	got_result(&res, "ioctl to set second subpartition");

	vir_ioctl(sub1_minor, DIOCGETP, &subpart2, OK, &res);

	if (res.type == RESULT_OK && (subpart.base != subpart2.base ||
			subpart.size != subpart2.size)) {
		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get second subpartition");

	/* Perform the actual I/O tests. */
	read_limits(sub0_minor, sub1_minor, sub_size);

	write_limits(sub0_minor, sub1_minor, sub_size);

	/* Clean up. */
	close_device(sub1_minor);
	close_device(sub0_minor);
}

static void real_limits(dev_t sub0_minor, dev_t sub1_minor, int part_secs)
{
	/* Create our own subpartitions by writing a partition table, and
	 * perform tests on the resulting real subpartitions.
	 */
	u8_t *buf_ptr;
	size_t buf_size, sub_size;
	struct part_geom subpart;
	struct part_entry *entry;
	result_t res;

	test_group("real subpartition limits", may_write);

	if (!may_write)
		return;

	sub_size = sector_size * part_secs;

	/* Technically, we should be using 512 instead of sector_size in
	 * various places, because even on CD-ROMs, the partition tables are
	 * 512 bytes and the sector counts are based on 512-byte sectors in it.
	 * We ignore this subtlety because CD-ROMs are assumed to be read-only
	 * anyway.
	 */
	buf_size = sector_size;
	buf_ptr = alloc_dma_memory(buf_size);

	memset(buf_ptr, 0, buf_size);

	/* Write an invalid partition table. */
	simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size, TRUE, buf_size,
		&res);

	got_result(&res, "write of invalid partition table");

	/* Get the disk driver to reread the partition table. This should
	 * happen (at least) when the device is fully closed and then reopened.
	 * The ioctl test already made sure that we're the only client.
	 */
	close_device(driver_minor);
	open_device(driver_minor);

	/* See if our changes are visible. We expect the subpartitions to have
	 * a size of zero now, indicating that they're not there. For actual
	 * subpartitions (as opposed to normal partitions), this requires the
	 * driver to zero them out, because the partition code does not do so.
	 */
	open_device(sub0_minor);
	open_device(sub1_minor);

	vir_ioctl(sub0_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK && subpart.size != 0) {
		res.type = RESULT_BADVALUE;
		res.value = ex64lo(subpart.size);
	}

	got_result(&res, "ioctl to get first subpartition");

	vir_ioctl(sub1_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK && subpart.size != 0) {
		res.type = RESULT_BADVALUE;
		res.value = ex64lo(subpart.size);
	}

	got_result(&res, "ioctl to get second subpartition");

	close_device(sub1_minor);
	close_device(sub0_minor);

	/* Now write a valid partition table. */
	memset(buf_ptr, 0, buf_size);

	entry = (struct part_entry *) &buf_ptr[PART_TABLE_OFF];

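	/* The first subpartition starts one sector into the parent partition,
	 * right after the sector holding this subpartition table; lowsec is
	 * expressed in sectors from the start of the whole device.
	 */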
	entry[0].sysind = MINIX_PART;
	entry[0].lowsec = part.base / sector_size + 1;
	entry[0].size = part_secs;
	entry[1].sysind = MINIX_PART;
	entry[1].lowsec = entry[0].lowsec + entry[0].size;
	entry[1].size = part_secs;

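	/* Terminate the sector with the standard 0x55AA boot signature, so
	 * that the partition table is considered valid.
	 */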
1650 	buf_ptr[510] = 0x55;
1651 	buf_ptr[511] = 0xAA;
1652 
1653 	simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size, TRUE, buf_size,
1654 		&res);
1655 
1656 	got_result(&res, "write of valid partition table");
1657 
1658 	/* Same as above. */
1659 	close_device(driver_minor);
1660 	open_device(driver_minor);
1661 
1662 	/* Again, see if our changes are visible. This time the proper base and
1663 	 * size should be there.
1664 	 */
1665 	open_device(sub0_minor);
1666 	open_device(sub1_minor);
1667 
1668 	vir_ioctl(sub0_minor, DIOCGETP, &subpart, 0, &res);
1669 
1670 	if (res.type == RESULT_OK &&
1671 		(subpart.base != part.base + sector_size ||
1672 		subpart.size != (u64_t)part_secs * sector_size)) {
1673 
1674 		res.type = RESULT_BADVALUE;
1675 		res.value = 0;
1676 	}
1677 
1678 	got_result(&res, "ioctl to get first subpartition");
1679 
1680 	vir_ioctl(sub1_minor, DIOCGETP, &subpart, 0, &res);
1681 
1682 	if (res.type == RESULT_OK &&
1683 		(subpart.base != part.base + (1 + part_secs) * sector_size ||
1684 		subpart.size != (u64_t)part_secs * sector_size)) {
1685 
1686 		res.type = RESULT_BADVALUE;
1687 		res.value = 0;
1688 	}
1689 
1690 	got_result(&res, "ioctl to get second subpartition");
1691 
1692 	/* Now perform the actual I/O tests. */
1693 	read_limits(sub0_minor, sub1_minor, sub_size);
1694 
1695 	write_limits(sub0_minor, sub1_minor, sub_size);
1696 
1697 	/* Clean up. */
1698 	close_device(sub0_minor);
1699 	close_device(sub1_minor);
1700 
1701 	free_dma_memory(buf_ptr, buf_size);
1702 }
1703 
1704 static void part_limits(void)
1705 {
1706 	/* Test reads and writes up to, across, and beyond partition limits.
1707 	 * As a side effect, test reading and writing partition sizes and
1708 	 * rereading partition tables.
1709 	 */
1710 	dev_t par, sub0_minor, sub1_minor;
1711 
1712 	/* First determine the first two subpartitions of the partition that we
1713 	 * are operating on. If we are already operating on a subpartition, we
1714 	 * cannot conduct this test.
1715 	 */
1716 	if (driver_minor >= MINOR_d0p0s0) {
1717 		output("WARNING: operating on subpartition, "
1718 			"skipping partition tests\n");
1719 		return;
1720 	}
1721 	par = driver_minor % DEV_PER_DRIVE;
1722 	if (par > 0) /* adapted from libdriver's drvlib code */
1723 		sub0_minor = MINOR_d0p0s0 + ((driver_minor / DEV_PER_DRIVE) *
1724 			NR_PARTITIONS + par - 1) * NR_PARTITIONS;
1725 	else
1726 		sub0_minor = driver_minor + 1;
1727 	sub1_minor = sub0_minor + 1;
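	/* A worked example, assuming the usual MINIX 3 constants
	 * (DEV_PER_DRIVE = 5, NR_PARTITIONS = 4, MINOR_d0p0s0 = 128): for
	 * driver_minor = 2 (disk 0, partition 1), par = 2, so sub0_minor =
	 * 128 + ((2 / 5) * 4 + 2 - 1) * 4 = 132, i.e. d0p1s0, and sub1_minor
	 * is 133, i.e. d0p1s1.
	 */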
1728 
1729 #define PART_SECS	9	/* sectors in each partition. must be >= 4. */
1730 
1731 	/* First try the test with temporarily specified subpartitions. */
1732 	vir_limits(sub0_minor, sub1_minor, PART_SECS);
1733 
1734 	/* Then, if we're allowed to write, try the test with real, persisted
1735 	 * subpartitions.
1736 	 */
1737 	real_limits(sub0_minor, sub1_minor, PART_SECS - 1);
1738 
1739 }
1740 
1741 static void unaligned_size_io(u64_t base_pos, u8_t *buf_ptr, size_t buf_size,
1742 	u8_t *sec_ptr[2], int sectors, int pattern, u32_t ssum[5])
1743 {
1744 	/* Perform a single small-element I/O read, write, readback test.
1745 	 * The number of sectors and the pattern vary with each call.
1746 	 * The ssum array has to be updated to reflect the five sectors'
1747 	 * checksums on disk, if writing is enabled.
1748 	 */
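	/* The three element patterns look as follows, with S a small element
	 * of element_size bytes and L the large remainder of the area:
	 *
	 *   pattern 0:  [ S ][ LLLL... ]        small element on the left
	 *   pattern 1:  [ LLLL... ][ S ]        small element on the right
	 *   pattern 2:  [ S ][ LLL... ][ S ]    small element on both sides
	 */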
1749 	iovec_t iov[3], iovt[3];
1750 	u32_t rsum[3];
1751 	result_t res;
1752 	size_t total_size;
1753 	int i, nr_req;
1754 
1755 	base_pos += sector_size;
1756 	total_size = sector_size * sectors;
1757 
1758 	/* If the limit is two elements per sector, we cannot test three
1759 	 * elements in a single sector.
1760 	 */
1761 	if (sector_size / element_size == 2 && sectors == 1 && pattern == 2)
1762 		return;
1763 
1764 	/* Set up the buffers and I/O vector. We use different buffers for the
1765 	 * elements to minimize the chance that something "accidentally" goes
1766 	 * right, but that means we have to do memory copying to do checksum
1767 	 * computation.
1768 	 */
1769 	fill_rand(sec_ptr[0], sector_size);
1770 	rsum[0] =
1771 		get_sum(sec_ptr[0] + element_size, sector_size - element_size);
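	/* rsum[0] covers the part of sec_ptr[0] beyond the small element; it
	 * must still match after the transfer, or the driver wrote past the
	 * element boundary.
	 */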
1772 
1773 	fill_rand(buf_ptr, buf_size);
1774 
1775 	switch (pattern) {
1776 	case 0:
1777 		/* First pattern: a small element on the left. */
1778 		iovt[0].iov_addr = (vir_bytes) sec_ptr[0];
1779 		iovt[0].iov_size = element_size;
1780 
1781 		iovt[1].iov_addr = (vir_bytes) buf_ptr;
1782 		iovt[1].iov_size = total_size - element_size;
1783 		rsum[1] = get_sum(buf_ptr + iovt[1].iov_size, element_size);
1784 
1785 		nr_req = 2;
1786 		break;
1787 	case 1:
1788 		/* Second pattern: a small element on the right. */
1789 		iovt[0].iov_addr = (vir_bytes) buf_ptr;
1790 		iovt[0].iov_size = total_size - element_size;
1791 		rsum[1] = get_sum(buf_ptr + iovt[0].iov_size, element_size);
1792 
1793 		iovt[1].iov_addr = (vir_bytes) sec_ptr[0];
1794 		iovt[1].iov_size = element_size;
1795 
1796 		nr_req = 2;
1797 		break;
1798 	case 2:
1799 		/* Third pattern: a small element on each side. */
1800 		iovt[0].iov_addr = (vir_bytes) sec_ptr[0];
1801 		iovt[0].iov_size = element_size;
1802 
1803 		iovt[1].iov_addr = (vir_bytes) buf_ptr;
1804 		iovt[1].iov_size = total_size - element_size * 2;
1805 		rsum[1] = get_sum(buf_ptr + iovt[1].iov_size,
1806 			element_size * 2);
1807 
1808 		fill_rand(sec_ptr[1], sector_size);
1809 		iovt[2].iov_addr = (vir_bytes) sec_ptr[1];
1810 		iovt[2].iov_size = element_size;
1811 		rsum[2] = get_sum(sec_ptr[1] + element_size,
1812 			sector_size - element_size);
1813 
1814 		nr_req = 3;
1815 		break;
1816 	default:
1817 		assert(0);
1818 	}
1819 
1820 	/* Perform a read with small elements, and test whether the result is
1821 	 * as expected.
1822 	 */
1823 	memcpy(iov, iovt, sizeof(iov));
1824 	vir_xfer(driver_minor, base_pos, iov, nr_req, FALSE, total_size, &res);
1825 
1826 	test_sum(sec_ptr[0] + element_size, sector_size - element_size,
1827 		rsum[0], TRUE, &res);
1828 
1829 	switch (pattern) {
1830 	case 0:
1831 		test_sum(buf_ptr + iovt[1].iov_size, element_size, rsum[1],
1832 			TRUE, &res);
1833 		memmove(buf_ptr + element_size, buf_ptr, iovt[1].iov_size);
1834 		memcpy(buf_ptr, sec_ptr[0], element_size);
1835 		break;
1836 	case 1:
1837 		test_sum(buf_ptr + iovt[0].iov_size, element_size, rsum[1],
1838 			TRUE, &res);
1839 		memcpy(buf_ptr + iovt[0].iov_size, sec_ptr[0], element_size);
1840 		break;
1841 	case 2:
1842 		test_sum(buf_ptr + iovt[1].iov_size, element_size * 2, rsum[1],
1843 			TRUE, &res);
1844 		test_sum(sec_ptr[1] + element_size, sector_size - element_size,
1845 			rsum[2], TRUE, &res);
1846 		memmove(buf_ptr + element_size, buf_ptr, iovt[1].iov_size);
1847 		memcpy(buf_ptr, sec_ptr[0], element_size);
1848 		memcpy(buf_ptr + element_size + iovt[1].iov_size, sec_ptr[1],
1849 			element_size);
1850 
1851 		break;
1852 	}
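	/* The moves above reassemble the scattered element contents into one
	 * contiguous sector image in buf_ptr, so that the per-sector checksum
	 * comparison below can be done in place.
	 */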
1853 
1854 	for (i = 0; i < sectors; i++)
1855 		test_sum(buf_ptr + sector_size * i, sector_size, ssum[1 + i],
1856 			TRUE, &res);
1857 
1858 	got_result(&res, "read with small elements");
1859 
1860 	/* In read-only mode, we have nothing more to do. */
1861 	if (!may_write)
1862 		return;
1863 
1864 	/* Use the same I/O vector to perform a write with small elements.
1865 	 * This will cause the checksums of the target sectors to change,
1866 	 * so we need to update those for both verification and later usage.
1867 	 */
1868 	for (i = 0; i < sectors; i++)
1869 		ssum[1 + i] =
1870 			fill_rand(buf_ptr + sector_size * i, sector_size);
1871 
1872 	switch (pattern) {
1873 	case 0:
1874 		memcpy(sec_ptr[0], buf_ptr, element_size);
1875 		memmove(buf_ptr, buf_ptr + element_size, iovt[1].iov_size);
1876 		fill_rand(buf_ptr + iovt[1].iov_size, element_size);
1877 		break;
1878 	case 1:
1879 		memcpy(sec_ptr[0], buf_ptr + iovt[0].iov_size, element_size);
1880 		fill_rand(buf_ptr + iovt[0].iov_size, element_size);
1881 		break;
1882 	case 2:
1883 		memcpy(sec_ptr[0], buf_ptr, element_size);
1884 		memcpy(sec_ptr[1], buf_ptr + element_size + iovt[1].iov_size,
1885 			element_size);
1886 		memmove(buf_ptr, buf_ptr + element_size, iovt[1].iov_size);
1887 		fill_rand(buf_ptr + iovt[1].iov_size, element_size * 2);
1888 		break;
1889 	}
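	/* This is the inverse of the read path above: the freshly generated
	 * sector image is scattered over the vector elements, and the parts of
	 * buf_ptr not covered by the vector are refilled with unrelated random
	 * data, again so that a misbehaving transfer cannot accidentally
	 * produce the expected bytes.
	 */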
1890 
1891 	memcpy(iov, iovt, sizeof(iov));
1892 
1893 	vir_xfer(driver_minor, base_pos, iov, nr_req, TRUE, total_size, &res);
1894 
1895 	got_result(&res, "write with small elements");
1896 
1897 	/* Now perform normal readback verification. */
1898 	fill_rand(buf_ptr, sector_size * 3);
1899 
1900 	simple_xfer(driver_minor, base_pos, buf_ptr, sector_size * 3, FALSE,
1901 		sector_size * 3, &res);
1902 
1903 	for (i = 0; i < 3; i++)
1904 		test_sum(buf_ptr + sector_size * i, sector_size, ssum[1 + i],
1905 			TRUE, &res);
1906 
1907 	got_result(&res, "readback verification");
1908 }
1909 
1910 static void unaligned_size(void)
1911 {
1912 	/* Test sector-unaligned sizes in I/O vector elements. The total size
1913 	 * of the request, however, has to add up to a whole number of sectors.
1914 	 */
1915 	u8_t *buf_ptr, *sec_ptr[2];
1916 	size_t buf_size;
1917 	u32_t sum = 0L, ssum[5];
1918 	u64_t base_pos;
1919 	result_t res;
1920 	int i;
1921 
1922 	test_group("sector-unaligned elements", sector_size != element_size);
1923 
1924 	/* We can only do this test if the driver allows small elements. */
1925 	if (sector_size == element_size)
1926 		return;
1927 
1928 	/* The element size must evenly divide the sector size. */
1929 	assert(sector_size % element_size == 0);
1930 
1931 	/* Establish a baseline by writing and reading back five sectors; or
1932 	 * by reading only, if writing is disabled.
1933 	 */
1934 	buf_size = sector_size * 5;
1935 
1936 	base_pos = (u64_t)sector_size * 2;
1937 
1938 	buf_ptr = alloc_dma_memory(buf_size);
1939 	sec_ptr[0] = alloc_dma_memory(sector_size);
1940 	sec_ptr[1] = alloc_dma_memory(sector_size);
1941 
1942 	if (may_write) {
1943 		sum = fill_rand(buf_ptr, buf_size);
1944 
1945 		for (i = 0; i < 5; i++)
1946 			ssum[i] = get_sum(buf_ptr + sector_size * i,
1947 				sector_size);
1948 
1949 		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, TRUE,
1950 			buf_size, &res);
1951 
1952 		got_result(&res, "write several sectors");
1953 	}
1954 
1955 	fill_rand(buf_ptr, buf_size);
1956 
1957 	simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE, buf_size,
1958 		&res);
1959 
1960 	if (may_write) {
1961 		test_sum(buf_ptr, buf_size, sum, TRUE, &res);
1962 	}
1963 	else {
1964 		for (i = 0; i < 5; i++)
1965 			ssum[i] = get_sum(buf_ptr + sector_size * i,
1966 				sector_size);
1967 	}
1968 
1969 	got_result(&res, "read several sectors");
1970 
1971 	/* We do nine subtests. The first three involve only the second sector;
1972 	 * the second three involve the second and third sectors, and the third
1973 	 * three involve all of the middle sectors. Each triplet tests small
1974 	 * elements at the left, at the right, and at both the left and the
1975 	 * right of the area. For each operation, we first do an unaligned
1976 	 * read, and if writing is enabled, an unaligned write and an aligned
1977 	 * read.
1978 	 */
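	/* The loop thus covers (sectors, pattern) = (1,0) (1,1) (1,2) for
	 * i = 0..2, then (2,0) (2,1) (2,2), and finally (3,0) (3,1) (3,2).
	 */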
1979 	for (i = 0; i < 9; i++) {
1980 		unaligned_size_io(base_pos, buf_ptr, buf_size, sec_ptr,
1981 			i / 3 + 1, i % 3, ssum);
1982 	}
1983 
1984 	/* If writing was enabled, make sure that the first and fifth sector
1985 	 * have remained untouched.
1986 	 */
1987 	if (may_write) {
1988 		fill_rand(buf_ptr, buf_size);
1989 
1990 		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE,
1991 			buf_size, &res);
1992 
1993 		test_sum(buf_ptr, sector_size, ssum[0], TRUE, &res);
1994 		test_sum(buf_ptr + sector_size * 4, sector_size, ssum[4], TRUE,
1995 			&res);
1996 
1997 		got_result(&res, "check first and last sectors");
1998 	}
1999 
2000 	/* Clean up. */
2001 	free_dma_memory(sec_ptr[1], sector_size);
2002 	free_dma_memory(sec_ptr[0], sector_size);
2003 	free_dma_memory(buf_ptr, buf_size);
2004 }
2005 
2006 static void unaligned_pos1(void)
2007 {
2008 	/* Test sector-unaligned positions and total sizes for requests. This
2009 	 * is a read-only test for now. Write support should be added later.
2010 	 * In the current context, the term "lead" means an unwanted first part
2011 	 * of a sector, and "trail" means an unwanted last part of a sector.
2012 	 */
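	/* Visually, for a request that starts and ends inside one sector:
	 *
	 *   |<---------- sector ---------->|
	 *   [  lead  ][ requested ][ trail ]
	 *
	 * The driver typically has to transfer the whole sector internally,
	 * but must copy only the requested middle part out to us.
	 */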
2013 	u8_t *buf_ptr, *buf2_ptr;
2014 	size_t buf_size, buf2_size, size;
2015 	u32_t sum, sum2;
2016 	u64_t base_pos;
2017 	result_t res;
2018 
2019 	test_group("sector-unaligned positions, part one",
2020 		min_read != sector_size);
2021 
2022 	/* We can only do this test if the driver allows small read requests.
2023 	 */
2024 	if (min_read == sector_size)
2025 		return;
2026 
2027 	assert(sector_size % min_read == 0);
2028 	assert(min_read % element_size == 0);
2029 
2030 	/* Establish a baseline by writing and reading back three sectors; or
2031 	 * by reading only, if writing is disabled.
2032 	 */
2033 	buf_size = buf2_size = sector_size * 3;
2034 
2035 	base_pos = (u64_t)sector_size * 3;
2036 
2037 	buf_ptr = alloc_dma_memory(buf_size);
2038 	buf2_ptr = alloc_dma_memory(buf2_size);
2039 
2040 	if (may_write) {
2041 		sum = fill_rand(buf_ptr, buf_size);
2042 
2043 		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, TRUE,
2044 			buf_size, &res);
2045 
2046 		got_result(&res, "write several sectors");
2047 	}
2048 
2049 	fill_rand(buf_ptr, buf_size);
2050 
2051 	simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE, buf_size,
2052 		&res);
2053 
2054 	if (may_write)
2055 		test_sum(buf_ptr, buf_size, sum, TRUE, &res);
2056 
2057 	got_result(&res, "read several sectors");
2058 
2059 	/* Start with a simple test that operates within a single sector,
2060 	 * first using a lead.
2061 	 */
2062 	fill_rand(buf2_ptr, sector_size);
2063 	sum = get_sum(buf2_ptr + min_read, sector_size - min_read);
2064 
2065 	simple_xfer(driver_minor, base_pos + sector_size - min_read,
2066 		buf2_ptr, min_read, FALSE, min_read, &res);
2067 
2068 	test_sum(buf2_ptr, min_read, get_sum(buf_ptr + sector_size - min_read,
2069 		min_read), TRUE, &res);
2070 	test_sum(buf2_ptr + min_read, sector_size - min_read, sum, TRUE,
2071 		&res);
2072 
2073 	got_result(&res, "single sector read with lead");
2074 
2075 	/* Then a trail. */
2076 	fill_rand(buf2_ptr, sector_size);
2077 	sum = get_sum(buf2_ptr, sector_size - min_read);
2078 
2079 	simple_xfer(driver_minor, base_pos, buf2_ptr + sector_size - min_read,
2080 		min_read, FALSE, min_read, &res);
2081 
2082 	test_sum(buf2_ptr + sector_size - min_read, min_read, get_sum(buf_ptr,
2083 		min_read), TRUE, &res);
2084 	test_sum(buf2_ptr, sector_size - min_read, sum, TRUE, &res);
2085 
2086 	got_result(&res, "single sector read with trail");
2087 
2088 	/* And then a lead and a trail, unless min_read is half the sector
2089 	 * size, in which case this will be another lead test.
2090 	 */
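	/* For instance, with a 512-byte sector and min_read = 128, this reads
	 * 128 bytes at offset 128 into the sector: a 128-byte lead and a
	 * 256-byte trail. With min_read = 256 there is no trail, making this
	 * effectively another lead test.
	 */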
2091 	fill_rand(buf2_ptr, sector_size);
2092 	sum = get_sum(buf2_ptr, min_read);
2093 	sum2 = get_sum(buf2_ptr + min_read * 2, sector_size - min_read * 2);
2094 
2095 	simple_xfer(driver_minor, base_pos + min_read, buf2_ptr + min_read,
2096 		min_read, FALSE, min_read, &res);
2097 
2098 	test_sum(buf2_ptr + min_read, min_read, get_sum(buf_ptr + min_read,
2099 		min_read), TRUE, &res);
2100 	test_sum(buf2_ptr, min_read, sum, TRUE, &res);
2101 	test_sum(buf2_ptr + min_read * 2, sector_size - min_read * 2, sum2,
2102 		TRUE, &res);
2103 
2104 	got_result(&res, "single sector read with lead and trail");
2105 
2106 	/* Now do the same but with three sectors, and still only one I/O
2107 	 * vector element. First up: lead.
2108 	 */
2109 	size = min_read + sector_size * 2;
2110 
2111 	fill_rand(buf2_ptr, buf2_size);
2112 	sum = get_sum(buf2_ptr + size, buf2_size - size);
2113 
2114 	simple_xfer(driver_minor, base_pos + sector_size - min_read, buf2_ptr,
2115 		size, FALSE, size, &res);
2116 
2117 	test_sum(buf2_ptr, size, get_sum(buf_ptr + sector_size - min_read,
2118 		size), TRUE, &res);
2119 	test_sum(buf2_ptr + size, buf2_size - size, sum, TRUE, &res);
2120 
2121 	got_result(&res, "multisector read with lead");
2122 
2123 	/* Then trail. */
2124 	fill_rand(buf2_ptr, buf2_size);
2125 	sum = get_sum(buf2_ptr + size, buf2_size - size);
2126 
2127 	simple_xfer(driver_minor, base_pos, buf2_ptr, size, FALSE, size, &res);
2128 
2129 	test_sum(buf2_ptr, size, get_sum(buf_ptr, size), TRUE, &res);
2130 	test_sum(buf2_ptr + size, buf2_size - size, sum, TRUE, &res);
2131 
2132 	got_result(&res, "multisector read with trail");
2133 
2134 	/* Then lead and trail. Use sector size as transfer unit to throw off
2135 	 * simplistic lead/trail detection.
2136 	 */
2137 	fill_rand(buf2_ptr, buf2_size);
2138 	sum = get_sum(buf2_ptr + sector_size, buf2_size - sector_size);
2139 
2140 	simple_xfer(driver_minor, base_pos + min_read, buf2_ptr, sector_size,
2141 		FALSE, sector_size, &res);
2142 
2143 	test_sum(buf2_ptr, sector_size, get_sum(buf_ptr + min_read,
2144 		sector_size), TRUE, &res);
2145 	test_sum(buf2_ptr + sector_size, buf2_size - sector_size, sum, TRUE,
2146 		&res);
2147 
2148 	got_result(&res, "multisector read with lead and trail");
2149 
2150 	/* Clean up. */
2151 	free_dma_memory(buf2_ptr, buf2_size);
2152 	free_dma_memory(buf_ptr, buf_size);
2153 }
2154 
2155 static void unaligned_pos2(void)
2156 {
2157 	/* Test sector-unaligned positions and total sizes for requests, second
2158 	 * part. This one tests the use of multiple I/O vector elements, and
2159 	 * tries to push the limits of the driver by completely filling an I/O
2160 	 * vector and going up to the maximum request size.
2161 	 */
2162 	u8_t *buf_ptr, *buf2_ptr;
2163 	size_t buf_size, buf2_size, max_block;
2164 	u32_t sum = 0L, sum2 = 0L, rsum[NR_IOREQS];
2165 	u64_t base_pos;
2166 	iovec_t iov[NR_IOREQS];
2167 	result_t res;
2168 	int i;
2169 
2170 	test_group("sector-unaligned positions, part two",
2171 		min_read != sector_size);
2172 
2173 	/* We can only do this test if the driver allows small read requests.
2174 	 */
2175 	if (min_read == sector_size)
2176 		return;
2177 
2178 	buf_size = buf2_size = max_size + sector_size;
2179 
2180 	base_pos = (u64_t)sector_size * 3;
2181 
2182 	buf_ptr = alloc_dma_memory(buf_size);
2183 	buf2_ptr = alloc_dma_memory(buf2_size);
2184 
2185 	/* First establish a baseline. We need two requests for this, as the
2186 	 * total area intentionally exceeds the max request size.
2187 	 */
2188 	if (may_write) {
2189 		sum = fill_rand(buf_ptr, max_size);
2190 
2191 		simple_xfer(driver_minor, base_pos, buf_ptr, max_size, TRUE,
2192 			max_size, &res);
2193 
2194 		got_result(&res, "large baseline write");
2195 
2196 		sum2 = fill_rand(buf_ptr + max_size, sector_size);
2197 
2198 		simple_xfer(driver_minor, base_pos + max_size,
2199 			buf_ptr + max_size, sector_size, TRUE, sector_size,
2200 			&res);
2201 
2202 		got_result(&res, "small baseline write");
2203 	}
2204 
2205 	fill_rand(buf_ptr, buf_size);
2206 
2207 	simple_xfer(driver_minor, base_pos, buf_ptr, max_size, FALSE, max_size,
2208 		&res);
2209 
2210 	if (may_write)
2211 		test_sum(buf_ptr, max_size, sum, TRUE, &res);
2212 
2213 	got_result(&res, "large baseline read");
2214 
2215 	simple_xfer(driver_minor, base_pos + max_size, buf_ptr + max_size,
2216 		sector_size, FALSE, sector_size, &res);
2217 
2218 	if (may_write)
2219 		test_sum(buf_ptr + max_size, sector_size, sum2, TRUE, &res);
2220 
2221 	got_result(&res, "small baseline read");
2222 
2223 	/* First construct a full vector with minimal sizes. The resulting area
2224 	 * may well fall within a single sector, if min_read is small enough.
2225 	 */
2226 	fill_rand(buf2_ptr, buf2_size);
2227 
2228 	for (i = 0; i < NR_IOREQS; i++) {
2229 		iov[i].iov_addr = (vir_bytes) buf2_ptr + i * sector_size;
2230 		iov[i].iov_size = min_read;
2231 
2232 		rsum[i] = get_sum(buf2_ptr + i * sector_size + min_read,
2233 			sector_size - min_read);
2234 	}
2235 
2236 	vir_xfer(driver_minor, base_pos + min_read, iov, NR_IOREQS, FALSE,
2237 		min_read * NR_IOREQS, &res);
2238 
2239 	for (i = 0; i < NR_IOREQS; i++) {
2240 		test_sum(buf2_ptr + i * sector_size + min_read,
2241 			sector_size - min_read, rsum[i], TRUE, &res);
2242 		memmove(buf2_ptr + i * min_read, buf2_ptr + i * sector_size,
2243 			min_read);
2244 	}
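	/* The memmove calls above pack the min_read-sized chunks, which were
	 * spaced one sector apart in memory, into one contiguous run, so that
	 * a single checksum comparison against the baseline suffices.
	 */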
2245 
2246 	test_sum(buf2_ptr, min_read * NR_IOREQS, get_sum(buf_ptr + min_read,
2247 		min_read * NR_IOREQS), TRUE, &res);
2248 
2249 	got_result(&res, "small fully unaligned filled vector");
2250 
2251 	/* Sneak in a maximum sized request with a single I/O vector element,
2252 	 * unaligned. If the driver splits up such large requests into smaller
2253 	 * chunks, this tests whether it does so correctly in the presence of
2254 	 * leads and trails.
2255 	 */
2256 	fill_rand(buf2_ptr, buf2_size);
2257 
2258 	simple_xfer(driver_minor, base_pos + min_read, buf2_ptr, max_size,
2259 		FALSE, max_size, &res);
2260 
2261 	test_sum(buf2_ptr, max_size, get_sum(buf_ptr + min_read, max_size),
2262 		TRUE, &res);
2263 
2264 	got_result(&res, "large fully unaligned single element");
2265 
2266 	/* Then try with a vector where each element is as large as possible.
2267 	 * We don't have room to do bounds integrity checking here (we could
2268 	 * make room, but this may be a lot of memory already).
2269 	 */
2270 	/* Compute the largest sector multiple which, when multiplied by
2271 	 * NR_IOREQS, is no more than the maximum transfer size.
2272 	 */
2273 	max_block = max_size / NR_IOREQS;
2274 	max_block -= max_block % sector_size;
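	/* For example, with the default max_size of 131072, 512-byte sectors,
	 * and assuming NR_IOREQS is 64: max_block = 131072 / 64 = 2048, which
	 * is already a sector multiple, so each element spans four sectors and
	 * the vector adds up to the full 131072 bytes.
	 */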
2275 
2276 	fill_rand(buf2_ptr, buf2_size);
2277 
2278 	for (i = 0; i < NR_IOREQS; i++) {
2279 		iov[i].iov_addr = (vir_bytes) buf2_ptr + i * max_block;
2280 		iov[i].iov_size = max_block;
2281 	}
2282 
2283 	vir_xfer(driver_minor, base_pos + min_read, iov, NR_IOREQS, FALSE,
2284 		max_block * NR_IOREQS, &res);
2285 
2286 	test_sum(buf2_ptr, max_block * NR_IOREQS, get_sum(buf_ptr + min_read,
2287 		max_block * NR_IOREQS), TRUE, &res);
2288 
2289 	got_result(&res, "large fully unaligned filled vector");
2290 
2291 	/* Clean up. */
2292 	free_dma_memory(buf2_ptr, buf2_size);
2293 	free_dma_memory(buf_ptr, buf_size);
2294 }
2295 
2296 static void sweep_area(u64_t base_pos)
2297 {
2298 	/* Go over an eight-sector area from left (low address) to right (high
2299 	 * address), reading and optionally writing in three-sector chunks, and
2300 	 * advancing one sector at a time.
2301 	 */
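	/* The sweep thus looks like this, one three-sector window per step:
	 *
	 *   sector:  0  1  2  3  4  5  6  7
	 *   step 0: [        ]
	 *   step 1:    [        ]
	 *     ...
	 *   step 5:                [        ]
	 */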
2302 	u8_t *buf_ptr;
2303 	size_t buf_size;
2304 	u32_t sum = 0L, ssum[8];
2305 	result_t res;
2306 	int i, j;
2307 
2308 	buf_size = sector_size * 8;
2309 	buf_ptr = alloc_dma_memory(buf_size);
2310 
2311 	/* First (write to, if allowed, and) read from the entire area in one
2312 	 * go, so that we know the (initial) contents of the area.
2313 	 */
2314 	if (may_write) {
2315 		sum = fill_rand(buf_ptr, buf_size);
2316 
2317 		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, TRUE,
2318 			buf_size, &res);
2319 
2320 		got_result(&res, "write to full area");
2321 	}
2322 
2323 	fill_rand(buf_ptr, buf_size);
2324 
2325 	simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE, buf_size,
2326 		&res);
2327 
2328 	if (may_write)
2329 		test_sum(buf_ptr, buf_size, sum, TRUE, &res);
2330 
2331 	for (i = 0; i < 8; i++)
2332 		ssum[i] = get_sum(buf_ptr + sector_size * i, sector_size);
2333 
2334 	got_result(&res, "read from full area");
2335 
2336 	/* For each of the six three-sector subareas, first read from the
2337 	 * subarea, check its checksum, and then (if allowed) write new content
2338 	 * to it.
2339 	 */
2340 	for (i = 0; i < 6; i++) {
2341 		fill_rand(buf_ptr, sector_size * 3);
2342 
2343 		simple_xfer(driver_minor, base_pos + sector_size * i, buf_ptr,
2344 			sector_size * 3, FALSE, sector_size * 3, &res);
2345 
2346 		for (j = 0; j < 3; j++)
2347 			test_sum(buf_ptr + sector_size * j, sector_size,
2348 				ssum[i + j], TRUE, &res);
2349 
2350 		got_result(&res, "read from subarea");
2351 
2352 		if (!may_write)
2353 			continue;
2354 
2355 		fill_rand(buf_ptr, sector_size * 3);
2356 
2357 		simple_xfer(driver_minor, base_pos + sector_size * i, buf_ptr,
2358 			sector_size * 3, TRUE, sector_size * 3, &res);
2359 
2360 		for (j = 0; j < 3; j++)
2361 			ssum[i + j] = get_sum(buf_ptr + sector_size * j,
2362 				sector_size);
2363 
2364 		got_result(&res, "write to subarea");
2365 	}
2366 
2367 	/* Finally, if writing was enabled, do one final readback. */
2368 	if (may_write) {
2369 		fill_rand(buf_ptr, buf_size);
2370 
2371 		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE,
2372 			buf_size, &res);
2373 
2374 		for (i = 0; i < 8; i++)
2375 			test_sum(buf_ptr + sector_size * i, sector_size,
2376 				ssum[i], TRUE, &res);
2377 
2378 		got_result(&res, "readback from full area");
2379 	}
2380 
2381 	/* Clean up. */
2382 	free_dma_memory(buf_ptr, buf_size);
2383 }
2384 
2385 static void sweep_and_check(u64_t pos, int check_integ)
2386 {
2387 	/* Perform an area sweep at the given position. If asked for, get an
2388 	 * integrity checksum over the beginning of the disk (first writing
2389 	 * known data into it if that is allowed) before doing the sweep, and
2390 	 * test the integrity checksum against the disk contents afterwards.
2391 	 */
2392 	u8_t *buf_ptr;
2393 	size_t buf_size;
2394 	u32_t sum = 0L;
2395 	result_t res;
2396 
2397 	if (check_integ) {
2398 		buf_size = sector_size * 3;
2399 		buf_ptr = alloc_dma_memory(buf_size);
2400 
2401 		if (may_write) {
2402 			sum = fill_rand(buf_ptr, buf_size);
2403 
2404 			simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size,
2405 				TRUE, buf_size, &res);
2406 
2407 			got_result(&res, "write integrity zone");
2408 		}
2409 
2410 		fill_rand(buf_ptr, buf_size);
2411 
2412 		simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size, FALSE,
2413 			buf_size, &res);
2414 
2415 		if (may_write)
2416 			test_sum(buf_ptr, buf_size, sum, TRUE, &res);
2417 		else
2418 			sum = get_sum(buf_ptr, buf_size);
2419 
2420 		got_result(&res, "read integrity zone");
2421 	}
2422 
2423 	sweep_area(pos);
2424 
2425 	if (check_integ) {
2426 		fill_rand(buf_ptr, buf_size);
2427 
2428 		simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size, FALSE,
2429 			buf_size, &res);
2430 
2431 		test_sum(buf_ptr, buf_size, sum, TRUE, &res);
2432 
2433 		got_result(&res, "check integrity zone");
2434 
2435 		free_dma_memory(buf_ptr, buf_size);
2436 	}
2437 }
2438 
2439 static void basic_sweep(void)
2440 {
2441 	/* Perform a basic area sweep.
2442 	 */
2443 
2444 	test_group("basic area sweep", TRUE);
2445 
2446 	sweep_area((u64_t)sector_size);
2447 }
2448 
2449 static void high_disk_pos(void)
2450 {
2451 	/* Test 64-bit absolute disk positions. This means that after adding
2452 	 * partition base to the given position, the driver will be dealing
2453 	 * with a position above 32 bit. We want to test the transition area
2454 	 * only; if the entire partition base is above 32 bit, we have already
2455 	 * effectively performed this test many times over. In other words, for
2456 	 * this test, the partition must start below 4GB and end above 4GB,
2457 	 * with at least four sectors on each side.
2458 	 */
2459 	u64_t base_pos;
2460 
2461 	base_pos = 0x100000000ULL | (sector_size * 4);
2462 	base_pos -= base_pos % sector_size;
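	/* This computes the first sector boundary at four sectors past 4GB:
	 * the bitwise OR is exact whenever 0x100000000 is a multiple of the
	 * sector size (true for any power-of-two size), and the alignment step
	 * corrects for other sector sizes. Subtracting eight sectors further
	 * down then makes the sweep area straddle the 4GB boundary with four
	 * sectors on either side.
	 */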
2463 
2464 	/* The partition end must exceed 32 bits. */
2465 	if (part.base + part.size < base_pos) {
2466 		test_group("high disk positions", FALSE);
2467 
2468 		return;
2469 	}
2470 
2471 	base_pos -= sector_size * 8;
2472 
2473 	/* The partition start must not. */
2474 	if (base_pos < part.base) {
2475 		test_group("high disk positions", FALSE);
2476 		return;
2477 	}
2478 
2479 	test_group("high disk positions", TRUE);
2480 
2481 	base_pos -= part.base;
2482 
2483 	sweep_and_check(base_pos, part.base == 0ULL);
2484 }
2485 
2486 static void high_part_pos(void)
2487 {
2488 	/* Test 64-bit partition-relative disk positions. In other words, use
2489 	 * within the current partition a position that exceeds a 32-bit value.
2490 	 * This requires the partition to be more than 4GB in size; we need an
2491 	 * additional 4 sectors, to be exact.
2492 	 */
2493 	u64_t base_pos;
2494 
2495 	/* If the partition starts at the beginning of the disk, this test is
2496 	 * no different from the high disk position test.
2497 	 */
2498 	if (part.base == 0ULL) {
2499 		/* don't complain: the test is simply superfluous now */
2500 		return;
2501 	}
2502 
2503 	base_pos = 0x100000000ULL | (sector_size * 4);
2504 	base_pos -= base_pos % sector_size;
2505 
2506 	if (part.size < base_pos) {
2507 		test_group("high partition positions", FALSE);
2508 
2509 		return;
2510 	}
2511 
2512 	test_group("high partition positions", TRUE);
2513 
2514 	base_pos -= sector_size * 8;
2515 
2516 	sweep_and_check(base_pos, TRUE);
2517 }
2518 
2519 static void high_lba_pos1(void)
2520 {
2521 	/* Test 48-bit LBA positions, as opposed to *24-bit*. Drivers that
2522 	 * support only 48-bit LBA ATA transfers will treat the lower and
2523 	 * upper 24 bits differently. This is again relative to the disk
2524 	 * start, not the partition start. For 512-byte sectors, the lowest
2525 	 * position exceeding 24 bits is at 8GB. As usual, we need four
2526 	 * sectors beyond that point, and four sectors before it. The
2527 	 * partition that we're operating on must cover this area.
2528 	 */
2529 	u64_t base_pos;
2530 
2531 	base_pos = (1ULL << 24) * sector_size;
2532 
2533 	/* The partition end must exceed the 24-bit sector point. */
2534 	if (part.base + part.size < base_pos) {
2535 		test_group("high LBA positions, part one", FALSE);
2536 
2537 		return;
2538 	}
2539 
2540 	base_pos -= sector_size * 8;
2541 
2542 	/* The partition start must not. */
2543 	if (base_pos < part.base) {
2544 		test_group("high LBA positions, part one", FALSE);
2545 
2546 		return;
2547 	}
2548 
2549 	test_group("high LBA positions, part one", TRUE);
2550 
2551 	base_pos -= part.base;
2552 
2553 	sweep_and_check(base_pos, part.base == 0ULL);
2554 }
2555 
2556 static void high_lba_pos2(void)
2557 {
2558 	/* Test 48-bit LBA positions, as opposed to *28-bit*. That means sector
2559 	 * numbers in excess of 28-bit values, the old ATA upper limit. The
2560 	 * same considerations as above apply, except that we now need a 128+GB
2561 	 * partition.
2562 	 */
2563 	u64_t base_pos;
2564 
2565 	base_pos = (1ULL << 28) * sector_size;
2566 
2567 	/* The partition end must exceed the 28-bit sector point. */
2568 	if (part.base + part.size < base_pos) {
2569 		test_group("high LBA positions, part two", FALSE);
2570 
2571 		return;
2572 	}
2573 
2574 	base_pos -= sector_size * 8;
2575 
2576 	/* The partition start must not. */
2577 	if (base_pos < part.base) {
2578 		test_group("high LBA positions, part two", FALSE);
2579 
2580 		return;
2581 	}
2582 
2583 	test_group("high LBA positions, part two", TRUE);
2584 
2585 	base_pos -= part.base;
2586 
2587 	sweep_and_check(base_pos, part.base == 0ULL);
2588 }
2589 
2590 static void high_pos(void)
2591 {
2592 	/* Check whether the driver deals well with 64-bit positions and
2593 	 * 48-bit LBA addresses. We test three cases: disk byte position beyond
2594 	 * what fits in 32 bit, in-partition byte position beyond what fits in
2595 	 * 32 bit, and disk sector position beyond what fits in 24 bit. With
2596 	 * the partition we've been given, we may not be able to test all of
2597 	 * them (or any, for that matter).
2598 	 */
2599 	/* In certain rare cases, we might be able to perform integrity
2600 	 * checking on the area that would be affected if a 32-bit/24-bit
2601 	 * counter were to wrap. More specifically: we can do that if we can
2602 	 * access the start of the disk. This is why we should be given the
2603 	 * entire disk as test area if at all possible.
2604 	 */
2605 
2606 	basic_sweep();
2607 
2608 	high_disk_pos();
2609 
2610 	high_part_pos();
2611 
2612 	high_lba_pos1();
2613 
2614 	high_lba_pos2();
2615 }
2616 
2617 static void open_primary(void)
2618 {
2619 	/* Open the primary device. This call has its own test group.
2620 	 */
2621 
2622 	test_group("device open", TRUE);
2623 
2624 	open_device(driver_minor);
2625 }
2626 
2627 static void close_primary(void)
2628 {
2629 	/* Close the primary device. This call has its own test group.
2630 	 */
2631 
2632 	test_group("device close", TRUE);
2633 
2634 	close_device(driver_minor);
2635 
2636 	assert(nr_opened == 0);
2637 }
2638 
2639 static void do_tests(void)
2640 {
2641 	/* Perform all the tests.
2642 	 */
2643 
2644 	open_primary();
2645 
2646 	misc_ioctl();
2647 
2648 	bad_read1();
2649 
2650 	bad_read2();
2651 
2652 	/* It is assumed that the driver implementation uses shared
2653 	 * code paths for read and write for the basic checks, so we do
2654 	 * not repeat those for writes.
2655 	 */
2656 	bad_write();
2657 
2658 	vector_and_large();
2659 
2660 	part_limits();
2661 
2662 	unaligned_size();
2663 
2664 	unaligned_pos1();
2665 
2666 	unaligned_pos2();
2667 
2668 	high_pos();
2669 
2670 	close_primary();
2671 }
2672 
2673 static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
2674 {
2675 	/* Initialize.
2676 	 */
2677 	int r;
2678 	clock_t now;
2679 
2680 	if (env_argc > 1)
2681 		optset_parse(optset_table, env_argv[1]);
2682 
2683 	if (driver_label[0] == '\0')
2684 		panic("no driver label given");
2685 
2686 	if (ds_retrieve_label_endpt(driver_label, &driver_endpt))
2687 		panic("unable to resolve driver label");
2688 
2689 	if (driver_minor < 0 || driver_minor > 255)
2690 		panic("invalid or no driver minor given");
2691 
2692 	if ((r = getticks(&now)) != OK)
2693 		panic("unable to get uptime: %d", r);
2694 
2695 	srand48(now);
2696 
2697 	output("BLOCKTEST: driver label '%s' (endpt %d), minor %d\n",
2698 		driver_label, driver_endpt, driver_minor);
2699 
2700 	do_tests();
2701 
2702 	output("BLOCKTEST: summary: %d out of %d tests failed "
2703 		"across %d group%s; %d driver deaths\n",
2704 		failed_tests, total_tests, failed_groups,
2705 		failed_groups == 1 ? "" : "s", driver_deaths);
2706 
2707 	/* The returned code will determine the outcome of the RS call, and
2708 	 * thus the entire test. The actual error code does not matter.
2709 	 */
2710 	return (failed_tests) ? EINVAL : OK;
2711 }
2712 
2713 static void sef_local_startup(void)
2714 {
2715 	/* Initialize the SEF framework.
2716 	 */
2717 
2718 	sef_setcb_init_fresh(sef_cb_init_fresh);
2719 
2720 	sef_startup();
2721 }
2722 
2723 int main(int argc, char **argv)
2724 {
2725 	/* Driver task.
2726 	 */
2727 
2728 	env_setargs(argc, argv);
2729 	sef_local_startup();
2730 
2731 	return 0;
2732 }
2733