xref: /openbsd/sys/scsi/sd.c (revision 274d7c50)
1 /*	$OpenBSD: sd.c,v 1.301 2019/11/26 20:51:20 krw Exp $	*/
2 /*	$NetBSD: sd.c,v 1.111 1997/04/02 02:29:41 mycroft Exp $	*/
3 
4 /*-
5  * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Charles M. Hannum.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Originally written by Julian Elischer (julian@dialix.oz.au)
35  * for TRW Financial Systems for use under the MACH(2.5) operating system.
36  *
37  * TRW Financial Systems, in accordance with their agreement with Carnegie
38  * Mellon University, makes this software available to CMU to distribute
39  * or use in any manner that they see fit as long as this message is kept with
40  * the software. For this reason TFS also grants any other persons or
41  * organisations permission to use or modify this software.
42  *
43  * TFS supplies this software to be publicly redistributed
44  * on the understanding that TFS is not responsible for the correct
45  * functioning of this software in any circumstances.
46  *
47  * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
48  */
49 
50 #include <sys/stdint.h>
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/timeout.h>
54 #include <sys/fcntl.h>
55 #include <sys/stat.h>
56 #include <sys/ioctl.h>
57 #include <sys/mtio.h>
58 #include <sys/mutex.h>
59 #include <sys/buf.h>
60 #include <sys/uio.h>
61 #include <sys/malloc.h>
62 #include <sys/pool.h>
63 #include <sys/errno.h>
64 #include <sys/device.h>
65 #include <sys/disklabel.h>
66 #include <sys/disk.h>
67 #include <sys/conf.h>
68 #include <sys/scsiio.h>
69 #include <sys/dkio.h>
70 #include <sys/reboot.h>
71 
72 #include <scsi/scsi_all.h>
73 #include <scsi/scsi_disk.h>
74 #include <scsi/scsiconf.h>
75 #include <scsi/sdvar.h>
76 
77 #include <ufs/ffs/fs.h>			/* for BBSIZE and SBSIZE */
78 
79 #include <sys/vnode.h>
80 
/* Autoconf glue. */
int	sdmatch(struct device *, void *, void *);
void	sdattach(struct device *, struct device *, void *);
int	sdactivate(struct device *, int);
int	sddetach(struct device *, int);

/* I/O path and device-parameter probing helpers. */
void	sdminphys(struct buf *);
int	sdgetdisklabel(dev_t, struct sd_softc *, struct disklabel *, int);
void	sdstart(struct scsi_xfer *);
int	sd_interpret_sense(struct scsi_xfer *);
int	sd_read_cap_10(struct sd_softc *, int);
int	sd_read_cap_16(struct sd_softc *, int);
int	sd_read_cap(struct sd_softc *, int);
int	sd_thin_pages(struct sd_softc *, int);
int	sd_vpd_block_limits(struct sd_softc *, int);
int	sd_vpd_thin(struct sd_softc *, int);
int	sd_thin_params(struct sd_softc *, int);
int	sd_get_parms(struct sd_softc *, int);
int	sd_flush(struct sd_softc *, int);

void	viscpy(u_char *, u_char *, int);

/* ioctl helpers. */
int	sd_ioctl_inquiry(struct sd_softc *, struct dk_inquiry *);
int	sd_ioctl_cache(struct sd_softc *, long, struct dk_cache *);

/* Builders for the various READ/WRITE cdb sizes. */
void	sd_cmd_rw6(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw10(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw12(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw16(struct scsi_xfer *, int, u_int64_t, u_int);

void	sd_buf_done(struct scsi_xfer *);
111 
/* Autoconf attachment: softc size plus match/attach/detach/activate hooks. */
struct cfattach sd_ca = {
	sizeof(struct sd_softc), sdmatch, sdattach,
	sddetach, sdactivate
};
116 
/* Driver definition: unit bookkeeping for all attached "sd" disks. */
struct cfdriver sd_cd = {
	NULL, "sd", DV_DISK
};
120 
/*
 * Inquiry patterns accepted by this driver: direct-access, reduced
 * block command and optical devices, both fixed and removable.  The
 * empty vendor/product/revision strings act as wildcards (matched by
 * scsi_inqmatch()).
 */
const struct scsi_inquiry_pattern sd_patterns[] = {
	{T_DIRECT, T_FIXED,
	 "",         "",                 ""},
	{T_DIRECT, T_REMOV,
	 "",         "",                 ""},
	{T_RDIRECT, T_FIXED,
	 "",         "",                 ""},
	{T_RDIRECT, T_REMOV,
	 "",         "",                 ""},
	{T_OPTICAL, T_FIXED,
	 "",         "",                 ""},
	{T_OPTICAL, T_REMOV,
	 "",         "",                 ""},
};
135 
/*
 * Map a unit number to its softc.  Callers pair every successful
 * lookup with device_unref(&sc->sc_dev).
 */
#define sdlookup(unit) (struct sd_softc *)disk_lookup(&sd_cd, (unit))
137 
138 int
139 sdmatch(struct device *parent, void *match, void *aux)
140 {
141 	struct scsi_attach_args *sa = aux;
142 	int priority;
143 
144 	(void)scsi_inqmatch(sa->sa_inqbuf,
145 	    sd_patterns, nitems(sd_patterns),
146 	    sizeof(sd_patterns[0]), &priority);
147 
148 	return (priority);
149 }
150 
/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 */
void
sdattach(struct device *parent, struct device *self, void *aux)
{
	struct sd_softc *sc = (struct sd_softc *)self;
	struct scsi_attach_args *sa = aux;
	struct disk_parms *dp = &sc->params;
	struct scsi_link *link = sa->sa_sc_link;
	/* Flags applied to every probe command issued during attach. */
	int sd_autoconf = scsi_autoconf | SCSI_SILENT |
	    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE;
	struct dk_cache dkc;
	int error, sortby = BUFQ_DEFAULT;

	SC_DEBUG(link, SDEV_DB2, ("sdattach:\n"));

	/*
	 * Store information needed to contact our base driver
	 */
	sc->sc_link = link;
	link->interpret_sense = sd_interpret_sense;
	link->device_softc = sc;

	/* Don't send SYNCHRONIZE CACHE to removable ATAPI devices. */
	if (ISSET(link->flags, SDEV_ATAPI) && ISSET(link->flags, SDEV_REMOVABLE))
		SET(link->quirks, SDEV_NOSYNCCACHE);

	/*
	 * Without relative-addressing support, restrict the device to
	 * 10-byte and larger r/w cdbs (see the cdb choice in sdstart()).
	 */
	if (!ISSET(link->inqdata.flags, SID_RelAdr))
		SET(link->quirks, SDEV_ONLYBIG);

	/*
	 * Note if this device is ancient.  This is used in sdminphys().
	 */
	if (!ISSET(link->flags, SDEV_ATAPI) &&
	    SID_ANSII_REV(sa->sa_inqbuf) == SCSI_REV_0)
		SET(sc->flags, SDF_ANCIENT);

	/*
	 * Use the subdriver to request information regarding
	 * the drive. We cannot use interrupts yet, so the
	 * request must specify this.
	 */
	printf("\n");

	scsi_xsh_set(&sc->sc_xsh, link, sdstart);
	timeout_set(&sc->sc_timeout, (void (*)(void *))scsi_xsh_add,
	    &sc->sc_xsh);

	/* Spin up non-UMASS devices ready or not. */
	if (!ISSET(link->flags, SDEV_UMASS))
		scsi_start(link, SSS_START, sd_autoconf);

	/*
	 * Some devices (e.g. BlackBerry Pearl) won't admit they have
	 * media loaded unless its been locked in.
	 */
	if (ISSET(link->flags, SDEV_REMOVABLE))
		scsi_prevent(link, PR_PREVENT, sd_autoconf);

	/* Check that it is still responding and ok. */
	error = scsi_test_unit_ready(sc->sc_link, TEST_READY_RETRIES * 3,
	    sd_autoconf);
	if (error == 0)
		error = sd_get_parms(sc, sd_autoconf);

	/* Drop the media lock taken above; sdopen() re-locks as needed. */
	if (ISSET(link->flags, SDEV_REMOVABLE))
		scsi_prevent(link, PR_ALLOW, sd_autoconf);

	if (error == 0) {
		printf("%s: %lluMB, %u bytes/sector, %llu sectors",
		    sc->sc_dev.dv_xname,
		    dp->disksize / (1048576 / dp->secsize), dp->secsize,
		    dp->disksize);
		if (ISSET(sc->flags, SDF_THIN)) {
			/* Thin-provisioned: queue I/O in FIFO order. */
			sortby = BUFQ_FIFO;
			printf(", thin");
		}
		if (ISSET(link->flags, SDEV_READONLY))
			printf(", readonly");
		printf("\n");
	}

	/*
	 * Initialize disk structures.
	 */
	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
	bufq_init(&sc->sc_bufq, sortby);

	/*
	 * Enable write cache by default.
	 */
	memset(&dkc, 0, sizeof(dkc));
	if (sd_ioctl_cache(sc, DIOCGCACHE, &dkc) == 0 && dkc.wrcache == 0) {
		dkc.wrcache = 1;
		sd_ioctl_cache(sc, DIOCSCACHE, &dkc);
	}

	/* Attach disk. */
	disk_attach(&sc->sc_dev, &sc->sc_dk);
}
252 
/*
 * Handle autoconf activate/deactivate events (suspend, powerdown,
 * resume, deactivate).  Returns ENXIO if the softc is already dying.
 */
int
sdactivate(struct device *self, int act)
{
	struct scsi_link *link;
	struct sd_softc *sc = (struct sd_softc *)self;

	if (ISSET(sc->flags, SDF_DYING))
		return (ENXIO);
	link = sc->sc_link;

	switch (act) {
	case DVACT_SUSPEND:
		/*
		 * Flush the cache, since our next step before
		 * DVACT_POWERDOWN might be a hibernate operation.
		 */
		if (ISSET(sc->flags, SDF_DIRTY))
			sd_flush(sc, SCSI_AUTOCONF);
		break;
	case DVACT_POWERDOWN:
		/*
		 * Stop the disk.  Stopping the disk should flush the
		 * cache, but we are paranoid so we flush the cache
		 * first.  We're cold at this point, so we poll for
		 * completion.
		 */
		if (ISSET(sc->flags, SDF_DIRTY))
			sd_flush(sc, SCSI_AUTOCONF);
		/* Only spin the disk down if we are powering off. */
		if (ISSET(boothowto, RB_POWERDOWN))
			scsi_start(link, SSS_STOP,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_AUTOCONF);
		break;
	case DVACT_RESUME:
		/* Spin the disk back up after suspend/hibernate. */
		scsi_start(link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_AUTOCONF);
		break;
	case DVACT_DEACTIVATE:
		/* Mark the unit dying and cancel any pending restart work. */
		SET(sc->flags, SDF_DYING);
		timeout_del(&sc->sc_timeout);
		scsi_xsh_del(&sc->sc_xsh);
		break;
	}
	return (0);
}
298 
/*
 * Detach the unit: fail all queued bufs, invalidate outstanding opens
 * of this unit via disk_gone(), then tear down the queue and disk
 * structures.
 */
int
sddetach(struct device *self, int flags)
{
	struct sd_softc *sc = (struct sd_softc *)self;

	/* Error out any bufs still queued for this disk. */
	bufq_drain(&sc->sc_bufq);

	disk_gone(sdopen, self->dv_unit);

	/* Detach disk. */
	bufq_destroy(&sc->sc_bufq);
	disk_detach(&sc->sc_dk);

	return (0);
}
314 
/*
 * Open the device. Make sure the partition info is as up-to-date as can be.
 *
 * NOTE: the SCSI commands issued below can sleep, and the device may
 * be marked SDF_DYING while they do; hence the repeated SDF_DYING
 * checks after each command.
 */
int
sdopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int error = 0, part, rawopen, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);

	/* Opens of the raw character device get lenient error handling. */
	rawopen = (part == RAW_PART) && (fmt == S_IFCHR);

	sc = sdlookup(unit);
	if (sc == NULL)
		return (ENXIO);
	if (ISSET(sc->flags, SDF_DYING)) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	/* Refuse writable opens of read-only media. */
	if (ISSET(flag, FWRITE) && ISSET(link->flags, SDEV_READONLY)) {
		device_unref(&sc->sc_dev);
		return (EACCES);
	}

	SC_DEBUG(link, SDEV_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	if ((error = disk_lock(&sc->sc_dk)) != 0) {
		device_unref(&sc->sc_dev);
		return (error);
	}

	if (sc->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partition.
		 */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
			if (rawopen)
				goto out;
			error = EIO;
			goto bad;
		}
	} else {
		/* Spin up non-UMASS devices ready or not. */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		if (!ISSET(link->flags, SDEV_UMASS))
			scsi_start(link, SSS_START, (rawopen ? SCSI_SILENT :
			    0) | SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);

		/* Use sd_interpret_sense() for sense errors.
		 *
		 * But only after spinning the disk up! Just in case a broken
		 * device returns "Initialization command required." and causes
		 * a loop of scsi_start() calls.
		 */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		SET(link->flags, SDEV_OPEN);

		/*
		 * Try to prevent the unloading of a removable device while
		 * it's open. But allow the open to proceed if the device can't
		 * be locked in.
		 */
		if (ISSET(link->flags, SDEV_REMOVABLE)) {
			scsi_prevent(link, PR_PREVENT, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		}

		/* Check that it is still responding and ok. */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		error = scsi_test_unit_ready(link,
		    TEST_READY_RETRIES, SCSI_SILENT |
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE);
		if (error) {
			/* Raw opens succeed even without ready media. */
			if (rawopen) {
				error = 0;
				goto out;
			} else
				goto bad;
		}

		/* Load the physical device parameters. */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		SET(link->flags, SDEV_MEDIA_LOADED);
		if (sd_get_parms(sc, (rawopen ? SCSI_SILENT : 0)) == -1) {
			if (ISSET(sc->flags, SDF_DYING)) {
				error = ENXIO;
				goto die;
			}
			CLR(link->flags, SDEV_MEDIA_LOADED);
			error = ENXIO;
			goto bad;
		}
		SC_DEBUG(link, SDEV_DB3, ("Params loaded\n"));

		/* Load the partition info if not already loaded. */
		error = sdgetdisklabel(dev, sc, sc->sc_dk.dk_label, 0);
		if (error == EIO || error == ENXIO)
			goto bad;
		SC_DEBUG(link, SDEV_DB3, ("Disklabel loaded\n"));
	}

out:
	if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
		goto bad;

	SC_DEBUG(link, SDEV_DB3, ("open complete\n"));

	/* It's OK to fall through because dk_openmask is now non-zero. */
bad:
	if (sc->sc_dk.dk_openmask == 0) {
		/* Open failed and nothing else is open: undo the prevent. */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		if (ISSET(link->flags, SDEV_REMOVABLE))
			scsi_prevent(link, PR_ALLOW, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		CLR(link->flags, SDEV_OPEN | SDEV_MEDIA_LOADED);
	}

die:
	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return (error);
}
471 
/*
 * Close the device. Only called if we are the last occurrence of an open
 * device.  Convenient now but usually a pain.
 */
int
sdclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int part = DISKPART(dev);
	int error = 0;

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return (ENXIO);
	if (ISSET(sc->flags, SDF_DYING)) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	disk_lock_nointr(&sc->sc_dk);

	disk_closepart(&sc->sc_dk, part, fmt);

	/*
	 * Flush dirty write data on the close of any writable open and
	 * on the last close of the unit.
	 */
	if ((ISSET(flag, FWRITE) || sc->sc_dk.dk_openmask == 0) &&
	    ISSET(sc->flags, SDF_DIRTY))
		sd_flush(sc, 0);

	if (sc->sc_dk.dk_openmask == 0) {
		/* Last close: allow media removal and forget its state. */
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		if (ISSET(link->flags, SDEV_REMOVABLE))
			scsi_prevent(link, PR_ALLOW,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_SILENT);
		if (ISSET(sc->flags, SDF_DYING)) {
			error = ENXIO;
			goto die;
		}
		CLR(link->flags, SDEV_OPEN | SDEV_MEDIA_LOADED);

		if (ISSET(link->flags, SDEV_EJECTING)) {
			/* A DIOCEJECT was requested while open; do it now. */
			scsi_start(link, SSS_STOP|SSS_LOEJ, 0);
			if (ISSET(sc->flags, SDF_DYING)) {
				error = ENXIO;
				goto die;
			}
			CLR(link->flags, SDEV_EJECTING);
		}

		timeout_del(&sc->sc_timeout);
		scsi_xsh_del(&sc->sc_xsh);
	}

die:
	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return (error);
}
534 
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
void
sdstrategy(struct buf *bp)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	int s;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	if (ISSET(sc->flags, SDF_DYING)) {
		bp->b_error = ENXIO;
		goto bad;
	}
	link = sc->sc_link;

	SC_DEBUG(link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
	    bp->b_bcount, (long long)bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
		if (ISSET(link->flags, SDEV_OPEN))
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Place it in the queue of disk activities for this disk. */
	bufq_queue(&sc->sc_bufq, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	scsi_xsh_add(&sc->sc_xsh);

	device_unref(&sc->sc_dev);
	return;

bad:
	/* Fail the buf without queueing it. */
	SET(bp->b_flags, B_ERROR);
	bp->b_resid = bp->b_bcount;
done:
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}
597 
598 void
599 sd_cmd_rw6(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
600 {
601 	struct scsi_rw *cmd = (struct scsi_rw *)xs->cmd;
602 
603 	cmd->opcode = read ? READ_COMMAND : WRITE_COMMAND;
604 	_lto3b(secno, cmd->addr);
605 	cmd->length = nsecs;
606 
607 	xs->cmdlen = sizeof(*cmd);
608 }
609 
610 void
611 sd_cmd_rw10(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
612 {
613 	struct scsi_rw_big *cmd = (struct scsi_rw_big *)xs->cmd;
614 
615 	cmd->opcode = read ? READ_BIG : WRITE_BIG;
616 	_lto4b(secno, cmd->addr);
617 	_lto2b(nsecs, cmd->length);
618 
619 	xs->cmdlen = sizeof(*cmd);
620 }
621 
622 void
623 sd_cmd_rw12(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
624 {
625 	struct scsi_rw_12 *cmd = (struct scsi_rw_12 *)xs->cmd;
626 
627 	cmd->opcode = read ? READ_12 : WRITE_12;
628 	_lto4b(secno, cmd->addr);
629 	_lto4b(nsecs, cmd->length);
630 
631 	xs->cmdlen = sizeof(*cmd);
632 }
633 
634 void
635 sd_cmd_rw16(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
636 {
637 	struct scsi_rw_16 *cmd = (struct scsi_rw_16 *)xs->cmd;
638 
639 	cmd->opcode = read ? READ_16 : WRITE_16;
640 	_lto8b(secno, cmd->addr);
641 	_lto4b(nsecs, cmd->length);
642 
643 	xs->cmdlen = sizeof(*cmd);
644 }
645 
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct sd_softc *sc = link->device_softc;
	struct buf *bp;
	u_int64_t secno;
	int nsecs;
	int read;
	struct partition *p;

	if (ISSET(sc->flags, SDF_DYING)) {
		scsi_xs_put(xs);
		return;
	}
	if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
		/* Media went away: fail everything still queued. */
		bufq_drain(&sc->sc_bufq);
		scsi_xs_put(xs);
		return;
	}

	bp = bufq_dequeue(&sc->sc_bufq);
	if (bp == NULL) {
		/* Nothing to do; return the xs to the pool. */
		scsi_xs_put(xs);
		return;
	}

	/*
	 * Convert the buf's DEV_BSIZE block number to an absolute device
	 * sector by scaling to the label sector size and adding the
	 * partition offset.
	 */
	secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);

	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	secno += DL_GETPOFFSET(p);
	nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
	read = bp->b_flags & B_READ;

	/*
	 *  Fill out the scsi command.  If the transfer will
	 *  fit in a "small" cdb, use it.
	 */
	if (!ISSET(link->flags, SDEV_ATAPI) &&
	    !ISSET(link->quirks, SDEV_ONLYBIG) &&
	    ((secno & 0x1fffff) == secno) &&
	    ((nsecs & 0xff) == nsecs))
		/* 6-byte cdb: 21-bit sector number, 8-bit count. */
		sd_cmd_rw6(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffff) == nsecs))
		/* 10-byte cdb: 32-bit sector number, 16-bit count. */
		sd_cmd_rw10(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffffffff) == nsecs))
		/* 12-byte cdb: 32-bit sector number, 32-bit count. */
		sd_cmd_rw12(xs, read, secno, nsecs);
	else
		/* 16-byte cdb: 64-bit sector number, 32-bit count. */
		sd_cmd_rw16(xs, read, secno, nsecs);

	SET(xs->flags, (read ? SCSI_DATA_IN : SCSI_DATA_OUT));
	xs->timeout = 60000;
	xs->data = bp->b_data;
	xs->datalen = bp->b_bcount;

	xs->done = sd_buf_done;
	xs->cookie = bp;
	xs->bp = bp;

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	/* Mark disk as dirty. */
	if (!read)
		SET(sc->flags, SDF_DIRTY);

	scsi_xs_exec(xs);

	/* move onto the next io */
	if (ISSET(sc->flags, SDF_WAITING))
		CLR(sc->flags, SDF_WAITING);
	else if (bufq_peek(&sc->sc_bufq))
		scsi_xsh_add(&sc->sc_xsh);
}
735 
/*
 * Completion handler for transfers issued by sdstart().  Interprets
 * the xfer's error status, retrying where that might help, then
 * finishes the buf with biodone() and returns the xs to the pool.
 */
void
sd_buf_done(struct scsi_xfer *xs)
{
	struct sd_softc *sc = xs->sc_link->device_softc;
	struct buf *bp = xs->cookie;
	int error, s;

	switch (xs->error) {
	case XS_NOERROR:
		bp->b_error = 0;
		CLR(bp->b_flags, B_ERROR);
		bp->b_resid = xs->resid;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		SC_DEBUG_SENSE(xs);
		error = sd_interpret_sense(xs);
		if (error == 0) {
			/* Sense data says the transfer succeeded after all. */
			bp->b_error = 0;
			CLR(bp->b_flags, B_ERROR);
			bp->b_resid = xs->resid;
			break;
		}
		if (error != ERESTART) {
			/* Hard error: record it and suppress retries. */
			bp->b_error = error;
			SET(bp->b_flags, B_ERROR);
			xs->retries = 0;
		}
		goto retry;

	case XS_BUSY:
		if (xs->retries) {
			/* Back off; if the delay fails, stop retrying. */
			if (scsi_delay(xs, 1) != ERESTART)
				xs->retries = 0;
		}
		goto retry;

	case XS_TIMEOUT:
retry:
		if (xs->retries--) {
			/* Re-issue the same xfer. */
			scsi_xs_exec(xs);
			return;
		}
		/* FALLTHROUGH */

	default:
		/* Retries exhausted (or unknown error): fail the buf. */
		if (bp->b_error == 0)
			bp->b_error = EIO;
		SET(bp->b_flags, B_ERROR);
		bp->b_resid = bp->b_bcount;
		break;
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid, bp->b_blkno,
	    bp->b_flags & B_READ);

	s = splbio();
	biodone(bp);
	splx(s);
	scsi_xs_put(xs);
}
798 
799 void
800 sdminphys(struct buf *bp)
801 {
802 	struct scsi_link *link;
803 	struct sd_softc *sc;
804 	long max;
805 
806 	sc = sdlookup(DISKUNIT(bp->b_dev));
807 	if (sc == NULL)
808 		return;  /* XXX - right way to fail this? */
809 	if (ISSET(sc->flags, SDF_DYING)) {
810 		device_unref(&sc->sc_dev);
811 		return;
812 	}
813 	link = sc->sc_link;
814 
815 	/*
816 	 * If the device is ancient, we want to make sure that
817 	 * the transfer fits into a 6-byte cdb.
818 	 *
819 	 * XXX Note that the SCSI-I spec says that 256-block transfers
820 	 * are allowed in a 6-byte read/write, and are specified
821 	 * by setting the "length" to 0.  However, we're conservative
822 	 * here, allowing only 255-block transfers in case an
823 	 * ancient device gets confused by length == 0.  A length of 0
824 	 * in a 10-byte read/write actually means 0 blocks.
825 	 */
826 	if (ISSET(sc->flags, SDF_ANCIENT)) {
827 		max = sc->sc_dk.dk_label->d_secsize * 0xff;
828 
829 		if (bp->b_bcount > max)
830 			bp->b_bcount = max;
831 	}
832 
833 	(*link->adapter->scsi_minphys)(bp, link);
834 
835 	device_unref(&sc->sc_dev);
836 }
837 
/* Character-device read entry point: funnel through physio(). */
int
sdread(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(sdstrategy, dev, B_READ, sdminphys, uio));
}
843 
/* Character-device write entry point: funnel through physio(). */
int
sdwrite(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(sdstrategy, dev, B_WRITE, sdminphys, uio));
}
849 
/*
 * Perform special action on behalf of the user
 * Knows about the internals of this device
 */
int
sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct scsi_link *link;
	struct sd_softc *sc;
	struct disklabel *lp;
	int error = 0;
	int part = DISKPART(dev);

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return (ENXIO);
	if (ISSET(sc->flags, SDF_DYING)) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	link = sc->sc_link;

	SC_DEBUG(link, SDEV_DB2, ("sdioctl 0x%lx\n", cmd));

	/*
	 * If the device is not valid.. abandon ship
	 */
	if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
		switch (cmd) {
		/* These ioctls are allowed on the raw partition even
		 * without loaded media. */
		case DIOCLOCK:
		case DIOCEJECT:
		case SCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
			if (part == RAW_PART)
				break;
		/* FALLTHROUGH */
		default:
			if (!ISSET(link->flags, SDEV_OPEN)) {
				error = ENODEV;
				goto exit;
			} else {
				error = EIO;
				goto exit;
			}
		}
	}

	switch (cmd) {
	case DIOCRLDINFO:
		/* Re-read the disklabel into the in-core copy. */
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		sdgetdisklabel(dev, sc, lp, 0);
		memcpy(sc->sc_dk.dk_label, lp, sizeof(*lp));
		free(lp, M_TEMP, sizeof(*lp));
		goto exit;

	case DIOCGPDINFO:
		/* Return a freshly spoofed (default) label. */
		sdgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		goto exit;

	case DIOCGDINFO:
		/* Return the current in-core label. */
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		goto exit;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		goto exit;

	case DIOCWDINFO:
	case DIOCSDINFO:
		/* Set the in-core label; DIOCWDINFO also writes it out. */
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			goto exit;

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
		if (error == 0) {
			if (cmd == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    sdstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		goto exit;

	case DIOCLOCK:
		/* Lock (prevent) or unlock (allow) media removal. */
		error = scsi_prevent(link,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
		goto exit;

	case MTIOCTOP:
		/* Only the "off-line" tape op is honoured; it ejects. */
		if (((struct mtop *)addr)->mt_op != MTOFFL) {
			error = EIO;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCEJECT:
		if (!ISSET(link->flags, SDEV_REMOVABLE)) {
			error = ENOTTY;
			goto exit;
		}
		/* Actual ejection is deferred to the last close. */
		SET(link->flags, SDEV_EJECTING);
		goto exit;

	case DIOCINQ:
		/* Let the adapter answer first; fall back to inquiry data. */
		error = scsi_do_ioctl(link, cmd, addr, flag);
		if (error == ENOTTY)
			error = sd_ioctl_inquiry(sc,
			    (struct dk_inquiry *)addr);
		goto exit;

	case DIOCSCACHE:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCGCACHE:
		error = sd_ioctl_cache(sc, cmd, (struct dk_cache *)addr);
		goto exit;

	case DIOCCACHESYNC:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}
		/* Force a flush when requested even if not dirty. */
		if (ISSET(sc->flags, SDF_DIRTY) || *(int *)addr != 0)
			error = sd_flush(sc, 0);
		goto exit;

	default:
		/* Pass anything else through to the adapter (raw only). */
		if (part != RAW_PART) {
			error = ENOTTY;
			goto exit;
		}
		error = scsi_do_ioctl(link, cmd, addr, flag);
	}

 exit:
	device_unref(&sc->sc_dev);
	return (error);
}
998 
999 int
1000 sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
1001 {
1002 	struct scsi_link *link;
1003 	struct scsi_vpd_serial *vpd;
1004 
1005 	vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);
1006 
1007 	if (ISSET(sc->flags, SDF_DYING)) {
1008 		dma_free(vpd, sizeof(*vpd));
1009 		return (ENXIO);
1010 	}
1011 	link = sc->sc_link;
1012 
1013 	bzero(di, sizeof(struct dk_inquiry));
1014 	scsi_strvis(di->vendor, link->inqdata.vendor,
1015 	    sizeof(link->inqdata.vendor));
1016 	scsi_strvis(di->product, link->inqdata.product,
1017 	    sizeof(link->inqdata.product));
1018 	scsi_strvis(di->revision, link->inqdata.revision,
1019 	    sizeof(link->inqdata.revision));
1020 
1021 	/* the serial vpd page is optional */
1022 	if (scsi_inquire_vpd(link, vpd, sizeof(*vpd), SI_PG_SERIAL, 0) == 0)
1023 		scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
1024 	else
1025 		strlcpy(di->serial, "(unknown)", sizeof(vpd->serial));
1026 
1027 	dma_free(vpd, sizeof(*vpd));
1028 	return (0);
1029 }
1030 
/*
 * Get (DIOCGCACHE) or set (DIOCSCACHE) the disk's write- and read-cache
 * enables via the CACHING mode page.
 *
 * Returns 0 on success; ENXIO if the device is going away; EOPNOTSUPP
 * for UMASS devices; ENOMEM if no dma buffer is available; EIO if the
 * caching mode page cannot be read; otherwise the error from the
 * adapter ioctl or the mode sense/select commands.
 */
int
sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
{
	struct scsi_link *link;
	union scsi_mode_sense_buf *buf;
	struct page_caching_mode *mode = NULL;
	u_int wrcache, rdcache;
	int big;
	int rv;

	if (ISSET(sc->flags, SDF_DYING))
		return (ENXIO);
	link = sc->sc_link;

	if (ISSET(link->flags, SDEV_UMASS))
		return (EOPNOTSUPP);

	/* see if the adapter has special handling */
	rv = scsi_do_ioctl(link, cmd, (caddr_t)dkc, 0);
	if (rv != ENOTTY)
		return (rv);

	buf = dma_alloc(sizeof(*buf), PR_WAITOK);
	if (buf == NULL)
		return (ENOMEM);

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_do_mode_sense(link, PAGE_CACHING_MODE,
	    buf, (void **)&mode, NULL, NULL, NULL,
	    sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
	if (rv != 0)
		goto done;

	if ((mode == NULL) || (!DISK_PGCODE(mode, PAGE_CACHING_MODE))) {
		rv = EIO;
		goto done;
	}

	/* Note RCD is "read cache disable", hence the inverted sense. */
	wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
	rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);

	switch (cmd) {
	case DIOCGCACHE:
		dkc->wrcache = wrcache;
		dkc->rdcache = rdcache;
		break;

	case DIOCSCACHE:
		/* Nothing to do if the device already agrees. */
		if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
			break;

		if (dkc->wrcache)
			SET(mode->flags, PG_CACHE_FL_WCE);
		else
			CLR(mode->flags, PG_CACHE_FL_WCE);

		if (dkc->rdcache)
			CLR(mode->flags, PG_CACHE_FL_RCD);
		else
			SET(mode->flags, PG_CACHE_FL_RCD);

		if (ISSET(sc->flags, SDF_DYING)) {
			rv = ENXIO;
			goto done;
		}
		/* Write the modified page back in the same form it came. */
		if (big) {
			rv = scsi_mode_select_big(link, SMS_PF,
			    &buf->hdr_big, scsi_autoconf | SCSI_SILENT, 20000);
		} else {
			rv = scsi_mode_select(link, SMS_PF,
			    &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
		}
		break;
	}

done:
	dma_free(buf, sizeof(*buf));
	return (rv);
}
1113 
1114 /*
1115  * Load the label information on the named device
1116  */
1117 int
1118 sdgetdisklabel(dev_t dev, struct sd_softc *sc, struct disklabel *lp,
1119     int spoofonly)
1120 {
1121 	struct scsi_link *link;
1122 	size_t len;
1123 	char packname[sizeof(lp->d_packname) + 1];
1124 	char product[17], vendor[9];
1125 
1126 	if (ISSET(sc->flags, SDF_DYING))
1127 		return (ENXIO);
1128 	link = sc->sc_link;
1129 
1130 	bzero(lp, sizeof(struct disklabel));
1131 
1132 	lp->d_secsize = sc->params.secsize;
1133 	lp->d_ntracks = sc->params.heads;
1134 	lp->d_nsectors = sc->params.sectors;
1135 	lp->d_ncylinders = sc->params.cyls;
1136 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1137 	if (lp->d_secpercyl == 0) {
1138 		lp->d_secpercyl = 100;
1139 		/* as long as it's not 0 - readdisklabel divides by it */
1140 	}
1141 
1142 	lp->d_type = DTYPE_SCSI;
1143 	if ((link->inqdata.device & SID_TYPE) == T_OPTICAL)
1144 		strncpy(lp->d_typename, "SCSI optical",
1145 		    sizeof(lp->d_typename));
1146 	else
1147 		strncpy(lp->d_typename, "SCSI disk",
1148 		    sizeof(lp->d_typename));
1149 
1150 	/*
1151 	 * Try to fit '<vendor> <product>' into d_packname. If that doesn't fit
1152 	 * then leave out '<vendor> ' and use only as much of '<product>' as
1153 	 * does fit.
1154 	 */
1155 	viscpy(vendor, link->inqdata.vendor, 8);
1156 	viscpy(product, link->inqdata.product, 16);
1157 	len = snprintf(packname, sizeof(packname), "%s %s", vendor, product);
1158 	if (len > sizeof(lp->d_packname)) {
1159 		strlcpy(packname, product, sizeof(packname));
1160 		len = strlen(packname);
1161 	}
1162 	/*
1163 	 * It is safe to use len as the count of characters to copy because
1164 	 * packname is sizeof(lp->d_packname)+1, the string in packname is
1165 	 * always null terminated and len does not count the terminating null.
1166 	 * d_packname is not a null terminated string.
1167 	 */
1168 	memcpy(lp->d_packname, packname, len);
1169 
1170 	DL_SETDSIZE(lp, sc->params.disksize);
1171 	lp->d_version = 1;
1172 	lp->d_flags = 0;
1173 
1174 	/* XXX - these values for BBSIZE and SBSIZE assume ffs */
1175 	lp->d_bbsize = BBSIZE;
1176 	lp->d_sbsize = SBSIZE;
1177 
1178 	lp->d_magic = DISKMAGIC;
1179 	lp->d_magic2 = DISKMAGIC;
1180 	lp->d_checksum = dkcksum(lp);
1181 
1182 	/*
1183 	 * Call the generic disklabel extraction routine
1184 	 */
1185 	return readdisklabel(DISKLABELDEV(dev), sdstrategy, lp, spoofonly);
1186 }
1187 
1188 
1189 /*
1190  * Check Errors
1191  */
int
sd_interpret_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense;
	struct scsi_link *link = xs->sc_link;
	u_int8_t serr = sense->error_code & SSD_ERRCODE;
	int retval;

	/*
	 * Let the generic code handle everything except a few categories of
	 * LUN not ready errors on open devices.
	 */
	if ((!ISSET(link->flags, SDEV_OPEN)) ||
	    (serr != SSD_ERRCODE_CURRENT && serr != SSD_ERRCODE_DEFERRED) ||
	    ((sense->flags & SSD_KEY) != SKEY_NOT_READY) ||
	    (sense->extra_len < 6))
		return (scsi_interpret_sense(xs));

	/* Caller asked that NOT READY be ignored; report success. */
	if (ISSET(xs->flags, SCSI_IGNORE_NOT_READY))
		return (0);

	switch (ASC_ASCQ(sense)) {
	case SENSE_NOT_READY_BECOMING_READY:
		SC_DEBUG(link, SDEV_DB1, ("becoming ready.\n"));
		/* Presumably scsi_delay() retries after a delay -- see scsi_base.c. */
		retval = scsi_delay(xs, 5);
		break;

	case SENSE_NOT_READY_INIT_REQUIRED:
		SC_DEBUG(link, SDEV_DB1, ("spinning up\n"));
		/* Issue a START UNIT without sleeping, then retry the command. */
		retval = scsi_start(link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_NOSLEEP);
		if (retval == 0)
			retval = ERESTART;
		else if (retval == ENOMEM)
			/* Can't issue the command. Fall back on a delay. */
			retval = scsi_delay(xs, 5);
		else
			SC_DEBUG(link, SDEV_DB1, ("spin up failed (%#x)\n",
			    retval));
		break;

	default:
		/* Any other NOT READY variant gets the generic treatment. */
		retval = scsi_interpret_sense(xs);
		break;
	}

	return (retval);
}
1240 
1241 daddr_t
1242 sdsize(dev_t dev)
1243 {
1244 	struct disklabel *lp;
1245 	struct sd_softc *sc;
1246 	int part, omask;
1247 	daddr_t size;
1248 
1249 	sc = sdlookup(DISKUNIT(dev));
1250 	if (sc == NULL)
1251 		return -1;
1252 	if (ISSET(sc->flags, SDF_DYING)) {
1253 		size = -1;
1254 		goto exit;
1255 	}
1256 
1257 	part = DISKPART(dev);
1258 	omask = sc->sc_dk.dk_openmask & (1 << part);
1259 
1260 	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0) {
1261 		size = -1;
1262 		goto exit;
1263 	}
1264 
1265 	lp = sc->sc_dk.dk_label;
1266 	if (ISSET(sc->flags, SDF_DYING)) {
1267 		size = -1;
1268 		goto exit;
1269 	}
1270 	if (!ISSET(sc->sc_link->flags, SDEV_MEDIA_LOADED))
1271 		size = -1;
1272 	else if (lp->d_partitions[part].p_fstype != FS_SWAP)
1273 		size = -1;
1274 	else
1275 		size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
1276 	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1277 		size = -1;
1278 
1279  exit:
1280 	device_unref(&sc->sc_dev);
1281 	return size;
1282 }
1283 
1284 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1285 static int sddoingadump;
1286 
1287 /*
1288  * dump all of physical memory into the partition specified, starting
1289  * at offset 'dumplo' into the partition.
1290  */
int
sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	struct sd_softc *sc;	/* disk unit to do the I/O */
	struct disklabel *lp;	/* disk's disklabel */
	int	unit, part;
	u_int32_t sectorsize;	/* size of a disk sector */
	u_int64_t nsects;	/* number of sectors in partition */
	u_int64_t sectoff;	/* sector offset of partition */
	u_int64_t totwrt;	/* total number of sectors left to write */
	u_int32_t nwrt;		/* current number of sectors to write */
	struct scsi_xfer *xs;	/* ... convenience */
	int rv;

	/* Check if recursive dump; if so, punt. */
	if (sddoingadump)
		return EFAULT;
	if (blkno < 0)
		return EINVAL;

	/*
	 * Mark as active early.
	 * NOTE(review): the error returns below leave sddoingadump set, so
	 * a later dump attempt would punt with EFAULT. Presumably harmless
	 * since dumps only happen while the system is going down -- confirm.
	 */
	sddoingadump = 1;

	unit = DISKUNIT(dev);	/* Decompose unit & partition. */
	part = DISKPART(dev);

	/* Check for acceptable drive number. */
	if (unit >= sd_cd.cd_ndevs || (sc = sd_cd.cd_devs[unit]) == NULL)
		return ENXIO;

	/*
	 * XXX Can't do this check, since the media might have been
	 * XXX marked `invalid' by successful unmounting of all
	 * XXX filesystems.
	 */
#if 0
	/* Make sure it was initialized. */
	if (!ISSET(sc->sc_link->flags, SDEV_MEDIA_LOADED))
		return ENXIO;
#endif /* 0 */

	/* Convert to disk sectors.  Request must be a multiple of size. */
	lp = sc->sc_dk.dk_label;
	sectorsize = lp->d_secsize;
	if ((size % sectorsize) != 0)
		return EFAULT;
	if ((blkno % DL_BLKSPERSEC(lp)) != 0)
		return EFAULT;
	totwrt = size / sectorsize;
	blkno = DL_BLKTOSEC(lp, blkno);

	nsects = DL_GETPSIZE(&lp->d_partitions[part]);
	sectoff = DL_GETPOFFSET(&lp->d_partitions[part]);

	/* Check transfer bounds against partition size. */
	if ((blkno + totwrt) > nsects)
		return EINVAL;

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Write the data out in chunks of at most UINT32_MAX sectors. */
	while (totwrt > 0) {
		if (totwrt > UINT32_MAX)
			nwrt = UINT32_MAX;
		else
			nwrt = totwrt;

#ifndef	SD_DUMP_NOT_TRUSTED
		/*
		 * Build and issue the transfer directly via scsi_xs_sync();
		 * the normal buf/queue machinery cannot be relied on at
		 * crash time.
		 */
		xs = scsi_xs_get(sc->sc_link, SCSI_NOSLEEP);
		if (xs == NULL)
			return (ENOMEM);

		xs->timeout = 10000;
		SET(xs->flags, SCSI_DATA_OUT);
		xs->data = va;
		xs->datalen = nwrt * sectorsize;

		sd_cmd_rw10(xs, 0, blkno, nwrt); /* XXX */

		rv = scsi_xs_sync(xs);
		scsi_xs_put(xs);
		if (rv != 0)
			return (ENXIO);
#else	/* SD_DUMP_NOT_TRUSTED */
		/* Let's just talk about this first... */
		printf("sd%d: dump addr 0x%x, blk %lld\n", unit, va,
		    (long long)blkno);
		delay(500 * 1000);	/* half a second */
#endif	/* ~SD_DUMP_NOT_TRUSTED */

		/* update block count */
		totwrt -= nwrt;
		blkno += nwrt;
		va += sectorsize * nwrt;
	}

	sddoingadump = 0;

	return (0);
}
1391 
1392 /*
1393  * Copy up to len chars from src to dst, ignoring non-printables.
1394  * Must be room for len+1 chars in dst so we can write the NUL.
1395  * Does not assume src is NUL-terminated.
1396  */
void
viscpy(u_char *dst, u_char *src, int len)
{
	/*
	 * Copy up to len printable (0x20..0x7f) characters from src to dst,
	 * silently dropping anything else. Stops at the first NUL in src.
	 * dst must have room for len+1 bytes; it is always NUL terminated.
	 */
	for (; len > 0 && *src != '\0'; src++) {
		if (*src >= 0x20 && *src < 0x80) {
			*dst++ = *src;
			len--;
		}
	}
	*dst = '\0';
}
1410 
1411 int
1412 sd_read_cap_10(struct sd_softc *sc, int flags)
1413 {
1414 	struct scsi_read_cap_data	*rdcap;
1415 	int				 rv;
1416 
1417 	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
1418 	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1419 	if (rdcap == NULL)
1420 		return -1;
1421 
1422 	if (ISSET(sc->flags, SDF_DYING)) {
1423 		rv = -1;
1424 		goto done;
1425 	}
1426 
1427 	rv = scsi_read_cap_10(sc->sc_link, rdcap, flags);
1428 	if (rv == 0) {
1429 		if (_8btol(rdcap->addr) == 0) {
1430 			rv = -1;
1431 			goto done;
1432 		}
1433 		sc->params.disksize = _4btol(rdcap->addr) + 1ll;
1434 		sc->params.secsize = _4btol(rdcap->length);
1435 		CLR(sc->flags, SDF_THIN);
1436 	}
1437 
1438 done:
1439 	dma_free(rdcap, sizeof(*rdcap));
1440 	return rv;
1441 }
1442 
1443 int
1444 sd_read_cap_16(struct sd_softc *sc, int flags)
1445 {
1446 	struct scsi_read_cap_data_16	*rdcap;
1447 	int				 rv;
1448 
1449 	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
1450 	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1451 	if (rdcap == NULL)
1452 		return -1;
1453 
1454 	if (ISSET(sc->flags, SDF_DYING)) {
1455 		rv = -1;
1456 		goto done;
1457 	}
1458 
1459 	rv = scsi_read_cap_16(sc->sc_link, rdcap, flags);
1460 	if (rv == 0) {
1461 		if (_8btol(rdcap->addr) == 0) {
1462 			rv = -1;
1463 			goto done;
1464 		}
1465 		sc->params.disksize = _8btol(rdcap->addr) + 1ll;
1466 		sc->params.secsize = _4btol(rdcap->length);
1467 		if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
1468 			SET(sc->flags, SDF_THIN);
1469 		else
1470 			CLR(sc->flags, SDF_THIN);
1471 	}
1472 
1473 done:
1474 	dma_free(rdcap, sizeof(*rdcap));
1475 	return rv;
1476 }
1477 
1478 int
1479 sd_read_cap(struct sd_softc *sc, int flags)
1480 {
1481 	int rv;
1482 
1483 	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);
1484 
1485 	/*
1486 	 * post-SPC2 (i.e. post-SCSI-3) devices can start with 16 byte
1487 	 * read capacity commands. Older devices start with the 10 byte
1488 	 * version and move up to the 16 byte version if the device
1489 	 * says it has more sectors than can be reported via the 10 byte
1490 	 * read capacity.
1491 	 */
1492 	if (SID_ANSII_REV(&sc->sc_link->inqdata) > SCSI_REV_SPC2) {
1493 		rv = sd_read_cap_16(sc, flags);
1494 		if (rv != 0)
1495 			rv = sd_read_cap_10(sc, flags);
1496 	} else {
1497 		rv = sd_read_cap_10(sc, flags);
1498 		if (rv == 0 && sc->params.disksize == 0x100000000ll)
1499 			rv = sd_read_cap_16(sc, flags);
1500 	}
1501 
1502 	return rv;
1503 }
1504 
/*
 * Check the SUPPORTED VPD page list for both pages needed to do thin
 * provisioning (DISK_LIMITS and DISK_THIN). Returns 0 if both are
 * present, EOPNOTSUPP if not, or another errno on failure.
 */
int
sd_thin_pages(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_hdr *pg;
	size_t len = 0;
	u_int8_t *pages;
	int i, score = 0;
	int rv;

	/* First pass: fetch only the header to learn the list length. */
	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	len = _2btol(pg->page_length);

	/*
	 * Second pass: reallocate with room for the page list and fetch
	 * the whole page. Note len is now non-zero, so the dma_free() at
	 * done: matches this larger allocation; the early gotos above all
	 * happen while len is still 0 and so free the original size.
	 */
	dma_free(pg, sizeof(*pg));
	pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	/* The supported-page bytes follow immediately after the header. */
	pages = (u_int8_t *)(pg + 1);
	if (pages[0] != SI_PG_SUPPORTED) {
		rv = EIO;
		goto done;
	}

	/* Count how many of the two required pages are advertised. */
	for (i = 1; i < len; i++) {
		switch (pages[i]) {
		case SI_PG_DISK_LIMITS:
		case SI_PG_DISK_THIN:
			score++;
			break;
		}
	}

	/* Both pages are required for thin provisioning. */
	if (score < 2)
		rv = EOPNOTSUPP;

done:
	dma_free(pg, sizeof(*pg) + len);
	return (rv);
}
1567 
1568 int
1569 sd_vpd_block_limits(struct sd_softc *sc, int flags)
1570 {
1571 	struct scsi_vpd_disk_limits *pg;
1572 	int rv;
1573 
1574 	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
1575 	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
1576 	if (pg == NULL)
1577 		return (ENOMEM);
1578 
1579 	if (ISSET(sc->flags, SDF_DYING)) {
1580 		rv = ENXIO;
1581 		goto done;
1582 	}
1583 	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
1584 	    SI_PG_DISK_LIMITS, flags);
1585 	if (rv != 0)
1586 		goto done;
1587 
1588 	if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
1589 		sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
1590 		sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
1591 	} else
1592 		rv = EOPNOTSUPP;
1593 
1594 done:
1595 	dma_free(pg, sizeof(*pg));
1596 	return (rv);
1597 }
1598 
/*
 * Read the LOGICAL BLOCK PROVISIONING (thin) VPD page.
 */
int
sd_vpd_thin(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_thin *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (ISSET(sc->flags, SDF_DYING)) {
		rv = ENXIO;
		goto done;
	}
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_THIN, flags);
	if (rv != 0)
		goto done;

	/*
	 * Picking an unmap strategy from the TPU/TPWS bits is compiled
	 * out; with "notyet" undefined a successful inquiry returns 0
	 * without examining pg->flags.
	 */
#ifdef notyet
	if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
		sc->sc_delete = sd_unmap;
	else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
		sc->sc_delete = sd_write_same_16;
		sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
	} else
		rv = EOPNOTSUPP;
#endif /* notyet */

done:
	dma_free(pg, sizeof(*pg));
	return (rv);
}
1633 
/*
 * Gather all thin provisioning parameters: confirm the needed VPD pages
 * exist, then read the UNMAP limits and the provisioning page. Stops at
 * the first failure and returns its error, 0 on full success.
 */
int
sd_thin_params(struct sd_softc *sc, int flags)
{
	int error;

	error = sd_thin_pages(sc, flags);
	if (error == 0)
		error = sd_vpd_block_limits(sc, flags);
	if (error == 0)
		error = sd_vpd_thin(sc, flags);

	return (error);
}
1653 
1654 /*
1655  * Fill out the disk parameter structure. Return 0 if the structure is correctly
1656  * filled in, otherwise return -1.
1657  *
1658  * The caller is responsible for clearing the SDEV_MEDIA_LOADED flag if the
1659  * structure cannot be completed.
1660  */
int
sd_get_parms(struct sd_softc *sc, int flags)
{
	struct disk_parms dp;
	struct scsi_link *link = sc->sc_link;
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid = NULL;
	struct page_flex_geometry *flex = NULL;
	struct page_reduced_geometry *reduced = NULL;
	u_char *page0 = NULL;
	int err = 0, big;

	if (sd_read_cap(sc, flags) != 0)
		return -1;

	if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
		/* we dont know the unmap limits, so we cant use thin shizz */
		CLR(sc->flags, SDF_THIN);
	}

	/*
	 * Work on a copy of the values initialized by sd_read_cap() and
	 * sd_thin_params().
	 */
	dp = sc->params;

	/* Without a mode sense buffer, just validate what we already have. */
	buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
	if (buf == NULL)
		goto validate;

	if (ISSET(sc->flags, SDF_DYING))
		goto die;

	/*
	 * Ask for page 0 (vendor specific) mode sense data to find
	 * READONLY info. The only thing USB devices will ask for.
	 */
	err = scsi_do_mode_sense(link, 0, buf, (void **)&page0,
	    NULL, NULL, NULL, 1, flags | SCSI_SILENT, &big);
	if (ISSET(sc->flags, SDF_DYING))
		goto die;
	if (err == 0) {
		/* The write-protect bit lives in the mode parameter header. */
		if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
			SET(link->flags, SDEV_READONLY);
		else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
			SET(link->flags, SDEV_READONLY);
		else
			CLR(link->flags, SDEV_READONLY);
	}

	/*
	 * Many UMASS devices choke when asked about their geometry. Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * sd_read_cap() worked.
	 */
	if (ISSET(link->flags, SDEV_UMASS) && dp.disksize > 0)
		goto validate;

	switch (link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		err = scsi_do_mode_sense(link, PAGE_REDUCED_GEOMETRY,
		    buf, (void **)&reduced, NULL, NULL, &dp.secsize,
		    sizeof(*reduced), flags | SCSI_SILENT, NULL);
		if (!err && reduced &&
		    DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
			/* Only fill in values sd_read_cap() did not supply. */
			if (dp.disksize == 0)
				dp.disksize = _5btol(reduced->sectors);
			if (dp.secsize == 0)
				dp.secsize = _2btol(reduced->bytes_s);
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page. The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		err = 0;
		if (!ISSET(link->flags, SDEV_ATAPI) ||
		    !ISSET(link->flags, SDEV_REMOVABLE))
			err = scsi_do_mode_sense(link,
			    PAGE_RIGID_GEOMETRY, buf, (void **)&rigid, NULL,
			    NULL, &dp.secsize, sizeof(*rigid) - 4,
			    flags | SCSI_SILENT, NULL);
		if (!err && rigid && DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
			dp.heads = rigid->nheads;
			dp.cyls = _3btol(rigid->ncyl);
			/* Derive sectors/track from the known disk size. */
			if (dp.heads * dp.cyls > 0)
				dp.sectors = dp.disksize / (dp.heads * dp.cyls);
		} else {
			/* Rigid page failed or was bogus; try the flex page. */
			if (ISSET(sc->flags, SDF_DYING))
				goto die;
			err = scsi_do_mode_sense(link,
			    PAGE_FLEX_GEOMETRY, buf, (void **)&flex, NULL, NULL,
			    &dp.secsize, sizeof(*flex) - 4,
			    flags | SCSI_SILENT, NULL);
			if (!err && flex &&
			    DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
				dp.sectors = flex->ph_sec_tr;
				dp.heads = flex->nheads;
				dp.cyls = _2btol(flex->ncyl);
				if (dp.secsize == 0)
					dp.secsize = _2btol(flex->bytes_s);
				if (dp.disksize == 0)
					dp.disksize = (u_int64_t)dp.cyls *
					    dp.heads * dp.sectors;
			}
		}
		break;
	}

validate:
	if (buf) {
		dma_free(buf, sizeof(*buf));
		buf = NULL;
	}

	/*
	 * NOTE(review): past this point buf is NULL, so the goto die just
	 * below hands dma_free() a NULL pointer -- assumes dma_free(NULL, ...)
	 * is safe; verify against scsi_base.c.
	 */
	if (dp.disksize == 0)
		goto die;

	/*
	 * Restrict secsize values to powers of two between 512 and 64k.
	 */
	switch (dp.secsize) {
	case 0:
		dp.secsize = DEV_BSIZE;
		break;
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad secsize: %#x\n", dp.secsize));
		return -1;
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space. We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 */
	if (dp.disksize > 0xffffffff && (dp.heads * dp.sectors) < 0xffff) {
		dp.heads = 511;
		dp.sectors = 255;
		dp.cyls = 0;
	}

	/*
	 * Use standard geometry values for anything we still don't
	 * know.
	 */
	if (dp.heads == 0)
		dp.heads = 255;
	if (dp.sectors == 0)
		dp.sectors = 63;
	if (dp.cyls == 0) {
		dp.cyls = dp.disksize / (dp.heads * dp.sectors);
		if (dp.cyls == 0) {
			/* Put everything into one cylinder. */
			dp.heads = dp.cyls = 1;
			dp.sectors = dp.disksize;
		}
	}

#ifdef SCSIDEBUG
	if (dp.disksize != (u_int64_t)dp.cyls * dp.heads * dp.sectors) {
		sc_print_addr(sc->sc_link);
		printf("disksize (%llu) != cyls (%u) * heads (%u) * "
		    "sectors/track (%u) (%llu)\n", dp.disksize, dp.cyls,
		    dp.heads, dp.sectors,
		    (u_int64_t)dp.cyls * dp.heads * dp.sectors);
	}
#endif /* SCSIDEBUG */

	/* Commit the validated copy. */
	sc->params = dp;
	return 0;

die:
	dma_free(buf, sizeof(*buf));
	return -1;
}
1857 
1858 int
1859 sd_flush(struct sd_softc *sc, int flags)
1860 {
1861 	struct scsi_link *link;
1862 	struct scsi_xfer *xs;
1863 	struct scsi_synchronize_cache *cmd;
1864 	int error;
1865 
1866 	if (ISSET(sc->flags, SDF_DYING))
1867 		return (ENXIO);
1868 	link = sc->sc_link;
1869 
1870 	if (ISSET(link->quirks, SDEV_NOSYNCCACHE))
1871 		return (0);
1872 
1873 	/*
1874 	 * Issue a SYNCHRONIZE CACHE. Address 0, length 0 means "all remaining
1875 	 * blocks starting at address 0". Ignore ILLEGAL REQUEST in the event
1876 	 * that the command is not supported by the device.
1877 	 */
1878 
1879 	xs = scsi_xs_get(link, flags);
1880 	if (xs == NULL) {
1881 		SC_DEBUG(link, SDEV_DB1, ("cache sync failed to get xs\n"));
1882 		return (EIO);
1883 	}
1884 
1885 	cmd = (struct scsi_synchronize_cache *)xs->cmd;
1886 	cmd->opcode = SYNCHRONIZE_CACHE;
1887 
1888 	xs->cmdlen = sizeof(*cmd);
1889 	xs->timeout = 100000;
1890 	SET(xs->flags, SCSI_IGNORE_ILLEGAL_REQUEST);
1891 
1892 	error = scsi_xs_sync(xs);
1893 
1894 	scsi_xs_put(xs);
1895 
1896 	if (error)
1897 		SC_DEBUG(link, SDEV_DB1, ("cache sync failed\n"));
1898 	else
1899 		CLR(sc->flags, SDF_DIRTY);
1900 
1901 	return (error);
1902 }
1903