xref: /dragonfly/sys/bus/cam/scsi/scsi_da.c (revision 58645856)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD: src/sys/cam/scsi/scsi_da.c,v 1.42.2.46 2003/10/21 22:18:19 thomas Exp $
29  */
30 
31 #include <sys/param.h>
32 
33 #ifdef _KERNEL
34 
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/buf.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/conf.h>
42 #include <sys/devicestat.h>
43 #include <sys/disk.h>
44 #include <sys/dtype.h>
45 #include <sys/eventhandler.h>
46 #include <sys/malloc.h>
47 #include <sys/cons.h>
48 #include <sys/proc.h>
49 
50 #include <sys/buf2.h>
51 
52 #endif /* _KERNEL */
53 
54 #ifdef _KERNEL
55 #include <vm/pmap.h>
56 #endif
57 
58 #ifndef _KERNEL
59 #include <stdio.h>
60 #include <string.h>
61 #endif /* _KERNEL */
62 
63 #include <sys/camlib.h>
64 #include "../cam.h"
65 #include "../cam_ccb.h"
66 #include "../cam_extend.h"
67 #include "../cam_periph.h"
68 #include "../cam_xpt_periph.h"
69 #include "../cam_sim.h"
70 
71 #include "scsi_daio.h"
72 #include "scsi_message.h"
73 
74 #ifndef _KERNEL
75 #include "scsi_da.h"
76 #endif /* !_KERNEL */
77 
78 #ifdef _KERNEL
79 typedef enum {
80 	DA_STATE_PROBE,
81 	DA_STATE_PROBE2,
82 	DA_STATE_NORMAL
83 } da_state;
84 
85 typedef enum {
86 	DA_FLAG_PACK_INVALID	= 0x001,
87 	DA_FLAG_NEW_PACK	= 0x002,
88 	DA_FLAG_PACK_LOCKED	= 0x004,
89 	DA_FLAG_PACK_REMOVABLE	= 0x008,
90 	DA_FLAG_TAGGED_QUEUING	= 0x010,
91 	DA_FLAG_RETRY_UA	= 0x080,
92 	DA_FLAG_OPEN		= 0x100,
93 	DA_FLAG_SCTX_INIT	= 0x200,
94 	DA_FLAG_RD_LIMIT	= 0x400,
95 	DA_FLAG_WR_LIMIT	= 0x800,
96 	DA_FLAG_CAN_TRIM	= 0x1000,
97 	DA_FLAG_CAP_MUTE	= 0x2000
98 } da_flags;
99 
100 typedef enum {
101 	DA_Q_NONE		= 0x00,
102 	DA_Q_NO_SYNC_CACHE	= 0x01,
103 	DA_Q_NO_6_BYTE		= 0x02,
104 	DA_Q_NO_PREVENT		= 0x04
105 } da_quirks;
106 
107 typedef enum {
108 	DA_CCB_POLLED		= 0x00,
109 	DA_CCB_PROBE		= 0x01,
110 	DA_CCB_PROBE2		= 0x02,
111 	DA_CCB_BUFFER_IO	= 0x03,
112 	DA_CCB_WAITING		= 0x04,
113 	DA_CCB_DUMP		= 0x05,
114 	DA_CCB_TRIM		= 0x06,
115 	DA_CCB_TYPE_MASK	= 0x0F,
116 	DA_CCB_RETRY_UA		= 0x10
117 } da_ccb_state;
118 
119 /* Offsets into our private area for storing information */
120 #define ccb_state	ppriv_field0
121 #define ccb_bio		ppriv_ptr1
122 
123 struct disk_params {
124 	u_int8_t  heads;
125 	u_int32_t cylinders;
126 	u_int8_t  secs_per_track;
127 	u_int32_t secsize;	/* Number of bytes/sector */
128 	u_int64_t sectors;	/* total number sectors */
129 };
130 
131 #define TRIM_MAX_BLOCKS 8
132 #define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * 64)
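/*
 * Each TRIM range entry is 8 bytes (a 48-bit LBA plus a 16-bit block
 * count), so 64 entries fit in one 512-byte payload block and the
 * TRIM_MAX_BLOCKS payload blocks below hold TRIM_MAX_RANGES entries.
 */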
133 struct trim_request {
134         uint8_t         data[TRIM_MAX_RANGES * 8];
135         struct bio      *bios[TRIM_MAX_RANGES];
136 };
137 
138 struct da_softc {
139 	struct	 bio_queue_head bio_queue_rd;
140 	struct	 bio_queue_head bio_queue_wr;
141 	struct	 bio_queue_head bio_queue_trim;
142 	struct	 devstat device_stats;
143 	SLIST_ENTRY(da_softc) links;
144 	LIST_HEAD(, ccb_hdr) pending_ccbs;
145 	da_state state;
146 	da_flags flags;
147 	da_quirks quirks;
148 	int	 minimum_cmd_size;
149 	int	 outstanding_cmds_rd;	/* outstanding read requests */
150 	int	 outstanding_cmds_wr;	/* outstanding write requests */
151 	int	 tps_ticks;
152 	long	 tps_rd;		/* read bandwidth exponential/tick */
153 	long	 tps_wr;		/* write bandwidth exponential/tick */
154 	int      trim_max_ranges;
155 	int      trim_running;
156 	int      trim_enabled;
157 	struct	 disk_params params;
158 	struct	 disk disk;
159 	union	 ccb saved_ccb;
160 	struct task		sysctl_task;
161 	struct sysctl_ctx_list	sysctl_ctx;
162 	struct sysctl_oid	*sysctl_tree;
163 	struct trim_request     trim_req;
164 };
165 
166 struct da_quirk_entry {
167 	struct scsi_inquiry_pattern inq_pat;
168 	da_quirks quirks;
169 };
170 
171 static const char quantum[] = "QUANTUM";
172 static const char microp[] = "MICROP";
173 
174 static struct da_quirk_entry da_quirk_table[] =
175 {
176 	/* SPI, FC devices */
177 	{
178 		/*
179 		 * Fujitsu M2513A MO drives.
180 		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
181 		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
182 		 * Reported by: W.Scholten <whs@xs4all.nl>
183 		 */
184 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
185 		/*quirks*/ DA_Q_NO_SYNC_CACHE
186 	},
187 	{
188 		/* See above. */
189 		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
190 		/*quirks*/ DA_Q_NO_SYNC_CACHE
191 	},
192 	{
193 		/*
194 		 * This particular Fujitsu drive doesn't like the
195 		 * synchronize cache command.
196 		 * Reported by: Tom Jackson <toj@gorilla.net>
197 		 */
198 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
199 		/*quirks*/ DA_Q_NO_SYNC_CACHE
200 	},
201 	{
202 		/*
203 		 * This drive doesn't like the synchronize cache command
204 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
205 		 * in NetBSD PR kern/6027, August 24, 1998.
206 		 */
207 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
208 		/*quirks*/ DA_Q_NO_SYNC_CACHE
209 	},
210 	{
211 		/*
212 		 * This drive doesn't like the synchronize cache command
213 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
214 		 * (PR 8882).
215 		 */
216 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
217 		/*quirks*/ DA_Q_NO_SYNC_CACHE
218 	},
219 	{
220 		/*
221 		 * Doesn't like the synchronize cache command.
222 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
223 		 */
224 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
225 		/*quirks*/ DA_Q_NO_SYNC_CACHE
226 	},
227 	{
228 		/*
229 		 * Doesn't like the synchronize cache command.
230 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
231 		 */
232 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
233 		/*quirks*/ DA_Q_NO_SYNC_CACHE
234 	},
235 	{
236 		/*
237 		 * Doesn't like the synchronize cache command.
238 		 */
239 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
240 		/*quirks*/ DA_Q_NO_SYNC_CACHE
241 	},
242 	{
243 		/*
244 		 * Doesn't like the synchronize cache command.
245 		 * Reported by: walter@pelissero.de
246 		 */
247 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
248 		/*quirks*/ DA_Q_NO_SYNC_CACHE
249 	},
250 	{
251 		/*
252 		 * Doesn't work correctly with 6 byte reads/writes.
253 		 * Returns illegal request, and points to byte 9 of the
254 		 * 6-byte CDB.
255 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
256 		 */
257 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
258 		/*quirks*/ DA_Q_NO_6_BYTE
259 	},
260 	{
261 		/* See above. */
262 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
263 		/*quirks*/ DA_Q_NO_6_BYTE
264 	},
265 	{
266 		/*
267 		 * Doesn't like the synchronize cache command.
268 		 * Reported by: walter@pelissero.de
269 		 */
270 		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
271 		/*quirks*/ DA_Q_NO_SYNC_CACHE
272 	},
273 	{
274 		/*
275 		 * The CISS RAID controllers do not support SYNC_CACHE
276 		 */
277 		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
278 		/*quirks*/ DA_Q_NO_SYNC_CACHE
279 	},
280 	{
281 		/*
282 		 * The same goes for the mly(4) controllers
283 		 */
284 		{T_DIRECT, SIP_MEDIA_FIXED, "MLY*", "*", "MYLX"},
285 		/*quirks*/ DA_Q_NO_SYNC_CACHE
286 	},
287 	/*
288 	 * USB mass storage devices supported by umass(4)
289 	 *
290 	 * NOTE: USB attachments automatically set DA_Q_NO_SYNC_CACHE so
291 	 *	 it does not have to be specified here.
292 	 */
293  	{
294  		/*
295  		 * Creative Nomad MUVO mp3 player (USB)
296  		 * PR: kern/53094
297  		 */
298  		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
299 		/*quirks*/ DA_Q_NO_PREVENT
300  	},
301 	{
302 		/*
303 		 * Sigmatel USB Flash MP3 Player
304 		 * PR: kern/57046
305 		 */
306 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
307 		/*quirks*/ DA_Q_NO_PREVENT
308 	},
309 	{
310 		/*
311 		 * SEAGRAND NP-900 MP3 Player
312 		 * PR: kern/64563
313 		 */
314 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
315 		/*quirks*/ DA_Q_NO_PREVENT
316 	},
317 	{
318 		/*
319 		 * Creative MUVO Slim mp3 player (USB)
320 		 * PR: usb/86131
321 		 */
322 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", "*"},
323 		/*quirks*/ DA_Q_NO_PREVENT
324 	},
325 	{
326 		/*
327 		 * Philips USB Key Audio KEY013
328 		 * PR: usb/68412
329 		 */
330 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
331 		/*quirks*/ DA_Q_NO_PREVENT
332 	},
333 };
334 
335 static	d_open_t	daopen;
336 static	d_close_t	daclose;
337 static	d_strategy_t	dastrategy;
338 static	d_dump_t	dadump;
339 static	d_ioctl_t	daioctl;
340 static	periph_init_t	dainit;
341 static	void		daasync(void *callback_arg, u_int32_t code,
342 				struct cam_path *path, void *arg);
343 static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
344 static	periph_ctor_t	daregister;
345 static	periph_dtor_t	dacleanup;
346 static	periph_start_t	dastart;
347 static	periph_oninv_t	daoninvalidate;
348 static	void		dadone(struct cam_periph *periph,
349 			       union ccb *done_ccb);
350 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
351 				u_int32_t sense_flags);
352 static void		daprevent(struct cam_periph *periph, int action);
353 static int		dagetcapacity(struct cam_periph *periph, int ccbflags);
354 static int		dacheckmedia(struct cam_periph *periph);
355 static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
356 				  uint64_t maxsector);
357 static void		daflushbioq(struct bio_queue_head *bioq, int error);
358 static void		dashutdown(void *arg, int howto);
359 
360 #ifndef DA_DEFAULT_TIMEOUT
361 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
362 #endif
363 
364 #ifndef	DA_DEFAULT_RETRY
365 #define	DA_DEFAULT_RETRY	4
366 #endif
367 
368 __read_mostly int da_retry_count = DA_DEFAULT_RETRY;
369 __read_mostly int da_default_timeout = DA_DEFAULT_TIMEOUT;
370 __read_mostly static int da_balance_enable = 1;
371 __read_mostly static int da_balance_ratio = 100;	/* read-to-write */
372 __read_mostly static int da_balance_debug = 0;
373 
374 SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
375             "CAM Direct Access Disk driver");
376 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
377            &da_retry_count, 0, "Normal I/O retry count");
378 TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
379 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
380            &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
381 TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
382 
383 SYSCTL_INT(_kern_cam_da, OID_AUTO, balance_enable, CTLFLAG_RW,
384            &da_balance_enable, 0, "Enable tps balancing");
385 SYSCTL_INT(_kern_cam_da, OID_AUTO, balance_ratio, CTLFLAG_RW,
386            &da_balance_ratio, 0, "Set read-to-write ratio 100=1:1");
387 SYSCTL_INT(_kern_cam_da, OID_AUTO, balance_debug, CTLFLAG_RW,
388            &da_balance_debug, 0, "Enable tps balance debugging");
389 
390 static struct periph_driver dadriver =
391 {
392 	dainit, "da",
393 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
394 };
395 
396 PERIPHDRIVER_DECLARE(da, dadriver);
397 
398 static struct dev_ops da_ops = {
399 	{ "da", 0, D_DISK | D_MPSAFE },
400 	.d_open =	daopen,
401 	.d_close =	daclose,
402 	.d_read =	physread,
403 	.d_write =	physwrite,
404 	.d_strategy =	dastrategy,
405 	.d_dump =	dadump,
406 	.d_ioctl =	daioctl
407 };
408 
409 static struct extend_array *daperiphs;
410 
411 MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
412 
413 static int
414 daioctl(struct dev_ioctl_args *ap)
415 {
416 	int unit;
417 	int error = 0;
418 	struct buf *bp;
419 	struct cam_periph *periph;
420 	int byte_count;
421 
422 	off_t *del_num = (off_t*)ap->a_data;
423 	off_t bytes_left;
424 	off_t bytes_start;
425 
426 	cdev_t dev = ap->a_head.a_dev;
427 
428 
429 	unit = dkunit(dev);
430 	periph = cam_extend_get(daperiphs, unit);
431 	if (periph == NULL)
432 		return(ENXIO);
433 
434 	switch (ap->a_cmd) {
435 	case DAIOCTRIM:
436 	{
437 
438 		bytes_left = del_num[1];
439 		bytes_start = del_num[0];
440 
441 		/* TRIM occurs on 512-byte sectors. */
442 		KKASSERT((bytes_left % 512) == 0);
443 		KKASSERT((bytes_start % 512) == 0);
444 
445 
446 		/* Break TRIM up into int-sized commands because of b_bcount */
447 		while (bytes_left) {
448 
449 			/*
450 			 * Rather than squeezing out more blocks in b_bcount
451 			 * and having to break up the TRIM request in da_start(),
452 			 * we ensure we can always TRIM this many bytes with one
453 			 * TRIM command (this is the case when the device only
454 			 * supports one TRIM block).
455 			 *
456 			 * With a min TRIM blksize of 1, a TRIM command can free
457 			 * 4194240 blks (64*65535): each LBA range can address
458 			 * 65535 blks and there are 64 such ranges in a 512-byte
459 			 * block.  And, 4194240 * 512 = 0x7FFF8000.
460 			 *
461 			 */
462 			byte_count = MIN(bytes_left, 0x7FFF8000);
463 			bp = getnewbuf(0, 0, 0, 1);
464 
465 			bp->b_cmd = BUF_CMD_FREEBLKS;
466 			bp->b_bio1.bio_offset = bytes_start;
467 			bp->b_bcount = byte_count;
468 			bp->b_bio1.bio_flags |= BIO_SYNC;
469 			bp->b_bio1.bio_done = biodone_sync;
470 
471 			dev_dstrategy(ap->a_head.a_dev, &bp->b_bio1);
472 
473 			if (biowait(&bp->b_bio1, "TRIM")) {
474 				kprintf("Error:%d\n", bp->b_error);
475 				brelse(bp);
476 				return(bp->b_error ? bp->b_error : EIO);
477 			}
478 			brelse(bp);
479 			bytes_left -= byte_count;
480 			bytes_start += byte_count;
481 		}
482 		break;
483 	}
484 	default:
485 		return(EINVAL);
486 	}
487 
488 	return(error);
489 }
490 
491 static int
492 daopen(struct dev_open_args *ap)
493 {
494 	cdev_t dev = ap->a_head.a_dev;
495 	struct cam_periph *periph;
496 	struct da_softc *softc;
497 	struct disk_info info;
498 	int unit;
499 	int error;
500 
501 	unit = dkunit(dev);
502 	periph = cam_extend_get(daperiphs, unit);
503 	if (periph == NULL) {
504 		return (ENXIO);
505 	}
506 
507 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
508 		return(ENXIO);
509 	}
510 
511 	cam_periph_lock(periph);
512 	if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
513 		cam_periph_unlock(periph);
514 		cam_periph_release(periph);
515 		return (error);
516 	}
517 
518 	unit = periph->unit_number;
519 	softc = (struct da_softc *)periph->softc;
520 
521 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
522 	    ("daopen: dev=%s (unit %d)\n", devtoname(dev),
523 	     unit));
524 
525 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
526 		/* Invalidate our pack information. */
527 		disk_invalidate(&softc->disk);
528 		softc->flags &= ~DA_FLAG_PACK_INVALID;
529 	}
530 
531 	error = dacheckmedia(periph);
532 	softc->flags |= DA_FLAG_OPEN;
533 
534 	if (error == 0) {
535 		struct ccb_getdev *cgd;
536 
537 		/* Build disk information structure */
538 		bzero(&info, sizeof(info));
539 		info.d_type = DTYPE_SCSI;
540 
541 		/*
542 		 * Grab the inquiry data to get the vendor and product names.
543 		 * Put them in the typename and packname for the label.
544 		 */
545 		cgd = &xpt_alloc_ccb()->cgd;
546 		xpt_setup_ccb(&cgd->ccb_h, periph->path, /*priority*/ 1);
547 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
548 		xpt_action((union ccb *)cgd);
549 		xpt_free_ccb(&cgd->ccb_h);
550 
551 		/*
552 		 * Check to see whether or not the blocksize is set yet.
553 		 * If it isn't, set it and then clear the blocksize
554 		 * unavailable flag for the device statistics.
555 		 */
556 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
557 			softc->device_stats.block_size = softc->params.secsize;
558 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
559 		}
560 	}
561 
562 	if (error == 0) {
563 		softc->flags &= ~DA_FLAG_CAP_MUTE;
564 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
565 		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
566 			daprevent(periph, PR_PREVENT);
567 	} else {
568 		softc->flags |= DA_FLAG_CAP_MUTE;
569 		softc->flags &= ~DA_FLAG_OPEN;
570 		cam_periph_release(periph);
571 	}
572 	cam_periph_unhold(periph, 1);
573 	return (error);
574 }
575 
576 static int
577 daclose(struct dev_close_args *ap)
578 {
579 	cdev_t dev = ap->a_head.a_dev;
580 	struct	cam_periph *periph;
581 	struct	da_softc *softc;
582 	int	unit;
583 	int	error;
584 
585 	unit = dkunit(dev);
586 	periph = cam_extend_get(daperiphs, unit);
587 	if (periph == NULL)
588 		return (ENXIO);
589 
590 	cam_periph_lock(periph);
591 	if ((error = cam_periph_hold(periph, 0)) != 0) {
592 		cam_periph_unlock(periph);
593 		cam_periph_release(periph);
594 		return (error);
595 	}
596 
597 	softc = (struct da_softc *)periph->softc;
598 
599 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
600 		union	ccb *ccb;
601 
602 		ccb = cam_periph_getccb(periph, /*priority*/1);
603 		ccb->ccb_h.ccb_state = DA_CCB_POLLED;
604 
605 		scsi_synchronize_cache(&ccb->csio,
606 				       /*retries*/1,
607 				       /*cbfcnp*/dadone,
608 				       MSG_SIMPLE_Q_TAG,
609 				       /*begin_lba*/0,/* Cover the whole disk */
610 				       /*lb_count*/0,
611 				       SSD_FULL_SIZE,
612 				       5 * 60 * 1000);
613 
614 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
615 				  /*sense_flags*/SF_RETRY_UA,
616 				  &softc->device_stats);
617 
618 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
619 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
620 			     CAM_SCSI_STATUS_ERROR) {
621 				int asc, ascq;
622 				int sense_key, error_code;
623 
624 				scsi_extract_sense(&ccb->csio.sense_data,
625 						   &error_code,
626 						   &sense_key,
627 						   &asc, &ascq);
628 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
629 					scsi_sense_print(&ccb->csio);
630 			} else {
631 				xpt_print(periph->path, "Synchronize cache "
632 				    "failed, status == 0x%x, scsi status == "
633 				    "0x%x\n", ccb->csio.ccb_h.status,
634 				    ccb->csio.scsi_status);
635 			}
636 		}
637 
638 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
639 			cam_release_devq(ccb->ccb_h.path,
640 					 /*relsim_flags*/0,
641 					 /*reduction*/0,
642 					 /*timeout*/0,
643 					 /*getcount_only*/0);
644 
645 		xpt_release_ccb(ccb);
646 
647 	}
648 
649 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
650 		if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
651 			daprevent(periph, PR_ALLOW);
652 		/*
653 		 * If we've got removable media, mark the blocksize as
654 		 * unavailable, since it could change when new media is
655 		 * inserted.
656 		 */
657 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
658 	}
659 
660 	/*
661 	 * Don't compound any ref counting software bugs with more.
662 	 */
663 	if (softc->flags & DA_FLAG_OPEN) {
664 		softc->flags &= ~DA_FLAG_OPEN;
665 		cam_periph_release(periph);
666 	} else {
667 		xpt_print(periph->path,
668 			  "daclose() called on an already closed device!\n");
669 	}
670 	cam_periph_unhold(periph, 1);
671 	return (0);
672 }
673 
674 /*
675  * Actually translate the requested transfer into one the physical driver
676  * can understand.  The transfer is described by a buf and will include
677  * only one physical transfer.
678  */
679 static int
680 dastrategy(struct dev_strategy_args *ap)
681 {
682 	cdev_t dev = ap->a_head.a_dev;
683 	struct bio *bio = ap->a_bio;
684 	struct buf *bp = bio->bio_buf;
685 	struct cam_periph *periph;
686 	struct da_softc *softc;
687 	u_int  unit;
688 
689 	unit = dkunit(dev);
690 	periph = cam_extend_get(daperiphs, unit);
691 	if (periph == NULL) {
692 		bp->b_error = ENXIO;
693 		goto bad;
694 	}
695 	softc = (struct da_softc *)periph->softc;
696 
697 	cam_periph_lock(periph);
698 
699 #if 0
700 	/*
701 	 * check it's not too big a transfer for our adapter
702 	 */
703 	scsi_minphys(bp, &sd_switch);
704 #endif
705 
706 	/*
707 	 * Mask interrupts so that the pack cannot be invalidated until
708 	 * after we are in the queue.  Otherwise, we might not properly
709 	 * clean up one of the buffers.
710 	 */
711 
712 	/*
713 	 * If the device has been made invalid, error out
714 	 */
715 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
716 		cam_periph_unlock(periph);
717 		bp->b_error = ENXIO;
718 		goto bad;
719 	}
720 
721 	/*
722 	 * Place it in the queue of disk activities for this disk
723 	 */
724 	if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)
725 		bioqdisksort(&softc->bio_queue_wr, bio);
726 	else if (bp->b_cmd == BUF_CMD_FREEBLKS)
727 		bioqdisksort(&softc->bio_queue_trim, bio);
728 	else
729 		bioqdisksort(&softc->bio_queue_rd, bio);
730 
731 	/*
732 	 * Schedule ourselves for performing the work.
733 	 */
734 	xpt_schedule(periph, /* XXX priority */1);
735 	cam_periph_unlock(periph);
736 
737 	return(0);
738 bad:
739 	bp->b_flags |= B_ERROR;
740 
741 	/*
742 	 * Correctly set the buf to indicate a completed xfer
743 	 */
744 	bp->b_resid = bp->b_bcount;
745 	biodone(bio);
746 	return(0);
747 }
748 
749 static int
750 dadump(struct dev_dump_args *ap)
751 {
752 	cdev_t dev = ap->a_head.a_dev;
753 	struct	    cam_periph *periph;
754 	struct	    da_softc *softc;
755 	u_int	    unit;
756 	u_int32_t   secsize;
757 	struct	    ccb_scsiio *csio;
758 
759 	unit = dkunit(dev);
760 	periph = cam_extend_get(daperiphs, unit);
761 	if (periph == NULL)
762 		return (ENXIO);
763 
764 	softc = (struct da_softc *)periph->softc;
765 	cam_periph_lock(periph);
766 	secsize = softc->params.secsize; /* XXX: or ap->a_secsize? */
767 
768 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
769 		cam_periph_unlock(periph);
770 		return (ENXIO);
771 	}
772 
773 	csio = &xpt_alloc_ccb()->csio;
774 
775 	/*
776 	 * Because length == 0 means we are supposed to flush the cache, we
777 	 * only try to write something if length > 0.
778 	 */
779 	if (ap->a_length > 0) {
780 		xpt_setup_ccb(&csio->ccb_h, periph->path, /*priority*/1);
781 		csio->ccb_h.flags |= CAM_POLLED;
782 		csio->ccb_h.ccb_state = DA_CCB_DUMP;
783 		scsi_read_write(csio,
784 				/*retries*/1,
785 				dadone,
786 				MSG_ORDERED_Q_TAG,
787 				/*read*/FALSE,
788 				/*byte2*/0,
789 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
790 				ap->a_offset / secsize,
791 				ap->a_length / secsize,
792 				/*data_ptr*/(u_int8_t *) ap->a_virtual,
793 				/*dxfer_len*/ap->a_length,
794 				/*sense_len*/SSD_FULL_SIZE,
795 				DA_DEFAULT_TIMEOUT * 1000);
796 		xpt_polled_action((union ccb *)csio);
797 
798 		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
799 			kprintf("Aborting dump due to I/O error.\n");
800 			if ((csio->ccb_h.status & CAM_STATUS_MASK) ==
801 			     CAM_SCSI_STATUS_ERROR)
802 				scsi_sense_print(csio);
803 			else
804 				kprintf("status == 0x%x, scsi status == 0x%x\n",
805 				       csio->ccb_h.status, csio->scsi_status);
806 			cam_periph_unlock(periph);
807 			xpt_free_ccb(&csio->ccb_h);
808 			return(EIO);
809 		}
810 		goto done;
811 	}
812 
813 	/*
814 	 * Sync the disk cache contents to the physical media.
815 	 */
816 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
817 
818 		xpt_setup_ccb(&csio->ccb_h, periph->path, /*priority*/1);
819 		csio->ccb_h.ccb_state = DA_CCB_DUMP;
820 		scsi_synchronize_cache(csio,
821 				       /*retries*/1,
822 				       /*cbfcnp*/dadone,
823 				       MSG_SIMPLE_Q_TAG,
824 				       /*begin_lba*/0,/* Cover the whole disk */
825 				       /*lb_count*/0,
826 				       SSD_FULL_SIZE,
827 				       5 * 60 * 1000);
828 		xpt_polled_action((union ccb *)csio);
829 
830 		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
831 			if ((csio->ccb_h.status & CAM_STATUS_MASK) ==
832 			     CAM_SCSI_STATUS_ERROR) {
833 				int asc, ascq;
834 				int sense_key, error_code;
835 
836 				scsi_extract_sense(&csio->sense_data,
837 						   &error_code,
838 						   &sense_key,
839 						   &asc, &ascq);
840 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
841 					scsi_sense_print(csio);
842 			} else {
843 				xpt_print(periph->path, "Synchronize cache "
844 				    "failed, status == 0x%x, scsi status == "
845 				    "0x%x\n",
846 				    csio->ccb_h.status, csio->scsi_status);
847 			}
848 		}
849 	}
850 done:
851 	cam_periph_unlock(periph);
852 	xpt_free_ccb(&csio->ccb_h);
853 
854 	return (0);
855 }
856 
857 static void
858 dainit(void)
859 {
860 	cam_status status;
861 
862 	/*
863 	 * Create our extend array for storing the devices we attach to.
864 	 */
865 	daperiphs = cam_extend_new();
866 	if (daperiphs == NULL) {
867 		kprintf("da: Failed to alloc extend array!\n");
868 		return;
869 	}
870 
871 	/*
872 	 * Install a global async callback.  This callback will
873 	 * receive async callbacks like "new device found".
874 	 */
875 	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
876 
877 	if (status != CAM_REQ_CMP) {
878 		kprintf("da: Failed to attach master async callback "
879 		       "due to status 0x%x!\n", status);
880 	} else {
881 		/* Register our shutdown event handler */
882 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
883 					   NULL, SHUTDOWN_PRI_SECOND)) == NULL)
884 			kprintf("%s: shutdown event registration failed!\n",
885 			    __func__);
886 	}
887 }
888 
889 static void
890 daoninvalidate(struct cam_periph *periph)
891 {
892 	struct da_softc *softc;
893 
894 	softc = (struct da_softc *)periph->softc;
895 
896 	/*
897 	 * De-register any async callbacks.
898 	 */
899 	xpt_register_async(0, daasync, periph, periph->path);
900 
901 	softc->flags |= DA_FLAG_PACK_INVALID;
902 
903 	/*
904 	 * Return all queued I/O with ENXIO.
905 	 * XXX Handle any transactions queued to the card
906 	 *     with XPT_ABORT_CCB.
907 	 */
908 	daflushbioq(&softc->bio_queue_trim, ENXIO);
909 	daflushbioq(&softc->bio_queue_wr, ENXIO);
910 	daflushbioq(&softc->bio_queue_rd, ENXIO);
911 	xpt_print(periph->path, "lost device\n");
912 }
913 
914 static void
915 daflushbioq(struct bio_queue_head *bioq, int error)
916 {
917 	struct bio *q_bio;
918 	struct buf *q_bp;
919 
920 	while ((q_bio = bioq_first(bioq)) != NULL){
921 		bioq_remove(bioq, q_bio);
922 		q_bp = q_bio->bio_buf;
923 		q_bp->b_resid = q_bp->b_bcount;
924 		q_bp->b_error = error;
925 		q_bp->b_flags |= B_ERROR;
926 		biodone(q_bio);
927 	}
928 }
929 
930 static void
931 dacleanup(struct cam_periph *periph)
932 {
933 	struct da_softc *softc;
934 
935 	softc = (struct da_softc *)periph->softc;
936 
937 	devstat_remove_entry(&softc->device_stats);
938 	cam_extend_release(daperiphs, periph->unit_number);
939 	xpt_print(periph->path, "removing device entry\n");
940 	/*
941 	 * If we can't free the sysctl tree, oh well...
942 	 */
943 	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
944 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
945 		xpt_print(periph->path, "can't remove sysctl context\n");
946 	}
947 	periph->softc = NULL;
948 	if (softc->disk.d_rawdev) {
949 		cam_periph_unlock(periph);
950 		disk_destroy(&softc->disk);
951 		cam_periph_lock(periph);
952 	}
953 
954 	kfree(softc, M_DEVBUF);
955 }
956 
957 static void
958 daasync(void *callback_arg, u_int32_t code,
959 	struct cam_path *path, void *arg)
960 {
961 	struct cam_periph *periph;
962 
963 	periph = (struct cam_periph *)callback_arg;
964 
965 	switch (code) {
966 	case AC_FOUND_DEVICE:
967 	{
968 		struct ccb_getdev *cgd;
969 		cam_status status;
970 
971 		cgd = (struct ccb_getdev *)arg;
972 		if (cgd == NULL)
973 			break;
974 
975 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
976 		    && SID_TYPE(&cgd->inq_data) != T_RBC
977 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
978 			break;
979 
980 		/*
981 		 * Don't complain if a valid peripheral is already attached.
982 		 */
983 		periph = cam_periph_find(cgd->ccb_h.path, "da");
984 		if (periph && (periph->flags & CAM_PERIPH_INVALID) == 0)
985 			break;
986 
987 		/*
988 		 * Allocate a peripheral instance for
989 		 * this device and start the probe
990 		 * process.
991 		 */
992 		status = cam_periph_alloc(daregister, daoninvalidate,
993 					  dacleanup, dastart,
994 					  "da", CAM_PERIPH_BIO,
995 					  cgd->ccb_h.path, daasync,
996 					  AC_FOUND_DEVICE, cgd);
997 
998 		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
999 			kprintf("%s: Unable to attach to new device "
1000 			    "due to status 0x%x\n", __func__, status);
1001 		}
1002 		break;
1003 	}
1004 	case AC_SENT_BDR:
1005 	case AC_BUS_RESET:
1006 	{
1007 		struct da_softc *softc;
1008 		struct ccb_hdr *ccbh;
1009 
1010 		softc = (struct da_softc *)periph->softc;
1011 		/*
1012 		 * Don't fail on the expected unit attention
1013 		 * that will occur.
1014 		 */
1015 		softc->flags |= DA_FLAG_RETRY_UA;
1016 		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
1017 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
1018 		/* FALLTHROUGH*/
1019 	}
1020 	default:
1021 		cam_periph_async(periph, code, path, arg);
1022 		break;
1023 	}
1024 }
1025 
1026 static void
1027 dasysctlinit(void *context, int pending)
1028 {
1029 	struct cam_periph *periph;
1030 	struct da_softc *softc;
1031 	char tmpstr[80], tmpstr2[80];
1032 
1033 	periph = (struct cam_periph *)context;
1034 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1035 		return;
1036 	}
1037 
1038 	softc = (struct da_softc *)periph->softc;
1039 	ksnprintf(tmpstr, sizeof(tmpstr),
1040 		  "CAM DA unit %d", periph->unit_number);
1041 	ksnprintf(tmpstr2, sizeof(tmpstr2),
1042 		  "%d", periph->unit_number);
1043 
1044 	sysctl_ctx_free(&softc->sysctl_ctx);
1045 	sysctl_ctx_init(&softc->sysctl_ctx);
1046 	softc->flags |= DA_FLAG_SCTX_INIT;
1047 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1048 		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1049 		CTLFLAG_RD, 0, tmpstr);
1050 	if (softc->sysctl_tree == NULL) {
1051 		kprintf("%s: unable to allocate sysctl tree\n", __func__);
1052 		cam_periph_release(periph);
1053 		return;
1054 	}
1055 
1056 	/*
1057 	 * Now register the sysctl handler, so the user can change the value
1058 	 * on the fly.
1059 	 */
1060 	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1061 		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1062 		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1063 		"Minimum CDB size");
1064 
1065 	/* Only create the option if the device supports TRIM */
1066 	if (softc->disk.d_info.d_trimflag) {
1067 		SYSCTL_ADD_INT(&softc->sysctl_ctx,
1068 		    SYSCTL_CHILDREN(softc->sysctl_tree),
1069 		    OID_AUTO,
1070 		    "trim_enabled",
1071 		    CTLFLAG_RW,
1072 		    &softc->trim_enabled,
1073 		    0,
1074 		    "Enable TRIM for this device (SSD)");
1075 	}
1076 
1077 	cam_periph_release(periph);
1078 }
1079 
1080 static int
1081 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
1082 {
1083 	int error, value;
1084 
1085 	value = *(int *)arg1;
1086 
1087 	error = sysctl_handle_int(oidp, &value, 0, req);
1088 
1089 	if ((error != 0)
1090 	 || (req->newptr == NULL))
1091 		return (error);
1092 
1093 	/*
1094 	 * Acceptable values here are 6, 10, 12, or 16.
1095 	 */
1096 	if (value < 6)
1097 		value = 6;
1098 	else if ((value > 6)
1099 	      && (value <= 10))
1100 		value = 10;
1101 	else if ((value > 10)
1102 	      && (value <= 12))
1103 		value = 12;
1104 	else if (value > 12)
1105 		value = 16;
1106 
1107 	*(int *)arg1 = value;
1108 
1109 	return (0);
1110 }
1111 
1112 static cam_status
1113 daregister(struct cam_periph *periph, void *arg)
1114 {
1115 	struct da_softc *softc;
1116 	struct ccb_pathinq *cpi;
1117 	struct ccb_getdev *cgd;
1118 	char tmpstr[80];
1119 	caddr_t match;
1120 
1121 	cgd = (struct ccb_getdev *)arg;
1122 	if (periph == NULL) {
1123 		kprintf("%s: periph was NULL!!\n", __func__);
1124 		return(CAM_REQ_CMP_ERR);
1125 	}
1126 
1127 	if (cgd == NULL) {
1128 		kprintf("%s: no getdev CCB, can't register device\n",
1129 		    __func__);
1130 		return(CAM_REQ_CMP_ERR);
1131 	}
1132 
1133 	softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
1134 	sysctl_ctx_init(&softc->sysctl_ctx);
1135 	LIST_INIT(&softc->pending_ccbs);
1136 	softc->state = DA_STATE_PROBE;
1137 	bioq_init(&softc->bio_queue_trim);
1138 	bioq_init(&softc->bio_queue_rd);
1139 	bioq_init(&softc->bio_queue_wr);
1140 	if (SID_IS_REMOVABLE(&cgd->inq_data))
1141 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
1142 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1143 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
1144 
1145 	/* Used to get TRIM status from AHCI driver */
1146 	if (cgd->inq_data.vendor_specific1[0] == 1) {
1147 		/*
1148 		 * Maximum number of LBA ranges an SSD can handle in a single
1149 		 * TRIM command.  vendor_specific1[1] is the number of 512-byte
1150 		 * blocks the SSD reports that can be passed in a TRIM cmd.
1151 		 */
1152 		softc->trim_max_ranges =
1153 		   min(cgd->inq_data.vendor_specific1[1] * 64, TRIM_MAX_RANGES);
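		/*
		 * e.g. a device reporting one 512-byte TRIM block allows 64
		 * ranges per command; the min() above caps the value at
		 * TRIM_MAX_RANGES (512).
		 */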
1154 	}
1155 
1156 	periph->softc = softc;
1157 
1158 	cam_extend_set(daperiphs, periph->unit_number, periph);
1159 
1160 	/*
1161 	 * See if this device has any quirks.
1162 	 */
1163 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1164 			       (caddr_t)da_quirk_table,
1165 			       NELEM(da_quirk_table),
1166 			       sizeof(*da_quirk_table), scsi_inquiry_match);
1167 
1168 	if (match != NULL)
1169 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1170 	else
1171 		softc->quirks = DA_Q_NONE;
1172 
1173 	/*
1174 	 * Unconditionally disable the synchronize cache command for
1175 	 * usb attachments.  It's just impossible to determine if the
1176 	 * device supports it or not and if it doesn't the port can
1177 	 * brick.
1178 	 */
1179 	if (strncmp(periph->sim->sim_name, "umass", 4) == 0) {
1180 		softc->quirks |= DA_Q_NO_SYNC_CACHE;
1181 	}
1182 
1183 	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
1184 
1185 	/* Check if the SIM does not want 6 byte commands */
1186 	cpi = &xpt_alloc_ccb()->cpi;
1187 	xpt_setup_ccb(&cpi->ccb_h, periph->path, /*priority*/1);
1188 	cpi->ccb_h.func_code = XPT_PATH_INQ;
1189 	xpt_action((union ccb *)cpi);
1190 	if (cpi->ccb_h.status == CAM_REQ_CMP && (cpi->hba_misc & PIM_NO_6_BYTE))
1191 		softc->quirks |= DA_Q_NO_6_BYTE;
1192 
1193 	/*
1194 	 * RBC devices don't have to support READ(6), only READ(10).
1195 	 */
1196 	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
1197 		softc->minimum_cmd_size = 10;
1198 	else
1199 		softc->minimum_cmd_size = 6;
1200 
1201 	/*
1202 	 * Load the user's default, if any.
1203 	 */
1204 	ksnprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
1205 		 periph->unit_number);
1206 	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
1207 
1208 	/*
1209 	 * 6, 10, 12, and 16 are the currently permissible values.
1210 	 */
1211 	if (softc->minimum_cmd_size < 6)
1212 		softc->minimum_cmd_size = 6;
1213 	else if ((softc->minimum_cmd_size > 6)
1214 	      && (softc->minimum_cmd_size <= 10))
1215 		softc->minimum_cmd_size = 10;
1216 	else if ((softc->minimum_cmd_size > 10)
1217 	      && (softc->minimum_cmd_size <= 12))
1218 		softc->minimum_cmd_size = 12;
1219 	else if (softc->minimum_cmd_size > 12)
1220 		softc->minimum_cmd_size = 16;
1221 
1222 	/*
1223 	 * The DA driver supports a blocksize, but
1224 	 * we don't know the blocksize until we do
1225 	 * a read capacity.  So, set a flag to
1226 	 * indicate that the blocksize is
1227 	 * unavailable right now.  We'll clear the
1228 	 * flag as soon as we've done a read capacity.
1229 	 */
1230 	devstat_add_entry(&softc->device_stats, "da",
1231 			  periph->unit_number, 0,
1232 	  		  DEVSTAT_BS_UNAVAILABLE,
1233 			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1234 			  DEVSTAT_PRIORITY_DISK);
1235 
1236 	/*
1237 	 * Register this media as a disk
1238 	 */
1239 	CAM_SIM_UNLOCK(periph->sim);
1240 	disk_create(periph->unit_number, &softc->disk, &da_ops);
1241 	if (cpi->maxio == 0 || cpi->maxio > MAXPHYS)
1242 		softc->disk.d_rawdev->si_iosize_max = MAXPHYS;
1243 	else
1244 		softc->disk.d_rawdev->si_iosize_max = cpi->maxio;
1245 	if (bootverbose) {
1246 		kprintf("%s%d: si_iosize_max:%d\n",
1247 		    periph->periph_name,
1248 		    periph->unit_number,
1249 		    softc->disk.d_rawdev->si_iosize_max);
1250 	}
1251 	CAM_SIM_LOCK(periph->sim);
1252 
1253 	/*
1254 	 * Add async callbacks for bus reset and
1255 	 * bus device reset calls.  I don't bother
1256 	 * checking if this fails as, in most cases,
1257 	 * the system will function just fine without
1258 	 * them and the only alternative would be to
1259 	 * not attach the device on failure.
1260 	 */
1261 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
1262 			   daasync, periph, periph->path);
1263 
1264 	/*
1265 	 * Take an exclusive refcount on the periph while dastart is called
1266 	 * to finish the probe.  The reference will be dropped in dadone at
1267 	 * the end of probe.
1268 	 */
1269 	xpt_free_ccb(&cpi->ccb_h);
1270 	cam_periph_hold(periph, 0);
1271 	xpt_schedule(periph, /*priority*/5);
1272 
1273 	return(CAM_REQ_CMP);
1274 }
1275 
1276 static void
1277 dastart(struct cam_periph *periph, union ccb *start_ccb)
1278 {
1279 	struct da_softc *softc;
1280 
1281 	softc = (struct da_softc *)periph->softc;
1282 
1283 	switch (softc->state) {
1284 	case DA_STATE_NORMAL:
1285 	{
1286 		/* Pull a buffer from the queue and get going on it */
1287 		struct bio *bio;
1288 		struct bio *bio_rd;
1289 		struct bio *bio_wr;
1290 		struct buf *bp;
1291 		u_int8_t tag_code;
1292 		int rd_limit;
1293 		int wr_limit;
1294 
1295 		/*
1296 		 * See if there is a buf with work for us to do..
1297 		 */
1298 		bio_rd = bioq_first(&softc->bio_queue_rd);
1299 		bio_wr = bioq_first(&softc->bio_queue_wr);
1300 
1301 		if (periph->immediate_priority <= periph->pinfo.priority) {
1302 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1303 					("queuing for immediate ccb\n"));
1304 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1305 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1306 					  periph_links.sle);
1307 			periph->immediate_priority = CAM_PRIORITY_NONE;
1308 			wakeup(&periph->ccb_list);
1309 			if (bio_rd || bio_wr) {
1310 				/*
1311 				 * Have more work to do, so ensure we stay
1312 				 * scheduled
1313 				 */
1314 				xpt_schedule(periph, /* XXX priority */1);
1315 			}
1316 			break;
1317 		}
1318 
1319 		/* Run the trim command if not already running */
1320 		if (!softc->trim_running &&
1321 		   (bio = bioq_first(&softc->bio_queue_trim)) != NULL) {
1322 			struct trim_request *req = &softc->trim_req;
1323 			struct bio *bio1;
1324 			int bps = 0, ranges = 0;
1325 
1326 			softc->trim_running = 1;
1327 			bzero(req, sizeof(*req));
1328 			bio1 = bio;
1329 			while (1) {
1330 				uint64_t lba;
1331 				int count;
1332 
1333 				bp = bio1->bio_buf;
1334 				count = bp->b_bcount / softc->params.secsize;
1335 				lba = bio1->bio_offset/softc->params.secsize;
1336 
1337 				bioq_remove(&softc->bio_queue_trim, bio1);
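				/*
				 * Each range entry below is 8 bytes: a 48-bit
				 * LBA (bytes 0-5, little-endian) plus a 16-bit
				 * block count (bytes 6-7), so a single range
				 * covers at most 0xffff blocks and larger bios
				 * are split across several ranges.
				 */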
1338 				while (count > 0) {
1339 					int c = min(count, 0xffff);
1340 					int off = ranges * 8;
1341 
1342 					req->data[off + 0] = lba & 0xff;
1343 					req->data[off + 1] = (lba >> 8) & 0xff;
1344 					req->data[off + 2] = (lba >> 16) & 0xff;
1345 					req->data[off + 3] = (lba >> 24) & 0xff;
1346 					req->data[off + 4] = (lba >> 32) & 0xff;
1347 					req->data[off + 5] = (lba >> 40) & 0xff;
1348 					req->data[off + 6] = c & 0xff;
1349 					req->data[off + 7] = (c >> 8) & 0xff;
1350 					lba += c;
1351 					count -= c;
1352 					ranges++;
1353 				}
1354 
1355 				/* Try to merge multiple TRIM requests */
1356 				req->bios[bps++] = bio1;
1357 				bio1 = bioq_first(&softc->bio_queue_trim);
1358 				if (bio1 == NULL ||
1359 				    bio1->bio_buf->b_bcount / softc->params.secsize >
1360 				    (softc->trim_max_ranges - ranges) * 0xffff)
1361 					break;
1362 			}
1363 
1364 
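			/*
			 * Issue all accumulated ranges as a single TRIM; the
			 * transfer length is rounded up to whole 512-byte
			 * payload blocks (64 range entries per block).
			 */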
1365 			cam_fill_csio(&start_ccb->csio,
1366 			    1/*retries*/,
1367 			    dadone,
1368 			    CAM_DIR_OUT,
1369 			    MSG_SIMPLE_Q_TAG,
1370 			    req->data,
1371 			    ((ranges + 63) / 64) * 512,
1372 			    SSD_FULL_SIZE,
1373 			    sizeof(struct scsi_rw_6),
1374 			    da_default_timeout*2);
1375 
1376 			start_ccb->ccb_h.ccb_state = DA_CCB_TRIM;
1377 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1378 			    &start_ccb->ccb_h, periph_links.le);
1379 			start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1380 			start_ccb->ccb_h.ccb_bio = bio;
1381 			devstat_start_transaction(&softc->device_stats);
1382 			xpt_action(start_ccb);
1383 			xpt_schedule(periph, 1);
1384 			break;
1385 		}
1386 
1387 		/*
1388 		 * Select a read or write buffer to queue.  Limit the number
1389 		 * of tags dedicated to reading or writing, giving reads
1390 		 * precedence.
1391 		 *
1392 		 * Writes to modern hard drives go into the HD's cache and
1393 		 * return completion nearly instantly.  That is, until the
1394 		 * cache becomes full.  When the HD's cache becomes full,
1395 		 * write commands will begin to stall.  If all available
1396 		 * tags are taken up by writes which saturate the drive,
1397 		 * reads will become tag-starved.
1398 		 *
1399 		 * A similar situation can occur with reads.  With many
1400 		 * parallel readers all tags can be taken up by reads
1401 		 * and prevent any writes from draining, even if the HD's
1402 		 * cache is not full.
1403 		 */
1404 		rd_limit = periph->sim->max_tagged_dev_openings * 2 / 3 + 1;
1405 		wr_limit = rd_limit;
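		/*
		 * e.g. with 32 tagged openings the 2/3 + 1 split yields
		 * rd_limit = wr_limit = 22, so neither direction alone can
		 * consume the entire tag pool.
		 */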
1406 
1407 		/*
1408 		 * When TPS balancing is enabled we force wr_limit to 0
1409 		 * as necessary to balance the read TPS against the write
1410 		 * TPS.  A lower or higher read:write ratio may be selected
1411 		 * via da_balance_ratio.
1412 		 *
1413 		 * wr_limit forcing stops queueing writes.  This is generally
1414 		 * necessary because devices buffer writes and may starve
1415 		 * reads even when plenty of read tags are available.
1416 		 *
1417 		 * When no reads are being done, normalize tps_rd to avoid
1418 		 * instantly crowbarring the write tps.
1419 		 */
1420 		if (da_balance_enable &&
1421 		    periph->sim->max_tagged_dev_openings >= 8 &&
1422 		    (bio_rd || softc->outstanding_cmds_rd) &&
1423 		    (bio_wr || softc->outstanding_cmds_wr) &&
1424 		    softc->tps_rd * 100 < softc->tps_wr * da_balance_ratio) {
1425 			wr_limit = 0;
1426 		} else if (bio_rd == NULL && softc->outstanding_cmds_rd == 0 &&
1427 			   softc->tps_rd < softc->tps_wr * 2) {
1428 			softc->tps_rd += 100;
1429 		}
1430 		if (softc->tps_ticks != ticks) {
1431 			softc->tps_ticks = ticks;
1432 			softc->tps_rd = (softc->tps_rd * (hz - 1)) / hz;
1433 			softc->tps_wr = (softc->tps_wr * (hz - 1)) / hz;
1434 		}
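		/*
		 * tps_rd/tps_wr are scaled by 100 (each transaction started
		 * below adds 100) and decay by (hz - 1) / hz once per tick,
		 * forming a rough exponential moving average of transactions
		 * per second; the debug output divides by 100.
		 */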
1435 
1436 #if 1
1437 		/* DEBUGGING */
1438 		static time_t savets;
1439 		if (da_balance_debug &&
1440 		    time_uptime != savets && (bio_rd || bio_wr ||
1441 					      softc->outstanding_cmds_rd ||
1442 					      softc->outstanding_cmds_wr)) {
1443 			kprintf("softc=%p %d/%d %d/%d tps %ld/%ld\n",
1444 				softc,
1445 				softc->outstanding_cmds_rd, rd_limit,
1446 				softc->outstanding_cmds_wr, wr_limit,
1447 				softc->tps_rd / 100, softc->tps_wr / 100);
1448 			savets = time_uptime;
1449 		}
1450 #endif
1451 		if (bio_rd && softc->outstanding_cmds_rd < rd_limit) {
1452 			bio = bio_rd;
1453 			bioq_remove(&softc->bio_queue_rd, bio);
1454 			softc->tps_rd += 100;
1455 		} else if (bio_wr && softc->outstanding_cmds_wr < wr_limit) {
1456 			bio = bio_wr;
1457 			bioq_remove(&softc->bio_queue_wr, bio);
1458 			softc->tps_wr += 100;
1459 		} else {
1460 			if (bio_rd)
1461 				softc->flags |= DA_FLAG_RD_LIMIT;
1462 			if (bio_wr)
1463 				softc->flags |= DA_FLAG_WR_LIMIT;
1464 			xpt_release_ccb(start_ccb);
1465 			break;
1466 		}
1467 
1468 		/*
1469 		 * We can queue new work.
1470 		 */
1471 		bp = bio->bio_buf;
1472 
1473 		devstat_start_transaction(&softc->device_stats);
1474 
1475 		tag_code = MSG_SIMPLE_Q_TAG;
1476 
1477 		switch(bp->b_cmd) {
1478 		case BUF_CMD_READ:
1479 		case BUF_CMD_WRITE:
1480 			/*
1481 			 * Block read/write op
1482 			 */
1483 			KKASSERT(bio->bio_offset % softc->params.secsize == 0);
1484 
1485 			scsi_read_write(
1486 				&start_ccb->csio,
1487 				da_retry_count,		/* retries */
1488 				dadone,
1489 				tag_code,
1490 				(bp->b_cmd == BUF_CMD_READ),
1491 				0,			/* byte2 */
1492 				softc->minimum_cmd_size,
1493 				bio->bio_offset / softc->params.secsize,
1494 				bp->b_bcount / softc->params.secsize,
1495 				bp->b_data,
1496 				bp->b_bcount,
1497 				SSD_FULL_SIZE,		/* sense_len */
1498 				da_default_timeout * 1000
1499 			);
1500 			break;
1501 		case BUF_CMD_FLUSH:
1502 			/*
1503 			 * Silently complete a flush request if the device
1504 			 * cannot handle it.
1505 			 */
1506 			if (softc->quirks & DA_Q_NO_SYNC_CACHE) {
1507 				xpt_release_ccb(start_ccb);
1508 				start_ccb = NULL;
1509 				devstat_end_transaction_buf(
1510 					&softc->device_stats, bp);
1511 				biodone(bio);
1512 			} else {
1513 				scsi_synchronize_cache(
1514 					&start_ccb->csio,
1515 					1,		/* retries */
1516 					dadone,		/* cbfcnp */
1517 					MSG_SIMPLE_Q_TAG,
1518 					0,		/* lba */
1519 					0,		/* count (whole disk) */
1520 					SSD_FULL_SIZE,
1521 					da_default_timeout*1000	/* timeout */
1522 				);
1523 			}
1524 			break;
1525 		case BUF_CMD_FREEBLKS:
1526 			if (softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM){
1527 				start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1528 				break;
1529 			}
1530 		default:
1531 			xpt_release_ccb(start_ccb);
1532 			start_ccb = NULL;
1533 			panic("dastart: unrecognized bio cmd %d", bp->b_cmd);
1534 			break; /* NOT REACHED */
1535 		}
1536 
1537 		/*
1538 		 * Block out any asynchronous callbacks
1539 		 * while we touch the pending ccb list.
1540 		 */
1541 		if (start_ccb) {
1542 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1543 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1544 					 &start_ccb->ccb_h, periph_links.le);
1545 			if (bp->b_cmd == BUF_CMD_WRITE ||
1546 			    bp->b_cmd == BUF_CMD_FLUSH) {
1547 				++softc->outstanding_cmds_wr;
1548 			} else {
1549 				++softc->outstanding_cmds_rd;
1550 			}
1551 
1552 			/* We expect a unit attention from this device */
1553 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1554 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1555 				softc->flags &= ~DA_FLAG_RETRY_UA;
1556 			}
1557 
1558 			start_ccb->ccb_h.ccb_bio = bio;
1559 			xpt_action(start_ccb);
1560 		}
1561 
1562 		/*
1563 		 * Be sure we stay scheduled if we have more work to do.
1564 		 */
1565 		if (bioq_first(&softc->bio_queue_rd) ||
1566 		    bioq_first(&softc->bio_queue_wr)) {
1567 			xpt_schedule(periph, 1);
1568 		}
1569 		break;
1570 	}
1571 	case DA_STATE_PROBE:
1572 	{
1573 		struct ccb_scsiio *csio;
1574 		struct scsi_read_capacity_data *rcap;
1575 
1576 		rcap = kmalloc(sizeof(*rcap), M_SCSIDA, M_INTWAIT | M_ZERO);
1577 		csio = &start_ccb->csio;
1578 		scsi_read_capacity(csio,
1579 				   /*retries*/4,
1580 				   dadone,
1581 				   MSG_SIMPLE_Q_TAG,
1582 				   rcap,
1583 				   SSD_FULL_SIZE,
1584 				   /*timeout*/5000);
1585 		start_ccb->ccb_h.ccb_bio = NULL;
1586 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1587 		xpt_action(start_ccb);
1588 		break;
1589 	}
1590 	case DA_STATE_PROBE2:
1591 	{
1592 		struct ccb_scsiio *csio;
1593 		struct scsi_read_capacity_data_16 *rcaplong;
1594 
1595 		rcaplong = kmalloc(sizeof(*rcaplong), M_SCSIDA,
1596 				   M_INTWAIT | M_ZERO);
1597 		csio = &start_ccb->csio;
1598 		scsi_read_capacity_16(csio,
1599 				    /*retries*/ 4,
1600 				    /*cbfcnp*/ dadone,
1601 				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
1602 				    /*lba*/ 0,
1603 				    /*reladr*/ 0,
1604 				    /*pmi*/ 0,
1605 				    rcaplong,
1606 				    /*sense_len*/ SSD_FULL_SIZE,
1607 				    /*timeout*/ 60000);
1608 		start_ccb->ccb_h.ccb_bio = NULL;
1609 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
1610 		xpt_action(start_ccb);
1611 		break;
1612 	}
1613 	}
1614 }
1615 
1616 static int
1617 cmd6workaround(union ccb *ccb)
1618 {
1619 	struct scsi_rw_6 cmd6;
1620 	struct scsi_rw_10 *cmd10;
1621 	struct da_softc *softc;
1622 	u_int8_t *cdb;
1623 	int frozen;
1624 
1625 	cdb = ccb->csio.cdb_io.cdb_bytes;
1626 
1627 	/* Translation only possible if CDB is an array and cmd is R/W6 */
1628 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
1629 	    (*cdb != READ_6 && *cdb != WRITE_6))
1630 		return 0;
1631 
1632 	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
1633 	    "increasing minimum_cmd_size to 10.\n");
1634  	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
1635 	softc->minimum_cmd_size = 10;
1636 
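	/*
	 * Rewrite the CDB in place: save the 6-byte form, then overlay the
	 * 10-byte form with the 3-byte LBA widened to 4 bytes and the 1-byte
	 * transfer length widened to 2 bytes.
	 */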
1637 	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
1638 	cmd10 = (struct scsi_rw_10 *)cdb;
1639 	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
1640 	cmd10->byte2 = 0;
1641 	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
1642 	cmd10->reserved = 0;
1643 	scsi_ulto2b(cmd6.length, cmd10->length);
1644 	cmd10->control = cmd6.control;
1645 	ccb->csio.cdb_len = sizeof(*cmd10);
1646 
1647 	/* Requeue request, unfreezing queue if necessary */
1648 	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1649  	ccb->ccb_h.status = CAM_REQUEUE_REQ;
1650 	xpt_action(ccb);
1651 	if (frozen) {
1652 		cam_release_devq(ccb->ccb_h.path,
1653 				 /*relsim_flags*/0,
1654 				 /*reduction*/0,
1655 				 /*timeout*/0,
1656 				 /*getcount_only*/0);
1657 	}
1658 	return (ERESTART);
1659 }
1660 
1661 static void
1662 dadone(struct cam_periph *periph, union ccb *done_ccb)
1663 {
1664 	struct da_softc *softc;
1665 	struct ccb_scsiio *csio;
1666 	struct disk_info info;
1667 
1668 	softc = (struct da_softc *)periph->softc;
1669 	csio = &done_ccb->csio;
1670 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1671 	case DA_CCB_BUFFER_IO:
1672 	case DA_CCB_TRIM:
1673 	{
1674 		struct buf *bp;
1675 		struct bio *bio;
1676 		int mustsched = 0;
1677 
1678 		bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
1679 		bp = bio->bio_buf;
1680 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1681 			int error;
1682 			int sf;
1683 
1684 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1685 				sf = SF_RETRY_UA;
1686 			else
1687 				sf = 0;
1688 
1689 			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1690 			if (error == ERESTART) {
1691 				/*
1692 				 * A retry was scheduled, so
1693 				 * just return.
1694 				 */
1695 				return;
1696 			}
1697 			if (error != 0) {
1698 				if (error == ENXIO) {
1699 					/*
1700 					 * Catastrophic error.  Mark our pack as
1701 					 * invalid.
1702 					 */
1703 					/*
1704 					 * XXX See if this is really a media
1705 					 * XXX change first?
1706 					 */
1707 					xpt_print(periph->path,
1708 					    "Invalidating pack\n");
1709 					softc->flags |= DA_FLAG_PACK_INVALID;
1710 				}
1711 
1712 				/*
1713 				 * Return all queued write I/O's with EIO
1714 				 * so the client can retry these I/Os in the
1715 				 * proper order should it attempt to recover.
1716 				 *
1717 				 * Leave read I/O's alone.
1718 				 */
1719 				daflushbioq(&softc->bio_queue_wr, EIO);
1720 				bp->b_error = error;
1721 				bp->b_resid = bp->b_bcount;
1722 				bp->b_flags |= B_ERROR;
1723 			} else {
1724 				bp->b_resid = csio->resid;
1725 				bp->b_error = 0;
1726 				if (bp->b_resid != 0)
1727 					bp->b_flags |= B_ERROR;
1728 			}
1729 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1730 				cam_release_devq(done_ccb->ccb_h.path,
1731 						 /*relsim_flags*/0,
1732 						 /*reduction*/0,
1733 						 /*timeout*/0,
1734 						 /*getcount_only*/0);
1735 		} else {
1736 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1737 				panic("REQ_CMP with QFRZN");
1738 			bp->b_resid = csio->resid;
1739 			if (csio->resid > 0)
1740 				bp->b_flags |= B_ERROR;
1741 		}
1742 
1743 		/*
1744 		 * Schedule the peripheral to pipeline further reads and
1745 		 * writes.  A completed write wakes up more pending writes.
1746 		 * A completed read must wake up on either pending reads
1747 		 * or writes due to TPS balancing.
1748 		 *
1749 		 * Block out any asynchronous callbacks while we touch the
1750 		 * pending ccb list.
1751 		 */
1752 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1753 		if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH) {
1754 			--softc->outstanding_cmds_wr;
1755 			if (softc->flags & DA_FLAG_WR_LIMIT) {
1756 				softc->flags &= ~DA_FLAG_WR_LIMIT;
1757 				mustsched = 1;
1758 			}
1759 		} else {
1760 			--softc->outstanding_cmds_rd;
1761 			if (softc->flags &
1762 			    (DA_FLAG_RD_LIMIT | DA_FLAG_WR_LIMIT)) {
1763 				softc->flags &=
1764 					~(DA_FLAG_RD_LIMIT | DA_FLAG_WR_LIMIT);
1765 				mustsched = 1;
1766 			}
1767 		}
1768 
1769 		devstat_end_transaction_buf(&softc->device_stats, bp);
1770 		if ((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) ==
1771 		    DA_CCB_TRIM) {
1772 			struct trim_request *req =
1773 			    (struct trim_request *) csio->data_ptr;
1774 			int i;
1775 
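			/*
			 * bios[0] is the bio hung off ccb_bio and is finished
			 * via biodone(bio) below; propagate the status to any
			 * additional bios merged into this TRIM request.
			 */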
1776 			for (i = 1; i < softc->trim_max_ranges &&
1777 			    req->bios[i]; i++) {
1778 				struct bio *bp1 = req->bios[i];
1779 
1780 				bp1->bio_buf->b_resid = bp->b_resid;
1781 				bp1->bio_buf->b_error = bp->b_error;
1782 				if (bp->b_flags & B_ERROR)
1783 					bp1->bio_buf->b_flags |= B_ERROR;
1784 				biodone(bp1);
1785 			}
1786 			softc->trim_running = 0;
1787 			biodone(bio);
1788 			xpt_schedule(periph,1);
1789 		} else
1790 			biodone(bio);
1791 
1792 
1793 		if (mustsched)
1794 			xpt_schedule(periph, /*priority*/1);
1795 
1796 		break;
1797 	}
1798 	case DA_CCB_PROBE:
1799 	case DA_CCB_PROBE2:
1800 	{
1801 		struct	   scsi_read_capacity_data *rdcap;
1802 		struct     scsi_read_capacity_data_16 *rcaplong;
1803 		char	   announce_buf[80];
1804 		int	   doinfo = 0;
1805 
1806 		rdcap = NULL;
1807 		rcaplong = NULL;
1808 		if (softc->state == DA_STATE_PROBE)
1809 			rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
1810 		else
1811 			rcaplong = (struct scsi_read_capacity_data_16 *)
1812 				    csio->data_ptr;
1813 
1814 		bzero(&info, sizeof(info));
1815 		info.d_type = DTYPE_SCSI;
1816 		info.d_serialno = xpt_path_serialno(periph->path);
1817 
1818 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1819 			struct disk_params *dp;
1820 			uint32_t block_size;
1821 			uint64_t maxsector;
1822 
1823 			if (softc->state == DA_STATE_PROBE) {
1824 				block_size = scsi_4btoul(rdcap->length);
1825 				maxsector = scsi_4btoul(rdcap->addr);
1826 
1827 				/*
1828 				 * According to SBC-2, if the standard 10
1829 				 * byte READ CAPACITY command returns 2^32 - 1,
1830 				 * we should issue the 16 byte version of
1831 				 * the command, since the device in question
1832 				 * has more sectors than can be represented
1833 				 * with the short version of the command.
1834 				 */
1835 				if (maxsector == 0xffffffff) {
1836 					softc->state = DA_STATE_PROBE2;
1837 					kfree(rdcap, M_SCSIDA);
1838 					xpt_release_ccb(done_ccb);
1839 					xpt_schedule(periph, /*priority*/5);
1840 					return;
1841 				}
1842 			} else {
1843 				block_size = scsi_4btoul(rcaplong->length);
1844 				maxsector = scsi_8btou64(rcaplong->addr);
1845 			}
1846 			dasetgeom(periph, block_size, maxsector);
1847 			dp = &softc->params;
1848 			ksnprintf(announce_buf, sizeof(announce_buf),
1849 				"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1850 				(uintmax_t) (((uintmax_t)dp->secsize *
1851 				dp->sectors) / (1024*1024)),
1852 				(uintmax_t)dp->sectors,
1853 				dp->secsize, dp->heads, dp->secs_per_track,
1854 				dp->cylinders);
1855 
1856 			info.d_media_blksize = softc->params.secsize;
1857 			info.d_media_blocks = softc->params.sectors;
1858 			info.d_media_size = 0;
1859 			info.d_secpertrack = softc->params.secs_per_track;
1860 			info.d_nheads = softc->params.heads;
1861 			info.d_ncylinders = softc->params.cylinders;
1862 			info.d_secpercyl = softc->params.heads *
1863 						softc->params.secs_per_track;
1864 			info.d_serialno = xpt_path_serialno(periph->path);
1865 			doinfo = 1;
1866 		} else {
1867 			int	error;
1868 
1869 			announce_buf[0] = '\0';
1870 
1871 			/*
1872 			 * Retry any UNIT ATTENTION type errors.  They
1873 			 * are expected at boot.
1874 			 */
1875 			error = daerror(done_ccb, CAM_RETRY_SELTO,
1876 					SF_RETRY_UA|SF_NO_PRINT);
1877 			if (error == ERESTART) {
1878 				/*
1879 				 * A retry was scheduled, so
1880 				 * just return.
1881 				 */
1882 				return;
1883 			} else if (error != 0) {
1884 				struct scsi_sense_data *sense;
1885 				int asc, ascq;
1886 				int sense_key, error_code;
1887 				int have_sense;
1888 				cam_status status;
1889 				struct ccb_getdev *cgd;
1890 
1891 				/* Don't wedge this device's queue */
1892 				status = done_ccb->ccb_h.status;
1893 				if ((status & CAM_DEV_QFRZN) != 0)
1894 					cam_release_devq(done_ccb->ccb_h.path,
1895 							 /*relsim_flags*/0,
1896 							 /*reduction*/0,
1897 							 /*timeout*/0,
1898 							 /*getcount_only*/0);
1899 
1900 				cgd = &xpt_alloc_ccb()->cgd;
1901 				xpt_setup_ccb(&cgd->ccb_h,
1902 					      done_ccb->ccb_h.path,
1903 					      /* priority */ 1);
1904 				cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1905 				xpt_action((union ccb *)cgd);
1906 
1907 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1908 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1909 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1910 					have_sense = FALSE;
1911 				else
1912 					have_sense = TRUE;
1913 
1914 				if (have_sense) {
1915 					sense = &csio->sense_data;
1916 					scsi_extract_sense(sense, &error_code,
1917 							   &sense_key,
1918 							   &asc, &ascq);
1919 				}
1920 				/*
1921 				 * Attach to anything that claims to be a
1922 				 * direct access or optical disk device,
1923 				 * as long as it doesn't return a "Logical
1924 				 * unit not supported" (0x25) error.
1925 				 */
1926 				if ((have_sense) && (asc != 0x25)
1927 				 && (error_code == SSD_CURRENT_ERROR)) {
1928 					const char *sense_key_desc;
1929 					const char *asc_desc;
1930 
1931 					scsi_sense_desc(sense_key, asc, ascq,
1932 							&cgd->inq_data,
1933 							&sense_key_desc,
1934 							&asc_desc);
1935 					ksnprintf(announce_buf,
1936 					    sizeof(announce_buf),
1937 						"Attempt to query device "
1938 						"size failed: %s, %s",
1939 						sense_key_desc,
1940 						asc_desc);
1941 					info.d_media_blksize = 512;
1942 					doinfo = 1;
1943 				} else {
1944 					if (have_sense)
1945 						scsi_sense_print(
1946 							&done_ccb->csio);
1947 					else {
1948 						xpt_print(periph->path,
1949 						    "got CAM status %#x\n",
1950 						    done_ccb->ccb_h.status);
1951 					}
1952 
1953 					xpt_print(periph->path, "fatal error, "
1954 					    "failed to attach to device\n");
1955 
1956 					/*
1957 					 * Free up resources.
1958 					 */
1959 					cam_periph_invalidate(periph);
1960 				}
1961 				xpt_free_ccb(&cgd->ccb_h);
1962 			}
1963 		}
1964 		kfree(csio->data_ptr, M_SCSIDA);
1965 		if (announce_buf[0] != '\0')
1966 			xpt_announce_periph(periph, announce_buf);
1967 
1968 		if (softc->trim_max_ranges) {
1969 			info.d_trimflag |= DA_FLAG_CAN_TRIM;
1970 			kprintf("%s%d: supports TRIM\n",
1971 				periph->periph_name,
1972 				periph->unit_number);
1973 		}
1974 		softc->state = DA_STATE_NORMAL;
1975 
1976 		/*
1977 		 * Since our peripheral may be invalidated by an error
1978 		 * above or an external event, we must release our CCB
1979 		 * before releasing the probe lock on the peripheral.
1980 		 * The peripheral will only go away once the last lock
1981 		 * is removed, and we need it around for the CCB release
1982 		 * operation.
1983 		 */
1984 		xpt_release_ccb(done_ccb);
1985 		cam_periph_unhold(periph, 0);
1986 		if (doinfo) {
1987 			CAM_SIM_UNLOCK(periph->sim);
1988 			disk_setdiskinfo(&softc->disk, &info);
1989 			CAM_SIM_LOCK(periph->sim);
1990 
1991 			/*
1992 			 * Create our sysctl variables, now that we know
1993 			 * we have successfully attached.
1994 			 */
1995 			taskqueue_enqueue(taskqueue_thread[mycpuid],
1996 					  &softc->sysctl_task);
1997 		}
1998 		return;
1999 	}
2000 	case DA_CCB_WAITING:
2001 	{
2002 		/* Caller will release the CCB */
2003 		wakeup(&done_ccb->ccb_h.cbfcnp);
2004 		return;
2005 	}
2006 	case DA_CCB_DUMP:
2007 		/* No-op.  We're polling */
2008 		return;
2009 	case DA_CCB_POLLED:
2010 		/* Caller releases ccb */
2011 		wakeup(&done_ccb->ccb_h.cbfcnp);
2012 		return;
2013 	default:
2014 		break;
2015 	}
2016 	xpt_release_ccb(done_ccb);
2017 }
2018 
2019 static int
2020 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
2021 {
2022 	struct da_softc	  *softc;
2023 	struct cam_periph *periph;
2024 	int error;
2025 
2026 	periph = xpt_path_periph(ccb->ccb_h.path);
2027 	softc = (struct da_softc *)periph->softc;
2028 
2029 	/*
2030 	 * Automatically detect devices that do not support
2031 	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
2032 	 */
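	/*
	 * (cmd6workaround(), earlier in this file, is expected to rewrite
	 * the 6-byte CDB as its 10-byte equivalent and return ERESTART so
	 * the request is reissued; hence the ERESTART check below.)
	 */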
2033 	error = 0;
2034 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
2035 		error = cmd6workaround(ccb);
2036 	} else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
2037 		   CAM_SCSI_STATUS_ERROR)
2038 	 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
2039 	 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
2040 	 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
2041 	 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
2042 		int sense_key, error_code, asc, ascq;
2043 
2044 		scsi_extract_sense(&ccb->csio.sense_data,
2045 				   &error_code, &sense_key, &asc, &ascq);
2046 		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
2047 			error = cmd6workaround(ccb);
2048 	}
2049 	if (error == ERESTART)
2050 		return (ERESTART);
2051 
2052 	/*
2053 	 * XXX
2054 	 * Until we have a better way of doing pack validation,
2055 	 * don't treat UAs as errors.
2056 	 */
2057 	sense_flags |= SF_RETRY_UA;
2058 	return(cam_periph_error(ccb, cam_flags, sense_flags,
2059 				&softc->saved_ccb));
2060 }
2061 
2062 static void
2063 daprevent(struct cam_periph *periph, int action)
2064 {
2065 	struct	da_softc *softc;
2066 	union	ccb *ccb;
2067 	int	error;
2068 
2069 	softc = (struct da_softc *)periph->softc;
2070 
2071 	if (((action == PR_ALLOW)
2072 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
2073 	 || ((action == PR_PREVENT)
2074 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
2075 		return;
2076 	}
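	/*
	 * Issue a PREVENT ALLOW MEDIUM REMOVAL command and track the result
	 * in DA_FLAG_PACK_LOCKED; the check above skips the command when
	 * the pack is already in the requested state.
	 */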
2077 
2078 	ccb = cam_periph_getccb(periph, /*priority*/1);
2079 	ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2080 
2081 	scsi_prevent(&ccb->csio,
2082 		     /*retries*/1,
2083 		     /*cbfcnp*/dadone,
2084 		     MSG_SIMPLE_Q_TAG,
2085 		     action,
2086 		     SSD_FULL_SIZE,
2087 		     5000);
2088 
2089 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
2090 				  SF_RETRY_UA, &softc->device_stats);
2091 
2092 	if (error == 0) {
2093 		if (action == PR_ALLOW)
2094 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
2095 		else
2096 			softc->flags |= DA_FLAG_PACK_LOCKED;
2097 	}
2098 
2099 	xpt_release_ccb(ccb);
2100 }
2101 
2102 /*
2103  * Check media on open, e.g. card reader devices which had no initial media.
2104  */
2105 static int
2106 dacheckmedia(struct cam_periph *periph)
2107 {
2108 	struct disk_params *dp;
2109 	struct da_softc *softc;
2110 	struct disk_info info;
2111 	int error;
2112 	int mute;
2113 
2114 	softc = (struct da_softc *)periph->softc;
2115 	dp = &softc->params;
2116 
2117 	if (softc->flags & DA_FLAG_CAP_MUTE)	/* additional ccb flags */
2118 		mute = CAM_QUIET;
2119 	else
2120 		mute = 0;
2121 
2122 	error = dagetcapacity(periph, mute);
2123 
2124 	/*
2125 	 * Only reprobe on initial open and if the media is removable.
2126 	 *
2127 	 * NOTE: If we call disk_setdiskinfo() the disk layer takes a
2128 	 *	 while to reprobe the slices and partitions, which can
2129 	 *	 interfere with booting.  So avoid it if nothing has
2130 	 *	 changed.  XXX
2131 	 */
2132 	if (softc->flags & DA_FLAG_OPEN)
2133 		return (error);
2134 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
2135 		return (error);
2136 
2137 	bzero(&info, sizeof(info));
2138 	info.d_type = DTYPE_SCSI;
2139 	info.d_serialno = xpt_path_serialno(periph->path);
2140 
2141 	if (error == 0) {
2142 		CAM_SIM_UNLOCK(periph->sim);
2143 		info.d_media_blksize = softc->params.secsize;
2144 		info.d_media_blocks = softc->params.sectors;
2145 		info.d_media_size = 0;
2146 		info.d_secpertrack = softc->params.secs_per_track;
2147 		info.d_nheads = softc->params.heads;
2148 		info.d_ncylinders = softc->params.cylinders;
2149 		info.d_secpercyl = softc->params.heads *
2150 					softc->params.secs_per_track;
2151 		info.d_serialno = xpt_path_serialno(periph->path);
2152 		if (info.d_media_blocks != softc->disk.d_info.d_media_blocks) {
2153 			kprintf("%s%d: open removable media: "
2154 				"%juMB (%ju %u byte sectors: %dH %dS/T %dC)\n",
2155 				periph->periph_name, periph->unit_number,
2156 				(uintmax_t)(((uintmax_t)dp->secsize *
2157 					     dp->sectors) / (1024*1024)),
2158 				(uintmax_t)dp->sectors, dp->secsize,
2159 				dp->heads, dp->secs_per_track, dp->cylinders);
2160 			disk_setdiskinfo(&softc->disk, &info);
2161 		}
2162 		CAM_SIM_LOCK(periph->sim);
2163 	} else {
2164 		if (!mute || bootverbose) {
2165 			kprintf("%s%d: open removable media: "
2166 				"no media present\n",
2167 				periph->periph_name, periph->unit_number);
2168 		}
2169 		info.d_media_blksize = 512;
2170 		disk_setdiskinfo(&softc->disk, &info);
2171 	}
2172 	return (error);
2173 }
2174 
2175 static int
2176 dagetcapacity(struct cam_periph *periph, int ccbflags)
2177 {
2178 	struct da_softc *softc;
2179 	union ccb *ccb;
2180 	struct scsi_read_capacity_data *rcap;
2181 	struct scsi_read_capacity_data_16 *rcaplong;
2182 	uint32_t block_len;
2183 	uint64_t maxsector;
2184 	int error;
2185 
2186 	softc = (struct da_softc *)periph->softc;
2187 	block_len = 0;
2188 	maxsector = 0;
2189 	error = 0;
2190 
2191 	/* Do a read capacity */
2192 	rcap = (struct scsi_read_capacity_data *)kmalloc(sizeof(*rcaplong),
2193 							 M_SCSIDA, M_INTWAIT);
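	/*
	 * The buffer above is deliberately sized for the larger 16 byte
	 * READ CAPACITY response so it can be reused below (via the
	 * rcaplong cast) without reallocating.
	 */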
2194 
2195 	ccb = cam_periph_getccb(periph, /*priority*/1);
2196 	ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2197 
2198 	scsi_read_capacity(&ccb->csio,
2199 			   /*retries*/4,
2200 			   /*cbfcnp*/dadone,
2201 			   MSG_SIMPLE_Q_TAG,
2202 			   rcap,
2203 			   SSD_FULL_SIZE,
2204 			   /*timeout*/60000);
2205 	ccb->ccb_h.ccb_bio = NULL;
2206 	ccb->ccb_h.flags |= ccbflags;
2207 
2208 	error = cam_periph_runccb(ccb, daerror,
2209 				  /*cam_flags*/CAM_RETRY_SELTO,
2210 				  /*sense_flags*/SF_RETRY_UA,
2211 				  &softc->device_stats);
2212 
2213 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2214 		cam_release_devq(ccb->ccb_h.path,
2215 				 /*relsim_flags*/0,
2216 				 /*reduction*/0,
2217 				 /*timeout*/0,
2218 				 /*getcount_only*/0);
2219 
2220 	if (error == 0) {
2221 		block_len = scsi_4btoul(rcap->length);
2222 		maxsector = scsi_4btoul(rcap->addr);
2223 
2224 		if (maxsector != 0xffffffff)
2225 			goto done;
2226 	} else
2227 		goto done;
2228 
2229 	rcaplong = (struct scsi_read_capacity_data_16 *)rcap;
2230 
2231 	scsi_read_capacity_16(&ccb->csio,
2232 			      /*retries*/ 4,
2233 			      /*cbfcnp*/ dadone,
2234 			      /*tag_action*/ MSG_SIMPLE_Q_TAG,
2235 			      /*lba*/ 0,
2236 			      /*reladr*/ 0,
2237 			      /*pmi*/ 0,
2238 			      rcaplong,
2239 			      /*sense_len*/ SSD_FULL_SIZE,
2240 			      /*timeout*/ 60000);
2241 	ccb->ccb_h.ccb_bio = NULL;
2242 
2243 	error = cam_periph_runccb(ccb, daerror,
2244 				  /*cam_flags*/CAM_RETRY_SELTO,
2245 				  /*sense_flags*/SF_RETRY_UA,
2246 				  &softc->device_stats);
2247 
2248 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2249 		cam_release_devq(ccb->ccb_h.path,
2250 				 /*relsim_flags*/0,
2251 				 /*reduction*/0,
2252 				 /*timeout*/0,
2253 				 /*getcount_only*/0);
2254 
2255 	if (error == 0) {
2256 		block_len = scsi_4btoul(rcaplong->length);
2257 		maxsector = scsi_8btou64(rcaplong->addr);
2258 	}
2259 
2260 done:
2261 
2262 	if (error == 0)
2263 		dasetgeom(periph, block_len, maxsector);
2264 
2265 	xpt_release_ccb(ccb);
2266 
2267 	kfree(rcap, M_SCSIDA);
2268 
2269 	return (error);
2270 }
2271 
2272 static void
2273 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
2274 {
2275 	struct ccb_calc_geometry *ccg;
2276 	struct da_softc *softc;
2277 	struct disk_params *dp;
2278 
2279 	softc = (struct da_softc *)periph->softc;
2280 
2281 	dp = &softc->params;
2282 	dp->secsize = block_len;
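	/*
	 * READ CAPACITY reports the address of the last logical block,
	 * so the total sector count is maxsector + 1.
	 */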
2283 	dp->sectors = maxsector + 1;
2284 	/*
2285 	 * Have the controller provide us with a geometry
2286 	 * for this disk.  The only time the geometry
2287 	 * matters is when we boot and the controller
2288 	 * is the only one knowledgeable enough to come
2289 	 * up with something that will make this a bootable
2290 	 * device.
2291 	 */
2292 	ccg = &xpt_alloc_ccb()->ccg;
2293 	xpt_setup_ccb(&ccg->ccb_h, periph->path, /*priority*/1);
2294 	ccg->ccb_h.func_code = XPT_CALC_GEOMETRY;
2295 	ccg->block_size = dp->secsize;
2296 	ccg->volume_size = dp->sectors;
2297 	ccg->heads = 0;
2298 	ccg->secs_per_track = 0;
2299 	ccg->cylinders = 0;
2300 	xpt_action((union ccb*)ccg);
2301 	if ((ccg->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2302 		/*
2303 		 * We don't know what went wrong here, but just pick
2304 		 * a geometry so we don't have nasty things like divide
2305 		 * by zero.
2306 		 */
2307 		dp->heads = 255;
2308 		dp->secs_per_track = 255;
2309 		dp->cylinders = dp->sectors / (255 * 255);
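		/* e.g. 2^32 sectors / (255 * 255) = 66051 cylinders */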
2310 		if (dp->cylinders == 0) {
2311 			dp->cylinders = 1;
2312 		}
2313 	} else {
2314 		dp->heads = ccg->heads;
2315 		dp->secs_per_track = ccg->secs_per_track;
2316 		dp->cylinders = ccg->cylinders;
2317 	}
2318 	xpt_free_ccb(&ccg->ccb_h);
2319 }
2320 
2321 /*
2322  * Step through all DA peripheral drivers, and if the device is still open,
2323  * sync the disk cache to physical media.
2324  */
2325 static void
2326 dashutdown(void * arg, int howto)
2327 {
2328 	struct cam_periph *periph;
2329 	struct da_softc *softc;
2330 
2331 	TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
2332 		union ccb *ccb;
2333 
2334 		cam_periph_lock(periph);
2335 		softc = (struct da_softc *)periph->softc;
2336 
2337 		/*
2338 		 * We only sync the cache if the drive is still open, and
2339 		 * if the drive is capable of it..
2340 		 * if the drive is capable of it.
2341 		if (((softc->flags & DA_FLAG_OPEN) == 0)
2342 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
2343 			cam_periph_unlock(periph);
2344 			continue;
2345 		}
2346 
2347 		ccb = xpt_alloc_ccb();
2348 		xpt_setup_ccb(&ccb->ccb_h, periph->path, /*priority*/1);
2349 
2350 		ccb->ccb_h.ccb_state = DA_CCB_DUMP;
2351 		scsi_synchronize_cache(&ccb->csio,
2352 				       /*retries*/1,
2353 				       /*cbfcnp*/dadone,
2354 				       MSG_SIMPLE_Q_TAG,
2355 				       /*begin_lba*/0, /* whole disk */
2356 				       /*lb_count*/0,
2357 				       SSD_FULL_SIZE,
2358 				       60 * 60 * 1000);
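		/*
		 * The timeout above is one hour (in milliseconds); flushing
		 * a large write cache at shutdown can legitimately take a
		 * while.
		 */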
2359 
2360 		xpt_polled_action(ccb);
2361 
2362 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2363 			if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
2364 			     CAM_SCSI_STATUS_ERROR)
2365 			 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)){
2366 				int error_code, sense_key, asc, ascq;
2367 
2368 				scsi_extract_sense(&ccb->csio.sense_data,
2369 						   &error_code, &sense_key,
2370 						   &asc, &ascq);
2371 
2372 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
2373 					scsi_sense_print(&ccb->csio);
2374 			} else {
2375 				xpt_print(periph->path, "Synchronize "
2376 				    "cache failed, status == 0x%x, scsi status "
2377 				    "== 0x%x\n", ccb->ccb_h.status,
2378 				    ccb->csio.scsi_status);
2379 			}
2380 		}
2381 
2382 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2383 			cam_release_devq(ccb->ccb_h.path,
2384 					 /*relsim_flags*/0,
2385 					 /*reduction*/0,
2386 					 /*timeout*/0,
2387 					 /*getcount_only*/0);
2388 
2389 		cam_periph_unlock(periph);
2390 		xpt_free_ccb(&ccb->ccb_h);
2391 	}
2392 }
2393 
2394 #else /* !_KERNEL */
2395 
2396 /*
2397  * XXX This is only left out of the kernel build to silence warnings.  If,
2398  * for some reason this function is used in the kernel, the ifdefs should
2399  * for some reason, this function is used in the kernel, the ifdefs should
2400  */
2401 void
2402 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
2403 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
2404 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
2405 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
2406 		 u_int32_t timeout)
2407 {
2408 	struct scsi_format_unit *scsi_cmd;
2409 
2410 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
2411 	scsi_cmd->opcode = FORMAT_UNIT;
2412 	scsi_cmd->byte2 = byte2;
2413 	scsi_ulto2b(ileave, scsi_cmd->interleave);
2414 
2415 	cam_fill_csio(csio,
2416 		      retries,
2417 		      cbfcnp,
2418 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
2419 		      tag_action,
2420 		      data_ptr,
2421 		      dxfer_len,
2422 		      sense_len,
2423 		      sizeof(*scsi_cmd),
2424 		      timeout);
2425 }
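
/*
 * Example usage (userland, via libcam): a minimal, untested sketch.  The
 * device path and the zero byte2/interleave values are purely illustrative;
 * a real caller (e.g. camcontrol(8)) adds proper error handling and option
 * processing.
 *
 *	struct cam_device *dev = cam_open_device("/dev/pass0", O_RDWR);
 *	union ccb *ccb = cam_getccb(dev);
 *
 *	scsi_format_unit(&ccb->csio,
 *			 1,			retries
 *			 NULL,			no completion callback
 *			 MSG_SIMPLE_Q_TAG,
 *			 0,			byte2
 *			 0,			interleave
 *			 NULL, 0,		no parameter list
 *			 SSD_FULL_SIZE,
 *			 60 * 60 * 1000);	formatting can take a while
 *	if (cam_send_ccb(dev, ccb) < 0)
 *		err(1, "cam_send_ccb");
 *	cam_freeccb(ccb);
 *	cam_close_device(dev);
 */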
2426 
2427 #endif /* _KERNEL */
2428