xref: /dragonfly/sys/bus/cam/scsi/scsi_da.c (revision 193f58b8)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD: src/sys/cam/scsi/scsi_da.c,v 1.42.2.46 2003/10/21 22:18:19 thomas Exp $
29  */
30 
31 #include <sys/param.h>
32 
33 #ifdef _KERNEL
34 
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/buf.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/conf.h>
42 #include <sys/devicestat.h>
43 #include <sys/disk.h>
44 #include <sys/dtype.h>
45 #include <sys/eventhandler.h>
46 #include <sys/malloc.h>
47 #include <sys/cons.h>
48 #include <sys/proc.h>
49 
50 #include <sys/buf2.h>
51 
52 #endif /* _KERNEL */
53 
54 #ifdef _KERNEL
55 #include <vm/pmap.h>
56 #endif
57 
58 #ifndef _KERNEL
59 #include <stdio.h>
60 #include <string.h>
61 #endif /* _KERNEL */
62 
63 #include <sys/camlib.h>
64 #include "../cam.h"
65 #include "../cam_ccb.h"
66 #include "../cam_extend.h"
67 #include "../cam_periph.h"
68 #include "../cam_xpt_periph.h"
69 #include "../cam_sim.h"
70 
71 #include "scsi_daio.h"
72 #include "scsi_message.h"
73 
74 #ifndef _KERNEL
75 #include "scsi_da.h"
76 #endif /* !_KERNEL */
77 
78 #ifdef _KERNEL
79 typedef enum {
80 	DA_STATE_PROBE,
81 	DA_STATE_PROBE2,
82 	DA_STATE_NORMAL
83 } da_state;
84 
85 typedef enum {
86 	DA_FLAG_PACK_INVALID	= 0x001,
87 	DA_FLAG_NEW_PACK	= 0x002,
88 	DA_FLAG_PACK_LOCKED	= 0x004,
89 	DA_FLAG_PACK_REMOVABLE	= 0x008,
90 	DA_FLAG_TAGGED_QUEUING	= 0x010,
91 	DA_FLAG_RETRY_UA	= 0x080,
92 	DA_FLAG_OPEN		= 0x100,
93 	DA_FLAG_SCTX_INIT	= 0x200,
94 	DA_FLAG_RD_LIMIT	= 0x400,
95 	DA_FLAG_WR_LIMIT	= 0x800,
96 	DA_FLAG_CAN_TRIM	= 0x1000,
97 	DA_FLAG_CAP_MUTE	= 0x2000
98 } da_flags;
99 
100 typedef enum {
101 	DA_Q_NONE		= 0x00,
102 	DA_Q_NO_SYNC_CACHE	= 0x01,
103 	DA_Q_NO_6_BYTE		= 0x02,
104 	DA_Q_NO_PREVENT		= 0x04
105 } da_quirks;
106 
107 typedef enum {
108 	DA_CCB_POLLED		= 0x00,
109 	DA_CCB_PROBE		= 0x01,
110 	DA_CCB_PROBE2		= 0x02,
111 	DA_CCB_BUFFER_IO	= 0x03,
112 	DA_CCB_WAITING		= 0x04,
113 	DA_CCB_DUMP		= 0x05,
114 	DA_CCB_TRIM		= 0x06,
115 	DA_CCB_TYPE_MASK	= 0x0F,
116 	DA_CCB_RETRY_UA		= 0x10
117 } da_ccb_state;
118 
119 /* Offsets into our private area for storing information */
120 #define ccb_state	ppriv_field0
121 #define ccb_bio		ppriv_ptr1
122 
123 struct disk_params {
124 	u_int8_t  heads;
125 	u_int32_t cylinders;
126 	u_int8_t  secs_per_track;
127 	u_int32_t secsize;	/* Number of bytes/sector */
128 	u_int64_t sectors;	/* Total number of sectors */
129 };
130 
131 #define TRIM_MAX_BLOCKS 8
132 #define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * 64)
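/*
 * Each TRIM range is encoded in 8 bytes: a 48-bit starting LBA followed
 * by a 16-bit block count (see the range packing loop in dastart()).
 */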
133 struct trim_request {
134         uint8_t         data[TRIM_MAX_RANGES * 8];
135         struct bio      *bios[TRIM_MAX_RANGES];
136 };
137 
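/*
 * Per-unit state.  Reads, writes/flushes, and TRIM requests are kept on
 * separate queues so dastart() can limit the number of tags consumed by
 * each direction (see the DA_FLAG_RD_LIMIT/DA_FLAG_WR_LIMIT handling).
 */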
138 struct da_softc {
139 	struct	 bio_queue_head bio_queue_rd;
140 	struct	 bio_queue_head bio_queue_wr;
141 	struct	 bio_queue_head bio_queue_trim;
142 	struct	 devstat device_stats;
143 	SLIST_ENTRY(da_softc) links;
144 	LIST_HEAD(, ccb_hdr) pending_ccbs;
145 	da_state state;
146 	da_flags flags;
147 	da_quirks quirks;
148 	int	 minimum_cmd_size;
149 	int	 outstanding_cmds_rd;
150 	int	 outstanding_cmds_wr;
151 	int      trim_max_ranges;
152 	int      trim_running;
153 	int      trim_enabled;
154 	struct	 disk_params params;
155 	struct	 disk disk;
156 	union	 ccb saved_ccb;
157 	struct task		sysctl_task;
158 	struct sysctl_ctx_list	sysctl_ctx;
159 	struct sysctl_oid	*sysctl_tree;
160 	struct trim_request     trim_req;
161 };
162 
163 struct da_quirk_entry {
164 	struct scsi_inquiry_pattern inq_pat;
165 	da_quirks quirks;
166 };
167 
168 static const char quantum[] = "QUANTUM";
169 static const char microp[] = "MICROP";
170 
171 static struct da_quirk_entry da_quirk_table[] =
172 {
173 	/* SPI, FC devices */
174 	{
175 		/*
176 		 * Fujitsu M2513A MO drives.
177 		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
178 		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
179 		 * Reported by: W.Scholten <whs@xs4all.nl>
180 		 */
181 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
182 		/*quirks*/ DA_Q_NO_SYNC_CACHE
183 	},
184 	{
185 		/* See above. */
186 		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
187 		/*quirks*/ DA_Q_NO_SYNC_CACHE
188 	},
189 	{
190 		/*
191 		 * This particular Fujitsu drive doesn't like the
192 		 * synchronize cache command.
193 		 * Reported by: Tom Jackson <toj@gorilla.net>
194 		 */
195 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
196 		/*quirks*/ DA_Q_NO_SYNC_CACHE
197 	},
198 	{
199 		/*
200 		 * This drive doesn't like the synchronize cache command
201 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
202 		 * in NetBSD PR kern/6027, August 24, 1998.
203 		 */
204 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
205 		/*quirks*/ DA_Q_NO_SYNC_CACHE
206 	},
207 	{
208 		/*
209 		 * This drive doesn't like the synchronize cache command
210 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
211 		 * (PR 8882).
212 		 */
213 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
214 		/*quirks*/ DA_Q_NO_SYNC_CACHE
215 	},
216 	{
217 		/*
218 		 * Doesn't like the synchronize cache command.
219 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
220 		 */
221 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
222 		/*quirks*/ DA_Q_NO_SYNC_CACHE
223 	},
224 	{
225 		/*
226 		 * Doesn't like the synchronize cache command.
227 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
228 		 */
229 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
230 		/*quirks*/ DA_Q_NO_SYNC_CACHE
231 	},
232 	{
233 		/*
234 		 * Doesn't like the synchronize cache command.
235 		 */
236 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
237 		/*quirks*/ DA_Q_NO_SYNC_CACHE
238 	},
239 	{
240 		/*
241 		 * Doesn't like the synchronize cache command.
242 		 * Reported by: walter@pelissero.de
243 		 */
244 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
245 		/*quirks*/ DA_Q_NO_SYNC_CACHE
246 	},
247 	{
248 		/*
249 		 * Doesn't work correctly with 6 byte reads/writes.
250 		 * Returns illegal request, and points to byte 9 of the
251 		 * 6-byte CDB.
252 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
253 		 */
254 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
255 		/*quirks*/ DA_Q_NO_6_BYTE
256 	},
257 	{
258 		/* See above. */
259 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
260 		/*quirks*/ DA_Q_NO_6_BYTE
261 	},
262 	{
263 		/*
264 		 * Doesn't like the synchronize cache command.
265 		 * Reported by: walter@pelissero.de
266 		 */
267 		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
268 		/*quirks*/ DA_Q_NO_SYNC_CACHE
269 	},
270 	{
271 		/*
272 		 * The CISS RAID controllers do not support SYNC_CACHE
273 		 */
274 		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
275 		/*quirks*/ DA_Q_NO_SYNC_CACHE
276 	},
277 	{
278 		/*
279 		 * The same goes for the mly(4) controllers
280 		 */
281 		{T_DIRECT, SIP_MEDIA_FIXED, "MLY*", "*", "MYLX"},
282 		/*quirks*/ DA_Q_NO_SYNC_CACHE
283 	},
284 	/*
285 	 * USB mass storage devices supported by umass(4)
286 	 *
287 	 * NOTE: USB attachments automatically set DA_Q_NO_SYNC_CACHE so
288 	 *	 it does not have to be specified here.
289 	 */
290  	{
291  		/*
292  		 * Creative Nomad MUVO mp3 player (USB)
293  		 * PR: kern/53094
294  		 */
295  		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
296 		/*quirks*/ DA_Q_NO_PREVENT
297  	},
298 	{
299 		/*
300 		 * Sigmatel USB Flash MP3 Player
301 		 * PR: kern/57046
302 		 */
303 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
304 		/*quirks*/ DA_Q_NO_PREVENT
305 	},
306 	{
307 		/*
308 		 * SEAGRAND NP-900 MP3 Player
309 		 * PR: kern/64563
310 		 */
311 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
312 		/*quirks*/ DA_Q_NO_PREVENT
313 	},
314 	{
315 		/*
316 		 * Creative MUVO Slim mp3 player (USB)
317 		 * PR: usb/86131
318 		 */
319 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
320 		"*"}, /*quirks*/ DA_Q_NO_PREVENT
321 	},
322 	{
323 		/*
324 		 * Philips USB Key Audio KEY013
325 		 * PR: usb/68412
326 		 */
327 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
328 		/*quirks*/ DA_Q_NO_PREVENT
329 	},
330 };
331 
332 static	d_open_t	daopen;
333 static	d_close_t	daclose;
334 static	d_strategy_t	dastrategy;
335 static	d_dump_t	dadump;
336 static	d_ioctl_t	daioctl;
337 static	periph_init_t	dainit;
338 static	void		daasync(void *callback_arg, u_int32_t code,
339 				struct cam_path *path, void *arg);
340 static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
341 static	periph_ctor_t	daregister;
342 static	periph_dtor_t	dacleanup;
343 static	periph_start_t	dastart;
344 static	periph_oninv_t	daoninvalidate;
345 static	void		dadone(struct cam_periph *periph,
346 			       union ccb *done_ccb);
347 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
348 				u_int32_t sense_flags);
349 static void		daprevent(struct cam_periph *periph, int action);
350 static int		dagetcapacity(struct cam_periph *periph, int ccbflags);
351 static int		dacheckmedia(struct cam_periph *periph);
352 static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
353 				  uint64_t maxsector);
354 static void		daflushbioq(struct bio_queue_head *bioq, int error);
355 static void		dashutdown(void *arg, int howto);
356 
357 #ifndef DA_DEFAULT_TIMEOUT
358 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
359 #endif
360 
361 #ifndef	DA_DEFAULT_RETRY
362 #define	DA_DEFAULT_RETRY	4
363 #endif
364 
365 static int da_retry_count = DA_DEFAULT_RETRY;
366 static int da_default_timeout = DA_DEFAULT_TIMEOUT;
367 
368 SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
369             "CAM Direct Access Disk driver");
370 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
371            &da_retry_count, 0, "Normal I/O retry count");
372 TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
373 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
374            &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
375 TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
376 
377 static struct periph_driver dadriver =
378 {
379 	dainit, "da",
380 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
381 };
382 
383 PERIPHDRIVER_DECLARE(da, dadriver);
384 
385 static struct dev_ops da_ops = {
386 	{ "da", 0, D_DISK | D_MPSAFE },
387 	.d_open =	daopen,
388 	.d_close =	daclose,
389 	.d_read =	physread,
390 	.d_write =	physwrite,
391 	.d_strategy =	dastrategy,
392 	.d_dump =	dadump,
393 	.d_ioctl =	daioctl
394 };
395 
396 static struct extend_array *daperiphs;
397 
398 MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
399 
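/*
 * DAIOCTRIM takes two off_t values in a_data: the starting byte offset
 * and the number of bytes to free, both of which must be multiples of
 * 512.  A minimal userland sketch (device path and sizes are examples
 * only, error handling omitted):
 *
 *	off_t args[2] = { start_bytes, length_bytes };
 *	int fd = open("/dev/da0", O_RDWR);
 *	ioctl(fd, DAIOCTRIM, args);
 */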
400 static int
401 daioctl(struct dev_ioctl_args *ap)
402 {
403 	int unit;
404 	int error = 0;
405 	struct buf *bp;
406 	struct cam_periph *periph;
407 	int byte_count;
408 
409 	off_t *del_num = (off_t*)ap->a_data;
410 	off_t bytes_left;
411 	off_t bytes_start;
412 
413 	cdev_t dev = ap->a_head.a_dev;
414 
415 
416 	unit = dkunit(dev);
417 	periph = cam_extend_get(daperiphs, unit);
418 	if (periph == NULL)
419 		return(ENXIO);
420 
421 	switch (ap->a_cmd) {
422 	case DAIOCTRIM:
423 	{
424 
425 		bytes_left = del_num[1];
426 		bytes_start = del_num[0];
427 
428 		/* TRIM occurs on 512-byte sectors. */
429 		KKASSERT((bytes_left % 512) == 0);
430 		KKASSERT((bytes_start % 512) == 0);
431 
432 
433 		/* Break TRIM up into int-sized commands because of b_bcount */
434 		while(bytes_left) {
435 
436 			/*
437 			 * Rather than squeezing out more blocks in b_bcount
438 			 * and having to break up the TRIM request in dastart(),
439 			 * we ensure we can always TRIM this many bytes with one
440 			 * TRIM command (this happens if the device only
441 			 * supports one TRIM block).
442 			 *
443 			 * With a minimum TRIM block size of 1, one TRIM command
444 			 * can free 4194240 blocks (64 * 65535): each LBA range
445 			 * can address 65535 blocks and there are 64 such ranges
446 			 * in a 512-byte block, and 4194240 * 512 = 0x7FFF8000.
447 			 *
448 			 */
449 			byte_count = MIN(bytes_left,0x7FFF8000);
450 			bp = getnewbuf(0, 0, 0, 1);
451 
452 			bp->b_cmd = BUF_CMD_FREEBLKS;
453 			bp->b_bio1.bio_offset = bytes_start;
454 			bp->b_bcount = byte_count;
455 			bp->b_bio1.bio_flags |= BIO_SYNC;
456 			bp->b_bio1.bio_done = biodone_sync;
457 
458 			dev_dstrategy(ap->a_head.a_dev, &bp->b_bio1);
459 
460 			if (biowait(&bp->b_bio1, "TRIM")) {
461 				kprintf("Error:%d\n", bp->b_error);
462 				brelse(bp);
463 				return(bp->b_error ? bp->b_error : EIO);
464 			}
465 			brelse(bp);
466 			bytes_left -= byte_count;
467 			bytes_start += byte_count;
468 		}
469 		break;
470 	}
471 	default:
472 		return(EINVAL);
473 	}
474 
475 	return(error);
476 }
477 
478 static int
479 daopen(struct dev_open_args *ap)
480 {
481 	cdev_t dev = ap->a_head.a_dev;
482 	struct cam_periph *periph;
483 	struct da_softc *softc;
484 	struct disk_info info;
485 	int unit;
486 	int error;
487 
488 	unit = dkunit(dev);
489 	periph = cam_extend_get(daperiphs, unit);
490 	if (periph == NULL) {
491 		return (ENXIO);
492 	}
493 
494 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
495 		return(ENXIO);
496 	}
497 
498 	cam_periph_lock(periph);
499 	if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
500 		cam_periph_unlock(periph);
501 		cam_periph_release(periph);
502 		return (error);
503 	}
504 
505 	unit = periph->unit_number;
506 	softc = (struct da_softc *)periph->softc;
507 
508 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
509 	    ("daopen: dev=%s (unit %d)\n", devtoname(dev),
510 	     unit));
511 
512 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
513 		/* Invalidate our pack information. */
514 		disk_invalidate(&softc->disk);
515 		softc->flags &= ~DA_FLAG_PACK_INVALID;
516 	}
517 
518 	error = dacheckmedia(periph);
519 	softc->flags |= DA_FLAG_OPEN;
520 
521 	if (error == 0) {
522 		struct ccb_getdev *cgd;
523 
524 		/* Build disk information structure */
525 		bzero(&info, sizeof(info));
526 		info.d_type = DTYPE_SCSI;
527 
528 		/*
529 		 * Grab the inquiry data to get the vendor and product names.
530 		 * Put them in the typename and packname for the label.
531 		 */
532 		cgd = &xpt_alloc_ccb()->cgd;
533 		xpt_setup_ccb(&cgd->ccb_h, periph->path, /*priority*/ 1);
534 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
535 		xpt_action((union ccb *)cgd);
536 		xpt_free_ccb(&cgd->ccb_h);
537 
538 		/*
539 		 * Check to see whether or not the blocksize is set yet.
540 		 * If it isn't, set it and then clear the blocksize
541 		 * unavailable flag for the device statistics.
542 		 */
543 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
544 			softc->device_stats.block_size = softc->params.secsize;
545 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
546 		}
547 	}
548 
549 	if (error == 0) {
550 		softc->flags &= ~DA_FLAG_CAP_MUTE;
551 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
552 		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
553 			daprevent(periph, PR_PREVENT);
554 	} else {
555 		softc->flags |= DA_FLAG_CAP_MUTE;
556 		softc->flags &= ~DA_FLAG_OPEN;
557 		cam_periph_release(periph);
558 	}
559 	cam_periph_unhold(periph, 1);
560 	return (error);
561 }
562 
563 static int
564 daclose(struct dev_close_args *ap)
565 {
566 	cdev_t dev = ap->a_head.a_dev;
567 	struct	cam_periph *periph;
568 	struct	da_softc *softc;
569 	int	unit;
570 	int	error;
571 
572 	unit = dkunit(dev);
573 	periph = cam_extend_get(daperiphs, unit);
574 	if (periph == NULL)
575 		return (ENXIO);
576 
577 	cam_periph_lock(periph);
578 	if ((error = cam_periph_hold(periph, 0)) != 0) {
579 		cam_periph_unlock(periph);
580 		cam_periph_release(periph);
581 		return (error);
582 	}
583 
584 	softc = (struct da_softc *)periph->softc;
585 
586 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
587 		union	ccb *ccb;
588 
589 		ccb = cam_periph_getccb(periph, /*priority*/1);
590 		ccb->ccb_h.ccb_state = DA_CCB_POLLED;
591 
592 		scsi_synchronize_cache(&ccb->csio,
593 				       /*retries*/1,
594 				       /*cbfcnp*/dadone,
595 				       MSG_SIMPLE_Q_TAG,
596 				       /*begin_lba*/0,/* Cover the whole disk */
597 				       /*lb_count*/0,
598 				       SSD_FULL_SIZE,
599 				       5 * 60 * 1000);
600 
601 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
602 				  /*sense_flags*/SF_RETRY_UA,
603 				  &softc->device_stats);
604 
605 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
606 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
607 			     CAM_SCSI_STATUS_ERROR) {
608 				int asc, ascq;
609 				int sense_key, error_code;
610 
611 				scsi_extract_sense(&ccb->csio.sense_data,
612 						   &error_code,
613 						   &sense_key,
614 						   &asc, &ascq);
615 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
616 					scsi_sense_print(&ccb->csio);
617 			} else {
618 				xpt_print(periph->path, "Synchronize cache "
619 				    "failed, status == 0x%x, scsi status == "
620 				    "0x%x\n", ccb->csio.ccb_h.status,
621 				    ccb->csio.scsi_status);
622 			}
623 		}
624 
625 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
626 			cam_release_devq(ccb->ccb_h.path,
627 					 /*relsim_flags*/0,
628 					 /*reduction*/0,
629 					 /*timeout*/0,
630 					 /*getcount_only*/0);
631 
632 		xpt_release_ccb(ccb);
633 
634 	}
635 
636 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
637 		if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
638 			daprevent(periph, PR_ALLOW);
639 		/*
640 		 * If we've got removable media, mark the blocksize as
641 		 * unavailable, since it could change when new media is
642 		 * inserted.
643 		 */
644 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
645 	}
646 
647 	/*
648 	 * Don't compound any ref counting software bugs with more.
649 	 */
650 	if (softc->flags & DA_FLAG_OPEN) {
651 		softc->flags &= ~DA_FLAG_OPEN;
652 		cam_periph_release(periph);
653 	} else {
654 		xpt_print(periph->path,
655 			  "daclose() called on an already closed device!\n");
656 	}
657 	cam_periph_unhold(periph, 1);
658 	return (0);
659 }
660 
661 /*
662  * Actually translate the requested transfer into one the physical driver
663  * can understand.  The transfer is described by a buf and will include
664  * only one physical transfer.
665  */
666 static int
667 dastrategy(struct dev_strategy_args *ap)
668 {
669 	cdev_t dev = ap->a_head.a_dev;
670 	struct bio *bio = ap->a_bio;
671 	struct buf *bp = bio->bio_buf;
672 	struct cam_periph *periph;
673 	struct da_softc *softc;
674 	u_int  unit;
675 
676 	unit = dkunit(dev);
677 	periph = cam_extend_get(daperiphs, unit);
678 	if (periph == NULL) {
679 		bp->b_error = ENXIO;
680 		goto bad;
681 	}
682 	softc = (struct da_softc *)periph->softc;
683 
684 	cam_periph_lock(periph);
685 
686 #if 0
687 	/*
688 	 * check it's not too big a transfer for our adapter
689 	 */
690 	scsi_minphys(bp, &sd_switch);
691 #endif
692 
693 	/*
694 	 * Mask interrupts so that the pack cannot be invalidated until
695 	 * after we are in the queue.  Otherwise, we might not properly
696 	 * clean up one of the buffers.
697 	 */
698 
699 	/*
700 	 * If the device has been made invalid, error out
701 	 */
702 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
703 		cam_periph_unlock(periph);
704 		bp->b_error = ENXIO;
705 		goto bad;
706 	}
707 
708 	/*
709 	 * Place it in the queue of disk activities for this disk
710 	 */
711 	if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)
712 		bioqdisksort(&softc->bio_queue_wr, bio);
713 	else if (bp->b_cmd == BUF_CMD_FREEBLKS)
714 		bioqdisksort(&softc->bio_queue_trim, bio);
715 	else
716 		bioqdisksort(&softc->bio_queue_rd, bio);
717 
718 	/*
719 	 * Schedule ourselves for performing the work.
720 	 */
721 	xpt_schedule(periph, /* XXX priority */1);
722 	cam_periph_unlock(periph);
723 
724 	return(0);
725 bad:
726 	bp->b_flags |= B_ERROR;
727 
728 	/*
729 	 * Correctly set the buf to indicate a completed xfer
730 	 */
731 	bp->b_resid = bp->b_bcount;
732 	biodone(bio);
733 	return(0);
734 }
735 
736 static int
737 dadump(struct dev_dump_args *ap)
738 {
739 	cdev_t dev = ap->a_head.a_dev;
740 	struct	    cam_periph *periph;
741 	struct	    da_softc *softc;
742 	u_int	    unit;
743 	u_int32_t   secsize;
744 	struct	    ccb_scsiio *csio;
745 
746 	unit = dkunit(dev);
747 	periph = cam_extend_get(daperiphs, unit);
748 	if (periph == NULL)
749 		return (ENXIO);
750 
751 	softc = (struct da_softc *)periph->softc;
752 	cam_periph_lock(periph);
753 	secsize = softc->params.secsize; /* XXX: or ap->a_secsize? */
754 
755 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
756 		cam_periph_unlock(periph);
757 		return (ENXIO);
758 	}
759 
760 	csio = &xpt_alloc_ccb()->csio;
761 
762 	/*
763 	 * because length == 0 means we are supposed to flush cache, we only
764 	 * try to write something if length > 0.
765 	 */
766 	if (ap->a_length > 0) {
767 		xpt_setup_ccb(&csio->ccb_h, periph->path, /*priority*/1);
768 		csio->ccb_h.flags |= CAM_POLLED;
769 		csio->ccb_h.ccb_state = DA_CCB_DUMP;
770 		scsi_read_write(csio,
771 				/*retries*/1,
772 				dadone,
773 				MSG_ORDERED_Q_TAG,
774 				/*read*/FALSE,
775 				/*byte2*/0,
776 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
777 				ap->a_offset / secsize,
778 				ap->a_length / secsize,
779 				/*data_ptr*/(u_int8_t *) ap->a_virtual,
780 				/*dxfer_len*/ap->a_length,
781 				/*sense_len*/SSD_FULL_SIZE,
782 				DA_DEFAULT_TIMEOUT * 1000);
783 		xpt_polled_action((union ccb *)csio);
784 
785 		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
786 			kprintf("Aborting dump due to I/O error.\n");
787 			if ((csio->ccb_h.status & CAM_STATUS_MASK) ==
788 			     CAM_SCSI_STATUS_ERROR)
789 				scsi_sense_print(csio);
790 			else
791 				kprintf("status == 0x%x, scsi status == 0x%x\n",
792 				       csio->ccb_h.status, csio->scsi_status);
793 			cam_periph_unlock(periph);
794 			xpt_free_ccb(&csio->ccb_h);
795 			return(EIO);
796 		}
797 		goto done;
798 	}
799 
800 	/*
801 	 * Sync the disk cache contents to the physical media.
802 	 */
803 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
804 
805 		xpt_setup_ccb(&csio->ccb_h, periph->path, /*priority*/1);
806 		csio->ccb_h.ccb_state = DA_CCB_DUMP;
807 		scsi_synchronize_cache(csio,
808 				       /*retries*/1,
809 				       /*cbfcnp*/dadone,
810 				       MSG_SIMPLE_Q_TAG,
811 				       /*begin_lba*/0,/* Cover the whole disk */
812 				       /*lb_count*/0,
813 				       SSD_FULL_SIZE,
814 				       5 * 60 * 1000);
815 		xpt_polled_action((union ccb *)csio);
816 
817 		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
818 			if ((csio->ccb_h.status & CAM_STATUS_MASK) ==
819 			     CAM_SCSI_STATUS_ERROR) {
820 				int asc, ascq;
821 				int sense_key, error_code;
822 
823 				scsi_extract_sense(&csio->sense_data,
824 						   &error_code,
825 						   &sense_key,
826 						   &asc, &ascq);
827 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
828 					scsi_sense_print(csio);
829 			} else {
830 				xpt_print(periph->path, "Synchronize cache "
831 				    "failed, status == 0x%x, scsi status == "
832 				    "0x%x\n",
833 				    csio->ccb_h.status, csio->scsi_status);
834 			}
835 		}
836 	}
837 done:
838 	cam_periph_unlock(periph);
839 	xpt_free_ccb(&csio->ccb_h);
840 
841 	return (0);
842 }
843 
844 static void
845 dainit(void)
846 {
847 	cam_status status;
848 
849 	/*
850 	 * Create our extend array for storing the devices we attach to.
851 	 */
852 	daperiphs = cam_extend_new();
853 	if (daperiphs == NULL) {
854 		kprintf("da: Failed to alloc extend array!\n");
855 		return;
856 	}
857 
858 	/*
859 	 * Install a global async callback.  This callback will
860 	 * receive async callbacks like "new device found".
861 	 */
862 	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
863 
864 	if (status != CAM_REQ_CMP) {
865 		kprintf("da: Failed to attach master async callback "
866 		       "due to status 0x%x!\n", status);
867 	} else {
868 		/* Register our shutdown event handler */
869 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
870 					   NULL, SHUTDOWN_PRI_SECOND)) == NULL)
871 			kprintf("%s: shutdown event registration failed!\n",
872 			    __func__);
873 	}
874 }
875 
876 static void
877 daoninvalidate(struct cam_periph *periph)
878 {
879 	struct da_softc *softc;
880 
881 	softc = (struct da_softc *)periph->softc;
882 
883 	/*
884 	 * De-register any async callbacks.
885 	 */
886 	xpt_register_async(0, daasync, periph, periph->path);
887 
888 	softc->flags |= DA_FLAG_PACK_INVALID;
889 
890 	/*
891 	 * Return all queued I/O with ENXIO.
892 	 * XXX Handle any transactions queued to the card
893 	 *     with XPT_ABORT_CCB.
894 	 */
895 	daflushbioq(&softc->bio_queue_trim, ENXIO);
896 	daflushbioq(&softc->bio_queue_wr, ENXIO);
897 	daflushbioq(&softc->bio_queue_rd, ENXIO);
898 	xpt_print(periph->path, "lost device\n");
899 }
900 
901 static void
902 daflushbioq(struct bio_queue_head *bioq, int error)
903 {
904 	struct bio *q_bio;
905 	struct buf *q_bp;
906 
907 	while ((q_bio = bioq_first(bioq)) != NULL){
908 		bioq_remove(bioq, q_bio);
909 		q_bp = q_bio->bio_buf;
910 		q_bp->b_resid = q_bp->b_bcount;
911 		q_bp->b_error = error;
912 		q_bp->b_flags |= B_ERROR;
913 		biodone(q_bio);
914 	}
915 }
916 
917 static void
918 dacleanup(struct cam_periph *periph)
919 {
920 	struct da_softc *softc;
921 
922 	softc = (struct da_softc *)periph->softc;
923 
924 	devstat_remove_entry(&softc->device_stats);
925 	cam_extend_release(daperiphs, periph->unit_number);
926 	xpt_print(periph->path, "removing device entry\n");
927 	/*
928 	 * If we can't free the sysctl tree, oh well...
929 	 */
930 	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
931 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
932 		xpt_print(periph->path, "can't remove sysctl context\n");
933 	}
934 	periph->softc = NULL;
935 	if (softc->disk.d_rawdev) {
936 		cam_periph_unlock(periph);
937 		disk_destroy(&softc->disk);
938 		cam_periph_lock(periph);
939 	}
940 
941 	kfree(softc, M_DEVBUF);
942 }
943 
944 static void
945 daasync(void *callback_arg, u_int32_t code,
946 	struct cam_path *path, void *arg)
947 {
948 	struct cam_periph *periph;
949 
950 	periph = (struct cam_periph *)callback_arg;
951 
952 	switch (code) {
953 	case AC_FOUND_DEVICE:
954 	{
955 		struct ccb_getdev *cgd;
956 		cam_status status;
957 
958 		cgd = (struct ccb_getdev *)arg;
959 		if (cgd == NULL)
960 			break;
961 
962 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
963 		    && SID_TYPE(&cgd->inq_data) != T_RBC
964 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
965 			break;
966 
967 		/*
968 		 * Don't complain if a valid peripheral is already attached.
969 		 */
970 		periph = cam_periph_find(cgd->ccb_h.path, "da");
971 		if (periph && (periph->flags & CAM_PERIPH_INVALID) == 0)
972 			break;
973 
974 		/*
975 		 * Allocate a peripheral instance for
976 		 * this device and start the probe
977 		 * process.
978 		 */
979 		status = cam_periph_alloc(daregister, daoninvalidate,
980 					  dacleanup, dastart,
981 					  "da", CAM_PERIPH_BIO,
982 					  cgd->ccb_h.path, daasync,
983 					  AC_FOUND_DEVICE, cgd);
984 
985 		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
986 			kprintf("%s: Unable to attach to new device "
987 			    "due to status 0x%x\n", __func__, status);
988 		}
989 		break;
990 	}
991 	case AC_SENT_BDR:
992 	case AC_BUS_RESET:
993 	{
994 		struct da_softc *softc;
995 		struct ccb_hdr *ccbh;
996 
997 		softc = (struct da_softc *)periph->softc;
998 		/*
999 		 * Don't fail on the expected unit attention
1000 		 * that will occur.
1001 		 */
1002 		softc->flags |= DA_FLAG_RETRY_UA;
1003 		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
1004 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
1005 		/* FALLTHROUGH*/
1006 	}
1007 	default:
1008 		cam_periph_async(periph, code, path, arg);
1009 		break;
1010 	}
1011 }
1012 
1013 static void
1014 dasysctlinit(void *context, int pending)
1015 {
1016 	struct cam_periph *periph;
1017 	struct da_softc *softc;
1018 	char tmpstr[80], tmpstr2[80];
1019 
1020 	periph = (struct cam_periph *)context;
1021 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1022 		return;
1023 	}
1024 
1025 	softc = (struct da_softc *)periph->softc;
1026 	ksnprintf(tmpstr, sizeof(tmpstr),
1027 		  "CAM DA unit %d", periph->unit_number);
1028 	ksnprintf(tmpstr2, sizeof(tmpstr2),
1029 		  "%d", periph->unit_number);
1030 
1031 	sysctl_ctx_free(&softc->sysctl_ctx);
1032 	sysctl_ctx_init(&softc->sysctl_ctx);
1033 	softc->flags |= DA_FLAG_SCTX_INIT;
1034 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1035 		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1036 		CTLFLAG_RD, 0, tmpstr);
1037 	if (softc->sysctl_tree == NULL) {
1038 		kprintf("%s: unable to allocate sysctl tree\n", __func__);
1039 		cam_periph_release(periph);
1040 		return;
1041 	}
1042 
1043 	/*
1044 	 * Now register the sysctl handler, so the user can change the value on
1045 	 * the fly.
1046 	 */
1047 	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1048 		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1049 		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1050 		"Minimum CDB size");
1051 
1052 	/* Only create the option if the device supports TRIM */
1053 	if (softc->disk.d_info.d_trimflag) {
1054 		SYSCTL_ADD_INT(&softc->sysctl_ctx,
1055 		    SYSCTL_CHILDREN(softc->sysctl_tree),
1056 		    OID_AUTO,
1057 		    "trim_enabled",
1058 		    CTLFLAG_RW,
1059 		    &softc->trim_enabled,
1060 		    0,
1061 		    "Enable TRIM for this device (SSD)");
1062 	}
1063 
1064 	cam_periph_release(periph);
1065 }
1066 
1067 static int
1068 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
1069 {
1070 	int error, value;
1071 
1072 	value = *(int *)arg1;
1073 
1074 	error = sysctl_handle_int(oidp, &value, 0, req);
1075 
1076 	if ((error != 0)
1077 	 || (req->newptr == NULL))
1078 		return (error);
1079 
1080 	/*
1081 	 * Acceptable values here are 6, 10, 12, or 16.
1082 	 */
1083 	if (value < 6)
1084 		value = 6;
1085 	else if ((value > 6)
1086 	      && (value <= 10))
1087 		value = 10;
1088 	else if ((value > 10)
1089 	      && (value <= 12))
1090 		value = 12;
1091 	else if (value > 12)
1092 		value = 16;
1093 
1094 	*(int *)arg1 = value;
1095 
1096 	return (0);
1097 }
1098 
1099 static cam_status
1100 daregister(struct cam_periph *periph, void *arg)
1101 {
1102 	struct da_softc *softc;
1103 	struct ccb_pathinq *cpi;
1104 	struct ccb_getdev *cgd;
1105 	char tmpstr[80];
1106 	caddr_t match;
1107 
1108 	cgd = (struct ccb_getdev *)arg;
1109 	if (periph == NULL) {
1110 		kprintf("%s: periph was NULL!!\n", __func__);
1111 		return(CAM_REQ_CMP_ERR);
1112 	}
1113 
1114 	if (cgd == NULL) {
1115 		kprintf("%s: no getdev CCB, can't register device\n",
1116 		    __func__);
1117 		return(CAM_REQ_CMP_ERR);
1118 	}
1119 
1120 	softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
1121 	sysctl_ctx_init(&softc->sysctl_ctx);
1122 	LIST_INIT(&softc->pending_ccbs);
1123 	softc->state = DA_STATE_PROBE;
1124 	bioq_init(&softc->bio_queue_trim);
1125 	bioq_init(&softc->bio_queue_rd);
1126 	bioq_init(&softc->bio_queue_wr);
1127 	if (SID_IS_REMOVABLE(&cgd->inq_data))
1128 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
1129 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1130 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
1131 
1132 	/* Used to get TRIM status from AHCI driver */
1133 	if (cgd->inq_data.vendor_specific1[0] == 1) {
1134 		/*
1135 		 * Maximum number of LBA ranges the SSD can handle in a single
1136 		 * TRIM command.  vendor_specific1[1] is the number of 512-byte
1137 		 * blocks the SSD reports it can accept in one TRIM command.
1138 		 */
1139 		softc->trim_max_ranges =
1140 		   min(cgd->inq_data.vendor_specific1[1] * 64, TRIM_MAX_RANGES);
1141 	}
1142 
1143 	periph->softc = softc;
1144 
1145 	cam_extend_set(daperiphs, periph->unit_number, periph);
1146 
1147 	/*
1148 	 * See if this device has any quirks.
1149 	 */
1150 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1151 			       (caddr_t)da_quirk_table,
1152 			       NELEM(da_quirk_table),
1153 			       sizeof(*da_quirk_table), scsi_inquiry_match);
1154 
1155 	if (match != NULL)
1156 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1157 	else
1158 		softc->quirks = DA_Q_NONE;
1159 
1160 	/*
1161 	 * Unconditionally disable the synchronize cache command for
1162 	 * usb attachments.  It's just impossible to determine if the
1163 	 * device supports it or not, and if it doesn't the port can
1164 	 * brick.
1165 	 */
1166 	if (strncmp(periph->sim->sim_name, "umass", 4) == 0) {
1167 		softc->quirks |= DA_Q_NO_SYNC_CACHE;
1168 	}
1169 
1170 	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
1171 
1172 	/* Check if the SIM does not want 6 byte commands */
1173 	cpi = &xpt_alloc_ccb()->cpi;
1174 	xpt_setup_ccb(&cpi->ccb_h, periph->path, /*priority*/1);
1175 	cpi->ccb_h.func_code = XPT_PATH_INQ;
1176 	xpt_action((union ccb *)cpi);
1177 	if (cpi->ccb_h.status == CAM_REQ_CMP && (cpi->hba_misc & PIM_NO_6_BYTE))
1178 		softc->quirks |= DA_Q_NO_6_BYTE;
1179 
1180 	/*
1181 	 * RBC devices don't have to support READ(6), only READ(10).
1182 	 */
1183 	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
1184 		softc->minimum_cmd_size = 10;
1185 	else
1186 		softc->minimum_cmd_size = 6;
1187 
1188 	/*
1189 	 * Load the user's default, if any.
1190 	 */
1191 	ksnprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
1192 		 periph->unit_number);
1193 	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
1194 
1195 	/*
1196 	 * 6, 10, 12, and 16 are the currently permissible values.
1197 	 */
1198 	if (softc->minimum_cmd_size < 6)
1199 		softc->minimum_cmd_size = 6;
1200 	else if ((softc->minimum_cmd_size > 6)
1201 	      && (softc->minimum_cmd_size <= 10))
1202 		softc->minimum_cmd_size = 10;
1203 	else if ((softc->minimum_cmd_size > 10)
1204 	      && (softc->minimum_cmd_size <= 12))
1205 		softc->minimum_cmd_size = 12;
1206 	else if (softc->minimum_cmd_size > 12)
1207 		softc->minimum_cmd_size = 16;
1208 
1209 	/*
1210 	 * The DA driver supports a blocksize, but
1211 	 * we don't know the blocksize until we do
1212 	 * a read capacity.  So, set a flag to
1213 	 * indicate that the blocksize is
1214 	 * unavailable right now.  We'll clear the
1215 	 * flag as soon as we've done a read capacity.
1216 	 */
1217 	devstat_add_entry(&softc->device_stats, "da",
1218 			  periph->unit_number, 0,
1219 	  		  DEVSTAT_BS_UNAVAILABLE,
1220 			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1221 			  DEVSTAT_PRIORITY_DISK);
1222 
1223 	/*
1224 	 * Register this media as a disk
1225 	 */
1226 	CAM_SIM_UNLOCK(periph->sim);
1227 	disk_create(periph->unit_number, &softc->disk, &da_ops);
1228 	if (cpi->maxio == 0 || cpi->maxio > MAXPHYS)
1229 		softc->disk.d_rawdev->si_iosize_max = MAXPHYS;
1230 	else
1231 		softc->disk.d_rawdev->si_iosize_max = cpi->maxio;
1232 	if (bootverbose) {
1233 		kprintf("%s%d: si_iosize_max:%d\n",
1234 		    periph->periph_name,
1235 		    periph->unit_number,
1236 		    softc->disk.d_rawdev->si_iosize_max);
1237 	}
1238 	CAM_SIM_LOCK(periph->sim);
1239 
1240 	/*
1241 	 * Add async callbacks for bus reset and
1242 	 * bus device reset calls.  I don't bother
1243 	 * checking if this fails as, in most cases,
1244 	 * the system will function just fine without
1245 	 * them and the only alternative would be to
1246 	 * not attach the device on failure.
1247 	 */
1248 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
1249 			   daasync, periph, periph->path);
1250 
1251 	/*
1252 	 * Take an exclusive refcount on the periph while dastart is called
1253 	 * to finish the probe.  The reference will be dropped in dadone at
1254 	 * the end of probe.
1255 	 */
1256 	xpt_free_ccb(&cpi->ccb_h);
1257 	cam_periph_hold(periph, 0);
1258 	xpt_schedule(periph, /*priority*/5);
1259 
1260 	return(CAM_REQ_CMP);
1261 }
1262 
1263 static void
1264 dastart(struct cam_periph *periph, union ccb *start_ccb)
1265 {
1266 	struct da_softc *softc;
1267 
1268 	softc = (struct da_softc *)periph->softc;
1269 
1270 	switch (softc->state) {
1271 	case DA_STATE_NORMAL:
1272 	{
1273 		/* Pull a buffer from the queue and get going on it */
1274 		struct bio *bio;
1275 		struct bio *bio_rd;
1276 		struct bio *bio_wr;
1277 		struct buf *bp;
1278 		u_int8_t tag_code;
1279 		int limit;
1280 
1281 		/*
1282 		 * See if there is a buf with work for us to do.
1283 		 */
1284 		bio_rd = bioq_first(&softc->bio_queue_rd);
1285 		bio_wr = bioq_first(&softc->bio_queue_wr);
1286 
1287 		if (periph->immediate_priority <= periph->pinfo.priority) {
1288 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1289 					("queuing for immediate ccb\n"));
1290 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1291 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1292 					  periph_links.sle);
1293 			periph->immediate_priority = CAM_PRIORITY_NONE;
1294 			wakeup(&periph->ccb_list);
1295 			if (bio_rd || bio_wr) {
1296 				/*
1297 				 * Have more work to do, so ensure we stay
1298 				 * scheduled
1299 				 */
1300 				xpt_schedule(periph, /* XXX priority */1);
1301 			}
1302 			break;
1303 		}
1304 
1305 		/* Run the trim command if not already running */
1306 		if (!softc->trim_running &&
1307 		   (bio = bioq_first(&softc->bio_queue_trim)) != NULL) {
1308 			struct trim_request *req = &softc->trim_req;
1309 			struct bio *bio1;
1310 			int bps = 0, ranges = 0;
1311 
1312 			softc->trim_running = 1;
1313 			bzero(req, sizeof(*req));
1314 			bio1 = bio;
1315 			while (1) {
1316 				uint64_t lba;
1317 				int count;
1318 
1319 				bp = bio1->bio_buf;
1320 				count = bp->b_bcount / softc->params.secsize;
1321 				lba = bio1->bio_offset/softc->params.secsize;
1322 
1323 				bioq_remove(&softc->bio_queue_trim, bio1);
1324 				while (count > 0) {
1325 					int c = min(count, 0xffff);
1326 					int off = ranges * 8;
1327 
1328 					req->data[off + 0] = lba & 0xff;
1329 					req->data[off + 1] = (lba >> 8) & 0xff;
1330 					req->data[off + 2] = (lba >> 16) & 0xff;
1331 					req->data[off + 3] = (lba >> 24) & 0xff;
1332 					req->data[off + 4] = (lba >> 32) & 0xff;
1333 					req->data[off + 5] = (lba >> 40) & 0xff;
1334 					req->data[off + 6] = c & 0xff;
1335 					req->data[off + 7] = (c >> 8) & 0xff;
1336 					lba += c;
1337 					count -= c;
1338 					ranges++;
1339 				}
1340 
1341 				/* Try to merge multiple TRIM requests */
1342 				req->bios[bps++] = bio1;
1343 				bio1 = bioq_first(&softc->bio_queue_trim);
1344 				if (bio1 == NULL ||
1345 				    bio1->bio_buf->b_bcount / softc->params.secsize >
1346 				    (softc->trim_max_ranges - ranges) * 0xffff)
1347 					break;
1348 			}
1349 
1350 
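			/*
			 * Each 512-byte payload block holds 64 ranges, so
			 * round the transfer length up to a whole block.
			 */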
1351 			cam_fill_csio(&start_ccb->csio,
1352 			    1/*retries*/,
1353 			    dadone,
1354 			    CAM_DIR_OUT,
1355 			    MSG_SIMPLE_Q_TAG,
1356 			    req->data,
1357 			    ((ranges +63)/64)*512,
1358 			    SSD_FULL_SIZE,
1359 			    sizeof(struct scsi_rw_6),
1360 			    da_default_timeout*2);
1361 
1362 			start_ccb->ccb_h.ccb_state = DA_CCB_TRIM;
1363 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1364 			    &start_ccb->ccb_h, periph_links.le);
1365 			start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1366 			start_ccb->ccb_h.ccb_bio = bio;
1367 			devstat_start_transaction(&softc->device_stats);
1368 			xpt_action(start_ccb);
1369 			xpt_schedule(periph, 1);
1370 			break;
1371 		}
1372 
1373 		/*
1374 		 * Select a read or write buffer to queue.  Limit the number
1375 		 * of tags dedicated to reading or writing, giving reads
1376 		 * precedence.
1377 		 *
1378 		 * Writes to modern hard drives go into the HD's cache and
1379 		 * return completion nearly instantly, but only until the
1380 		 * cache becomes full.  Once the cache is full, write
1381 		 * commands begin to stall.  If all available tags are
1382 		 * taken up by writes that saturate the drive, reads become
1383 		 * tag-starved.
1384 		 *
1385 		 * A similar situation can occur with reads.  With many
1386 		 * parallel readers, all tags can be taken up by reads
1387 		 * and prevent any writes from draining, even if the HD's
1388 		 * cache is not full.
1389 		 */
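		/*
		 * Cap each direction at roughly two thirds of the tagged
		 * openings so neither reads nor writes can consume every
		 * available tag.
		 */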
1390 		limit = periph->sim->max_tagged_dev_openings * 2 / 3 + 1;
1391 #if 0
1392 		/* DEBUGGING */
1393 		static int savets;
1394 		static long savets2;
1395 		if (1 || time_uptime != savets2 || (ticks != savets && (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr))) {
1396 			kprintf("%d %d (%d)\n",
1397 				softc->outstanding_cmds_rd,
1398 				softc->outstanding_cmds_wr,
1399 				limit);
1400 			savets = ticks;
1401 			savets2 = time_uptime;
1402 		}
1403 #endif
1404 		if (bio_rd && softc->outstanding_cmds_rd < limit) {
1405 			bio = bio_rd;
1406 			bioq_remove(&softc->bio_queue_rd, bio);
1407 		} else if (bio_wr && softc->outstanding_cmds_wr < limit) {
1408 			bio = bio_wr;
1409 			bioq_remove(&softc->bio_queue_wr, bio);
1410 		} else {
1411 			if (bio_rd)
1412 				softc->flags |= DA_FLAG_RD_LIMIT;
1413 			if (bio_wr)
1414 				softc->flags |= DA_FLAG_WR_LIMIT;
1415 			xpt_release_ccb(start_ccb);
1416 			break;
1417 		}
1418 
1419 		/*
1420 		 * We can queue new work.
1421 		 */
1422 		bp = bio->bio_buf;
1423 
1424 		devstat_start_transaction(&softc->device_stats);
1425 
1426 		tag_code = MSG_SIMPLE_Q_TAG;
1427 
1428 		switch(bp->b_cmd) {
1429 		case BUF_CMD_READ:
1430 		case BUF_CMD_WRITE:
1431 			/*
1432 			 * Block read/write op
1433 			 */
1434 			KKASSERT(bio->bio_offset % softc->params.secsize == 0);
1435 
1436 			scsi_read_write(
1437 				&start_ccb->csio,
1438 				da_retry_count,		/* retries */
1439 				dadone,
1440 				tag_code,
1441 				(bp->b_cmd == BUF_CMD_READ),
1442 				0,			/* byte2 */
1443 				softc->minimum_cmd_size,
1444 				bio->bio_offset / softc->params.secsize,
1445 				bp->b_bcount / softc->params.secsize,
1446 				bp->b_data,
1447 				bp->b_bcount,
1448 				SSD_FULL_SIZE,		/* sense_len */
1449 				da_default_timeout * 1000
1450 			);
1451 			break;
1452 		case BUF_CMD_FLUSH:
1453 			/*
1454 			 * Silently complete a flush request if the device
1455 			 * cannot handle it.
1456 			 */
1457 			if (softc->quirks & DA_Q_NO_SYNC_CACHE) {
1458 				xpt_release_ccb(start_ccb);
1459 				start_ccb = NULL;
1460 				devstat_end_transaction_buf(
1461 					&softc->device_stats, bp);
1462 				biodone(bio);
1463 			} else {
1464 				scsi_synchronize_cache(
1465 					&start_ccb->csio,
1466 					1,		/* retries */
1467 					dadone,		/* cbfcnp */
1468 					MSG_SIMPLE_Q_TAG,
1469 					0,		/* lba */
1470 					0,		/* count (whole disk) */
1471 					SSD_FULL_SIZE,
1472 					da_default_timeout*1000	/* timeout */
1473 				);
1474 			}
1475 			break;
1476 		case BUF_CMD_FREEBLKS:
1477 			if (softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM){
1478 				start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1479 				break;
1480 			}
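			/* FALLTHROUGH - device has no TRIM support */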
1481 		default:
1482 			xpt_release_ccb(start_ccb);
1483 			start_ccb = NULL;
1484 			panic("dastart: unrecognized bio cmd %d", bp->b_cmd);
1485 			break; /* NOT REACHED */
1486 		}
1487 
1488 		/*
1489 		 * Block out any asynchronous callbacks
1490 		 * while we touch the pending ccb list.
1491 		 */
1492 		if (start_ccb) {
1493 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1494 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1495 					 &start_ccb->ccb_h, periph_links.le);
1496 			if (bp->b_cmd == BUF_CMD_WRITE ||
1497 			    bp->b_cmd == BUF_CMD_FLUSH) {
1498 				++softc->outstanding_cmds_wr;
1499 			} else {
1500 				++softc->outstanding_cmds_rd;
1501 			}
1502 
1503 			/* We expect a unit attention from this device */
1504 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1505 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1506 				softc->flags &= ~DA_FLAG_RETRY_UA;
1507 			}
1508 
1509 			start_ccb->ccb_h.ccb_bio = bio;
1510 			xpt_action(start_ccb);
1511 		}
1512 
1513 		/*
1514 		 * Be sure we stay scheduled if we have more work to do.
1515 		 */
1516 		if (bioq_first(&softc->bio_queue_rd) ||
1517 		    bioq_first(&softc->bio_queue_wr)) {
1518 			xpt_schedule(periph, 1);
1519 		}
1520 		break;
1521 	}
1522 	case DA_STATE_PROBE:
1523 	{
1524 		struct ccb_scsiio *csio;
1525 		struct scsi_read_capacity_data *rcap;
1526 
1527 		rcap = kmalloc(sizeof(*rcap), M_SCSIDA, M_INTWAIT | M_ZERO);
1528 		csio = &start_ccb->csio;
1529 		scsi_read_capacity(csio,
1530 				   /*retries*/4,
1531 				   dadone,
1532 				   MSG_SIMPLE_Q_TAG,
1533 				   rcap,
1534 				   SSD_FULL_SIZE,
1535 				   /*timeout*/5000);
1536 		start_ccb->ccb_h.ccb_bio = NULL;
1537 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1538 		xpt_action(start_ccb);
1539 		break;
1540 	}
1541 	case DA_STATE_PROBE2:
1542 	{
1543 		struct ccb_scsiio *csio;
1544 		struct scsi_read_capacity_data_16 *rcaplong;
1545 
1546 		rcaplong = kmalloc(sizeof(*rcaplong), M_SCSIDA,
1547 				   M_INTWAIT | M_ZERO);
1548 		csio = &start_ccb->csio;
1549 		scsi_read_capacity_16(csio,
1550 				    /*retries*/ 4,
1551 				    /*cbfcnp*/ dadone,
1552 				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
1553 				    /*lba*/ 0,
1554 				    /*reladr*/ 0,
1555 				    /*pmi*/ 0,
1556 				    rcaplong,
1557 				    /*sense_len*/ SSD_FULL_SIZE,
1558 				    /*timeout*/ 60000);
1559 		start_ccb->ccb_h.ccb_bio = NULL;
1560 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
1561 		xpt_action(start_ccb);
1562 		break;
1563 	}
1564 	}
1565 }
1566 
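/*
 * Rewrite a failed READ(6)/WRITE(6) CDB into its 10 byte equivalent in
 * place and requeue the request.  Returns ERESTART once the request has
 * been requeued, or 0 if the CDB could not be translated.
 */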
1567 static int
1568 cmd6workaround(union ccb *ccb)
1569 {
1570 	struct scsi_rw_6 cmd6;
1571 	struct scsi_rw_10 *cmd10;
1572 	struct da_softc *softc;
1573 	u_int8_t *cdb;
1574 	int frozen;
1575 
1576 	cdb = ccb->csio.cdb_io.cdb_bytes;
1577 
1578 	/* Translation only possible if CDB is an array and cmd is R/W6 */
1579 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
1580 	    (*cdb != READ_6 && *cdb != WRITE_6))
1581 		return 0;
1582 
1583 	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
1584 	    "increasing minimum_cmd_size to 10.\n");
1585  	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
1586 	softc->minimum_cmd_size = 10;
1587 
1588 	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
1589 	cmd10 = (struct scsi_rw_10 *)cdb;
1590 	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
1591 	cmd10->byte2 = 0;
1592 	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
1593 	cmd10->reserved = 0;
1594 	scsi_ulto2b(cmd6.length, cmd10->length);
1595 	cmd10->control = cmd6.control;
1596 	ccb->csio.cdb_len = sizeof(*cmd10);
1597 
1598 	/* Requeue request, unfreezing queue if necessary */
1599 	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1600  	ccb->ccb_h.status = CAM_REQUEUE_REQ;
1601 	xpt_action(ccb);
1602 	if (frozen) {
1603 		cam_release_devq(ccb->ccb_h.path,
1604 				 /*relsim_flags*/0,
1605 				 /*reduction*/0,
1606 				 /*timeout*/0,
1607 				 /*getcount_only*/0);
1608 	}
1609 	return (ERESTART);
1610 }
1611 
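/*
 * Completion callback for every CCB this driver issues.  Dispatches on
 * the DA_CCB_* type stored in ccb_state.
 */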
1612 static void
1613 dadone(struct cam_periph *periph, union ccb *done_ccb)
1614 {
1615 	struct da_softc *softc;
1616 	struct ccb_scsiio *csio;
1617 	struct disk_info info;
1618 
1619 	softc = (struct da_softc *)periph->softc;
1620 	csio = &done_ccb->csio;
1621 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1622 	case DA_CCB_BUFFER_IO:
1623 	case DA_CCB_TRIM:
1624 	{
1625 		struct buf *bp;
1626 		struct bio *bio;
1627 		int mustsched = 0;
1628 
1629 		bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
1630 		bp = bio->bio_buf;
1631 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1632 			int error;
1633 			int sf;
1634 
1635 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1636 				sf = SF_RETRY_UA;
1637 			else
1638 				sf = 0;
1639 
1640 			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1641 			if (error == ERESTART) {
1642 				/*
1643 				 * A retry was scheduled, so
1644 				 * just return.
1645 				 */
1646 				return;
1647 			}
1648 			if (error != 0) {
1649 				if (error == ENXIO) {
1650 					/*
1651 					 * Catastrophic error.  Mark our pack as
1652 					 * invalid.
1653 					 */
1654 					/*
1655 					 * XXX See if this is really a media
1656 					 * XXX change first?
1657 					 */
1658 					xpt_print(periph->path,
1659 					    "Invalidating pack\n");
1660 					softc->flags |= DA_FLAG_PACK_INVALID;
1661 				}
1662 
1663 				/*
1664 				 * Return all queued write I/O's with EIO
1665 				 * so the client can retry these I/Os in the
1666 				 * proper order should it attempt to recover.
1667 				 *
1668 				 * Leave read I/O's alone.
1669 				 */
1670 				daflushbioq(&softc->bio_queue_wr, EIO);
1671 				bp->b_error = error;
1672 				bp->b_resid = bp->b_bcount;
1673 				bp->b_flags |= B_ERROR;
1674 			} else {
1675 				bp->b_resid = csio->resid;
1676 				bp->b_error = 0;
1677 				if (bp->b_resid != 0)
1678 					bp->b_flags |= B_ERROR;
1679 			}
1680 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1681 				cam_release_devq(done_ccb->ccb_h.path,
1682 						 /*relsim_flags*/0,
1683 						 /*reduction*/0,
1684 						 /*timeout*/0,
1685 						 /*getcount_only*/0);
1686 		} else {
1687 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1688 				panic("REQ_CMP with QFRZN");
1689 			bp->b_resid = csio->resid;
1690 			if (csio->resid > 0)
1691 				bp->b_flags |= B_ERROR;
1692 		}
1693 
1694 		/*
1695 		 * Block out any asynchronous callbacks
1696 		 * while we touch the pending ccb list.
1697 		 */
1698 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1699 		if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH) {
1700 			--softc->outstanding_cmds_wr;
1701 			if (softc->flags & DA_FLAG_WR_LIMIT) {
1702 				softc->flags &= ~DA_FLAG_WR_LIMIT;
1703 				mustsched = 1;
1704 			}
1705 		} else {
1706 			--softc->outstanding_cmds_rd;
1707 			if (softc->flags & DA_FLAG_RD_LIMIT) {
1708 				softc->flags &= ~DA_FLAG_RD_LIMIT;
1709 				mustsched = 1;
1710 			}
1711 		}
1712 
1713 		devstat_end_transaction_buf(&softc->device_stats, bp);
1714 		if ((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) ==
1715 		    DA_CCB_TRIM) {
1716 			struct trim_request *req =
1717 			    (struct trim_request *) csio->data_ptr;
1718 			int i;
1719 
1720 			for (i = 1; i < softc->trim_max_ranges &&
1721 			    req->bios[i]; i++) {
1722 				struct bio *bp1 = req->bios[i];
1723 
1724 				bp1->bio_buf->b_resid = bp->b_resid;
1725 				bp1->bio_buf->b_error = bp->b_error;
1726 				if (bp->b_flags & B_ERROR)
1727 					bp1->bio_buf->b_flags |= B_ERROR;
1728 				biodone(bp1);
1729 			}
1730 			softc->trim_running = 0;
1731 			biodone(bio);
1732 			xpt_schedule(periph,1);
1733 		} else
1734 			biodone(bio);
1735 
1736 
1737 		if (mustsched)
1738 			xpt_schedule(periph, /*priority*/1);
1739 
1740 		break;
1741 	}
1742 	case DA_CCB_PROBE:
1743 	case DA_CCB_PROBE2:
1744 	{
1745 		struct	   scsi_read_capacity_data *rdcap;
1746 		struct     scsi_read_capacity_data_16 *rcaplong;
1747 		char	   announce_buf[80];
1748 		int	   doinfo = 0;
1749 
1750 		rdcap = NULL;
1751 		rcaplong = NULL;
1752 		if (softc->state == DA_STATE_PROBE)
1753 			rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
1754 		else
1755 			rcaplong = (struct scsi_read_capacity_data_16 *)
1756 				    csio->data_ptr;
1757 
1758 		bzero(&info, sizeof(info));
1759 		info.d_type = DTYPE_SCSI;
1760 		info.d_serialno = xpt_path_serialno(periph->path);
1761 
1762 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1763 			struct disk_params *dp;
1764 			uint32_t block_size;
1765 			uint64_t maxsector;
1766 
1767 			if (softc->state == DA_STATE_PROBE) {
1768 				block_size = scsi_4btoul(rdcap->length);
1769 				maxsector = scsi_4btoul(rdcap->addr);
1770 
1771 				/*
1772 				 * According to SBC-2, if the standard 10
1773 				 * byte READ CAPACITY command returns 2^32,
1774 				 * we should issue the 16 byte version of
1775 				 * the command, since the device in question
1776 				 * has more sectors than can be represented
1777 				 * with the short version of the command.
1778 				 */
1779 				if (maxsector == 0xffffffff) {
1780 					softc->state = DA_STATE_PROBE2;
1781 					kfree(rdcap, M_SCSIDA);
1782 					xpt_release_ccb(done_ccb);
1783 					xpt_schedule(periph, /*priority*/5);
1784 					return;
1785 				}
1786 			} else {
1787 				block_size = scsi_4btoul(rcaplong->length);
1788 				maxsector = scsi_8btou64(rcaplong->addr);
1789 			}
1790 			dasetgeom(periph, block_size, maxsector);
1791 			dp = &softc->params;
1792 			ksnprintf(announce_buf, sizeof(announce_buf),
1793 				"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1794 				(uintmax_t) (((uintmax_t)dp->secsize *
1795 				dp->sectors) / (1024*1024)),
1796 				(uintmax_t)dp->sectors,
1797 				dp->secsize, dp->heads, dp->secs_per_track,
1798 				dp->cylinders);
1799 
1800 			info.d_media_blksize = softc->params.secsize;
1801 			info.d_media_blocks = softc->params.sectors;
1802 			info.d_media_size = 0;
1803 			info.d_secpertrack = softc->params.secs_per_track;
1804 			info.d_nheads = softc->params.heads;
1805 			info.d_ncylinders = softc->params.cylinders;
1806 			info.d_secpercyl = softc->params.heads *
1807 						softc->params.secs_per_track;
1808 			info.d_serialno = xpt_path_serialno(periph->path);
1809 			doinfo = 1;
1810 		} else {
1811 			int	error;
1812 
1813 			announce_buf[0] = '\0';
1814 
1815 			/*
1816 			 * Retry any UNIT ATTENTION type errors.  They
1817 			 * are expected at boot.
1818 			 */
1819 			error = daerror(done_ccb, CAM_RETRY_SELTO,
1820 					SF_RETRY_UA|SF_NO_PRINT);
1821 			if (error == ERESTART) {
1822 				/*
1823 			 * A retry was scheduled, so
1824 				 * just return.
1825 				 */
1826 				return;
1827 			} else if (error != 0) {
1828 				struct scsi_sense_data *sense;
1829 				int asc, ascq;
1830 				int sense_key, error_code;
1831 				int have_sense;
1832 				cam_status status;
1833 				struct ccb_getdev *cgd;
1834 
1835 				/* Don't wedge this device's queue */
1836 				status = done_ccb->ccb_h.status;
1837 				if ((status & CAM_DEV_QFRZN) != 0)
1838 					cam_release_devq(done_ccb->ccb_h.path,
1839 							 /*relsim_flags*/0,
1840 							 /*reduction*/0,
1841 							 /*timeout*/0,
1842 							 /*getcount_only*/0);
1843 
1844 				cgd = &xpt_alloc_ccb()->cgd;
1845 				xpt_setup_ccb(&cgd->ccb_h,
1846 					      done_ccb->ccb_h.path,
1847 					      /* priority */ 1);
1848 				cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1849 				xpt_action((union ccb *)cgd);
1850 
1851 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1852 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1853 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1854 					have_sense = FALSE;
1855 				else
1856 					have_sense = TRUE;
1857 
1858 				if (have_sense) {
1859 					sense = &csio->sense_data;
1860 					scsi_extract_sense(sense, &error_code,
1861 							   &sense_key,
1862 							   &asc, &ascq);
1863 				}
1864 				/*
1865 				 * Attach to anything that claims to be a
1866 				 * direct access or optical disk device,
1867 				 * as long as it doesn't return a "Logical
1868 				 * unit not supported" (0x25) error.
1869 				 */
1870 				if ((have_sense) && (asc != 0x25)
1871 				 && (error_code == SSD_CURRENT_ERROR)) {
1872 					const char *sense_key_desc;
1873 					const char *asc_desc;
1874 
1875 					scsi_sense_desc(sense_key, asc, ascq,
1876 							&cgd->inq_data,
1877 							&sense_key_desc,
1878 							&asc_desc);
1879 					ksnprintf(announce_buf,
1880 					    sizeof(announce_buf),
1881 						"Attempt to query device "
1882 						"size failed: %s, %s",
1883 						sense_key_desc,
1884 						asc_desc);
1885 					info.d_media_blksize = 512;
1886 					doinfo = 1;
1887 				} else {
1888 					if (have_sense)
1889 						scsi_sense_print(
1890 							&done_ccb->csio);
1891 					else {
1892 						xpt_print(periph->path,
1893 						    "got CAM status %#x\n",
1894 						    done_ccb->ccb_h.status);
1895 					}
1896 
1897 					xpt_print(periph->path, "fatal error, "
1898 					    "failed to attach to device\n");
1899 
1900 					/*
1901 					 * Free up resources.
1902 					 */
1903 					cam_periph_invalidate(periph);
1904 				}
1905 				xpt_free_ccb(&cgd->ccb_h);
1906 			}
1907 		}
1908 		kfree(csio->data_ptr, M_SCSIDA);
1909 		if (announce_buf[0] != '\0') {
1910 			xpt_announce_periph(periph, announce_buf);
1911 			/*
1912 			 * Create our sysctl variables, now that we know
1913 			 * we have successfully attached.
1914 			 */
1915 			taskqueue_enqueue(taskqueue_thread[mycpuid],
1916 			    &softc->sysctl_task);
1917 		}
1918 
1919 		if (softc->trim_max_ranges) {
1920 			softc->disk.d_info.d_trimflag |= DA_FLAG_CAN_TRIM;
1921 			kprintf("%s%d: supports TRIM\n",
1922 			    periph->periph_name,
1923 			    periph->unit_number);
1924 		}
1925 		softc->state = DA_STATE_NORMAL;
1926 		/*
1927 		 * Since our peripheral may be invalidated by an error
1928 		 * above or an external event, we must release our CCB
1929 		 * before releasing the probe lock on the peripheral.
1930 		 * The peripheral will only go away once the last lock
1931 		 * is removed, and we need it around for the CCB release
1932 		 * operation.
1933 		 */
1934 		xpt_release_ccb(done_ccb);
1935 		cam_periph_unhold(periph, 0);
1936 		if (doinfo) {
1937 			CAM_SIM_UNLOCK(periph->sim);
1938 			disk_setdiskinfo(&softc->disk, &info);
1939 			CAM_SIM_LOCK(periph->sim);
1940 		}
1941 		return;
1942 	}
1943 	case DA_CCB_WAITING:
1944 	{
1945 		/* Caller will release the CCB */
1946 		wakeup(&done_ccb->ccb_h.cbfcnp);
1947 		return;
1948 	}
1949 	case DA_CCB_DUMP:
1950 		/* No-op.  We're polling */
1951 		return;
1952 	case DA_CCB_POLLED:
1953 		/* Caller releases ccb */
1954 		wakeup(&done_ccb->ccb_h.cbfcnp);
1955 		return;
1956 	default:
1957 		break;
1958 	}
1959 	xpt_release_ccb(done_ccb);
1960 }
1961 
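/*
 * Error handler for the driver.  Detect devices that reject 6 byte
 * READ/WRITE CDBs so the commands can be reissued with their 10 byte
 * equivalents; everything else is passed to the generic CAM peripheral
 * error handling.
 */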
1962 static int
1963 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1964 {
1965 	struct da_softc	  *softc;
1966 	struct cam_periph *periph;
1967 	int error;
1968 
1969 	periph = xpt_path_periph(ccb->ccb_h.path);
1970 	softc = (struct da_softc *)periph->softc;
1971 
1972 	/*
1973 	 * Automatically detect devices that do not support
1974 	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
1975 	 */
1976 	error = 0;
1977 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
1978 		error = cmd6workaround(ccb);
1979 	} else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
1980 		   CAM_SCSI_STATUS_ERROR)
1981 	 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
1982 	 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
1983 	 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
1984 	 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
1985 		int sense_key, error_code, asc, ascq;
1986 
1987 		scsi_extract_sense(&ccb->csio.sense_data,
1988 				   &error_code, &sense_key, &asc, &ascq);
1989 		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
1990 			error = cmd6workaround(ccb);
1991 	}
1992 	if (error == ERESTART)
1993 		return (ERESTART);
1994 
1995 	/*
1996 	 * XXX
1997 	 * Until we have a better way of doing pack validation,
1998 	 * don't treat UAs as errors.
1999 	 */
2000 	sense_flags |= SF_RETRY_UA;
2001 	return(cam_periph_error(ccb, cam_flags, sense_flags,
2002 				&softc->saved_ccb));
2003 }
2004 
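/*
 * Issue a PREVENT ALLOW MEDIUM REMOVAL command to lock or unlock the
 * media in a removable device, tracking the result in the softc flags.
 */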
2005 static void
2006 daprevent(struct cam_periph *periph, int action)
2007 {
2008 	struct	da_softc *softc;
2009 	union	ccb *ccb;
2010 	int	error;
2011 
2012 	softc = (struct da_softc *)periph->softc;
2013 
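	/*
	 * Nothing to do if the pack lock state already matches the
	 * requested action.
	 */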
2014 	if (((action == PR_ALLOW)
2015 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
2016 	 || ((action == PR_PREVENT)
2017 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
2018 		return;
2019 	}
2020 
2021 	ccb = cam_periph_getccb(periph, /*priority*/1);
2022 	ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2023 
2024 	scsi_prevent(&ccb->csio,
2025 		     /*retries*/1,
2026 		     /*cbfcnp*/dadone,
2027 		     MSG_SIMPLE_Q_TAG,
2028 		     action,
2029 		     SSD_FULL_SIZE,
2030 		     5000);
2031 
2032 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
2033 				  SF_RETRY_UA, &softc->device_stats);
2034 
2035 	if (error == 0) {
2036 		if (action == PR_ALLOW)
2037 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
2038 		else
2039 			softc->flags |= DA_FLAG_PACK_LOCKED;
2040 	}
2041 
2042 	xpt_release_ccb(ccb);
2043 }
2044 
2045 /*
2046  * Check media on open, e.g. card readers that have no media at initial attach.
2047  */
2048 static int
2049 dacheckmedia(struct cam_periph *periph)
2050 {
2051 	struct disk_params *dp;
2052 	struct da_softc *softc;
2053 	struct disk_info info;
2054 	int error;
2055 	int mute;
2056 
2057 	softc = (struct da_softc *)periph->softc;
2058 	dp = &softc->params;
2059 
2060 	if (softc->flags & DA_FLAG_CAP_MUTE)	/* additional ccb flags */
2061 		mute = CAM_QUIET;
2062 	else
2063 		mute = 0;
2064 
2065 	error = dagetcapacity(periph, mute);
2066 
2067 	/*
2068 	 * Only reprobe on initial open and if the media is removable.
2069 	 *
2070 	 * NOTE: Calling disk_setdiskinfo() forces a reprobe of the
2071 	 *	 slices and partitions, which takes time and can mess
2072 	 *	 up booting.  So avoid it if nothing has changed.
2073 	 *	 XXX
2074 	 */
2075 	if (softc->flags & DA_FLAG_OPEN)
2076 		return (error);
2077 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
2078 		return (error);
2079 
2080 	bzero(&info, sizeof(info));
2081 	info.d_type = DTYPE_SCSI;
2082 	info.d_serialno = xpt_path_serialno(periph->path);
2083 
2084 	if (error == 0) {
2085 		CAM_SIM_UNLOCK(periph->sim);
2086 		info.d_media_blksize = softc->params.secsize;
2087 		info.d_media_blocks = softc->params.sectors;
2088 		info.d_media_size = 0;
2089 		info.d_secpertrack = softc->params.secs_per_track;
2090 		info.d_nheads = softc->params.heads;
2091 		info.d_ncylinders = softc->params.cylinders;
2092 		info.d_secpercyl = softc->params.heads *
2093 					softc->params.secs_per_track;
2094 		info.d_serialno = xpt_path_serialno(periph->path);
2095 		if (info.d_media_blocks != softc->disk.d_info.d_media_blocks) {
2096 			kprintf("%s%d: open removable media: "
2097 				"%juMB (%ju %u byte sectors: %dH %dS/T %dC)\n",
2098 				periph->periph_name, periph->unit_number,
2099 				(uintmax_t)(((uintmax_t)dp->secsize *
2100 					     dp->sectors) / (1024*1024)),
2101 				(uintmax_t)dp->sectors, dp->secsize,
2102 				dp->heads, dp->secs_per_track, dp->cylinders);
2103 			disk_setdiskinfo(&softc->disk, &info);
2104 		}
2105 		CAM_SIM_LOCK(periph->sim);
2106 	} else {
2107 		if (!mute || bootverbose) {
2108 			kprintf("%s%d: open removable media: "
2109 				"no media present\n",
2110 				periph->periph_name, periph->unit_number);
2111 		}
2112 		info.d_media_blksize = 512;
2113 		disk_setdiskinfo(&softc->disk, &info);
2114 	}
2115 	return (error);
2116 }
2117 
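/*
 * Query the device capacity with READ CAPACITY(10), falling back to
 * READ CAPACITY(16) for devices too large to report their size in the
 * 10 byte data, and update the geometry on success.
 */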
2118 static int
2119 dagetcapacity(struct cam_periph *periph, int ccbflags)
2120 {
2121 	struct da_softc *softc;
2122 	union ccb *ccb;
2123 	struct scsi_read_capacity_data *rcap;
2124 	struct scsi_read_capacity_data_16 *rcaplong;
2125 	uint32_t block_len;
2126 	uint64_t maxsector;
2127 	int error;
2128 
2129 	softc = (struct da_softc *)periph->softc;
2130 	block_len = 0;
2131 	maxsector = 0;
2132 	error = 0;
2133 
2134 	/* Do a read capacity */
2135 	rcap = (struct scsi_read_capacity_data *)kmalloc(sizeof(*rcaplong),
2136 							 M_SCSIDA, M_INTWAIT);
2137 
2138 	ccb = cam_periph_getccb(periph, /*priority*/1);
2139 	ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2140 
2141 	scsi_read_capacity(&ccb->csio,
2142 			   /*retries*/4,
2143 			   /*cbfcnp*/dadone,
2144 			   MSG_SIMPLE_Q_TAG,
2145 			   rcap,
2146 			   SSD_FULL_SIZE,
2147 			   /*timeout*/60000);
2148 	ccb->ccb_h.ccb_bio = NULL;
2149 	ccb->ccb_h.flags |= ccbflags;
2150 
2151 	error = cam_periph_runccb(ccb, daerror,
2152 				  /*cam_flags*/CAM_RETRY_SELTO,
2153 				  /*sense_flags*/SF_RETRY_UA,
2154 				  &softc->device_stats);
2155 
2156 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2157 		cam_release_devq(ccb->ccb_h.path,
2158 				 /*relsim_flags*/0,
2159 				 /*reduction*/0,
2160 				 /*timeout*/0,
2161 				 /*getcount_only*/0);
2162 
2163 	if (error == 0) {
2164 		block_len = scsi_4btoul(rcap->length);
2165 		maxsector = scsi_4btoul(rcap->addr);
2166 
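		/*
		 * A maximum LBA of 0xffffffff means the capacity does
		 * not fit in the READ CAPACITY(10) data; fall through
		 * and issue READ CAPACITY(16) below.
		 */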
2167 		if (maxsector != 0xffffffff)
2168 			goto done;
2169 	} else
2170 		goto done;
2171 
2172 	rcaplong = (struct scsi_read_capacity_data_16 *)rcap;
2173 
2174 	scsi_read_capacity_16(&ccb->csio,
2175 			      /*retries*/ 4,
2176 			      /*cbfcnp*/ dadone,
2177 			      /*tag_action*/ MSG_SIMPLE_Q_TAG,
2178 			      /*lba*/ 0,
2179 			      /*reladr*/ 0,
2180 			      /*pmi*/ 0,
2181 			      rcaplong,
2182 			      /*sense_len*/ SSD_FULL_SIZE,
2183 			      /*timeout*/ 60000);
2184 	ccb->ccb_h.ccb_bio = NULL;
2185 
2186 	error = cam_periph_runccb(ccb, daerror,
2187 				  /*cam_flags*/CAM_RETRY_SELTO,
2188 				  /*sense_flags*/SF_RETRY_UA,
2189 				  &softc->device_stats);
2190 
2191 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2192 		cam_release_devq(ccb->ccb_h.path,
2193 				 /*relsim_flags*/0,
2194 				 /*reduction*/0,
2195 				 /*timeout*/0,
2196 				 /*getcount_only*/0);
2197 
2198 	if (error == 0) {
2199 		block_len = scsi_4btoul(rcaplong->length);
2200 		maxsector = scsi_8btou64(rcaplong->addr);
2201 	}
2202 
2203 done:
2204 
2205 	if (error == 0)
2206 		dasetgeom(periph, block_len, maxsector);
2207 
2208 	xpt_release_ccb(ccb);
2209 
2210 	kfree(rcap, M_SCSIDA);
2211 
2212 	return (error);
2213 }
2214 
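/*
 * Record the block size and sector count, then ask the controller for
 * a geometry via XPT_CALC_GEOMETRY, falling back to a synthetic layout
 * if that request fails.
 */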
2215 static void
2216 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
2217 {
2218 	struct ccb_calc_geometry *ccg;
2219 	struct da_softc *softc;
2220 	struct disk_params *dp;
2221 
2222 	softc = (struct da_softc *)periph->softc;
2223 
2224 	dp = &softc->params;
2225 	dp->secsize = block_len;
2226 	dp->sectors = maxsector + 1;
2227 	/*
2228 	 * Have the controller provide us with a geometry
2229 	 * for this disk.  The only time the geometry
2230 	 * matters is when we boot and the controller
2231 	 * is the only one knowledgeable enough to come
2232 	 * up with something that will make this a bootable
2233 	 * device.
2234 	 */
2235 	ccg = &xpt_alloc_ccb()->ccg;
2236 	xpt_setup_ccb(&ccg->ccb_h, periph->path, /*priority*/1);
2237 	ccg->ccb_h.func_code = XPT_CALC_GEOMETRY;
2238 	ccg->block_size = dp->secsize;
2239 	ccg->volume_size = dp->sectors;
2240 	ccg->heads = 0;
2241 	ccg->secs_per_track = 0;
2242 	ccg->cylinders = 0;
2243 	xpt_action((union ccb*)ccg);
2244 	if ((ccg->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2245 		/*
2246 		 * We don't know what went wrong here, so just pick
2247 		 * a geometry that avoids nasty things like division
2248 		 * by zero.
2249 		 */
2250 		dp->heads = 255;
2251 		dp->secs_per_track = 255;
2252 		dp->cylinders = dp->sectors / (255 * 255);
2253 		if (dp->cylinders == 0) {
2254 			dp->cylinders = 1;
2255 		}
2256 	} else {
2257 		dp->heads = ccg->heads;
2258 		dp->secs_per_track = ccg->secs_per_track;
2259 		dp->cylinders = ccg->cylinders;
2260 	}
2261 	xpt_free_ccb(&ccg->ccb_h);
2262 }
2263 
2264 /*
2265  * Step through all DA peripheral drivers, and if the device is still open,
2266  * sync the disk cache to physical media.
2267  */
2268 static void
2269 dashutdown(void * arg, int howto)
2270 {
2271 	struct cam_periph *periph;
2272 	struct da_softc *softc;
2273 
2274 	TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
2275 		union ccb *ccb;
2276 
2277 		cam_periph_lock(periph);
2278 		softc = (struct da_softc *)periph->softc;
2279 
2280 		/*
2281 		 * We only sync the cache if the drive is still open, and
2282 		 * We only sync the cache if the drive is still open and
2283 		 * if the drive is capable of it.
2284 		if (((softc->flags & DA_FLAG_OPEN) == 0)
2285 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
2286 			cam_periph_unlock(periph);
2287 			continue;
2288 		}
2289 
2290 		ccb = xpt_alloc_ccb();
2291 		xpt_setup_ccb(&ccb->ccb_h, periph->path, /*priority*/1);
2292 
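		/*
		 * Mark the CCB so dadone() treats the completion as a
		 * no-op; completion is polled for below.
		 */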
2293 		ccb->ccb_h.ccb_state = DA_CCB_DUMP;
2294 		scsi_synchronize_cache(&ccb->csio,
2295 				       /*retries*/1,
2296 				       /*cbfcnp*/dadone,
2297 				       MSG_SIMPLE_Q_TAG,
2298 				       /*begin_lba*/0, /* whole disk */
2299 				       /*lb_count*/0,
2300 				       SSD_FULL_SIZE,
2301 				       60 * 60 * 1000);
2302 
2303 		xpt_polled_action(ccb);
2304 
2305 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2306 			if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
2307 			     CAM_SCSI_STATUS_ERROR)
2308 			 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)){
2309 				int error_code, sense_key, asc, ascq;
2310 
2311 				scsi_extract_sense(&ccb->csio.sense_data,
2312 						   &error_code, &sense_key,
2313 						   &asc, &ascq);
2314 
2315 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
2316 					scsi_sense_print(&ccb->csio);
2317 			} else {
2318 				xpt_print(periph->path, "Synchronize "
2319 				    "cache failed, status == 0x%x, scsi status "
2320 				    "== 0x%x\n", ccb->ccb_h.status,
2321 				    ccb->csio.scsi_status);
2322 			}
2323 		}
2324 
2325 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2326 			cam_release_devq(ccb->ccb_h.path,
2327 					 /*relsim_flags*/0,
2328 					 /*reduction*/0,
2329 					 /*timeout*/0,
2330 					 /*getcount_only*/0);
2331 
2332 		cam_periph_unlock(periph);
2333 		xpt_free_ccb(&ccb->ccb_h);
2334 	}
2335 }
2336 
2337 #else /* !_KERNEL */
2338 
2339 /*
2340  * XXX This is only left out of the kernel build to silence warnings.  If,
2341  * for some reason, this function is used in the kernel, the ifdefs should
2342  * be moved so it is included both in the kernel and userland.
2343  */
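/*
 * Build a FORMAT UNIT CDB in the supplied CCB.
 */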
2344 void
2345 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
2346 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
2347 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
2348 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
2349 		 u_int32_t timeout)
2350 {
2351 	struct scsi_format_unit *scsi_cmd;
2352 
2353 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
2354 	scsi_cmd->opcode = FORMAT_UNIT;
2355 	scsi_cmd->byte2 = byte2;
2356 	scsi_ulto2b(ileave, scsi_cmd->interleave);
2357 
2358 	cam_fill_csio(csio,
2359 		      retries,
2360 		      cbfcnp,
2361 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
2362 		      tag_action,
2363 		      data_ptr,
2364 		      dxfer_len,
2365 		      sense_len,
2366 		      sizeof(*scsi_cmd),
2367 		      timeout);
2368 }
2369 
2370 #endif /* _KERNEL */
2371