xref: /dragonfly/sys/bus/cam/scsi/scsi_da.c (revision cec957e9)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD: src/sys/cam/scsi/scsi_da.c,v 1.42.2.46 2003/10/21 22:18:19 thomas Exp $
29  */
30 
31 #include <sys/param.h>
32 
33 #ifdef _KERNEL
34 
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/buf.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/conf.h>
42 #include <sys/devicestat.h>
43 #include <sys/disk.h>
44 #include <sys/dtype.h>
45 #include <sys/eventhandler.h>
46 #include <sys/malloc.h>
47 #include <sys/cons.h>
48 #include <sys/proc.h>
49 
50 #include <sys/buf2.h>
51 
52 #endif /* _KERNEL */
53 
54 #ifdef _KERNEL
55 #include <vm/pmap.h>
56 #endif
57 
58 #ifndef _KERNEL
59 #include <stdio.h>
60 #include <string.h>
61 #endif /* _KERNEL */
62 
63 #include <sys/camlib.h>
64 #include "../cam.h"
65 #include "../cam_ccb.h"
66 #include "../cam_extend.h"
67 #include "../cam_periph.h"
68 #include "../cam_xpt_periph.h"
69 #include "../cam_sim.h"
70 
71 #include "scsi_daio.h"
72 #include "scsi_message.h"
73 
74 #ifndef _KERNEL
75 #include "scsi_da.h"
76 #endif /* !_KERNEL */
77 
78 #ifdef _KERNEL
79 typedef enum {
80 	DA_STATE_PROBE,
81 	DA_STATE_PROBE2,
82 	DA_STATE_NORMAL
83 } da_state;
84 
85 typedef enum {
86 	DA_FLAG_PACK_INVALID	= 0x001,
87 	DA_FLAG_NEW_PACK	= 0x002,
88 	DA_FLAG_PACK_LOCKED	= 0x004,
89 	DA_FLAG_PACK_REMOVABLE	= 0x008,
90 	DA_FLAG_TAGGED_QUEUING	= 0x010,
91 	DA_FLAG_RETRY_UA	= 0x080,
92 	DA_FLAG_OPEN		= 0x100,
93 	DA_FLAG_SCTX_INIT	= 0x200,
94 	DA_FLAG_RD_LIMIT	= 0x400,
95 	DA_FLAG_WR_LIMIT	= 0x800,
96 	DA_FLAG_CAN_TRIM	= 0x1000
97 } da_flags;
98 
99 typedef enum {
100 	DA_Q_NONE		= 0x00,
101 	DA_Q_NO_SYNC_CACHE	= 0x01,
102 	DA_Q_NO_6_BYTE		= 0x02,
103 	DA_Q_NO_PREVENT		= 0x04
104 } da_quirks;
105 
106 typedef enum {
107 	DA_CCB_POLLED		= 0x00,
108 	DA_CCB_PROBE		= 0x01,
109 	DA_CCB_PROBE2		= 0x02,
110 	DA_CCB_BUFFER_IO	= 0x03,
111 	DA_CCB_WAITING		= 0x04,
112 	DA_CCB_DUMP		= 0x05,
113 	DA_CCB_TRIM		= 0x06,
114 	DA_CCB_TYPE_MASK	= 0x0F,
115 	DA_CCB_RETRY_UA		= 0x10
116 } da_ccb_state;
117 
118 /* Offsets into our private area for storing information */
119 #define ccb_state	ppriv_field0
120 #define ccb_bio		ppriv_ptr1
121 
122 struct disk_params {
123 	u_int8_t  heads;
124 	u_int32_t cylinders;
125 	u_int8_t  secs_per_track;
126 	u_int32_t secsize;	/* Number of bytes/sector */
127 	u_int64_t sectors;	/* total number sectors */
128 };
129 
130 #define TRIM_MAX_BLOCKS 8
131 #define TRIM_MAX_RANGES TRIM_MAX_BLOCKS * 64
132 struct trim_request {
133         uint8_t         data[TRIM_MAX_RANGES * 8];
134         struct bio      *bios[TRIM_MAX_RANGES];
135 };
136 
137 struct da_softc {
138 	struct	 bio_queue_head bio_queue_rd;
139 	struct	 bio_queue_head bio_queue_wr;
140 	struct	 bio_queue_head bio_queue_trim;
141 	struct	 devstat device_stats;
142 	SLIST_ENTRY(da_softc) links;
143 	LIST_HEAD(, ccb_hdr) pending_ccbs;
144 	da_state state;
145 	da_flags flags;
146 	da_quirks quirks;
147 	int	 minimum_cmd_size;
148 	int	 outstanding_cmds_rd;
149 	int	 outstanding_cmds_wr;
150 	int      trim_max_ranges;
151 	int      trim_running;
152 	int      trim_enabled;
153 	struct	 disk_params params;
154 	struct	 disk disk;
155 	union	 ccb saved_ccb;
156 	struct task		sysctl_task;
157 	struct sysctl_ctx_list	sysctl_ctx;
158 	struct sysctl_oid	*sysctl_tree;
159 	struct trim_request     trim_req;
160 };
161 
162 struct da_quirk_entry {
163 	struct scsi_inquiry_pattern inq_pat;
164 	da_quirks quirks;
165 };
166 
167 static const char quantum[] = "QUANTUM";
168 static const char microp[] = "MICROP";
169 
170 static struct da_quirk_entry da_quirk_table[] =
171 {
172 	/* SPI, FC devices */
173 	{
174 		/*
175 		 * Fujitsu M2513A MO drives.
176 		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
177 		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
178 		 * Reported by: W.Scholten <whs@xs4all.nl>
179 		 */
180 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
181 		/*quirks*/ DA_Q_NO_SYNC_CACHE
182 	},
183 	{
184 		/* See above. */
185 		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
186 		/*quirks*/ DA_Q_NO_SYNC_CACHE
187 	},
188 	{
189 		/*
190 		 * This particular Fujitsu drive doesn't like the
191 		 * synchronize cache command.
192 		 * Reported by: Tom Jackson <toj@gorilla.net>
193 		 */
194 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
195 		/*quirks*/ DA_Q_NO_SYNC_CACHE
196 	},
197 	{
198 		/*
199 		 * This drive doesn't like the synchronize cache command
200 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
201 		 * in NetBSD PR kern/6027, August 24, 1998.
202 		 */
203 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
204 		/*quirks*/ DA_Q_NO_SYNC_CACHE
205 	},
206 	{
207 		/*
208 		 * This drive doesn't like the synchronize cache command
209 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
210 		 * (PR 8882).
211 		 */
212 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
213 		/*quirks*/ DA_Q_NO_SYNC_CACHE
214 	},
215 	{
216 		/*
217 		 * Doesn't like the synchronize cache command.
218 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
219 		 */
220 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
221 		/*quirks*/ DA_Q_NO_SYNC_CACHE
222 	},
223 	{
224 		/*
225 		 * Doesn't like the synchronize cache command.
226 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
227 		 */
228 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
229 		/*quirks*/ DA_Q_NO_SYNC_CACHE
230 	},
231 	{
232 		/*
233 		 * Doesn't like the synchronize cache command.
234 		 */
235 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
236 		/*quirks*/ DA_Q_NO_SYNC_CACHE
237 	},
238 	{
239 		/*
240 		 * Doesn't like the synchronize cache command.
241 		 * Reported by: walter@pelissero.de
242 		 */
243 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
244 		/*quirks*/ DA_Q_NO_SYNC_CACHE
245 	},
246 	{
247 		/*
248 		 * Doesn't work correctly with 6 byte reads/writes.
249 		 * Returns illegal request, and points to byte 9 of the
250 		 * 6-byte CDB.
251 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
252 		 */
253 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
254 		/*quirks*/ DA_Q_NO_6_BYTE
255 	},
256 	{
257 		/* See above. */
258 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
259 		/*quirks*/ DA_Q_NO_6_BYTE
260 	},
261 	{
262 		/*
263 		 * Doesn't like the synchronize cache command.
264 		 * Reported by: walter@pelissero.de
265 		 */
266 		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
267 		/*quirks*/ DA_Q_NO_SYNC_CACHE
268 	},
269 	{
270 		/*
271 		 * The CISS RAID controllers do not support SYNC_CACHE
272 		 */
273 		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
274 		/*quirks*/ DA_Q_NO_SYNC_CACHE
275 	},
276 	{
277 		/*
278 		 * The same goes for the mly(4) controllers
279 		 */
280 		{T_DIRECT, SIP_MEDIA_FIXED, "MLY*", "*", "MYLX"},
281 		/*quirks*/ DA_Q_NO_SYNC_CACHE
282 	},
283 	/*
284 	 * USB mass storage devices supported by umass(4)
285 	 *
286 	 * NOTE: USB attachments automatically set DA_Q_NO_SYNC_CACHE so
287 	 *	 it does not have to be specified here.
288 	 */
289  	{
290  		/*
291  		 * Creative Nomad MUVO mp3 player (USB)
292  		 * PR: kern/53094
293  		 */
294  		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
295 		/*quirks*/ DA_Q_NO_PREVENT
296  	},
297 	{
298 		/*
299 		 * Sigmatel USB Flash MP3 Player
300 		 * PR: kern/57046
301 		 */
302 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
303 		/*quirks*/ DA_Q_NO_PREVENT
304 	},
305 	{
306 		/*
307 		 * SEAGRAND NP-900 MP3 Player
308 		 * PR: kern/64563
309 		 */
310 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
311 		/*quirks*/ DA_Q_NO_PREVENT
312 	},
313 	{
314 		/*
315 		 * Creative MUVO Slim mp3 player (USB)
316 		 * PR: usb/86131
317 		 */
318 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
319 		"*"}, /*quirks*/ DA_Q_NO_PREVENT
320 	},
321 	{
322 		/*
323 		 * Philips USB Key Audio KEY013
324 		 * PR: usb/68412
325 		 */
326 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
327 		/*quirks*/ DA_Q_NO_PREVENT
328 	},
329 };
330 
331 static	d_open_t	daopen;
332 static	d_close_t	daclose;
333 static	d_strategy_t	dastrategy;
334 static	d_dump_t	dadump;
335 static	d_ioctl_t	daioctl;
336 static	periph_init_t	dainit;
337 static	void		daasync(void *callback_arg, u_int32_t code,
338 				struct cam_path *path, void *arg);
339 static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
340 static	periph_ctor_t	daregister;
341 static	periph_dtor_t	dacleanup;
342 static	periph_start_t	dastart;
343 static	periph_oninv_t	daoninvalidate;
344 static	void		dadone(struct cam_periph *periph,
345 			       union ccb *done_ccb);
346 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
347 				u_int32_t sense_flags);
348 static void		daprevent(struct cam_periph *periph, int action);
349 static int		dagetcapacity(struct cam_periph *periph);
350 static int		dacheckmedia(struct cam_periph *periph);
351 static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
352 				  uint64_t maxsector);
353 static void		daflushbioq(struct bio_queue_head *bioq, int error);
354 static void		dashutdown(void *arg, int howto);
355 
356 #ifndef DA_DEFAULT_TIMEOUT
357 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
358 #endif
359 
360 #ifndef	DA_DEFAULT_RETRY
361 #define	DA_DEFAULT_RETRY	4
362 #endif
363 
364 static int da_retry_count = DA_DEFAULT_RETRY;
365 static int da_default_timeout = DA_DEFAULT_TIMEOUT;
366 
367 SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
368             "CAM Direct Access Disk driver");
369 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
370            &da_retry_count, 0, "Normal I/O retry count");
371 TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
372 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
373            &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
374 TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
375 
376 static struct periph_driver dadriver =
377 {
378 	dainit, "da",
379 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
380 };
381 
382 PERIPHDRIVER_DECLARE(da, dadriver);
383 
384 static struct dev_ops da_ops = {
385 	{ "da", 0, D_DISK | D_MPSAFE },
386 	.d_open =	daopen,
387 	.d_close =	daclose,
388 	.d_read =	physread,
389 	.d_write =	physwrite,
390 	.d_strategy =	dastrategy,
391 	.d_dump =	dadump,
392 	.d_ioctl =	daioctl
393 };
394 
395 static struct extend_array *daperiphs;
396 
397 MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
398 
399 static int
400 daioctl(struct dev_ioctl_args *ap)
401 {
402 	int unit;
403 	int error = 0;
404 	struct buf *bp;
405 	struct cam_periph *periph;
406 	int byte_count;
407 
408 	off_t *del_num = (off_t*)ap->a_data;
409 	off_t bytes_left;
410 	off_t bytes_start;
411 
412 	cdev_t dev = ap->a_head.a_dev;
413 
414 
415 	unit = dkunit(dev);
416 	periph = cam_extend_get(daperiphs, unit);
417 	if (periph == NULL)
418 		return(ENXIO);
419 
420 	switch (ap->a_cmd) {
421 	case DAIOCTRIM:
422 	{
423 
424 		bytes_left = del_num[1];
425 		bytes_start = del_num[0];
426 
427 		/* TRIM occurs on 512-byte sectors. */
428 		KKASSERT((bytes_left % 512) == 0);
429 		KKASSERT((bytes_start% 512) == 0);
430 
431 
432 		/* Break TRIM up into int-sized commands because of b_bcount */
433 		while(bytes_left) {
434 
435 			/*
436 			 * Rather than than squezing out more blocks in b_bcount
437 			 * and having to break up the TRIM request in da_start(),
438 			 * we ensure we can always TRIM this many bytes with one
439 			 * TRIM command (this happens if the device only
440 			 * supports one TRIM block).
441 			 *
442 			 * With min TRIM blksize of 1, TRIM command free
443 			 * 4194240 blks(64*65535): each LBA range can address
444 			 * 65535 blks and there 64 such ranges in a 512-byte
445 			 * block. And, 4194240 * 512 = 0x7FFF8000
446 			 *
447 			 */
448 			byte_count = MIN(bytes_left,0x7FFF8000);
449 			bp = getnewbuf(0, 0, 0, 1);
450 
451 			bp->b_cmd = BUF_CMD_FREEBLKS;
452 			bp->b_bio1.bio_offset = bytes_start;
453 			bp->b_bcount = byte_count;
454 			bp->b_bio1.bio_flags |= BIO_SYNC;
455 			bp->b_bio1.bio_done = biodone_sync;
456 
457 			dev_dstrategy(ap->a_head.a_dev, &bp->b_bio1);
458 
459 			if (biowait(&bp->b_bio1, "TRIM")) {
460 				kprintf("Error:%d\n", bp->b_error);
461 				brelse(bp);
462 				return(bp->b_error ? bp->b_error : EIO);
463 			}
464 			brelse(bp);
465 			bytes_left -= byte_count;
466 			bytes_start += byte_count;
467 		}
468 		break;
469 	}
470 	default:
471 		return(EINVAL);
472 	}
473 
474 	return(error);
475 }
476 
477 static int
478 daopen(struct dev_open_args *ap)
479 {
480 	cdev_t dev = ap->a_head.a_dev;
481 	struct cam_periph *periph;
482 	struct da_softc *softc;
483 	struct disk_info info;
484 	int unit;
485 	int error;
486 
487 	unit = dkunit(dev);
488 	periph = cam_extend_get(daperiphs, unit);
489 	if (periph == NULL) {
490 		return (ENXIO);
491 	}
492 
493 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
494 		return(ENXIO);
495 	}
496 
497 	cam_periph_lock(periph);
498 	if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
499 		cam_periph_unlock(periph);
500 		cam_periph_release(periph);
501 		return (error);
502 	}
503 
504 	unit = periph->unit_number;
505 	softc = (struct da_softc *)periph->softc;
506 
507 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
508 	    ("daopen: dev=%s (unit %d)\n", devtoname(dev),
509 	     unit));
510 
511 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
512 		/* Invalidate our pack information. */
513 		disk_invalidate(&softc->disk);
514 		softc->flags &= ~DA_FLAG_PACK_INVALID;
515 	}
516 
517 	error = dacheckmedia(periph);
518 	softc->flags |= DA_FLAG_OPEN;
519 
520 	if (error == 0) {
521 		struct ccb_getdev *cgd;
522 
523 		/* Build disk information structure */
524 		bzero(&info, sizeof(info));
525 		info.d_type = DTYPE_SCSI;
526 
527 		/*
528 		 * Grab the inquiry data to get the vendor and product names.
529 		 * Put them in the typename and packname for the label.
530 		 */
531 		cgd = &xpt_alloc_ccb()->cgd;
532 		xpt_setup_ccb(&cgd->ccb_h, periph->path, /*priority*/ 1);
533 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
534 		xpt_action((union ccb *)cgd);
535 		xpt_free_ccb(&cgd->ccb_h);
536 
537 		/*
538 		 * Check to see whether or not the blocksize is set yet.
539 		 * If it isn't, set it and then clear the blocksize
540 		 * unavailable flag for the device statistics.
541 		 */
542 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
543 			softc->device_stats.block_size = softc->params.secsize;
544 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
545 		}
546 	}
547 
548 	if (error == 0) {
549 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
550 		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
551 			daprevent(periph, PR_PREVENT);
552 	} else {
553 		softc->flags &= ~DA_FLAG_OPEN;
554 		cam_periph_release(periph);
555 	}
556 	cam_periph_unhold(periph, 1);
557 	return (error);
558 }
559 
560 static int
561 daclose(struct dev_close_args *ap)
562 {
563 	cdev_t dev = ap->a_head.a_dev;
564 	struct	cam_periph *periph;
565 	struct	da_softc *softc;
566 	int	unit;
567 	int	error;
568 
569 	unit = dkunit(dev);
570 	periph = cam_extend_get(daperiphs, unit);
571 	if (periph == NULL)
572 		return (ENXIO);
573 
574 	cam_periph_lock(periph);
575 	if ((error = cam_periph_hold(periph, 0)) != 0) {
576 		cam_periph_unlock(periph);
577 		cam_periph_release(periph);
578 		return (error);
579 	}
580 
581 	softc = (struct da_softc *)periph->softc;
582 
583 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
584 		union	ccb *ccb;
585 
586 		ccb = cam_periph_getccb(periph, /*priority*/1);
587 		ccb->ccb_h.ccb_state = DA_CCB_POLLED;
588 
589 		scsi_synchronize_cache(&ccb->csio,
590 				       /*retries*/1,
591 				       /*cbfcnp*/dadone,
592 				       MSG_SIMPLE_Q_TAG,
593 				       /*begin_lba*/0,/* Cover the whole disk */
594 				       /*lb_count*/0,
595 				       SSD_FULL_SIZE,
596 				       5 * 60 * 1000);
597 
598 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
599 				  /*sense_flags*/SF_RETRY_UA,
600 				  &softc->device_stats);
601 
602 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
603 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
604 			     CAM_SCSI_STATUS_ERROR) {
605 				int asc, ascq;
606 				int sense_key, error_code;
607 
608 				scsi_extract_sense(&ccb->csio.sense_data,
609 						   &error_code,
610 						   &sense_key,
611 						   &asc, &ascq);
612 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
613 					scsi_sense_print(&ccb->csio);
614 			} else {
615 				xpt_print(periph->path, "Synchronize cache "
616 				    "failed, status == 0x%x, scsi status == "
617 				    "0x%x\n", ccb->csio.ccb_h.status,
618 				    ccb->csio.scsi_status);
619 			}
620 		}
621 
622 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
623 			cam_release_devq(ccb->ccb_h.path,
624 					 /*relsim_flags*/0,
625 					 /*reduction*/0,
626 					 /*timeout*/0,
627 					 /*getcount_only*/0);
628 
629 		xpt_release_ccb(ccb);
630 
631 	}
632 
633 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
634 		if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
635 			daprevent(periph, PR_ALLOW);
636 		/*
637 		 * If we've got removeable media, mark the blocksize as
638 		 * unavailable, since it could change when new media is
639 		 * inserted.
640 		 */
641 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
642 	}
643 
644 	/*
645 	 * Don't compound any ref counting software bugs with more.
646 	 */
647 	if (softc->flags & DA_FLAG_OPEN) {
648 		softc->flags &= ~DA_FLAG_OPEN;
649 		cam_periph_release(periph);
650 	} else {
651 		xpt_print(periph->path,
652 			  "daclose() called on an already closed device!\n");
653 	}
654 	cam_periph_unhold(periph, 1);
655 	return (0);
656 }
657 
658 /*
659  * Actually translate the requested transfer into one the physical driver
660  * can understand.  The transfer is described by a buf and will include
661  * only one physical transfer.
662  */
663 static int
664 dastrategy(struct dev_strategy_args *ap)
665 {
666 	cdev_t dev = ap->a_head.a_dev;
667 	struct bio *bio = ap->a_bio;
668 	struct buf *bp = bio->bio_buf;
669 	struct cam_periph *periph;
670 	struct da_softc *softc;
671 	u_int  unit;
672 
673 	unit = dkunit(dev);
674 	periph = cam_extend_get(daperiphs, unit);
675 	if (periph == NULL) {
676 		bp->b_error = ENXIO;
677 		goto bad;
678 	}
679 	softc = (struct da_softc *)periph->softc;
680 
681 	cam_periph_lock(periph);
682 
683 #if 0
684 	/*
685 	 * check it's not too big a transfer for our adapter
686 	 */
687 	scsi_minphys(bp, &sd_switch);
688 #endif
689 
690 	/*
691 	 * Mask interrupts so that the pack cannot be invalidated until
692 	 * after we are in the queue.  Otherwise, we might not properly
693 	 * clean up one of the buffers.
694 	 */
695 
696 	/*
697 	 * If the device has been made invalid, error out
698 	 */
699 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
700 		cam_periph_unlock(periph);
701 		bp->b_error = ENXIO;
702 		goto bad;
703 	}
704 
705 	/*
706 	 * Place it in the queue of disk activities for this disk
707 	 */
708 	if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)
709 		bioqdisksort(&softc->bio_queue_wr, bio);
710 	else if (bp->b_cmd == BUF_CMD_FREEBLKS)
711 		bioqdisksort(&softc->bio_queue_trim, bio);
712 	else
713 		bioqdisksort(&softc->bio_queue_rd, bio);
714 
715 	/*
716 	 * Schedule ourselves for performing the work.
717 	 */
718 	xpt_schedule(periph, /* XXX priority */1);
719 	cam_periph_unlock(periph);
720 
721 	return(0);
722 bad:
723 	bp->b_flags |= B_ERROR;
724 
725 	/*
726 	 * Correctly set the buf to indicate a completed xfer
727 	 */
728 	bp->b_resid = bp->b_bcount;
729 	biodone(bio);
730 	return(0);
731 }
732 
733 static int
734 dadump(struct dev_dump_args *ap)
735 {
736 	cdev_t dev = ap->a_head.a_dev;
737 	struct	    cam_periph *periph;
738 	struct	    da_softc *softc;
739 	u_int	    unit;
740 	u_int32_t   secsize;
741 	struct	    ccb_scsiio *csio;
742 
743 	unit = dkunit(dev);
744 	periph = cam_extend_get(daperiphs, unit);
745 	if (periph == NULL)
746 		return (ENXIO);
747 
748 	softc = (struct da_softc *)periph->softc;
749 	cam_periph_lock(periph);
750 	secsize = softc->params.secsize; /* XXX: or ap->a_secsize? */
751 
752 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
753 		cam_periph_unlock(periph);
754 		return (ENXIO);
755 	}
756 
757 	csio = &xpt_alloc_ccb()->csio;
758 
759 	/*
760 	 * because length == 0 means we are supposed to flush cache, we only
761 	 * try to write something if length > 0.
762 	 */
763 	if (ap->a_length > 0) {
764 		xpt_setup_ccb(&csio->ccb_h, periph->path, /*priority*/1);
765 		csio->ccb_h.flags |= CAM_POLLED;
766 		csio->ccb_h.ccb_state = DA_CCB_DUMP;
767 		scsi_read_write(csio,
768 				/*retries*/1,
769 				dadone,
770 				MSG_ORDERED_Q_TAG,
771 				/*read*/FALSE,
772 				/*byte2*/0,
773 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
774 				ap->a_offset / secsize,
775 				ap->a_length / secsize,
776 				/*data_ptr*/(u_int8_t *) ap->a_virtual,
777 				/*dxfer_len*/ap->a_length,
778 				/*sense_len*/SSD_FULL_SIZE,
779 				DA_DEFAULT_TIMEOUT * 1000);
780 		xpt_polled_action((union ccb *)csio);
781 
782 		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
783 			kprintf("Aborting dump due to I/O error.\n");
784 			if ((csio->ccb_h.status & CAM_STATUS_MASK) ==
785 			     CAM_SCSI_STATUS_ERROR)
786 				scsi_sense_print(csio);
787 			else
788 				kprintf("status == 0x%x, scsi status == 0x%x\n",
789 				       csio->ccb_h.status, csio->scsi_status);
790 			cam_periph_unlock(periph);
791 			xpt_free_ccb(&csio->ccb_h);
792 			return(EIO);
793 		}
794 		goto done;
795 	}
796 
797 	/*
798 	 * Sync the disk cache contents to the physical media.
799 	 */
800 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
801 
802 		xpt_setup_ccb(&csio->ccb_h, periph->path, /*priority*/1);
803 		csio->ccb_h.ccb_state = DA_CCB_DUMP;
804 		scsi_synchronize_cache(csio,
805 				       /*retries*/1,
806 				       /*cbfcnp*/dadone,
807 				       MSG_SIMPLE_Q_TAG,
808 				       /*begin_lba*/0,/* Cover the whole disk */
809 				       /*lb_count*/0,
810 				       SSD_FULL_SIZE,
811 				       5 * 60 * 1000);
812 		xpt_polled_action((union ccb *)csio);
813 
814 		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
815 			if ((csio->ccb_h.status & CAM_STATUS_MASK) ==
816 			     CAM_SCSI_STATUS_ERROR) {
817 				int asc, ascq;
818 				int sense_key, error_code;
819 
820 				scsi_extract_sense(&csio->sense_data,
821 						   &error_code,
822 						   &sense_key,
823 						   &asc, &ascq);
824 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
825 					scsi_sense_print(csio);
826 			} else {
827 				xpt_print(periph->path, "Synchronize cache "
828 				    "failed, status == 0x%x, scsi status == "
829 				    "0x%x\n",
830 				    csio->ccb_h.status, csio->scsi_status);
831 			}
832 		}
833 	}
834 done:
835 	cam_periph_unlock(periph);
836 	xpt_free_ccb(&csio->ccb_h);
837 
838 	return (0);
839 }
840 
841 static void
842 dainit(void)
843 {
844 	cam_status status;
845 
846 	/*
847 	 * Create our extend array for storing the devices we attach to.
848 	 */
849 	daperiphs = cam_extend_new();
850 	if (daperiphs == NULL) {
851 		kprintf("da: Failed to alloc extend array!\n");
852 		return;
853 	}
854 
855 	/*
856 	 * Install a global async callback.  This callback will
857 	 * receive async callbacks like "new device found".
858 	 */
859 	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
860 
861 	if (status != CAM_REQ_CMP) {
862 		kprintf("da: Failed to attach master async callback "
863 		       "due to status 0x%x!\n", status);
864 	} else {
865 		/* Register our shutdown event handler */
866 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
867 					   NULL, SHUTDOWN_PRI_SECOND)) == NULL)
868 			kprintf("%s: shutdown event registration failed!\n",
869 			    __func__);
870 	}
871 }
872 
873 static void
874 daoninvalidate(struct cam_periph *periph)
875 {
876 	struct da_softc *softc;
877 
878 	softc = (struct da_softc *)periph->softc;
879 
880 	/*
881 	 * De-register any async callbacks.
882 	 */
883 	xpt_register_async(0, daasync, periph, periph->path);
884 
885 	softc->flags |= DA_FLAG_PACK_INVALID;
886 
887 	/*
888 	 * Return all queued I/O with ENXIO.
889 	 * XXX Handle any transactions queued to the card
890 	 *     with XPT_ABORT_CCB.
891 	 */
892 	daflushbioq(&softc->bio_queue_trim, ENXIO);
893 	daflushbioq(&softc->bio_queue_wr, ENXIO);
894 	daflushbioq(&softc->bio_queue_rd, ENXIO);
895 	xpt_print(periph->path, "lost device\n");
896 }
897 
898 static void
899 daflushbioq(struct bio_queue_head *bioq, int error)
900 {
901 	struct bio *q_bio;
902 	struct buf *q_bp;
903 
904 	while ((q_bio = bioq_first(bioq)) != NULL){
905 		bioq_remove(bioq, q_bio);
906 		q_bp = q_bio->bio_buf;
907 		q_bp->b_resid = q_bp->b_bcount;
908 		q_bp->b_error = error;
909 		q_bp->b_flags |= B_ERROR;
910 		biodone(q_bio);
911 	}
912 }
913 
914 static void
915 dacleanup(struct cam_periph *periph)
916 {
917 	struct da_softc *softc;
918 
919 	softc = (struct da_softc *)periph->softc;
920 
921 	devstat_remove_entry(&softc->device_stats);
922 	cam_extend_release(daperiphs, periph->unit_number);
923 	xpt_print(periph->path, "removing device entry\n");
924 	/*
925 	 * If we can't free the sysctl tree, oh well...
926 	 */
927 	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
928 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
929 		xpt_print(periph->path, "can't remove sysctl context\n");
930 	}
931 	periph->softc = NULL;
932 	if (softc->disk.d_rawdev) {
933 		cam_periph_unlock(periph);
934 		disk_destroy(&softc->disk);
935 		cam_periph_lock(periph);
936 	}
937 
938 	kfree(softc, M_DEVBUF);
939 }
940 
941 static void
942 daasync(void *callback_arg, u_int32_t code,
943 	struct cam_path *path, void *arg)
944 {
945 	struct cam_periph *periph;
946 
947 	periph = (struct cam_periph *)callback_arg;
948 
949 	switch (code) {
950 	case AC_FOUND_DEVICE:
951 	{
952 		struct ccb_getdev *cgd;
953 		cam_status status;
954 
955 		cgd = (struct ccb_getdev *)arg;
956 		if (cgd == NULL)
957 			break;
958 
959 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
960 		    && SID_TYPE(&cgd->inq_data) != T_RBC
961 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
962 			break;
963 
964 		/*
965 		 * Don't complain if a valid peripheral is already attached.
966 		 */
967 		periph = cam_periph_find(cgd->ccb_h.path, "da");
968 		if (periph && (periph->flags & CAM_PERIPH_INVALID) == 0)
969 			break;
970 
971 		/*
972 		 * Allocate a peripheral instance for
973 		 * this device and start the probe
974 		 * process.
975 		 */
976 		status = cam_periph_alloc(daregister, daoninvalidate,
977 					  dacleanup, dastart,
978 					  "da", CAM_PERIPH_BIO,
979 					  cgd->ccb_h.path, daasync,
980 					  AC_FOUND_DEVICE, cgd);
981 
982 		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
983 			kprintf("%s: Unable to attach to new device "
984 			    "due to status 0x%x\n", __func__, status);
985 		}
986 		break;
987 	}
988 	case AC_SENT_BDR:
989 	case AC_BUS_RESET:
990 	{
991 		struct da_softc *softc;
992 		struct ccb_hdr *ccbh;
993 
994 		softc = (struct da_softc *)periph->softc;
995 		/*
996 		 * Don't fail on the expected unit attention
997 		 * that will occur.
998 		 */
999 		softc->flags |= DA_FLAG_RETRY_UA;
1000 		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
1001 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
1002 		/* FALLTHROUGH*/
1003 	}
1004 	default:
1005 		cam_periph_async(periph, code, path, arg);
1006 		break;
1007 	}
1008 }
1009 
1010 static void
1011 dasysctlinit(void *context, int pending)
1012 {
1013 	struct cam_periph *periph;
1014 	struct da_softc *softc;
1015 	char tmpstr[80], tmpstr2[80];
1016 
1017 	periph = (struct cam_periph *)context;
1018 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1019 		return;
1020 	}
1021 
1022 	softc = (struct da_softc *)periph->softc;
1023 	ksnprintf(tmpstr, sizeof(tmpstr),
1024 		  "CAM DA unit %d", periph->unit_number);
1025 	ksnprintf(tmpstr2, sizeof(tmpstr2),
1026 		  "%d", periph->unit_number);
1027 
1028 	sysctl_ctx_free(&softc->sysctl_ctx);
1029 	sysctl_ctx_init(&softc->sysctl_ctx);
1030 	softc->flags |= DA_FLAG_SCTX_INIT;
1031 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1032 		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1033 		CTLFLAG_RD, 0, tmpstr);
1034 	if (softc->sysctl_tree == NULL) {
1035 		kprintf("%s: unable to allocate sysctl tree\n", __func__);
1036 		cam_periph_release(periph);
1037 		return;
1038 	}
1039 
1040 	/*
1041 	 * Now register the sysctl handler, so the user can the value on
1042 	 * the fly.
1043 	 */
1044 	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1045 		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1046 		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1047 		"Minimum CDB size");
1048 
1049 	/* Only create the option if the device supports TRIM */
1050 	if (softc->disk.d_info.d_trimflag) {
1051 		SYSCTL_ADD_INT(&softc->sysctl_ctx,
1052 		    SYSCTL_CHILDREN(softc->sysctl_tree),
1053 		    OID_AUTO,
1054 		    "trim_enabled",
1055 		    CTLFLAG_RW,
1056 		    &softc->trim_enabled,
1057 		    0,
1058 		    "Enable TRIM for this device (SSD))");
1059 	}
1060 
1061 	cam_periph_release(periph);
1062 }
1063 
1064 static int
1065 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
1066 {
1067 	int error, value;
1068 
1069 	value = *(int *)arg1;
1070 
1071 	error = sysctl_handle_int(oidp, &value, 0, req);
1072 
1073 	if ((error != 0)
1074 	 || (req->newptr == NULL))
1075 		return (error);
1076 
1077 	/*
1078 	 * Acceptable values here are 6, 10 or 12, or 16.
1079 	 */
1080 	if (value < 6)
1081 		value = 6;
1082 	else if ((value > 6)
1083 	      && (value <= 10))
1084 		value = 10;
1085 	else if ((value > 10)
1086 	      && (value <= 12))
1087 		value = 12;
1088 	else if (value > 12)
1089 		value = 16;
1090 
1091 	*(int *)arg1 = value;
1092 
1093 	return (0);
1094 }
1095 
1096 static cam_status
1097 daregister(struct cam_periph *periph, void *arg)
1098 {
1099 	struct da_softc *softc;
1100 	struct ccb_pathinq *cpi;
1101 	struct ccb_getdev *cgd;
1102 	char tmpstr[80];
1103 	caddr_t match;
1104 
1105 	cgd = (struct ccb_getdev *)arg;
1106 	if (periph == NULL) {
1107 		kprintf("%s: periph was NULL!!\n", __func__);
1108 		return(CAM_REQ_CMP_ERR);
1109 	}
1110 
1111 	if (cgd == NULL) {
1112 		kprintf("%s: no getdev CCB, can't register device\n",
1113 		    __func__);
1114 		return(CAM_REQ_CMP_ERR);
1115 	}
1116 
1117 	softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
1118 	sysctl_ctx_init(&softc->sysctl_ctx);
1119 	LIST_INIT(&softc->pending_ccbs);
1120 	softc->state = DA_STATE_PROBE;
1121 	bioq_init(&softc->bio_queue_trim);
1122 	bioq_init(&softc->bio_queue_rd);
1123 	bioq_init(&softc->bio_queue_wr);
1124 	if (SID_IS_REMOVABLE(&cgd->inq_data))
1125 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
1126 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1127 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
1128 
1129 	/* Used to get TRIM status from AHCI driver */
1130 	if (cgd->inq_data.vendor_specific1[0] == 1) {
1131 		/*
1132 		 * max number of lba ranges an SSD can handle in a single
1133 		 * TRIM command. vendor_specific1[1] is the num of 512-byte
1134 		 * blocks the SSD reports that can be passed in a TRIM cmd.
1135 		 */
1136 		softc->trim_max_ranges =
1137 		   min(cgd->inq_data.vendor_specific1[1] * 64, TRIM_MAX_RANGES);
1138 	}
1139 
1140 	periph->softc = softc;
1141 
1142 	cam_extend_set(daperiphs, periph->unit_number, periph);
1143 
1144 	/*
1145 	 * See if this device has any quirks.
1146 	 */
1147 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1148 			       (caddr_t)da_quirk_table,
1149 			       NELEM(da_quirk_table),
1150 			       sizeof(*da_quirk_table), scsi_inquiry_match);
1151 
1152 	if (match != NULL)
1153 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1154 	else
1155 		softc->quirks = DA_Q_NONE;
1156 
1157 	/*
1158 	 * Unconditionally disable the synchronize cache command for
1159 	 * usb attachments.  It's just impossible to determine if the
1160 	 * device supports it or not and if it doesn't the port can
1161 	 * brick.
1162 	 */
1163 	if (strncmp(periph->sim->sim_name, "umass", 4) == 0) {
1164 		softc->quirks |= DA_Q_NO_SYNC_CACHE;
1165 	}
1166 
1167 	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
1168 
1169 	/* Check if the SIM does not want 6 byte commands */
1170 	cpi = &xpt_alloc_ccb()->cpi;
1171 	xpt_setup_ccb(&cpi->ccb_h, periph->path, /*priority*/1);
1172 	cpi->ccb_h.func_code = XPT_PATH_INQ;
1173 	xpt_action((union ccb *)cpi);
1174 	if (cpi->ccb_h.status == CAM_REQ_CMP && (cpi->hba_misc & PIM_NO_6_BYTE))
1175 		softc->quirks |= DA_Q_NO_6_BYTE;
1176 
1177 	/*
1178 	 * RBC devices don't have to support READ(6), only READ(10).
1179 	 */
1180 	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
1181 		softc->minimum_cmd_size = 10;
1182 	else
1183 		softc->minimum_cmd_size = 6;
1184 
1185 	/*
1186 	 * Load the user's default, if any.
1187 	 */
1188 	ksnprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
1189 		 periph->unit_number);
1190 	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
1191 
1192 	/*
1193 	 * 6, 10, 12, and 16 are the currently permissible values.
1194 	 */
1195 	if (softc->minimum_cmd_size < 6)
1196 		softc->minimum_cmd_size = 6;
1197 	else if ((softc->minimum_cmd_size > 6)
1198 	      && (softc->minimum_cmd_size <= 10))
1199 		softc->minimum_cmd_size = 10;
1200 	else if ((softc->minimum_cmd_size > 10)
1201 	      && (softc->minimum_cmd_size <= 12))
1202 		softc->minimum_cmd_size = 12;
1203 	else if (softc->minimum_cmd_size > 12)
1204 		softc->minimum_cmd_size = 16;
1205 
1206 	/*
1207 	 * The DA driver supports a blocksize, but
1208 	 * we don't know the blocksize until we do
1209 	 * a read capacity.  So, set a flag to
1210 	 * indicate that the blocksize is
1211 	 * unavailable right now.  We'll clear the
1212 	 * flag as soon as we've done a read capacity.
1213 	 */
1214 	devstat_add_entry(&softc->device_stats, "da",
1215 			  periph->unit_number, 0,
1216 	  		  DEVSTAT_BS_UNAVAILABLE,
1217 			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1218 			  DEVSTAT_PRIORITY_DISK);
1219 
1220 	/*
1221 	 * Register this media as a disk
1222 	 */
1223 	CAM_SIM_UNLOCK(periph->sim);
1224 	disk_create(periph->unit_number, &softc->disk, &da_ops);
1225 	if (cpi->maxio == 0 || cpi->maxio > MAXPHYS)
1226 		softc->disk.d_rawdev->si_iosize_max = MAXPHYS;
1227 	else
1228 		softc->disk.d_rawdev->si_iosize_max = cpi->maxio;
1229 	if (bootverbose) {
1230 		kprintf("%s%d: si_iosize_max:%d\n",
1231 		    periph->periph_name,
1232 		    periph->unit_number,
1233 		    softc->disk.d_rawdev->si_iosize_max);
1234 	}
1235 	CAM_SIM_LOCK(periph->sim);
1236 
1237 	/*
1238 	 * Add async callbacks for bus reset and
1239 	 * bus device reset calls.  I don't bother
1240 	 * checking if this fails as, in most cases,
1241 	 * the system will function just fine without
1242 	 * them and the only alternative would be to
1243 	 * not attach the device on failure.
1244 	 */
1245 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
1246 			   daasync, periph, periph->path);
1247 
1248 	/*
1249 	 * Take an exclusive refcount on the periph while dastart is called
1250 	 * to finish the probe.  The reference will be dropped in dadone at
1251 	 * the end of probe.
1252 	 */
1253 	xpt_free_ccb(&cpi->ccb_h);
1254 	cam_periph_hold(periph, 0);
1255 	xpt_schedule(periph, /*priority*/5);
1256 
1257 	return(CAM_REQ_CMP);
1258 }
1259 
1260 static void
1261 dastart(struct cam_periph *periph, union ccb *start_ccb)
1262 {
1263 	struct da_softc *softc;
1264 
1265 	softc = (struct da_softc *)periph->softc;
1266 
1267 	switch (softc->state) {
1268 	case DA_STATE_NORMAL:
1269 	{
1270 		/* Pull a buffer from the queue and get going on it */
1271 		struct bio *bio;
1272 		struct bio *bio_rd;
1273 		struct bio *bio_wr;
1274 		struct buf *bp;
1275 		u_int8_t tag_code;
1276 		int limit;
1277 
1278 		/*
1279 		 * See if there is a buf with work for us to do..
1280 		 */
1281 		bio_rd = bioq_first(&softc->bio_queue_rd);
1282 		bio_wr = bioq_first(&softc->bio_queue_wr);
1283 
1284 		if (periph->immediate_priority <= periph->pinfo.priority) {
1285 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1286 					("queuing for immediate ccb\n"));
1287 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1288 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1289 					  periph_links.sle);
1290 			periph->immediate_priority = CAM_PRIORITY_NONE;
1291 			wakeup(&periph->ccb_list);
1292 			if (bio_rd || bio_wr) {
1293 				/*
1294 				 * Have more work to do, so ensure we stay
1295 				 * scheduled
1296 				 */
1297 				xpt_schedule(periph, /* XXX priority */1);
1298 			}
1299 			break;
1300 		}
1301 
1302 		/* Run the trim command if not already running */
1303 		if (!softc->trim_running &&
1304 		   (bio = bioq_first(&softc->bio_queue_trim)) != NULL) {
1305 			struct trim_request *req = &softc->trim_req;
1306 			struct bio *bio1;
1307 			int bps = 0, ranges = 0;
1308 
1309 			softc->trim_running = 1;
1310 			bzero(req, sizeof(*req));
1311 			bio1 = bio;
1312 			while (1) {
1313 				uint64_t lba;
1314 				int count;
1315 
1316 				bp = bio1->bio_buf;
1317 				count = bp->b_bcount / softc->params.secsize;
1318 				lba = bio1->bio_offset/softc->params.secsize;
1319 
1320 				bioq_remove(&softc->bio_queue_trim, bio1);
1321 				while (count > 0) {
1322 					int c = min(count, 0xffff);
1323 					int off = ranges * 8;
1324 
1325 					req->data[off + 0] = lba & 0xff;
1326 					req->data[off + 1] = (lba >> 8) & 0xff;
1327 					req->data[off + 2] = (lba >> 16) & 0xff;
1328 					req->data[off + 3] = (lba >> 24) & 0xff;
1329 					req->data[off + 4] = (lba >> 32) & 0xff;
1330 					req->data[off + 5] = (lba >> 40) & 0xff;
1331 					req->data[off + 6] = c & 0xff;
1332 					req->data[off + 7] = (c >> 8) & 0xff;
1333 					lba += c;
1334 					count -= c;
1335 					ranges++;
1336 				}
1337 
1338 				/* Try to merge multiple TRIM requests */
1339 				req->bios[bps++] = bio1;
1340 				bio1 = bioq_first(&softc->bio_queue_trim);
1341 				if (bio1 == NULL ||
1342 				    bio1->bio_buf->b_bcount / softc->params.secsize >
1343 				    (softc->trim_max_ranges - ranges) * 0xffff)
1344 					break;
1345 			}
1346 
1347 
1348 			cam_fill_csio(&start_ccb->csio,
1349 			    1/*retries*/,
1350 			    dadone,
1351 			    CAM_DIR_OUT,
1352 			    MSG_SIMPLE_Q_TAG,
1353 			    req->data,
1354 			    ((ranges +63)/64)*512,
1355 			    SSD_FULL_SIZE,
1356 			    sizeof(struct scsi_rw_6),
1357 			    da_default_timeout*2);
1358 
1359 			start_ccb->ccb_h.ccb_state = DA_CCB_TRIM;
1360 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1361 			    &start_ccb->ccb_h, periph_links.le);
1362 			start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1363 			start_ccb->ccb_h.ccb_bio = bio;
1364 			devstat_start_transaction(&softc->device_stats);
1365 			xpt_action(start_ccb);
1366 			xpt_schedule(periph, 1);
1367 			break;
1368 		}
1369 
1370 		/*
1371 		 * Select a read or write buffer to queue.  Limit the number
1372 		 * of tags dedicated to reading or writing, giving reads
1373 		 * precedence.
1374 		 *
1375 		 * Writes to modern hard drives go into the HDs cache and
1376 		 * return completion nearly instantly.  That is until the
1377 		 * cache becomes full.  When the HDs cache becomes full
1378 		 * write commands will begin to stall.  If all available
1379 		 * tags are taken up by writes which saturate the drive
1380 		 * reads will become tag-starved.
1381 		 *
1382 		 * A similar situation can occur with reads.  With many
1383 		 * parallel readers all tags can be taken up by reads
1384 		 * and prevent any writes from draining, even if the HD's
1385 		 * cache is not full.
1386 		 */
1387 		limit = periph->sim->max_tagged_dev_openings * 2 / 3 + 1;
1388 #if 0
1389 		/* DEBUGGING */
1390 		static int savets;
1391 		static long savets2;
1392 		if (1 || time_uptime != savets2 || (ticks != savets && (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr))) {
1393 			kprintf("%d %d (%d)\n",
1394 				softc->outstanding_cmds_rd,
1395 				softc->outstanding_cmds_wr,
1396 				limit);
1397 			savets = ticks;
1398 			savets2 = time_uptime;
1399 		}
1400 #endif
1401 		if (bio_rd && softc->outstanding_cmds_rd < limit) {
1402 			bio = bio_rd;
1403 			bioq_remove(&softc->bio_queue_rd, bio);
1404 		} else if (bio_wr && softc->outstanding_cmds_wr < limit) {
1405 			bio = bio_wr;
1406 			bioq_remove(&softc->bio_queue_wr, bio);
1407 		} else {
1408 			if (bio_rd)
1409 				softc->flags |= DA_FLAG_RD_LIMIT;
1410 			if (bio_wr)
1411 				softc->flags |= DA_FLAG_WR_LIMIT;
1412 			xpt_release_ccb(start_ccb);
1413 			break;
1414 		}
1415 
1416 		/*
1417 		 * We can queue new work.
1418 		 */
1419 		bp = bio->bio_buf;
1420 
1421 		devstat_start_transaction(&softc->device_stats);
1422 
1423 		tag_code = MSG_SIMPLE_Q_TAG;
1424 
1425 		switch(bp->b_cmd) {
1426 		case BUF_CMD_READ:
1427 		case BUF_CMD_WRITE:
1428 			/*
1429 			 * Block read/write op
1430 			 */
1431 			KKASSERT(bio->bio_offset % softc->params.secsize == 0);
1432 
1433 			scsi_read_write(
1434 				&start_ccb->csio,
1435 				da_retry_count,		/* retries */
1436 				dadone,
1437 				tag_code,
1438 				(bp->b_cmd == BUF_CMD_READ),
1439 				0,			/* byte2 */
1440 				softc->minimum_cmd_size,
1441 				bio->bio_offset / softc->params.secsize,
1442 				bp->b_bcount / softc->params.secsize,
1443 				bp->b_data,
1444 				bp->b_bcount,
1445 				SSD_FULL_SIZE,		/* sense_len */
1446 				da_default_timeout * 1000
1447 			);
1448 			break;
1449 		case BUF_CMD_FLUSH:
1450 			/*
1451 			 * Silently complete a flush request if the device
1452 			 * cannot handle it.
1453 			 */
1454 			if (softc->quirks & DA_Q_NO_SYNC_CACHE) {
1455 				xpt_release_ccb(start_ccb);
1456 				start_ccb = NULL;
1457 				devstat_end_transaction_buf(
1458 					&softc->device_stats, bp);
1459 				biodone(bio);
1460 			} else {
1461 				scsi_synchronize_cache(
1462 					&start_ccb->csio,
1463 					1,		/* retries */
1464 					dadone,		/* cbfcnp */
1465 					MSG_SIMPLE_Q_TAG,
1466 					0,		/* lba */
1467 					0,		/* count (whole disk) */
1468 					SSD_FULL_SIZE,
1469 					da_default_timeout*1000	/* timeout */
1470 				);
1471 			}
1472 			break;
1473 		case BUF_CMD_FREEBLKS:
1474 			if (softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM){
1475 				start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1476 				break;
1477 			}
1478 		default:
1479 			xpt_release_ccb(start_ccb);
1480 			start_ccb = NULL;
1481 			panic("dastart: unrecognized bio cmd %d", bp->b_cmd);
1482 			break; /* NOT REACHED */
1483 		}
1484 
1485 		/*
1486 		 * Block out any asyncronous callbacks
1487 		 * while we touch the pending ccb list.
1488 		 */
1489 		if (start_ccb) {
1490 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1491 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1492 					 &start_ccb->ccb_h, periph_links.le);
1493 			if (bp->b_cmd == BUF_CMD_WRITE ||
1494 			    bp->b_cmd == BUF_CMD_FLUSH) {
1495 				++softc->outstanding_cmds_wr;
1496 			} else {
1497 				++softc->outstanding_cmds_rd;
1498 			}
1499 
1500 			/* We expect a unit attention from this device */
1501 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1502 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1503 				softc->flags &= ~DA_FLAG_RETRY_UA;
1504 			}
1505 
1506 			start_ccb->ccb_h.ccb_bio = bio;
1507 			xpt_action(start_ccb);
1508 		}
1509 
1510 		/*
1511 		 * Be sure we stay scheduled if we have more work to do.
1512 		 */
1513 		if (bioq_first(&softc->bio_queue_rd) ||
1514 		    bioq_first(&softc->bio_queue_wr)) {
1515 			xpt_schedule(periph, 1);
1516 		}
1517 		break;
1518 	}
1519 	case DA_STATE_PROBE:
1520 	{
1521 		struct ccb_scsiio *csio;
1522 		struct scsi_read_capacity_data *rcap;
1523 
1524 		rcap = kmalloc(sizeof(*rcap), M_SCSIDA, M_INTWAIT | M_ZERO);
1525 		csio = &start_ccb->csio;
1526 		scsi_read_capacity(csio,
1527 				   /*retries*/4,
1528 				   dadone,
1529 				   MSG_SIMPLE_Q_TAG,
1530 				   rcap,
1531 				   SSD_FULL_SIZE,
1532 				   /*timeout*/5000);
1533 		start_ccb->ccb_h.ccb_bio = NULL;
1534 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1535 		xpt_action(start_ccb);
1536 		break;
1537 	}
1538 	case DA_STATE_PROBE2:
1539 	{
1540 		struct ccb_scsiio *csio;
1541 		struct scsi_read_capacity_data_16 *rcaplong;
1542 
1543 		rcaplong = kmalloc(sizeof(*rcaplong), M_SCSIDA,
1544 				   M_INTWAIT | M_ZERO);
1545 		csio = &start_ccb->csio;
1546 		scsi_read_capacity_16(csio,
1547 				    /*retries*/ 4,
1548 				    /*cbfcnp*/ dadone,
1549 				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
1550 				    /*lba*/ 0,
1551 				    /*reladr*/ 0,
1552 				    /*pmi*/ 0,
1553 				    rcaplong,
1554 				    /*sense_len*/ SSD_FULL_SIZE,
1555 				    /*timeout*/ 60000);
1556 		start_ccb->ccb_h.ccb_bio = NULL;
1557 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
1558 		xpt_action(start_ccb);
1559 		break;
1560 	}
1561 	}
1562 }
1563 
1564 static int
1565 cmd6workaround(union ccb *ccb)
1566 {
1567 	struct scsi_rw_6 cmd6;
1568 	struct scsi_rw_10 *cmd10;
1569 	struct da_softc *softc;
1570 	u_int8_t *cdb;
1571 	int frozen;
1572 
1573 	cdb = ccb->csio.cdb_io.cdb_bytes;
1574 
1575 	/* Translation only possible if CDB is an array and cmd is R/W6 */
1576 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
1577 	    (*cdb != READ_6 && *cdb != WRITE_6))
1578 		return 0;
1579 
1580 	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
1581 	    "increasing minimum_cmd_size to 10.\n");
1582  	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
1583 	softc->minimum_cmd_size = 10;
1584 
1585 	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
1586 	cmd10 = (struct scsi_rw_10 *)cdb;
1587 	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
1588 	cmd10->byte2 = 0;
1589 	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
1590 	cmd10->reserved = 0;
1591 	scsi_ulto2b(cmd6.length, cmd10->length);
1592 	cmd10->control = cmd6.control;
1593 	ccb->csio.cdb_len = sizeof(*cmd10);
1594 
1595 	/* Requeue request, unfreezing queue if necessary */
1596 	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1597  	ccb->ccb_h.status = CAM_REQUEUE_REQ;
1598 	xpt_action(ccb);
1599 	if (frozen) {
1600 		cam_release_devq(ccb->ccb_h.path,
1601 				 /*relsim_flags*/0,
1602 				 /*reduction*/0,
1603 				 /*timeout*/0,
1604 				 /*getcount_only*/0);
1605 	}
1606 	return (ERESTART);
1607 }
1608 
1609 static void
1610 dadone(struct cam_periph *periph, union ccb *done_ccb)
1611 {
1612 	struct da_softc *softc;
1613 	struct ccb_scsiio *csio;
1614 	struct disk_info info;
1615 
1616 	softc = (struct da_softc *)periph->softc;
1617 	csio = &done_ccb->csio;
1618 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1619 	case DA_CCB_BUFFER_IO:
1620 	case DA_CCB_TRIM:
1621 	{
1622 		struct buf *bp;
1623 		struct bio *bio;
1624 		int mustsched = 0;
1625 
1626 		bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
1627 		bp = bio->bio_buf;
1628 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1629 			int error;
1630 			int sf;
1631 
1632 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1633 				sf = SF_RETRY_UA;
1634 			else
1635 				sf = 0;
1636 
1637 			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1638 			if (error == ERESTART) {
1639 				/*
1640 				 * A retry was scheuled, so
1641 				 * just return.
1642 				 */
1643 				return;
1644 			}
1645 			if (error != 0) {
1646 				if (error == ENXIO) {
1647 					/*
1648 					 * Catastrophic error.  Mark our pack as
1649 					 * invalid.
1650 					 */
1651 					/*
1652 					 * XXX See if this is really a media
1653 					 * XXX change first?
1654 					 */
1655 					xpt_print(periph->path,
1656 					    "Invalidating pack\n");
1657 					softc->flags |= DA_FLAG_PACK_INVALID;
1658 				}
1659 
1660 				/*
1661 				 * Return all queued write I/O's with EIO
1662 				 * so the client can retry these I/Os in the
1663 				 * proper order should it attempt to recover.
1664 				 *
1665 				 * Leave read I/O's alone.
1666 				 */
1667 				daflushbioq(&softc->bio_queue_wr, EIO);
1668 				bp->b_error = error;
1669 				bp->b_resid = bp->b_bcount;
1670 				bp->b_flags |= B_ERROR;
1671 			} else {
1672 				bp->b_resid = csio->resid;
1673 				bp->b_error = 0;
1674 				if (bp->b_resid != 0)
1675 					bp->b_flags |= B_ERROR;
1676 			}
1677 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1678 				cam_release_devq(done_ccb->ccb_h.path,
1679 						 /*relsim_flags*/0,
1680 						 /*reduction*/0,
1681 						 /*timeout*/0,
1682 						 /*getcount_only*/0);
1683 		} else {
1684 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1685 				panic("REQ_CMP with QFRZN");
1686 			bp->b_resid = csio->resid;
1687 			if (csio->resid > 0)
1688 				bp->b_flags |= B_ERROR;
1689 		}
1690 
1691 		/*
1692 		 * Block out any asyncronous callbacks
1693 		 * while we touch the pending ccb list.
1694 		 */
1695 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1696 		if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH) {
1697 			--softc->outstanding_cmds_wr;
1698 			if (softc->flags & DA_FLAG_WR_LIMIT) {
1699 				softc->flags &= ~DA_FLAG_WR_LIMIT;
1700 				mustsched = 1;
1701 			}
1702 		} else {
1703 			--softc->outstanding_cmds_rd;
1704 			if (softc->flags & DA_FLAG_RD_LIMIT) {
1705 				softc->flags &= ~DA_FLAG_RD_LIMIT;
1706 				mustsched = 1;
1707 			}
1708 		}
1709 
1710 		devstat_end_transaction_buf(&softc->device_stats, bp);
1711 		if ((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) ==
1712 		    DA_CCB_TRIM) {
1713 			struct trim_request *req =
1714 			    (struct trim_request *) csio->data_ptr;
1715 			int i;
1716 
1717 			for (i = 1; i < softc->trim_max_ranges &&
1718 			    req->bios[i]; i++) {
1719 				struct bio *bp1 = req->bios[i];
1720 
1721 				bp1->bio_buf->b_resid = bp->b_resid;
1722 				bp1->bio_buf->b_error = bp->b_error;
1723 				if (bp->b_flags & B_ERROR)
1724 					bp1->bio_buf->b_flags |= B_ERROR;
1725 				biodone(bp1);
1726 			}
1727 			softc->trim_running = 0;
1728 			biodone(bio);
1729 			xpt_schedule(periph,1);
1730 		} else
1731 			biodone(bio);
1732 
1733 
1734 		if (mustsched)
1735 			xpt_schedule(periph, /*priority*/1);
1736 
1737 		break;
1738 	}
1739 	case DA_CCB_PROBE:
1740 	case DA_CCB_PROBE2:
1741 	{
1742 		struct	   scsi_read_capacity_data *rdcap;
1743 		struct     scsi_read_capacity_data_16 *rcaplong;
1744 		char	   announce_buf[80];
1745 		int	   doinfo = 0;
1746 
1747 		rdcap = NULL;
1748 		rcaplong = NULL;
1749 		if (softc->state == DA_STATE_PROBE)
1750 			rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
1751 		else
1752 			rcaplong = (struct scsi_read_capacity_data_16 *)
1753 				    csio->data_ptr;
1754 
1755 		bzero(&info, sizeof(info));
1756 		info.d_type = DTYPE_SCSI;
1757 		info.d_serialno = xpt_path_serialno(periph->path);
1758 
1759 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1760 			struct disk_params *dp;
1761 			uint32_t block_size;
1762 			uint64_t maxsector;
1763 
1764 			if (softc->state == DA_STATE_PROBE) {
1765 				block_size = scsi_4btoul(rdcap->length);
1766 				maxsector = scsi_4btoul(rdcap->addr);
1767 
1768 				/*
1769 				 * According to SBC-2, if the standard 10
1770 				 * byte READ CAPACITY command returns 2^32,
1771 				 * we should issue the 16 byte version of
1772 				 * the command, since the device in question
1773 				 * has more sectors than can be represented
1774 				 * with the short version of the command.
1775 				 */
1776 				if (maxsector == 0xffffffff) {
1777 					softc->state = DA_STATE_PROBE2;
1778 					kfree(rdcap, M_SCSIDA);
1779 					xpt_release_ccb(done_ccb);
1780 					xpt_schedule(periph, /*priority*/5);
1781 					return;
1782 				}
1783 			} else {
1784 				block_size = scsi_4btoul(rcaplong->length);
1785 				maxsector = scsi_8btou64(rcaplong->addr);
1786 			}
1787 			dasetgeom(periph, block_size, maxsector);
1788 			dp = &softc->params;
1789 			ksnprintf(announce_buf, sizeof(announce_buf),
1790 				"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1791 				(uintmax_t) (((uintmax_t)dp->secsize *
1792 				dp->sectors) / (1024*1024)),
1793 				(uintmax_t)dp->sectors,
1794 				dp->secsize, dp->heads, dp->secs_per_track,
1795 				dp->cylinders);
1796 
1797 			info.d_media_blksize = softc->params.secsize;
1798 			info.d_media_blocks = softc->params.sectors;
1799 			info.d_media_size = 0;
1800 			info.d_secpertrack = softc->params.secs_per_track;
1801 			info.d_nheads = softc->params.heads;
1802 			info.d_ncylinders = softc->params.cylinders;
1803 			info.d_secpercyl = softc->params.heads *
1804 						softc->params.secs_per_track;
1805 			info.d_serialno = xpt_path_serialno(periph->path);
1806 			doinfo = 1;
1807 		} else {
1808 			int	error;
1809 
1810 			announce_buf[0] = '\0';
1811 
1812 			/*
1813 			 * Retry any UNIT ATTENTION type errors.  They
1814 			 * are expected at boot.
1815 			 */
1816 			error = daerror(done_ccb, CAM_RETRY_SELTO,
1817 					SF_RETRY_UA|SF_NO_PRINT);
1818 			if (error == ERESTART) {
1819 				/*
1820 				 * A retry was scheuled, so
1821 				 * just return.
1822 				 */
1823 				return;
1824 			} else if (error != 0) {
1825 				struct scsi_sense_data *sense;
1826 				int asc, ascq;
1827 				int sense_key, error_code;
1828 				int have_sense;
1829 				cam_status status;
1830 				struct ccb_getdev *cgd;
1831 
1832 				/* Don't wedge this device's queue */
1833 				status = done_ccb->ccb_h.status;
1834 				if ((status & CAM_DEV_QFRZN) != 0)
1835 					cam_release_devq(done_ccb->ccb_h.path,
1836 							 /*relsim_flags*/0,
1837 							 /*reduction*/0,
1838 							 /*timeout*/0,
1839 							 /*getcount_only*/0);
1840 
1841 				cgd = &xpt_alloc_ccb()->cgd;
1842 				xpt_setup_ccb(&cgd->ccb_h,
1843 					      done_ccb->ccb_h.path,
1844 					      /* priority */ 1);
1845 				cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1846 				xpt_action((union ccb *)cgd);
1847 
1848 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1849 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1850 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1851 					have_sense = FALSE;
1852 				else
1853 					have_sense = TRUE;
1854 
1855 				if (have_sense) {
1856 					sense = &csio->sense_data;
1857 					scsi_extract_sense(sense, &error_code,
1858 							   &sense_key,
1859 							   &asc, &ascq);
1860 				}
1861 				/*
1862 				 * Attach to anything that claims to be a
1863 				 * direct access or optical disk device,
1864 				 * as long as it doesn't return a "Logical
1865 				 * unit not supported" (0x25) error.
1866 				 */
1867 				if ((have_sense) && (asc != 0x25)
1868 				 && (error_code == SSD_CURRENT_ERROR)) {
1869 					const char *sense_key_desc;
1870 					const char *asc_desc;
1871 
1872 					scsi_sense_desc(sense_key, asc, ascq,
1873 							&cgd->inq_data,
1874 							&sense_key_desc,
1875 							&asc_desc);
1876 					ksnprintf(announce_buf,
1877 					    sizeof(announce_buf),
1878 						"Attempt to query device "
1879 						"size failed: %s, %s",
1880 						sense_key_desc,
1881 						asc_desc);
1882 					info.d_media_blksize = 512;
1883 					doinfo = 1;
1884 				} else {
1885 					if (have_sense)
1886 						scsi_sense_print(
1887 							&done_ccb->csio);
1888 					else {
1889 						xpt_print(periph->path,
1890 						    "got CAM status %#x\n",
1891 						    done_ccb->ccb_h.status);
1892 					}
1893 
1894 					xpt_print(periph->path, "fatal error, "
1895 					    "failed to attach to device\n");
1896 
1897 					/*
1898 					 * Free up resources.
1899 					 */
1900 					cam_periph_invalidate(periph);
1901 				}
1902 				xpt_free_ccb(&cgd->ccb_h);
1903 			}
1904 		}
1905 		kfree(csio->data_ptr, M_SCSIDA);
1906 		if (announce_buf[0] != '\0') {
1907 			xpt_announce_periph(periph, announce_buf);
1908 			/*
1909 			 * Create our sysctl variables, now that we know
1910 			 * we have successfully attached.
1911 			 */
1912 			taskqueue_enqueue(taskqueue_thread[mycpuid],
1913 			    &softc->sysctl_task);
1914 		}
1915 
1916 		if (softc->trim_max_ranges) {
1917 			softc->disk.d_info.d_trimflag |= DA_FLAG_CAN_TRIM;
1918 			kprintf("%s%d: supports TRIM\n",
1919 		   	    periph->periph_name,
1920 		   	    periph->unit_number);
1921 		}
1922 		softc->state = DA_STATE_NORMAL;
1923 		/*
1924 		 * Since our peripheral may be invalidated by an error
1925 		 * above or an external event, we must release our CCB
1926 		 * before releasing the probe lock on the peripheral.
1927 		 * The peripheral will only go away once the last lock
1928 		 * is removed, and we need it around for the CCB release
1929 		 * operation.
1930 		 */
1931 		xpt_release_ccb(done_ccb);
1932 		cam_periph_unhold(periph, 0);
1933 		if (doinfo) {
1934 			CAM_SIM_UNLOCK(periph->sim);
1935 			disk_setdiskinfo(&softc->disk, &info);
1936 			CAM_SIM_LOCK(periph->sim);
1937 		}
1938 		return;
1939 	}
1940 	case DA_CCB_WAITING:
1941 	{
1942 		/* Caller will release the CCB */
1943 		wakeup(&done_ccb->ccb_h.cbfcnp);
1944 		return;
1945 	}
1946 	case DA_CCB_DUMP:
1947 		/* No-op.  We're polling */
1948 		return;
1949 	case DA_CCB_POLLED:
1950 		/* Caller releases ccb */
1951 		wakeup(&done_ccb->ccb_h.cbfcnp);
1952 		return;
1953 	default:
1954 		break;
1955 	}
1956 	xpt_release_ccb(done_ccb);
1957 }
1958 
1959 static int
1960 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1961 {
1962 	struct da_softc	  *softc;
1963 	struct cam_periph *periph;
1964 	int error;
1965 
1966 	periph = xpt_path_periph(ccb->ccb_h.path);
1967 	softc = (struct da_softc *)periph->softc;
1968 
1969  	/*
1970 	 * Automatically detect devices that do not support
1971  	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
1972  	 */
1973 	error = 0;
1974 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
1975 		error = cmd6workaround(ccb);
1976 	} else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
1977 		   CAM_SCSI_STATUS_ERROR)
1978 	 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
1979 	 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
1980 	 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
1981 	 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
1982 		int sense_key, error_code, asc, ascq;
1983 
1984  		scsi_extract_sense(&ccb->csio.sense_data,
1985 				   &error_code, &sense_key, &asc, &ascq);
1986 		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
1987  			error = cmd6workaround(ccb);
1988 	}
1989 	if (error == ERESTART)
1990 		return (ERESTART);
1991 
1992 	/*
1993 	 * XXX
1994 	 * Until we have a better way of doing pack validation,
1995 	 * don't treat UAs as errors.
1996 	 */
1997 	sense_flags |= SF_RETRY_UA;
1998 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1999 				&softc->saved_ccb));
2000 }
2001 
2002 static void
2003 daprevent(struct cam_periph *periph, int action)
2004 {
2005 	struct	da_softc *softc;
2006 	union	ccb *ccb;
2007 	int	error;
2008 
2009 	softc = (struct da_softc *)periph->softc;
2010 
2011 	if (((action == PR_ALLOW)
2012 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
2013 	 || ((action == PR_PREVENT)
2014 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
2015 		return;
2016 	}
2017 
2018 	ccb = cam_periph_getccb(periph, /*priority*/1);
2019 	ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2020 
2021 	scsi_prevent(&ccb->csio,
2022 		     /*retries*/1,
2023 		     /*cbcfp*/dadone,
2024 		     MSG_SIMPLE_Q_TAG,
2025 		     action,
2026 		     SSD_FULL_SIZE,
2027 		     5000);
2028 
2029 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
2030 				  SF_RETRY_UA, &softc->device_stats);
2031 
2032 	if (error == 0) {
2033 		if (action == PR_ALLOW)
2034 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
2035 		else
2036 			softc->flags |= DA_FLAG_PACK_LOCKED;
2037 	}
2038 
2039 	xpt_release_ccb(ccb);
2040 }
2041 
2042 /*
2043  * Check media on open, e.g. card reader devices which had no initial media.
2044  */
2045 static int
2046 dacheckmedia(struct cam_periph *periph)
2047 {
2048 	struct disk_params *dp;
2049 	struct da_softc *softc;
2050 	struct disk_info info;
2051 	int error;
2052 
2053 	softc = (struct da_softc *)periph->softc;
2054 	dp = &softc->params;
2055 
2056 	error = dagetcapacity(periph);
2057 
2058 	/*
2059 	 * Only reprobe on initial open and if the media is removable.
2060 	 *
2061 	 * NOTE: If we setdiskinfo() it will take the device probe
2062 	 *	 a bit of time to probe the slices and partitions,
2063 	 *	 and mess up booting.  So avoid if nothing has changed.
2064 	 *	 XXX
2065 	 */
2066 	if (softc->flags & DA_FLAG_OPEN)
2067 		return (error);
2068 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
2069 		return (error);
2070 
2071 	bzero(&info, sizeof(info));
2072 	info.d_type = DTYPE_SCSI;
2073 	info.d_serialno = xpt_path_serialno(periph->path);
2074 
2075 	if (error == 0) {
2076 		CAM_SIM_UNLOCK(periph->sim);
2077 		info.d_media_blksize = softc->params.secsize;
2078 		info.d_media_blocks = softc->params.sectors;
2079 		info.d_media_size = 0;
2080 		info.d_secpertrack = softc->params.secs_per_track;
2081 		info.d_nheads = softc->params.heads;
2082 		info.d_ncylinders = softc->params.cylinders;
2083 		info.d_secpercyl = softc->params.heads *
2084 					softc->params.secs_per_track;
2085 		info.d_serialno = xpt_path_serialno(periph->path);
2086 		if (info.d_media_blocks != softc->disk.d_info.d_media_blocks) {
2087 			kprintf("%s%d: open removable media: "
2088 				"%juMB (%ju %u byte sectors: %dH %dS/T %dC)\n",
2089 				periph->periph_name, periph->unit_number,
2090 				(uintmax_t)(((uintmax_t)dp->secsize *
2091 					     dp->sectors) / (1024*1024)),
2092 				(uintmax_t)dp->sectors, dp->secsize,
2093 				dp->heads, dp->secs_per_track, dp->cylinders);
2094 			disk_setdiskinfo(&softc->disk, &info);
2095 		}
2096 		CAM_SIM_LOCK(periph->sim);
2097 	} else {
2098 		kprintf("%s%d: open removable media: no media present\n",
2099 			periph->periph_name, periph->unit_number);
2100 		info.d_media_blksize = 512;
2101 		disk_setdiskinfo(&softc->disk, &info);
2102 	}
2103 	return (error);
2104 }
2105 
2106 static int
2107 dagetcapacity(struct cam_periph *periph)
2108 {
2109 	struct da_softc *softc;
2110 	union ccb *ccb;
2111 	struct scsi_read_capacity_data *rcap;
2112 	struct scsi_read_capacity_data_16 *rcaplong;
2113 	uint32_t block_len;
2114 	uint64_t maxsector;
2115 	int error;
2116 
2117 	softc = (struct da_softc *)periph->softc;
2118 	block_len = 0;
2119 	maxsector = 0;
2120 	error = 0;
2121 
2122 	/* Do a read capacity */
2123 	rcap = (struct scsi_read_capacity_data *)kmalloc(sizeof(*rcaplong),
2124 							 M_SCSIDA, M_INTWAIT);
2125 
2126 	ccb = cam_periph_getccb(periph, /*priority*/1);
2127 	ccb->ccb_h.ccb_state = DA_CCB_POLLED;
2128 
2129 	scsi_read_capacity(&ccb->csio,
2130 			   /*retries*/4,
2131 			   /*cbfncp*/dadone,
2132 			   MSG_SIMPLE_Q_TAG,
2133 			   rcap,
2134 			   SSD_FULL_SIZE,
2135 			   /*timeout*/60000);
2136 	ccb->ccb_h.ccb_bio = NULL;
2137 
2138 	error = cam_periph_runccb(ccb, daerror,
2139 				  /*cam_flags*/CAM_RETRY_SELTO,
2140 				  /*sense_flags*/SF_RETRY_UA,
2141 				  &softc->device_stats);
2142 
2143 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2144 		cam_release_devq(ccb->ccb_h.path,
2145 				 /*relsim_flags*/0,
2146 				 /*reduction*/0,
2147 				 /*timeout*/0,
2148 				 /*getcount_only*/0);
2149 
2150 	if (error == 0) {
2151 		block_len = scsi_4btoul(rcap->length);
2152 		maxsector = scsi_4btoul(rcap->addr);
2153 
2154 		if (maxsector != 0xffffffff)
2155 			goto done;
2156 	} else
2157 		goto done;
2158 
2159 	rcaplong = (struct scsi_read_capacity_data_16 *)rcap;
2160 
2161 	scsi_read_capacity_16(&ccb->csio,
2162 			      /*retries*/ 4,
2163 			      /*cbfcnp*/ dadone,
2164 			      /*tag_action*/ MSG_SIMPLE_Q_TAG,
2165 			      /*lba*/ 0,
2166 			      /*reladr*/ 0,
2167 			      /*pmi*/ 0,
2168 			      rcaplong,
2169 			      /*sense_len*/ SSD_FULL_SIZE,
2170 			      /*timeout*/ 60000);
2171 	ccb->ccb_h.ccb_bio = NULL;
2172 
2173 	error = cam_periph_runccb(ccb, daerror,
2174 				  /*cam_flags*/CAM_RETRY_SELTO,
2175 				  /*sense_flags*/SF_RETRY_UA,
2176 				  &softc->device_stats);
2177 
2178 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2179 		cam_release_devq(ccb->ccb_h.path,
2180 				 /*relsim_flags*/0,
2181 				 /*reduction*/0,
2182 				 /*timeout*/0,
2183 				 /*getcount_only*/0);
2184 
2185 	if (error == 0) {
2186 		block_len = scsi_4btoul(rcaplong->length);
2187 		maxsector = scsi_8btou64(rcaplong->addr);
2188 	}
2189 
2190 done:
2191 
2192 	if (error == 0)
2193 		dasetgeom(periph, block_len, maxsector);
2194 
2195 	xpt_release_ccb(ccb);
2196 
2197 	kfree(rcap, M_SCSIDA);
2198 
2199 	return (error);
2200 }
2201 
2202 static void
2203 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
2204 {
2205 	struct ccb_calc_geometry *ccg;
2206 	struct da_softc *softc;
2207 	struct disk_params *dp;
2208 
2209 	softc = (struct da_softc *)periph->softc;
2210 
2211 	dp = &softc->params;
2212 	dp->secsize = block_len;
2213 	dp->sectors = maxsector + 1;
2214 	/*
2215 	 * Have the controller provide us with a geometry
2216 	 * for this disk.  The only time the geometry
2217 	 * matters is when we boot and the controller
2218 	 * is the only one knowledgeable enough to come
2219 	 * up with something that will make this a bootable
2220 	 * device.
2221 	 */
2222 	ccg = &xpt_alloc_ccb()->ccg;
2223 	xpt_setup_ccb(&ccg->ccb_h, periph->path, /*priority*/1);
2224 	ccg->ccb_h.func_code = XPT_CALC_GEOMETRY;
2225 	ccg->block_size = dp->secsize;
2226 	ccg->volume_size = dp->sectors;
2227 	ccg->heads = 0;
2228 	ccg->secs_per_track = 0;
2229 	ccg->cylinders = 0;
2230 	xpt_action((union ccb*)ccg);
2231 	if ((ccg->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2232 		/*
2233 		 * We don't know what went wrong here- but just pick
2234 		 * a geometry so we don't have nasty things like divide
2235 		 * by zero.
2236 		 */
2237 		dp->heads = 255;
2238 		dp->secs_per_track = 255;
2239 		dp->cylinders = dp->sectors / (255 * 255);
2240 		if (dp->cylinders == 0) {
2241 			dp->cylinders = 1;
2242 		}
2243 	} else {
2244 		dp->heads = ccg->heads;
2245 		dp->secs_per_track = ccg->secs_per_track;
2246 		dp->cylinders = ccg->cylinders;
2247 	}
2248 	xpt_free_ccb(&ccg->ccb_h);
2249 }
2250 
2251 /*
2252  * Step through all DA peripheral drivers, and if the device is still open,
2253  * sync the disk cache to physical media.
2254  */
2255 static void
2256 dashutdown(void * arg, int howto)
2257 {
2258 	struct cam_periph *periph;
2259 	struct da_softc *softc;
2260 
2261 	TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
2262 		union ccb *ccb;
2263 
2264 		cam_periph_lock(periph);
2265 		softc = (struct da_softc *)periph->softc;
2266 
2267 		/*
2268 		 * We only sync the cache if the drive is still open, and
2269 		 * if the drive is capable of it..
2270 		 */
2271 		if (((softc->flags & DA_FLAG_OPEN) == 0)
2272 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
2273 			cam_periph_unlock(periph);
2274 			continue;
2275 		}
2276 
2277 		ccb = xpt_alloc_ccb();
2278 		xpt_setup_ccb(&ccb->ccb_h, periph->path, /*priority*/1);
2279 
2280 		ccb->ccb_h.ccb_state = DA_CCB_DUMP;
2281 		scsi_synchronize_cache(&ccb->csio,
2282 				       /*retries*/1,
2283 				       /*cbfcnp*/dadone,
2284 				       MSG_SIMPLE_Q_TAG,
2285 				       /*begin_lba*/0, /* whole disk */
2286 				       /*lb_count*/0,
2287 				       SSD_FULL_SIZE,
2288 				       60 * 60 * 1000);
2289 
2290 		xpt_polled_action(ccb);
2291 
2292 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2293 			if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
2294 			     CAM_SCSI_STATUS_ERROR)
2295 			 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)){
2296 				int error_code, sense_key, asc, ascq;
2297 
2298 				scsi_extract_sense(&ccb->csio.sense_data,
2299 						   &error_code, &sense_key,
2300 						   &asc, &ascq);
2301 
2302 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
2303 					scsi_sense_print(&ccb->csio);
2304 			} else {
2305 				xpt_print(periph->path, "Synchronize "
2306 				    "cache failed, status == 0x%x, scsi status "
2307 				    "== 0x%x\n", ccb->ccb_h.status,
2308 				    ccb->csio.scsi_status);
2309 			}
2310 		}
2311 
2312 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2313 			cam_release_devq(ccb->ccb_h.path,
2314 					 /*relsim_flags*/0,
2315 					 /*reduction*/0,
2316 					 /*timeout*/0,
2317 					 /*getcount_only*/0);
2318 
2319 		cam_periph_unlock(periph);
2320 		xpt_free_ccb(&ccb->ccb_h);
2321 	}
2322 }
2323 
2324 #else /* !_KERNEL */
2325 
2326 /*
2327  * XXX This is only left out of the kernel build to silence warnings.  If,
2328  * for some reason this function is used in the kernel, the ifdefs should
2329  * be moved so it is included both in the kernel and userland.
2330  */
2331 void
2332 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
2333 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
2334 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
2335 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
2336 		 u_int32_t timeout)
2337 {
2338 	struct scsi_format_unit *scsi_cmd;
2339 
2340 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
2341 	scsi_cmd->opcode = FORMAT_UNIT;
2342 	scsi_cmd->byte2 = byte2;
2343 	scsi_ulto2b(ileave, scsi_cmd->interleave);
2344 
2345 	cam_fill_csio(csio,
2346 		      retries,
2347 		      cbfcnp,
2348 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
2349 		      tag_action,
2350 		      data_ptr,
2351 		      dxfer_len,
2352 		      sense_len,
2353 		      sizeof(*scsi_cmd),
2354 		      timeout);
2355 }
2356 
2357 #endif /* _KERNEL */
2358