xref: /dragonfly/sys/bus/cam/scsi/scsi_da.c (revision 59b0b316)
1 /*
2  * Implementation of SCSI Direct Access Peripheral driver for CAM.
3  *
4  * Copyright (c) 1997 Justin T. Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD: src/sys/cam/scsi/scsi_da.c,v 1.42.2.46 2003/10/21 22:18:19 thomas Exp $
29  */
30 
31 #include <sys/param.h>
32 
33 #ifdef _KERNEL
34 
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/buf.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 #include <sys/lock.h>
41 #include <sys/conf.h>
42 #include <sys/devicestat.h>
43 #include <sys/disk.h>
44 #include <sys/dtype.h>
45 #include <sys/eventhandler.h>
46 #include <sys/malloc.h>
47 #include <sys/cons.h>
48 #include <sys/proc.h>
49 #include <sys/ioctl_compat.h>
50 
51 #include <sys/buf2.h>
52 #include <sys/thread2.h>
53 
54 #endif /* _KERNEL */
55 
56 #ifdef _KERNEL
57 #include <vm/pmap.h>
58 #endif
59 
60 #ifndef _KERNEL
61 #include <stdio.h>
62 #include <string.h>
63 #endif /* _KERNEL */
64 
65 #include <sys/camlib.h>
66 #include "../cam.h"
67 #include "../cam_ccb.h"
68 #include "../cam_extend.h"
69 #include "../cam_periph.h"
70 #include "../cam_xpt_periph.h"
71 #include "../cam_sim.h"
72 
73 #include "scsi_message.h"
74 
75 #ifndef _KERNEL
76 #include "scsi_da.h"
77 #endif /* !_KERNEL */
78 
79 #ifdef _KERNEL
80 typedef enum {
81 	DA_STATE_PROBE,
82 	DA_STATE_PROBE2,
83 	DA_STATE_NORMAL
84 } da_state;
85 
86 typedef enum {
87 	DA_FLAG_PACK_INVALID	= 0x001,
88 	DA_FLAG_NEW_PACK	= 0x002,
89 	DA_FLAG_PACK_LOCKED	= 0x004,
90 	DA_FLAG_PACK_REMOVABLE	= 0x008,
91 	DA_FLAG_TAGGED_QUEUING	= 0x010,
92 	DA_FLAG_RETRY_UA	= 0x080,
93 	DA_FLAG_OPEN		= 0x100,
94 	DA_FLAG_SCTX_INIT	= 0x200,
95 	DA_FLAG_RD_LIMIT	= 0x400,
96 	DA_FLAG_WR_LIMIT	= 0x800,
97 	DA_FLAG_CAN_TRIM	= 0x1000
98 } da_flags;
99 
100 typedef enum {
101 	DA_Q_NONE		= 0x00,
102 	DA_Q_NO_SYNC_CACHE	= 0x01,
103 	DA_Q_NO_6_BYTE		= 0x02,
104 	DA_Q_NO_PREVENT		= 0x04
105 } da_quirks;
106 
107 typedef enum {
108 	DA_CCB_PROBE		= 0x01,
109 	DA_CCB_PROBE2		= 0x02,
110 	DA_CCB_BUFFER_IO	= 0x03,
111 	DA_CCB_WAITING		= 0x04,
112 	DA_CCB_DUMP		= 0x05,
113 	DA_CCB_TRIM		= 0x06,
114 	DA_CCB_TYPE_MASK	= 0x0F,
115 	DA_CCB_RETRY_UA		= 0x10
116 } da_ccb_state;
117 
118 /* Offsets into our private area for storing information */
119 #define ccb_state	ppriv_field0
120 #define ccb_bio		ppriv_ptr1
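/*
 * ccb_state holds a da_ccb_state value (possibly OR'd with DA_CCB_RETRY_UA)
 * and ccb_bio points back at the struct bio that originated the request.
 */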
121 
122 struct disk_params {
123 	u_int8_t  heads;
124 	u_int32_t cylinders;
125 	u_int8_t  secs_per_track;
126 	u_int32_t secsize;	/* Number of bytes/sector */
127 	u_int64_t sectors;	/* total number of sectors */
128 };
129 
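/*
 * TRIM payload sizing: TRIM_MAX_BLOCKS is the number of 512-byte payload
 * blocks sent with one TRIM command and each block holds 64 eight-byte LBA
 * ranges, so TRIM_MAX_RANGES is 8 * 64 = 512 and data[] below is 4KB.
 */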
130 #define TRIM_MAX_BLOCKS 8
131 #define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * 64)
132 struct trim_request {
133         uint8_t         data[TRIM_MAX_RANGES * 8];
134         struct bio      *bios[TRIM_MAX_RANGES];
135 };
136 
137 struct da_softc {
138 	struct	 bio_queue_head bio_queue_rd;
139 	struct	 bio_queue_head bio_queue_wr;
140 	struct	 bio_queue_head bio_queue_trim;
141 	struct	 devstat device_stats;
142 	SLIST_ENTRY(da_softc) links;
143 	LIST_HEAD(, ccb_hdr) pending_ccbs;
144 	da_state state;
145 	da_flags flags;
146 	da_quirks quirks;
147 	int	 minimum_cmd_size;
148 	int	 outstanding_cmds_rd;
149 	int	 outstanding_cmds_wr;
150 	int      trim_max_ranges;
151 	int      trim_running;
152 	int      trim_enabled;
153 	struct	 disk_params params;
154 	struct	 disk disk;
155 	union	 ccb saved_ccb;
156 	struct task		sysctl_task;
157 	struct sysctl_ctx_list	sysctl_ctx;
158 	struct sysctl_oid	*sysctl_tree;
159 	struct trim_request     trim_req;
160 };
161 
162 struct da_quirk_entry {
163 	struct scsi_inquiry_pattern inq_pat;
164 	da_quirks quirks;
165 };
166 
167 static const char quantum[] = "QUANTUM";
168 static const char microp[] = "MICROP";
169 
170 static struct da_quirk_entry da_quirk_table[] =
171 {
172 	/* SPI, FC devices */
173 	{
174 		/*
175 		 * Fujitsu M2513A MO drives.
176 		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
177 		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
178 		 * Reported by: W.Scholten <whs@xs4all.nl>
179 		 */
180 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
181 		/*quirks*/ DA_Q_NO_SYNC_CACHE
182 	},
183 	{
184 		/* See above. */
185 		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
186 		/*quirks*/ DA_Q_NO_SYNC_CACHE
187 	},
188 	{
189 		/*
190 		 * This particular Fujitsu drive doesn't like the
191 		 * synchronize cache command.
192 		 * Reported by: Tom Jackson <toj@gorilla.net>
193 		 */
194 		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
195 		/*quirks*/ DA_Q_NO_SYNC_CACHE
196 	},
197 	{
198 		/*
199 		 * This drive doesn't like the synchronize cache command
200 		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
201 		 * in NetBSD PR kern/6027, August 24, 1998.
202 		 */
203 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
204 		/*quirks*/ DA_Q_NO_SYNC_CACHE
205 	},
206 	{
207 		/*
208 		 * This drive doesn't like the synchronize cache command
209 		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
210 		 * (PR 8882).
211 		 */
212 		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
213 		/*quirks*/ DA_Q_NO_SYNC_CACHE
214 	},
215 	{
216 		/*
217 		 * Doesn't like the synchronize cache command.
218 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
219 		 */
220 		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
221 		/*quirks*/ DA_Q_NO_SYNC_CACHE
222 	},
223 	{
224 		/*
225 		 * Doesn't like the synchronize cache command.
226 		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
227 		 */
228 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
229 		/*quirks*/ DA_Q_NO_SYNC_CACHE
230 	},
231 	{
232 		/*
233 		 * Doesn't like the synchronize cache command.
234 		 */
235 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
236 		/*quirks*/ DA_Q_NO_SYNC_CACHE
237 	},
238 	{
239 		/*
240 		 * Doesn't like the synchronize cache command.
241 		 * Reported by: walter@pelissero.de
242 		 */
243 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
244 		/*quirks*/ DA_Q_NO_SYNC_CACHE
245 	},
246 	{
247 		/*
248 		 * Doesn't work correctly with 6 byte reads/writes.
249 		 * Returns illegal request, and points to byte 9 of the
250 		 * 6-byte CDB.
251 		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
252 		 */
253 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
254 		/*quirks*/ DA_Q_NO_6_BYTE
255 	},
256 	{
257 		/* See above. */
258 		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
259 		/*quirks*/ DA_Q_NO_6_BYTE
260 	},
261 	{
262 		/*
263 		 * Doesn't like the synchronize cache command.
264 		 * Reported by: walter@pelissero.de
265 		 */
266 		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
267 		/*quirks*/ DA_Q_NO_SYNC_CACHE
268 	},
269 	{
270 		/*
271 		 * The CISS RAID controllers do not support SYNC_CACHE
272 		 */
273 		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
274 		/*quirks*/ DA_Q_NO_SYNC_CACHE
275 	},
276 	{
277 		/*
278 		 * The same goes for the mly(4) controllers
279 		 */
280 		{T_DIRECT, SIP_MEDIA_FIXED, "MLY*", "*", "MYLX"},
281 		/*quirks*/ DA_Q_NO_SYNC_CACHE
282 	},
283 	/*
284 	 * USB mass storage devices supported by umass(4)
285 	 *
286 	 * NOTE: USB attachments automatically set DA_Q_NO_SYNC_CACHE so
287 	 *	 it does not have to be specified here.
288 	 */
289  	{
290  		/*
291  		 * Creative Nomad MUVO mp3 player (USB)
292  		 * PR: kern/53094
293  		 */
294  		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
295 		/*quirks*/ DA_Q_NO_PREVENT
296  	},
297 	{
298 		/*
299 		 * Sigmatel USB Flash MP3 Player
300 		 * PR: kern/57046
301 		 */
302 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
303 		/*quirks*/ DA_Q_NO_PREVENT
304 	},
305 	{
306 		/*
307 		 * SEAGRAND NP-900 MP3 Player
308 		 * PR: kern/64563
309 		 */
310 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
311 		/*quirks*/ DA_Q_NO_PREVENT
312 	},
313 	{
314 		/*
315 		 * Creative MUVO Slim mp3 player (USB)
316 		 * PR: usb/86131
317 		 */
318 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
319 		"*"}, /*quirks*/ DA_Q_NO_PREVENT
320 	},
321 	{
322 		/*
323 		 * Philips USB Key Audio KEY013
324 		 * PR: usb/68412
325 		 */
326 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
327 		/*quirks*/ DA_Q_NO_PREVENT
328 	},
329 };
330 
331 static	d_open_t	daopen;
332 static	d_close_t	daclose;
333 static	d_strategy_t	dastrategy;
334 static	d_dump_t	dadump;
335 static	d_ioctl_t	daioctl;
336 static	periph_init_t	dainit;
337 static	void		daasync(void *callback_arg, u_int32_t code,
338 				struct cam_path *path, void *arg);
339 static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
340 static	periph_ctor_t	daregister;
341 static	periph_dtor_t	dacleanup;
342 static	periph_start_t	dastart;
343 static	periph_oninv_t	daoninvalidate;
344 static	void		dadone(struct cam_periph *periph,
345 			       union ccb *done_ccb);
346 static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
347 				u_int32_t sense_flags);
348 static void		daprevent(struct cam_periph *periph, int action);
349 static int		dagetcapacity(struct cam_periph *periph);
350 static int		dacheckmedia(struct cam_periph *periph);
351 static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
352 				  uint64_t maxsector);
353 static void		daflushbioq(struct bio_queue_head *bioq, int error);
354 static void		dashutdown(void *arg, int howto);
355 
356 #ifndef DA_DEFAULT_TIMEOUT
357 #define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
358 #endif
359 
360 #ifndef	DA_DEFAULT_RETRY
361 #define	DA_DEFAULT_RETRY	4
362 #endif
363 
364 static int da_retry_count = DA_DEFAULT_RETRY;
365 static int da_default_timeout = DA_DEFAULT_TIMEOUT;
366 
367 SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
368             "CAM Direct Access Disk driver");
369 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
370            &da_retry_count, 0, "Normal I/O retry count");
371 TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
372 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
373            &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
374 TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);
375 
376 static struct periph_driver dadriver =
377 {
378 	dainit, "da",
379 	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
380 };
381 
382 PERIPHDRIVER_DECLARE(da, dadriver);
383 
384 static struct dev_ops da_ops = {
385 	{ "da", 0, D_DISK | D_MPSAFE },
386 	.d_open =	daopen,
387 	.d_close =	daclose,
388 	.d_read =	physread,
389 	.d_write =	physwrite,
390 	.d_strategy =	dastrategy,
391 	.d_dump =	dadump,
392 	.d_ioctl =	daioctl
393 };
394 
395 static struct extend_array *daperiphs;
396 
397 MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
398 
399 static int
400 daioctl(struct dev_ioctl_args *ap)
401 {
402 	int unit;
403 	int error = 0;
404 	struct buf *bp;
405 	struct cam_periph *periph;
406 	int byte_count;
407 
408 	off_t *del_num = (off_t*)ap->a_data;
409 	off_t bytes_left;
410 	off_t bytes_start;
411 
412 	cdev_t dev = ap->a_head.a_dev;
413 
414 
415 	unit = dkunit(dev);
416 	periph = cam_extend_get(daperiphs, unit);
417 	if (periph == NULL)
418 		return(ENXIO);
419 
420 	switch (ap->a_cmd) {
421 	case IOCTLTRIM:
422 	{
423 
424 		bytes_left = del_num[1];
425 		bytes_start = del_num[0];
426 
427 		/* TRIM occurs on 512-byte sectors. */
428 		KKASSERT((bytes_left % 512) == 0);
429 		KKASSERT((bytes_start % 512) == 0);
430 
431 
432 		/* Break TRIM up into int-sized commands because of b_bcount */
433 		while(bytes_left) {
434 
435 			/*
436 			 * Rather than squeezing out more blocks in b_bcount
437 			 * and having to break up the TRIM request in dastart(),
438 			 * we ensure we can always TRIM this many bytes with one
439 			 * TRIM command (this holds even if the device only
440 			 * supports one TRIM block).
441 			 *
442 			 * With a minimum TRIM block size of 1, one TRIM command
443 			 * can free 4194240 blocks (64 * 65535): each LBA range
444 			 * can address 65535 blocks and there are 64 such ranges
445 			 * in a 512-byte block.  And 4194240 * 512 = 0x7FFF8000.
446 			 *
447 			 */
448 			byte_count = MIN(bytes_left,0x7FFF8000);
449 			bp = getnewbuf(0, 0, 0, 1, NULL);
450 
451 			bp->b_cmd = BUF_CMD_FREEBLKS;
452 			bp->b_bio1.bio_offset = bytes_start;
453 			bp->b_bcount = byte_count;
454 			bp->b_bio1.bio_flags |= BIO_SYNC;
455 			bp->b_bio1.bio_done = biodone_sync;
456 
457 			dev_dstrategy(ap->a_head.a_dev, &bp->b_bio1);
458 
459 			if (biowait(&bp->b_bio1, "TRIM")) {
460 				kprintf("Error:%d\n", bp->b_error);
461 				brelse(bp);
462 				return(bp->b_error ? bp->b_error : EIO);
463 			}
464 			brelse(bp);
465 			bytes_left -= byte_count;
466 			bytes_start += byte_count;
467 		}
468 		break;
469 	}
470 	default:
471 		return(EINVAL);
472 	}
473 
474 	return(error);
475 }
476 
477 static int
478 daopen(struct dev_open_args *ap)
479 {
480 	cdev_t dev = ap->a_head.a_dev;
481 	struct cam_periph *periph;
482 	struct da_softc *softc;
483 	struct disk_info info;
484 	int unit;
485 	int error;
486 
487 	unit = dkunit(dev);
488 	periph = cam_extend_get(daperiphs, unit);
489 	if (periph == NULL) {
490 		return (ENXIO);
491 	}
492 
493 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
494 		return(ENXIO);
495 	}
496 
497 	cam_periph_lock(periph);
498 	if ((error = cam_periph_hold(periph, PCATCH)) != 0) {
499 		cam_periph_unlock(periph);
500 		cam_periph_release(periph);
501 		return (error);
502 	}
503 
504 	unit = periph->unit_number;
505 	softc = (struct da_softc *)periph->softc;
506 
507 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
508 	    ("daopen: dev=%s (unit %d)\n", devtoname(dev),
509 	     unit));
510 
511 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
512 		/* Invalidate our pack information. */
513 		disk_invalidate(&softc->disk);
514 		softc->flags &= ~DA_FLAG_PACK_INVALID;
515 	}
516 
517 	error = dacheckmedia(periph);
518 	softc->flags |= DA_FLAG_OPEN;
519 
520 	if (error == 0) {
521 		struct ccb_getdev cgd;
522 
523 		/* Build disk information structure */
524 		bzero(&info, sizeof(info));
525 		info.d_type = DTYPE_SCSI;
526 
527 		/*
528 		 * Grab the inquiry data to get the vendor and product names.
529 		 * Put them in the typename and packname for the label.
530 		 */
531 		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
532 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
533 		xpt_action((union ccb *)&cgd);
534 
535 		/*
536 		 * Check to see whether or not the blocksize is set yet.
537 		 * If it isn't, set it and then clear the blocksize
538 		 * unavailable flag for the device statistics.
539 		 */
540 		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
541 			softc->device_stats.block_size = softc->params.secsize;
542 			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
543 		}
544 	}
545 
546 	if (error == 0) {
547 		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
548 		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
549 			daprevent(periph, PR_PREVENT);
550 	} else {
551 		softc->flags &= ~DA_FLAG_OPEN;
552 		cam_periph_release(periph);
553 	}
554 	cam_periph_unhold(periph, 1);
555 	return (error);
556 }
557 
558 static int
559 daclose(struct dev_close_args *ap)
560 {
561 	cdev_t dev = ap->a_head.a_dev;
562 	struct	cam_periph *periph;
563 	struct	da_softc *softc;
564 	int	unit;
565 	int	error;
566 
567 	unit = dkunit(dev);
568 	periph = cam_extend_get(daperiphs, unit);
569 	if (periph == NULL)
570 		return (ENXIO);
571 
572 	cam_periph_lock(periph);
573 	if ((error = cam_periph_hold(periph, 0)) != 0) {
574 		cam_periph_unlock(periph);
575 		cam_periph_release(periph);
576 		return (error);
577 	}
578 
579 	softc = (struct da_softc *)periph->softc;
580 
581 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
582 		union	ccb *ccb;
583 
584 		ccb = cam_periph_getccb(periph, /*priority*/1);
585 
586 		scsi_synchronize_cache(&ccb->csio,
587 				       /*retries*/1,
588 				       /*cbfcnp*/dadone,
589 				       MSG_SIMPLE_Q_TAG,
590 				       /*begin_lba*/0,/* Cover the whole disk */
591 				       /*lb_count*/0,
592 				       SSD_FULL_SIZE,
593 				       5 * 60 * 1000);
594 
595 		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
596 				  /*sense_flags*/SF_RETRY_UA,
597 				  &softc->device_stats);
598 
599 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
600 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
601 			     CAM_SCSI_STATUS_ERROR) {
602 				int asc, ascq;
603 				int sense_key, error_code;
604 
605 				scsi_extract_sense(&ccb->csio.sense_data,
606 						   &error_code,
607 						   &sense_key,
608 						   &asc, &ascq);
609 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
610 					scsi_sense_print(&ccb->csio);
611 			} else {
612 				xpt_print(periph->path, "Synchronize cache "
613 				    "failed, status == 0x%x, scsi status == "
614 				    "0x%x\n", ccb->csio.ccb_h.status,
615 				    ccb->csio.scsi_status);
616 			}
617 		}
618 
619 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
620 			cam_release_devq(ccb->ccb_h.path,
621 					 /*relsim_flags*/0,
622 					 /*reduction*/0,
623 					 /*timeout*/0,
624 					 /*getcount_only*/0);
625 
626 		xpt_release_ccb(ccb);
627 
628 	}
629 
630 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
631 		if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
632 			daprevent(periph, PR_ALLOW);
633 		/*
634 		 * If we've got removable media, mark the blocksize as
635 		 * unavailable, since it could change when new media is
636 		 * inserted.
637 		 */
638 		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
639 	}
640 
641 	/*
642 	 * Don't compound any ref counting software bugs with more.
643 	 */
644 	if (softc->flags & DA_FLAG_OPEN) {
645 		softc->flags &= ~DA_FLAG_OPEN;
646 		cam_periph_release(periph);
647 	} else {
648 		xpt_print(periph->path,
649 			  "daclose() called on an already closed device!\n");
650 	}
651 	cam_periph_unhold(periph, 1);
652 	return (0);
653 }
654 
655 /*
656  * Actually translate the requested transfer into one the physical driver
657  * can understand.  The transfer is described by a buf and will include
658  * only one physical transfer.
659  */
660 static int
661 dastrategy(struct dev_strategy_args *ap)
662 {
663 	cdev_t dev = ap->a_head.a_dev;
664 	struct bio *bio = ap->a_bio;
665 	struct buf *bp = bio->bio_buf;
666 	struct cam_periph *periph;
667 	struct da_softc *softc;
668 	u_int  unit;
669 
670 	unit = dkunit(dev);
671 	periph = cam_extend_get(daperiphs, unit);
672 	if (periph == NULL) {
673 		bp->b_error = ENXIO;
674 		goto bad;
675 	}
676 	softc = (struct da_softc *)periph->softc;
677 
678 	cam_periph_lock(periph);
679 
680 #if 0
681 	/*
682 	 * check it's not too big a transfer for our adapter
683 	 */
684 	scsi_minphys(bp, &sd_switch);
685 #endif
686 
687 	/*
688 	 * Mask interrupts so that the pack cannot be invalidated until
689 	 * after we are in the queue.  Otherwise, we might not properly
690 	 * clean up one of the buffers.
691 	 */
692 
693 	/*
694 	 * If the device has been made invalid, error out
695 	 */
696 	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
697 		cam_periph_unlock(periph);
698 		bp->b_error = ENXIO;
699 		goto bad;
700 	}
701 
702 	/*
703 	 * Place it in the queue of disk activities for this disk
704 	 */
705 	if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)
706 		bioqdisksort(&softc->bio_queue_wr, bio);
707 	else if (bp->b_cmd == BUF_CMD_FREEBLKS)
708 		bioqdisksort(&softc->bio_queue_trim, bio);
709 	else
710 		bioqdisksort(&softc->bio_queue_rd, bio);
711 
712 	/*
713 	 * Schedule ourselves for performing the work.
714 	 */
715 	xpt_schedule(periph, /* XXX priority */1);
716 	cam_periph_unlock(periph);
717 
718 	return(0);
719 bad:
720 	bp->b_flags |= B_ERROR;
721 
722 	/*
723 	 * Correctly set the buf to indicate a completed xfer
724 	 */
725 	bp->b_resid = bp->b_bcount;
726 	biodone(bio);
727 	return(0);
728 }
729 
730 static int
731 dadump(struct dev_dump_args *ap)
732 {
733 	cdev_t dev = ap->a_head.a_dev;
734 	struct	    cam_periph *periph;
735 	struct	    da_softc *softc;
736 	u_int	    unit;
737 	u_int32_t   secsize;
738 	struct	    ccb_scsiio csio;
739 
740 	unit = dkunit(dev);
741 	periph = cam_extend_get(daperiphs, unit);
742 	if (periph == NULL)
743 		return (ENXIO);
744 
745 	softc = (struct da_softc *)periph->softc;
746 	cam_periph_lock(periph);
747 	secsize = softc->params.secsize; /* XXX: or ap->a_secsize? */
748 
749 	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
750 		cam_periph_unlock(periph);
751 		return (ENXIO);
752 	}
753 
754 	/*
755 	 * because length == 0 means we are supposed to flush cache, we only
756 	 * try to write something if length > 0.
757 	 */
758 	if (ap->a_length > 0) {
759 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
760 		csio.ccb_h.flags |= CAM_POLLED;
761 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
762 		scsi_read_write(&csio,
763 				/*retries*/1,
764 				dadone,
765 				MSG_ORDERED_Q_TAG,
766 				/*read*/FALSE,
767 				/*byte2*/0,
768 				/*minimum_cmd_size*/ softc->minimum_cmd_size,
769 				ap->a_offset / secsize,
770 				ap->a_length / secsize,
771 				/*data_ptr*/(u_int8_t *) ap->a_virtual,
772 				/*dxfer_len*/ap->a_length,
773 				/*sense_len*/SSD_FULL_SIZE,
774 				DA_DEFAULT_TIMEOUT * 1000);
775 		xpt_polled_action((union ccb *)&csio);
776 
777 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
778 			kprintf("Aborting dump due to I/O error.\n");
779 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
780 			     CAM_SCSI_STATUS_ERROR)
781 				scsi_sense_print(&csio);
782 			else
783 				kprintf("status == 0x%x, scsi status == 0x%x\n",
784 				       csio.ccb_h.status, csio.scsi_status);
			cam_periph_unlock(periph);
785 			return(EIO);
786 		}
787 		cam_periph_unlock(periph);
788 		return 0;
789 	}
790 
791 	/*
792 	 * Sync the disk cache contents to the physical media.
793 	 */
794 	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
795 
796 		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
797 		csio.ccb_h.ccb_state = DA_CCB_DUMP;
798 		scsi_synchronize_cache(&csio,
799 				       /*retries*/1,
800 				       /*cbfcnp*/dadone,
801 				       MSG_SIMPLE_Q_TAG,
802 				       /*begin_lba*/0,/* Cover the whole disk */
803 				       /*lb_count*/0,
804 				       SSD_FULL_SIZE,
805 				       5 * 60 * 1000);
806 		xpt_polled_action((union ccb *)&csio);
807 
808 		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
809 			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
810 			     CAM_SCSI_STATUS_ERROR) {
811 				int asc, ascq;
812 				int sense_key, error_code;
813 
814 				scsi_extract_sense(&csio.sense_data,
815 						   &error_code,
816 						   &sense_key,
817 						   &asc, &ascq);
818 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
819 					scsi_sense_print(&csio);
820 			} else {
821 				xpt_print(periph->path, "Synchronize cache "
822 				    "failed, status == 0x%x, scsi status == "
823 				    "0x%x\n", csio.ccb_h.status,
824 				    csio.scsi_status);
825 			}
826 		}
827 	}
828 	cam_periph_unlock(periph);
829 	return (0);
830 }
831 
832 static void
833 dainit(void)
834 {
835 	cam_status status;
836 
837 	/*
838 	 * Create our extend array for storing the devices we attach to.
839 	 */
840 	daperiphs = cam_extend_new();
841 	if (daperiphs == NULL) {
842 		kprintf("da: Failed to alloc extend array!\n");
843 		return;
844 	}
845 
846 	/*
847 	 * Install a global async callback.  This callback will
848 	 * receive async callbacks like "new device found".
849 	 */
850 	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
851 
852 	if (status != CAM_REQ_CMP) {
853 		kprintf("da: Failed to attach master async callback "
854 		       "due to status 0x%x!\n", status);
855 	} else {
856 		/* Register our shutdown event handler */
857 		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
858 					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
859 			kprintf("%s: shutdown event registration failed!\n",
860 			    __func__);
861 	}
862 }
863 
864 static void
865 daoninvalidate(struct cam_periph *periph)
866 {
867 	struct da_softc *softc;
868 
869 	softc = (struct da_softc *)periph->softc;
870 
871 	/*
872 	 * De-register any async callbacks.
873 	 */
874 	xpt_register_async(0, daasync, periph, periph->path);
875 
876 	softc->flags |= DA_FLAG_PACK_INVALID;
877 
878 	/*
879 	 * Return all queued I/O with ENXIO.
880 	 * XXX Handle any transactions queued to the card
881 	 *     with XPT_ABORT_CCB.
882 	 */
883 	daflushbioq(&softc->bio_queue_trim, ENXIO);
884 	daflushbioq(&softc->bio_queue_wr, ENXIO);
885 	daflushbioq(&softc->bio_queue_rd, ENXIO);
886 	xpt_print(periph->path, "lost device\n");
887 }
888 
889 static void
890 daflushbioq(struct bio_queue_head *bioq, int error)
891 {
892 	struct bio *q_bio;
893 	struct buf *q_bp;
894 
895 	while ((q_bio = bioq_first(bioq)) != NULL){
896 		bioq_remove(bioq, q_bio);
897 		q_bp = q_bio->bio_buf;
898 		q_bp->b_resid = q_bp->b_bcount;
899 		q_bp->b_error = error;
900 		q_bp->b_flags |= B_ERROR;
901 		biodone(q_bio);
902 	}
903 }
904 
905 static void
906 dacleanup(struct cam_periph *periph)
907 {
908 	struct da_softc *softc;
909 
910 	softc = (struct da_softc *)periph->softc;
911 
912 	devstat_remove_entry(&softc->device_stats);
913 	cam_extend_release(daperiphs, periph->unit_number);
914 	xpt_print(periph->path, "removing device entry\n");
915 	/*
916 	 * If we can't free the sysctl tree, oh well...
917 	 */
918 	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
919 	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
920 		xpt_print(periph->path, "can't remove sysctl context\n");
921 	}
922 	periph->softc = NULL;
923 	if (softc->disk.d_rawdev) {
924 		cam_periph_unlock(periph);
925 		disk_destroy(&softc->disk);
926 		cam_periph_lock(periph);
927 	}
928 
929 	kfree(softc, M_DEVBUF);
930 }
931 
932 static void
933 daasync(void *callback_arg, u_int32_t code,
934 	struct cam_path *path, void *arg)
935 {
936 	struct cam_periph *periph;
937 
938 	periph = (struct cam_periph *)callback_arg;
939 
940 	switch (code) {
941 	case AC_FOUND_DEVICE:
942 	{
943 		struct ccb_getdev *cgd;
944 		cam_status status;
945 
946 		cgd = (struct ccb_getdev *)arg;
947 		if (cgd == NULL)
948 			break;
949 
950 		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
951 		    && SID_TYPE(&cgd->inq_data) != T_RBC
952 		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
953 			break;
954 
955 		/*
956 		 * Don't complain if a valid peripheral is already attached.
957 		 */
958 		periph = cam_periph_find(cgd->ccb_h.path, "da");
959 		if (periph && (periph->flags & CAM_PERIPH_INVALID) == 0)
960 			break;
961 
962 		/*
963 		 * Allocate a peripheral instance for
964 		 * this device and start the probe
965 		 * process.
966 		 */
967 		status = cam_periph_alloc(daregister, daoninvalidate,
968 					  dacleanup, dastart,
969 					  "da", CAM_PERIPH_BIO,
970 					  cgd->ccb_h.path, daasync,
971 					  AC_FOUND_DEVICE, cgd);
972 
973 		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) {
974 			kprintf("%s: Unable to attach to new device "
975 			    "due to status 0x%x\n", __func__, status);
976 		}
977 		break;
978 	}
979 	case AC_SENT_BDR:
980 	case AC_BUS_RESET:
981 	{
982 		struct da_softc *softc;
983 		struct ccb_hdr *ccbh;
984 
985 		softc = (struct da_softc *)periph->softc;
986 		/*
987 		 * Don't fail on the expected unit attention
988 		 * that will occur.
989 		 */
990 		softc->flags |= DA_FLAG_RETRY_UA;
991 		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
992 			ccbh->ccb_state |= DA_CCB_RETRY_UA;
993 		/* FALLTHROUGH*/
994 	}
995 	default:
996 		cam_periph_async(periph, code, path, arg);
997 		break;
998 	}
999 }
1000 
1001 static void
1002 dasysctlinit(void *context, int pending)
1003 {
1004 	struct cam_periph *periph;
1005 	struct da_softc *softc;
1006 	char tmpstr[80], tmpstr2[80];
1007 
1008 	periph = (struct cam_periph *)context;
1009 	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1010 		return;
1011 	}
1012 
1013 	softc = (struct da_softc *)periph->softc;
1014 	ksnprintf(tmpstr, sizeof(tmpstr),
1015 		  "CAM DA unit %d", periph->unit_number);
1016 	ksnprintf(tmpstr2, sizeof(tmpstr2),
1017 		  "%d", periph->unit_number);
1018 
1019 	sysctl_ctx_free(&softc->sysctl_ctx);
1020 	sysctl_ctx_init(&softc->sysctl_ctx);
1021 	softc->flags |= DA_FLAG_SCTX_INIT;
1022 	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1023 		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1024 		CTLFLAG_RD, 0, tmpstr);
1025 	if (softc->sysctl_tree == NULL) {
1026 		kprintf("%s: unable to allocate sysctl tree\n", __func__);
1027 		cam_periph_release(periph);
1028 		return;
1029 	}
1030 
1031 	/*
1032 	 * Now register the sysctl handler, so the user can change the value
1033 	 * on the fly.
1034 	 */
1035 	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1036 		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1037 		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1038 		"Minimum CDB size");
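	/* Settable at runtime, e.g. "sysctl kern.cam.da.0.minimum_cmd_size=10" */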
1039 
1040 	/* Only create the option if the device supports TRIM */
1041 	if (softc->disk.d_info.d_trimflag) {
1042 		SYSCTL_ADD_INT(&softc->sysctl_ctx,
1043 		    SYSCTL_CHILDREN(softc->sysctl_tree),
1044 		    OID_AUTO,
1045 		    "trim_enabled",
1046 		    CTLFLAG_RW,
1047 		    &softc->trim_enabled,
1048 		    0,
1049 		    "Enable TRIM for this device (SSD)");
1050 	}
1051 
1052 	cam_periph_release(periph);
1053 }
1054 
1055 static int
1056 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
1057 {
1058 	int error, value;
1059 
1060 	value = *(int *)arg1;
1061 
1062 	error = sysctl_handle_int(oidp, &value, 0, req);
1063 
1064 	if ((error != 0)
1065 	 || (req->newptr == NULL))
1066 		return (error);
1067 
1068 	/*
1069 	 * Acceptable values here are 6, 10, 12, or 16.
1070 	 */
1071 	if (value < 6)
1072 		value = 6;
1073 	else if ((value > 6)
1074 	      && (value <= 10))
1075 		value = 10;
1076 	else if ((value > 10)
1077 	      && (value <= 12))
1078 		value = 12;
1079 	else if (value > 12)
1080 		value = 16;
1081 
1082 	*(int *)arg1 = value;
1083 
1084 	return (0);
1085 }
1086 
1087 static cam_status
1088 daregister(struct cam_periph *periph, void *arg)
1089 {
1090 	struct da_softc *softc;
1091 	struct ccb_pathinq cpi;
1092 	struct ccb_getdev *cgd;
1093 	char tmpstr[80];
1094 	caddr_t match;
1095 
1096 	cgd = (struct ccb_getdev *)arg;
1097 	if (periph == NULL) {
1098 		kprintf("%s: periph was NULL!!\n", __func__);
1099 		return(CAM_REQ_CMP_ERR);
1100 	}
1101 
1102 	if (cgd == NULL) {
1103 		kprintf("%s: no getdev CCB, can't register device\n",
1104 		    __func__);
1105 		return(CAM_REQ_CMP_ERR);
1106 	}
1107 
1108 	softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
1109 	sysctl_ctx_init(&softc->sysctl_ctx);
1110 	LIST_INIT(&softc->pending_ccbs);
1111 	softc->state = DA_STATE_PROBE;
1112 	bioq_init(&softc->bio_queue_trim);
1113 	bioq_init(&softc->bio_queue_rd);
1114 	bioq_init(&softc->bio_queue_wr);
1115 	if (SID_IS_REMOVABLE(&cgd->inq_data))
1116 		softc->flags |= DA_FLAG_PACK_REMOVABLE;
1117 	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
1118 		softc->flags |= DA_FLAG_TAGGED_QUEUING;
1119 
1120 	/* Used to get TRIM status from AHCI driver */
1121 	if (cgd->inq_data.vendor_specific1[0] == 1) {
1122 		/*
1123 		 * Maximum number of LBA ranges the SSD can handle in one TRIM
1124 		 * command.  vendor_specific1[1] is the number of 512-byte payload
1125 		 * blocks it reports accepting per TRIM command (64 ranges each).
1126 		 */
1127 		softc->trim_max_ranges =
1128 		   min(cgd->inq_data.vendor_specific1[1] * 64, TRIM_MAX_RANGES);
1129 	}
1130 
1131 	periph->softc = softc;
1132 
1133 	cam_extend_set(daperiphs, periph->unit_number, periph);
1134 
1135 	/*
1136 	 * See if this device has any quirks.
1137 	 */
1138 	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1139 			       (caddr_t)da_quirk_table,
1140 			       NELEM(da_quirk_table),
1141 			       sizeof(*da_quirk_table), scsi_inquiry_match);
1142 
1143 	if (match != NULL)
1144 		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1145 	else
1146 		softc->quirks = DA_Q_NONE;
1147 
1148 	/*
1149 	 * Unconditionally disable the synchronize cache command for
1150 	 * usb attachments.  It's just impossible to determine if the
1151 	 * device supports it or not and if it doesn't the port can
1152 	 * brick.
1153 	 */
1154 	if (strncmp(periph->sim->sim_name, "umass", 4) == 0) {
1155 		softc->quirks |= DA_Q_NO_SYNC_CACHE;
1156 	}
1157 
1158 	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
1159 
1160 	/* Check if the SIM does not want 6 byte commands */
1161 	bzero(&cpi, sizeof(cpi));
1162 	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
1163 	cpi.ccb_h.func_code = XPT_PATH_INQ;
1164 	xpt_action((union ccb *)&cpi);
1165 	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
1166 		softc->quirks |= DA_Q_NO_6_BYTE;
1167 
1168 	/*
1169 	 * RBC devices don't have to support READ(6), only READ(10).
1170 	 */
1171 	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
1172 		softc->minimum_cmd_size = 10;
1173 	else
1174 		softc->minimum_cmd_size = 6;
1175 
1176 	/*
1177 	 * Load the user's default, if any.
1178 	 */
1179 	ksnprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
1180 		 periph->unit_number);
1181 	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
1182 
1183 	/*
1184 	 * 6, 10, 12, and 16 are the currently permissible values.
1185 	 */
1186 	if (softc->minimum_cmd_size < 6)
1187 		softc->minimum_cmd_size = 6;
1188 	else if ((softc->minimum_cmd_size > 6)
1189 	      && (softc->minimum_cmd_size <= 10))
1190 		softc->minimum_cmd_size = 10;
1191 	else if ((softc->minimum_cmd_size > 10)
1192 	      && (softc->minimum_cmd_size <= 12))
1193 		softc->minimum_cmd_size = 12;
1194 	else if (softc->minimum_cmd_size > 12)
1195 		softc->minimum_cmd_size = 16;
1196 
1197 	/*
1198 	 * The DA driver supports a blocksize, but
1199 	 * we don't know the blocksize until we do
1200 	 * a read capacity.  So, set a flag to
1201 	 * indicate that the blocksize is
1202 	 * unavailable right now.  We'll clear the
1203 	 * flag as soon as we've done a read capacity.
1204 	 */
1205 	devstat_add_entry(&softc->device_stats, "da",
1206 			  periph->unit_number, 0,
1207 	  		  DEVSTAT_BS_UNAVAILABLE,
1208 			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
1209 			  DEVSTAT_PRIORITY_DISK);
1210 
1211 	/*
1212 	 * Register this media as a disk
1213 	 */
1214 	CAM_SIM_UNLOCK(periph->sim);
1215 	disk_create(periph->unit_number, &softc->disk, &da_ops);
1216 	if (cpi.maxio == 0 || cpi.maxio > MAXPHYS)
1217 		softc->disk.d_rawdev->si_iosize_max = MAXPHYS;
1218 	else
1219 		softc->disk.d_rawdev->si_iosize_max = cpi.maxio;
1220 	if (bootverbose) {
1221 		kprintf("%s%d: si_iosize_max:%d\n",
1222 		    periph->periph_name,
1223 		    periph->unit_number,
1224 		    softc->disk.d_rawdev->si_iosize_max);
1225 	}
1226 	CAM_SIM_LOCK(periph->sim);
1227 
1228 	/*
1229 	 * Add async callbacks for bus reset and
1230 	 * bus device reset calls.  I don't bother
1231 	 * checking if this fails as, in most cases,
1232 	 * the system will function just fine without
1233 	 * them and the only alternative would be to
1234 	 * not attach the device on failure.
1235 	 */
1236 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
1237 			   daasync, periph, periph->path);
1238 
1239 	/*
1240 	 * Take an exclusive refcount on the periph while dastart is called
1241 	 * to finish the probe.  The reference will be dropped in dadone at
1242 	 * the end of probe.
1243 	 */
1244 	cam_periph_hold(periph, 0);
1245 	xpt_schedule(periph, /*priority*/5);
1246 
1247 	return(CAM_REQ_CMP);
1248 }
1249 
1250 static void
1251 dastart(struct cam_periph *periph, union ccb *start_ccb)
1252 {
1253 	struct da_softc *softc;
1254 
1255 	softc = (struct da_softc *)periph->softc;
1256 
1257 	switch (softc->state) {
1258 	case DA_STATE_NORMAL:
1259 	{
1260 		/* Pull a buffer from the queue and get going on it */
1261 		struct bio *bio;
1262 		struct bio *bio_rd;
1263 		struct bio *bio_wr;
1264 		struct buf *bp;
1265 		u_int8_t tag_code;
1266 		int limit;
1267 
1268 		/*
1269 		 * See if there is a buf with work for us to do.
1270 		 */
1271 		bio_rd = bioq_first(&softc->bio_queue_rd);
1272 		bio_wr = bioq_first(&softc->bio_queue_wr);
1273 
1274 		if (periph->immediate_priority <= periph->pinfo.priority) {
1275 			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1276 					("queuing for immediate ccb\n"));
1277 			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1278 			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1279 					  periph_links.sle);
1280 			periph->immediate_priority = CAM_PRIORITY_NONE;
1281 			wakeup(&periph->ccb_list);
1282 			if (bio_rd || bio_wr) {
1283 				/*
1284 				 * Have more work to do, so ensure we stay
1285 				 * scheduled
1286 				 */
1287 				xpt_schedule(periph, /* XXX priority */1);
1288 			}
1289 			break;
1290 		}
1291 
1292 		/* Run the trim command if not already running */
1293 		if (!softc->trim_running &&
1294 		   (bio = bioq_first(&softc->bio_queue_trim)) != NULL) {
1295 			struct trim_request *req = &softc->trim_req;
1296 			struct bio *bio1;
1297 			int bps = 0, ranges = 0;
1298 
1299 			softc->trim_running = 1;
1300 			bzero(req, sizeof(*req));
1301 			bio1 = bio;
1302 			while (1) {
1303 				uint64_t lba;
1304 				int count;
1305 
1306 				bp = bio1->bio_buf;
1307 				count = bp->b_bcount / softc->params.secsize;
1308 				lba = bio1->bio_offset/softc->params.secsize;
1309 
1310 				kprintf("trim lba:%llu boff:%llu count:%d\n",
1311 				    (unsigned long long) lba,
1312 				    (unsigned long long) bio1->bio_offset,
1313 				    count);
1314 
1315 				bioq_remove(&softc->bio_queue_trim, bio1);
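				/*
				 * Pack the request into 8-byte range entries,
				 * presumably the ATA DATA SET MANAGEMENT (TRIM)
				 * payload layout the AHCI driver expects:
				 *   data[off+0..5] = starting LBA, LSB first
				 *   data[off+6..7] = sector count, LSB first
				 */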
1316 				while (count > 0) {
1317 					int c = min(count, 0xffff);
1318 					int off = ranges * 8;
1319 
1320 					req->data[off + 0] = lba & 0xff;
1321 					req->data[off + 1] = (lba >> 8) & 0xff;
1322 					req->data[off + 2] = (lba >> 16) & 0xff;
1323 					req->data[off + 3] = (lba >> 24) & 0xff;
1324 					req->data[off + 4] = (lba >> 32) & 0xff;
1325 					req->data[off + 5] = (lba >> 40) & 0xff;
1326 					req->data[off + 6] = c & 0xff;
1327 					req->data[off + 7] = (c >> 8) & 0xff;
1328 					lba += c;
1329 					count -= c;
1330 					ranges++;
1331 				}
1332 
1333 				/* Try to merge multiple TRIM requests */
1334 				req->bios[bps++] = bio1;
1335 				bio1 = bioq_first(&softc->bio_queue_trim);
1336 				if (bio1 == NULL ||
1337 				    bio1->bio_buf->b_bcount / softc->params.secsize >
1338 				    (softc->trim_max_ranges - ranges) * 0xffff)
1339 					break;
1340 			}
1341 
1342 
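			/*
			 * The payload length handed to the CCB is the range
			 * count rounded up to whole 512-byte blocks:
			 * ((ranges + 63) / 64) * 512.
			 */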
1343 			cam_fill_csio(&start_ccb->csio,
1344 			    1/*retries*/,
1345 			    dadone,
1346 			    CAM_DIR_OUT,
1347 			    MSG_SIMPLE_Q_TAG,
1348 			    req->data,
1349 			    ((ranges +63)/64)*512,
1350 			    SSD_FULL_SIZE,
1351 			    sizeof(struct scsi_rw_6),
1352 			    da_default_timeout*2);
1353 
1354 			start_ccb->ccb_h.ccb_state = DA_CCB_TRIM;
1355 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1356 			    &start_ccb->ccb_h, periph_links.le);
1357 			start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1358 			start_ccb->ccb_h.ccb_bio = bio;
1359 			devstat_start_transaction(&softc->device_stats);
1360 			xpt_action(start_ccb);
1361 			xpt_schedule(periph, 1);
1362 			break;
1363 		}
1364 
1365 		/*
1366 		 * Select a read or write buffer to queue.  Limit the number
1367 		 * of tags dedicated to reading or writing, giving reads
1368 		 * precedence.
1369 		 *
1370 		 * Writes to modern hard drives go into the HD's cache and
1371 		 * return completion nearly instantly.  That is, until the
1372 		 * cache becomes full.  When the HD's cache becomes full,
1373 		 * write commands begin to stall.  If all available
1374 		 * tags are taken up by writes which saturate the drive
1375 		 * reads will become tag-starved.
1376 		 *
1377 		 * A similar situation can occur with reads.  With many
1378 		 * parallel readers all tags can be taken up by reads
1379 		 * and prevent any writes from draining, even if the HD's
1380 		 * cache is not full.
1381 		 */
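		/*
		 * Example: with 32 tagged openings the limit works out to
		 * 32 * 2 / 3 + 1 = 22, so neither reads nor writes can
		 * consume every available tag.
		 */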
1382 		limit = periph->sim->max_tagged_dev_openings * 2 / 3 + 1;
1383 #if 0
1384 		/* DEBUGGING */
1385 		static int savets;
1386 		static long savets2;
1387 		if (1 || time_uptime != savets2 || (ticks != savets && (softc->outstanding_cmds_rd || softc->outstanding_cmds_wr))) {
1388 			kprintf("%d %d (%d)\n",
1389 				softc->outstanding_cmds_rd,
1390 				softc->outstanding_cmds_wr,
1391 				limit);
1392 			savets = ticks;
1393 			savets2 = time_uptime;
1394 		}
1395 #endif
1396 		if (bio_rd && softc->outstanding_cmds_rd < limit) {
1397 			bio = bio_rd;
1398 			bioq_remove(&softc->bio_queue_rd, bio);
1399 		} else if (bio_wr && softc->outstanding_cmds_wr < limit) {
1400 			bio = bio_wr;
1401 			bioq_remove(&softc->bio_queue_wr, bio);
1402 		} else {
1403 			if (bio_rd)
1404 				softc->flags |= DA_FLAG_RD_LIMIT;
1405 			if (bio_wr)
1406 				softc->flags |= DA_FLAG_WR_LIMIT;
1407 			xpt_release_ccb(start_ccb);
1408 			break;
1409 		}
1410 
1411 		/*
1412 		 * We can queue new work.
1413 		 */
1414 		bp = bio->bio_buf;
1415 
1416 		devstat_start_transaction(&softc->device_stats);
1417 
1418 		tag_code = MSG_SIMPLE_Q_TAG;
1419 
1420 		switch(bp->b_cmd) {
1421 		case BUF_CMD_READ:
1422 		case BUF_CMD_WRITE:
1423 			/*
1424 			 * Block read/write op
1425 			 */
1426 			KKASSERT(bio->bio_offset % softc->params.secsize == 0);
1427 
1428 			scsi_read_write(
1429 				&start_ccb->csio,
1430 				da_retry_count,		/* retries */
1431 				dadone,
1432 				tag_code,
1433 				(bp->b_cmd == BUF_CMD_READ),
1434 				0,			/* byte2 */
1435 				softc->minimum_cmd_size,
1436 				bio->bio_offset / softc->params.secsize,
1437 				bp->b_bcount / softc->params.secsize,
1438 				bp->b_data,
1439 				bp->b_bcount,
1440 				SSD_FULL_SIZE,		/* sense_len */
1441 				da_default_timeout * 1000
1442 			);
1443 			break;
1444 		case BUF_CMD_FLUSH:
1445 			/*
1446 			 * Silently complete a flush request if the device
1447 			 * cannot handle it.
1448 			 */
1449 			if (softc->quirks & DA_Q_NO_SYNC_CACHE) {
1450 				xpt_release_ccb(start_ccb);
1451 				start_ccb = NULL;
1452 				devstat_end_transaction_buf(
1453 					&softc->device_stats, bp);
1454 				biodone(bio);
1455 			} else {
1456 				scsi_synchronize_cache(
1457 					&start_ccb->csio,
1458 					1,		/* retries */
1459 					dadone,		/* cbfcnp */
1460 					MSG_SIMPLE_Q_TAG,
1461 					0,		/* lba */
1462 					0,		/* count (whole disk) */
1463 					SSD_FULL_SIZE,
1464 					da_default_timeout*1000	/* timeout */
1465 				);
1466 			}
1467 			break;
1468 		case BUF_CMD_FREEBLKS:
1469 			if (softc->disk.d_info.d_trimflag & DA_FLAG_CAN_TRIM){
1470 				start_ccb->csio.ccb_h.func_code = XPT_TRIM;
1471 				break;
1472 			}
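			/* FALLTHROUGH if TRIM is not supported */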
1473 		default:
1474 			xpt_release_ccb(start_ccb);
1475 			start_ccb = NULL;
1476 			panic("dastart: unrecognized bio cmd %d", bp->b_cmd);
1477 			break; /* NOT REACHED */
1478 		}
1479 
1480 		/*
1481 		 * Block out any asynchronous callbacks
1482 		 * while we touch the pending ccb list.
1483 		 */
1484 		if (start_ccb) {
1485 			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1486 			LIST_INSERT_HEAD(&softc->pending_ccbs,
1487 					 &start_ccb->ccb_h, periph_links.le);
1488 			if (bp->b_cmd == BUF_CMD_WRITE ||
1489 			    bp->b_cmd == BUF_CMD_FLUSH) {
1490 				++softc->outstanding_cmds_wr;
1491 			} else {
1492 				++softc->outstanding_cmds_rd;
1493 			}
1494 
1495 			/* We expect a unit attention from this device */
1496 			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1497 				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1498 				softc->flags &= ~DA_FLAG_RETRY_UA;
1499 			}
1500 
1501 			start_ccb->ccb_h.ccb_bio = bio;
1502 			xpt_action(start_ccb);
1503 		}
1504 
1505 		/*
1506 		 * Be sure we stay scheduled if we have more work to do.
1507 		 */
1508 		if (bioq_first(&softc->bio_queue_rd) ||
1509 		    bioq_first(&softc->bio_queue_wr)) {
1510 			xpt_schedule(periph, 1);
1511 		}
1512 		break;
1513 	}
1514 	case DA_STATE_PROBE:
1515 	{
1516 		struct ccb_scsiio *csio;
1517 		struct scsi_read_capacity_data *rcap;
1518 
1519 		rcap = kmalloc(sizeof(*rcap), M_SCSIDA, M_INTWAIT | M_ZERO);
1520 		csio = &start_ccb->csio;
1521 		scsi_read_capacity(csio,
1522 				   /*retries*/4,
1523 				   dadone,
1524 				   MSG_SIMPLE_Q_TAG,
1525 				   rcap,
1526 				   SSD_FULL_SIZE,
1527 				   /*timeout*/5000);
1528 		start_ccb->ccb_h.ccb_bio = NULL;
1529 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1530 		xpt_action(start_ccb);
1531 		break;
1532 	}
1533 	case DA_STATE_PROBE2:
1534 	{
1535 		struct ccb_scsiio *csio;
1536 		struct scsi_read_capacity_data_16 *rcaplong;
1537 
1538 		rcaplong = kmalloc(sizeof(*rcaplong), M_SCSIDA,
1539 				   M_INTWAIT | M_ZERO);
1540 		csio = &start_ccb->csio;
1541 		scsi_read_capacity_16(csio,
1542 				    /*retries*/ 4,
1543 				    /*cbfcnp*/ dadone,
1544 				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
1545 				    /*lba*/ 0,
1546 				    /*reladr*/ 0,
1547 				    /*pmi*/ 0,
1548 				    rcaplong,
1549 				    /*sense_len*/ SSD_FULL_SIZE,
1550 				    /*timeout*/ 60000);
1551 		start_ccb->ccb_h.ccb_bio = NULL;
1552 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
1553 		xpt_action(start_ccb);
1554 		break;
1555 	}
1556 	}
1557 }
1558 
1559 static int
1560 cmd6workaround(union ccb *ccb)
1561 {
1562 	struct scsi_rw_6 cmd6;
1563 	struct scsi_rw_10 *cmd10;
1564 	struct da_softc *softc;
1565 	u_int8_t *cdb;
1566 	int frozen;
1567 
1568 	cdb = ccb->csio.cdb_io.cdb_bytes;
1569 
1570 	/* Translation only possible if CDB is an array and cmd is R/W6 */
1571 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
1572 	    (*cdb != READ_6 && *cdb != WRITE_6))
1573 		return 0;
1574 
1575 	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
1576 	    "increasing minimum_cmd_size to 10.\n");
1577  	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
1578 	softc->minimum_cmd_size = 10;
1579 
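	/*
	 * Rebuild the CDB in place: save the 6-byte command, then rewrite the
	 * buffer as the equivalent 10-byte command with the LBA widened to
	 * 32 bits and the transfer length widened to 16 bits.
	 */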
1580 	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
1581 	cmd10 = (struct scsi_rw_10 *)cdb;
1582 	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
1583 	cmd10->byte2 = 0;
1584 	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
1585 	cmd10->reserved = 0;
1586 	scsi_ulto2b(cmd6.length, cmd10->length);
1587 	cmd10->control = cmd6.control;
1588 	ccb->csio.cdb_len = sizeof(*cmd10);
1589 
1590 	/* Requeue request, unfreezing queue if necessary */
1591 	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
1592  	ccb->ccb_h.status = CAM_REQUEUE_REQ;
1593 	xpt_action(ccb);
1594 	if (frozen) {
1595 		cam_release_devq(ccb->ccb_h.path,
1596 				 /*relsim_flags*/0,
1597 				 /*reduction*/0,
1598 				 /*timeout*/0,
1599 				 /*getcount_only*/0);
1600 	}
1601 	return (ERESTART);
1602 }
1603 
1604 static void
1605 dadone(struct cam_periph *periph, union ccb *done_ccb)
1606 {
1607 	struct da_softc *softc;
1608 	struct ccb_scsiio *csio;
1609 	struct disk_info info;
1610 
1611 	softc = (struct da_softc *)periph->softc;
1612 	csio = &done_ccb->csio;
1613 	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1614 	case DA_CCB_BUFFER_IO:
1615 	case DA_CCB_TRIM:
1616 	{
1617 		struct buf *bp;
1618 		struct bio *bio;
1619 		int mustsched = 0;
1620 
1621 		bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
1622 		bp = bio->bio_buf;
1623 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1624 			int error;
1625 			int sf;
1626 
1627 			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1628 				sf = SF_RETRY_UA;
1629 			else
1630 				sf = 0;
1631 
1632 			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
1633 			if (error == ERESTART) {
1634 				/*
1635 				 * A retry was scheduled, so
1636 				 * just return.
1637 				 */
1638 				return;
1639 			}
1640 			if (error != 0) {
1641 				if (error == ENXIO) {
1642 					/*
1643 					 * Catastrophic error.  Mark our pack as
1644 					 * invalid.
1645 					 */
1646 					/*
1647 					 * XXX See if this is really a media
1648 					 * XXX change first?
1649 					 */
1650 					xpt_print(periph->path,
1651 					    "Invalidating pack\n");
1652 					softc->flags |= DA_FLAG_PACK_INVALID;
1653 				}
1654 
1655 				/*
1656 				 * Return all queued write I/O's with EIO
1657 				 * so the client can retry these I/Os in the
1658 				 * proper order should it attempt to recover.
1659 				 *
1660 				 * Leave read I/O's alone.
1661 				 */
1662 				daflushbioq(&softc->bio_queue_wr, EIO);
1663 				bp->b_error = error;
1664 				bp->b_resid = bp->b_bcount;
1665 				bp->b_flags |= B_ERROR;
1666 			} else {
1667 				bp->b_resid = csio->resid;
1668 				bp->b_error = 0;
1669 				if (bp->b_resid != 0)
1670 					bp->b_flags |= B_ERROR;
1671 			}
1672 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1673 				cam_release_devq(done_ccb->ccb_h.path,
1674 						 /*relsim_flags*/0,
1675 						 /*reduction*/0,
1676 						 /*timeout*/0,
1677 						 /*getcount_only*/0);
1678 		} else {
1679 			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1680 				panic("REQ_CMP with QFRZN");
1681 			bp->b_resid = csio->resid;
1682 			if (csio->resid > 0)
1683 				bp->b_flags |= B_ERROR;
1684 		}
1685 
1686 		/*
1687 		 * Block out any asynchronous callbacks
1688 		 * while we touch the pending ccb list.
1689 		 */
1690 		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1691 		if (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH) {
1692 			--softc->outstanding_cmds_wr;
1693 			if (softc->flags & DA_FLAG_WR_LIMIT) {
1694 				softc->flags &= ~DA_FLAG_WR_LIMIT;
1695 				mustsched = 1;
1696 			}
1697 		} else {
1698 			--softc->outstanding_cmds_rd;
1699 			if (softc->flags & DA_FLAG_RD_LIMIT) {
1700 				softc->flags &= ~DA_FLAG_RD_LIMIT;
1701 				mustsched = 1;
1702 			}
1703 		}
1704 
1705 		devstat_end_transaction_buf(&softc->device_stats, bp);
1706 		if ((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) ==
1707 		    DA_CCB_TRIM) {
1708 			struct trim_request *req =
1709 			    (struct trim_request *) csio->data_ptr;
1710 			int i;
1711 
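			/*
			 * csio->data_ptr points at req->data, the first member
			 * of the trim_request, so the cast above recovers the
			 * request.  bios[0] is the originating bio (completed
			 * below as "bio"); finish any additional bios that
			 * were merged into this TRIM command.
			 */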
1712 			for (i = 1; i < softc->trim_max_ranges &&
1713 			    req->bios[i]; i++) {
1714 				struct bio *bp1 = req->bios[i];
1715 
1716 				bp1->bio_buf->b_resid = bp->b_resid;
1717 				bp1->bio_buf->b_error = bp->b_error;
1718 				if (bp->b_flags & B_ERROR)
1719 					bp1->bio_buf->b_flags |= B_ERROR;
1720 				biodone(bp1);
1721 			}
1722 			softc->trim_running = 0;
1723 			biodone(bio);
1724 			xpt_schedule(periph,1);
1725 		} else
1726 			biodone(bio);
1727 
1728 
1729 		if (mustsched)
1730 			xpt_schedule(periph, /*priority*/1);
1731 
1732 		break;
1733 	}
1734 	case DA_CCB_PROBE:
1735 	case DA_CCB_PROBE2:
1736 	{
1737 		struct	   scsi_read_capacity_data *rdcap;
1738 		struct     scsi_read_capacity_data_16 *rcaplong;
1739 		char	   announce_buf[80];
1740 
1741 		rdcap = NULL;
1742 		rcaplong = NULL;
1743 		if (softc->state == DA_STATE_PROBE)
1744 			rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
1745 		else
1746 			rcaplong = (struct scsi_read_capacity_data_16 *)
1747 				    csio->data_ptr;
1748 
1749 		bzero(&info, sizeof(info));
1750 		info.d_type = DTYPE_SCSI;
1751 		info.d_serialno = xpt_path_serialno(periph->path);
1752 
1753 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1754 			struct disk_params *dp;
1755 			uint32_t block_size;
1756 			uint64_t maxsector;
1757 
1758 			if (softc->state == DA_STATE_PROBE) {
1759 				block_size = scsi_4btoul(rdcap->length);
1760 				maxsector = scsi_4btoul(rdcap->addr);
1761 
1762 				/*
1763 				 * According to SBC-2, if the standard 10
1764 				 * byte READ CAPACITY command returns 2^32 - 1,
1765 				 * we should issue the 16 byte version of
1766 				 * the command, since the device in question
1767 				 * has more sectors than can be represented
1768 				 * with the short version of the command.
1769 				 */
1770 				if (maxsector == 0xffffffff) {
1771 					softc->state = DA_STATE_PROBE2;
1772 					kfree(rdcap, M_SCSIDA);
1773 					xpt_release_ccb(done_ccb);
1774 					xpt_schedule(periph, /*priority*/5);
1775 					return;
1776 				}
1777 			} else {
1778 				block_size = scsi_4btoul(rcaplong->length);
1779 				maxsector = scsi_8btou64(rcaplong->addr);
1780 			}
1781 			dasetgeom(periph, block_size, maxsector);
1782 			dp = &softc->params;
1783 			ksnprintf(announce_buf, sizeof(announce_buf),
1784 				"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1785 				(uintmax_t) (((uintmax_t)dp->secsize *
1786 				dp->sectors) / (1024*1024)),
1787 				(uintmax_t)dp->sectors,
1788 				dp->secsize, dp->heads, dp->secs_per_track,
1789 				dp->cylinders);
1790 
1791 			CAM_SIM_UNLOCK(periph->sim);
1792 			info.d_media_blksize = softc->params.secsize;
1793 			info.d_media_blocks = softc->params.sectors;
1794 			info.d_media_size = 0;
1795 			info.d_secpertrack = softc->params.secs_per_track;
1796 			info.d_nheads = softc->params.heads;
1797 			info.d_ncylinders = softc->params.cylinders;
1798 			info.d_secpercyl = softc->params.heads *
1799 						softc->params.secs_per_track;
1800 			info.d_serialno = xpt_path_serialno(periph->path);
1801 			disk_setdiskinfo(&softc->disk, &info);
1802 			CAM_SIM_LOCK(periph->sim);
1803 		} else {
1804 			int	error;
1805 
1806 			announce_buf[0] = '\0';
1807 
1808 			/*
1809 			 * Retry any UNIT ATTENTION type errors.  They
1810 			 * are expected at boot.
1811 			 */
1812 			error = daerror(done_ccb, CAM_RETRY_SELTO,
1813 					SF_RETRY_UA|SF_NO_PRINT);
1814 			if (error == ERESTART) {
1815 				/*
1816 				 * A retry was scheduled, so
1817 				 * just return.
1818 				 */
1819 				return;
1820 			} else if (error != 0) {
1821 				struct scsi_sense_data *sense;
1822 				int asc, ascq;
1823 				int sense_key, error_code;
1824 				int have_sense;
1825 				cam_status status;
1826 				struct ccb_getdev cgd;
1827 
1828 				/* Don't wedge this device's queue */
1829 				status = done_ccb->ccb_h.status;
1830 				if ((status & CAM_DEV_QFRZN) != 0)
1831 					cam_release_devq(done_ccb->ccb_h.path,
1832 							 /*relsim_flags*/0,
1833 							 /*reduction*/0,
1834 							 /*timeout*/0,
1835 							 /*getcount_only*/0);
1836 
1837 
1838 				xpt_setup_ccb(&cgd.ccb_h,
1839 					      done_ccb->ccb_h.path,
1840 					      /* priority */ 1);
1841 				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1842 				xpt_action((union ccb *)&cgd);
1843 
1844 				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1845 				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1846 				 || ((status & CAM_AUTOSNS_VALID) == 0))
1847 					have_sense = FALSE;
1848 				else
1849 					have_sense = TRUE;
1850 
1851 				if (have_sense) {
1852 					sense = &csio->sense_data;
1853 					scsi_extract_sense(sense, &error_code,
1854 							   &sense_key,
1855 							   &asc, &ascq);
1856 				}
1857 				/*
1858 				 * Attach to anything that claims to be a
1859 				 * direct access or optical disk device,
1860 				 * as long as it doesn't return a "Logical
1861 				 * unit not supported" (0x25) error.
1862 				 */
1863 				if ((have_sense) && (asc != 0x25)
1864 				 && (error_code == SSD_CURRENT_ERROR)) {
1865 					const char *sense_key_desc;
1866 					const char *asc_desc;
1867 
1868 					scsi_sense_desc(sense_key, asc, ascq,
1869 							&cgd.inq_data,
1870 							&sense_key_desc,
1871 							&asc_desc);
1872 					ksnprintf(announce_buf,
1873 					    sizeof(announce_buf),
1874 						"Attempt to query device "
1875 						"size failed: %s, %s",
1876 						sense_key_desc,
1877 						asc_desc);
1878 					info.d_media_blksize = 512;
1879 					disk_setdiskinfo(&softc->disk, &info);
1880 				} else {
1881 					if (have_sense)
1882 						scsi_sense_print(
1883 							&done_ccb->csio);
1884 					else {
1885 						xpt_print(periph->path,
1886 						    "got CAM status %#x\n",
1887 						    done_ccb->ccb_h.status);
1888 					}
1889 
1890 					xpt_print(periph->path, "fatal error, "
1891 					    "failed to attach to device\n");
1892 
1893 					/*
1894 					 * Free up resources.
1895 					 */
1896 					cam_periph_invalidate(periph);
1897 				}
1898 			}
1899 		}
1900 		kfree(csio->data_ptr, M_SCSIDA);
1901 		if (announce_buf[0] != '\0') {
1902 			xpt_announce_periph(periph, announce_buf);
1903 			/*
1904 			 * Create our sysctl variables, now that we know
1905 			 * we have successfully attached.
1906 			 */
1907 			taskqueue_enqueue(taskqueue_thread[mycpuid],
1908 			    &softc->sysctl_task);
1909 		}
1910 
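		/*
		 * If the probe determined that the device can TRIM, flag
		 * it on the disk and announce the capability.
		 */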
1911 		if (softc->trim_max_ranges) {
1912 			softc->disk.d_info.d_trimflag |= DA_FLAG_CAN_TRIM;
1913 			kprintf("%s%d: supports TRIM\n",
1914 				periph->periph_name,
1915 				periph->unit_number);
1916 		}
1917 		softc->state = DA_STATE_NORMAL;
1918 		/*
1919 		 * Since our peripheral may be invalidated by an error
1920 		 * above or an external event, we must release our CCB
1921 		 * before releasing the probe lock on the peripheral.
1922 		 * The peripheral will only go away once the last lock
1923 		 * is removed, and we need it around for the CCB release
1924 		 * operation.
1925 		 */
1926 		xpt_release_ccb(done_ccb);
1927 		cam_periph_unhold(periph, 0);
1928 		return;
1929 	}
1930 	case DA_CCB_WAITING:
1931 	{
1932 		/* Caller will release the CCB */
1933 		wakeup(&done_ccb->ccb_h.cbfcnp);
1934 		return;
1935 	}
1936 	case DA_CCB_DUMP:
1937 		/* No-op.  We're polling */
1938 		return;
1939 	default:
1940 		break;
1941 	}
1942 	xpt_release_ccb(done_ccb);
1943 }
1944 
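/*
 * Common error handler for da CCBs.  Requests rejected with CAM_REQ_INVALID
 * or an ILLEGAL REQUEST check condition are retried with 10 byte CDBs via
 * cmd6workaround(); everything else is handed to cam_periph_error(), with
 * UNIT ATTENTION conditions treated as retryable.
 */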
1945 static int
1946 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1947 {
1948 	struct da_softc	  *softc;
1949 	struct cam_periph *periph;
1950 	int error;
1951 
1952 	periph = xpt_path_periph(ccb->ccb_h.path);
1953 	softc = (struct da_softc *)periph->softc;
1954 
1955 	/*
1956 	 * Automatically detect devices that do not support
1957 	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
1958 	 */
1959 	error = 0;
1960 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
1961 		error = cmd6workaround(ccb);
1962 	} else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
1963 		   CAM_SCSI_STATUS_ERROR)
1964 	 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
1965 	 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
1966 	 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
1967 	 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
1968 		int sense_key, error_code, asc, ascq;
1969 
1970 		scsi_extract_sense(&ccb->csio.sense_data,
1971 				   &error_code, &sense_key, &asc, &ascq);
1972 		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
1973 			error = cmd6workaround(ccb);
1974 	}
1975 	if (error == ERESTART)
1976 		return (ERESTART);
1977 
1978 	/*
1979 	 * XXX
1980 	 * Until we have a better way of doing pack validation,
1981 	 * don't treat UAs as errors.
1982 	 */
1983 	sense_flags |= SF_RETRY_UA;
1984 	return(cam_periph_error(ccb, cam_flags, sense_flags,
1985 				&softc->saved_ccb));
1986 }
1987 
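/*
 * Lock or unlock removable media with PREVENT ALLOW MEDIUM REMOVAL.  The
 * command is skipped if the pack is already in the requested state, and
 * DA_FLAG_PACK_LOCKED is updated on success.
 */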
1988 static void
1989 daprevent(struct cam_periph *periph, int action)
1990 {
1991 	struct	da_softc *softc;
1992 	union	ccb *ccb;
1993 	int	error;
1994 
1995 	softc = (struct da_softc *)periph->softc;
1996 
1997 	if (((action == PR_ALLOW)
1998 	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1999 	 || ((action == PR_PREVENT)
2000 	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
2001 		return;
2002 	}
2003 
2004 	ccb = cam_periph_getccb(periph, /*priority*/1);
2005 
2006 	scsi_prevent(&ccb->csio,
2007 		     /*retries*/1,
2008 		     /*cbfcnp*/dadone,
2009 		     MSG_SIMPLE_Q_TAG,
2010 		     action,
2011 		     SSD_FULL_SIZE,
2012 		     5000);
2013 
2014 	error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
2015 				  SF_RETRY_UA, &softc->device_stats);
2016 
2017 	if (error == 0) {
2018 		if (action == PR_ALLOW)
2019 			softc->flags &= ~DA_FLAG_PACK_LOCKED;
2020 		else
2021 			softc->flags |= DA_FLAG_PACK_LOCKED;
2022 	}
2023 
2024 	xpt_release_ccb(ccb);
2025 }
2026 
2027 /*
2028  * Check media on open, e.g. card reader devices which had no initial media.
2029  */
2030 static int
2031 dacheckmedia(struct cam_periph *periph)
2032 {
2033 	struct disk_params *dp;
2034 	struct da_softc *softc;
2035 	struct disk_info info;
2036 	int error;
2037 
2038 	softc = (struct da_softc *)periph->softc;
2039 	dp = &softc->params;
2040 
2041 	error = dagetcapacity(periph);
2042 
2043 	/*
2044 	 * Only reprobe on initial open and if the media is removable.
2045 	 *
2046 	 * NOTE: If we call disk_setdiskinfo() the device will re-probe
2047 	 *	 its slices and partitions, which takes time and can mess
2048 	 *	 up booting, so avoid it if nothing has changed.
2049 	 *	 XXX
2050 	 */
2051 	if (softc->flags & DA_FLAG_OPEN)
2052 		return (error);
2053 	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) == 0)
2054 		return (error);
2055 
2056 	bzero(&info, sizeof(info));
2057 	info.d_type = DTYPE_SCSI;
2058 	info.d_serialno = xpt_path_serialno(periph->path);
2059 
2060 	if (error == 0) {
2061 		CAM_SIM_UNLOCK(periph->sim);
2062 		info.d_media_blksize = softc->params.secsize;
2063 		info.d_media_blocks = softc->params.sectors;
2064 		info.d_media_size = 0;
2065 		info.d_secpertrack = softc->params.secs_per_track;
2066 		info.d_nheads = softc->params.heads;
2067 		info.d_ncylinders = softc->params.cylinders;
2068 		info.d_secpercyl = softc->params.heads *
2069 					softc->params.secs_per_track;
2070 		info.d_serialno = xpt_path_serialno(periph->path);
2071 		if (info.d_media_blocks != softc->disk.d_info.d_media_blocks) {
2072 			kprintf("%s%d: open removable media: "
2073 				"%juMB (%ju %u byte sectors: %dH %dS/T %dC)\n",
2074 				periph->periph_name, periph->unit_number,
2075 				(uintmax_t)(((uintmax_t)dp->secsize *
2076 					     dp->sectors) / (1024*1024)),
2077 				(uintmax_t)dp->sectors, dp->secsize,
2078 				dp->heads, dp->secs_per_track, dp->cylinders);
2079 			disk_setdiskinfo(&softc->disk, &info);
2080 		}
2081 		CAM_SIM_LOCK(periph->sim);
2082 	} else {
2083 		kprintf("%s%d: open removable media: no media present\n",
2084 			periph->periph_name, periph->unit_number);
2085 		info.d_media_blksize = 512;
2086 		disk_setdiskinfo(&softc->disk, &info);
2087 	}
2088 	return (error);
2089 }
2090 
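/*
 * Query the device size.  READ CAPACITY(10) is issued first; if the device
 * reports a maximum LBA of 0xffffffff the capacity does not fit in 32 bits
 * and READ CAPACITY(16) is issued to obtain the full 64-bit value.  On
 * success the disk parameters are updated via dasetgeom().
 */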
2091 static int
2092 dagetcapacity(struct cam_periph *periph)
2093 {
2094 	struct da_softc *softc;
2095 	union ccb *ccb;
2096 	struct scsi_read_capacity_data *rcap;
2097 	struct scsi_read_capacity_data_16 *rcaplong;
2098 	uint32_t block_len;
2099 	uint64_t maxsector;
2100 	int error;
2101 
2102 	softc = (struct da_softc *)periph->softc;
2103 	block_len = 0;
2104 	maxsector = 0;
2105 	error = 0;
2106 
2107 	/* Do a read capacity */
2108 	rcap = (struct scsi_read_capacity_data *)kmalloc(sizeof(*rcaplong),
2109 							 M_SCSIDA, M_INTWAIT);
2110 
2111 	ccb = cam_periph_getccb(periph, /*priority*/1);
2112 	scsi_read_capacity(&ccb->csio,
2113 			   /*retries*/4,
2114 			   /*cbfcnp*/dadone,
2115 			   MSG_SIMPLE_Q_TAG,
2116 			   rcap,
2117 			   SSD_FULL_SIZE,
2118 			   /*timeout*/60000);
2119 	ccb->ccb_h.ccb_bio = NULL;
2120 
2121 	error = cam_periph_runccb(ccb, daerror,
2122 				  /*cam_flags*/CAM_RETRY_SELTO,
2123 				  /*sense_flags*/SF_RETRY_UA,
2124 				  &softc->device_stats);
2125 
2126 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2127 		cam_release_devq(ccb->ccb_h.path,
2128 				 /*relsim_flags*/0,
2129 				 /*reduction*/0,
2130 				 /*timeout*/0,
2131 				 /*getcount_only*/0);
2132 
2133 	if (error == 0) {
2134 		block_len = scsi_4btoul(rcap->length);
2135 		maxsector = scsi_4btoul(rcap->addr);
2136 
2137 		if (maxsector != 0xffffffff)
2138 			goto done;
2139 	} else
2140 		goto done;
2141 
2142 	rcaplong = (struct scsi_read_capacity_data_16 *)rcap;
2143 
2144 	scsi_read_capacity_16(&ccb->csio,
2145 			      /*retries*/ 4,
2146 			      /*cbfcnp*/ dadone,
2147 			      /*tag_action*/ MSG_SIMPLE_Q_TAG,
2148 			      /*lba*/ 0,
2149 			      /*reladr*/ 0,
2150 			      /*pmi*/ 0,
2151 			      rcaplong,
2152 			      /*sense_len*/ SSD_FULL_SIZE,
2153 			      /*timeout*/ 60000);
2154 	ccb->ccb_h.ccb_bio = NULL;
2155 
2156 	error = cam_periph_runccb(ccb, daerror,
2157 				  /*cam_flags*/CAM_RETRY_SELTO,
2158 				  /*sense_flags*/SF_RETRY_UA,
2159 				  &softc->device_stats);
2160 
2161 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2162 		cam_release_devq(ccb->ccb_h.path,
2163 				 /*relsim_flags*/0,
2164 				 /*reduction*/0,
2165 				 /*timeout*/0,
2166 				 /*getcount_only*/0);
2167 
2168 	if (error == 0) {
2169 		block_len = scsi_4btoul(rcaplong->length);
2170 		maxsector = scsi_8btou64(rcaplong->addr);
2171 	}
2172 
2173 done:
2174 
2175 	if (error == 0)
2176 		dasetgeom(periph, block_len, maxsector);
2177 
2178 	xpt_release_ccb(ccb);
2179 
2180 	kfree(rcap, M_SCSIDA);
2181 
2182 	return (error);
2183 }
2184 
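/*
 * Record the block size and sector count, then ask the controller (via
 * XPT_CALC_GEOMETRY) for a CHS geometry suitable for booting, falling
 * back to a synthetic 255 head / 255 sector layout if that fails.
 */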
2185 static void
2186 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
2187 {
2188 	struct ccb_calc_geometry ccg;
2189 	struct da_softc *softc;
2190 	struct disk_params *dp;
2191 
2192 	softc = (struct da_softc *)periph->softc;
2193 
2194 	dp = &softc->params;
2195 	dp->secsize = block_len;
2196 	dp->sectors = maxsector + 1;
2197 	/*
2198 	 * Have the controller provide us with a geometry
2199 	 * for this disk.  The only time the geometry
2200 	 * matters is when we boot and the controller
2201 	 * is the only one knowledgeable enough to come
2202 	 * up with something that will make this a bootable
2203 	 * device.
2204 	 */
2205 	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
2206 	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
2207 	ccg.block_size = dp->secsize;
2208 	ccg.volume_size = dp->sectors;
2209 	ccg.heads = 0;
2210 	ccg.secs_per_track = 0;
2211 	ccg.cylinders = 0;
2212 	xpt_action((union ccb*)&ccg);
2213 	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2214 		/*
2215 		 * We don't know what went wrong here, but just pick
2216 		 * a geometry so we don't have nasty things like divide
2217 		 * by zero.
2218 		 */
2219 		dp->heads = 255;
2220 		dp->secs_per_track = 255;
2221 		dp->cylinders = dp->sectors / (255 * 255);
2222 		if (dp->cylinders == 0) {
2223 			dp->cylinders = 1;
2224 		}
2225 	} else {
2226 		dp->heads = ccg.heads;
2227 		dp->secs_per_track = ccg.secs_per_track;
2228 		dp->cylinders = ccg.cylinders;
2229 	}
2230 }
2231 
2232 /*
2233  * Step through all DA peripheral drivers, and if the device is still open,
2234  * sync the disk cache to physical media.
2235  */
2236 static void
2237 dashutdown(void * arg, int howto)
2238 {
2239 	struct cam_periph *periph;
2240 	struct da_softc *softc;
2241 
2242 	TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
2243 		union ccb ccb;
2244 
2245 		cam_periph_lock(periph);
2246 		softc = (struct da_softc *)periph->softc;
2247 
2248 		/*
2249 		 * We only sync the cache if the drive is still open, and
2250 		 * if the drive is capable of it.
2251 		 */
2252 		if (((softc->flags & DA_FLAG_OPEN) == 0)
2253 		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
2254 			cam_periph_unlock(periph);
2255 			continue;
2256 		}
2257 
2258 		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
2259 
2260 		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
2261 		scsi_synchronize_cache(&ccb.csio,
2262 				       /*retries*/1,
2263 				       /*cbfcnp*/dadone,
2264 				       MSG_SIMPLE_Q_TAG,
2265 				       /*begin_lba*/0, /* whole disk */
2266 				       /*lb_count*/0,
2267 				       SSD_FULL_SIZE,
2268 				       60 * 60 * 1000);
2269 
2270 		xpt_polled_action(&ccb);
2271 
2272 		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2273 			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
2274 			     CAM_SCSI_STATUS_ERROR)
2275 			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
2276 				int error_code, sense_key, asc, ascq;
2277 
2278 				scsi_extract_sense(&ccb.csio.sense_data,
2279 						   &error_code, &sense_key,
2280 						   &asc, &ascq);
2281 
2282 				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
2283 					scsi_sense_print(&ccb.csio);
2284 			} else {
2285 				xpt_print(periph->path, "Synchronize "
2286 				    "cache failed, status == 0x%x, scsi status "
2287 				    "== 0x%x\n", ccb.ccb_h.status,
2288 				    ccb.csio.scsi_status);
2289 			}
2290 		}
2291 
2292 		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
2293 			cam_release_devq(ccb.ccb_h.path,
2294 					 /*relsim_flags*/0,
2295 					 /*reduction*/0,
2296 					 /*timeout*/0,
2297 					 /*getcount_only*/0);
2298 
2299 		cam_periph_unlock(periph);
2300 	}
2301 }
2302 
2303 #else /* !_KERNEL */
2304 
2305 /*
2306  * XXX This is only left out of the kernel build to silence warnings.  If,
2307  * for some reason, this function is used in the kernel, the ifdefs should
2308  * be moved so it is included in both the kernel and userland.
2309  */
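/*
 * Build a FORMAT UNIT CDB in the supplied CCB; any parameter list is
 * supplied by the caller through data_ptr/dxfer_len.
 */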
2310 void
2311 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
2312 		 void (*cbfcnp)(struct cam_periph *, union ccb *),
2313 		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
2314 		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
2315 		 u_int32_t timeout)
2316 {
2317 	struct scsi_format_unit *scsi_cmd;
2318 
2319 	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
2320 	scsi_cmd->opcode = FORMAT_UNIT;
2321 	scsi_cmd->byte2 = byte2;
2322 	scsi_ulto2b(ileave, scsi_cmd->interleave);
2323 
2324 	cam_fill_csio(csio,
2325 		      retries,
2326 		      cbfcnp,
2327 		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
2328 		      tag_action,
2329 		      data_ptr,
2330 		      dxfer_len,
2331 		      sense_len,
2332 		      sizeof(*scsi_cmd),
2333 		      timeout);
2334 }
2335 
2336 #endif /* _KERNEL */
2337