xref: /freebsd/sys/dev/aacraid/aacraid_cam.c (revision d6b92ffa)
1 /*-
2  * Copyright (c) 2002-2010 Adaptec, Inc.
3  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * CAM front-end for communicating with non-DASD devices
33  */
34 
35 #include "opt_aacraid.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/sysctl.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/mutex.h>
45 
46 #include <cam/cam.h>
47 #include <cam/cam_ccb.h>
48 #include <cam/cam_debug.h>
49 #include <cam/cam_periph.h>
50 #if __FreeBSD_version < 801000
51 #include <cam/cam_xpt_periph.h>
52 #endif
53 #include <cam/cam_sim.h>
54 #include <cam/cam_xpt_sim.h>
55 #include <cam/scsi/scsi_all.h>
56 #include <cam/scsi/scsi_message.h>
57 
58 #include <sys/bus.h>
59 #include <sys/conf.h>
60 #include <sys/disk.h>
61 
62 #include <machine/md_var.h>
63 #include <machine/bus.h>
64 #include <sys/rman.h>
65 
66 #include <vm/vm.h>
67 #include <vm/pmap.h>
68 
69 #include <dev/aacraid/aacraid_reg.h>
70 #include <sys/aac_ioctl.h>
71 #include <dev/aacraid/aacraid_debug.h>
72 #include <dev/aacraid/aacraid_var.h>
73 
74 #if __FreeBSD_version >= 700025
75 #ifndef	CAM_NEW_TRAN_CODE
76 #define	CAM_NEW_TRAN_CODE	1
77 #endif
78 #endif
79 
#ifndef SVPD_SUPPORTED_PAGE_LIST
/*
 * Fallback definition of the SCSI VPD "Supported Pages" (page 0x00)
 * response layout, for build environments whose CAM headers do not
 * already provide it.
 */
struct scsi_vpd_supported_page_list
{
	u_int8_t device;	/* peripheral qualifier/device type */
	u_int8_t page_code;	/* VPD page code; 0x00 for this page */
#define	SVPD_SUPPORTED_PAGE_LIST 0x00
	u_int8_t reserved;
	u_int8_t length;	/* number of VPD entries */
#define	SVPD_SUPPORTED_PAGES_SIZE	251
	u_int8_t list[SVPD_SUPPORTED_PAGES_SIZE];	/* supported page codes */
};
#endif
92 
93 /************************** Version Compatibility *************************/
94 #if	__FreeBSD_version < 700031
95 #define	aac_sim_alloc(a,b,c,d,e,f,g,h,i)	cam_sim_alloc(a,b,c,d,e,g,h,i)
96 #else
97 #define	aac_sim_alloc				cam_sim_alloc
98 #endif
99 
100 struct aac_cam {
101 	device_t		dev;
102 	struct aac_sim		*inf;
103 	struct cam_sim		*sim;
104 	struct cam_path		*path;
105 };
106 
107 static int aac_cam_probe(device_t dev);
108 static int aac_cam_attach(device_t dev);
109 static int aac_cam_detach(device_t dev);
110 static void aac_cam_action(struct cam_sim *, union ccb *);
111 static void aac_cam_poll(struct cam_sim *);
112 static void aac_cam_complete(struct aac_command *);
113 static void aac_container_complete(struct aac_command *);
114 #if __FreeBSD_version >= 700000
115 static void aac_cam_rescan(struct aac_softc *sc, uint32_t channel,
116 	uint32_t target_id);
117 #endif
118 static void aac_set_scsi_error(struct aac_softc *sc, union ccb *ccb,
119 	u_int8_t status, u_int8_t key, u_int8_t asc, u_int8_t ascq);
120 static int aac_load_map_command_sg(struct aac_softc *, struct aac_command *);
121 static u_int64_t aac_eval_blockno(u_int8_t *);
122 static void aac_container_rw_command(struct cam_sim *, union ccb *, u_int8_t *);
123 static void aac_container_special_command(struct cam_sim *, union ccb *,
124 	u_int8_t *);
125 static void aac_passthrough_command(struct cam_sim *, union ccb *);
126 
127 static u_int32_t aac_cam_reset_bus(struct cam_sim *, union ccb *);
128 static u_int32_t aac_cam_abort_ccb(struct cam_sim *, union ccb *);
129 static u_int32_t aac_cam_term_io(struct cam_sim *, union ccb *);
130 
/* Newbus glue: "aacraidp" is the pass-through child of the aacraid core. */
static devclass_t	aacraid_pass_devclass;

static device_method_t	aacraid_pass_methods[] = {
	DEVMETHOD(device_probe,		aac_cam_probe),
	DEVMETHOD(device_attach,	aac_cam_attach),
	DEVMETHOD(device_detach,	aac_cam_detach),
	{ 0, 0 }
};

static driver_t	aacraid_pass_driver = {
	"aacraidp",
	aacraid_pass_methods,
	sizeof(struct aac_cam)
};

DRIVER_MODULE(aacraidp, aacraid, aacraid_pass_driver, aacraid_pass_devclass, 0, 0);
MODULE_DEPEND(aacraidp, cam, 1, 1, 1);

/* Allocation tag for the deferred-requeue events allocated below. */
MALLOC_DEFINE(M_AACRAIDCAM, "aacraidcam", "AACRAID CAM info");
150 
151 static void
152 aac_set_scsi_error(struct aac_softc *sc, union ccb *ccb, u_int8_t status,
153 	u_int8_t key, u_int8_t asc, u_int8_t ascq)
154 {
155 #if __FreeBSD_version >= 900000
156 	struct scsi_sense_data_fixed *sense =
157 		(struct scsi_sense_data_fixed *)&ccb->csio.sense_data;
158 #else
159 	struct scsi_sense_data *sense = &ccb->csio.sense_data;
160 #endif
161 
162 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "Error %d!", status);
163 
164 	ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
165 	ccb->csio.scsi_status = status;
166 	if (status == SCSI_STATUS_CHECK_COND) {
167 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
168 		bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
169 		ccb->csio.sense_data.error_code =
170 			SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
171 		sense->flags = key;
172 		if (ccb->csio.sense_len >= 14) {
173 			sense->extra_len = 6;
174 			sense->add_sense_code = asc;
175 			sense->add_sense_code_qual = ascq;
176 		}
177 	}
178 }
179 
180 #if __FreeBSD_version >= 700000
181 static void
182 aac_cam_rescan(struct aac_softc *sc, uint32_t channel, uint32_t target_id)
183 {
184 	union ccb *ccb;
185 	struct aac_sim *sim;
186 	struct aac_cam *camsc;
187 
188 	if (target_id == AAC_CAM_TARGET_WILDCARD)
189 		target_id = CAM_TARGET_WILDCARD;
190 
191 	TAILQ_FOREACH(sim, &sc->aac_sim_tqh, sim_link) {
192 		camsc = sim->aac_cam;
193 		if (camsc == NULL || camsc->inf == NULL ||
194 		    camsc->inf->BusNumber != channel)
195 			continue;
196 
197 		ccb = xpt_alloc_ccb_nowait();
198 		if (ccb == NULL) {
199 			device_printf(sc->aac_dev,
200 			    "Cannot allocate ccb for bus rescan.\n");
201 			return;
202 		}
203 
204 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
205 		    cam_sim_path(camsc->sim),
206 		    target_id, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
207 			xpt_free_ccb(ccb);
208 			device_printf(sc->aac_dev,
209 			    "Cannot create path for bus rescan.\n");
210 			return;
211 		}
212 		xpt_rescan(ccb);
213 		break;
214 	}
215 }
216 #endif
217 
218 static void
219 aac_cam_event(struct aac_softc *sc, struct aac_event *event, void *arg)
220 {
221 	union ccb *ccb;
222 	struct aac_cam *camsc;
223 
224 	switch (event->ev_type) {
225 	case AAC_EVENT_CMFREE:
226 		ccb = arg;
227 		camsc = ccb->ccb_h.sim_priv.entries[0].ptr;
228 		free(event, M_AACRAIDCAM);
229 		xpt_release_simq(camsc->sim, 1);
230 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
231 		xpt_done(ccb);
232 		break;
233 	default:
234 		device_printf(sc->aac_dev, "unknown event %d in aac_cam\n",
235 		    event->ev_type);
236 		break;
237 	}
238 
239 	return;
240 }
241 
242 static int
243 aac_cam_probe(device_t dev)
244 {
245 	struct aac_cam *camsc;
246 
247 	camsc = (struct aac_cam *)device_get_softc(dev);
248 	if (!camsc->inf)
249 		return (0);
250 	fwprintf(camsc->inf->aac_sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
251 	return (0);
252 }
253 
254 static int
255 aac_cam_detach(device_t dev)
256 {
257 	struct aac_softc *sc;
258 	struct aac_cam *camsc;
259 
260 	camsc = (struct aac_cam *)device_get_softc(dev);
261 	if (!camsc->inf)
262 		return (0);
263 	sc = camsc->inf->aac_sc;
264 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
265 	camsc->inf->aac_cam = NULL;
266 
267 	mtx_lock(&sc->aac_io_lock);
268 
269 	xpt_async(AC_LOST_DEVICE, camsc->path, NULL);
270 	xpt_free_path(camsc->path);
271 	xpt_bus_deregister(cam_sim_path(camsc->sim));
272 	cam_sim_free(camsc->sim, /*free_devq*/TRUE);
273 
274 	sc->cam_rescan_cb = NULL;
275 
276 	mtx_unlock(&sc->aac_io_lock);
277 
278 	return (0);
279 }
280 
281 /*
282  * Register the driver as a CAM SIM
283  */
284 static int
285 aac_cam_attach(device_t dev)
286 {
287 	struct cam_devq *devq;
288 	struct cam_sim *sim;
289 	struct cam_path *path;
290 	struct aac_cam *camsc;
291 	struct aac_sim *inf;
292 
293 	camsc = (struct aac_cam *)device_get_softc(dev);
294 	inf = (struct aac_sim *)device_get_ivars(dev);
295 	if (!inf)
296 		return (EIO);
297 	fwprintf(inf->aac_sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
298 	camsc->inf = inf;
299 	camsc->inf->aac_cam = camsc;
300 
301 	devq = cam_simq_alloc(inf->TargetsPerBus);
302 	if (devq == NULL)
303 		return (EIO);
304 
305 	sim = aac_sim_alloc(aac_cam_action, aac_cam_poll, "aacraidp", camsc,
306 	    device_get_unit(dev), &inf->aac_sc->aac_io_lock, 1, 1, devq);
307 	if (sim == NULL) {
308 		cam_simq_free(devq);
309 		return (EIO);
310 	}
311 
312 	/* Since every bus has it's own sim, every bus 'appears' as bus 0 */
313 	mtx_lock(&inf->aac_sc->aac_io_lock);
314 	if (aac_xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
315 		cam_sim_free(sim, TRUE);
316 		mtx_unlock(&inf->aac_sc->aac_io_lock);
317 		return (EIO);
318 	}
319 
320 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
321 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
322 		xpt_bus_deregister(cam_sim_path(sim));
323 		cam_sim_free(sim, TRUE);
324 		mtx_unlock(&inf->aac_sc->aac_io_lock);
325 		return (EIO);
326 	}
327 
328 #if __FreeBSD_version >= 700000
329 	inf->aac_sc->cam_rescan_cb = aac_cam_rescan;
330 #endif
331 	mtx_unlock(&inf->aac_sc->aac_io_lock);
332 
333 	camsc->sim = sim;
334 	camsc->path = path;
335 
336 	return (0);
337 }
338 
339 static u_int64_t
340 aac_eval_blockno(u_int8_t *cmdp)
341 {
342 	u_int64_t blockno;
343 
344 	switch (cmdp[0]) {
345 	case READ_6:
346 	case WRITE_6:
347 		blockno = scsi_3btoul(((struct scsi_rw_6 *)cmdp)->addr);
348 		break;
349 	case READ_10:
350 	case WRITE_10:
351 		blockno = scsi_4btoul(((struct scsi_rw_10 *)cmdp)->addr);
352 		break;
353 	case READ_12:
354 	case WRITE_12:
355 		blockno = scsi_4btoul(((struct scsi_rw_12 *)cmdp)->addr);
356 		break;
357 	case READ_16:
358 	case WRITE_16:
359 		blockno = scsi_8btou64(((struct scsi_rw_16 *)cmdp)->addr);
360 		break;
361 	default:
362 		blockno = 0;
363 		break;
364 	}
365 	return(blockno);
366 }
367 
368 static void
369 aac_container_rw_command(struct cam_sim *sim, union ccb *ccb, u_int8_t *cmdp)
370 {
371 	struct	aac_cam *camsc;
372 	struct	aac_softc *sc;
373 	struct	aac_command *cm;
374 	struct	aac_fib *fib;
375 	u_int64_t blockno;
376 
377 	camsc = (struct aac_cam *)cam_sim_softc(sim);
378 	sc = camsc->inf->aac_sc;
379 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
380 
381 	if (aacraid_alloc_command(sc, &cm)) {
382 		struct aac_event *event;
383 
384 		xpt_freeze_simq(sim, 1);
385 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
386 		ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
387 		event = malloc(sizeof(struct aac_event), M_AACRAIDCAM,
388 		    M_NOWAIT | M_ZERO);
389 		if (event == NULL) {
390 			device_printf(sc->aac_dev,
391 			    "Warning, out of memory for event\n");
392 			return;
393 		}
394 		event->ev_callback = aac_cam_event;
395 		event->ev_arg = ccb;
396 		event->ev_type = AAC_EVENT_CMFREE;
397 		aacraid_add_event(sc, event);
398 		return;
399 	}
400 
401 	fib = cm->cm_fib;
402 	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
403 	case CAM_DIR_IN:
404 		cm->cm_flags |= AAC_CMD_DATAIN;
405 		break;
406 	case CAM_DIR_OUT:
407 		cm->cm_flags |= AAC_CMD_DATAOUT;
408 		break;
409 	case CAM_DIR_NONE:
410 		break;
411 	default:
412 		cm->cm_flags |= AAC_CMD_DATAIN | AAC_CMD_DATAOUT;
413 		break;
414 	}
415 
416 	blockno = aac_eval_blockno(cmdp);
417 
418 	cm->cm_complete = aac_container_complete;
419 	cm->cm_ccb = ccb;
420 	cm->cm_timestamp = time_uptime;
421 	cm->cm_data = (void *)ccb->csio.data_ptr;
422 	cm->cm_datalen = ccb->csio.dxfer_len;
423 
424 	fib->Header.Size = sizeof(struct aac_fib_header);
425 	fib->Header.XferState =
426 		AAC_FIBSTATE_HOSTOWNED   |
427 		AAC_FIBSTATE_INITIALISED |
428 		AAC_FIBSTATE_EMPTY	 |
429 		AAC_FIBSTATE_FROMHOST	 |
430 		AAC_FIBSTATE_REXPECTED   |
431 		AAC_FIBSTATE_NORM	 |
432 		AAC_FIBSTATE_ASYNC	 |
433 		AAC_FIBSTATE_FAST_RESPONSE;
434 
435 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
436 		struct aac_raw_io2 *raw;
437 		raw = (struct aac_raw_io2 *)&fib->data[0];
438 		bzero(raw, sizeof(struct aac_raw_io2));
439 		fib->Header.Command = RawIo2;
440 		raw->strtBlkLow = (u_int32_t)blockno;
441 		raw->strtBlkHigh = (u_int32_t)(blockno >> 32);
442 		raw->byteCnt = cm->cm_datalen;
443 		raw->ldNum = ccb->ccb_h.target_id;
444 		fib->Header.Size += sizeof(struct aac_raw_io2);
445 		cm->cm_sgtable = (struct aac_sg_table *)raw->sge;
446 		if (cm->cm_flags & AAC_CMD_DATAIN)
447 			raw->flags = RIO2_IO_TYPE_READ | RIO2_SG_FORMAT_IEEE1212;
448 		else
449 			raw->flags = RIO2_IO_TYPE_WRITE | RIO2_SG_FORMAT_IEEE1212;
450 	} else if (sc->flags & AAC_FLAGS_RAW_IO) {
451 		struct aac_raw_io *raw;
452 		raw = (struct aac_raw_io *)&fib->data[0];
453 		bzero(raw, sizeof(struct aac_raw_io));
454 		fib->Header.Command = RawIo;
455 		raw->BlockNumber = blockno;
456 		raw->ByteCount = cm->cm_datalen;
457 		raw->ContainerId = ccb->ccb_h.target_id;
458 		fib->Header.Size += sizeof(struct aac_raw_io);
459 		cm->cm_sgtable = (struct aac_sg_table *)
460 			&raw->SgMapRaw;
461 		if (cm->cm_flags & AAC_CMD_DATAIN)
462 			raw->Flags = 1;
463 	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
464 		fib->Header.Command = ContainerCommand;
465 		if (cm->cm_flags & AAC_CMD_DATAIN) {
466 			struct aac_blockread *br;
467 			br = (struct aac_blockread *)&fib->data[0];
468 			br->Command = VM_CtBlockRead;
469 			br->ContainerId = ccb->ccb_h.target_id;
470 			br->BlockNumber = blockno;
471 			br->ByteCount = cm->cm_datalen;
472 			fib->Header.Size += sizeof(struct aac_blockread);
473 			cm->cm_sgtable = &br->SgMap;
474 		} else {
475 			struct aac_blockwrite *bw;
476 			bw = (struct aac_blockwrite *)&fib->data[0];
477 			bw->Command = VM_CtBlockWrite;
478 			bw->ContainerId = ccb->ccb_h.target_id;
479 			bw->BlockNumber = blockno;
480 			bw->ByteCount = cm->cm_datalen;
481 			bw->Stable = CUNSTABLE;
482 			fib->Header.Size += sizeof(struct aac_blockwrite);
483 			cm->cm_sgtable = &bw->SgMap;
484 		}
485 	} else {
486 		fib->Header.Command = ContainerCommand64;
487 		if (cm->cm_flags & AAC_CMD_DATAIN) {
488 			struct aac_blockread64 *br;
489 			br = (struct aac_blockread64 *)&fib->data[0];
490 			br->Command = VM_CtHostRead64;
491 			br->ContainerId = ccb->ccb_h.target_id;
492 			br->SectorCount = cm->cm_datalen/AAC_BLOCK_SIZE;
493 			br->BlockNumber = blockno;
494 			br->Pad = 0;
495 			br->Flags = 0;
496 			fib->Header.Size += sizeof(struct aac_blockread64);
497 			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
498 		} else {
499 			struct aac_blockwrite64 *bw;
500 			bw = (struct aac_blockwrite64 *)&fib->data[0];
501 			bw->Command = VM_CtHostWrite64;
502 			bw->ContainerId = ccb->ccb_h.target_id;
503 			bw->SectorCount = cm->cm_datalen/AAC_BLOCK_SIZE;
504 			bw->BlockNumber = blockno;
505 			bw->Pad = 0;
506 			bw->Flags = 0;
507 			fib->Header.Size += sizeof(struct aac_blockwrite64);
508 			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
509 		}
510 	}
511 	aac_enqueue_ready(cm);
512 	aacraid_startio(cm->cm_sc);
513 }
514 
515 static void
516 aac_container_special_command(struct cam_sim *sim, union ccb *ccb,
517 	u_int8_t *cmdp)
518 {
519 	struct	aac_cam *camsc;
520 	struct	aac_softc *sc;
521 	struct	aac_container *co;
522 
523 	camsc = (struct aac_cam *)cam_sim_softc(sim);
524 	sc = camsc->inf->aac_sc;
525 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
526 
527 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
528 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "found container %d search for %d", co->co_mntobj.ObjectId, ccb->ccb_h.target_id);
529 		if (co->co_mntobj.ObjectId == ccb->ccb_h.target_id)
530 			break;
531 	}
532 	if (co == NULL || ccb->ccb_h.target_lun != 0) {
533 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B,
534 			"Container not present: cmd 0x%x id %d lun %d len %d",
535 			*cmdp, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
536 			ccb->csio.dxfer_len);
537 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
538 		xpt_done(ccb);
539 		return;
540 	}
541 
542 	if (ccb->csio.dxfer_len)
543 		bzero(ccb->csio.data_ptr, ccb->csio.dxfer_len);
544 
545 	switch (*cmdp) {
546 	case INQUIRY:
547 	{
548 		struct scsi_inquiry *inq = (struct scsi_inquiry *)cmdp;
549 
550 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
551 		"Container INQUIRY id %d lun %d len %d VPD 0x%x Page 0x%x",
552 			ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
553 			ccb->csio.dxfer_len, inq->byte2, inq->page_code);
554 		if (!(inq->byte2 & SI_EVPD)) {
555 			struct scsi_inquiry_data *p =
556 				(struct scsi_inquiry_data *)ccb->csio.data_ptr;
557 			if (inq->page_code != 0) {
558 				aac_set_scsi_error(sc, ccb,
559 					SCSI_STATUS_CHECK_COND,
560 					SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
561 				xpt_done(ccb);
562 				return;
563 			}
564 			p->device = T_DIRECT;
565 			p->version = SCSI_REV_SPC2;
566 			p->response_format = 2;
567 			if (ccb->csio.dxfer_len >= 36) {
568 				p->additional_length = 31;
569 				p->flags = SID_WBus16|SID_Sync|SID_CmdQue;
570 				/* OEM Vendor defines */
571 				strncpy(p->vendor, "Adaptec ", sizeof(p->vendor));
572 				strncpy(p->product, "Array           ",
573 				    sizeof(p->product));
574 				strncpy(p->revision, "V1.0",
575 				    sizeof(p->revision));
576 			}
577 		} else {
578 			if (inq->page_code == SVPD_SUPPORTED_PAGE_LIST) {
579 				struct scsi_vpd_supported_page_list *p =
580 					(struct scsi_vpd_supported_page_list *)
581 					ccb->csio.data_ptr;
582 				p->device = T_DIRECT;
583 				p->page_code = SVPD_SUPPORTED_PAGE_LIST;
584 				p->length = 2;
585 				p->list[0] = SVPD_SUPPORTED_PAGE_LIST;
586 				p->list[1] = SVPD_UNIT_SERIAL_NUMBER;
587 			} else if (inq->page_code == SVPD_UNIT_SERIAL_NUMBER) {
588 				struct scsi_vpd_unit_serial_number *p =
589 					(struct scsi_vpd_unit_serial_number *)
590 					ccb->csio.data_ptr;
591 				p->device = T_DIRECT;
592 				p->page_code = SVPD_UNIT_SERIAL_NUMBER;
593 				p->length = sprintf((char *)p->serial_num,
594 					"%08X%02X", co->co_uid,
595 					ccb->ccb_h.target_id);
596 			} else {
597 				aac_set_scsi_error(sc, ccb,
598 					SCSI_STATUS_CHECK_COND,
599 					SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
600 				xpt_done(ccb);
601 				return;
602 			}
603 		}
604 		ccb->ccb_h.status = CAM_REQ_CMP;
605 		break;
606 	}
607 
608 	case REPORT_LUNS:
609 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
610 		"Container REPORT_LUNS id %d lun %d len %d",
611 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
612 		ccb->csio.dxfer_len);
613 		ccb->ccb_h.status = CAM_REQ_CMP;
614 		break;
615 
616 	case START_STOP:
617 	{
618 		struct scsi_start_stop_unit *ss =
619 			(struct scsi_start_stop_unit *)cmdp;
620 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
621 		"Container START_STOP id %d lun %d len %d",
622 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
623 		ccb->csio.dxfer_len);
624 		if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
625 			struct aac_command *cm;
626 			struct aac_fib *fib;
627 			struct aac_cnt_config *ccfg;
628 
629 			if (aacraid_alloc_command(sc, &cm)) {
630 				struct aac_event *event;
631 
632 				xpt_freeze_simq(sim, 1);
633 				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
634 				ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
635 				event = malloc(sizeof(struct aac_event), M_AACRAIDCAM,
636 					M_NOWAIT | M_ZERO);
637 				if (event == NULL) {
638 					device_printf(sc->aac_dev,
639 						"Warning, out of memory for event\n");
640 					return;
641 				}
642 				event->ev_callback = aac_cam_event;
643 				event->ev_arg = ccb;
644 				event->ev_type = AAC_EVENT_CMFREE;
645 				aacraid_add_event(sc, event);
646 				return;
647 			}
648 
649 			fib = cm->cm_fib;
650 			cm->cm_timestamp = time_uptime;
651 			cm->cm_datalen = 0;
652 
653 			fib->Header.Size =
654 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
655 			fib->Header.XferState =
656 				AAC_FIBSTATE_HOSTOWNED   |
657 				AAC_FIBSTATE_INITIALISED |
658 				AAC_FIBSTATE_EMPTY	 |
659 				AAC_FIBSTATE_FROMHOST	 |
660 				AAC_FIBSTATE_REXPECTED   |
661 				AAC_FIBSTATE_NORM	 |
662 				AAC_FIBSTATE_ASYNC	 |
663 				AAC_FIBSTATE_FAST_RESPONSE;
664 			fib->Header.Command = ContainerCommand;
665 
666 			/* Start unit */
667 			ccfg = (struct aac_cnt_config *)&fib->data[0];
668 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
669 			ccfg->Command = VM_ContainerConfig;
670 			ccfg->CTCommand.command = CT_PM_DRIVER_SUPPORT;
671 			ccfg->CTCommand.param[0] = (ss->how & SSS_START ?
672 				AAC_PM_DRIVERSUP_START_UNIT :
673 				AAC_PM_DRIVERSUP_STOP_UNIT);
674 			ccfg->CTCommand.param[1] = co->co_mntobj.ObjectId;
675 			ccfg->CTCommand.param[2] = 0;	/* 1 - immediate */
676 
677 			if (aacraid_wait_command(cm) != 0 ||
678 				*(u_int32_t *)&fib->data[0] != 0) {
679 				printf("Power Management: Error start/stop container %d\n",
680 				co->co_mntobj.ObjectId);
681 			}
682 			aacraid_release_command(cm);
683 		}
684 		ccb->ccb_h.status = CAM_REQ_CMP;
685 		break;
686 	}
687 
688 	case TEST_UNIT_READY:
689 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
690 		"Container TEST_UNIT_READY id %d lun %d len %d",
691 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
692 		ccb->csio.dxfer_len);
693 		ccb->ccb_h.status = CAM_REQ_CMP;
694 		break;
695 
696 	case REQUEST_SENSE:
697 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
698 		"Container REQUEST_SENSE id %d lun %d len %d",
699 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
700 		ccb->csio.dxfer_len);
701 		ccb->ccb_h.status = CAM_REQ_CMP;
702 		break;
703 
704 	case READ_CAPACITY:
705 	{
706 		struct scsi_read_capacity_data *p =
707 			(struct scsi_read_capacity_data *)ccb->csio.data_ptr;
708 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
709 		"Container READ_CAPACITY id %d lun %d len %d",
710 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
711 		ccb->csio.dxfer_len);
712 		scsi_ulto4b(co->co_mntobj.ObjExtension.BlockDevice.BlockSize, p->length);
713 		/* check if greater than 2TB */
714 		if (co->co_mntobj.CapacityHigh) {
715 			if (sc->flags & AAC_FLAGS_LBA_64BIT)
716 				scsi_ulto4b(0xffffffff, p->addr);
717 		} else {
718 			scsi_ulto4b(co->co_mntobj.Capacity-1, p->addr);
719 		}
720 		ccb->ccb_h.status = CAM_REQ_CMP;
721 		break;
722 	}
723 
724 	case SERVICE_ACTION_IN:
725 	{
726 		struct scsi_read_capacity_data_long *p =
727 			(struct scsi_read_capacity_data_long *)
728 			ccb->csio.data_ptr;
729 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
730 		"Container SERVICE_ACTION_IN id %d lun %d len %d",
731 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
732 		ccb->csio.dxfer_len);
733 		if (((struct scsi_read_capacity_16 *)cmdp)->service_action !=
734 			SRC16_SERVICE_ACTION) {
735 			aac_set_scsi_error(sc, ccb, SCSI_STATUS_CHECK_COND,
736 				SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
737 			xpt_done(ccb);
738 			return;
739 		}
740 		scsi_ulto4b(co->co_mntobj.ObjExtension.BlockDevice.BlockSize, p->length);
741 		scsi_ulto4b(co->co_mntobj.CapacityHigh, p->addr);
742 		scsi_ulto4b(co->co_mntobj.Capacity-1, &p->addr[4]);
743 
744 		if (ccb->csio.dxfer_len >= 14) {
745 			u_int32_t mapping = co->co_mntobj.ObjExtension.BlockDevice.bdLgclPhysMap;
746 			p->prot_lbppbe = 0;
747 			while (mapping > 1) {
748 				mapping >>= 1;
749 				p->prot_lbppbe++;
750 			}
751 			p->prot_lbppbe &= 0x0f;
752 		}
753 
754 		ccb->ccb_h.status = CAM_REQ_CMP;
755 		break;
756 	}
757 
758 	case MODE_SENSE_6:
759 	{
760 		struct scsi_mode_sense_6 *msp =(struct scsi_mode_sense_6 *)cmdp;
761 		struct ms6_data {
762 			struct scsi_mode_hdr_6 hd;
763 			struct scsi_mode_block_descr bd;
764 			char pages;
765 		} *p = (struct ms6_data *)ccb->csio.data_ptr;
766 		char *pagep;
767 		int return_all_pages = FALSE;
768 
769 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
770 		"Container MODE_SENSE id %d lun %d len %d page %d",
771 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
772 		ccb->csio.dxfer_len, msp->page);
773 		p->hd.datalen = sizeof(struct scsi_mode_hdr_6) - 1;
774 		if (co->co_mntobj.ContentState & AAC_FSCS_READONLY)
775 			p->hd.dev_specific = 0x80;	/* WP */
776 		p->hd.dev_specific |= 0x10;	/* DPOFUA */
777 		if (msp->byte2 & SMS_DBD) {
778 			p->hd.block_descr_len = 0;
779 		} else {
780 			p->hd.block_descr_len =
781 				sizeof(struct scsi_mode_block_descr);
782 			p->hd.datalen += p->hd.block_descr_len;
783 			scsi_ulto3b(co->co_mntobj.ObjExtension.BlockDevice.BlockSize, p->bd.block_len);
784 			if (co->co_mntobj.Capacity > 0xffffff ||
785 				co->co_mntobj.CapacityHigh) {
786 				p->bd.num_blocks[0] = 0xff;
787 				p->bd.num_blocks[1] = 0xff;
788 				p->bd.num_blocks[2] = 0xff;
789 			} else {
790 				p->bd.num_blocks[0] = (u_int8_t)
791 					(co->co_mntobj.Capacity >> 16);
792 				p->bd.num_blocks[1] = (u_int8_t)
793 					(co->co_mntobj.Capacity >> 8);
794 				p->bd.num_blocks[2] = (u_int8_t)
795 					(co->co_mntobj.Capacity);
796 			}
797 		}
798 		pagep = &p->pages;
799 		switch (msp->page & SMS_PAGE_CODE) {
800 		case SMS_ALL_PAGES_PAGE:
801 			return_all_pages = TRUE;
802 		case SMS_CONTROL_MODE_PAGE:
803 		{
804 			struct scsi_control_page *cp =
805 				(struct scsi_control_page *)pagep;
806 
807 			if (ccb->csio.dxfer_len <= p->hd.datalen + 8) {
808 				aac_set_scsi_error(sc, ccb,
809 					SCSI_STATUS_CHECK_COND,
810 					SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
811 				xpt_done(ccb);
812 				return;
813 			}
814 			cp->page_code = SMS_CONTROL_MODE_PAGE;
815 			cp->page_length = 6;
816 			p->hd.datalen += 8;
817 			pagep += 8;
818 			if (!return_all_pages)
819 				break;
820 		}
821 		case SMS_VENDOR_SPECIFIC_PAGE:
822 			break;
823 		default:
824 			aac_set_scsi_error(sc, ccb, SCSI_STATUS_CHECK_COND,
825 				SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00);
826 			xpt_done(ccb);
827 			return;
828 		}
829 		ccb->ccb_h.status = CAM_REQ_CMP;
830 		break;
831 	}
832 
833 	case SYNCHRONIZE_CACHE:
834 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B,
835 		"Container SYNCHRONIZE_CACHE id %d lun %d len %d",
836 		ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
837 		ccb->csio.dxfer_len);
838 		ccb->ccb_h.status = CAM_REQ_CMP;
839 		break;
840 
841 	default:
842 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B,
843 		"Container unsupp. cmd 0x%x id %d lun %d len %d",
844 		*cmdp, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
845 		ccb->csio.dxfer_len);
846 		ccb->ccb_h.status = CAM_REQ_CMP; /*CAM_REQ_INVALID*/
847 		break;
848 	}
849 	xpt_done(ccb);
850 }
851 
852 static void
853 aac_passthrough_command(struct cam_sim *sim, union ccb *ccb)
854 {
855 	struct	aac_cam *camsc;
856 	struct	aac_softc *sc;
857 	struct	aac_command *cm;
858 	struct	aac_fib *fib;
859 	struct	aac_srb *srb;
860 
861 	camsc = (struct aac_cam *)cam_sim_softc(sim);
862 	sc = camsc->inf->aac_sc;
863 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
864 
865 	if (aacraid_alloc_command(sc, &cm)) {
866 		struct aac_event *event;
867 
868 		xpt_freeze_simq(sim, 1);
869 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
870 		ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
871 		event = malloc(sizeof(struct aac_event), M_AACRAIDCAM,
872 		    M_NOWAIT | M_ZERO);
873 		if (event == NULL) {
874 			device_printf(sc->aac_dev,
875 			    "Warning, out of memory for event\n");
876 			return;
877 		}
878 		event->ev_callback = aac_cam_event;
879 		event->ev_arg = ccb;
880 		event->ev_type = AAC_EVENT_CMFREE;
881 		aacraid_add_event(sc, event);
882 		return;
883 	}
884 
885 	fib = cm->cm_fib;
886 	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
887 	case CAM_DIR_IN:
888 		cm->cm_flags |= AAC_CMD_DATAIN;
889 		break;
890 	case CAM_DIR_OUT:
891 		cm->cm_flags |= AAC_CMD_DATAOUT;
892 		break;
893 	case CAM_DIR_NONE:
894 		break;
895 	default:
896 		cm->cm_flags |= AAC_CMD_DATAIN | AAC_CMD_DATAOUT;
897 		break;
898 	}
899 
900 	srb = (struct aac_srb *)&fib->data[0];
901 	srb->function = AAC_SRB_FUNC_EXECUTE_SCSI;
902 	if (cm->cm_flags & (AAC_CMD_DATAIN|AAC_CMD_DATAOUT))
903 		srb->flags = AAC_SRB_FLAGS_UNSPECIFIED_DIRECTION;
904 	if (cm->cm_flags & AAC_CMD_DATAIN)
905 		srb->flags = AAC_SRB_FLAGS_DATA_IN;
906 	else if (cm->cm_flags & AAC_CMD_DATAOUT)
907 		srb->flags = AAC_SRB_FLAGS_DATA_OUT;
908 	else
909 		srb->flags = AAC_SRB_FLAGS_NO_DATA_XFER;
910 
911 	/*
912 	 * Copy the CDB into the SRB.  It's only 6-16 bytes,
913 	 * so a copy is not too expensive.
914 	 */
915 	srb->cdb_len = ccb->csio.cdb_len;
916 	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
917 		bcopy(ccb->csio.cdb_io.cdb_ptr, (u_int8_t *)&srb->cdb[0],
918 			srb->cdb_len);
919 	else
920 		bcopy(ccb->csio.cdb_io.cdb_bytes, (u_int8_t *)&srb->cdb[0],
921 			srb->cdb_len);
922 
923 	/* Set command */
924 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
925 		ScsiPortCommandU64 : ScsiPortCommand;
926 	fib->Header.Size = sizeof(struct aac_fib_header) +
927 			sizeof(struct aac_srb);
928 
929 	/* Map the s/g list */
930 	cm->cm_sgtable = &srb->sg_map;
931 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
932 		/*
933 		 * Arrange things so that the S/G
934 		 * map will get set up automagically
935 		 */
936 		cm->cm_data = (void *)ccb->csio.data_ptr;
937 		cm->cm_datalen = ccb->csio.dxfer_len;
938 		srb->data_len = ccb->csio.dxfer_len;
939 	} else {
940 		cm->cm_data = NULL;
941 		cm->cm_datalen = 0;
942 		srb->data_len = 0;
943 	}
944 
945 	srb->bus = camsc->inf->BusNumber - 1; /* Bus no. rel. to the card */
946 	srb->target = ccb->ccb_h.target_id;
947 	srb->lun = ccb->ccb_h.target_lun;
948 	srb->timeout = ccb->ccb_h.timeout;	/* XXX */
949 	srb->retry_limit = 0;
950 
951 	cm->cm_complete = aac_cam_complete;
952 	cm->cm_ccb = ccb;
953 	cm->cm_timestamp = time_uptime;
954 
955 	fib->Header.XferState =
956 			AAC_FIBSTATE_HOSTOWNED	|
957 			AAC_FIBSTATE_INITIALISED	|
958 			AAC_FIBSTATE_FROMHOST	|
959 			AAC_FIBSTATE_REXPECTED	|
960 			AAC_FIBSTATE_NORM	|
961 			AAC_FIBSTATE_ASYNC	 |
962 			AAC_FIBSTATE_FAST_RESPONSE;
963 
964 	aac_enqueue_ready(cm);
965 	aacraid_startio(cm->cm_sc);
966 }
967 
/*
 * CAM action entry point: dispatch a CCB handed down by the transport
 * layer.  Operations that can be satisfied immediately are completed
 * inline with xpt_done(); XPT_SCSI_IO requests fall through the switch
 * and are forwarded to the controller (container R/W, container
 * special, or SRB pass-through path).  Runs with the per-controller
 * I/O lock held (asserted below).
 */
static void
aac_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	aac_cam *camsc;
	struct	aac_softc *sc;

	camsc = (struct aac_cam *)cam_sim_softc(sim);
	sc = camsc->inf->aac_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	/* Synchronous ops, and ops that don't require communication with the
	 * controller */
	switch(ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		/* This is handled down below */
		break;
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		/*
		 * Synthesize a CHS geometry from the volume size using the
		 * conventional tiers: 255/63 for >= 2GB, 128/32 for >= 1GB,
		 * 64/32 otherwise.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size /
		    ((1024L * 1024L) / ccg->block_size);
		if (size_mb >= (2 * 1024)) {		/* 2GB */
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb >= (1 * 1024)) {	/* 1GB */
			ccg->heads = 128;
			ccg->secs_per_track = 32;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_PATH_INQ:
	{
		/* Report SIM capabilities and identity to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = camsc->inf->TargetsPerBus;
		cpi->max_lun = 8;	/* Per the controller spec */
		cpi->initiator_id = camsc->inf->InitiatorBusId;
		cpi->bus_id = camsc->inf->BusNumber;
#if __FreeBSD_version >= 800000
		cpi->maxio = sc->aac_max_sectors << 9;
#endif

		/*
		 * Resetting via the passthrough or parallel bus scan
		 * causes problems.
		 */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->base_transfer_speed = 300000;
#ifdef CAM_NEW_TRAN_CODE
		cpi->hba_misc |= PIM_SEQSCAN;
		cpi->protocol = PROTO_SCSI;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol_version = SCSI_REV_SPC2;
#endif
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "PMC-Sierra", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Fixed transfer settings: tagged queueing + disconnect. */
#ifdef CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi =
			&ccb->cts.proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
			&ccb->cts.xport_specific.spi;
		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_SPC2;
		ccb->cts.transport = XPORT_SAS;
		ccb->cts.transport_version = 0;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
		spi->valid |= CTS_SPI_VALID_DISC;
		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
#else
		ccb->cts.flags = ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
		ccb->cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
#endif
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_SET_TRAN_SETTINGS:
		/* Transfer settings are not negotiable on this controller. */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		return;
	case XPT_RESET_BUS:
		/* Only real (non-container) buses can be reset, and only
		 * if the administrator hasn't disabled resets. */
		if (!(sc->flags & AAC_FLAGS_CAM_NORESET) &&
			camsc->inf->BusType != CONTAINER_BUS) {
			ccb->ccb_h.status = aac_cam_reset_bus(sim, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		return;
	case XPT_RESET_DEV:
		/* Device reset is a no-op; report success. */
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	case XPT_ABORT:
		ccb->ccb_h.status = aac_cam_abort_ccb(sim, ccb);
		xpt_done(ccb);
		return;
	case XPT_TERM_IO:
		ccb->ccb_h.status = aac_cam_term_io(sim, ccb);
		xpt_done(ccb);
		return;
	default:
		device_printf(sc->aac_dev, "Unsupported command 0x%x\n",
		    ccb->ccb_h.func_code);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		return;
	}

	/* Async ops that require communication with the controller */
	if (camsc->inf->BusType == CONTAINER_BUS) {
		u_int8_t *cmdp;

		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cmdp = ccb->csio.cdb_io.cdb_ptr;
		else
			cmdp = &ccb->csio.cdb_io.cdb_bytes[0];

		/* R/W opcodes get the fast container path; everything else
		 * (INQUIRY, TUR, mode sense, ...) is emulated/translated. */
		if (*cmdp==READ_6 || *cmdp==WRITE_6 || *cmdp==READ_10 ||
			*cmdp==WRITE_10 || *cmdp==READ_12 || *cmdp==WRITE_12 ||
			*cmdp==READ_16 || *cmdp==WRITE_16)
			aac_container_rw_command(sim, ccb, cmdp);
		else
			aac_container_special_command(sim, ccb, cmdp);
	} else {
		aac_passthrough_command(sim, ccb);
	}
}
1123 
/*
 * CAM poll entry point.  Intentionally empty: pinging the interrupt
 * routine isn't very safe, nor is it really necessary, so completions
 * are left to the normal interrupt path.
 */
static void
aac_cam_poll(struct cam_sim *sim)
{
	/*
	 * Pinging the interrupt routine isn't very safe, nor is it
	 * really necessary.  Do nothing.
	 */
}
1132 
1133 static void
1134 aac_container_complete(struct aac_command *cm)
1135 {
1136 	union	ccb *ccb;
1137 	u_int32_t status;
1138 
1139 	fwprintf(cm->cm_sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1140 	ccb = cm->cm_ccb;
1141 	status = ((u_int32_t *)cm->cm_fib->data)[0];
1142 
1143 	if (cm->cm_flags & AAC_CMD_RESET) {
1144 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1145 	} else if (status == ST_OK) {
1146 		ccb->ccb_h.status = CAM_REQ_CMP;
1147 	} else if (status == ST_NOT_READY) {
1148 		ccb->ccb_h.status = CAM_BUSY;
1149 	} else {
1150 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1151 	}
1152 
1153 	aacraid_release_command(cm);
1154 	xpt_done(ccb);
1155 }
1156 
1157 static void
1158 aac_cam_complete(struct aac_command *cm)
1159 {
1160 	union	ccb *ccb;
1161 	struct 	aac_srb_response *srbr;
1162 	struct	aac_softc *sc;
1163 
1164 	sc = cm->cm_sc;
1165 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1166 	ccb = cm->cm_ccb;
1167 	srbr = (struct aac_srb_response *)&cm->cm_fib->data[0];
1168 
1169 	if (cm->cm_flags & AAC_CMD_FASTRESP) {
1170 		/* fast response */
1171 		srbr->srb_status = CAM_REQ_CMP;
1172 		srbr->scsi_status = SCSI_STATUS_OK;
1173 		srbr->sense_len = 0;
1174 	}
1175 
1176 	if (cm->cm_flags & AAC_CMD_RESET) {
1177 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1178 	} else if (srbr->fib_status != 0) {
1179 		device_printf(sc->aac_dev, "Passthru FIB failed!\n");
1180 		ccb->ccb_h.status = CAM_REQ_ABORTED;
1181 	} else {
1182 		/*
1183 		 * The SRB error codes just happen to match the CAM error
1184 		 * codes.  How convenient!
1185 		 */
1186 		ccb->ccb_h.status = srbr->srb_status;
1187 
1188 		/* Take care of SCSI_IO ops. */
1189 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1190 			u_int8_t command, device;
1191 
1192 			ccb->csio.scsi_status = srbr->scsi_status;
1193 
1194 			/* Take care of autosense */
1195 			if (srbr->sense_len) {
1196 				int sense_len, scsi_sense_len;
1197 
1198 				scsi_sense_len = sizeof(struct scsi_sense_data);
1199 				bzero(&ccb->csio.sense_data, scsi_sense_len);
1200 				sense_len = (srbr->sense_len >
1201 				    scsi_sense_len) ? scsi_sense_len :
1202 				    srbr->sense_len;
1203 				bcopy(&srbr->sense[0], &ccb->csio.sense_data,
1204 				    srbr->sense_len);
1205 				ccb->csio.sense_len = sense_len;
1206 				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1207 				// scsi_sense_print(&ccb->csio);
1208 			}
1209 
1210 			/* If this is an inquiry command, fake things out */
1211 			if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1212 				command = ccb->csio.cdb_io.cdb_ptr[0];
1213 			else
1214 				command = ccb->csio.cdb_io.cdb_bytes[0];
1215 
1216 			if (command == INQUIRY) {
1217 				if (ccb->ccb_h.status == CAM_REQ_CMP) {
1218 				  device = ccb->csio.data_ptr[0] & 0x1f;
1219 				  /*
1220 				   * We want DASD and PROC devices to only be
1221 				   * visible through the pass device.
1222 				   */
1223 				  if ((device == T_DIRECT &&
1224 				    !(sc->aac_feature_bits & AAC_SUPPL_SUPPORTED_JBOD)) ||
1225 				    (device == T_PROCESSOR))
1226 				    ccb->csio.data_ptr[0] =
1227 				  	((device & 0xe0) | T_NODEVICE);
1228 
1229 				  /* handle phys. components of a log. drive */
1230 				  if (ccb->csio.data_ptr[0] & 0x20) {
1231 					if (sc->hint_flags & 8) {
1232 					  /* expose phys. device (daXX) */
1233 					  ccb->csio.data_ptr[0] &= 0xdf;
1234 					} else {
1235 					  /* phys. device only visible through pass device (passXX) */
1236 					  ccb->csio.data_ptr[0] |= 0x10;
1237 					}
1238 				  }
1239 				} else if (ccb->ccb_h.status == CAM_SEL_TIMEOUT &&
1240 				  ccb->ccb_h.target_lun != 0) {
1241 				  /* fix for INQUIRYs on Lun>0 */
1242 				  ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1243 				}
1244 			}
1245 		}
1246 	}
1247 
1248 	aacraid_release_command(cm);
1249 	xpt_done(ccb);
1250 }
1251 
/*
 * Issue a synchronous ResetBus VM ioctl to the firmware for the bus
 * backing this SIM.  Returns a CAM status code: CAM_REQ_CMP on
 * success, CAM_REQ_ABORTED if no command slot/memory is available or
 * the firmware command fails.
 */
static u_int32_t
aac_cam_reset_bus(struct cam_sim *sim, union ccb *ccb)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_softc *sc;
	struct aac_cam *camsc;
	struct aac_vmioctl *vmi;
	struct aac_resetbus *rbc;
	u_int32_t rval;

	camsc = (struct aac_cam *)cam_sim_softc(sim);
	sc = camsc->inf->aac_sc;

	if (sc == NULL) {
		printf("aac: Null sc?\n");
		return (CAM_REQ_ABORTED);
	}

	if (aacraid_alloc_command(sc, &cm)) {
		/*
		 * No free command slots: freeze the SIM queue and register
		 * a command-free event so the CCB is retried when a slot
		 * opens up.
		 *
		 * NOTE(review): if the event allocation below fails, the
		 * simq appears to remain frozen with nothing scheduled to
		 * release it — confirm against aac_cam_event().
		 */
		struct aac_event *event;

		xpt_freeze_simq(sim, 1);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		ccb->ccb_h.sim_priv.entries[0].ptr = camsc;
		event = malloc(sizeof(struct aac_event), M_AACRAIDCAM,
			M_NOWAIT | M_ZERO);
		if (event == NULL) {
			device_printf(sc->aac_dev,
				"Warning, out of memory for event\n");
			return (CAM_REQ_ABORTED);
		}
		event->ev_callback = aac_cam_event;
		event->ev_arg = ccb;
		event->ev_type = AAC_EVENT_CMFREE;
		aacraid_add_event(sc, event);
		return (CAM_REQ_ABORTED);
	}

	fib = cm->cm_fib;
	cm->cm_timestamp = time_uptime;
	cm->cm_datalen = 0;	/* no data transfer for this ioctl */

	fib->Header.Size =
		sizeof(struct aac_fib_header) + sizeof(struct aac_vmioctl);
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED   |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY	 |
		AAC_FIBSTATE_FROMHOST	 |
		AAC_FIBSTATE_REXPECTED   |
		AAC_FIBSTATE_NORM	 |
		AAC_FIBSTATE_ASYNC	 |
		AAC_FIBSTATE_FAST_RESPONSE;
	fib->Header.Command = ContainerCommand;

	/* Build the ResetBus VM ioctl in the FIB data area. */
	vmi = (struct aac_vmioctl *)&fib->data[0];
	bzero(vmi, sizeof(struct aac_vmioctl));

	vmi->Command = VM_Ioctl;
	vmi->ObjType = FT_DRIVE;
	vmi->MethId = sc->scsi_method_id;
	vmi->ObjId = 0;
	vmi->IoctlCmd = ResetBus;

	rbc = (struct aac_resetbus *)&vmi->IoctlBuf[0];
	/* Bus number relative to the card (SIM bus numbers are 1-based). */
	rbc->BusNumber = camsc->inf->BusNumber - 1;

	/* Wait synchronously for the firmware to complete the reset. */
	if (aacraid_wait_command(cm) != 0) {
		device_printf(sc->aac_dev,"Error sending ResetBus command\n");
		rval = CAM_REQ_ABORTED;
	} else {
		rval = CAM_REQ_CMP;
	}
	aacraid_release_command(cm);
	return (rval);
}
1329 
/*
 * Abort is not supported by this driver; always report "unable to
 * abort" so CAM falls back to its own recovery.
 */
static u_int32_t
aac_cam_abort_ccb(struct cam_sim *sim, union ccb *ccb)
{
	return (CAM_UA_ABORT);
}
1335 
/*
 * Terminate-I/O is not supported by this driver; always report
 * "unable to terminate".
 */
static u_int32_t
aac_cam_term_io(struct cam_sim *sim, union ccb *ccb)
{
	return (CAM_UA_TERMIO);
}
1341 
1342 static int
1343 aac_load_map_command_sg(struct aac_softc *sc, struct aac_command *cm)
1344 {
1345 	int error;
1346 
1347 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1348 	error = bus_dmamap_load(sc->aac_buffer_dmat,
1349 				cm->cm_datamap, cm->cm_data, cm->cm_datalen,
1350 				aacraid_map_command_sg, cm, 0);
1351 	if (error == EINPROGRESS) {
1352 		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "freezing queue\n");
1353 		sc->flags |= AAC_QUEUE_FRZN;
1354 		error = 0;
1355 	} else if (error != 0) {
1356 		panic("aac_load_map_command_sg: unexpected error %d from "
1357 	     		"busdma", error);
1358 	}
1359 	return(error);
1360 }
1361 
1362 /*
1363  * Start as much queued I/O as possible on the controller
1364  */
1365 void
1366 aacraid_startio(struct aac_softc *sc)
1367 {
1368 	struct aac_command *cm;
1369 
1370 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1371 
1372 	for (;;) {
1373 		if (sc->aac_state & AAC_STATE_RESET) {
1374 			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "AAC_STATE_RESET");
1375 			break;
1376 		}
1377 		/*
1378 		 * This flag might be set if the card is out of resources.
1379 		 * Checking it here prevents an infinite loop of deferrals.
1380 		 */
1381 		if (sc->flags & AAC_QUEUE_FRZN) {
1382 			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "AAC_QUEUE_FRZN");
1383 			break;
1384 		}
1385 
1386 		/*
1387 		 * Try to get a command that's been put off for lack of
1388 		 * resources
1389 		 */
1390 		if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1391 			/* sync. transfer mode */
1392 			if (sc->aac_sync_cm)
1393 				break;
1394 			cm = aac_dequeue_ready(sc);
1395 			sc->aac_sync_cm = cm;
1396 		} else {
1397 			cm = aac_dequeue_ready(sc);
1398 		}
1399 
1400 		/* nothing to do? */
1401 		if (cm == NULL)
1402 			break;
1403 
1404 		/* don't map more than once */
1405 		if (cm->cm_flags & AAC_CMD_MAPPED)
1406 			panic("aac: command %p already mapped", cm);
1407 
1408 		/*
1409 		 * Set up the command to go to the controller.  If there are no
1410 		 * data buffers associated with the command then it can bypass
1411 		 * busdma.
1412 		 */
1413 		if (cm->cm_datalen)
1414 			aac_load_map_command_sg(sc, cm);
1415 		else
1416 			aacraid_map_command_sg(cm, NULL, 0, 0);
1417 	}
1418 }
1419