xref: /freebsd/sys/dev/mpi3mr/mpi3mr_cam.c (revision 315ee00f)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/cdefs.h>
45 #include <sys/types.h>
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/selinfo.h>
50 #include <sys/module.h>
51 #include <sys/bus.h>
52 #include <sys/conf.h>
53 #include <sys/bio.h>
54 #include <sys/malloc.h>
55 #include <sys/uio.h>
56 #include <sys/sysctl.h>
57 #include <sys/endian.h>
58 #include <sys/queue.h>
59 #include <sys/kthread.h>
60 #include <sys/taskqueue.h>
61 #include <sys/sbuf.h>
62 
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <sys/rman.h>
66 
67 #include <machine/stdarg.h>
68 
69 #include <cam/cam.h>
70 #include <cam/cam_ccb.h>
71 #include <cam/cam_debug.h>
72 #include <cam/cam_sim.h>
73 #include <cam/cam_xpt_sim.h>
74 #include <cam/cam_xpt_periph.h>
75 #include <cam/cam_periph.h>
76 #include <cam/scsi/scsi_all.h>
77 #include <cam/scsi/scsi_message.h>
78 #include <cam/scsi/smp_all.h>
79 
80 #include <dev/nvme/nvme.h>
81 #include "mpi/mpi30_api.h"
82 #include "mpi3mr_cam.h"
83 #include "mpi3mr.h"
84 #include <sys/time.h>			/* XXX for pcpu.h */
85 #include <sys/pcpu.h>			/* XXX for PCPU_GET */
86 
87 #define	smp_processor_id()  PCPU_GET(cpuid)
88 
89 static int
90 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
91 void
92 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc);
93 static void
94 mpi3mr_freeup_events(struct mpi3mr_softc *sc);
95 
96 extern int
97 mpi3mr_register_events(struct mpi3mr_softc *sc);
98 extern void mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
99     bus_addr_t dma_addr);
100 extern void mpi3mr_build_zero_len_sge(void *paddr);
101 
102 static U32 event_count;
103 
/**
 * mpi3mr_prepare_sgls - busdma callback that builds the SGL for an I/O
 * @arg: The mpi3mr_cmd being mapped (passed through bus_dmamap_load_ccb)
 * @segs: Array of DMA segments produced by busdma
 * @nsegs: Number of valid entries in @segs
 * @error: busdma load error, 0 on success
 *
 * Fills the scatter-gather list of the MPI3 SCSI IO request frame with
 * simple SGEs, one per DMA segment.  When the segments do not all fit in
 * the request frame, the remainder is written to the per-command chain
 * buffer and a LAST_CHAIN SGE in the frame points at it.
 */
static void mpi3mr_prepare_sgls(void *arg,
	bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cm;
	u_int i;
	bus_addr_t chain_dma;
	void *chain;
	U8 *sg_local;
	U32 chain_length;
	int sges_left;
	U32 sges_in_segment;
	U8 simple_sgl_flags;
	U8 simple_sgl_flags_last;
	U8 last_chain_sgl_flags;
	struct mpi3mr_chain *chain_req;
	Mpi3SCSIIORequest_t *scsiio_req;

	cm = (struct mpi3mr_cmd *)arg;
	sc = cm->sc;
	scsiio_req = (Mpi3SCSIIORequest_t *) &cm->io_request;

	if (error) {
		/* Record the error for mpi3mr_map_request() to report. */
		cm->error_code = error;
		device_printf(sc->mpi3mr_dev, "%s: error=%d\n",__func__, error);
		if (error == EFBIG) {
			cm->ccb->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
		/*
		 * NOTE(review): non-EFBIG errors fall through and keep
		 * building the SGL; presumably the caller acts on
		 * cm->error_code afterwards - confirm.
		 */
	}

	/* Make the buffer device-visible before the transfer starts. */
	if (cm->data_dir == MPI3MR_READ)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cm->data_dir == MPI3MR_WRITE)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREWRITE);
	if (nsegs > MPI3MR_SG_DEPTH) {
		device_printf(sc->mpi3mr_dev, "SGE count is too large or 0.\n");
		return;
	}

	/*
	 * Simple SGE in system memory; the final SGE additionally carries
	 * the END_OF_LIST flag, and the chain element uses LAST_CHAIN.
	 */
	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sg_local = (U8 *)&scsiio_req->SGL;

	/* Zero-length transfer: emit the firmware's zero-length SGE form. */
	if (!scsiio_req->DataLength) {
		mpi3mr_build_zero_len_sge(sg_local);
		return;
	}

	sges_left = nsegs;

	if (sges_left < 0) {
		printf("scsi_dma_map failed: request for %d bytes!\n",
			scsiio_req->DataLength);
		return;
	}
	if (sges_left > MPI3MR_SG_DEPTH) {
		printf("scsi_dma_map returned unsupported sge count %d!\n",
			sges_left);
		return;
	}

	/* Number of simple SGEs that fit inside the request frame itself. */
	sges_in_segment = (sc->facts.op_req_sz -
	    offsetof(Mpi3SCSIIORequest_t, SGL))/sizeof(Mpi3SGESimple_t);

	i = 0;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "SGE count: %d IO size: %d\n",
		nsegs, scsiio_req->DataLength);

	/* Everything fits in the frame: no chain element is needed. */
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		sges_in_segment--;
		i++;
	}

	/* Remaining SGEs go into this command's preallocated chain buffer. */
	chain_req = &sc->chain_sgl_list[cm->hosttag];

	chain = chain_req->buf;
	chain_dma = chain_req->buf_phys;
	memset(chain_req->buf, 0, PAGE_SIZE);
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(Mpi3SGESimple_t);

	/* Last slot of the frame becomes a chain SGE pointing at the buffer. */
	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	/* Emit the remaining simple SGEs; the final one terminates the list. */
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, segs[i].ds_len,
			    segs[i].ds_addr);
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		i++;
	}

	return;
}
223 
224 int
225 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
226 {
227 	u_int32_t retcode = 0;
228 
229 	if (cm->data != NULL) {
230 		mtx_lock(&sc->io_lock);
231 		/* Map data buffer into bus space */
232 		retcode = bus_dmamap_load_ccb(sc->buffer_dmat, cm->dmamap,
233 		    cm->ccb, mpi3mr_prepare_sgls, cm, 0);
234 		mtx_unlock(&sc->io_lock);
235 		if (retcode)
236 			device_printf(sc->mpi3mr_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
237 		if (retcode == EINPROGRESS) {
238 			device_printf(sc->mpi3mr_dev, "request load in progress\n");
239 			xpt_freeze_simq(sc->cam_sc->sim, 1);
240 		}
241 	}
242 	if (cm->error_code)
243 		return cm->error_code;
244 	if (retcode)
245 		mpi3mr_set_ccbstatus(cm->ccb, CAM_REQ_INVALID);
246 
247 	return (retcode);
248 }
249 
250 void
251 mpi3mr_unmap_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
252 {
253 	if (cmd->data != NULL) {
254 		if (cmd->data_dir == MPI3MR_READ)
255 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTREAD);
256 		if (cmd->data_dir == MPI3MR_WRITE)
257 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTWRITE);
258 		mtx_lock(&sc->io_lock);
259 		bus_dmamap_unload(sc->buffer_dmat, cmd->dmamap);
260 		mtx_unlock(&sc->io_lock);
261 	}
262 }
263 
264 /**
265  * mpi3mr_allow_unmap_to_fw - Whether an unmap is allowed to fw
266  * @sc: Adapter instance reference
267  * @ccb: SCSI Command reference
268  *
269  * The controller hardware cannot handle certain unmap commands
270  * for NVMe drives, this routine checks those and return true
271  * and completes the SCSI command with proper status and sense
272  * data.
273  *
274  * Return: TRUE for allowed unmap, FALSE otherwise.
275  */
276 static bool mpi3mr_allow_unmap_to_fw(struct mpi3mr_softc *sc,
277 	union ccb *ccb)
278 {
279 	struct ccb_scsiio *csio;
280 	uint16_t param_list_len, block_desc_len, trunc_param_len = 0;
281 
282 	csio = &ccb->csio;
283 	param_list_len = (uint16_t) ((scsiio_cdb_ptr(csio)[7] << 8) | scsiio_cdb_ptr(csio)[8]);
284 
285 	switch(pci_get_revid(sc->mpi3mr_dev)) {
286 	case SAS4116_CHIP_REV_A0:
287 		if (!param_list_len) {
288 			mpi3mr_dprint(sc, MPI3MR_ERROR,
289 			    "%s: CDB received with zero parameter length\n",
290 			    __func__);
291 			mpi3mr_print_cdb(ccb);
292 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
293 			xpt_done(ccb);
294 			return false;
295 		}
296 
297 		if (param_list_len < 24) {
298 			mpi3mr_dprint(sc, MPI3MR_ERROR,
299 			    "%s: CDB received with invalid param_list_len: %d\n",
300 			    __func__, param_list_len);
301 			mpi3mr_print_cdb(ccb);
302 			scsi_set_sense_data(&ccb->csio.sense_data,
303 				/*sense_format*/ SSD_TYPE_FIXED,
304 				/*current_error*/ 1,
305 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
306 				/*asc*/ 0x1A,
307 				/*ascq*/ 0x00,
308 				/*extra args*/ SSD_ELEM_NONE);
309 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
310 			ccb->ccb_h.status =
311 			    CAM_SCSI_STATUS_ERROR |
312 			    CAM_AUTOSNS_VALID;
313 			return false;
314 		}
315 
316 		if (param_list_len != csio->dxfer_len) {
317 			mpi3mr_dprint(sc, MPI3MR_ERROR,
318 			    "%s: CDB received with param_list_len: %d bufflen: %d\n",
319 			    __func__, param_list_len, csio->dxfer_len);
320 			mpi3mr_print_cdb(ccb);
321 			scsi_set_sense_data(&ccb->csio.sense_data,
322 				/*sense_format*/ SSD_TYPE_FIXED,
323 				/*current_error*/ 1,
324 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
325 				/*asc*/ 0x1A,
326 				/*ascq*/ 0x00,
327 				/*extra args*/ SSD_ELEM_NONE);
328 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
329 			ccb->ccb_h.status =
330 			    CAM_SCSI_STATUS_ERROR |
331 			    CAM_AUTOSNS_VALID;
332 			xpt_done(ccb);
333 			return false;
334 		}
335 
336 		block_desc_len = (uint16_t) (csio->data_ptr[2] << 8 | csio->data_ptr[3]);
337 
338 		if (block_desc_len < 16) {
339 			mpi3mr_dprint(sc, MPI3MR_ERROR,
340 			    "%s: Invalid descriptor length in param list: %d\n",
341 			    __func__, block_desc_len);
342 			mpi3mr_print_cdb(ccb);
343 			scsi_set_sense_data(&ccb->csio.sense_data,
344 				/*sense_format*/ SSD_TYPE_FIXED,
345 				/*current_error*/ 1,
346 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
347 				/*asc*/ 0x26,
348 				/*ascq*/ 0x00,
349 				/*extra args*/ SSD_ELEM_NONE);
350 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
351 			ccb->ccb_h.status =
352 			    CAM_SCSI_STATUS_ERROR |
353 			    CAM_AUTOSNS_VALID;
354 			xpt_done(ccb);
355 			return false;
356 		}
357 
358 		if (param_list_len > (block_desc_len + 8)) {
359 			mpi3mr_print_cdb(ccb);
360 			mpi3mr_dprint(sc, MPI3MR_INFO,
361 			    "%s: Truncating param_list_len(%d) to block_desc_len+8(%d)\n",
362 			    __func__, param_list_len, (block_desc_len + 8));
363 			param_list_len = block_desc_len + 8;
364 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
365 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
366 			mpi3mr_print_cdb(ccb);
367 		}
368 		break;
369 
370 	case SAS4116_CHIP_REV_B0:
371 		if ((param_list_len > 24) && ((param_list_len - 8) & 0xF)) {
372 			trunc_param_len -= (param_list_len - 8) & 0xF;
373 			mpi3mr_print_cdb(ccb);
374 			mpi3mr_dprint(sc, MPI3MR_INFO,
375 			    "%s: Truncating param_list_len from (%d) to (%d)\n",
376 			    __func__, param_list_len, trunc_param_len);
377 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
378 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
379 			mpi3mr_print_cdb(ccb);
380 		}
381 		break;
382 	}
383 
384 	return true;
385 }
386 
387 /**
388  * mpi3mr_tm_response_name -  get TM response as a string
389  * @resp_code: TM response code
390  *
391  * Convert known task management response code as a readable
392  * string.
393  *
394  * Return: response code string.
395  */
396 static const char* mpi3mr_tm_response_name(U8 resp_code)
397 {
398 	char *desc;
399 
400 	switch (resp_code) {
401 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
402 		desc = "task management request completed";
403 		break;
404 	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
405 		desc = "invalid frame";
406 		break;
407 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
408 		desc = "task management request not supported";
409 		break;
410 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
411 		desc = "task management request failed";
412 		break;
413 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
414 		desc = "task management request succeeded";
415 		break;
416 	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
417 		desc = "invalid LUN";
418 		break;
419 	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
420 		desc = "overlapped tag attempted";
421 		break;
422 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
423 		desc = "task queued, however not sent to target";
424 		break;
425 	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
426 		desc = "task management request denied by NVMe device";
427 		break;
428 	default:
429 		desc = "unknown";
430 		break;
431 	}
432 
433 	return desc;
434 }
435 
436 void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc)
437 {
438 	int i;
439 	int num_of_reply_queues = sc->num_queues;
440 	struct mpi3mr_irq_context *irq_ctx;
441 
442 	for (i = 0; i < num_of_reply_queues; i++) {
443 		irq_ctx = &sc->irq_ctx[i];
444 		mpi3mr_complete_io_cmd(sc, irq_ctx);
445 	}
446 }
447 
448 void
449 trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U32 reset_reason)
450 {
451 	if (sc->reset_in_progress) {
452 		mpi3mr_dprint(sc, MPI3MR_INFO, "Another reset is in progress, no need to trigger the reset\n");
453 		return;
454 	}
455 	sc->reset.type = reset_type;
456 	sc->reset.reason = reset_reason;
457 
458 	return;
459 }
460 
461 /**
462  * mpi3mr_issue_tm - Issue Task Management request
463  * @sc: Adapter instance reference
464  * @tm_type: Task Management type
465  * @handle: Device handle
466  * @lun: lun ID
467  * @htag: Host tag of the TM request
468  * @timeout: TM timeout value
469  * @drv_cmd: Internal command tracker
470  * @resp_code: Response code place holder
471  * @cmd: Timed out command reference
472  *
473  * Issues a Task Management Request to the controller for a
474  * specified target, lun and command and wait for its completion
475  * and check TM response. Recover the TM if it timed out by
476  * issuing controller reset.
477  *
478  * Return: 0 on success, non-zero on errors
479  */
static int
mpi3mr_issue_tm(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd,
		U8 tm_type, unsigned long timeout)
{
	int retval = 0;
	MPI3_SCSI_TASK_MGMT_REQUEST tm_req;
	MPI3_SCSI_TASK_MGMT_REPLY *tm_reply = NULL;
	struct mpi3mr_drvr_cmd *drv_cmd = NULL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_op_req_queue *op_req_q = NULL;
	union ccb *ccb;
	U8 resp_code;


	/* TM cannot help once the controller is lost or being reset. */
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Controller is in unrecoverable state!! TM not required\n");
		return retval;
	}
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"controller reset in progress!! TM not required\n");
		return retval;
	}

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	/* TM is pointless for a target that is gone or being removed. */
	tgtdev = cmd->targ;
	if (tgtdev == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device does not exist target ID:0x%x,"
			      "TM is not required\n", ccb->ccb_h.target_id);
		return retval;
	}
	if (tgtdev->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device(0x%x) is removed, TM is not required\n",
			      ccb->ccb_h.target_id);
		return retval;
	}

	/*
	 * Single shared TM tracker; its lock serializes all TM requests on
	 * this adapter and is held until out_unlock below.
	 */
	drv_cmd = &sc->host_tm_cmds;
	mtx_lock(&drv_cmd->lock);

	/* Build the MPI3 SCSI Task Management request frame. */
	memset(&tm_req, 0, sizeof(tm_req));
	tm_req.DevHandle = htole16(tgtdev->dev_handle);
	tm_req.TaskType = tm_type;
	tm_req.HostTag = htole16(MPI3MR_HOSTTAG_TMS);
	int_to_lun(ccb->ccb_h.target_lun, tm_req.LUN);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;

	/* ABORT TASK must name the exact I/O: its host tag and request queue. */
	if (ccb) {
		if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			op_req_q = &sc->op_req_q[cmd->req_qidx];
			tm_req.TaskHostTag = htole16(cmd->hosttag);
			tm_req.TaskRequestQueueID = htole16(op_req_q->qid);
		}
	}

	/* Block new I/O to the target while the TM is outstanding. */
	if (tgtdev)
		mpi3mr_atomic_inc(&tgtdev->block_io);

	/* For NVMe devices, prefer the device-reported TM timeouts if set. */
	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		     && tgtdev->dev_spec.pcie_inf.abort_to)
 			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET)
			 && tgtdev->dev_spec.pcie_inf.reset_to)
			 timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	/*
	 * NOTE(review): this stores the address of the local pointer
	 * variable drv_cmd (not drv_cmd itself) as the wakeup channel; it is
	 * only valid while this function is on the stack - confirm intent.
	 */
	sc->tm_chan = (void *)&drv_cmd;

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "posting task management request: type(%d), handle(0x%04x)\n",
		       tm_type, tgtdev->dev_handle);

	/* Post the TM on the admin queue and wait for its completion. */
	init_completion(&drv_cmd->completion);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "posting task management request is failed\n");
		retval = -1;
		goto out_unlock;
	}
	wait_for_completion_timeout_tm(&drv_cmd->completion, timeout, sc);

	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		/*
		 * A timeout not caused by an in-flight reset: escalate to a
		 * soft controller reset via the watchdog.
		 */
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "task management request timed out after %ld seconds\n", timeout);
			if (sc->mpi3mr_debug & MPI3MR_DEBUG_TM) {
				mpi3mr_dprint(sc, MPI3MR_INFO, "tm_request dump\n");
				mpi3mr_hexdump(&tm_req, sizeof(tm_req), 8);
			}
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_TM_TIMEOUT);
			retval = ETIMEDOUT;
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}
	tm_reply = (MPI3_SCSI_TASK_MGMT_REPLY *)drv_cmd->reply;

	/* Map the IOC status of the reply to a TM response code. */
	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		resp_code = tm_reply->ResponseData & MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
			       tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Judge success/failure from the TM response code. */
	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		/* "Queued on IOC" is only acceptable for QUERY TASK. */
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x)"
		      "termination_count(%u), response:%s(0x%x)\n", tm_type, tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
		      tm_reply->TerminationCount, mpi3mr_tm_response_name(resp_code), resp_code);

	if (retval)
		goto out_unlock;

	/*
	 * Drain any completions the TM flushed out, once with interrupts
	 * masked and once with them re-enabled, before verifying below that
	 * the affected I/O(s) were really terminated.
	 */
	mpi3mr_disable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);
	mpi3mr_enable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);

	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		/* Still marked IN_TM means the aborted CCB never completed. */
		if (cmd->state == MPI3MR_CMD_STATE_IN_TM) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: task abort returned success from firmware but corresponding CCB (%p) was not terminated"
				      "marking task abort failed!\n", sc->name, cmd->ccb);
			retval = -1;
		}
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		/* A successful target reset should leave no I/O outstanding. */
		if (mpi3mr_atomic_read(&tgtdev->outstanding)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: target reset returned success from firmware but IOs are still pending on the target (%p)"
				      "marking target reset failed!\n",
				      sc->name, tgtdev);
			retval = -1;
		}
		break;
	default:
		break;
	}

out_unlock:
	/* Release the TM tracker and unblock I/O to the target. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&drv_cmd->lock);
	if (tgtdev && mpi3mr_atomic_read(&tgtdev->block_io) > 0)
		mpi3mr_atomic_dec(&tgtdev->block_io);

	return retval;
}
667 
668 /**
669  * mpi3mr_task_abort- Abort error handling callback
670  * @cmd: Timed out command reference
671  *
672  * Issue Abort Task Management if the command is in LLD scope
673  * and verify if it is aborted successfully and return status
674  * accordingly.
675  *
676  * Return: SUCCESS of successful abort the SCSI command else FAILED
677  */
678 static int mpi3mr_task_abort(struct mpi3mr_cmd *cmd)
679 {
680 	int retval = 0;
681 	struct mpi3mr_softc *sc;
682 	union ccb *ccb;
683 
684 	sc = cmd->sc;
685 
686 	if (!cmd->ccb) {
687 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
688 		return retval;
689 	}
690 	ccb = cmd->ccb;
691 
692 	mpi3mr_dprint(sc, MPI3MR_INFO,
693 		      "attempting abort task for ccb(%p)\n", ccb);
694 
695 	mpi3mr_print_cdb(ccb);
696 
697 	if (cmd->state != MPI3MR_CMD_STATE_BUSY) {
698 		mpi3mr_dprint(sc, MPI3MR_INFO,
699 			      "%s: ccb is not in driver scope, abort task is not required\n",
700 			      sc->name);
701 		return retval;
702 	}
703 	cmd->state = MPI3MR_CMD_STATE_IN_TM;
704 
705 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, MPI3MR_ABORTTM_TIMEOUT);
706 
707 	mpi3mr_dprint(sc, MPI3MR_INFO,
708 		      "abort task is %s for ccb(%p)\n", ((retval == 0) ? "SUCCESS" : "FAILED"), ccb);
709 
710 	return retval;
711 }
712 
713 /**
714  * mpi3mr_target_reset - Target reset error handling callback
715  * @cmd: Timed out command reference
716  *
717  * Issue Target reset Task Management and verify the SCSI commands are
718  * terminated successfully and return status accordingly.
719  *
720  * Return: SUCCESS of successful termination of the SCSI commands else
721  *         FAILED
722  */
723 static int mpi3mr_target_reset(struct mpi3mr_cmd *cmd)
724 {
725 	int retval = 0;
726 	struct mpi3mr_softc *sc;
727 	struct mpi3mr_target *target;
728 
729 	sc = cmd->sc;
730 
731 	target = cmd->targ;
732 	if (target == NULL)  {
733 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device does not exist for target:0x%p,"
734 			      "target reset is not required\n", target);
735 		return retval;
736 	}
737 
738 	mpi3mr_dprint(sc, MPI3MR_INFO,
739 		      "attempting target reset on target(%d)\n", target->per_id);
740 
741 
742 	if (mpi3mr_atomic_read(&target->outstanding)) {
743 		mpi3mr_dprint(sc, MPI3MR_INFO,
744 			      "no outstanding IOs on the target(%d),"
745 			      " target reset not required.\n", target->per_id);
746 		return retval;
747 	}
748 
749 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, MPI3MR_RESETTM_TIMEOUT);
750 
751 	mpi3mr_dprint(sc, MPI3MR_INFO,
752 		      "target reset is %s for target(%d)\n", ((retval == 0) ? "SUCCESS" : "FAILED"),
753 		      target->per_id);
754 
755 	return retval;
756 }
757 
758 /**
759  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
760  * @sc: Adapter instance reference
761  *
762  * Calculate the pending I/Os for the controller and return.
763  *
764  * Return: Number of pending I/Os
765  */
766 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_softc *sc)
767 {
768 	U16 i, pend_ios = 0;
769 
770 	for (i = 0; i < sc->num_queues; i++)
771 		pend_ios += mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
772 	return pend_ios;
773 }
774 
775 /**
776  * mpi3mr_wait_for_host_io - block for I/Os to complete
777  * @sc: Adapter instance reference
778  * @timeout: time out in seconds
779  *
780  * Waits for pending I/Os for the given adapter to complete or
781  * to hit the timeout.
782  *
783  * Return: Nothing
784  */
785 static int mpi3mr_wait_for_host_io(struct mpi3mr_softc *sc, U32 timeout)
786 {
787 	enum mpi3mr_iocstate iocstate;
788 
789 	iocstate = mpi3mr_get_iocstate(sc);
790 	if (iocstate != MRIOC_STATE_READY) {
791 		mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller is in NON-READY state! Proceed with Reset\n", __func__);
792 		return -1;
793 	}
794 
795 	if (!mpi3mr_get_fw_pending_ios(sc))
796 		return 0;
797 
798 	mpi3mr_dprint(sc, MPI3MR_INFO,
799 		      "%s :Waiting for %d seconds prior to reset for %d pending I/Os to complete\n",
800 		      __func__, timeout, mpi3mr_get_fw_pending_ios(sc));
801 
802 	int i;
803 	for (i = 0; i < timeout; i++) {
804 		if (!mpi3mr_get_fw_pending_ios(sc)) {
805 			mpi3mr_dprint(sc, MPI3MR_INFO, "%s :All pending I/Os got completed while waiting! Reset not required\n", __func__);
806 			return 0;
807 
808 		}
809 		iocstate = mpi3mr_get_iocstate(sc);
810 		if (iocstate != MRIOC_STATE_READY) {
811 			mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller state becomes NON-READY while waiting! dont wait further"
812 				      "Proceed with Reset\n", __func__);
813 			return -1;
814 		}
815 		DELAY(1000 * 1000);
816 	}
817 
818 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Pending I/Os after wait exaust is %d! Proceed with Reset\n", __func__,
819 		      mpi3mr_get_fw_pending_ios(sc));
820 
821 	return -1;
822 }
823 
824 static void
825 mpi3mr_scsiio_timeout(void *data)
826 {
827 	int retval = 0;
828 	struct mpi3mr_softc *sc;
829 	struct mpi3mr_cmd *cmd;
830 	struct mpi3mr_target *targ_dev = NULL;
831 
832 	if (!data)
833 		return;
834 
835 	cmd = (struct mpi3mr_cmd *)data;
836 	sc = cmd->sc;
837 
838 	if (cmd->ccb == NULL) {
839 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
840 		return;
841 	}
842 
843 	/*
844 	 * TMs are not supported for IO timeouts on VD/LD, so directly issue controller reset
845 	 * with max timeout for outstanding IOs to complete is 180sec.
846 	 */
847 	targ_dev = cmd->targ;
848 	if (targ_dev && (targ_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)) {
849 		if (mpi3mr_wait_for_host_io(sc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT))
850 			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
851 		return;
852  	}
853 
854 	/* Issue task abort to recover the timed out IO */
855 	retval = mpi3mr_task_abort(cmd);
856 	if (!retval || (retval == ETIMEDOUT))
857 		return;
858 
859 	/*
860 	 * task abort has failed to recover the timed out IO,
861 	 * try with the target reset
862 	 */
863 	retval = mpi3mr_target_reset(cmd);
864 	if (!retval || (retval == ETIMEDOUT))
865 		return;
866 
867 	/*
868 	 * task abort and target reset has failed. So issue Controller reset(soft reset)
869 	 * through OCR thread context
870 	 */
871 	trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
872 
873 	return;
874 }
875 
876 void int_to_lun(unsigned int lun, U8 *req_lun)
877 {
878 	int i;
879 
880 	memset(req_lun, 0, sizeof(*req_lun));
881 
882 	for (i = 0; i < sizeof(lun); i += 2) {
883 		req_lun[i] = (lun >> 8) & 0xFF;
884 		req_lun[i+1] = lun & 0xFF;
885 		lun = lun >> 16;
886 	}
887 
888 }
889 
890 static U16 get_req_queue_index(struct mpi3mr_softc *sc)
891 {
892 	U16 i = 0, reply_q_index = 0, reply_q_pend_ios = 0;
893 
894 	reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[0].pend_ios);
895 	for (i = 0; i < sc->num_queues; i++) {
896 		if (reply_q_pend_ios > mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios)) {
897 			reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
898 			reply_q_index = i;
899 		}
900 	}
901 
902 	return reply_q_index;
903 }
904 
905 static void
906 mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
907 {
908 	Mpi3SCSIIORequest_t *req = NULL;
909 	struct ccb_scsiio *csio;
910 	struct mpi3mr_softc *sc;
911 	struct mpi3mr_target *targ;
912 	struct mpi3mr_cmd *cm;
913 	uint8_t scsi_opcode, queue_idx;
914 	uint32_t mpi_control;
915 	struct mpi3mr_op_req_queue *opreqq = NULL;
916 	U32 data_len_blks = 0;
917 	U32 tracked_io_sz = 0;
918 	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
919 	struct mpi3mr_throttle_group_info *tg = NULL;
920 	static int ratelimit;
921 
922 	sc = cam_sc->sc;
923 	mtx_assert(&sc->mpi3mr_mtx, MA_OWNED);
924 
925 	if (sc->unrecoverable) {
926 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
927 		xpt_done(ccb);
928 		return;
929 	}
930 
931 	csio = &ccb->csio;
932 	KASSERT(csio->ccb_h.target_id < cam_sc->maxtargets,
933 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
934 	     csio->ccb_h.target_id));
935 
936 	scsi_opcode = scsiio_cdb_ptr(csio)[0];
937 
938 	if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) &&
939 	    !((scsi_opcode == SYNCHRONIZE_CACHE) ||
940 	      (scsi_opcode == START_STOP_UNIT))) {
941 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
942 		xpt_done(ccb);
943 		return;
944 	}
945 
946 	targ = mpi3mr_find_target_by_per_id(cam_sc, csio->ccb_h.target_id);
947 	if (targ == NULL)  {
948 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x does not exist\n",
949 			      csio->ccb_h.target_id);
950 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
951 		xpt_done(ccb);
952 		return;
953 	}
954 
955 	if (targ && targ->is_hidden)  {
956 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is hidden\n",
957 			      csio->ccb_h.target_id);
958 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
959 		xpt_done(ccb);
960 		return;
961 	}
962 
963 	if (targ->dev_removed == 1)  {
964 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is removed\n", csio->ccb_h.target_id);
965 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
966 		xpt_done(ccb);
967 		return;
968 	}
969 
970 	if (targ->dev_handle == 0x0) {
971 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s NULL handle for target 0x%x\n",
972 		    __func__, csio->ccb_h.target_id);
973 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
974 		xpt_done(ccb);
975 		return;
976 	}
977 
978 	if (mpi3mr_atomic_read(&targ->block_io) ||
979 		(sc->reset_in_progress == 1) || (sc->prepare_for_reset == 1)) {
980 		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s target is busy target_id: 0x%x\n",
981 		    __func__, csio->ccb_h.target_id);
982 		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
983 		xpt_done(ccb);
984 		return;
985 	}
986 
987 	/*
988 	 * Sometimes, it is possible to get a command that is not "In
989 	 * Progress" and was actually aborted by the upper layer.  Check for
990 	 * this here and complete the command without error.
991 	 */
992 	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
993 		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s Command is not in progress for "
994 		    "target %u\n", __func__, csio->ccb_h.target_id);
995 		xpt_done(ccb);
996 		return;
997 	}
998 	/*
999 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1000 	 * that the volume has timed out.  We want volumes to be enumerated
1001 	 * until they are deleted/removed, not just failed.
1002 	 */
1003 	if (targ->flags & MPI3MRSAS_TARGET_INREMOVAL) {
1004 		if (targ->devinfo == 0)
1005 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
1006 		else
1007 			mpi3mr_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1008 		xpt_done(ccb);
1009 		return;
1010 	}
1011 
1012 	if ((scsi_opcode == UNMAP) &&
1013 		(pci_get_device(sc->mpi3mr_dev) == MPI3_MFGPAGE_DEVID_SAS4116) &&
1014 		(targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
1015 		(mpi3mr_allow_unmap_to_fw(sc, ccb) == false))
1016 		return;
1017 
1018 	cm = mpi3mr_get_command(sc);
1019 	if (cm == NULL || (sc->mpi3mr_flags & MPI3MR_FLAGS_DIAGRESET)) {
1020 		if (cm != NULL) {
1021 			mpi3mr_release_command(cm);
1022 		}
1023 		if ((cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) == 0) {
1024 			xpt_freeze_simq(cam_sc->sim, 1);
1025 			cam_sc->flags |= MPI3MRSAS_QUEUE_FROZEN;
1026 		}
1027 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1028 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1029 		xpt_done(ccb);
1030 		return;
1031 	}
1032 
1033 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1034 	case CAM_DIR_IN:
1035 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
1036 		cm->data_dir = MPI3MR_READ;
1037 		break;
1038 	case CAM_DIR_OUT:
1039 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
1040 		cm->data_dir = MPI3MR_WRITE;
1041 		break;
1042 	case CAM_DIR_NONE:
1043 	default:
1044 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
1045 		break;
1046 	}
1047 
1048 	if (csio->cdb_len > 16)
1049 		mpi_control |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
1050 
1051 	req = (Mpi3SCSIIORequest_t *)&cm->io_request;
1052 	bzero(req, sizeof(*req));
1053 	req->Function = MPI3_FUNCTION_SCSI_IO;
1054 	req->HostTag = cm->hosttag;
1055 	req->DataLength = htole32(csio->dxfer_len);
1056 	req->DevHandle = htole16(targ->dev_handle);
1057 
1058 	/*
1059 	 * It looks like the hardware doesn't require an explicit tag
1060 	 * number for each transaction.  SAM Task Management not supported
1061 	 * at the moment.
1062 	 */
1063 	switch (csio->tag_action) {
1064 	case MSG_HEAD_OF_Q_TAG:
1065 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ;
1066 		break;
1067 	case MSG_ORDERED_Q_TAG:
1068 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ;
1069 		break;
1070 	case MSG_ACA_TASK:
1071 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ;
1072 		break;
1073 	case CAM_TAG_ACTION_NONE:
1074 	case MSG_SIMPLE_Q_TAG:
1075 	default:
1076 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
1077 		break;
1078 	}
1079 
1080 	req->Flags = htole32(mpi_control);
1081 
1082 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1083 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1084 	else {
1085 		KASSERT(csio->cdb_len <= IOCDBLEN,
1086 		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
1087 		    "is not set", csio->cdb_len));
1088 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1089 	}
1090 
1091 	cm->length = csio->dxfer_len;
1092 	cm->targ = targ;
1093 	int_to_lun(csio->ccb_h.target_lun, req->LUN);
1094 	cm->ccb = ccb;
1095 	csio->ccb_h.qos.sim_data = sbinuptime();
1096 	queue_idx = get_req_queue_index(sc);
1097 	cm->req_qidx = queue_idx;
1098 
1099 	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]: func: %s line:%d CDB: 0x%x targetid: %x SMID: 0x%x\n",
1100 		(queue_idx + 1), __func__, __LINE__, scsi_opcode, csio->ccb_h.target_id, cm->hosttag);
1101 
1102 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1103 
1104 	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
1105 	case CAM_DATA_PADDR:
1106 	case CAM_DATA_SG_PADDR:
1107 		device_printf(sc->mpi3mr_dev, "%s: physical addresses not supported\n",
1108 		    __func__);
1109 		mpi3mr_release_command(cm);
1110 		ccb->ccb_h.status = CAM_REQ_INVALID;
1111 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1112 		xpt_done(ccb);
1113 		return;
1114 	case CAM_DATA_SG:
1115 		device_printf(sc->mpi3mr_dev, "%s: scatter gather is not supported\n",
1116 		    __func__);
1117 		mpi3mr_release_command(cm);
1118 		ccb->ccb_h.status = CAM_REQ_INVALID;
1119 		xpt_done(ccb);
1120 		return;
1121 	case CAM_DATA_VADDR:
1122 	case CAM_DATA_BIO:
1123 		if (csio->dxfer_len > (MPI3MR_SG_DEPTH * MPI3MR_4K_PGSZ)) {
1124 			mpi3mr_release_command(cm);
1125 			ccb->ccb_h.status = CAM_REQ_TOO_BIG;
1126 			xpt_done(ccb);
1127 			return;
1128 		}
1129 		cm->length = csio->dxfer_len;
1130 		if (cm->length)
1131 			cm->data = csio->data_ptr;
1132 		break;
1133 	default:
1134 		ccb->ccb_h.status = CAM_REQ_INVALID;
1135 		xpt_done(ccb);
1136 		return;
1137 	}
1138 
1139 	/* Prepare SGEs */
1140 	if (mpi3mr_map_request(sc, cm)) {
1141 		mpi3mr_release_command(cm);
1142 		xpt_done(ccb);
1143 		printf("func: %s line: %d Build SGLs failed\n", __func__, __LINE__);
1144 		return;
1145 	}
1146 
1147 	opreqq = &sc->op_req_q[queue_idx];
1148 
1149 	if (sc->iot_enable) {
1150 		data_len_blks = csio->dxfer_len >> 9;
1151 
1152 		if ((data_len_blks >= sc->io_throttle_data_length) &&
1153 		    targ->io_throttle_enabled) {
1154 			tracked_io_sz = data_len_blks;
1155 			tg = targ->throttle_group;
1156 			if (tg) {
1157 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1158 				mpi3mr_atomic_add(&tg->pend_large_data_sz, data_len_blks);
1159 
1160 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1161 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
1162 
1163 				if (ratelimit % 1000) {
1164 					mpi3mr_dprint(sc, MPI3MR_IOT,
1165 						"large vd_io persist_id(%d), handle(0x%04x), data_len(%d),"
1166 						"ioc_pending(%d), tg_pending(%d), ioc_high(%d), tg_high(%d)\n",
1167 						targ->per_id, targ->dev_handle,
1168 						data_len_blks, ioc_pend_data_len,
1169 						tg_pend_data_len, sc->io_throttle_high,
1170 						tg->high);
1171 					ratelimit++;
1172 				}
1173 
1174 				if (!tg->io_divert  && ((ioc_pend_data_len >=
1175 				    sc->io_throttle_high) ||
1176 				    (tg_pend_data_len >= tg->high))) {
1177 					tg->io_divert = 1;
1178 					mpi3mr_dprint(sc, MPI3MR_IOT,
1179 						"VD: Setting divert flag for tg_id(%d), persist_id(%d)\n",
1180 						tg->id, targ->per_id);
1181 					if (sc->mpi3mr_debug | MPI3MR_IOT)
1182 						mpi3mr_print_cdb(ccb);
1183 					mpi3mr_set_io_divert_for_all_vd_in_tg(sc,
1184 					    tg, 1);
1185 				}
1186 			} else {
1187 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1188 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1189 				if (ratelimit % 1000) {
1190 					mpi3mr_dprint(sc, MPI3MR_IOT,
1191 					    "large pd_io persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_high(%d)\n",
1192 					    targ->per_id, targ->dev_handle,
1193 					    data_len_blks, ioc_pend_data_len,
1194 					    sc->io_throttle_high);
1195 					ratelimit++;
1196 				}
1197 
1198 				if (ioc_pend_data_len >= sc->io_throttle_high) {
1199 					targ->io_divert = 1;
1200 					mpi3mr_dprint(sc, MPI3MR_IOT,
1201 						"PD: Setting divert flag for persist_id(%d)\n",
1202 						targ->per_id);
1203 					if (sc->mpi3mr_debug | MPI3MR_IOT)
1204 						mpi3mr_print_cdb(ccb);
1205 				}
1206 			}
1207 		}
1208 
1209 		if (targ->io_divert) {
1210 			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
1211 			mpi_control |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
1212 		}
1213 	}
1214 	req->Flags = htole32(mpi_control);
1215 
1216 	if (mpi3mr_submit_io(sc, opreqq,
1217 	    	(U8 *)&cm->io_request)) {
1218 		mpi3mr_release_command(cm);
1219 		if (tracked_io_sz) {
1220 			mpi3mr_atomic_sub(&sc->pend_large_data_sz, tracked_io_sz);
1221 			if (tg)
1222 				mpi3mr_atomic_sub(&tg->pend_large_data_sz, tracked_io_sz);
1223 		}
1224 		mpi3mr_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
1225 		xpt_done(ccb);
1226 	} else {
1227 		callout_reset_sbt(&cm->callout, SBT_1S * 90 , 0,
1228 				  mpi3mr_scsiio_timeout, cm, 0);
1229 		mpi3mr_atomic_inc(&sc->fw_outstanding);
1230 		mpi3mr_atomic_inc(&targ->outstanding);
1231 		if (mpi3mr_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
1232 			sc->io_cmds_highwater++;
1233 	}
1234 
1235 	cm->callout_owner = true;
1236 	return;
1237 }
1238 
1239 static void
1240 mpi3mr_cam_poll(struct cam_sim *sim)
1241 {
1242 	struct mpi3mr_cam_softc *cam_sc;
1243 	struct mpi3mr_irq_context *irq_ctx;
1244 	struct mpi3mr_softc *sc;
1245 	int i;
1246 
1247 	cam_sc = cam_sim_softc(sim);
1248 	sc = cam_sc->sc;
1249 
1250 	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "func: %s line: %d is called\n",
1251 		__func__, __LINE__);
1252 
1253 	for (i = 0; i < sc->num_queues; i++) {
1254 		irq_ctx = sc->irq_ctx + i;
1255 		if (irq_ctx->op_reply_q->qid) {
1256 			mpi3mr_complete_io_cmd(sc, irq_ctx);
1257 		}
1258 	}
1259 }
1260 
/*
 * mpi3mr_cam_action - CAM SIM action entry point
 * @sim: SIM instance registered at attach time
 * @ccb: CAM control block describing the requested action
 *
 * Dispatches CCBs from CAM.  Path inquiry, transport settings,
 * geometry, and abort/reset requests are completed inline via the
 * trailing xpt_done(); XPT_SCSI_IO is handed off to
 * mpi3mr_action_scsiio(), which completes the CCB itself.
 * Called with the adapter mutex held.
 */
static void
mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *targ;

	cam_sc = cam_sim_softc(sim);

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "ccb func_code 0x%x target id: 0x%x\n",
	    ccb->ccb_h.func_code, ccb->ccb_h.target_id);

	mtx_assert(&cam_sc->sc->mpi3mr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = cam_sc->maxtargets - 1;
		cpi->max_lun = 0;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = cam_sc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Broadcom", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		targ = mpi3mr_find_target_by_per_id(cam_sc, ccb->ccb_h.target_id);

		/*
		 * NVMe targets cap maxio at the drive's reported MDTS;
		 * everything else is limited by the SGL depth.
		 */
		if (targ && (targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		    ((targ->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)) {
			cpi->maxio = targ->dev_spec.pcie_inf.mdts;
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
				"PCI device target_id: %u max io size: %u\n",
				ccb->ccb_h.target_id, cpi->maxio);
		} else {
			cpi->maxio = PAGE_SIZE * (MPI3MR_SG_DEPTH - 1);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < cam_sc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = mpi3mr_find_target_by_per_id(cam_sc, cts->ccb_h.target_id);

		if (targ == NULL) {
			mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "Device with target ID: 0x%x does not exist\n",
			cts->ccb_h.target_id);
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		if ((targ->dev_handle == 0x0) || (targ->dev_removed == 1))  {
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;

		/* Map the MPI3 negotiated link-rate code to a bitrate (Kb/s). */
		switch (targ->link_rate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Returns without xpt_done(); the CCB is not completed here. */
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action "
		    "XPT_RESET_DEV\n");
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action faking success "
		    "for abort or reset\n");
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* mpi3mr_action_scsiio() completes the CCB on all of its paths. */
		mpi3mr_action_scsiio(cam_sc, ccb);
		return;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);
}
1404 
1405 void
1406 mpi3mr_startup_increment(struct mpi3mr_cam_softc *cam_sc)
1407 {
1408 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1409 		if (cam_sc->startup_refcount++ == 0) {
1410 			/* just starting, freeze the simq */
1411 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1412 			    "%s freezing simq\n", __func__);
1413 			xpt_hold_boot();
1414 		}
1415 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1416 		    cam_sc->startup_refcount);
1417 	}
1418 }
1419 
1420 void
1421 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc)
1422 {
1423 	if (cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) {
1424 		cam_sc->flags &= ~MPI3MRSAS_QUEUE_FROZEN;
1425 		xpt_release_simq(cam_sc->sim, 1);
1426 		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "Unfreezing SIM queue\n");
1427 	}
1428 }
1429 
1430 void
1431 mpi3mr_rescan_target(struct mpi3mr_softc *sc, struct mpi3mr_target *targ)
1432 {
1433 	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
1434 	path_id_t pathid;
1435 	target_id_t targetid;
1436 	union ccb *ccb;
1437 
1438 	pathid = cam_sim_path(cam_sc->sim);
1439 	if (targ == NULL)
1440 		targetid = CAM_TARGET_WILDCARD;
1441 	else
1442 		targetid = targ->per_id;
1443 
1444 	/*
1445 	 * Allocate a CCB and schedule a rescan.
1446 	 */
1447 	ccb = xpt_alloc_ccb_nowait();
1448 	if (ccb == NULL) {
1449 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to alloc CCB for rescan\n");
1450 		return;
1451 	}
1452 
1453 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
1454 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1455 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to create path for rescan\n");
1456 		xpt_free_ccb(ccb);
1457 		return;
1458 	}
1459 
1460 	if (targetid == CAM_TARGET_WILDCARD)
1461 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
1462 	else
1463 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
1464 
1465 	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s target id 0x%x\n", __func__, targetid);
1466 	xpt_rescan(ccb);
1467 }
1468 
1469 void
1470 mpi3mr_startup_decrement(struct mpi3mr_cam_softc *cam_sc)
1471 {
1472 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1473 		if (--cam_sc->startup_refcount == 0) {
1474 			/* finished all discovery-related actions, release
1475 			 * the simq and rescan for the latest topology.
1476 			 */
1477 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1478 			    "%s releasing simq\n", __func__);
1479 			cam_sc->flags &= ~MPI3MRSAS_IN_STARTUP;
1480 			xpt_release_simq(cam_sc->sim, 1);
1481 			xpt_release_boot();
1482 		}
1483 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1484 		    cam_sc->startup_refcount);
1485 	}
1486 }
1487 
1488 static void
1489 mpi3mr_fw_event_free(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1490 {
1491 	if (!fw_event)
1492 		return;
1493 
1494 	if (fw_event->event_data != NULL) {
1495 		free(fw_event->event_data, M_MPI3MR);
1496 		fw_event->event_data = NULL;
1497 	}
1498 
1499 	free(fw_event, M_MPI3MR);
1500 	fw_event = NULL;
1501 }
1502 
/*
 * mpi3mr_freeup_events - drain and free all queued firmware events
 * @sc: Adapter instance reference
 *
 * Empties the CAM softc's event queue, freeing each entry without
 * processing it.
 *
 * NOTE(review): this walks ev_queue under mpi3mr_mtx, while
 * mpi3mr_firmware_event_work drains the same queue under fwevt_lock --
 * confirm which lock is intended to protect ev_queue.
 */
static void
mpi3mr_freeup_events(struct mpi3mr_softc *sc)
{
	struct mpi3mr_fw_event_work *fw_event = NULL;
	mtx_lock(&sc->mpi3mr_mtx);
	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
		mpi3mr_fw_event_free(sc, fw_event);
	}
	mtx_unlock(&sc->mpi3mr_mtx);
}
1514 
1515 static void
1516 mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc *sc,
1517 	Mpi3EventDataSasTopologyChangeList_t *event_data)
1518 {
1519 	int i;
1520 	U16 handle;
1521 	U8 reason_code, phy_number;
1522 	char *status_str = NULL;
1523 	U8 link_rate, prev_link_rate;
1524 
1525 	switch (event_data->ExpStatus) {
1526 	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1527 		status_str = "remove";
1528 		break;
1529 	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1530 		status_str =  "responding";
1531 		break;
1532 	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1533 		status_str = "remove delay";
1534 		break;
1535 	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1536 		status_str = "direct attached";
1537 		break;
1538 	default:
1539 		status_str = "unknown status";
1540 		break;
1541 	}
1542 
1543 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :sas topology change: (%s)\n",
1544 	    __func__, status_str);
1545 	mpi3mr_dprint(sc, MPI3MR_INFO,
1546 		"%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) "
1547 	    "start_phy(%02d), num_entries(%d)\n", __func__,
1548 	    (event_data->ExpanderDevHandle),
1549 	    (event_data->EnclosureHandle),
1550 	    event_data->StartPhyNum, event_data->NumEntries);
1551 	for (i = 0; i < event_data->NumEntries; i++) {
1552 		handle = (event_data->PhyEntry[i].AttachedDevHandle);
1553 		if (!handle)
1554 			continue;
1555 		phy_number = event_data->StartPhyNum + i;
1556 		reason_code = event_data->PhyEntry[i].Status &
1557 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1558 		switch (reason_code) {
1559 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1560 			status_str = "target remove";
1561 			break;
1562 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1563 			status_str = "delay target remove";
1564 			break;
1565 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1566 			status_str = "link rate change";
1567 			break;
1568 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1569 			status_str = "target responding";
1570 			break;
1571 		default:
1572 			status_str = "unknown";
1573 			break;
1574 		}
1575 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1576 		prev_link_rate = event_data->PhyEntry[i].LinkRate & 0xF;
1577 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tphy(%02d), attached_handle(0x%04x): %s:"
1578 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1579 		    phy_number, handle, status_str, link_rate, prev_link_rate);
1580 	}
1581 }
1582 
1583 static void
1584 mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fwevt)
1585 {
1586 
1587 	Mpi3EventDataSasTopologyChangeList_t *event_data =
1588 		    (Mpi3EventDataSasTopologyChangeList_t *)fwevt->event_data;
1589 	int i;
1590 	U16 handle;
1591 	U8 reason_code, link_rate;
1592 	struct mpi3mr_target *target = NULL;
1593 
1594 
1595 	mpi3mr_sastopochg_evt_debug(sc, event_data);
1596 
1597 	for (i = 0; i < event_data->NumEntries; i++) {
1598 		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
1599 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1600 
1601 		if (!handle)
1602 			continue;
1603 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1604 
1605 		if (!target)
1606 			continue;
1607 
1608 		target->link_rate = link_rate;
1609 		reason_code = event_data->PhyEntry[i].Status &
1610 			MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1611 
1612 		switch (reason_code) {
1613 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1614 			if (target->exposed_to_os)
1615 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1616 			mpi3mr_remove_device_from_list(sc, target, false);
1617 			break;
1618 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1619 			break;
1620 		default:
1621 			break;
1622 		}
1623 	}
1624 
1625 	/*
1626 	 * refcount was incremented for this event in
1627 	 * mpi3mr_evt_handler. Decrement it here because the event has
1628 	 * been processed.
1629 	 */
1630 	mpi3mr_startup_decrement(sc->cam_sc);
1631 	return;
1632 }
1633 
1634 static inline void
1635 mpi3mr_logdata_evt_bh(struct mpi3mr_softc *sc,
1636 		      struct mpi3mr_fw_event_work *fwevt)
1637 {
1638 	mpi3mr_app_save_logdata(sc, fwevt->event_data,
1639 				fwevt->event_data_size);
1640 }
1641 
1642 static void
1643 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc *sc,
1644 	Mpi3EventDataPcieTopologyChangeList_t *event_data)
1645 {
1646 	int i;
1647 	U16 handle;
1648 	U16 reason_code;
1649 	U8 port_number;
1650 	char *status_str = NULL;
1651 	U8 link_rate, prev_link_rate;
1652 
1653 	switch (event_data->SwitchStatus) {
1654 	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1655 		status_str = "remove";
1656 		break;
1657 	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1658 		status_str =  "responding";
1659 		break;
1660 	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1661 		status_str = "remove delay";
1662 		break;
1663 	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1664 		status_str = "direct attached";
1665 		break;
1666 	default:
1667 		status_str = "unknown status";
1668 		break;
1669 	}
1670 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :pcie topology change: (%s)\n",
1671 		__func__, status_str);
1672 	mpi3mr_dprint(sc, MPI3MR_INFO,
1673 		"%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
1674 		"start_port(%02d), num_entries(%d)\n", __func__,
1675 		le16toh(event_data->SwitchDevHandle),
1676 		le16toh(event_data->EnclosureHandle),
1677 		event_data->StartPortNum, event_data->NumEntries);
1678 	for (i = 0; i < event_data->NumEntries; i++) {
1679 		handle =
1680 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1681 		if (!handle)
1682 			continue;
1683 		port_number = event_data->StartPortNum + i;
1684 		reason_code = event_data->PortEntry[i].PortStatus;
1685 		switch (reason_code) {
1686 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1687 			status_str = "target remove";
1688 			break;
1689 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1690 			status_str = "delay target remove";
1691 			break;
1692 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1693 			status_str = "link rate change";
1694 			break;
1695 		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1696 			status_str = "target responding";
1697 			break;
1698 		default:
1699 			status_str = "unknown";
1700 			break;
1701 		}
1702 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1703 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1704 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
1705 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1706 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tport(%02d), attached_handle(0x%04x): %s:"
1707 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1708 		    port_number, handle, status_str, link_rate, prev_link_rate);
1709 	}
1710 }
1711 
1712 static void mpi3mr_process_pcietopochg_evt(struct mpi3mr_softc *sc,
1713     struct mpi3mr_fw_event_work *fwevt)
1714 {
1715 	Mpi3EventDataPcieTopologyChangeList_t *event_data =
1716 		    (Mpi3EventDataPcieTopologyChangeList_t *)fwevt->event_data;
1717 	int i;
1718 	U16 handle;
1719 	U8 reason_code, link_rate;
1720 	struct mpi3mr_target *target = NULL;
1721 
1722 
1723 	mpi3mr_pcietopochg_evt_debug(sc, event_data);
1724 
1725 	for (i = 0; i < event_data->NumEntries; i++) {
1726 		handle =
1727 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1728 		if (!handle)
1729 			continue;
1730 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1731 		if (!target)
1732 			continue;
1733 
1734 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1735 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1736 		target->link_rate = link_rate;
1737 
1738 		reason_code = event_data->PortEntry[i].PortStatus;
1739 
1740 		switch (reason_code) {
1741 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1742 			if (target->exposed_to_os)
1743 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1744 			mpi3mr_remove_device_from_list(sc, target, false);
1745 			break;
1746 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1747 			break;
1748 		default:
1749 			break;
1750 		}
1751 	}
1752 
1753 	/*
1754 	 * refcount was incremented for this event in
1755 	 * mpi3mr_evt_handler. Decrement it here because the event has
1756 	 * been processed.
1757 	 */
1758 	mpi3mr_startup_decrement(sc->cam_sc);
1759 	return;
1760 }
1761 
1762 void mpi3mr_add_device(struct mpi3mr_softc *sc, U16 per_id)
1763 {
1764 	struct mpi3mr_target *target;
1765 
1766 	mpi3mr_dprint(sc, MPI3MR_EVENT,
1767 		"Adding device(persistent id: 0x%x)\n", per_id);
1768 
1769 	mpi3mr_startup_increment(sc->cam_sc);
1770 	target = mpi3mr_find_target_by_per_id(sc->cam_sc, per_id);
1771 
1772 	if (!target) {
1773 		mpi3mr_dprint(sc, MPI3MR_INFO, "Not available in driver's"
1774 		    "internal target list, persistent_id: %d\n",
1775 		    per_id);
1776 		goto out;
1777 	}
1778 
1779 	if (target->is_hidden) {
1780 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Target is hidden, persistent_id: %d\n",
1781 			per_id);
1782 		goto out;
1783 	}
1784 
1785 	if (!target->exposed_to_os && !sc->reset_in_progress) {
1786 		mpi3mr_rescan_target(sc, target);
1787 		mpi3mr_dprint(sc, MPI3MR_INFO,
1788 			"Added device persistent_id: %d dev_handle: %d\n", per_id, target->dev_handle);
1789 		target->exposed_to_os = 1;
1790 	}
1791 
1792 out:
1793 	mpi3mr_startup_decrement(sc->cam_sc);
1794 }
1795 
1796 int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
1797 {
1798 	U32 i = 0;
1799 	int retval = 0;
1800 	struct mpi3mr_target *target;
1801 
1802 	mpi3mr_dprint(sc, MPI3MR_EVENT,
1803 		"Removing Device (dev_handle: %d)\n", handle);
1804 
1805 	target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1806 
1807 	if (!target) {
1808 		mpi3mr_dprint(sc, MPI3MR_INFO,
1809 			"Device (persistent_id: %d dev_handle: %d) is already removed from driver's list\n",
1810 			target->per_id, handle);
1811 		mpi3mr_rescan_target(sc, NULL);
1812 		retval = -1;
1813 		goto out;
1814 	}
1815 
1816 	target->flags |= MPI3MRSAS_TARGET_INREMOVAL;
1817 
1818 	while (mpi3mr_atomic_read(&target->outstanding) && (i < 30)) {
1819 		i++;
1820 		if (!(i % 2)) {
1821 			mpi3mr_dprint(sc, MPI3MR_INFO,
1822 			    "[%2d]waiting for "
1823 			    "waiting for outstanding commands to complete on target: %d\n",
1824 			    i, target->per_id);
1825 		}
1826 		DELAY(1000 * 1000);
1827 	}
1828 
1829 	if (target->exposed_to_os && !sc->reset_in_progress) {
1830 		mpi3mr_rescan_target(sc, target);
1831 		mpi3mr_dprint(sc, MPI3MR_INFO,
1832 			"Removed device(persistent_id: %d dev_handle: %d)\n", target->per_id, handle);
1833 		target->exposed_to_os = 0;
1834 	}
1835 
1836 	target->flags &= ~MPI3MRSAS_TARGET_INREMOVAL;
1837 out:
1838 	return retval;
1839 }
1840 
/*
 * mpi3mr_remove_device_from_list - unlink and free a target
 * @sc: Adapter instance reference
 * @target: Target to remove
 * @must_delete: Force removal regardless of the target's state
 *
 * Under the target spin lock, unlinks the target from the CAM target
 * list when its host-side removal handshake has started (or when
 * forced) and marks it deleted; the target memory is then freed.
 * The caller must not touch @target after this returns.
 *
 * NOTE(review): target->state is re-read after the spin lock is
 * dropped; confirm no other context can change it between the unlock
 * and the free.
 */
void mpi3mr_remove_device_from_list(struct mpi3mr_softc *sc,
	struct mpi3mr_target *target, bool must_delete)
{
	mtx_lock_spin(&sc->target_lock);
	if ((target->state == MPI3MR_DEV_REMOVE_HS_STARTED) ||
	    (must_delete == true)) {
		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
		target->state = MPI3MR_DEV_DELETED;
	}
	mtx_unlock_spin(&sc->target_lock);

	if (target->state == MPI3MR_DEV_DELETED) {
 		free(target, M_MPI3MR);
 		target = NULL;
 	}

	return;
}
1859 
1860 /**
1861  * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
1862  * @sc: Adapter instance reference
1863  * @fwevt: Firmware event
1864  *
1865  * Process Device Status Change event and based on device's new
1866  * information, either expose the device to the upper layers, or
1867  * remove the device from upper layers.
1868  *
1869  * Return: Nothing.
1870  */
1871 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_softc *sc,
1872 	struct mpi3mr_fw_event_work *fwevt)
1873 {
1874 	U16 dev_handle = 0;
1875 	U8 uhide = 0, delete = 0, cleanup = 0;
1876 	struct mpi3mr_target *tgtdev = NULL;
1877 	Mpi3EventDataDeviceStatusChange_t *evtdata =
1878 	    (Mpi3EventDataDeviceStatusChange_t *)fwevt->event_data;
1879 
1880 
1881 
1882 	dev_handle = le16toh(evtdata->DevHandle);
1883 	mpi3mr_dprint(sc, MPI3MR_INFO,
1884 	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
1885 	    __func__, dev_handle, evtdata->ReasonCode);
1886 	switch (evtdata->ReasonCode) {
1887 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
1888 		delete = 1;
1889 		break;
1890 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
1891 		uhide = 1;
1892 		break;
1893 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
1894 		delete = 1;
1895 		cleanup = 1;
1896 		break;
1897 	default:
1898 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Unhandled reason code(0x%x)\n", __func__,
1899 		    evtdata->ReasonCode);
1900 		break;
1901 	}
1902 
1903 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1904 	if (!tgtdev)
1905 		return;
1906 
1907 	if (uhide) {
1908 		if (!tgtdev->exposed_to_os)
1909 			mpi3mr_add_device(sc, tgtdev->per_id);
1910 	}
1911 
1912 	if (delete)
1913 		mpi3mr_remove_device_from_os(sc, dev_handle);
1914 
1915 	if (cleanup)
1916 		mpi3mr_remove_device_from_list(sc, tgtdev, false);
1917 }
1918 
1919 /**
1920  * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1921  * @sc: Adapter instance reference
1922  * @dev_pg0: New device page0
1923  *
1924  * Process Device Info Change event and based on device's new
1925  * information, either expose the device to the upper layers, or
1926  * remove the device from upper layers or update the details of
1927  * the device.
1928  *
1929  * Return: Nothing.
1930  */
1931 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_softc *sc,
1932 	Mpi3DevicePage0_t *dev_pg0)
1933 {
1934 	struct mpi3mr_target *tgtdev = NULL;
1935 	U16 dev_handle = 0, perst_id = 0;
1936 
1937 	perst_id = le16toh(dev_pg0->PersistentID);
1938 	dev_handle = le16toh(dev_pg0->DevHandle);
1939 	mpi3mr_dprint(sc, MPI3MR_INFO,
1940 	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1941 	    __func__, dev_handle, perst_id);
1942 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1943 	if (!tgtdev)
1944 		return;
1945 
1946 	mpi3mr_update_device(sc, tgtdev, dev_pg0, false);
1947 	if (!tgtdev->is_hidden && !tgtdev->exposed_to_os)
1948 		mpi3mr_add_device(sc, perst_id);
1949 
1950 	if (tgtdev->is_hidden && tgtdev->exposed_to_os)
1951 		mpi3mr_remove_device_from_os(sc, tgtdev->dev_handle);
1952 }
1953 
1954 static void
1955 mpi3mr_fw_work(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1956 {
1957 	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
1958 		goto out;
1959 
1960 	if (!fw_event->process_event)
1961 		goto evt_ack;
1962 
1963 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Working on  Event: [%x]\n",
1964 	    event_count++, __func__, fw_event->event);
1965 
1966 	switch (fw_event->event) {
1967 	case MPI3_EVENT_DEVICE_ADDED:
1968 	{
1969 		Mpi3DevicePage0_t *dev_pg0 =
1970 			(Mpi3DevicePage0_t *) fw_event->event_data;
1971 		mpi3mr_add_device(sc, dev_pg0->PersistentID);
1972 		break;
1973 	}
1974 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
1975 	{
1976 		mpi3mr_devinfochg_evt_bh(sc,
1977 		    (Mpi3DevicePage0_t *) fw_event->event_data);
1978 		break;
1979 	}
1980 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1981 	{
1982 		mpi3mr_devstatuschg_evt_bh(sc, fw_event);
1983 		break;
1984 	}
1985 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1986 	{
1987 		mpi3mr_process_sastopochg_evt(sc, fw_event);
1988 		break;
1989 	}
1990 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1991 	{
1992 		mpi3mr_process_pcietopochg_evt(sc, fw_event);
1993 		break;
1994 	}
1995 	case MPI3_EVENT_LOG_DATA:
1996 	{
1997 		mpi3mr_logdata_evt_bh(sc, fw_event);
1998 		break;
1999 	}
2000 	default:
2001 		mpi3mr_dprint(sc, MPI3MR_TRACE,"Unhandled event 0x%0X\n",
2002 		    fw_event->event);
2003 		break;
2004 
2005 	}
2006 
2007 evt_ack:
2008 	if (fw_event->send_ack) {
2009 		mpi3mr_dprint(sc, MPI3MR_EVENT,"Process event ACK for event 0x%0X\n",
2010 		    fw_event->event);
2011 		mpi3mr_process_event_ack(sc, fw_event->event,
2012 		    fw_event->event_context);
2013 	}
2014 
2015 out:
2016 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Event Free: [%x]\n", event_count,
2017 	    __func__, fw_event->event);
2018 
2019 	mpi3mr_fw_event_free(sc, fw_event);
2020 }
2021 
2022 void
2023 mpi3mr_firmware_event_work(void *arg, int pending)
2024 {
2025 	struct mpi3mr_fw_event_work *fw_event;
2026 	struct mpi3mr_softc *sc;
2027 
2028 	sc = (struct mpi3mr_softc *)arg;
2029 
2030 	mtx_lock(&sc->fwevt_lock);
2031 	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
2032 		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
2033 		mtx_unlock(&sc->fwevt_lock);
2034 		mpi3mr_fw_work(sc, fw_event);
2035 		mtx_lock(&sc->fwevt_lock);
2036 	}
2037 	mtx_unlock(&sc->fwevt_lock);
2038 }
2039 
2040 
2041 /*
2042  * mpi3mr_cam_attach - CAM layer registration
2043  * @sc: Adapter reference
2044  *
2045  * This function does simq allocation, cam registration, xpt_bus registration,
2046  * event taskqueue initialization and async event handler registration.
2047  *
2048  * Return: 0 on success and proper error codes on failure
2049  */
2050 int
2051 mpi3mr_cam_attach(struct mpi3mr_softc *sc)
2052 {
2053 	struct mpi3mr_cam_softc *cam_sc;
2054 	cam_status status;
2055 	int unit, error = 0, reqs;
2056 
2057 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");
2058 
2059 	cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR, M_WAITOK|M_ZERO);
2060 	if (!cam_sc) {
2061 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2062 		    "Failed to allocate memory for controller CAM instance\n");
2063 		return (ENOMEM);
2064 	}
2065 
2066 	cam_sc->maxtargets = sc->facts.max_perids + 1;
2067 
2068 	TAILQ_INIT(&cam_sc->tgt_list);
2069 
2070 	sc->cam_sc = cam_sc;
2071 	cam_sc->sc = sc;
2072 
2073 	reqs = sc->max_host_ios;
2074 
2075 	if ((cam_sc->devq = cam_simq_alloc(reqs)) == NULL) {
2076 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIMQ\n");
2077 		error = ENOMEM;
2078 		goto out;
2079 	}
2080 
2081 	unit = device_get_unit(sc->mpi3mr_dev);
2082 	cam_sc->sim = cam_sim_alloc(mpi3mr_cam_action, mpi3mr_cam_poll, "mpi3mr", cam_sc,
2083 	    unit, &sc->mpi3mr_mtx, reqs, reqs, cam_sc->devq);
2084 	if (cam_sc->sim == NULL) {
2085 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIM\n");
2086 		error = EINVAL;
2087 		goto out;
2088 	}
2089 
2090 	TAILQ_INIT(&cam_sc->ev_queue);
2091 
2092 	/* Initialize taskqueue for Event Handling */
2093 	TASK_INIT(&cam_sc->ev_task, 0, mpi3mr_firmware_event_work, sc);
2094 	cam_sc->ev_tq = taskqueue_create("mpi3mr_taskq", M_NOWAIT | M_ZERO,
2095 	    taskqueue_thread_enqueue, &cam_sc->ev_tq);
2096 	taskqueue_start_threads(&cam_sc->ev_tq, 1, PRIBIO, "%s taskq",
2097 	    device_get_nameunit(sc->mpi3mr_dev));
2098 
2099 	mtx_lock(&sc->mpi3mr_mtx);
2100 
2101 	/*
2102 	 * XXX There should be a bus for every port on the adapter, but since
2103 	 * we're just going to fake the topology for now, we'll pretend that
2104 	 * everything is just a target on a single bus.
2105 	 */
2106 	if ((error = xpt_bus_register(cam_sc->sim, sc->mpi3mr_dev, 0)) != 0) {
2107 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2108 		    "Error 0x%x registering SCSI bus\n", error);
2109 		mtx_unlock(&sc->mpi3mr_mtx);
2110 		goto out;
2111 	}
2112 
2113 	/*
2114 	 * Assume that discovery events will start right away.
2115 	 *
2116 	 * Hold off boot until discovery is complete.
2117 	 */
2118 	cam_sc->flags |= MPI3MRSAS_IN_STARTUP | MPI3MRSAS_IN_DISCOVERY;
2119 	sc->cam_sc->startup_refcount = 0;
2120 	mpi3mr_startup_increment(cam_sc);
2121 
2122 	callout_init(&cam_sc->discovery_callout, 1 /*mpsafe*/);
2123 
2124 	/*
2125 	 * Register for async events so we can determine the EEDP
2126 	 * capabilities of devices.
2127 	 */
2128 	status = xpt_create_path(&cam_sc->path, /*periph*/NULL,
2129 	    cam_sim_path(sc->cam_sc->sim), CAM_TARGET_WILDCARD,
2130 	    CAM_LUN_WILDCARD);
2131 	if (status != CAM_REQ_CMP) {
2132 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2133 		    "Error 0x%x creating sim path\n", status);
2134 		cam_sc->path = NULL;
2135 	}
2136 
2137 	if (status != CAM_REQ_CMP) {
2138 		/*
2139 		 * EEDP use is the exception, not the rule.
2140 		 * Warn the user, but do not fail to attach.
2141 		 */
2142 		mpi3mr_dprint(sc, MPI3MR_INFO, "EEDP capabilities disabled.\n");
2143 	}
2144 
2145 	mtx_unlock(&sc->mpi3mr_mtx);
2146 
2147 	error = mpi3mr_register_events(sc);
2148 
2149 out:
2150 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s Exiting CAM attach, error: 0x%x n", __func__, error);
2151 	return (error);
2152 }
2153 
2154 int
2155 mpi3mr_cam_detach(struct mpi3mr_softc *sc)
2156 {
2157 	struct mpi3mr_cam_softc *cam_sc;
2158 	struct mpi3mr_target *target;
2159 
2160 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Starting CAM detach\n", __func__);
2161 	if (sc->cam_sc == NULL)
2162 		return (0);
2163 
2164 	cam_sc = sc->cam_sc;
2165 
2166 	mpi3mr_freeup_events(sc);
2167 
2168 	/*
2169 	 * Drain and free the event handling taskqueue with the lock
2170 	 * unheld so that any parallel processing tasks drain properly
2171 	 * without deadlocking.
2172 	 */
2173 	if (cam_sc->ev_tq != NULL)
2174 		taskqueue_free(cam_sc->ev_tq);
2175 
2176 	mtx_lock(&sc->mpi3mr_mtx);
2177 
2178 	while (cam_sc->startup_refcount != 0)
2179 		mpi3mr_startup_decrement(cam_sc);
2180 
2181 	/* Deregister our async handler */
2182 	if (cam_sc->path != NULL) {
2183 		xpt_free_path(cam_sc->path);
2184 		cam_sc->path = NULL;
2185 	}
2186 
2187 	if (cam_sc->flags & MPI3MRSAS_IN_STARTUP)
2188 		xpt_release_simq(cam_sc->sim, 1);
2189 
2190 	if (cam_sc->sim != NULL) {
2191 		xpt_bus_deregister(cam_sim_path(cam_sc->sim));
2192 		cam_sim_free(cam_sc->sim, FALSE);
2193 	}
2194 
2195 	mtx_unlock(&sc->mpi3mr_mtx);
2196 
2197 	if (cam_sc->devq != NULL)
2198 		cam_simq_free(cam_sc->devq);
2199 
2200 get_target:
2201 	mtx_lock_spin(&sc->target_lock);
2202  	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
2203  		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
2204 		mtx_unlock_spin(&sc->target_lock);
2205 		goto out_tgt_free;
2206 	}
2207 	mtx_unlock_spin(&sc->target_lock);
2208 out_tgt_free:
2209 	if (target) {
2210 		free(target, M_MPI3MR);
2211 		target = NULL;
2212 		goto get_target;
2213  	}
2214 
2215 	free(cam_sc, M_MPI3MR);
2216 	sc->cam_sc = NULL;
2217 
2218 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Exiting CAM detach\n", __func__);
2219 	return (0);
2220 }
2221