xref: /freebsd/sys/dev/mpi3mr/mpi3mr.c (revision bdd1243d)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46 
47 #include <sys/types.h>
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/module.h>
52 #include <sys/bus.h>
53 #include <sys/conf.h>
54 #include <sys/malloc.h>
55 #include <sys/sysctl.h>
56 #include <sys/uio.h>
57 
58 #include <machine/bus.h>
59 #include <machine/resource.h>
60 #include <sys/rman.h>
61 
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pci_private.h>
65 
66 #include <cam/cam.h>
67 #include <cam/cam_ccb.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #include <cam/scsi/smp_all.h>
76 #include <sys/queue.h>
77 #include <sys/kthread.h>
78 #include "mpi3mr.h"
79 #include "mpi3mr_cam.h"
80 #include "mpi3mr_app.h"
81 
82 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
83 	U64 reply_dma);
84 static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc);
85 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
86 	struct mpi3mr_drvr_cmd *drvrcmd);
87 static void mpi3mr_flush_io(struct mpi3mr_softc *sc);
88 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
89 	U32 reset_reason);
90 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
91 	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc);
92 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
93 	struct mpi3mr_drvr_cmd *drv_cmd);
94 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
95 	struct mpi3mr_drvr_cmd *drv_cmd);
96 static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
97 	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx);
98 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc);
99 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc);
100 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code);
101 
102 void
103 mpi3mr_hexdump(void *buf, int sz, int format)
104 {
105         int i;
106         U32 *buf_loc = (U32 *)buf;
107 
108         for (i = 0; i < (sz / sizeof(U32)); i++) {
109                 if ((i % format) == 0) {
110                         if (i != 0)
111                                 printf("\n");
112                         printf("%08x: ", (i * 4));
113                 }
114                 printf("%08x ", buf_loc[i]);
115         }
116         printf("\n");
117 }
118 
void
init_completion(struct completion *completion)
{
	/* Reset the completion object to the not-yet-completed state. */
	completion->done = 0;
}
124 
void
complete(struct completion *completion)
{
	/* Mark the operation done so pollers of 'done' stop waiting. */
	completion->done = 1;
	/*
	 * NOTE(review): the wakeup channel here is the address of the
	 * complete() function itself, not the 'completion' argument.
	 * The waiters visible in this file either busy-poll 'done'
	 * (wait_for_completion_timeout) or sleep on &sc->tm_chan
	 * (wait_for_completion_timeout_tm), so no sleeper matches this
	 * channel -- confirm whether wakeup(completion) was intended.
	 */
	wakeup(complete);
}
131 
132 void wait_for_completion_timeout(struct completion *completion,
133 	    U32 timeout)
134 {
135 	U32 count = timeout * 1000;
136 
137 	while ((completion->done == 0) && count) {
138                 DELAY(1000);
139 		count--;
140 	}
141 
142 	if (completion->done == 0) {
143 		printf("%s: Command is timedout\n", __func__);
144 		completion->done = 1;
145 	}
146 }
/*
 * Sleep-wait (on the adapter's TM channel) for a task-management command
 * completion, logging and forcing 'done' on expiry so callers do not wait
 * again.  Caller must hold sc->mpi3mr_mtx, which msleep() drops and
 * re-acquires around each sleep.
 */
void wait_for_completion_timeout_tm(struct completion *completion,
	    U32 timeout, struct mpi3mr_softc *sc)
{
	/*
	 * NOTE(review): each msleep() below can sleep up to 1 second
	 * (1 * hz), yet the loop bound is timeout * 1000 iterations --
	 * a worst-case wait of timeout*1000 seconds, 1000x longer than
	 * the sibling wait_for_completion_timeout().  Confirm whether
	 * 'count = timeout' was intended, or whether the slack is
	 * deliberate to absorb frequent wakeups on tm_chan.
	 */
	U32 count = timeout * 1000;

	while ((completion->done == 0) && count) {
		/* Sleep until woken via sc->tm_chan or a 1s tick elapses. */
		msleep(&sc->tm_chan, &sc->mpi3mr_mtx, PRIBIO,
		       "TM command", 1 * hz);
		count--;
	}

	if (completion->done == 0) {
		printf("%s: Command is timedout\n", __func__);
		completion->done = 1;
	}
}
163 
164 
165 void
166 poll_for_command_completion(struct mpi3mr_softc *sc,
167        struct mpi3mr_drvr_cmd *cmd, U16 wait)
168 {
169 	int wait_time = wait * 1000;
170        while (wait_time) {
171                mpi3mr_complete_admin_cmd(sc);
172                if (cmd->state & MPI3MR_CMD_COMPLETE)
173                        break;
174 	       DELAY(1000);
175                wait_time--;
176        }
177 }
178 
179 /**
180  * mpi3mr_trigger_snapdump - triggers firmware snapdump
181  * @sc: Adapter instance reference
182  * @reason_code: reason code for the fault.
183  *
184  * This routine will trigger the snapdump and wait for it to
185  * complete or timeout before it returns.
 * This will be called during initialization time faults/resets/timeouts
187  * before soft reset invocation.
188  *
189  * Return:  None.
190  */
191 static void
192 mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U32 reason_code)
193 {
194 	U32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
195 
196 	mpi3mr_dprint(sc, MPI3MR_INFO, "snapdump triggered: reason code: %s\n",
197 	    mpi3mr_reset_rc_name(reason_code));
198 
199 	mpi3mr_set_diagsave(sc);
200 	mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
201 			   reason_code);
202 
203 	do {
204 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
205 		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
206 			break;
207                 DELAY(100 * 1000);
208 	} while (--timeout);
209 
210 	return;
211 }
212 
213 /**
214  * mpi3mr_check_rh_fault_ioc - check reset history and fault
215  * controller
216  * @sc: Adapter instance reference
 * @reason_code: reason code for the fault.
218  *
219  * This routine will fault the controller with
220  * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeouts as in those cases
223  * immediate soft reset invocation is not required.
224  *
225  * Return:  None.
226  */
227 static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U32 reason_code)
228 {
229 	U32 ioc_status;
230 
231 	if (sc->unrecoverable) {
232 		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is unrecoverable\n");
233 		return;
234 	}
235 
236 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
237 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
238 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
239 		mpi3mr_print_fault_info(sc);
240 		return;
241 	}
242 
243 	mpi3mr_trigger_snapdump(sc, reason_code);
244 
245 	return;
246 }
247 
248 static void * mpi3mr_get_reply_virt_addr(struct mpi3mr_softc *sc,
249     bus_addr_t phys_addr)
250 {
251 	if (!phys_addr)
252 		return NULL;
253 	if ((phys_addr < sc->reply_buf_dma_min_address) ||
254 	    (phys_addr > sc->reply_buf_dma_max_address))
255 		return NULL;
256 
257 	return sc->reply_buf + (phys_addr - sc->reply_buf_phys);
258 }
259 
260 static void * mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_softc *sc,
261     bus_addr_t phys_addr)
262 {
263 	if (!phys_addr)
264 		return NULL;
265 	return sc->sense_buf + (phys_addr - sc->sense_buf_phys);
266 }
267 
268 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
269     U64 reply_dma)
270 {
271 	U32 old_idx = 0;
272 
273 	mtx_lock_spin(&sc->reply_free_q_lock);
274 	old_idx  =  sc->reply_free_q_host_index;
275 	sc->reply_free_q_host_index = ((sc->reply_free_q_host_index ==
276 	    (sc->reply_free_q_sz - 1)) ? 0 :
277 	    (sc->reply_free_q_host_index + 1));
278 	sc->reply_free_q[old_idx] = reply_dma;
279 	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
280 		sc->reply_free_q_host_index);
281 	mtx_unlock_spin(&sc->reply_free_q_lock);
282 }
283 
284 static void mpi3mr_repost_sense_buf(struct mpi3mr_softc *sc,
285     U64 sense_buf_phys)
286 {
287 	U32 old_idx = 0;
288 
289 	mtx_lock_spin(&sc->sense_buf_q_lock);
290 	old_idx  =  sc->sense_buf_q_host_index;
291 	sc->sense_buf_q_host_index = ((sc->sense_buf_q_host_index ==
292 	    (sc->sense_buf_q_sz - 1)) ? 0 :
293 	    (sc->sense_buf_q_host_index + 1));
294 	sc->sense_buf_q[old_idx] = sense_buf_phys;
295 	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
296 		sc->sense_buf_q_host_index);
297 	mtx_unlock_spin(&sc->sense_buf_q_lock);
298 
299 }
300 
301 void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_softc *sc,
302 	struct mpi3mr_throttle_group_info *tg, U8 divert_value)
303 {
304 	struct mpi3mr_target *target;
305 
306 	mtx_lock_spin(&sc->target_lock);
307 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
308 		if (target->throttle_group == tg)
309 			target->io_divert = divert_value;
310 	}
311 	mtx_unlock_spin(&sc->target_lock);
312 }
313 
314 /**
315  * mpi3mr_submit_admin_cmd - Submit request to admin queue
 * @sc: Adapter instance reference
317  * @admin_req: MPI3 request
318  * @admin_req_sz: Request size
319  *
320  * Post the MPI3 request into admin request queue and
321  * inform the controller, if the queue is full return
322  * appropriate error.
323  *
324  * Return: 0 on success, non-zero on failure.
325  */
326 int mpi3mr_submit_admin_cmd(struct mpi3mr_softc *sc, void *admin_req,
327     U16 admin_req_sz)
328 {
329 	U16 areq_pi = 0, areq_ci = 0, max_entries = 0;
330 	int retval = 0;
331 	U8 *areq_entry;
332 
333 	mtx_lock_spin(&sc->admin_req_lock);
334 	areq_pi = sc->admin_req_pi;
335 	areq_ci = sc->admin_req_ci;
336 	max_entries = sc->num_admin_reqs;
337 
338 	if (sc->unrecoverable)
339 		return -EFAULT;
340 
341 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
342 					   (areq_pi == (max_entries - 1)))) {
343 		printf(IOCNAME "AdminReqQ full condition detected\n",
344 		    sc->name);
345 		retval = -EAGAIN;
346 		goto out;
347 	}
348 	areq_entry = (U8 *)sc->admin_req + (areq_pi *
349 						     MPI3MR_AREQ_FRAME_SZ);
350 	memset(areq_entry, 0, MPI3MR_AREQ_FRAME_SZ);
351 	memcpy(areq_entry, (U8 *)admin_req, admin_req_sz);
352 
353 	if (++areq_pi == max_entries)
354 		areq_pi = 0;
355 	sc->admin_req_pi = areq_pi;
356 
357 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
358 
359 out:
360 	mtx_unlock_spin(&sc->admin_req_lock);
361 	return retval;
362 }
363 
364 /**
365  * mpi3mr_check_req_qfull - Check request queue is full or not
366  * @op_req_q: Operational reply queue info
367  *
368  * Return: true when queue full, false otherwise.
369  */
370 static inline bool
371 mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue *op_req_q)
372 {
373 	U16 pi, ci, max_entries;
374 	bool is_qfull = false;
375 
376 	pi = op_req_q->pi;
377 	ci = op_req_q->ci;
378 	max_entries = op_req_q->num_reqs;
379 
380 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
381 		is_qfull = true;
382 
383 	return is_qfull;
384 }
385 
386 /**
387  * mpi3mr_submit_io - Post IO command to firmware
388  * @sc:		      Adapter instance reference
389  * @op_req_q:	      Operational Request queue reference
390  * @req:	      MPT request data
391  *
392  * This function submits IO command to firmware.
393  *
 * Return: 0 on success, non-zero on failure.
395  */
int mpi3mr_submit_io(struct mpi3mr_softc *sc,
    struct mpi3mr_op_req_queue *op_req_q, U8 *req)
{
	U16 pi, max_entries;
	int retval = 0;
	U8 *req_entry;
	U16 req_sz = sc->facts.op_req_sz;
	struct mpi3mr_irq_context *irq_ctx;

	mtx_lock_spin(&op_req_q->q_lock);

	pi = op_req_q->pi;
	max_entries = op_req_q->num_reqs;
	if (mpi3mr_check_req_qfull(op_req_q)) {
		/*
		 * Queue full: try to make room by draining completions on
		 * the paired reply queue before giving up with EBUSY.
		 */
		irq_ctx = &sc->irq_ctx[op_req_q->reply_qid - 1];
		mpi3mr_complete_io_cmd(sc, irq_ctx);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			printf(IOCNAME "OpReqQ full condition detected\n",
				sc->name);
			retval = -EBUSY;
			goto out;
		}
	}

	/*
	 * Zero the whole request slot, then copy one admin-sized frame.
	 * NOTE(review): assumes MPI3MR_AREQ_FRAME_SZ <= op_req_sz and that
	 * 'req' is at least MPI3MR_AREQ_FRAME_SZ bytes -- confirm at callers.
	 */
	req_entry = (U8 *)op_req_q->q_base + (pi * req_sz);
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_AREQ_FRAME_SZ);
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

	/* Account the in-flight I/O on the paired reply queue. */
	mpi3mr_atomic_inc(&sc->op_reply_q[op_req_q->reply_qid - 1].pend_ios);

	/* Ring the per-queue doorbell only after the frame is written. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(op_req_q->qid), op_req_q->pi);
	if (sc->mpi3mr_debug & MPI3MR_TRACE) {
		device_printf(sc->mpi3mr_dev, "IO submission: QID:%d PI:0x%x\n", op_req_q->qid, op_req_q->pi);
		mpi3mr_hexdump(req_entry, MPI3MR_AREQ_FRAME_SZ, 8);
	}

out:
	mtx_unlock_spin(&op_req_q->q_lock);
	return retval;
}
440 
441 inline void
442 mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
443 		     bus_addr_t dma_addr)
444 {
445 	Mpi3SGESimple_t *sgel = paddr;
446 
447 	sgel->Flags = flags;
448 	sgel->Length = (length);
449 	sgel->Address = (U64)dma_addr;
450 }
451 
452 void mpi3mr_build_zero_len_sge(void *paddr)
453 {
454 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
455 		MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_LIST);
456 
457 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
458 
459 }
460 
void mpi3mr_enable_interrupts(struct mpi3mr_softc *sc)
{
	/* Software gate only: mark interrupt handling as enabled. */
	sc->intr_enabled = 1;
}
465 
void mpi3mr_disable_interrupts(struct mpi3mr_softc *sc)
{
	/* Software gate only: mark interrupt handling as disabled. */
	sc->intr_enabled = 0;
}
470 
471 void
472 mpi3mr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
473 {
474 	bus_addr_t *addr;
475 
476 	addr = arg;
477 	*addr = segs[0].ds_addr;
478 }
479 
480 static int mpi3mr_delete_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
481 {
482 	Mpi3DeleteReplyQueueRequest_t delq_req;
483 	struct mpi3mr_op_reply_queue *op_reply_q;
484 	int retval = 0;
485 
486 
487 	op_reply_q = &sc->op_reply_q[qid - 1];
488 
489 	if (!op_reply_q->qid)
490 	{
491 		retval = -1;
492 		printf(IOCNAME "Issue DelRepQ: called with invalid Reply QID\n",
493 		    sc->name);
494 		goto out;
495 	}
496 
497 	memset(&delq_req, 0, sizeof(delq_req));
498 
499 	mtx_lock(&sc->init_cmds.completion.lock);
500 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
501 		retval = -1;
502 		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
503 		    sc->name);
504 		mtx_unlock(&sc->init_cmds.completion.lock);
505 		goto out;
506 	}
507 
508 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
509 		retval = -1;
510 		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
511 		    sc->name);
512 		goto out;
513 	}
514 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
515 	sc->init_cmds.is_waiting = 1;
516 	sc->init_cmds.callback = NULL;
517 	delq_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
518 	delq_req.Function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
519 	delq_req.QueueID = qid;
520 
521 	init_completion(&sc->init_cmds.completion);
522 	retval = mpi3mr_submit_admin_cmd(sc, &delq_req, sizeof(delq_req));
523 	if (retval) {
524 		printf(IOCNAME "Issue DelRepQ: Admin Post failed\n",
525 		    sc->name);
526 		goto out_unlock;
527 	}
528 	wait_for_completion_timeout(&sc->init_cmds.completion,
529 	    (MPI3MR_INTADMCMD_TIMEOUT));
530 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
531 		printf(IOCNAME "Issue DelRepQ: command timed out\n",
532 		    sc->name);
533 		mpi3mr_check_rh_fault_ioc(sc,
534 		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
535 		sc->unrecoverable = 1;
536 
537 		retval = -1;
538 		goto out_unlock;
539 	}
540 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
541 	     != MPI3_IOCSTATUS_SUCCESS ) {
542 		printf(IOCNAME "Issue DelRepQ: Failed IOCStatus(0x%04x) "
543 		    " Loginfo(0x%08x) \n" , sc->name,
544 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
545 		    sc->init_cmds.ioc_loginfo);
546 		retval = -1;
547 		goto out_unlock;
548 	}
549 	sc->irq_ctx[qid - 1].op_reply_q = NULL;
550 
551 	if (sc->op_reply_q[qid - 1].q_base_phys != 0)
552 		bus_dmamap_unload(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base_dmamap);
553 	if (sc->op_reply_q[qid - 1].q_base != NULL)
554 		bus_dmamem_free(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base, sc->op_reply_q[qid - 1].q_base_dmamap);
555 	if (sc->op_reply_q[qid - 1].q_base_tag != NULL)
556 		bus_dma_tag_destroy(sc->op_reply_q[qid - 1].q_base_tag);
557 
558 	sc->op_reply_q[qid - 1].q_base = NULL;
559 	sc->op_reply_q[qid - 1].qid = 0;
560 out_unlock:
561 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
562 	mtx_unlock(&sc->init_cmds.completion.lock);
563 out:
564 	return retval;
565 }
566 
567 /**
568  * mpi3mr_create_op_reply_queue - create operational reply queue
569  * @sc: Adapter instance reference
570  * @qid: operational reply queue id
571  *
 * Create operational reply queue by issuing MPI request
573  * through admin queue.
574  *
575  * Return:  0 on success, non-zero on failure.
576  */
static int mpi3mr_create_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
	Mpi3CreateReplyQueueRequest_t create_req;
	struct mpi3mr_op_reply_queue *op_reply_q;
	int retval = 0;
	char q_lock_name[32];

	op_reply_q = &sc->op_reply_q[qid - 1];

	/* A non-zero qid means this queue was already created. */
	if (op_reply_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateRepQ: called for duplicate qid %d\n",
		    sc->name, op_reply_q->qid);
		return retval;
	}

	op_reply_q->ci = 0;
	/* A0-revision silicon supports a smaller reply queue depth. */
	if (pci_get_revid(sc->mpi3mr_dev) == SAS4116_CHIP_REV_A0)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD_A0;
	else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;

	op_reply_q->qsz = op_reply_q->num_replies * sc->op_reply_sz;
	op_reply_q->ephase = 1;

	/*
	 * Allocate DMA-able queue memory only on first creation; across a
	 * reset q_base is retained and reused.
	 */
        if (!op_reply_q->q_base) {
		snprintf(q_lock_name, 32, "Reply Queue Lock[%d]", qid);
		mtx_init(&op_reply_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_reply_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_reply_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_reply_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Operational reply DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_reply_q->q_base_tag, (void **)&op_reply_q->q_base,
		    BUS_DMA_NOWAIT, &op_reply_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
			return (ENOMEM);
		}
		bzero(op_reply_q->q_base, op_reply_q->qsz);
		/* Single segment; the callback records the bus address. */
		bus_dmamap_load(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap, op_reply_q->q_base, op_reply_q->qsz,
		    mpi3mr_memaddr_cb, &op_reply_q->q_base_phys, 0);
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Reply queue ID: %d phys addr= %#016jx virt_addr: %pa size= %d\n",
		    qid, (uintmax_t)op_reply_q->q_base_phys, op_reply_q->q_base, op_reply_q->qsz);

		/*
		 * NOTE(review): this check appears dead -- a failed
		 * bus_dmamem_alloc() already returned ENOMEM above, so
		 * q_base cannot be NULL here.
		 */
		if (!op_reply_q->q_base)
		{
			retval = -1;
			printf(IOCNAME "CreateRepQ: memory alloc failed for qid %d\n",
			    sc->name, qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds command slot. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateRepQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.QueueID = qid;
	create_req.Flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
	create_req.MSIxIndex = sc->irq_ctx[qid - 1].msix_index;
	create_req.BaseAddress = (U64)op_reply_q->q_base_phys;
	create_req.Size = op_reply_q->num_replies;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateRepQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	  	MPI3MR_INTADMCMD_TIMEOUT);
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateRepQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump; the controller is considered lost. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateRepQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Mark the queue live and hook it to its interrupt context. */
	op_reply_q->qid = qid;
	sc->irq_ctx[qid - 1].op_reply_q = op_reply_q;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, tear down whatever DMA state was set up. */
	if (retval) {
		if (op_reply_q->q_base_phys != 0)
			bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base != NULL)
			bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_reply_q->q_base_tag);
		op_reply_q->q_base = NULL;
		op_reply_q->qid = 0;
	}

	return retval;
}
714 
715 /**
716  * mpi3mr_create_op_req_queue - create operational request queue
717  * @sc: Adapter instance reference
718  * @req_qid: operational request queue id
719  * @reply_qid: Reply queue ID
720  *
 * Create operational request queue by issuing MPI request
722  * through admin queue.
723  *
724  * Return:  0 on success, non-zero on failure.
725  */
static int mpi3mr_create_op_req_queue(struct mpi3mr_softc *sc, U16 req_qid, U8 reply_qid)
{
	Mpi3CreateRequestQueueRequest_t create_req;
	struct mpi3mr_op_req_queue *op_req_q;
	int retval = 0;
	char q_lock_name[32];

	op_req_q = &sc->op_req_q[req_qid - 1];

	/* A non-zero qid means this queue was already created. */
	if (op_req_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateReqQ: called for duplicate qid %d\n",
		    sc->name, op_req_q->qid);
		return retval;
	}

	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->num_reqs = MPI3MR_OP_REQ_Q_QD;
	op_req_q->qsz = op_req_q->num_reqs * sc->facts.op_req_sz;
	op_req_q->reply_qid = reply_qid;

	/*
	 * Allocate DMA-able queue memory only on first creation; across a
	 * reset q_base is retained and reused.
	 */
	if (!op_req_q->q_base) {
		snprintf(q_lock_name, 32, "Request Queue Lock[%d]", req_qid);
		mtx_init(&op_req_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_req_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_req_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_req_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_req_q->q_base_tag, (void **)&op_req_q->q_base,
		    BUS_DMA_NOWAIT, &op_req_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
			return (ENOMEM);
		}

		bzero(op_req_q->q_base, op_req_q->qsz);

		/* Single segment; the callback records the bus address. */
		bus_dmamap_load(op_req_q->q_base_tag, op_req_q->q_base_dmamap, op_req_q->q_base, op_req_q->qsz,
		    mpi3mr_memaddr_cb, &op_req_q->q_base_phys, 0);

		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Request QID: %d phys addr= %#016jx virt addr= %pa size= %d associated Reply QID: %d\n",
		    req_qid, (uintmax_t)op_req_q->q_base_phys, op_req_q->q_base, op_req_q->qsz, reply_qid);

		/*
		 * NOTE(review): this check appears dead -- a failed
		 * bus_dmamem_alloc() already returned ENOMEM above, so
		 * q_base cannot be NULL here.
		 */
		if (!op_req_q->q_base) {
			retval = -1;
			printf(IOCNAME "CreateReqQ: memory alloc failed for qid %d\n",
			    sc->name, req_qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds command slot. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateReqQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.QueueID = req_qid;
	create_req.Flags = 0;
	create_req.ReplyQueueID = reply_qid;
	create_req.BaseAddress = (U64)op_req_q->q_base_phys;
	create_req.Size = op_req_q->num_reqs;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateReqQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));

	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateReqQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump; the controller is considered lost. */
		mpi3mr_check_rh_fault_ioc(sc,
			MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateReqQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Mark the request queue live. */
	op_req_q->qid = req_qid;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, tear down whatever DMA state was set up. */
	if (retval) {
		if (op_req_q->q_base_phys != 0)
			bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
		if (op_req_q->q_base != NULL)
			bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
		if (op_req_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_req_q->q_base_tag);
		op_req_q->q_base = NULL;
		op_req_q->qid = 0;
	}
	return retval;
}
861 
862 /**
863  * mpi3mr_create_op_queues - create operational queues
864  * @sc: Adapter instance reference
865  *
 * Create operational queues (request queues and reply queues).
867  * Return:  0 on success, non-zero on failure.
868  */
869 static int mpi3mr_create_op_queues(struct mpi3mr_softc *sc)
870 {
871 	int retval = 0;
872 	U16 num_queues = 0, i = 0, qid;
873 
874 	num_queues = min(sc->facts.max_op_reply_q,
875 	    sc->facts.max_op_req_q);
876 	num_queues = min(num_queues, sc->msix_count);
877 
878 	/*
879 	 * During reset set the num_queues to the number of queues
880 	 * that was set before the reset.
881 	 */
882 	if (sc->num_queues)
883 		num_queues = sc->num_queues;
884 
885 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Trying to create %d Operational Q pairs\n",
886 	    num_queues);
887 
888 	if (!sc->op_req_q) {
889 		sc->op_req_q = malloc(sizeof(struct mpi3mr_op_req_queue) *
890 		    num_queues, M_MPI3MR, M_NOWAIT | M_ZERO);
891 
892 		if (!sc->op_req_q) {
893 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Request queue info\n");
894 			retval = -1;
895 			goto out_failed;
896 		}
897 	}
898 
899 	if (!sc->op_reply_q) {
900 		sc->op_reply_q = malloc(sizeof(struct mpi3mr_op_reply_queue) * num_queues,
901 			M_MPI3MR, M_NOWAIT | M_ZERO);
902 
903 		if (!sc->op_reply_q) {
904 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Reply queue info\n");
905 			retval = -1;
906 			goto out_failed;
907 		}
908 	}
909 
910 	sc->num_hosttag_op_req_q = (sc->max_host_ios + 1) / num_queues;
911 
912 	/*Operational Request and reply queue ID starts with 1*/
913 	for (i = 0; i < num_queues; i++) {
914 		qid = i + 1;
915 		if (mpi3mr_create_op_reply_queue(sc, qid)) {
916 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Reply queue %d\n",
917 			    qid);
918 			break;
919 		}
920 		if (mpi3mr_create_op_req_queue(sc, qid,
921 		    sc->op_reply_q[qid - 1].qid)) {
922 			mpi3mr_delete_op_reply_queue(sc, qid);
923 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Request queue %d\n",
924 			    qid);
925 			break;
926 		}
927 
928 	}
929 
930 	/* Not even one queue is created successfully*/
931         if (i == 0) {
932                 retval = -1;
933                 goto out_failed;
934         }
935 
936 	if (!sc->num_queues) {
937 		sc->num_queues = i;
938 	} else {
939 		if (num_queues != i) {
940 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Number of queues (%d) post reset are not same as"
941 					"queues allocated (%d) during driver init\n", i, num_queues);
942 			goto out_failed;
943 		}
944 	}
945 
946 	mpi3mr_dprint(sc, MPI3MR_INFO, "Successfully created %d Operational Queue pairs\n",
947 	    sc->num_queues);
948 	mpi3mr_dprint(sc, MPI3MR_INFO, "Request Queue QD: %d Reply queue QD: %d\n",
949 	    sc->op_req_q[0].num_reqs, sc->op_reply_q[0].num_replies);
950 
951 	return retval;
952 out_failed:
953 	if (sc->op_req_q) {
954 		free(sc->op_req_q, M_MPI3MR);
955 		sc->op_req_q = NULL;
956 	}
957 	if (sc->op_reply_q) {
958 		free(sc->op_reply_q, M_MPI3MR);
959 		sc->op_reply_q = NULL;
960 	}
961 	return retval;
962 }
963 
964 /**
965  * mpi3mr_setup_admin_qpair - Setup admin queue pairs
966  * @sc: Adapter instance reference
967  *
968  * Allocation and setup admin queues(request queues and reply queues).
969  * Return:  0 on success, non-zero on failure.
970  */
971 static int mpi3mr_setup_admin_qpair(struct mpi3mr_softc *sc)
972 {
973 	int retval = 0;
974 	U32 num_adm_entries = 0;
975 
976 	sc->admin_req_q_sz = MPI3MR_AREQQ_SIZE;
977 	sc->num_admin_reqs = sc->admin_req_q_sz / MPI3MR_AREQ_FRAME_SZ;
978 	sc->admin_req_ci = sc->admin_req_pi = 0;
979 
980 	sc->admin_reply_q_sz = MPI3MR_AREPQ_SIZE;
981 	sc->num_admin_replies = sc->admin_reply_q_sz/ MPI3MR_AREP_FRAME_SZ;
982 	sc->admin_reply_ci = 0;
983 	sc->admin_reply_ephase = 1;
984 
985 	if (!sc->admin_req) {
986 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
987 					4, 0,			/* algnmnt, boundary */
988 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
989 					BUS_SPACE_MAXADDR,	/* highaddr */
990 					NULL, NULL,		/* filter, filterarg */
991 					sc->admin_req_q_sz,	/* maxsize */
992 					1,			/* nsegments */
993 					sc->admin_req_q_sz,	/* maxsegsize */
994 					0,			/* flags */
995 					NULL, NULL,		/* lockfunc, lockarg */
996 					&sc->admin_req_tag)) {
997 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
998 			return (ENOMEM);
999 		}
1000 
1001 		if (bus_dmamem_alloc(sc->admin_req_tag, (void **)&sc->admin_req,
1002 		    BUS_DMA_NOWAIT, &sc->admin_req_dmamap)) {
1003 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
1004 			return (ENOMEM);
1005 		}
1006 		bzero(sc->admin_req, sc->admin_req_q_sz);
1007 		bus_dmamap_load(sc->admin_req_tag, sc->admin_req_dmamap, sc->admin_req, sc->admin_req_q_sz,
1008 		    mpi3mr_memaddr_cb, &sc->admin_req_phys, 0);
1009 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Req queue phys addr= %#016jx size= %d\n",
1010 		    (uintmax_t)sc->admin_req_phys, sc->admin_req_q_sz);
1011 
1012 		if (!sc->admin_req)
1013 		{
1014 			retval = -1;
1015 			printf(IOCNAME "Memory alloc for AdminReqQ: failed\n",
1016 			    sc->name);
1017 			goto out_failed;
1018 		}
1019 	}
1020 
1021 	if (!sc->admin_reply) {
1022 		mtx_init(&sc->admin_reply_lock, "Admin Reply Queue Lock", NULL, MTX_SPIN);
1023 
1024 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1025 					4, 0,			/* algnmnt, boundary */
1026 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1027 					BUS_SPACE_MAXADDR,	/* highaddr */
1028 					NULL, NULL,		/* filter, filterarg */
1029 					sc->admin_reply_q_sz,	/* maxsize */
1030 					1,			/* nsegments */
1031 					sc->admin_reply_q_sz,	/* maxsegsize */
1032 					0,			/* flags */
1033 					NULL, NULL,		/* lockfunc, lockarg */
1034 					&sc->admin_reply_tag)) {
1035 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply DMA tag\n");
1036 			return (ENOMEM);
1037 		}
1038 
1039 		if (bus_dmamem_alloc(sc->admin_reply_tag, (void **)&sc->admin_reply,
1040 		    BUS_DMA_NOWAIT, &sc->admin_reply_dmamap)) {
1041 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
1042 			return (ENOMEM);
1043 		}
1044 		bzero(sc->admin_reply, sc->admin_reply_q_sz);
1045 		bus_dmamap_load(sc->admin_reply_tag, sc->admin_reply_dmamap, sc->admin_reply, sc->admin_reply_q_sz,
1046 		    mpi3mr_memaddr_cb, &sc->admin_reply_phys, 0);
1047 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Reply queue phys addr= %#016jx size= %d\n",
1048 		    (uintmax_t)sc->admin_reply_phys, sc->admin_req_q_sz);
1049 
1050 
1051 		if (!sc->admin_reply)
1052 		{
1053 			retval = -1;
1054 			printf(IOCNAME "Memory alloc for AdminRepQ: failed\n",
1055 			    sc->name);
1056 			goto out_failed;
1057 		}
1058 	}
1059 
1060 	num_adm_entries = (sc->num_admin_replies << 16) |
1061 				(sc->num_admin_reqs);
1062 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET, num_adm_entries);
1063 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET, sc->admin_req_phys);
1064 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET, sc->admin_reply_phys);
1065 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
1066 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, sc->admin_reply_ci);
1067 
1068 	return retval;
1069 
1070 out_failed:
1071 	/* Free Admin reply*/
1072 	if (sc->admin_reply_phys)
1073 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
1074 
1075 	if (sc->admin_reply != NULL)
1076 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
1077 		    sc->admin_reply_dmamap);
1078 
1079 	if (sc->admin_reply_tag != NULL)
1080 		bus_dma_tag_destroy(sc->admin_reply_tag);
1081 
1082 	/* Free Admin request*/
1083 	if (sc->admin_req_phys)
1084 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
1085 
1086 	if (sc->admin_req != NULL)
1087 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
1088 		    sc->admin_req_dmamap);
1089 
1090 	if (sc->admin_req_tag != NULL)
1091 		bus_dma_tag_destroy(sc->admin_req_tag);
1092 
1093 	return retval;
1094 }
1095 
1096 /**
1097  * mpi3mr_print_fault_info - Display fault information
1098  * @sc: Adapter instance reference
1099  *
1100  * Display the controller fault information if there is a
1101  * controller fault.
1102  *
1103  * Return: Nothing.
1104  */
1105 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc)
1106 {
1107 	U32 ioc_status, code, code1, code2, code3;
1108 
1109 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1110 
1111 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1112 		code = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
1113 			MPI3_SYSIF_FAULT_CODE_MASK;
1114 		code1 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO0_OFFSET);
1115 		code2 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO1_OFFSET);
1116 		code3 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO2_OFFSET);
1117 		printf(IOCNAME "fault codes 0x%04x:0x%04x:0x%04x:0x%04x\n",
1118 		    sc->name, code, code1, code2, code3);
1119 	}
1120 }
1121 
1122 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc)
1123 {
1124 	U32 ioc_status, ioc_control;
1125 	U8 ready, enabled;
1126 
1127 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1128 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1129 
1130 	if(sc->unrecoverable)
1131 		return MRIOC_STATE_UNRECOVERABLE;
1132 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1133 		return MRIOC_STATE_FAULT;
1134 
1135 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1136 	enabled = (ioc_control & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1137 
1138 	if (ready && enabled)
1139 		return MRIOC_STATE_READY;
1140 	if ((!ready) && (!enabled))
1141 		return MRIOC_STATE_RESET;
1142 	if ((!ready) && (enabled))
1143 		return MRIOC_STATE_BECOMING_READY;
1144 
1145 	return MRIOC_STATE_RESET_REQUESTED;
1146 }
1147 
1148 static inline void mpi3mr_clear_resethistory(struct mpi3mr_softc *sc)
1149 {
1150         U32 ioc_status;
1151 
1152 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1153         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1154 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
1155 
1156 }
1157 
1158 /**
1159  * mpi3mr_mur_ioc - Message unit Reset handler
1160  * @sc: Adapter instance reference
1161  * @reset_reason: Reset reason code
1162  *
1163  * Issue Message unit Reset to the controller and wait for it to
1164  * be complete.
1165  *
1166  * Return: 0 on success, -1 on failure.
1167  */
1168 static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
1169 {
1170         U32 ioc_config, timeout, ioc_status;
1171         int retval = -1;
1172 
1173         mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Message Unit Reset(MUR)\n");
1174         if (sc->unrecoverable) {
1175                 mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC is unrecoverable MUR not issued\n");
1176                 return retval;
1177         }
1178         mpi3mr_clear_resethistory(sc);
1179 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
1180 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1181         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1182 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1183 
1184         timeout = MPI3MR_MUR_TIMEOUT * 10;
1185         do {
1186 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1187                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1188                         mpi3mr_clear_resethistory(sc);
1189 			ioc_config =
1190 				mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1191                         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1192                             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1193                             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
1194                                 retval = 0;
1195                                 break;
1196                         }
1197                 }
1198                 DELAY(100 * 1000);
1199         } while (--timeout);
1200 
1201 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1202 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1203 
1204         mpi3mr_dprint(sc, MPI3MR_INFO, "IOC Status/Config after %s MUR is (0x%x)/(0x%x)\n",
1205                 !retval ? "successful":"failed", ioc_status, ioc_config);
1206         return retval;
1207 }
1208 
1209 /**
1210  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1211  * @sc: Adapter instance reference
1212  *
1213  * Set Enable IOC bit in IOC configuration register and wait for
1214  * the controller to become ready.
1215  *
1216  * Return: 0 on success, appropriate error on failure.
1217  */
1218 static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc)
1219 {
1220         U32 ioc_config, timeout;
1221         enum mpi3mr_iocstate current_state;
1222 
1223 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1224         ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1225 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1226 
1227         timeout = sc->ready_timeout * 10;
1228         do {
1229                 current_state = mpi3mr_get_iocstate(sc);
1230                 if (current_state == MRIOC_STATE_READY)
1231                         return 0;
1232                 DELAY(100 * 1000);
1233         } while (--timeout);
1234 
1235         return -1;
1236 }
1237 
/* IOC state value to human-readable name mapper table,
 * consumed by mpi3mr_iocstate_name() */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_COUNT, "Count" },
};
1249 
1250 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
1251 {
1252 	int i;
1253 	char *name = NULL;
1254 
1255 	for (i = 0; i < MRIOC_STATE_COUNT; i++) {
1256 		if (mrioc_states[i].value == mrioc_state){
1257 			name = mrioc_states[i].name;
1258 			break;
1259 		}
1260 	}
1261 	return name;
1262 }
1263 
1264 /* Reset reason to name mapper structure*/
1265 static const struct {
1266 	enum mpi3mr_reset_reason value;
1267 	char *name;
1268 } mpi3mr_reset_reason_codes[] = {
1269 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
1270 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
1271 	{ MPI3MR_RESET_FROM_IOCTL, "application" },
1272 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
1273 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
1274 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
1275 	{ MPI3MR_RESET_FROM_SCSIIO_TIMEOUT, "SCSIIO timeout" },
1276 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
1277 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
1278 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
1279 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
1280 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
1281 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
1282 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
1283 	{
1284 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
1285 		"create request queue timeout"
1286 	},
1287 	{
1288 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
1289 		"create reply queue timeout"
1290 	},
1291 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
1292 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
1293 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
1294 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
1295 	{
1296 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
1297 		"component image activation timeout"
1298 	},
1299 	{
1300 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
1301 		"get package version timeout"
1302 	},
1303 	{
1304 		MPI3MR_RESET_FROM_PELABORT_TIMEOUT,
1305 		"persistent event log abort timeout"
1306 	},
1307 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
1308 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
1309 	{
1310 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
1311 		"diagnostic buffer post timeout"
1312 	},
1313 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" },
1314 	{ MPI3MR_RESET_REASON_COUNT, "Reset reason count" },
1315 };
1316 
1317 /**
1318  * mpi3mr_reset_rc_name - get reset reason code name
1319  * @reason_code: reset reason code value
1320  *
1321  * Map reset reason to an NULL terminated ASCII string
1322  *
1323  * Return: Name corresponding to reset reason value or NULL.
1324  */
1325 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1326 {
1327 	int i;
1328 	char *name = NULL;
1329 
1330 	for (i = 0; i < MPI3MR_RESET_REASON_COUNT; i++) {
1331 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1332 			name = mpi3mr_reset_reason_codes[i].name;
1333 			break;
1334 		}
1335 	}
1336 	return name;
1337 }
1338 
/* Entry count of mpi3mr_reset_types[] (including the sentinel row);
 * also used as the sentinel's reset_type value. */
#define MAX_RESET_TYPE 3
/* Reset type to name mapper structure*/
static const struct {
	U16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
	{ MAX_RESET_TYPE, "count"}
};
1349 
1350 /**
1351  * mpi3mr_reset_type_name - get reset type name
1352  * @reset_type: reset type value
1353  *
1354  * Map reset type to an NULL terminated ASCII string
1355  *
1356  * Return: Name corresponding to reset type value or NULL.
1357  */
1358 static const char *mpi3mr_reset_type_name(U16 reset_type)
1359 {
1360 	int i;
1361 	char *name = NULL;
1362 
1363 	for (i = 0; i < MAX_RESET_TYPE; i++) {
1364 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1365 			name = mpi3mr_reset_types[i].name;
1366 			break;
1367 		}
1368 	}
1369 	return name;
1370 }
1371 
1372 /**
1373  * mpi3mr_soft_reset_success - Check softreset is success or not
1374  * @ioc_status: IOC status register value
1375  * @ioc_config: IOC config register value
1376  *
1377  * Check whether the soft reset is successful or not based on
1378  * IOC status and IOC config register values.
1379  *
1380  * Return: True when the soft reset is success, false otherwise.
1381  */
1382 static inline bool
1383 mpi3mr_soft_reset_success(U32 ioc_status, U32 ioc_config)
1384 {
1385 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1386 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1387 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1388 		return true;
1389 	return false;
1390 }
1391 
1392 /**
1393  * mpi3mr_diagfault_success - Check diag fault is success or not
1394  * @sc: Adapter reference
1395  * @ioc_status: IOC status register value
1396  *
1397  * Check whether the controller hit diag reset fault code.
1398  *
1399  * Return: True when there is diag fault, false otherwise.
1400  */
1401 static inline bool mpi3mr_diagfault_success(struct mpi3mr_softc *sc,
1402 	U32 ioc_status)
1403 {
1404 	U32 fault;
1405 
1406 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1407 		return false;
1408 	fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) & MPI3_SYSIF_FAULT_CODE_MASK;
1409 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
1410 		return true;
1411 	return false;
1412 }
1413 
1414 /**
1415  * mpi3mr_issue_iocfacts - Send IOC Facts
1416  * @sc: Adapter instance reference
1417  * @facts_data: Cached IOC facts data
1418  *
1419  * Issue IOC Facts MPI request through admin queue and wait for
1420  * the completion of it or time out.
1421  *
1422  * Return: 0 on success, non-zero on failures.
1423  */
1424 static int mpi3mr_issue_iocfacts(struct mpi3mr_softc *sc,
1425     Mpi3IOCFactsData_t *facts_data)
1426 {
1427 	Mpi3IOCFactsRequest_t iocfacts_req;
1428 	bus_dma_tag_t data_tag = NULL;
1429 	bus_dmamap_t data_map = NULL;
1430 	bus_addr_t data_phys = 0;
1431 	void *data = NULL;
1432 	U32 data_len = sizeof(*facts_data);
1433 	int retval = 0;
1434 
1435 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1436                 	MPI3_SGE_FLAGS_DLAS_SYSTEM |
1437 			MPI3_SGE_FLAGS_END_OF_LIST);
1438 
1439 
1440         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1441 				4, 0,			/* algnmnt, boundary */
1442 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1443 				BUS_SPACE_MAXADDR,	/* highaddr */
1444 				NULL, NULL,		/* filter, filterarg */
1445                                 data_len,		/* maxsize */
1446                                 1,			/* nsegments */
1447                                 data_len,		/* maxsegsize */
1448                                 0,			/* flags */
1449                                 NULL, NULL,		/* lockfunc, lockarg */
1450                                 &data_tag)) {
1451 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1452 		return (ENOMEM);
1453         }
1454 
1455         if (bus_dmamem_alloc(data_tag, (void **)&data,
1456 	    BUS_DMA_NOWAIT, &data_map)) {
1457 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
1458 			__func__, __LINE__);
1459 		return (ENOMEM);
1460         }
1461 
1462         bzero(data, data_len);
1463         bus_dmamap_load(data_tag, data_map, data, data_len,
1464 	    mpi3mr_memaddr_cb, &data_phys, 0);
1465 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts data phys addr= %#016jx size= %d\n",
1466 	    __func__, __LINE__, (uintmax_t)data_phys, data_len);
1467 
1468 	if (!data)
1469 	{
1470 		retval = -1;
1471 		printf(IOCNAME "Memory alloc for IOCFactsData: failed\n",
1472 		    sc->name);
1473 		goto out;
1474 	}
1475 
1476 	mtx_lock(&sc->init_cmds.completion.lock);
1477 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
1478 
1479 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1480 		retval = -1;
1481 		printf(IOCNAME "Issue IOCFacts: Init command is in use\n",
1482 		    sc->name);
1483 		mtx_unlock(&sc->init_cmds.completion.lock);
1484 		goto out;
1485 	}
1486 
1487 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1488 	sc->init_cmds.is_waiting = 1;
1489 	sc->init_cmds.callback = NULL;
1490 	iocfacts_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
1491 	iocfacts_req.Function = MPI3_FUNCTION_IOC_FACTS;
1492 
1493 	mpi3mr_add_sg_single(&iocfacts_req.SGL, sgl_flags, data_len,
1494 	    data_phys);
1495 
1496 	init_completion(&sc->init_cmds.completion);
1497 
1498 	retval = mpi3mr_submit_admin_cmd(sc, &iocfacts_req,
1499 	    sizeof(iocfacts_req));
1500 
1501 	if (retval) {
1502 		printf(IOCNAME "Issue IOCFacts: Admin Post failed\n",
1503 		    sc->name);
1504 		goto out_unlock;
1505 	}
1506 
1507 	wait_for_completion_timeout(&sc->init_cmds.completion,
1508 	    (MPI3MR_INTADMCMD_TIMEOUT));
1509 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1510 		printf(IOCNAME "Issue IOCFacts: command timed out\n",
1511 		    sc->name);
1512 		mpi3mr_check_rh_fault_ioc(sc,
1513 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
1514 		sc->unrecoverable = 1;
1515 		retval = -1;
1516 		goto out_unlock;
1517 	}
1518 
1519 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1520 	     != MPI3_IOCSTATUS_SUCCESS ) {
1521 		printf(IOCNAME "Issue IOCFacts: Failed IOCStatus(0x%04x) "
1522 		    " Loginfo(0x%08x) \n" , sc->name,
1523 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1524 		    sc->init_cmds.ioc_loginfo);
1525 		retval = -1;
1526 		goto out_unlock;
1527 	}
1528 
1529 	memcpy(facts_data, (U8 *)data, data_len);
1530 out_unlock:
1531 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1532 	mtx_unlock(&sc->init_cmds.completion.lock);
1533 
1534 out:
1535 	if (data_phys != 0)
1536 		bus_dmamap_unload(data_tag, data_map);
1537 	if (data != NULL)
1538 		bus_dmamem_free(data_tag, data, data_map);
1539 	if (data_tag != NULL)
1540 		bus_dma_tag_destroy(data_tag);
1541 	return retval;
1542 }
1543 
1544 /**
1545  * mpi3mr_process_factsdata - Process IOC facts data
1546  * @sc: Adapter instance reference
1547  * @facts_data: Cached IOC facts data
1548  *
1549  * Convert IOC facts data into cpu endianness and cache it in
1550  * the driver .
1551  *
1552  * Return: Nothing.
1553  */
1554 static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
1555     Mpi3IOCFactsData_t *facts_data)
1556 {
1557 	int retval = 0;
1558 	U32 ioc_config, req_sz, facts_flags;
1559 
1560 	if (le16toh(facts_data->IOCFactsDataLength) !=
1561 	    (sizeof(*facts_data) / 4)) {
1562 		mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data length mismatch "
1563 		    " driver_sz(%ld) firmware_sz(%d) \n",
1564 		    sizeof(*facts_data),
1565 		    facts_data->IOCFactsDataLength);
1566 	}
1567 
1568 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1569         req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
1570                   MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
1571 
1572 	if (facts_data->IOCRequestFrameSize != (req_sz/4)) {
1573 		 mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data reqFrameSize mismatch "
1574 		    " hw_size(%d) firmware_sz(%d) \n" , req_sz/4,
1575 		    facts_data->IOCRequestFrameSize);
1576 	}
1577 
1578 	memset(&sc->facts, 0, sizeof(sc->facts));
1579 
1580 	facts_flags = le32toh(facts_data->Flags);
1581 	sc->facts.op_req_sz = req_sz;
1582 	sc->op_reply_sz = 1 << ((ioc_config &
1583                                   MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
1584                                   MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
1585 
1586 	sc->facts.ioc_num = facts_data->IOCNumber;
1587         sc->facts.who_init = facts_data->WhoInit;
1588         sc->facts.max_msix_vectors = facts_data->MaxMSIxVectors;
1589 	sc->facts.personality = (facts_flags &
1590 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
1591 	sc->facts.dma_mask = (facts_flags &
1592 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
1593 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
1594         sc->facts.protocol_flags = facts_data->ProtocolFlags;
1595         sc->facts.mpi_version = (facts_data->MPIVersion.Word);
1596         sc->facts.max_reqs = (facts_data->MaxOutstandingRequests);
1597         sc->facts.product_id = (facts_data->ProductID);
1598 	sc->facts.reply_sz = (facts_data->ReplyFrameSize) * 4;
1599         sc->facts.exceptions = (facts_data->IOCExceptions);
1600         sc->facts.max_perids = (facts_data->MaxPersistentID);
1601         sc->facts.max_vds = (facts_data->MaxVDs);
1602         sc->facts.max_hpds = (facts_data->MaxHostPDs);
1603         sc->facts.max_advhpds = (facts_data->MaxAdvHostPDs);
1604         sc->facts.max_raidpds = (facts_data->MaxRAIDPDs);
1605         sc->facts.max_nvme = (facts_data->MaxNVMe);
1606         sc->facts.max_pcieswitches =
1607                 (facts_data->MaxPCIeSwitches);
1608         sc->facts.max_sasexpanders =
1609                 (facts_data->MaxSASExpanders);
1610         sc->facts.max_sasinitiators =
1611                 (facts_data->MaxSASInitiators);
1612         sc->facts.max_enclosures = (facts_data->MaxEnclosures);
1613         sc->facts.min_devhandle = (facts_data->MinDevHandle);
1614         sc->facts.max_devhandle = (facts_data->MaxDevHandle);
1615 	sc->facts.max_op_req_q =
1616                 (facts_data->MaxOperationalRequestQueues);
1617 	sc->facts.max_op_reply_q =
1618                 (facts_data->MaxOperationalReplyQueues);
1619         sc->facts.ioc_capabilities =
1620                 (facts_data->IOCCapabilities);
1621         sc->facts.fw_ver.build_num =
1622                 (facts_data->FWVersion.BuildNum);
1623         sc->facts.fw_ver.cust_id =
1624                 (facts_data->FWVersion.CustomerID);
1625         sc->facts.fw_ver.ph_minor = facts_data->FWVersion.PhaseMinor;
1626         sc->facts.fw_ver.ph_major = facts_data->FWVersion.PhaseMajor;
1627         sc->facts.fw_ver.gen_minor = facts_data->FWVersion.GenMinor;
1628         sc->facts.fw_ver.gen_major = facts_data->FWVersion.GenMajor;
1629         sc->max_msix_vectors = min(sc->max_msix_vectors,
1630             sc->facts.max_msix_vectors);
1631         sc->facts.sge_mod_mask = facts_data->SGEModifierMask;
1632         sc->facts.sge_mod_value = facts_data->SGEModifierValue;
1633         sc->facts.sge_mod_shift = facts_data->SGEModifierShift;
1634         sc->facts.shutdown_timeout =
1635                 (facts_data->ShutdownTimeout);
1636 	sc->facts.max_dev_per_tg = facts_data->MaxDevicesPerThrottleGroup;
1637 	sc->facts.io_throttle_data_length =
1638 	    facts_data->IOThrottleDataLength;
1639 	sc->facts.max_io_throttle_group =
1640 	    facts_data->MaxIOThrottleGroup;
1641 	sc->facts.io_throttle_low = facts_data->IOThrottleLow;
1642 	sc->facts.io_throttle_high = facts_data->IOThrottleHigh;
1643 
1644 	/*Store in 512b block count*/
1645 	if (sc->facts.io_throttle_data_length)
1646 		sc->io_throttle_data_length =
1647 		    (sc->facts.io_throttle_data_length * 2 * 4);
1648 	else
1649 		/* set the length to 1MB + 1K to disable throttle*/
1650 		sc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
1651 
1652 	sc->io_throttle_high = (sc->facts.io_throttle_high * 2 * 1024);
1653 	sc->io_throttle_low = (sc->facts.io_throttle_low * 2 * 1024);
1654 
1655 	mpi3mr_dprint(sc, MPI3MR_INFO, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),"
1656             "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
1657 	    sc->facts.ioc_num, sc->facts.max_op_req_q,
1658 	    sc->facts.max_op_reply_q, sc->facts.max_devhandle,
1659             sc->facts.max_reqs, sc->facts.min_devhandle,
1660             sc->facts.max_pds, sc->facts.max_msix_vectors,
1661             sc->facts.max_perids);
1662         mpi3mr_dprint(sc, MPI3MR_INFO, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x\n",
1663             sc->facts.sge_mod_mask, sc->facts.sge_mod_value,
1664             sc->facts.sge_mod_shift);
1665 	mpi3mr_dprint(sc, MPI3MR_INFO,
1666 	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d), io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
1667 	    sc->facts.max_dev_per_tg, sc->facts.max_io_throttle_group,
1668 	    sc->facts.io_throttle_data_length * 4,
1669 	    sc->facts.io_throttle_high, sc->facts.io_throttle_low);
1670 
1671 	sc->max_host_ios = sc->facts.max_reqs -
1672 	    (MPI3MR_INTERNALCMDS_RESVD + 1);
1673 
1674 	return retval;
1675 }
1676 
1677 static inline void mpi3mr_setup_reply_free_queues(struct mpi3mr_softc *sc)
1678 {
1679 	int i;
1680 	bus_addr_t phys_addr;
1681 
1682 	/* initialize Reply buffer Queue */
1683 	for (i = 0, phys_addr = sc->reply_buf_phys;
1684 	    i < sc->num_reply_bufs; i++, phys_addr += sc->reply_sz)
1685 		sc->reply_free_q[i] = phys_addr;
1686 	sc->reply_free_q[i] = (0);
1687 
1688 	/* initialize Sense Buffer Queue */
1689 	for (i = 0, phys_addr = sc->sense_buf_phys;
1690 	    i < sc->num_sense_bufs; i++, phys_addr += MPI3MR_SENSEBUF_SZ)
1691 		sc->sense_buf_q[i] = phys_addr;
1692 	sc->sense_buf_q[i] = (0);
1693 
1694 }
1695 
1696 static int mpi3mr_reply_dma_alloc(struct mpi3mr_softc *sc)
1697 {
1698 	U32 sz;
1699 
1700 	sc->num_reply_bufs = sc->facts.max_reqs + MPI3MR_NUM_EVTREPLIES;
1701 	sc->reply_free_q_sz = sc->num_reply_bufs + 1;
1702 	sc->num_sense_bufs = sc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
1703 	sc->sense_buf_q_sz = sc->num_sense_bufs + 1;
1704 
1705 	sz = sc->num_reply_bufs * sc->reply_sz;
1706 
1707 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1708 				16, 0,			/* algnmnt, boundary */
1709 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1710 				BUS_SPACE_MAXADDR,	/* highaddr */
1711 				NULL, NULL,		/* filter, filterarg */
1712                                 sz,			/* maxsize */
1713                                 1,			/* nsegments */
1714                                 sz,			/* maxsegsize */
1715                                 0,			/* flags */
1716                                 NULL, NULL,		/* lockfunc, lockarg */
1717                                 &sc->reply_buf_tag)) {
1718 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1719 		return (ENOMEM);
1720         }
1721 
1722 	if (bus_dmamem_alloc(sc->reply_buf_tag, (void **)&sc->reply_buf,
1723 	    BUS_DMA_NOWAIT, &sc->reply_buf_dmamap)) {
1724 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1725 			__func__, __LINE__);
1726 		return (ENOMEM);
1727         }
1728 
1729 	bzero(sc->reply_buf, sz);
1730         bus_dmamap_load(sc->reply_buf_tag, sc->reply_buf_dmamap, sc->reply_buf, sz,
1731 	    mpi3mr_memaddr_cb, &sc->reply_buf_phys, 0);
1732 
1733 	sc->reply_buf_dma_min_address = sc->reply_buf_phys;
1734 	sc->reply_buf_dma_max_address = sc->reply_buf_phys + sz;
1735 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply buf (0x%p): depth(%d), frame_size(%d), "
1736 	    "pool_size(%d kB), reply_buf_dma(0x%llx)\n",
1737 	    sc->reply_buf, sc->num_reply_bufs, sc->reply_sz,
1738 	    (sz / 1024), (unsigned long long)sc->reply_buf_phys);
1739 
1740 	/* reply free queue, 8 byte align */
1741 	sz = sc->reply_free_q_sz * 8;
1742 
1743         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1744 				8, 0,			/* algnmnt, boundary */
1745 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1746 				BUS_SPACE_MAXADDR,	/* highaddr */
1747 				NULL, NULL,		/* filter, filterarg */
1748                                 sz,			/* maxsize */
1749                                 1,			/* nsegments */
1750                                 sz,			/* maxsegsize */
1751                                 0,			/* flags */
1752                                 NULL, NULL,		/* lockfunc, lockarg */
1753                                 &sc->reply_free_q_tag)) {
1754 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply free queue DMA tag\n");
1755 		return (ENOMEM);
1756         }
1757 
1758         if (bus_dmamem_alloc(sc->reply_free_q_tag, (void **)&sc->reply_free_q,
1759 	    BUS_DMA_NOWAIT, &sc->reply_free_q_dmamap)) {
1760 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1761 			__func__, __LINE__);
1762 		return (ENOMEM);
1763         }
1764 
1765 	bzero(sc->reply_free_q, sz);
1766         bus_dmamap_load(sc->reply_free_q_tag, sc->reply_free_q_dmamap, sc->reply_free_q, sz,
1767 	    mpi3mr_memaddr_cb, &sc->reply_free_q_phys, 0);
1768 
1769 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply_free_q (0x%p): depth(%d), frame_size(%d), "
1770 	    "pool_size(%d kB), reply_free_q_dma(0x%llx)\n",
1771 	    sc->reply_free_q, sc->reply_free_q_sz, 8, (sz / 1024),
1772 	    (unsigned long long)sc->reply_free_q_phys);
1773 
1774 	/* sense buffer pool,  4 byte align */
1775 	sz = sc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
1776 
1777         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1778 				4, 0,			/* algnmnt, boundary */
1779 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1780 				BUS_SPACE_MAXADDR,	/* highaddr */
1781 				NULL, NULL,		/* filter, filterarg */
1782                                 sz,			/* maxsize */
1783                                 1,			/* nsegments */
1784                                 sz,			/* maxsegsize */
1785                                 0,			/* flags */
1786                                 NULL, NULL,		/* lockfunc, lockarg */
1787                                 &sc->sense_buf_tag)) {
1788 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer DMA tag\n");
1789 		return (ENOMEM);
1790         }
1791 
1792 	if (bus_dmamem_alloc(sc->sense_buf_tag, (void **)&sc->sense_buf,
1793 	    BUS_DMA_NOWAIT, &sc->sense_buf_dmamap)) {
1794 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1795 			__func__, __LINE__);
1796 		return (ENOMEM);
1797         }
1798 
1799 	bzero(sc->sense_buf, sz);
1800         bus_dmamap_load(sc->sense_buf_tag, sc->sense_buf_dmamap, sc->sense_buf, sz,
1801 	    mpi3mr_memaddr_cb, &sc->sense_buf_phys, 0);
1802 
1803 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf (0x%p): depth(%d), frame_size(%d), "
1804 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1805 	    sc->sense_buf, sc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
1806 	    (sz / 1024), (unsigned long long)sc->sense_buf_phys);
1807 
1808 	/* sense buffer queue, 8 byte align */
1809 	sz = sc->sense_buf_q_sz * 8;
1810 
1811         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1812 				8, 0,			/* algnmnt, boundary */
1813 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1814 				BUS_SPACE_MAXADDR,	/* highaddr */
1815 				NULL, NULL,		/* filter, filterarg */
1816                                 sz,			/* maxsize */
1817                                 1,			/* nsegments */
1818                                 sz,			/* maxsegsize */
1819                                 0,			/* flags */
1820                                 NULL, NULL,		/* lockfunc, lockarg */
1821                                 &sc->sense_buf_q_tag)) {
1822 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer Queue DMA tag\n");
1823 		return (ENOMEM);
1824         }
1825 
1826 	if (bus_dmamem_alloc(sc->sense_buf_q_tag, (void **)&sc->sense_buf_q,
1827 	    BUS_DMA_NOWAIT, &sc->sense_buf_q_dmamap)) {
1828 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1829 			__func__, __LINE__);
1830 		return (ENOMEM);
1831         }
1832 
1833 	bzero(sc->sense_buf_q, sz);
1834         bus_dmamap_load(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap, sc->sense_buf_q, sz,
1835 	    mpi3mr_memaddr_cb, &sc->sense_buf_q_phys, 0);
1836 
1837 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf_q (0x%p): depth(%d), frame_size(%d), "
1838 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1839 	    sc->sense_buf_q, sc->sense_buf_q_sz, 8, (sz / 1024),
1840 	    (unsigned long long)sc->sense_buf_q_phys);
1841 
1842 	return 0;
1843 }
1844 
1845 static int mpi3mr_reply_alloc(struct mpi3mr_softc *sc)
1846 {
1847 	int retval = 0;
1848 	U32 i;
1849 
1850 	if (sc->init_cmds.reply)
1851 		goto post_reply_sbuf;
1852 
1853 	sc->init_cmds.reply = malloc(sc->reply_sz,
1854 		M_MPI3MR, M_NOWAIT | M_ZERO);
1855 
1856 	if (!sc->init_cmds.reply) {
1857 		printf(IOCNAME "Cannot allocate memory for init_cmds.reply\n",
1858 		    sc->name);
1859 		goto out_failed;
1860 	}
1861 
1862 	sc->ioctl_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
1863 	if (!sc->ioctl_cmds.reply) {
1864 		printf(IOCNAME "Cannot allocate memory for ioctl_cmds.reply\n",
1865 		    sc->name);
1866 		goto out_failed;
1867 	}
1868 
1869 	sc->host_tm_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
1870 	if (!sc->host_tm_cmds.reply) {
1871 		printf(IOCNAME "Cannot allocate memory for host_tm.reply\n",
1872 		    sc->name);
1873 		goto out_failed;
1874 	}
1875 	for (i=0; i<MPI3MR_NUM_DEVRMCMD; i++) {
1876 		sc->dev_rmhs_cmds[i].reply = malloc(sc->reply_sz,
1877 		    M_MPI3MR, M_NOWAIT | M_ZERO);
1878 		if (!sc->dev_rmhs_cmds[i].reply) {
1879 			printf(IOCNAME "Cannot allocate memory for"
1880 			    " dev_rmhs_cmd[%d].reply\n",
1881 			    sc->name, i);
1882 			goto out_failed;
1883 		}
1884 	}
1885 
1886 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
1887 		sc->evtack_cmds[i].reply = malloc(sc->reply_sz,
1888 			M_MPI3MR, M_NOWAIT | M_ZERO);
1889 		if (!sc->evtack_cmds[i].reply)
1890 			goto out_failed;
1891 	}
1892 
1893 	sc->dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
1894 
1895 	sc->removepend_bitmap = malloc(sc->dev_handle_bitmap_sz,
1896 	    M_MPI3MR, M_NOWAIT | M_ZERO);
1897 	if (!sc->removepend_bitmap) {
1898 		printf(IOCNAME "Cannot alloc memory for remove pend bitmap\n",
1899 		    sc->name);
1900 		goto out_failed;
1901 	}
1902 
1903 	sc->devrem_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_DEVRMCMD, 8);
1904 	sc->devrem_bitmap = malloc(sc->devrem_bitmap_sz,
1905 	    M_MPI3MR, M_NOWAIT | M_ZERO);
1906 	if (!sc->devrem_bitmap) {
1907 		printf(IOCNAME "Cannot alloc memory for dev remove bitmap\n",
1908 		    sc->name);
1909 		goto out_failed;
1910 	}
1911 
1912 	sc->evtack_cmds_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_EVTACKCMD, 8);
1913 
1914 	sc->evtack_cmds_bitmap = malloc(sc->evtack_cmds_bitmap_sz,
1915 		M_MPI3MR, M_NOWAIT | M_ZERO);
1916 	if (!sc->evtack_cmds_bitmap)
1917 		goto out_failed;
1918 
1919 	if (mpi3mr_reply_dma_alloc(sc)) {
1920 		printf(IOCNAME "func:%s line:%d DMA memory allocation failed\n",
1921 		    sc->name, __func__, __LINE__);
1922 		goto out_failed;
1923 	}
1924 
1925 post_reply_sbuf:
1926 	mpi3mr_setup_reply_free_queues(sc);
1927 	return retval;
1928 out_failed:
1929 	mpi3mr_cleanup_interrupts(sc);
1930 	mpi3mr_free_mem(sc);
1931 	retval = -1;
1932 	return retval;
1933 }
1934 
1935 static void
1936 mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc *sc)
1937 {
1938 	int retval = 0;
1939 	void *fw_pkg_ver = NULL;
1940 	bus_dma_tag_t fw_pkg_ver_tag;
1941 	bus_dmamap_t fw_pkg_ver_map;
1942 	bus_addr_t fw_pkg_ver_dma;
1943 	Mpi3CIUploadRequest_t ci_upload;
1944 	Mpi3ComponentImageHeader_t *ci_header;
1945 	U32 fw_pkg_ver_len = sizeof(*ci_header);
1946 	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
1947 
1948 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1949 				4, 0,			/* algnmnt, boundary */
1950 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1951 				BUS_SPACE_MAXADDR,	/* highaddr */
1952 				NULL, NULL,		/* filter, filterarg */
1953 				fw_pkg_ver_len,		/* maxsize */
1954 				1,			/* nsegments */
1955 				fw_pkg_ver_len,		/* maxsegsize */
1956 				0,			/* flags */
1957 				NULL, NULL,		/* lockfunc, lockarg */
1958 				&fw_pkg_ver_tag)) {
1959 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate fw package version request DMA tag\n");
1960 		return;
1961 	}
1962 
1963 	if (bus_dmamem_alloc(fw_pkg_ver_tag, (void **)&fw_pkg_ver, BUS_DMA_NOWAIT, &fw_pkg_ver_map)) {
1964 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d fw package version DMA mem alloc failed\n",
1965 			      __func__, __LINE__);
1966 		return;
1967 	}
1968 
1969 	bzero(fw_pkg_ver, fw_pkg_ver_len);
1970 
1971 	bus_dmamap_load(fw_pkg_ver_tag, fw_pkg_ver_map, fw_pkg_ver, fw_pkg_ver_len, mpi3mr_memaddr_cb, &fw_pkg_ver_dma, 0);
1972 
1973 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d fw package version phys addr= %#016jx size= %d\n",
1974 		      __func__, __LINE__, (uintmax_t)fw_pkg_ver_dma, fw_pkg_ver_len);
1975 
1976 	if (!fw_pkg_ver) {
1977 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Memory alloc for fw package version failed\n");
1978 		goto out;
1979 	}
1980 
1981 	memset(&ci_upload, 0, sizeof(ci_upload));
1982 	mtx_lock(&sc->init_cmds.completion.lock);
1983 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1984 		mpi3mr_dprint(sc, MPI3MR_INFO,"Issue CI Header Upload: command is in use\n");
1985 		mtx_unlock(&sc->init_cmds.completion.lock);
1986 		goto out;
1987 	}
1988 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1989 	sc->init_cmds.is_waiting = 1;
1990 	sc->init_cmds.callback = NULL;
1991 	ci_upload.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
1992 	ci_upload.Function = MPI3_FUNCTION_CI_UPLOAD;
1993 	ci_upload.MsgFlags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
1994 	ci_upload.ImageOffset = MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET;
1995 	ci_upload.SegmentSize = MPI3_IMAGE_HEADER_SIZE;
1996 
1997 	mpi3mr_add_sg_single(&ci_upload.SGL, sgl_flags, fw_pkg_ver_len,
1998 	    fw_pkg_ver_dma);
1999 
2000 	init_completion(&sc->init_cmds.completion);
2001 	if ((retval = mpi3mr_submit_admin_cmd(sc, &ci_upload, sizeof(ci_upload)))) {
2002 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: Admin Post failed\n");
2003 		goto out_unlock;
2004 	}
2005 	wait_for_completion_timeout(&sc->init_cmds.completion,
2006 		(MPI3MR_INTADMCMD_TIMEOUT));
2007 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2008 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: command timed out\n");
2009 		sc->init_cmds.is_waiting = 0;
2010 		if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
2011 			mpi3mr_check_rh_fault_ioc(sc,
2012 				MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2013 		goto out_unlock;
2014 	}
2015 	if ((GET_IOC_STATUS(sc->init_cmds.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) {
2016 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2017 			      "Issue CI Header Upload: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
2018 			      GET_IOC_STATUS(sc->init_cmds.ioc_status), sc->init_cmds.ioc_loginfo);
2019 		goto out_unlock;
2020 	}
2021 
2022 	ci_header = (Mpi3ComponentImageHeader_t *) fw_pkg_ver;
2023 	mpi3mr_dprint(sc, MPI3MR_XINFO,
2024 		      "Issue CI Header Upload:EnvVariableOffset(0x%x) \
2025 		      HeaderSize(0x%x) Signature1(0x%x)\n",
2026 		      ci_header->EnvironmentVariableOffset,
2027 		      ci_header->HeaderSize,
2028 		      ci_header->Signature1);
2029 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Package Version: %02d.%02d.%02d.%02d\n",
2030 		      ci_header->ComponentImageVersion.GenMajor,
2031 		      ci_header->ComponentImageVersion.GenMinor,
2032 		      ci_header->ComponentImageVersion.PhaseMajor,
2033 		      ci_header->ComponentImageVersion.PhaseMinor);
2034 out_unlock:
2035 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2036 	mtx_unlock(&sc->init_cmds.completion.lock);
2037 
2038 out:
2039 	if (fw_pkg_ver_dma != 0)
2040 		bus_dmamap_unload(fw_pkg_ver_tag, fw_pkg_ver_map);
2041 	if (fw_pkg_ver)
2042 		bus_dmamem_free(fw_pkg_ver_tag, fw_pkg_ver, fw_pkg_ver_map);
2043 	if (fw_pkg_ver_tag)
2044 		bus_dma_tag_destroy(fw_pkg_ver_tag);
2045 
2046 }
2047 
2048 /**
2049  * mpi3mr_issue_iocinit - Send IOC Init
2050  * @sc: Adapter instance reference
2051  *
2052  * Issue IOC Init MPI request through admin queue and wait for
2053  * the completion of it or time out.
2054  *
2055  * Return: 0 on success, non-zero on failures.
2056  */
2057 static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
2058 {
2059 	Mpi3IOCInitRequest_t iocinit_req;
2060 	Mpi3DriverInfoLayout_t *drvr_info = NULL;
2061 	bus_dma_tag_t drvr_info_tag;
2062 	bus_dmamap_t drvr_info_map;
2063 	bus_addr_t drvr_info_phys;
2064 	U32 drvr_info_len = sizeof(*drvr_info);
2065 	int retval = 0;
2066 	struct timeval now;
2067 	uint64_t time_in_msec;
2068 
2069 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2070 				4, 0,			/* algnmnt, boundary */
2071 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2072 				BUS_SPACE_MAXADDR,	/* highaddr */
2073 				NULL, NULL,		/* filter, filterarg */
2074                                 drvr_info_len,		/* maxsize */
2075                                 1,			/* nsegments */
2076                                 drvr_info_len,		/* maxsegsize */
2077                                 0,			/* flags */
2078                                 NULL, NULL,		/* lockfunc, lockarg */
2079                                 &drvr_info_tag)) {
2080 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
2081 		return (ENOMEM);
2082         }
2083 
2084 	if (bus_dmamem_alloc(drvr_info_tag, (void **)&drvr_info,
2085 	    BUS_DMA_NOWAIT, &drvr_info_map)) {
2086 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
2087 			__func__, __LINE__);
2088 		return (ENOMEM);
2089         }
2090 
2091 	bzero(drvr_info, drvr_info_len);
2092         bus_dmamap_load(drvr_info_tag, drvr_info_map, drvr_info, drvr_info_len,
2093 	    mpi3mr_memaddr_cb, &drvr_info_phys, 0);
2094 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts drvr_info phys addr= %#016jx size= %d\n",
2095 	    __func__, __LINE__, (uintmax_t)drvr_info_phys, drvr_info_len);
2096 
2097 	if (!drvr_info)
2098 	{
2099 		retval = -1;
2100 		printf(IOCNAME "Memory alloc for Driver Info failed\n",
2101 		    sc->name);
2102 		goto out;
2103 	}
2104 	drvr_info->InformationLength = (drvr_info_len);
2105 	strcpy(drvr_info->DriverSignature, "Broadcom");
2106 	strcpy(drvr_info->OsName, "FreeBSD");
2107 	strcpy(drvr_info->OsVersion, fmt_os_ver);
2108 	strcpy(drvr_info->DriverName, MPI3MR_DRIVER_NAME);
2109 	strcpy(drvr_info->DriverVersion, MPI3MR_DRIVER_VERSION);
2110 	strcpy(drvr_info->DriverReleaseDate, MPI3MR_DRIVER_RELDATE);
2111 	drvr_info->DriverCapabilities = 0;
2112 	memcpy((U8 *)&sc->driver_info, (U8 *)drvr_info, sizeof(sc->driver_info));
2113 
2114 	memset(&iocinit_req, 0, sizeof(iocinit_req));
2115 	mtx_lock(&sc->init_cmds.completion.lock);
2116 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2117 		retval = -1;
2118 		printf(IOCNAME "Issue IOCInit: Init command is in use\n",
2119 		    sc->name);
2120 		mtx_unlock(&sc->init_cmds.completion.lock);
2121 		goto out;
2122 	}
2123 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2124 	sc->init_cmds.is_waiting = 1;
2125 	sc->init_cmds.callback = NULL;
2126         iocinit_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
2127         iocinit_req.Function = MPI3_FUNCTION_IOC_INIT;
2128         iocinit_req.MPIVersion.Struct.Dev = MPI3_VERSION_DEV;
2129         iocinit_req.MPIVersion.Struct.Unit = MPI3_VERSION_UNIT;
2130         iocinit_req.MPIVersion.Struct.Major = MPI3_VERSION_MAJOR;
2131         iocinit_req.MPIVersion.Struct.Minor = MPI3_VERSION_MINOR;
2132         iocinit_req.WhoInit = MPI3_WHOINIT_HOST_DRIVER;
2133         iocinit_req.ReplyFreeQueueDepth = sc->reply_free_q_sz;
2134         iocinit_req.ReplyFreeQueueAddress =
2135                 sc->reply_free_q_phys;
2136         iocinit_req.SenseBufferLength = MPI3MR_SENSEBUF_SZ;
2137         iocinit_req.SenseBufferFreeQueueDepth =
2138                 sc->sense_buf_q_sz;
2139         iocinit_req.SenseBufferFreeQueueAddress =
2140                 sc->sense_buf_q_phys;
2141         iocinit_req.DriverInformationAddress = drvr_info_phys;
2142 
2143 	getmicrotime(&now);
2144 	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
2145 	iocinit_req.TimeStamp = htole64(time_in_msec);
2146 
2147 	init_completion(&sc->init_cmds.completion);
2148 	retval = mpi3mr_submit_admin_cmd(sc, &iocinit_req,
2149 	    sizeof(iocinit_req));
2150 
2151 	if (retval) {
2152 		printf(IOCNAME "Issue IOCInit: Admin Post failed\n",
2153 		    sc->name);
2154 		goto out_unlock;
2155 	}
2156 
2157 	wait_for_completion_timeout(&sc->init_cmds.completion,
2158 	    (MPI3MR_INTADMCMD_TIMEOUT));
2159 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2160 		printf(IOCNAME "Issue IOCInit: command timed out\n",
2161 		    sc->name);
2162 		mpi3mr_check_rh_fault_ioc(sc,
2163 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2164 		sc->unrecoverable = 1;
2165 		retval = -1;
2166 		goto out_unlock;
2167 	}
2168 
2169 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2170 	     != MPI3_IOCSTATUS_SUCCESS ) {
2171 		printf(IOCNAME "Issue IOCInit: Failed IOCStatus(0x%04x) "
2172 		    " Loginfo(0x%08x) \n" , sc->name,
2173 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2174 		    sc->init_cmds.ioc_loginfo);
2175 		retval = -1;
2176 		goto out_unlock;
2177 	}
2178 
2179 out_unlock:
2180 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2181 	mtx_unlock(&sc->init_cmds.completion.lock);
2182 
2183 out:
2184 	if (drvr_info_phys != 0)
2185 		bus_dmamap_unload(drvr_info_tag, drvr_info_map);
2186 	if (drvr_info != NULL)
2187 		bus_dmamem_free(drvr_info_tag, drvr_info, drvr_info_map);
2188 	if (drvr_info_tag != NULL)
2189 		bus_dma_tag_destroy(drvr_info_tag);
2190 	return retval;
2191 }
2192 
2193 static void
2194 mpi3mr_display_ioc_info(struct mpi3mr_softc *sc)
2195 {
2196         int i = 0;
2197         char personality[16];
2198         struct mpi3mr_compimg_ver *fwver = &sc->facts.fw_ver;
2199 
2200         switch (sc->facts.personality) {
2201         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
2202                 strcpy(personality, "Enhanced HBA");
2203                 break;
2204         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
2205                 strcpy(personality, "RAID");
2206                 break;
2207         default:
2208                 strcpy(personality, "Unknown");
2209                 break;
2210         }
2211 
2212 	mpi3mr_dprint(sc, MPI3MR_INFO, "Current Personality: %s\n", personality);
2213 
2214 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Version: %d.%d.%d.%d.%05d-%05d\n",
2215 		      fwver->gen_major, fwver->gen_minor, fwver->ph_major,
2216 		      fwver->ph_minor, fwver->cust_id, fwver->build_num);
2217 
2218         mpi3mr_dprint(sc, MPI3MR_INFO, "Protocol=(");
2219 
2220         if (sc->facts.protocol_flags &
2221             MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2222                 printf("Initiator");
2223                 i++;
2224         }
2225 
2226         if (sc->facts.protocol_flags &
2227             MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2228                 printf("%sTarget", i ? "," : "");
2229                 i++;
2230         }
2231 
2232         if (sc->facts.protocol_flags &
2233             MPI3_IOCFACTS_PROTOCOL_NVME) {
2234                 printf("%sNVMe attachment", i ? "," : "");
2235                 i++;
2236         }
2237         i = 0;
2238         printf("), ");
2239         printf("Capabilities=(");
2240 
2241         if (sc->facts.ioc_capabilities &
2242             MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE) {
2243                 printf("RAID");
2244                 i++;
2245         }
2246 
2247         printf(")\n");
2248 }
2249 
2250 /**
2251  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2252  * @sc: Adapter instance reference
2253  * @event: MPI event ID
2254  *
2255  * Un mask the specific event by resetting the event_mask
2256  * bitmap.
2257  *
2258  * Return: None.
2259  */
2260 static void mpi3mr_unmask_events(struct mpi3mr_softc *sc, U16 event)
2261 {
2262 	U32 desired_event;
2263 
2264 	if (event >= 128)
2265 		return;
2266 
2267 	desired_event = (1 << (event % 32));
2268 
2269 	if (event < 32)
2270 		sc->event_masks[0] &= ~desired_event;
2271 	else if (event < 64)
2272 		sc->event_masks[1] &= ~desired_event;
2273 	else if (event < 96)
2274 		sc->event_masks[2] &= ~desired_event;
2275 	else if (event < 128)
2276 		sc->event_masks[3] &= ~desired_event;
2277 }
2278 
2279 static void mpi3mr_set_events_mask(struct mpi3mr_softc *sc)
2280 {
2281 	int i;
2282 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2283 		sc->event_masks[i] = -1;
2284 
2285         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_ADDED);
2286         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_INFO_CHANGED);
2287         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
2288 
2289         mpi3mr_unmask_events(sc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
2290 
2291         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
2292         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DISCOVERY);
2293         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
2294         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
2295 
2296         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
2297         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_ENUMERATION);
2298 
2299         mpi3mr_unmask_events(sc, MPI3_EVENT_PREPARE_FOR_RESET);
2300         mpi3mr_unmask_events(sc, MPI3_EVENT_CABLE_MGMT);
2301         mpi3mr_unmask_events(sc, MPI3_EVENT_ENERGY_PACK_CHANGE);
2302 }
2303 
2304 /**
2305  * mpi3mr_issue_event_notification - Send event notification
2306  * @sc: Adapter instance reference
2307  *
2308  * Issue event notification MPI request through admin queue and
2309  * wait for the completion of it or time out.
2310  *
2311  * Return: 0 on success, non-zero on failures.
2312  */
int mpi3mr_issue_event_notification(struct mpi3mr_softc *sc)
{
	Mpi3EventNotificationRequest_t evtnotify_req;
	int retval = 0;
	U8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single shared slot; serialize access to it. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtNotify: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	evtnotify_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.Function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/*
	 * Copy the driver's mask words (built by mpi3mr_set_events_mask /
	 * mpi3mr_unmask_events) into the request; a set bit masks an event.
	 */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.EventMasks[i] =
		    (sc->event_masks[i]);
	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtnotify_req,
	    sizeof(evtnotify_req));
	if (retval) {
		printf(IOCNAME "Issue EvtNotify: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/*
	 * The completion is polled rather than slept on here (unlike the
	 * wait_for_completion_timeout() used by sibling init commands).
	 */
	poll_for_command_completion(sc,
				    &sc->init_cmds,
				    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtNotify: command timed out\n",
		    sc->name);
		/* On timeout, check the IOC for a fault/reset condition. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtNotify: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the shared init_cmds slot for the next internal command. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2374 
2375 int
2376 mpi3mr_register_events(struct mpi3mr_softc *sc)
2377 {
2378 	int error;
2379 
2380 	mpi3mr_set_events_mask(sc);
2381 
2382 	error = mpi3mr_issue_event_notification(sc);
2383 
2384 	if (error) {
2385 		printf(IOCNAME "Failed to issue event notification %d\n",
2386 		    sc->name, error);
2387 	}
2388 
2389 	return error;
2390 }
2391 
2392 /**
2393  * mpi3mr_process_event_ack - Process event acknowledgment
2394  * @sc: Adapter instance reference
2395  * @event: MPI3 event ID
2396  * @event_ctx: Event context
2397  *
2398  * Send event acknowledgement through admin queue and wait for
2399  * it to complete.
2400  *
2401  * Return: 0 on success, non-zero on failures.
2402  */
int mpi3mr_process_event_ack(struct mpi3mr_softc *sc, U8 event,
	U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds is a single shared slot; serialize access to it. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtAck: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	/* Echo back the event ID and context the firmware sent us. */
	evtack_req.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));
	if (retval) {
		printf(IOCNAME "Issue EvtAck: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtAck: command timed out\n",
		    sc->name);
		/*
		 * NOTE(review): unlike the other internal commands in this
		 * file, a timeout here does not call
		 * mpi3mr_check_rh_fault_ioc() — confirm this is intentional.
		 */
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtAck: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the shared init_cmds slot for the next internal command. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2461 
2462 
2463 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_softc *sc)
2464 {
2465 	int retval = 0;
2466 	U32 sz, i;
2467 	U16 num_chains;
2468 
2469 	num_chains = sc->max_host_ios;
2470 
2471 	sc->chain_buf_count = num_chains;
2472 	sz = sizeof(struct mpi3mr_chain) * num_chains;
2473 
2474 	sc->chain_sgl_list = malloc(sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2475 
2476 	if (!sc->chain_sgl_list) {
2477 		printf(IOCNAME "Cannot allocate memory for chain SGL list\n",
2478 		    sc->name);
2479 		retval = -1;
2480 		goto out_failed;
2481 	}
2482 
2483 	sz = MPI3MR_CHAINSGE_SIZE;
2484 
2485         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2486 				4096, 0,		/* algnmnt, boundary */
2487 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2488 				BUS_SPACE_MAXADDR,	/* highaddr */
2489 				NULL, NULL,		/* filter, filterarg */
2490                                 sz,			/* maxsize */
2491                                 1,			/* nsegments */
2492                                 sz,			/* maxsegsize */
2493                                 0,			/* flags */
2494                                 NULL, NULL,		/* lockfunc, lockarg */
2495                                 &sc->chain_sgl_list_tag)) {
2496 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Chain buffer DMA tag\n");
2497 		return (ENOMEM);
2498         }
2499 
2500 	for (i = 0; i < num_chains; i++) {
2501 		if (bus_dmamem_alloc(sc->chain_sgl_list_tag, (void **)&sc->chain_sgl_list[i].buf,
2502 		    BUS_DMA_NOWAIT, &sc->chain_sgl_list[i].buf_dmamap)) {
2503 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
2504 				__func__, __LINE__);
2505 			return (ENOMEM);
2506 		}
2507 
2508 		bzero(sc->chain_sgl_list[i].buf, sz);
2509 		bus_dmamap_load(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap, sc->chain_sgl_list[i].buf, sz,
2510 		    mpi3mr_memaddr_cb, &sc->chain_sgl_list[i].buf_phys, 0);
2511 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d phys addr= %#016jx size= %d\n",
2512 		    __func__, __LINE__, (uintmax_t)sc->chain_sgl_list[i].buf_phys, sz);
2513 	}
2514 
2515 	sc->chain_bitmap_sz = MPI3MR_DIV_ROUND_UP(num_chains, 8);
2516 
2517 	sc->chain_bitmap = malloc(sc->chain_bitmap_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2518 	if (!sc->chain_bitmap) {
2519 		mpi3mr_dprint(sc, MPI3MR_INFO, "Cannot alloc memory for chain bitmap\n");
2520 		retval = -1;
2521 		goto out_failed;
2522 	}
2523 	return retval;
2524 
2525 out_failed:
2526 	for (i = 0; i < num_chains; i++) {
2527 		if (sc->chain_sgl_list[i].buf_phys != 0)
2528 			bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
2529 		if (sc->chain_sgl_list[i].buf != NULL)
2530 			bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf, sc->chain_sgl_list[i].buf_dmamap);
2531 	}
2532 	if (sc->chain_sgl_list_tag != NULL)
2533 		bus_dma_tag_destroy(sc->chain_sgl_list_tag);
2534 	return retval;
2535 }
2536 
2537 static int mpi3mr_pel_alloc(struct mpi3mr_softc *sc)
2538 {
2539 	int retval = 0;
2540 
2541 	if (!sc->pel_cmds.reply) {
2542 		sc->pel_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2543 		if (!sc->pel_cmds.reply) {
2544 			printf(IOCNAME "Cannot allocate memory for pel_cmds.reply\n",
2545 			    sc->name);
2546 			goto out_failed;
2547 		}
2548 	}
2549 
2550 	if (!sc->pel_abort_cmd.reply) {
2551 		sc->pel_abort_cmd.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2552 		if (!sc->pel_abort_cmd.reply) {
2553 			printf(IOCNAME "Cannot allocate memory for pel_abort_cmd.reply\n",
2554 			    sc->name);
2555 			goto out_failed;
2556 		}
2557 	}
2558 
2559 	if (!sc->pel_seq_number) {
2560 		sc->pel_seq_number_sz = sizeof(Mpi3PELSeq_t);
2561 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,   /* parent */
2562 				 4, 0,                           /* alignment, boundary */
2563 				 BUS_SPACE_MAXADDR_32BIT,        /* lowaddr */
2564 				 BUS_SPACE_MAXADDR,              /* highaddr */
2565 				 NULL, NULL,                     /* filter, filterarg */
2566 				 sc->pel_seq_number_sz,		 /* maxsize */
2567 				 1,                              /* nsegments */
2568 				 sc->pel_seq_number_sz,          /* maxsegsize */
2569 				 0,                              /* flags */
2570 				 NULL, NULL,                     /* lockfunc, lockarg */
2571 				 &sc->pel_seq_num_dmatag)) {
2572 			 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create PEL seq number dma memory tag\n");
2573 			 retval = -ENOMEM;
2574 			 goto out_failed;
2575 		}
2576 
2577 		if (bus_dmamem_alloc(sc->pel_seq_num_dmatag, (void **)&sc->pel_seq_number,
2578 		    BUS_DMA_NOWAIT, &sc->pel_seq_num_dmamap)) {
2579 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate PEL seq number kernel buffer dma memory\n");
2580 			retval = -ENOMEM;
2581 			goto out_failed;
2582 		}
2583 
2584 		bzero(sc->pel_seq_number, sc->pel_seq_number_sz);
2585 
2586 		bus_dmamap_load(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap, sc->pel_seq_number,
2587 		    sc->pel_seq_number_sz, mpi3mr_memaddr_cb, &sc->pel_seq_number_dma, 0);
2588 
2589 		if (!sc->pel_seq_number) {
2590 			printf(IOCNAME "%s:%d Cannot load PEL seq number dma memory for size: %d\n", sc->name,
2591 				__func__, __LINE__, sc->pel_seq_number_sz);
2592 			retval = -ENOMEM;
2593 			goto out_failed;
2594 		}
2595 	}
2596 
2597 out_failed:
2598 	return retval;
2599 }
2600 
2601 /**
2602  * mpi3mr_validate_fw_update - validate IOCFacts post adapter reset
2603  * @sc: Adapter instance reference
2604  *
2605  * Return zero if the new IOCFacts is compatible with previous values
2606  * else return appropriate error
2607  */
2608 static int
2609 mpi3mr_validate_fw_update(struct mpi3mr_softc *sc)
2610 {
2611 	U16 dev_handle_bitmap_sz;
2612 	U8 *removepend_bitmap;
2613 
2614 	if (sc->facts.reply_sz > sc->reply_sz) {
2615 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2616 		    "Cannot increase reply size from %d to %d\n",
2617 		    sc->reply_sz, sc->reply_sz);
2618 		return -EPERM;
2619 	}
2620 
2621 	if (sc->num_io_throttle_group != sc->facts.max_io_throttle_group) {
2622 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2623 		    "max io throttle group doesn't match old(%d), new(%d)\n",
2624 		    sc->num_io_throttle_group,
2625 		    sc->facts.max_io_throttle_group);
2626 		return -EPERM;
2627 	}
2628 
2629 	if (sc->facts.max_op_reply_q < sc->num_queues) {
2630 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2631 		    "Cannot reduce number of operational reply queues from %d to %d\n",
2632 		    sc->num_queues,
2633 		    sc->facts.max_op_reply_q);
2634 		return -EPERM;
2635 	}
2636 
2637 	if (sc->facts.max_op_req_q < sc->num_queues) {
2638 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2639 		    "Cannot reduce number of operational request queues from %d to %d\n",
2640 		    sc->num_queues, sc->facts.max_op_req_q);
2641 		return -EPERM;
2642 	}
2643 
2644 	dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
2645 
2646 	if (dev_handle_bitmap_sz > sc->dev_handle_bitmap_sz) {
2647 		removepend_bitmap = realloc(sc->removepend_bitmap,
2648 		    dev_handle_bitmap_sz, M_MPI3MR, M_NOWAIT);
2649 
2650 		if (!removepend_bitmap) {
2651 			mpi3mr_dprint(sc, MPI3MR_ERROR,
2652 			    "failed to increase removepend_bitmap sz from: %d to %d\n",
2653 			    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2654 			return -ENOMEM;
2655 		}
2656 
2657 		memset(removepend_bitmap + sc->dev_handle_bitmap_sz, 0,
2658 		    dev_handle_bitmap_sz - sc->dev_handle_bitmap_sz);
2659 		sc->removepend_bitmap = removepend_bitmap;
2660 		mpi3mr_dprint(sc, MPI3MR_INFO,
2661 		    "increased dev_handle_bitmap_sz from %d to %d\n",
2662 		    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2663 		sc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
2664 	}
2665 
2666 	return 0;
2667 }
2668 
2669 /*
2670  * mpi3mr_initialize_ioc - Controller initialization
2671  * @dev: pointer to device struct
2672  *
2673  * This function allocates the controller wide resources and brings
2674  * the controller to operational state
2675  *
2676  * Return: 0 on success and proper error codes on failure
2677  */
2678 int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
2679 {
2680 	int retval = 0;
2681 	enum mpi3mr_iocstate ioc_state;
2682 	U64 ioc_info;
2683 	U32 ioc_status, ioc_control, i, timeout;
2684 	Mpi3IOCFactsData_t facts_data;
2685 	char str[32];
2686 	U32 size;
2687 
2688 	sc->cpu_count = mp_ncpus;
2689 
2690 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
2691 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
2692 	ioc_info = mpi3mr_regread64(sc, MPI3_SYSIF_IOC_INFO_LOW_OFFSET);
2693 
2694 	mpi3mr_dprint(sc, MPI3MR_INFO, "SOD ioc_status: 0x%x ioc_control: 0x%x "
2695 	    "ioc_info: 0x%lx\n", ioc_status, ioc_control, ioc_info);
2696 
2697         /*The timeout value is in 2sec unit, changing it to seconds*/
2698 	sc->ready_timeout =
2699                 ((ioc_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
2700                     MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
2701 
2702 	ioc_state = mpi3mr_get_iocstate(sc);
2703 
2704 	mpi3mr_dprint(sc, MPI3MR_INFO, "IOC state: %s   IOC ready timeout: %d\n",
2705 	    mpi3mr_iocstate_name(ioc_state), sc->ready_timeout);
2706 
2707 	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
2708 	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
2709 		timeout = sc->ready_timeout * 10;
2710 		do {
2711 			DELAY(1000 * 100);
2712 		} while (--timeout);
2713 
2714 		ioc_state = mpi3mr_get_iocstate(sc);
2715 		mpi3mr_dprint(sc, MPI3MR_INFO,
2716 			"IOC in %s state after waiting for reset time\n",
2717 			mpi3mr_iocstate_name(ioc_state));
2718 	}
2719 
2720 	if (ioc_state == MRIOC_STATE_READY) {
2721                 retval = mpi3mr_mur_ioc(sc, MPI3MR_RESET_FROM_BRINGUP);
2722                 if (retval) {
2723                         mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to MU reset IOC, error 0x%x\n",
2724                                 retval);
2725                 }
2726                 ioc_state = mpi3mr_get_iocstate(sc);
2727         }
2728 
2729         if (ioc_state != MRIOC_STATE_RESET) {
2730                 mpi3mr_print_fault_info(sc);
2731 		 mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
2732                  retval = mpi3mr_issue_reset(sc,
2733                      MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
2734                      MPI3MR_RESET_FROM_BRINGUP);
2735                 if (retval) {
2736                         mpi3mr_dprint(sc, MPI3MR_ERROR,
2737                             "%s :Failed to soft reset IOC, error 0x%d\n",
2738                             __func__, retval);
2739                         goto out_failed;
2740                 }
2741         }
2742 
2743 	ioc_state = mpi3mr_get_iocstate(sc);
2744 
2745         if (ioc_state != MRIOC_STATE_RESET) {
2746 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot bring IOC to reset state\n");
2747 		goto out_failed;
2748         }
2749 
2750 	retval = mpi3mr_setup_admin_qpair(sc);
2751 	if (retval) {
2752 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup Admin queues, error 0x%x\n",
2753 		    retval);
2754 		goto out_failed;
2755 	}
2756 
2757 	retval = mpi3mr_bring_ioc_ready(sc);
2758 	if (retval) {
2759 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n",
2760 		    retval);
2761 		goto out_failed;
2762 	}
2763 
2764 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2765 		retval = mpi3mr_alloc_interrupts(sc, 1);
2766 		if (retval) {
2767 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error 0x%x\n",
2768 			    retval);
2769 			goto out_failed;
2770 		}
2771 
2772 		retval = mpi3mr_setup_irqs(sc);
2773 		if (retval) {
2774 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error 0x%x\n",
2775 			    retval);
2776 			goto out_failed;
2777 		}
2778 	}
2779 
2780 	mpi3mr_enable_interrupts(sc);
2781 
2782 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2783 		mtx_init(&sc->mpi3mr_mtx, "SIM lock", NULL, MTX_DEF);
2784 		mtx_init(&sc->io_lock, "IO lock", NULL, MTX_DEF);
2785 		mtx_init(&sc->admin_req_lock, "Admin Request Queue lock", NULL, MTX_SPIN);
2786 		mtx_init(&sc->reply_free_q_lock, "Reply free Queue lock", NULL, MTX_SPIN);
2787 		mtx_init(&sc->sense_buf_q_lock, "Sense buffer Queue lock", NULL, MTX_SPIN);
2788 		mtx_init(&sc->chain_buf_lock, "Chain buffer lock", NULL, MTX_SPIN);
2789 		mtx_init(&sc->cmd_pool_lock, "Command pool lock", NULL, MTX_DEF);
2790 //		mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_SPIN);
2791 		mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_DEF);
2792 		mtx_init(&sc->target_lock, "Target lock", NULL, MTX_SPIN);
2793 		mtx_init(&sc->reset_mutex, "Reset lock", NULL, MTX_DEF);
2794 
2795 		mtx_init(&sc->init_cmds.completion.lock, "Init commands lock", NULL, MTX_DEF);
2796 		sc->init_cmds.reply = NULL;
2797 		sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2798 		sc->init_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2799 		sc->init_cmds.host_tag = MPI3MR_HOSTTAG_INITCMDS;
2800 
2801 		mtx_init(&sc->ioctl_cmds.completion.lock, "IOCTL commands lock", NULL, MTX_DEF);
2802 		sc->ioctl_cmds.reply = NULL;
2803 		sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
2804 		sc->ioctl_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2805 		sc->ioctl_cmds.host_tag = MPI3MR_HOSTTAG_IOCTLCMDS;
2806 
2807 		mtx_init(&sc->pel_abort_cmd.completion.lock, "PEL Abort command lock", NULL, MTX_DEF);
2808 		sc->pel_abort_cmd.reply = NULL;
2809 		sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
2810 		sc->pel_abort_cmd.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2811 		sc->pel_abort_cmd.host_tag = MPI3MR_HOSTTAG_PELABORT;
2812 
2813 		mtx_init(&sc->host_tm_cmds.completion.lock, "TM commands lock", NULL, MTX_DEF);
2814 		sc->host_tm_cmds.reply = NULL;
2815 		sc->host_tm_cmds.state = MPI3MR_CMD_NOTUSED;
2816 		sc->host_tm_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2817 		sc->host_tm_cmds.host_tag = MPI3MR_HOSTTAG_TMS;
2818 
2819 		TAILQ_INIT(&sc->cmd_list_head);
2820 		TAILQ_INIT(&sc->event_list);
2821 		TAILQ_INIT(&sc->delayed_rmhs_list);
2822 		TAILQ_INIT(&sc->delayed_evtack_cmds_list);
2823 
2824 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2825 			snprintf(str, 32, "Dev REMHS commands lock[%d]", i);
2826 			mtx_init(&sc->dev_rmhs_cmds[i].completion.lock, str, NULL, MTX_DEF);
2827 			sc->dev_rmhs_cmds[i].reply = NULL;
2828 			sc->dev_rmhs_cmds[i].state = MPI3MR_CMD_NOTUSED;
2829 			sc->dev_rmhs_cmds[i].dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2830 			sc->dev_rmhs_cmds[i].host_tag = MPI3MR_HOSTTAG_DEVRMCMD_MIN
2831 							    + i;
2832 		}
2833 	}
2834 
2835 	retval = mpi3mr_issue_iocfacts(sc, &facts_data);
2836 	if (retval) {
2837 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, retval: 0x%x\n",
2838 		    retval);
2839 		goto out_failed;
2840 	}
2841 
2842 	retval = mpi3mr_process_factsdata(sc, &facts_data);
2843 	if (retval) {
2844 		mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failedi, retval: 0x%x\n",
2845 		    retval);
2846 		goto out_failed;
2847 	}
2848 
2849 	sc->num_io_throttle_group = sc->facts.max_io_throttle_group;
2850 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
2851 
2852 	if (init_type == MPI3MR_INIT_TYPE_RESET) {
2853 		retval = mpi3mr_validate_fw_update(sc);
2854 		if (retval)
2855 			goto out_failed;
2856 	} else {
2857 		sc->reply_sz = sc->facts.reply_sz;
2858 	}
2859 
2860 
2861 	mpi3mr_display_ioc_info(sc);
2862 
2863 	retval = mpi3mr_reply_alloc(sc);
2864 	if (retval) {
2865 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, retval: 0x%x\n",
2866 		    retval);
2867 		goto out_failed;
2868 	}
2869 
2870 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2871 		retval = mpi3mr_alloc_chain_bufs(sc);
2872 		if (retval) {
2873 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, retval: 0x%x\n",
2874 			    retval);
2875 			goto out_failed;
2876 		}
2877 	}
2878 
2879 	retval = mpi3mr_issue_iocinit(sc);
2880 	if (retval) {
2881 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, retval: 0x%x\n",
2882 		    retval);
2883 		goto out_failed;
2884 	}
2885 
2886 	mpi3mr_print_fw_pkg_ver(sc);
2887 
2888 	sc->reply_free_q_host_index = sc->num_reply_bufs;
2889 	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
2890 		sc->reply_free_q_host_index);
2891 
2892 	sc->sense_buf_q_host_index = sc->num_sense_bufs;
2893 
2894 	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
2895 		sc->sense_buf_q_host_index);
2896 
2897 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2898 		retval = mpi3mr_alloc_interrupts(sc, 0);
2899 		if (retval) {
2900 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, retval: 0x%x\n",
2901 			    retval);
2902 			goto out_failed;
2903 		}
2904 
2905 		retval = mpi3mr_setup_irqs(sc);
2906 		if (retval) {
2907 			printf(IOCNAME "Failed to setup ISR, error: 0x%x\n",
2908 			    sc->name, retval);
2909 			goto out_failed;
2910 		}
2911 
2912 		mpi3mr_enable_interrupts(sc);
2913 
2914 	} else
2915 		mpi3mr_enable_interrupts(sc);
2916 
2917 	retval = mpi3mr_create_op_queues(sc);
2918 
2919 	if (retval) {
2920 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create operational queues, error: %d\n",
2921 		    retval);
2922 		goto out_failed;
2923 	}
2924 
2925 	if (!sc->throttle_groups && sc->num_io_throttle_group) {
2926 		mpi3mr_dprint(sc, MPI3MR_ERROR, "allocating memory for throttle groups\n");
2927 		size = sizeof(struct mpi3mr_throttle_group_info);
2928 		sc->throttle_groups = (struct mpi3mr_throttle_group_info *)
2929 					  malloc(sc->num_io_throttle_group *
2930 					      size, M_MPI3MR, M_NOWAIT | M_ZERO);
2931 		if (!sc->throttle_groups)
2932 			goto out_failed;
2933 	}
2934 
2935 	if (init_type == MPI3MR_INIT_TYPE_RESET) {
2936 		mpi3mr_dprint(sc, MPI3MR_INFO, "Re-register events\n");
2937 		retval = mpi3mr_register_events(sc);
2938 		if (retval) {
2939 			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, retval: 0x%x\n",
2940 			    retval);
2941 			goto out_failed;
2942 		}
2943 
2944 		mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Port Enable\n");
2945 		retval = mpi3mr_issue_port_enable(sc, 0);
2946 		if (retval) {
2947 			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, retval: 0x%x\n",
2948 			    retval);
2949 			goto out_failed;
2950 		}
2951 	}
2952 	retval = mpi3mr_pel_alloc(sc);
2953 	if (retval) {
2954 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, retval: 0x%x\n",
2955 		    retval);
2956 		goto out_failed;
2957 	}
2958 
2959 	return retval;
2960 
2961 out_failed:
2962 	retval = -1;
2963 	return retval;
2964 }
2965 
2966 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
2967     struct mpi3mr_drvr_cmd *drvrcmd)
2968 {
2969 	drvrcmd->state = MPI3MR_CMD_NOTUSED;
2970 	drvrcmd->callback = NULL;
2971 	printf(IOCNAME "Completing Port Enable Request\n", sc->name);
2972 	sc->mpi3mr_flags |= MPI3MR_FLAGS_PORT_ENABLE_DONE;
2973 	mpi3mr_startup_decrement(sc->cam_sc);
2974 }
2975 
2976 int mpi3mr_issue_port_enable(struct mpi3mr_softc *sc, U8 async)
2977 {
2978 	Mpi3PortEnableRequest_t pe_req;
2979 	int retval = 0;
2980 
2981 	memset(&pe_req, 0, sizeof(pe_req));
2982 	mtx_lock(&sc->init_cmds.completion.lock);
2983 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2984 		retval = -1;
2985 		printf(IOCNAME "Issue PortEnable: Init command is in use\n", sc->name);
2986 		mtx_unlock(&sc->init_cmds.completion.lock);
2987 		goto out;
2988 	}
2989 
2990 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2991 
2992 	if (async) {
2993 		sc->init_cmds.is_waiting = 0;
2994 		sc->init_cmds.callback = mpi3mr_port_enable_complete;
2995 	} else {
2996 		sc->init_cmds.is_waiting = 1;
2997 		sc->init_cmds.callback = NULL;
2998 		init_completion(&sc->init_cmds.completion);
2999 	}
3000 	pe_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
3001 	pe_req.Function = MPI3_FUNCTION_PORT_ENABLE;
3002 
3003 	printf(IOCNAME "Sending Port Enable Request\n", sc->name);
3004 	retval = mpi3mr_submit_admin_cmd(sc, &pe_req, sizeof(pe_req));
3005 	if (retval) {
3006 		printf(IOCNAME "Issue PortEnable: Admin Post failed\n",
3007 		    sc->name);
3008 		goto out_unlock;
3009 	}
3010 
3011 	if (!async) {
3012 		wait_for_completion_timeout(&sc->init_cmds.completion,
3013 		    MPI3MR_PORTENABLE_TIMEOUT);
3014 		if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3015 			printf(IOCNAME "Issue PortEnable: command timed out\n",
3016 			    sc->name);
3017 			retval = -1;
3018 			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_PE_TIMEOUT);
3019 			goto out_unlock;
3020 		}
3021 		mpi3mr_port_enable_complete(sc, &sc->init_cmds);
3022 	}
3023 out_unlock:
3024 	mtx_unlock(&sc->init_cmds.completion.lock);
3025 
3026 out:
3027 	return retval;
3028 }
3029 
/*
 * mpi3mr_watchdog_thread - Per-controller fault watch kernel thread
 * @arg: Adapter softc, passed through thread creation
 *
 * Wakes roughly once a second (or when prodded via watchdog_chan)
 * while holding reset_mutex and drives fault recovery: prepare-for-
 * reset timeouts, firmware-initiated reset detection, fault register
 * decoding and, where recoverable, soft resets.  Exits on shutdown or
 * when the controller is marked unrecoverable.
 */
void
mpi3mr_watchdog_thread(void *arg)
{
	struct mpi3mr_softc *sc;
	enum mpi3mr_iocstate ioc_state;
	U32 fault, host_diagnostic, ioc_status;

	sc = (struct mpi3mr_softc *)arg;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s\n", __func__);

	sc->watchdog_thread_active = 1;
	/* reset_mutex is held for the whole loop; msleep() drops it while
	 * sleeping and reacquires it before returning. */
	mtx_lock(&sc->reset_mutex);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->watchdog_chan, &sc->reset_mutex, PRIBIO,
		    "mpi3mr_watchdog", 1 * hz);
		if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
		    (sc->unrecoverable == 1)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "Exit due to %s from %s\n",
			   sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}

		/* Firmware asked the host to prepare for reset; if the reset
		 * does not arrive in time, force a soft reset ourselves. */
		if ((sc->prepare_for_reset) &&
		    ((sc->prepare_for_reset_timeout_counter++) >=
		     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
			mpi3mr_soft_reset_handler(sc,
			    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
			continue;
		}

		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);

		/* RESET_HISTORY set means the firmware reset on its own;
		 * re-initialize through the soft reset handler. */
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
			mpi3mr_soft_reset_handler(sc, MPI3MR_RESET_FROM_FIRMWARE, 0);
			continue;
		}

		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT) {
			fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
			    MPI3_SYSIF_FAULT_CODE_MASK;

			host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
			if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
				/* Give an in-progress diagnostic save time to
				 * finish (up to the save timeout) before any
				 * reset action. */
				if (!sc->diagsave_timeout) {
					mpi3mr_print_fault_info(sc);
					mpi3mr_dprint(sc, MPI3MR_INFO,
						"diag save in progress\n");
				}
				if ((sc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
					continue;
			}
			mpi3mr_print_fault_info(sc);
			sc->diagsave_timeout = 0;

			/* These fault codes cannot be cleared by a soft
			 * reset; give up on the controller. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) ||
			    (fault == MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED)) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
				    "Controller requires system power cycle or complete reset is needed,"
				    "fault code: 0x%x. marking controller as unrecoverable\n", fault);
				sc->unrecoverable = 1;
				goto out;
			}
			/* A reset is already underway (firmware- or
			 * driver-initiated); stop watching and let that
			 * path finish. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
			    || (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS)
			    || (sc->reset_in_progress))
				goto out;
			if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
			else
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_FAULT_WATCH, 0);

		}

		/* A soft reset was requested elsewhere in the driver. */
		if (sc->reset.type == MPI3MR_TRIGGER_SOFT_RESET) {
			mpi3mr_print_fault_info(sc);
			mpi3mr_soft_reset_handler(sc, sc->reset.reason, 1);
		}
	}
out:
	mtx_unlock(&sc->reset_mutex);
	sc->watchdog_thread_active = 0;
	mpi3mr_kproc_exit(0);
}
3120 
3121 static void mpi3mr_display_event_data(struct mpi3mr_softc *sc,
3122 	Mpi3EventNotificationReply_t *event_rep)
3123 {
3124 	char *desc = NULL;
3125 	U16 event;
3126 
3127 	event = event_rep->Event;
3128 
3129 	switch (event) {
3130 	case MPI3_EVENT_LOG_DATA:
3131 		desc = "Log Data";
3132 		break;
3133 	case MPI3_EVENT_CHANGE:
3134 		desc = "Event Change";
3135 		break;
3136 	case MPI3_EVENT_GPIO_INTERRUPT:
3137 		desc = "GPIO Interrupt";
3138 		break;
3139 	case MPI3_EVENT_CABLE_MGMT:
3140 		desc = "Cable Management";
3141 		break;
3142 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
3143 		desc = "Energy Pack Change";
3144 		break;
3145 	case MPI3_EVENT_DEVICE_ADDED:
3146 	{
3147 		Mpi3DevicePage0_t *event_data =
3148 		    (Mpi3DevicePage0_t *)event_rep->EventData;
3149 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Added: Dev=0x%04x Form=0x%x Perst id: 0x%x\n",
3150 			event_data->DevHandle, event_data->DeviceForm, event_data->PersistentID);
3151 		return;
3152 	}
3153 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
3154 	{
3155 		Mpi3DevicePage0_t *event_data =
3156 		    (Mpi3DevicePage0_t *)event_rep->EventData;
3157 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Info Changed: Dev=0x%04x Form=0x%x\n",
3158 			event_data->DevHandle, event_data->DeviceForm);
3159 		return;
3160 	}
3161 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
3162 	{
3163 		Mpi3EventDataDeviceStatusChange_t *event_data =
3164 		    (Mpi3EventDataDeviceStatusChange_t *)event_rep->EventData;
3165 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Status Change: Dev=0x%04x RC=0x%x\n",
3166 			event_data->DevHandle, event_data->ReasonCode);
3167 		return;
3168 	}
3169 	case MPI3_EVENT_SAS_DISCOVERY:
3170 	{
3171 		Mpi3EventDataSasDiscovery_t *event_data =
3172 		    (Mpi3EventDataSasDiscovery_t *)event_rep->EventData;
3173 		mpi3mr_dprint(sc, MPI3MR_EVENT, "SAS Discovery: (%s)",
3174 			(event_data->ReasonCode == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
3175 		    "start" : "stop");
3176 		if (event_data->DiscoveryStatus &&
3177 		    (sc->mpi3mr_debug & MPI3MR_EVENT)) {
3178 			printf("discovery_status(0x%08x)",
3179 			    event_data->DiscoveryStatus);
3180 
3181 		}
3182 
3183 		if (sc->mpi3mr_debug & MPI3MR_EVENT)
3184 			printf("\n");
3185 		return;
3186 	}
3187 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
3188 		desc = "SAS Broadcast Primitive";
3189 		break;
3190 	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
3191 		desc = "SAS Notify Primitive";
3192 		break;
3193 	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
3194 		desc = "SAS Init Device Status Change";
3195 		break;
3196 	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
3197 		desc = "SAS Init Table Overflow";
3198 		break;
3199 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
3200 		desc = "SAS Topology Change List";
3201 		break;
3202 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
3203 		desc = "Enclosure Device Status Change";
3204 		break;
3205 	case MPI3_EVENT_HARD_RESET_RECEIVED:
3206 		desc = "Hard Reset Received";
3207 		break;
3208 	case MPI3_EVENT_SAS_PHY_COUNTER:
3209 		desc = "SAS PHY Counter";
3210 		break;
3211 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
3212 		desc = "SAS Device Discovery Error";
3213 		break;
3214 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
3215 		desc = "PCIE Topology Change List";
3216 		break;
3217 	case MPI3_EVENT_PCIE_ENUMERATION:
3218 	{
3219 		Mpi3EventDataPcieEnumeration_t *event_data =
3220 			(Mpi3EventDataPcieEnumeration_t *)event_rep->EventData;
3221 		mpi3mr_dprint(sc, MPI3MR_EVENT, "PCIE Enumeration: (%s)",
3222 			(event_data->ReasonCode ==
3223 			    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" :
3224 			    "stop");
3225 		if (event_data->EnumerationStatus)
3226 			mpi3mr_dprint(sc, MPI3MR_EVENT, "enumeration_status(0x%08x)",
3227 			   event_data->EnumerationStatus);
3228 		if (sc->mpi3mr_debug & MPI3MR_EVENT)
3229 			printf("\n");
3230 		return;
3231 	}
3232 	case MPI3_EVENT_PREPARE_FOR_RESET:
3233 		desc = "Prepare For Reset";
3234 		break;
3235 	}
3236 
3237 	if (!desc)
3238 		return;
3239 
3240 	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s\n", desc);
3241 }
3242 
3243 struct mpi3mr_target *
3244 mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc *cam_sc,
3245     uint16_t per_id)
3246 {
3247 	struct mpi3mr_target *target = NULL;
3248 
3249 	mtx_lock_spin(&cam_sc->sc->target_lock);
3250 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3251 		if (target->per_id == per_id)
3252 			break;
3253 	}
3254 
3255 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3256 	return target;
3257 }
3258 
3259 struct mpi3mr_target *
3260 mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc *cam_sc,
3261     uint16_t handle)
3262 {
3263 	struct mpi3mr_target *target = NULL;
3264 
3265 	mtx_lock_spin(&cam_sc->sc->target_lock);
3266 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3267 		if (target->dev_handle == handle)
3268 			break;
3269 
3270 	}
3271 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3272 	return target;
3273 }
3274 
3275 void mpi3mr_update_device(struct mpi3mr_softc *sc,
3276     struct mpi3mr_target *tgtdev, Mpi3DevicePage0_t *dev_pg0,
3277     bool is_added)
3278 {
3279 	U16 flags = 0;
3280 
3281 	tgtdev->per_id = (dev_pg0->PersistentID);
3282 	tgtdev->dev_handle = (dev_pg0->DevHandle);
3283 	tgtdev->dev_type = dev_pg0->DeviceForm;
3284 	tgtdev->encl_handle = (dev_pg0->EnclosureHandle);
3285 	tgtdev->parent_handle = (dev_pg0->ParentDevHandle);
3286 	tgtdev->slot = (dev_pg0->Slot);
3287 	tgtdev->qdepth = (dev_pg0->QueueDepth);
3288 	tgtdev->wwid = (dev_pg0->WWID);
3289 
3290 	flags = (dev_pg0->Flags);
3291 	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
3292 	if (is_added == true)
3293 		tgtdev->io_throttle_enabled =
3294 		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
3295 
3296 	switch (dev_pg0->AccessStatus) {
3297 	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
3298 	case MPI3_DEVICE0_ASTATUS_PREPARE:
3299 	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
3300 	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
3301 		break;
3302 	default:
3303 		tgtdev->is_hidden = 1;
3304 		break;
3305 	}
3306 
3307 	switch (tgtdev->dev_type) {
3308 	case MPI3_DEVICE_DEVFORM_SAS_SATA:
3309 	{
3310 		Mpi3Device0SasSataFormat_t *sasinf =
3311 		    &dev_pg0->DeviceSpecific.SasSataFormat;
3312 		U16 dev_info = (sasinf->DeviceInfo);
3313 		tgtdev->dev_spec.sassata_inf.dev_info = dev_info;
3314 		tgtdev->dev_spec.sassata_inf.sas_address =
3315 		    (sasinf->SASAddress);
3316 		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
3317 		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
3318 			tgtdev->is_hidden = 1;
3319 		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
3320 			    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
3321 			tgtdev->is_hidden = 1;
3322 		break;
3323 	}
3324 	case MPI3_DEVICE_DEVFORM_PCIE:
3325 	{
3326 		Mpi3Device0PcieFormat_t *pcieinf =
3327 		    &dev_pg0->DeviceSpecific.PcieFormat;
3328 		U16 dev_info = (pcieinf->DeviceInfo);
3329 
3330 		tgtdev->q_depth = dev_pg0->QueueDepth;
3331 		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
3332 		tgtdev->dev_spec.pcie_inf.capb =
3333 		    (pcieinf->Capabilities);
3334 		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
3335 		if (dev_pg0->AccessStatus == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
3336 			tgtdev->dev_spec.pcie_inf.mdts =
3337 			    (pcieinf->MaximumDataTransferSize);
3338 			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->PageSize;
3339 			tgtdev->dev_spec.pcie_inf.reset_to =
3340 				pcieinf->ControllerResetTO;
3341 			tgtdev->dev_spec.pcie_inf.abort_to =
3342 				pcieinf->NVMeAbortTO;
3343 		}
3344 		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
3345 			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
3346 
3347 		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
3348 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
3349 		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
3350 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
3351 			tgtdev->is_hidden = 1;
3352 
3353 		break;
3354 	}
3355 	case MPI3_DEVICE_DEVFORM_VD:
3356 	{
3357 		Mpi3Device0VdFormat_t *vdinf =
3358 		    &dev_pg0->DeviceSpecific.VdFormat;
3359 		struct mpi3mr_throttle_group_info *tg = NULL;
3360 
3361 		tgtdev->dev_spec.vol_inf.state = vdinf->VdState;
3362 		if (vdinf->VdState == MPI3_DEVICE0_VD_STATE_OFFLINE)
3363 			tgtdev->is_hidden = 1;
3364 		tgtdev->dev_spec.vol_inf.tg_id = vdinf->IOThrottleGroup;
3365 		tgtdev->dev_spec.vol_inf.tg_high =
3366 			vdinf->IOThrottleGroupHigh * 2048;
3367 		tgtdev->dev_spec.vol_inf.tg_low =
3368 			vdinf->IOThrottleGroupLow * 2048;
3369 		if (vdinf->IOThrottleGroup < sc->num_io_throttle_group) {
3370 			tg = sc->throttle_groups + vdinf->IOThrottleGroup;
3371 			tg->id = vdinf->IOThrottleGroup;
3372 			tg->high = tgtdev->dev_spec.vol_inf.tg_high;
3373 			tg->low = tgtdev->dev_spec.vol_inf.tg_low;
3374 			if (is_added == true)
3375 				tg->fw_qd = tgtdev->q_depth;
3376 			tg->modified_qd = tgtdev->q_depth;
3377 		}
3378 		tgtdev->dev_spec.vol_inf.tg = tg;
3379 		tgtdev->throttle_group = tg;
3380 		break;
3381 	}
3382 	default:
3383 		goto out;
3384 	}
3385 
3386 out:
3387 	return;
3388 }
3389 
3390 int mpi3mr_create_device(struct mpi3mr_softc *sc,
3391     Mpi3DevicePage0_t *dev_pg0)
3392 {
3393 	int retval = 0;
3394 	struct mpi3mr_target *target = NULL;
3395 	U16 per_id = 0;
3396 
3397 	per_id = dev_pg0->PersistentID;
3398 
3399 	mtx_lock_spin(&sc->target_lock);
3400 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
3401 		if (target->per_id == per_id) {
3402 			target->state = MPI3MR_DEV_CREATED;
3403 			break;
3404 		}
3405 	}
3406 	mtx_unlock_spin(&sc->target_lock);
3407 
3408 	if (target) {
3409 			mpi3mr_update_device(sc, target, dev_pg0, true);
3410 	} else {
3411 			target = malloc(sizeof(*target), M_MPI3MR,
3412 				 M_NOWAIT | M_ZERO);
3413 
3414 			if (target == NULL) {
3415 				retval = -1;
3416 				goto out;
3417 			}
3418 
3419 			target->exposed_to_os = 0;
3420 			mpi3mr_update_device(sc, target, dev_pg0, true);
3421 			mtx_lock_spin(&sc->target_lock);
3422 			TAILQ_INSERT_TAIL(&sc->cam_sc->tgt_list, target, tgt_next);
3423 			target->state = MPI3MR_DEV_CREATED;
3424 			mtx_unlock_spin(&sc->target_lock);
3425 	}
3426 out:
3427 	return retval;
3428 }
3429 
3430 /**
3431  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
3432  * @sc: Adapter instance reference
3433  * @drv_cmd: Internal command tracker
3434  *
3435  * Issues a target reset TM to the firmware from the device
3436  * removal TM pend list or retry the removal handshake sequence
3437  * based on the IOU control request IOC status.
3438  *
3439  * Return: Nothing
3440  */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	/* Recover this tracker's slot index from its host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/* Retry the full TM + IOU-control handshake a bounded number
		 * of times before giving up on this handle. */
		if (drv_cmd->retry_count < MPI3MR_DEVRMHS_RETRYCOUNT) {
			drv_cmd->retry_count++;
			mpi3mr_dprint(sc, MPI3MR_EVENT,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		/* NOTE(review): on terminal failure the removepend bit for
		 * this handle is left set — presumably intentional so the
		 * handle is not reused; confirm against the reset path. */
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_clear_bit(drv_cmd->dev_handle, sc->removepend_bitmap);
	}

	/* If removals were queued while all trackers were busy, reuse this
	 * tracker immediately for the oldest delayed entry instead of
	 * releasing it. */
	if (!TAILQ_EMPTY(&sc->delayed_rmhs_list)) {
		delayed_dev_rmhs = TAILQ_FIRST(&sc->delayed_rmhs_list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		TAILQ_REMOVE(&sc->delayed_rmhs_list, delayed_dev_rmhs, list);
		free(delayed_dev_rmhs, M_MPI3MR);
		return;
	}
	/* Nothing pending: reset the tracker and free its bitmap slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3492 
3493 /**
3494  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
3495  * @sc: Adapter instance reference
3496  * @drv_cmd: Internal command tracker
3497  *
3498  * Issues a target reset TM to the firmware from the device
3499  * removal TM pend list or issue IO Unit control request as
3500  * part of device removal or hidden acknowledgment handshake.
3501  *
3502  * Return: Nothing
3503  */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	Mpi3IoUnitControlRequest_t iou_ctrl;
	/* Recover this tracker's slot index from its host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	Mpi3SCSITaskMgmtReply_t *tm_reply = NULL;
	int retval;

	/* The reply payload is only present when firmware returned one. */
	if (drv_cmd->state & MPI3MR_CMD_REPLYVALID)
		tm_reply = (Mpi3SCSITaskMgmtReply_t *)drv_cmd->reply;

	if (tm_reply)
		printf(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    sc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32toh(tm_reply->TerminationCount));

	printf(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    sc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/* Second stage of the handshake: reuse the same tracker to post the
	 * IO Unit control request; its completion is handled by
	 * mpi3mr_dev_rmhs_complete_iou(). */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.Operation = drv_cmd->iou_rc;
	iou_ctrl.Param16[0] = htole16(drv_cmd->dev_handle);
	iou_ctrl.HostTag = htole16(drv_cmd->host_tag);
	iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
	if (retval) {
		printf(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    sc->name);
		goto out_failed;
	}

	return;
out_failed:
	/* Posting failed: reset the tracker and free its bitmap slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3550 
/**
 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
 * @sc: Adapter instance reference
 * @handle: Device handle
 * @cmdparam: Internal command tracker (NULL to allocate one here)
 * @iou_rc: IO Unit reason code (remove device or hidden ack)
 *
 * Issues a target reset TM to the firmware, or adds the request to
 * a pend list when no device-removal command tracker is free, as
 * part of the device removal or hidden acknowledgment handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc)
{
	Mpi3SCSITaskMgmtRequest_t tm_req;
	int retval = 0;
	U16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	U8 retrycount = 5;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_target *tgtdev = NULL;

	/*
	 * For an actual removal (not a hidden ack), flag the matching
	 * target as having started the removal handshake so other paths
	 * see the device is going away.
	 */
	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
		if ((tgtdev->dev_handle == handle) &&
		    (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) {
			tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
			break;
		}
	}
	mtx_unlock_spin(&sc->target_lock);

	/* A caller-supplied tracker is being reused; skip allocation. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Reserve a free device-removal command slot; retry a few times
	 * since another context may race for the same bit.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx, sc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	/*
	 * No tracker available: queue the request so it is issued when a
	 * previous removal handshake completes.  If the node allocation
	 * fails (M_NOWAIT) the request is silently dropped.
	 */
	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = malloc(sizeof(*delayed_dev_rmhs),M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!delayed_dev_rmhs)
			return;
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		TAILQ_INSERT_TAIL(&(sc->delayed_rmhs_list), delayed_dev_rmhs, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);


		return;
	}
	drv_cmd = &sc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	/* Recompute the slot index from the tracker's host tag. */
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	/* Completion is handled asynchronously by the TM callback. */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.DevHandle = htole16(handle);
	tm_req.TaskType = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.HostTag = htole16(drv_cmd->host_tag);
	tm_req.TaskHostTag = htole16(MPI3MR_HOSTTAG_INVALID);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Mark the handle as having a removal in flight before posting. */
	mpi3mr_set_bit(handle, sc->removepend_bitmap);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Posting failed: return the tracker and its slot to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3652 
3653 /**
3654  * mpi3mr_complete_evt_ack - Event ack request completion
3655  * @sc: Adapter instance reference
3656  * @drv_cmd: Internal command tracker
3657  *
3658  * This is the completion handler for non blocking event
3659  * acknowledgment sent to the firmware and this will issue any
3660  * pending event acknowledgment request.
3661  *
3662  * Return: Nothing
3663  */
3664 static void mpi3mr_complete_evt_ack(struct mpi3mr_softc *sc,
3665 	struct mpi3mr_drvr_cmd *drv_cmd)
3666 {
3667 	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
3668 	struct delayed_evtack_node *delayed_evtack = NULL;
3669 
3670 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
3671 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3672 		    "%s: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", __func__,
3673 		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3674 		    drv_cmd->ioc_loginfo);
3675 	}
3676 
3677 	if (!TAILQ_EMPTY(&sc->delayed_evtack_cmds_list)) {
3678 		delayed_evtack = TAILQ_FIRST(&sc->delayed_evtack_cmds_list);
3679 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3680 		    "%s: processing delayed event ack for event %d\n",
3681 		    __func__, delayed_evtack->event);
3682 		mpi3mr_send_evt_ack(sc, delayed_evtack->event, drv_cmd,
3683 		    delayed_evtack->event_ctx);
3684 		TAILQ_REMOVE(&sc->delayed_evtack_cmds_list, delayed_evtack, list);
3685 		free(delayed_evtack, M_MPI3MR);
3686 		return;
3687 	}
3688 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3689 	drv_cmd->callback = NULL;
3690 	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
3691 }
3692 
/**
 * mpi3mr_send_evt_ack - Issue event acknowledgment request
 * @sc: Adapter instance reference
 * @event: MPI3 event id
 * @cmdparam: Internal command tracker (NULL to allocate one here)
 * @event_ctx: Event context
 *
 * Issues an event acknowledgment request to the firmware if there
 * is a free command tracker to send it; otherwise adds the request
 * to a pend list so that it is processed on completion of a prior
 * event acknowledgment.
 *
 * Return: Nothing
 */
static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;
	U8 retrycount = 5;
	U16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_evtack_node *delayed_evtack = NULL;

	/* A caller-supplied tracker is being reused; skip allocation. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Reserve a free event-ack command slot; retry a few times since
	 * another context may race for the same bit.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx,
			    sc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	/*
	 * No tracker available: queue the ack so it is sent when a prior
	 * acknowledgment completes.  If the node allocation fails
	 * (M_NOWAIT) the acknowledgment is silently dropped.
	 */
	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = malloc(sizeof(*delayed_evtack),M_MPI3MR,
		     M_ZERO | M_NOWAIT);
		if (!delayed_evtack)
			return;
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		TAILQ_INSERT_TAIL(&(sc->delayed_evtack_cmds_list), delayed_evtack, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s : Event ack for event:%d is postponed\n",
		    __func__, event);
		return;
	}
	drv_cmd = &sc->evtack_cmds[cmd_idx];

issue_cmd:
	/* Recompute the slot index from the tracker's host tag. */
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s: Command is in use\n", __func__);
		goto out;
	}
	/* Completion is handled asynchronously by the ack callback. */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.HostTag = htole16(drv_cmd->host_tag);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));

	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Admin Post failed\n", __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Posting failed: return the tracker and its slot to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
}
3773 
3774 /*
3775  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
3776  * @sc: Adapter instance reference
3777  * @event_reply: Event data
3778  *
3779  * Checks for the reason code and based on that either block I/O
3780  * to device, or unblock I/O to the device, or start the device
3781  * removal handshake with reason as remove with the firmware for
3782  * PCIe devices.
3783  *
3784  * Return: Nothing
3785  */
3786 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_softc *sc,
3787 	Mpi3EventNotificationReply_t *event_reply)
3788 {
3789 	Mpi3EventDataPcieTopologyChangeList_t *topo_evt =
3790 	    (Mpi3EventDataPcieTopologyChangeList_t *) event_reply->EventData;
3791 	int i;
3792 	U16 handle;
3793 	U8 reason_code;
3794 	struct mpi3mr_target *tgtdev = NULL;
3795 
3796 	for (i = 0; i < topo_evt->NumEntries; i++) {
3797 		handle = le16toh(topo_evt->PortEntry[i].AttachedDevHandle);
3798 		if (!handle)
3799 			continue;
3800 		reason_code = topo_evt->PortEntry[i].PortStatus;
3801 		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
3802 		switch (reason_code) {
3803 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
3804 			if (tgtdev) {
3805 				tgtdev->dev_removed = 1;
3806 				tgtdev->dev_removedelay = 0;
3807 				mpi3mr_atomic_set(&tgtdev->block_io, 0);
3808 			}
3809 			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
3810 			    MPI3_CTRL_OP_REMOVE_DEVICE);
3811 			break;
3812 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
3813 			if (tgtdev) {
3814 				tgtdev->dev_removedelay = 1;
3815 				mpi3mr_atomic_inc(&tgtdev->block_io);
3816 			}
3817 			break;
3818 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
3819 			if (tgtdev &&
3820 			    tgtdev->dev_removedelay) {
3821 				tgtdev->dev_removedelay = 0;
3822 				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3823 					mpi3mr_atomic_dec(&tgtdev->block_io);
3824 			}
3825 			break;
3826 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
3827 		default:
3828 			break;
3829 		}
3830 	}
3831 }
3832 
3833 /**
3834  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
3835  * @sc: Adapter instance reference
3836  * @event_reply: Event data
3837  *
3838  * Checks for the reason code and based on that either block I/O
3839  * to device, or unblock I/O to the device, or start the device
3840  * removal handshake with reason as remove with the firmware for
3841  * SAS/SATA devices.
3842  *
3843  * Return: Nothing
3844  */
3845 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_softc *sc,
3846 	Mpi3EventNotificationReply_t *event_reply)
3847 {
3848 	Mpi3EventDataSasTopologyChangeList_t *topo_evt =
3849 	    (Mpi3EventDataSasTopologyChangeList_t *)event_reply->EventData;
3850 	int i;
3851 	U16 handle;
3852 	U8 reason_code;
3853 	struct mpi3mr_target *tgtdev = NULL;
3854 
3855 	for (i = 0; i < topo_evt->NumEntries; i++) {
3856 		handle = le16toh(topo_evt->PhyEntry[i].AttachedDevHandle);
3857 		if (!handle)
3858 			continue;
3859 		reason_code = topo_evt->PhyEntry[i].Status &
3860 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
3861 		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
3862 		switch (reason_code) {
3863 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
3864 			if (tgtdev) {
3865 				tgtdev->dev_removed = 1;
3866 				tgtdev->dev_removedelay = 0;
3867 				mpi3mr_atomic_set(&tgtdev->block_io, 0);
3868 			}
3869 			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
3870 			    MPI3_CTRL_OP_REMOVE_DEVICE);
3871 			break;
3872 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
3873 			if (tgtdev) {
3874 				tgtdev->dev_removedelay = 1;
3875 				mpi3mr_atomic_inc(&tgtdev->block_io);
3876 			}
3877 			break;
3878 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
3879 			if (tgtdev &&
3880 			    tgtdev->dev_removedelay) {
3881 				tgtdev->dev_removedelay = 0;
3882 				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3883 					mpi3mr_atomic_dec(&tgtdev->block_io);
3884 			}
3885 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
3886 		default:
3887 			break;
3888 		}
3889 	}
3890 
3891 }
3892 /**
3893  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
3894  * @sc: Adapter instance reference
3895  * @event_reply: Event data
3896  *
3897  * Checks for the reason code and based on that either block I/O
3898  * to device, or unblock I/O to the device, or start the device
3899  * removal handshake with reason as remove/hide acknowledgment
3900  * with the firmware.
3901  *
3902  * Return: Nothing
3903  */
3904 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_softc *sc,
3905 	Mpi3EventNotificationReply_t *event_reply)
3906 {
3907 	U16 dev_handle = 0;
3908 	U8 ublock = 0, block = 0, hide = 0, uhide = 0, delete = 0, remove = 0;
3909 	struct mpi3mr_target *tgtdev = NULL;
3910 	Mpi3EventDataDeviceStatusChange_t *evtdata =
3911 	    (Mpi3EventDataDeviceStatusChange_t *) event_reply->EventData;
3912 
3913 	dev_handle = le16toh(evtdata->DevHandle);
3914 
3915 	switch (evtdata->ReasonCode) {
3916 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
3917 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
3918 		block = 1;
3919 		break;
3920 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
3921 		delete = 1;
3922 		hide = 1;
3923 		break;
3924 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
3925 		uhide = 1;
3926 		break;
3927 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
3928 		delete = 1;
3929 		remove = 1;
3930 		break;
3931 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
3932 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
3933 		ublock = 1;
3934 		break;
3935 	default:
3936 		break;
3937 	}
3938 
3939 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
3940 
3941 	if (!tgtdev) {
3942 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :target with dev_handle:0x%x not found\n",
3943 		    __func__, dev_handle);
3944 		return;
3945 	}
3946 
3947 	if (block)
3948 		mpi3mr_atomic_inc(&tgtdev->block_io);
3949 
3950 	if (hide)
3951 		tgtdev->is_hidden = hide;
3952 
3953 	if (uhide) {
3954 		tgtdev->is_hidden = 0;
3955 		tgtdev->dev_removed = 0;
3956 	}
3957 
3958 	if (delete)
3959 		tgtdev->dev_removed = 1;
3960 
3961 	if (ublock) {
3962 		if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3963 			mpi3mr_atomic_dec(&tgtdev->block_io);
3964 	}
3965 
3966 	if (remove) {
3967 		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
3968 					MPI3_CTRL_OP_REMOVE_DEVICE);
3969 	}
3970 	if (hide)
3971 		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
3972 					MPI3_CTRL_OP_HIDDEN_ACK);
3973 }
3974 
3975 /**
3976  * mpi3mr_preparereset_evt_th - Prepareforreset evt tophalf
3977  * @sc: Adapter instance reference
3978  * @event_reply: Event data
3979  *
3980  * Blocks and unblocks host level I/O based on the reason code
3981  *
3982  * Return: Nothing
3983  */
3984 static void mpi3mr_preparereset_evt_th(struct mpi3mr_softc *sc,
3985 	Mpi3EventNotificationReply_t *event_reply)
3986 {
3987 	Mpi3EventDataPrepareForReset_t *evtdata =
3988 	    (Mpi3EventDataPrepareForReset_t *)event_reply->EventData;
3989 
3990 	if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_START) {
3991 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Recieved PrepForReset Event with RC=START\n",
3992 		    __func__);
3993 		if (sc->prepare_for_reset)
3994 			return;
3995 		sc->prepare_for_reset = 1;
3996 		sc->prepare_for_reset_timeout_counter = 0;
3997 	} else if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
3998 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Recieved PrepForReset Event with RC=ABORT\n",
3999 		    __func__);
4000 		sc->prepare_for_reset = 0;
4001 		sc->prepare_for_reset_timeout_counter = 0;
4002 	}
4003 	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
4004 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
4005 		mpi3mr_send_evt_ack(sc, event_reply->Event, NULL,
4006 		    le32toh(event_reply->EventContext));
4007 }
4008 
4009 /**
4010  * mpi3mr_energypackchg_evt_th - Energypackchange evt tophalf
4011  * @sc: Adapter instance reference
4012  * @event_reply: Event data
4013  *
4014  * Identifies the new shutdown timeout value and update.
4015  *
4016  * Return: Nothing
4017  */
4018 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_softc *sc,
4019 	Mpi3EventNotificationReply_t *event_reply)
4020 {
4021 	Mpi3EventDataEnergyPackChange_t *evtdata =
4022 	    (Mpi3EventDataEnergyPackChange_t *)event_reply->EventData;
4023 	U16 shutdown_timeout = le16toh(evtdata->ShutdownTimeout);
4024 
4025 	if (shutdown_timeout <= 0) {
4026 		mpi3mr_dprint(sc, MPI3MR_ERROR,
4027 		    "%s :Invalid Shutdown Timeout received = %d\n",
4028 		    __func__, shutdown_timeout);
4029 		return;
4030 	}
4031 
4032 	mpi3mr_dprint(sc, MPI3MR_EVENT,
4033 	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
4034 	    __func__, sc->facts.shutdown_timeout, shutdown_timeout);
4035 	sc->facts.shutdown_timeout = shutdown_timeout;
4036 }
4037 
4038 /**
4039  * mpi3mr_cablemgmt_evt_th - Cable mgmt evt tophalf
4040  * @sc: Adapter instance reference
4041  * @event_reply: Event data
4042  *
4043  * Displays Cable manegemt event details.
4044  *
4045  * Return: Nothing
4046  */
4047 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_softc *sc,
4048 	Mpi3EventNotificationReply_t *event_reply)
4049 {
4050 	Mpi3EventDataCableManagement_t *evtdata =
4051 	    (Mpi3EventDataCableManagement_t *)event_reply->EventData;
4052 
4053 	switch (evtdata->Status) {
4054 	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
4055 	{
4056 		mpi3mr_dprint(sc, MPI3MR_INFO, "An active cable with ReceptacleID %d cannot be powered.\n"
4057 		    "Devices connected to this cable are not detected.\n"
4058 		    "This cable requires %d mW of power.\n",
4059 		    evtdata->ReceptacleID,
4060 		    le32toh(evtdata->ActiveCablePowerRequirement));
4061 		break;
4062 	}
4063 	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
4064 	{
4065 		mpi3mr_dprint(sc, MPI3MR_INFO, "A cable with ReceptacleID %d is not running at optimal speed\n",
4066 		    evtdata->ReceptacleID);
4067 		break;
4068 	}
4069 	default:
4070 		break;
4071 	}
4072 }
4073 
/**
 * mpi3mr_process_events - Event's top-half handler
 * @sc: Adapter instance reference
 * @data: Reply frame DMA address (opaque; currently unused here)
 * @event_reply: Event data
 *
 * Top half of event processing: runs the per-event top-half handler
 * and, when bottom-half processing or a firmware acknowledgment is
 * required, snapshots the event data and queues it to the firmware
 * event taskqueue.
 *
 * Return: Nothing
 */
static void mpi3mr_process_events(struct mpi3mr_softc *sc,
    uintptr_t data, Mpi3EventNotificationReply_t *event_reply)
{
	U16 evt_type;
	bool ack_req = 0, process_evt_bh = 0;
	struct mpi3mr_fw_event_work *fw_event;
	U16 sz;

	/* No event processing once the controller is shutting down. */
	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
		goto out;

	/* Firmware may require an explicit acknowledgment of this event. */
	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->Event;

	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
			(Mpi3DevicePage0_t *) event_reply->EventData;
		/* Bottom half runs only if the device object was created. */
		if (mpi3mr_create_device(sc, dev_pg0))
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			"%s :Failed to add device in the device add event\n",
			__func__);
		else
			process_evt_bh = 1;
		break;
	}

	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		/* The top-half handler sends the ack itself; suppress it here. */
		mpi3mr_preparereset_evt_th(sc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	case MPI3_EVENT_LOG_DATA:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(sc, event_reply);
		break;
	}

	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Event 0x%02x is not handled by driver\n",
		    __func__, evt_type);
		break;
	}

	if (process_evt_bh || ack_req) {
		fw_event = malloc(sizeof(struct mpi3mr_fw_event_work), M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!fw_event) {
			printf("%s: allocate failed for fw_event\n", __func__);
			return;
		}

		/* EventDataLength is in 4-byte (dword) units. */
		sz = le16toh(event_reply->EventDataLength) * 4;
		fw_event->event_data = malloc(sz, M_MPI3MR, M_ZERO|M_NOWAIT);

		if (!fw_event->event_data) {
			printf("%s: allocate failed for event_data\n", __func__);
			free(fw_event, M_MPI3MR);
			return;
		}

		/* Snapshot the event data; the reply frame is reposted soon. */
		bcopy(event_reply->EventData, fw_event->event_data, sz);
		fw_event->event = event_reply->Event;
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE ) &&
		    sc->track_mapping_events)
			sc->pending_map_events++;

		/*
		 * Events should be processed after Port enable is completed.
		 */
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ) &&
		    !(sc->mpi3mr_flags & MPI3MR_FLAGS_PORT_ENABLE_DONE))
			mpi3mr_startup_increment(sc->cam_sc);

		fw_event->send_ack = ack_req;
		fw_event->event_context = le32toh(event_reply->EventContext);
		fw_event->event_data_size = sz;
		fw_event->process_event = process_evt_bh;

		/* Hand the snapshot to the bottom-half taskqueue. */
		mtx_lock(&sc->fwevt_lock);
		TAILQ_INSERT_TAIL(&sc->cam_sc->ev_queue, fw_event, ev_link);
		taskqueue_enqueue(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
		mtx_unlock(&sc->fwevt_lock);

	}
out:
	return;
}
4215 
4216 static void mpi3mr_handle_events(struct mpi3mr_softc *sc, uintptr_t data,
4217     Mpi3DefaultReply_t *def_reply)
4218 {
4219 	Mpi3EventNotificationReply_t *event_reply =
4220 		(Mpi3EventNotificationReply_t *)def_reply;
4221 
4222 	sc->change_count = event_reply->IOCChangeCount;
4223 	mpi3mr_display_event_data(sc, event_reply);
4224 
4225 	mpi3mr_process_events(sc, data, event_reply);
4226 }
4227 
4228 static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
4229     Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
4230 {
4231 	U16 reply_desc_type, host_tag = 0, idx;
4232 	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
4233 	U32 ioc_loginfo = 0;
4234 	Mpi3StatusReplyDescriptor_t *status_desc;
4235 	Mpi3AddressReplyDescriptor_t *addr_desc;
4236 	Mpi3SuccessReplyDescriptor_t *success_desc;
4237 	Mpi3DefaultReply_t *def_reply = NULL;
4238 	struct mpi3mr_drvr_cmd *cmdptr = NULL;
4239 	Mpi3SCSIIOReply_t *scsi_reply;
4240 	U8 *sense_buf = NULL;
4241 
4242 	*reply_dma = 0;
4243 	reply_desc_type = reply_desc->ReplyFlags &
4244 			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
4245 	switch (reply_desc_type) {
4246 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
4247 		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
4248 		host_tag = status_desc->HostTag;
4249 		ioc_status = status_desc->IOCStatus;
4250 		if (ioc_status &
4251 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4252 			ioc_loginfo = status_desc->IOCLogInfo;
4253 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4254 		break;
4255 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
4256 		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
4257 		*reply_dma = addr_desc->ReplyFrameAddress;
4258 		def_reply = mpi3mr_get_reply_virt_addr(sc, *reply_dma);
4259 		if (def_reply == NULL)
4260 			goto out;
4261 		host_tag = def_reply->HostTag;
4262 		ioc_status = def_reply->IOCStatus;
4263 		if (ioc_status &
4264 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4265 			ioc_loginfo = def_reply->IOCLogInfo;
4266 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4267 		if (def_reply->Function == MPI3_FUNCTION_SCSI_IO) {
4268 			scsi_reply = (Mpi3SCSIIOReply_t *)def_reply;
4269 			sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
4270 			    scsi_reply->SenseDataBufferAddress);
4271 		}
4272 		break;
4273 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
4274 		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
4275 		host_tag = success_desc->HostTag;
4276 		break;
4277 	default:
4278 		break;
4279 	}
4280 	switch (host_tag) {
4281 	case MPI3MR_HOSTTAG_INITCMDS:
4282 		cmdptr = &sc->init_cmds;
4283 		break;
4284 	case MPI3MR_HOSTTAG_IOCTLCMDS:
4285 		cmdptr = &sc->ioctl_cmds;
4286 		break;
4287 	case MPI3MR_HOSTTAG_TMS:
4288 		cmdptr = &sc->host_tm_cmds;
4289 		wakeup((void *)&sc->tm_chan);
4290 		break;
4291 	case MPI3MR_HOSTTAG_PELABORT:
4292 		cmdptr = &sc->pel_abort_cmd;
4293 		break;
4294 	case MPI3MR_HOSTTAG_PELWAIT:
4295 		cmdptr = &sc->pel_cmds;
4296 		break;
4297 	case MPI3MR_HOSTTAG_INVALID:
4298 		if (def_reply && def_reply->Function ==
4299 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
4300 			mpi3mr_handle_events(sc, *reply_dma ,def_reply);
4301 	default:
4302 		break;
4303 	}
4304 
4305 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
4306 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX ) {
4307 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
4308 		cmdptr = &sc->dev_rmhs_cmds[idx];
4309 	}
4310 
4311 	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
4312 	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
4313 		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
4314 		cmdptr = &sc->evtack_cmds[idx];
4315 	}
4316 
4317 	if (cmdptr) {
4318 		if (cmdptr->state & MPI3MR_CMD_PENDING) {
4319 			cmdptr->state |= MPI3MR_CMD_COMPLETE;
4320 			cmdptr->ioc_loginfo = ioc_loginfo;
4321 			cmdptr->ioc_status = ioc_status;
4322 			cmdptr->state &= ~MPI3MR_CMD_PENDING;
4323 			if (def_reply) {
4324 				cmdptr->state |= MPI3MR_CMD_REPLYVALID;
4325 				memcpy((U8 *)cmdptr->reply, (U8 *)def_reply,
4326 				    sc->reply_sz);
4327 			}
4328 			if (sense_buf && cmdptr->sensebuf) {
4329 				cmdptr->is_senseprst = 1;
4330 				memcpy(cmdptr->sensebuf, sense_buf,
4331 				    MPI3MR_SENSEBUF_SZ);
4332 			}
4333 			if (cmdptr->is_waiting) {
4334 				complete(&cmdptr->completion);
4335 				cmdptr->is_waiting = 0;
4336 			} else if (cmdptr->callback)
4337 				cmdptr->callback(sc, cmdptr);
4338 		}
4339 	}
4340 out:
4341 	if (sense_buf != NULL)
4342 		mpi3mr_repost_sense_buf(sc,
4343 		    scsi_reply->SenseDataBufferAddress);
4344 	return;
4345 }
4346 
/*
 * mpi3mr_complete_admin_cmd:	ISR routine for admin commands
 * @sc:				Adapter's soft instance
 *
 * This function processes admin command completions: it walks the
 * admin reply queue from the current consumer index while each
 * descriptor's phase bit matches the expected phase, processes the
 * descriptor, reposts the reply frame and finally publishes the new
 * consumer index to the controller.
 *
 * The admin_in_use flag (guarded by admin_reply_lock) ensures only
 * one context drains the queue at a time; a concurrent caller
 * returns 0 immediately.
 *
 * Returns the number of admin replies processed (0 when the queue is
 * empty or another context is already draining it).
 */
static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
{
	U32 exp_phase = sc->admin_reply_ephase;
	U32 adm_reply_ci = sc->admin_reply_ci;
	U32 num_adm_reply = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;

	/* Claim exclusive access to the admin reply queue. */
	mtx_lock_spin(&sc->admin_reply_lock);
	if (sc->admin_in_use == false) {
		sc->admin_in_use = true;
		mtx_unlock_spin(&sc->admin_reply_lock);
	} else {
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
		adm_reply_ci;

	/* Phase bit mismatch means no new descriptor at the CI. */
	if ((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		mtx_lock_spin(&sc->admin_reply_lock);
		sc->admin_in_use = false;
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	do {
		sc->admin_req_ci = reply_desc->RequestQueueCI;
		mpi3mr_process_admin_reply_desc(sc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_adm_reply++;
		/* Wrap the CI and flip the expected phase at queue end. */
		if (++adm_reply_ci == sc->num_admin_replies) {
			adm_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
			(Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
			    adm_reply_ci;
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	/* Publish the new consumer index to the controller. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
	sc->admin_reply_ci = adm_reply_ci;
	sc->admin_reply_ephase = exp_phase;
	mtx_lock_spin(&sc->admin_reply_lock);
	sc->admin_in_use = false;
	mtx_unlock_spin(&sc->admin_reply_lock);
	return num_adm_reply;
}
4407 
4408 static void
4409 mpi3mr_cmd_done(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
4410 {
4411 	mpi3mr_unmap_request(sc, cmd);
4412 
4413 	mtx_lock(&sc->mpi3mr_mtx);
4414 	if (cmd->callout_owner) {
4415 		callout_stop(&cmd->callout);
4416 		cmd->callout_owner = false;
4417 	}
4418 
4419 	if (sc->unrecoverable)
4420 		mpi3mr_set_ccbstatus(cmd->ccb, CAM_DEV_NOT_THERE);
4421 
4422 	xpt_done(cmd->ccb);
4423 	cmd->ccb = NULL;
4424 	mtx_unlock(&sc->mpi3mr_mtx);
4425 	mpi3mr_release_command(cmd);
4426 }
4427 
4428 void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
4429     Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
4430 {
4431 	U16 reply_desc_type, host_tag = 0;
4432 	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
4433 	U32 ioc_loginfo = 0;
4434 	Mpi3StatusReplyDescriptor_t *status_desc = NULL;
4435 	Mpi3AddressReplyDescriptor_t *addr_desc = NULL;
4436 	Mpi3SuccessReplyDescriptor_t *success_desc = NULL;
4437 	Mpi3SCSIIOReply_t *scsi_reply = NULL;
4438 	U8 *sense_buf = NULL;
4439 	U8 scsi_state = 0, scsi_status = 0, sense_state = 0;
4440 	U32 xfer_count = 0, sense_count =0, resp_data = 0;
4441 	struct mpi3mr_cmd *cm = NULL;
4442 	union ccb *ccb;
4443 	struct ccb_scsiio *csio;
4444 	struct mpi3mr_cam_softc *cam_sc;
4445 	U32 target_id;
4446 	U8 *scsi_cdb;
4447 	struct mpi3mr_target *target = NULL;
4448 	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
4449 	struct mpi3mr_throttle_group_info *tg = NULL;
4450 	U8 throttle_enabled_dev = 0;
4451 	static int ratelimit;
4452 
4453 	*reply_dma = 0;
4454 	reply_desc_type = reply_desc->ReplyFlags &
4455 			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
4456 	switch (reply_desc_type) {
4457 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
4458 		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
4459 		host_tag = status_desc->HostTag;
4460 		ioc_status = status_desc->IOCStatus;
4461 		if (ioc_status &
4462 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4463 			ioc_loginfo = status_desc->IOCLogInfo;
4464 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4465 		break;
4466 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
4467 		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
4468 		*reply_dma = addr_desc->ReplyFrameAddress;
4469 		scsi_reply = mpi3mr_get_reply_virt_addr(sc,
4470 		    *reply_dma);
4471 		if (scsi_reply == NULL) {
4472 			mpi3mr_dprint(sc, MPI3MR_ERROR, "scsi_reply is NULL, "
4473 			    "this shouldn't happen, reply_desc: %p\n",
4474 			    reply_desc);
4475 			goto out;
4476 		}
4477 
4478 		host_tag = scsi_reply->HostTag;
4479 		ioc_status = scsi_reply->IOCStatus;
4480 		scsi_status = scsi_reply->SCSIStatus;
4481 		scsi_state = scsi_reply->SCSIState;
4482 		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
4483 		xfer_count = scsi_reply->TransferCount;
4484 		sense_count = scsi_reply->SenseCount;
4485 		resp_data = scsi_reply->ResponseData;
4486 		sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
4487 		    scsi_reply->SenseDataBufferAddress);
4488 		if (ioc_status &
4489 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4490 			ioc_loginfo = scsi_reply->IOCLogInfo;
4491 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4492 		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
4493 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Ran out of sense buffers\n");
4494 
4495 		break;
4496 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
4497 		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
4498 		host_tag = success_desc->HostTag;
4499 
4500 	default:
4501 		break;
4502 	}
4503 
4504 	cm = sc->cmd_list[host_tag];
4505 
4506 	if (cm->state == MPI3MR_CMD_STATE_FREE)
4507 		goto out;
4508 
4509 	cam_sc = sc->cam_sc;
4510 	ccb = cm->ccb;
4511 	csio = &ccb->csio;
4512 	target_id = csio->ccb_h.target_id;
4513 
4514 	scsi_cdb = scsiio_cdb_ptr(csio);
4515 
4516 	target = mpi3mr_find_target_by_per_id(cam_sc, target_id);
4517 	if (sc->iot_enable) {
4518 		data_len_blks = csio->dxfer_len >> 9;
4519 
4520 		if (target) {
4521 			tg = target->throttle_group;
4522 			throttle_enabled_dev =
4523 				target->io_throttle_enabled;
4524 		}
4525 
4526 		if ((data_len_blks >= sc->io_throttle_data_length) &&
4527 		     throttle_enabled_dev) {
4528 			mpi3mr_atomic_sub(&sc->pend_large_data_sz, data_len_blks);
4529 			ioc_pend_data_len = mpi3mr_atomic_read(
4530 			    &sc->pend_large_data_sz);
4531 			if (tg) {
4532 				mpi3mr_atomic_sub(&tg->pend_large_data_sz,
4533 					data_len_blks);
4534 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
4535 				if (ratelimit % 1000) {
4536 					mpi3mr_dprint(sc, MPI3MR_IOT,
4537 						"large vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
4538 						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
4539 						    target->per_id,
4540 						    target->dev_handle,
4541 						    data_len_blks, ioc_pend_data_len,
4542 						    tg_pend_data_len,
4543 						    sc->io_throttle_low,
4544 						    tg->low);
4545 					ratelimit++;
4546 				}
4547 				if (tg->io_divert  && ((ioc_pend_data_len <=
4548 				    sc->io_throttle_low) &&
4549 				    (tg_pend_data_len <= tg->low))) {
4550 					tg->io_divert = 0;
4551 					mpi3mr_dprint(sc, MPI3MR_IOT,
4552 						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
4553 						target->per_id, tg->id);
4554 					mpi3mr_set_io_divert_for_all_vd_in_tg(
4555 					    sc, tg, 0);
4556 				}
4557 			} else {
4558 				if (ratelimit % 1000) {
4559 					mpi3mr_dprint(sc, MPI3MR_IOT,
4560 					    "large pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
4561 					    target->per_id,
4562 					    target->dev_handle,
4563 					    data_len_blks, ioc_pend_data_len,
4564 					    sc->io_throttle_low);
4565 					ratelimit++;
4566 				}
4567 
4568 				if (ioc_pend_data_len <= sc->io_throttle_low) {
4569 					target->io_divert = 0;
4570 					mpi3mr_dprint(sc, MPI3MR_IOT,
4571 						"PD: Coming out of divert perst_id(%d)\n",
4572 						target->per_id);
4573 				}
4574 			}
4575 
4576 			} else if (target->io_divert) {
4577 			ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
4578 			if (!tg) {
4579 				if (ratelimit % 1000) {
4580 					mpi3mr_dprint(sc, MPI3MR_IOT,
4581 					    "pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
4582 					    target->per_id,
4583 					    target->dev_handle,
4584 					    data_len_blks, ioc_pend_data_len,
4585 					    sc->io_throttle_low);
4586 					ratelimit++;
4587 				}
4588 
4589 				if ( ioc_pend_data_len <= sc->io_throttle_low) {
4590 					mpi3mr_dprint(sc, MPI3MR_IOT,
4591 						"PD: Coming out of divert perst_id(%d)\n",
4592 						target->per_id);
4593 					target->io_divert = 0;
4594 				}
4595 
4596 			} else if (ioc_pend_data_len <= sc->io_throttle_low) {
4597 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
4598 				if (ratelimit % 1000) {
4599 					mpi3mr_dprint(sc, MPI3MR_IOT,
4600 						"vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
4601 						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
4602 						    target->per_id,
4603 						    target->dev_handle,
4604 						    data_len_blks, ioc_pend_data_len,
4605 						    tg_pend_data_len,
4606 						    sc->io_throttle_low,
4607 						    tg->low);
4608 					ratelimit++;
4609 				}
4610 				if (tg->io_divert  && (tg_pend_data_len <= tg->low)) {
4611 					tg->io_divert = 0;
4612 					mpi3mr_dprint(sc, MPI3MR_IOT,
4613 						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
4614 						target->per_id, tg->id);
4615 					mpi3mr_set_io_divert_for_all_vd_in_tg(
4616 					    sc, tg, 0);
4617 				}
4618 
4619 			}
4620 		}
4621 	}
4622 
4623 	if (success_desc) {
4624 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4625 		goto out_success;
4626 	}
4627 
4628 	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN
4629 	    && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
4630 	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
4631 	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
4632 		ioc_status = MPI3_IOCSTATUS_SUCCESS;
4633 
4634 	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count
4635 	    && sense_buf) {
4636 		int sense_len, returned_sense_len;
4637 
4638 		returned_sense_len = min(le32toh(sense_count),
4639 		    sizeof(struct scsi_sense_data));
4640 		if (returned_sense_len < csio->sense_len)
4641 			csio->sense_resid = csio->sense_len -
4642 			    returned_sense_len;
4643 		else
4644 			csio->sense_resid = 0;
4645 
4646 		sense_len = min(returned_sense_len,
4647 		    csio->sense_len - csio->sense_resid);
4648 		bzero(&csio->sense_data, sizeof(csio->sense_data));
4649 		bcopy(sense_buf, &csio->sense_data, sense_len);
4650 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4651 	}
4652 
4653 	switch (ioc_status) {
4654 	case MPI3_IOCSTATUS_BUSY:
4655 	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
4656 		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
4657 		break;
4658 	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4659 		/*
4660 		 * If devinfo is 0 this will be a volume.  In that case don't
4661 		 * tell CAM that the volume is not there.  We want volumes to
4662 		 * be enumerated until they are deleted/removed, not just
4663 		 * failed.
4664 		 */
4665 		if (cm->targ->devinfo == 0)
4666 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4667 		else
4668 			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
4669 		break;
4670 	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
4671 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
4672 	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
4673 		mpi3mr_set_ccbstatus(ccb, CAM_SCSI_BUSY);
4674 		mpi3mr_dprint(sc, MPI3MR_TRACE,
4675 		    "func: %s line:%d tgt %u Hosttag %u loginfo %x\n",
4676 		    __func__, __LINE__,
4677 		    target_id, cm->hosttag,
4678 		    le32toh(scsi_reply->IOCLogInfo));
4679 		mpi3mr_dprint(sc, MPI3MR_TRACE,
4680 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
4681 		    scsi_reply->SCSIStatus, scsi_reply->SCSIState,
4682 		    le32toh(xfer_count));
4683 		break;
4684 	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
4685 		/* resid is ignored for this condition */
4686 		csio->resid = 0;
4687 		mpi3mr_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
4688 		break;
4689 	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
4690 		csio->resid = cm->length - le32toh(xfer_count);
4691 	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
4692 	case MPI3_IOCSTATUS_SUCCESS:
4693 		if ((scsi_reply->IOCStatus & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK) ==
4694 		    MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR)
4695 			mpi3mr_dprint(sc, MPI3MR_XINFO, "func: %s line: %d recovered error\n",  __func__, __LINE__);
4696 
4697 		/* Completion failed at the transport level. */
4698 		if (scsi_reply->SCSIState & (MPI3_SCSI_STATE_NO_SCSI_STATUS |
4699 		    MPI3_SCSI_STATE_TERMINATED)) {
4700 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
4701 			break;
4702 		}
4703 
4704 		/* In a modern packetized environment, an autosense failure
4705 		 * implies that there's not much else that can be done to
4706 		 * recover the command.
4707 		 */
4708 		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
4709 			mpi3mr_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
4710 			break;
4711 		}
4712 
4713 		/*
4714 		 * Intentionally override the normal SCSI status reporting
4715 		 * for these two cases.  These are likely to happen in a
4716 		 * multi-initiator environment, and we want to make sure that
4717 		 * CAM retries these commands rather than fail them.
4718 		 */
4719 		if ((scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_COMMAND_TERMINATED) ||
4720 		    (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_TASK_ABORTED)) {
4721 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_ABORTED);
4722 			break;
4723 		}
4724 
4725 		/* Handle normal status and sense */
4726 		csio->scsi_status = scsi_reply->SCSIStatus;
4727 		if (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_GOOD)
4728 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4729 		else
4730 			mpi3mr_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
4731 
4732 		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
4733 			int sense_len, returned_sense_len;
4734 
4735 			returned_sense_len = min(le32toh(scsi_reply->SenseCount),
4736 			    sizeof(struct scsi_sense_data));
4737 			if (returned_sense_len < csio->sense_len)
4738 				csio->sense_resid = csio->sense_len -
4739 				    returned_sense_len;
4740 			else
4741 				csio->sense_resid = 0;
4742 
4743 			sense_len = min(returned_sense_len,
4744 			    csio->sense_len - csio->sense_resid);
4745 			bzero(&csio->sense_data, sizeof(csio->sense_data));
4746 			bcopy(cm->sense, &csio->sense_data, sense_len);
4747 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4748 		}
4749 
4750 		break;
4751 	case MPI3_IOCSTATUS_INVALID_SGL:
4752 		mpi3mr_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
4753 		break;
4754 	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
4755 	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
4756 	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
4757 	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4758 	case MPI3_IOCSTATUS_INVALID_FUNCTION:
4759 	case MPI3_IOCSTATUS_INTERNAL_ERROR:
4760 	case MPI3_IOCSTATUS_INVALID_FIELD:
4761 	case MPI3_IOCSTATUS_INVALID_STATE:
4762 	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
4763 	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4764 	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
4765 	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4766 	default:
4767 		csio->resid = cm->length;
4768 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
4769 		break;
4770 	}
4771 
4772 out_success:
4773 	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_CMP) {
4774 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
4775 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
4776 	}
4777 
4778 	mpi3mr_atomic_dec(&cm->targ->outstanding);
4779 	mpi3mr_cmd_done(sc, cm);
4780 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Completion IO path :"
4781 		" cdb[0]: %x targetid: 0x%x SMID: %x ioc_status: 0x%x ioc_loginfo: 0x%x scsi_status: 0x%x "
4782 		"scsi_state: 0x%x response_data: 0x%x\n", scsi_cdb[0], target_id, host_tag,
4783 		ioc_status, ioc_loginfo, scsi_status, scsi_state, resp_data);
4784 	mpi3mr_atomic_dec(&sc->fw_outstanding);
4785 out:
4786 
4787 	if (sense_buf)
4788 		mpi3mr_repost_sense_buf(sc,
4789 		    scsi_reply->SenseDataBufferAddress);
4790 	return;
4791 }
4792 
4793 /*
4794  * mpi3mr_complete_io_cmd:	ISR routine for IO commands
4795  * @sc:				Adapter's soft instance
4796  * @irq_ctx:			Driver's internal per IRQ structure
4797  *
4798  * This function processes IO command completions.
4799  */
4800 int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
4801     struct mpi3mr_irq_context *irq_ctx)
4802 {
4803 	struct mpi3mr_op_reply_queue *op_reply_q = irq_ctx->op_reply_q;
4804 	U32 exp_phase = op_reply_q->ephase;
4805 	U32 reply_ci = op_reply_q->ci;
4806 	U32 num_op_replies = 0;
4807 	U64 reply_dma = 0;
4808 	Mpi3DefaultReplyDescriptor_t *reply_desc;
4809 	U16 req_qid = 0;
4810 
4811 	mtx_lock_spin(&op_reply_q->q_lock);
4812 	if (op_reply_q->in_use == false) {
4813 		op_reply_q->in_use = true;
4814 		mtx_unlock_spin(&op_reply_q->q_lock);
4815 	} else {
4816 		mtx_unlock_spin(&op_reply_q->q_lock);
4817 		return 0;
4818 	}
4819 
4820 	reply_desc = (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
4821 	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]:reply_desc: (%pa) reply_ci: %x"
4822 		" reply_desc->ReplyFlags: 0x%x\n"
4823 		"reply_q_base_phys: %#016jx reply_q_base: (%pa) exp_phase: %x\n",
4824 		op_reply_q->qid, reply_desc, reply_ci, reply_desc->ReplyFlags, op_reply_q->q_base_phys,
4825 		op_reply_q->q_base, exp_phase);
4826 
4827 	if (((reply_desc->ReplyFlags &
4828 	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) || !op_reply_q->qid) {
4829 		mtx_lock_spin(&op_reply_q->q_lock);
4830 		op_reply_q->in_use = false;
4831 		mtx_unlock_spin(&op_reply_q->q_lock);
4832 		return 0;
4833 	}
4834 
4835 	do {
4836 		req_qid = reply_desc->RequestQueueID;
4837 		sc->op_req_q[req_qid - 1].ci =
4838 		    reply_desc->RequestQueueCI;
4839 
4840 		mpi3mr_process_op_reply_desc(sc, reply_desc, &reply_dma);
4841 		mpi3mr_atomic_dec(&op_reply_q->pend_ios);
4842 		if (reply_dma)
4843 			mpi3mr_repost_reply_buf(sc, reply_dma);
4844 		num_op_replies++;
4845 		if (++reply_ci == op_reply_q->num_replies) {
4846 			reply_ci = 0;
4847 			exp_phase ^= 1;
4848 		}
4849 		reply_desc =
4850 		    (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
4851 		if ((reply_desc->ReplyFlags &
4852 		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
4853 			break;
4854 	} while (1);
4855 
4856 
4857 	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
4858 	op_reply_q->ci = reply_ci;
4859 	op_reply_q->ephase = exp_phase;
4860 	mtx_lock_spin(&op_reply_q->q_lock);
4861 	op_reply_q->in_use = false;
4862 	mtx_unlock_spin(&op_reply_q->q_lock);
4863 	return num_op_replies;
4864 }
4865 
4866 /*
4867  * mpi3mr_isr:			Primary ISR function
4868  * privdata:			Driver's internal per IRQ structure
4869  *
4870  * This is driver's primary ISR function which is being called whenever any admin/IO
4871  * command completion.
4872  */
4873 void mpi3mr_isr(void *privdata)
4874 {
4875 	struct mpi3mr_irq_context *irq_ctx = (struct mpi3mr_irq_context *)privdata;
4876 	struct mpi3mr_softc *sc = irq_ctx->sc;
4877 	U16 msi_idx;
4878 
4879 	if (!irq_ctx)
4880 		return;
4881 
4882 	msi_idx = irq_ctx->msix_index;
4883 
4884 	if (!sc->intr_enabled)
4885 		return;
4886 
4887 	if (!msi_idx)
4888 		mpi3mr_complete_admin_cmd(sc);
4889 
4890 	if (irq_ctx->op_reply_q && irq_ctx->op_reply_q->qid) {
4891 		mpi3mr_complete_io_cmd(sc, irq_ctx);
4892 	}
4893 }
4894 
4895 /*
4896  * mpi3mr_alloc_requests - Allocates host commands
4897  * @sc: Adapter reference
4898  *
4899  * This function allocates controller supported host commands
4900  *
4901  * Return: 0 on success and proper error codes on failure
4902  */
4903 int
4904 mpi3mr_alloc_requests(struct mpi3mr_softc *sc)
4905 {
4906 	struct mpi3mr_cmd *cmd;
4907 	int i, j, nsegs, ret;
4908 
4909 	nsegs = MPI3MR_SG_DEPTH;
4910 	ret = bus_dma_tag_create( sc->mpi3mr_parent_dmat,    /* parent */
4911 				1, 0,			/* algnmnt, boundary */
4912 				BUS_SPACE_MAXADDR,	/* lowaddr */
4913 				BUS_SPACE_MAXADDR,	/* highaddr */
4914 				NULL, NULL,		/* filter, filterarg */
4915 				MAXPHYS,/* maxsize */
4916                                 nsegs,			/* nsegments */
4917 				MAXPHYS,/* maxsegsize */
4918                                 BUS_DMA_ALLOCNOW,	/* flags */
4919                                 busdma_lock_mutex,	/* lockfunc */
4920 				&sc->io_lock,	/* lockarg */
4921 				&sc->buffer_dmat);
4922 	if (ret) {
4923 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate buffer DMA tag ret: %d\n", ret);
4924 		return (ENOMEM);
4925         }
4926 
4927 	/*
4928 	 * sc->cmd_list is an array of struct mpi3mr_cmd pointers.
4929 	 * Allocate the dynamic array first and then allocate individual
4930 	 * commands.
4931 	 */
4932 	sc->cmd_list = malloc(sizeof(struct mpi3mr_cmd *) * sc->max_host_ios,
4933 	    M_MPI3MR, M_NOWAIT | M_ZERO);
4934 
4935 	if (!sc->cmd_list) {
4936 		device_printf(sc->mpi3mr_dev, "Cannot alloc memory for mpt_cmd_list.\n");
4937 		return (ENOMEM);
4938 	}
4939 
4940 	for (i = 0; i < sc->max_host_ios; i++) {
4941 		sc->cmd_list[i] = malloc(sizeof(struct mpi3mr_cmd),
4942 		    M_MPI3MR, M_NOWAIT | M_ZERO);
4943 		if (!sc->cmd_list[i]) {
4944 			for (j = 0; j < i; j++)
4945 				free(sc->cmd_list[j], M_MPI3MR);
4946 			free(sc->cmd_list, M_MPI3MR);
4947 			sc->cmd_list = NULL;
4948 			return (ENOMEM);
4949 		}
4950 	}
4951 
4952 	for (i = 1; i < sc->max_host_ios; i++) {
4953 		cmd = sc->cmd_list[i];
4954 		cmd->hosttag = i;
4955 		cmd->sc = sc;
4956 		cmd->state = MPI3MR_CMD_STATE_BUSY;
4957 		callout_init_mtx(&cmd->callout, &sc->mpi3mr_mtx, 0);
4958 		cmd->ccb = NULL;
4959 		TAILQ_INSERT_TAIL(&(sc->cmd_list_head), cmd, next);
4960 		if (bus_dmamap_create(sc->buffer_dmat, 0, &cmd->dmamap))
4961 			return ENOMEM;
4962 	}
4963 	return (0);
4964 }
4965 
4966 /*
4967  * mpi3mr_get_command:		Get a coomand structure from free command pool
4968  * @sc:				Adapter soft instance
4969  * Return:			MPT command reference
4970  *
4971  * This function returns an MPT command to the caller.
4972  */
4973 struct mpi3mr_cmd *
4974 mpi3mr_get_command(struct mpi3mr_softc *sc)
4975 {
4976 	struct mpi3mr_cmd *cmd = NULL;
4977 
4978 	mtx_lock(&sc->cmd_pool_lock);
4979 	if (!TAILQ_EMPTY(&sc->cmd_list_head)) {
4980 		cmd = TAILQ_FIRST(&sc->cmd_list_head);
4981 		TAILQ_REMOVE(&sc->cmd_list_head, cmd, next);
4982 	} else {
4983 		goto out;
4984 	}
4985 
4986 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Get command SMID: 0x%x\n", cmd->hosttag);
4987 
4988 	memset((uint8_t *)&cmd->io_request, 0, MPI3MR_AREQ_FRAME_SZ);
4989 	cmd->data_dir = 0;
4990 	cmd->ccb = NULL;
4991 	cmd->targ = NULL;
4992 	cmd->max_segs = 0;
4993 	cmd->lun = 0;
4994 	cmd->state = MPI3MR_CMD_STATE_BUSY;
4995 	cmd->data = NULL;
4996 	cmd->length = 0;
4997 	cmd->out_len = 0;
4998 out:
4999 	mtx_unlock(&sc->cmd_pool_lock);
5000 	return cmd;
5001 }
5002 
5003 /*
5004  * mpi3mr_release_command:	Return a cmd to free command pool
5005  * input:			Command packet for return to free command pool
5006  *
5007  * This function returns an MPT command to the free command list.
5008  */
5009 void
5010 mpi3mr_release_command(struct mpi3mr_cmd *cmd)
5011 {
5012 	struct mpi3mr_softc *sc = cmd->sc;
5013 
5014 	mtx_lock(&sc->cmd_pool_lock);
5015 	TAILQ_INSERT_HEAD(&(sc->cmd_list_head), cmd, next);
5016 	cmd->state = MPI3MR_CMD_STATE_FREE;
5017 	cmd->req_qidx = 0;
5018 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Release command SMID: 0x%x\n", cmd->hosttag);
5019 	mtx_unlock(&sc->cmd_pool_lock);
5020 
5021 	return;
5022 }
5023 
5024  /**
5025  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
5026  * @sc: Adapter instance reference
5027  *
5028  * Free the DMA memory allocated for IOCTL handling purpose.
5029  *
5030  * Return: None
5031  */
5032 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc *sc)
5033 {
5034 	U16 i;
5035 	struct dma_memory_desc *mem_desc;
5036 
5037 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5038 		mem_desc = &sc->ioctl_sge[i];
5039 		if (mem_desc->addr && mem_desc->dma_addr) {
5040 			bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5041 			bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5042 			mem_desc->addr = NULL;
5043 			if (mem_desc->tag != NULL)
5044 				bus_dma_tag_destroy(mem_desc->tag);
5045 		}
5046 	}
5047 
5048 	mem_desc = &sc->ioctl_chain_sge;
5049 	if (mem_desc->addr && mem_desc->dma_addr) {
5050 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5051 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5052 		mem_desc->addr = NULL;
5053 		if (mem_desc->tag != NULL)
5054 			bus_dma_tag_destroy(mem_desc->tag);
5055 	}
5056 
5057 	mem_desc = &sc->ioctl_resp_sge;
5058 	if (mem_desc->addr && mem_desc->dma_addr) {
5059 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5060 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5061 		mem_desc->addr = NULL;
5062 		if (mem_desc->tag != NULL)
5063 			bus_dma_tag_destroy(mem_desc->tag);
5064 	}
5065 
5066 	sc->ioctl_sges_allocated = false;
5067 }
5068 
5069 /**
5070  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
5071  * @sc: Adapter instance reference
5072  *
5073  * This function allocates dmaable memory required to handle the
5074  * application issued MPI3 IOCTL requests.
5075  *
5076  * Return: None
5077  */
5078 void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc *sc)
5079 {
5080 	struct dma_memory_desc *mem_desc;
5081 	U16 i;
5082 
5083 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5084 		mem_desc = &sc->ioctl_sge[i];
5085 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
5086 
5087 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5088 					4, 0,			/* algnmnt, boundary */
5089 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5090 					BUS_SPACE_MAXADDR,	/* highaddr */
5091 					NULL, NULL,		/* filter, filterarg */
5092 					mem_desc->size,		/* maxsize */
5093 					1,			/* nsegments */
5094 					mem_desc->size,		/* maxsegsize */
5095 					0,			/* flags */
5096 					NULL, NULL,		/* lockfunc, lockarg */
5097 					&mem_desc->tag)) {
5098 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5099 			goto out_failed;
5100 		}
5101 
5102 		if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5103 		    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5104 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5105 			goto out_failed;
5106 		}
5107 		bzero(mem_desc->addr, mem_desc->size);
5108 		bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5109 		    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5110 
5111 		if (!mem_desc->addr)
5112 			goto out_failed;
5113 	}
5114 
5115 	mem_desc = &sc->ioctl_chain_sge;
5116 	mem_desc->size = MPI3MR_4K_PGSZ;
5117 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5118 				4, 0,			/* algnmnt, boundary */
5119 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5120 				BUS_SPACE_MAXADDR,	/* highaddr */
5121 				NULL, NULL,		/* filter, filterarg */
5122 				mem_desc->size,		/* maxsize */
5123 				1,			/* nsegments */
5124 				mem_desc->size,		/* maxsegsize */
5125 				0,			/* flags */
5126 				NULL, NULL,		/* lockfunc, lockarg */
5127 				&mem_desc->tag)) {
5128 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5129 		goto out_failed;
5130 	}
5131 
5132 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5133 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5134 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5135 		goto out_failed;
5136 	}
5137 	bzero(mem_desc->addr, mem_desc->size);
5138 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5139 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5140 
5141 	if (!mem_desc->addr)
5142 		goto out_failed;
5143 
5144 	mem_desc = &sc->ioctl_resp_sge;
5145 	mem_desc->size = MPI3MR_4K_PGSZ;
5146 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5147 				4, 0,			/* algnmnt, boundary */
5148 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5149 				BUS_SPACE_MAXADDR,	/* highaddr */
5150 				NULL, NULL,		/* filter, filterarg */
5151 				mem_desc->size,		/* maxsize */
5152 				1,			/* nsegments */
5153 				mem_desc->size,		/* maxsegsize */
5154 				0,			/* flags */
5155 				NULL, NULL,		/* lockfunc, lockarg */
5156 				&mem_desc->tag)) {
5157 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5158 		goto out_failed;
5159 	}
5160 
5161 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5162 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5163 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5164 		goto out_failed;
5165 	}
5166 	bzero(mem_desc->addr, mem_desc->size);
5167 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5168 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5169 
5170 	if (!mem_desc->addr)
5171 		goto out_failed;
5172 
5173 	sc->ioctl_sges_allocated = true;
5174 
5175 	return;
5176 out_failed:
5177 	printf("cannot allocate DMA memory for the mpt commands"
5178 	    "  from the applications, application interface for MPT command is disabled\n");
5179 	mpi3mr_free_ioctl_dma_memory(sc);
5180 }
5181 
5182 void
5183 mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
5184 {
5185 	int i;
5186 	struct mpi3mr_op_req_queue *op_req_q;
5187 	struct mpi3mr_op_reply_queue *op_reply_q;
5188 
5189 	if (sc->admin_reply) {
5190 		if (mtx_initialized(&sc->admin_reply_lock))
5191 			mtx_destroy(&sc->admin_reply_lock);
5192 	}
5193 
5194 	if (sc->op_reply_q) {
5195 		for(i = 0; i < sc->num_queues; i++) {
5196 			op_reply_q = sc->op_reply_q + i;
5197 			if (mtx_initialized(&op_reply_q->q_lock))
5198 				mtx_destroy(&op_reply_q->q_lock);
5199 		}
5200 	}
5201 
5202 	if (sc->op_req_q) {
5203 		for(i = 0; i < sc->num_queues; i++) {
5204 			op_req_q = sc->op_req_q + i;
5205 			if (mtx_initialized(&op_req_q->q_lock))
5206 				mtx_destroy(&op_req_q->q_lock);
5207 		}
5208 	}
5209 
5210 	if (mtx_initialized(&sc->init_cmds.completion.lock))
5211 		mtx_destroy(&sc->init_cmds.completion.lock);
5212 
5213 	if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
5214 		mtx_destroy(&sc->ioctl_cmds.completion.lock);
5215 
5216 	if (mtx_initialized(&sc->host_tm_cmds.completion.lock))
5217 		mtx_destroy(&sc->host_tm_cmds.completion.lock);
5218 
5219 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5220 		if (mtx_initialized(&sc->dev_rmhs_cmds[i].completion.lock))
5221 			mtx_destroy(&sc->dev_rmhs_cmds[i].completion.lock);
5222 	}
5223 
5224 	if (mtx_initialized(&sc->reset_mutex))
5225 		mtx_destroy(&sc->reset_mutex);
5226 
5227 	if (mtx_initialized(&sc->target_lock))
5228 		mtx_destroy(&sc->target_lock);
5229 
5230 	if (mtx_initialized(&sc->fwevt_lock))
5231 		mtx_destroy(&sc->fwevt_lock);
5232 
5233 	if (mtx_initialized(&sc->cmd_pool_lock))
5234 		mtx_destroy(&sc->cmd_pool_lock);
5235 
5236 	if (mtx_initialized(&sc->reply_free_q_lock))
5237 		mtx_destroy(&sc->reply_free_q_lock);
5238 
5239 	if (mtx_initialized(&sc->sense_buf_q_lock))
5240 		mtx_destroy(&sc->sense_buf_q_lock);
5241 
5242 	if (mtx_initialized(&sc->chain_buf_lock))
5243 		mtx_destroy(&sc->chain_buf_lock);
5244 
5245 	if (mtx_initialized(&sc->admin_req_lock))
5246 		mtx_destroy(&sc->admin_req_lock);
5247 
5248 	if (mtx_initialized(&sc->mpi3mr_mtx))
5249 		mtx_destroy(&sc->mpi3mr_mtx);
5250 }
5251 
5252 /**
5253  * mpi3mr_free_mem - Freeup adapter level data structures
5254  * @sc: Adapter reference
5255  *
5256  * Return: Nothing.
5257  */
5258 void
5259 mpi3mr_free_mem(struct mpi3mr_softc *sc)
5260 {
5261 	int i;
5262 	struct mpi3mr_op_req_queue *op_req_q;
5263 	struct mpi3mr_op_reply_queue *op_reply_q;
5264 	struct mpi3mr_irq_context *irq_ctx;
5265 
5266 	if (sc->cmd_list) {
5267 		for (i = 0; i < sc->max_host_ios; i++) {
5268 			free(sc->cmd_list[i], M_MPI3MR);
5269 		}
5270 		free(sc->cmd_list, M_MPI3MR);
5271 		sc->cmd_list = NULL;
5272 	}
5273 
5274 	if (sc->pel_seq_number && sc->pel_seq_number_dma) {
5275 		bus_dmamap_unload(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap);
5276 		bus_dmamem_free(sc->pel_seq_num_dmatag, sc->pel_seq_number, sc->pel_seq_num_dmamap);
5277 		sc->pel_seq_number = NULL;
5278 		if (sc->pel_seq_num_dmatag != NULL)
5279 			bus_dma_tag_destroy(sc->pel_seq_num_dmatag);
5280 	}
5281 
5282 	if (sc->throttle_groups) {
5283 		free(sc->throttle_groups, M_MPI3MR);
5284 		sc->throttle_groups = NULL;
5285 	}
5286 
5287 	/* Free up operational queues*/
5288 	if (sc->op_req_q) {
5289 		for (i = 0; i < sc->num_queues; i++) {
5290 			op_req_q = sc->op_req_q + i;
5291 			if (op_req_q->q_base && op_req_q->q_base_phys) {
5292 				bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
5293 				bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
5294 				op_req_q->q_base = NULL;
5295 				if (op_req_q->q_base_tag != NULL)
5296 					bus_dma_tag_destroy(op_req_q->q_base_tag);
5297 			}
5298 		}
5299 		free(sc->op_req_q, M_MPI3MR);
5300 		sc->op_req_q = NULL;
5301 	}
5302 
5303 	if (sc->op_reply_q) {
5304 		for (i = 0; i < sc->num_queues; i++) {
5305 			op_reply_q = sc->op_reply_q + i;
5306 			if (op_reply_q->q_base && op_reply_q->q_base_phys) {
5307 				bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
5308 				bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
5309 				op_reply_q->q_base = NULL;
5310 				if (op_reply_q->q_base_tag != NULL)
5311 					bus_dma_tag_destroy(op_reply_q->q_base_tag);
5312 			}
5313 		}
5314 		free(sc->op_reply_q, M_MPI3MR);
5315 		sc->op_reply_q = NULL;
5316 	}
5317 
5318 	/* Free up chain buffers*/
5319 	if (sc->chain_sgl_list) {
5320 		for (i = 0; i < sc->chain_buf_count; i++) {
5321 			if (sc->chain_sgl_list[i].buf && sc->chain_sgl_list[i].buf_phys) {
5322 				bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
5323 				bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf,
5324 						sc->chain_sgl_list[i].buf_dmamap);
5325 				sc->chain_sgl_list[i].buf = NULL;
5326 			}
5327 		}
5328 		if (sc->chain_sgl_list_tag != NULL)
5329 			bus_dma_tag_destroy(sc->chain_sgl_list_tag);
5330 		free(sc->chain_sgl_list, M_MPI3MR);
5331 		sc->chain_sgl_list = NULL;
5332 	}
5333 
5334 	if (sc->chain_bitmap) {
5335 		free(sc->chain_bitmap, M_MPI3MR);
5336 		sc->chain_bitmap = NULL;
5337 	}
5338 
5339 	for (i = 0; i < sc->msix_count; i++) {
5340 		irq_ctx = sc->irq_ctx + i;
5341 		if (irq_ctx)
5342 			irq_ctx->op_reply_q = NULL;
5343 	}
5344 
5345 	/* Free reply_buf_tag */
5346 	if (sc->reply_buf && sc->reply_buf_phys) {
5347 		bus_dmamap_unload(sc->reply_buf_tag, sc->reply_buf_dmamap);
5348 		bus_dmamem_free(sc->reply_buf_tag, sc->reply_buf,
5349 				sc->reply_buf_dmamap);
5350 		sc->reply_buf = NULL;
5351 		if (sc->reply_buf_tag != NULL)
5352 			bus_dma_tag_destroy(sc->reply_buf_tag);
5353 	}
5354 
5355 	/* Free reply_free_q_tag */
5356 	if (sc->reply_free_q && sc->reply_free_q_phys) {
5357 		bus_dmamap_unload(sc->reply_free_q_tag, sc->reply_free_q_dmamap);
5358 		bus_dmamem_free(sc->reply_free_q_tag, sc->reply_free_q,
5359 				sc->reply_free_q_dmamap);
5360 		sc->reply_free_q = NULL;
5361 		if (sc->reply_free_q_tag != NULL)
5362 			bus_dma_tag_destroy(sc->reply_free_q_tag);
5363 	}
5364 
5365 	/* Free sense_buf_tag */
5366 	if (sc->sense_buf && sc->sense_buf_phys) {
5367 		bus_dmamap_unload(sc->sense_buf_tag, sc->sense_buf_dmamap);
5368 		bus_dmamem_free(sc->sense_buf_tag, sc->sense_buf,
5369 				sc->sense_buf_dmamap);
5370 		sc->sense_buf = NULL;
5371 		if (sc->sense_buf_tag != NULL)
5372 			bus_dma_tag_destroy(sc->sense_buf_tag);
5373 	}
5374 
5375 	/* Free sense_buf_q_tag */
5376 	if (sc->sense_buf_q && sc->sense_buf_q_phys) {
5377 		bus_dmamap_unload(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap);
5378 		bus_dmamem_free(sc->sense_buf_q_tag, sc->sense_buf_q,
5379 				sc->sense_buf_q_dmamap);
5380 		sc->sense_buf_q = NULL;
5381 		if (sc->sense_buf_q_tag != NULL)
5382 			bus_dma_tag_destroy(sc->sense_buf_q_tag);
5383 	}
5384 
5385 	/* Free up internal(non-IO) commands*/
5386 	if (sc->init_cmds.reply) {
5387 		free(sc->init_cmds.reply, M_MPI3MR);
5388 		sc->init_cmds.reply = NULL;
5389 	}
5390 
5391 	if (sc->ioctl_cmds.reply) {
5392 		free(sc->ioctl_cmds.reply, M_MPI3MR);
5393 		sc->ioctl_cmds.reply = NULL;
5394 	}
5395 
5396 	if (sc->pel_cmds.reply) {
5397 		free(sc->pel_cmds.reply, M_MPI3MR);
5398 		sc->pel_cmds.reply = NULL;
5399 	}
5400 
5401 	if (sc->pel_abort_cmd.reply) {
5402 		free(sc->pel_abort_cmd.reply, M_MPI3MR);
5403 		sc->pel_abort_cmd.reply = NULL;
5404 	}
5405 
5406 	if (sc->host_tm_cmds.reply) {
5407 		free(sc->host_tm_cmds.reply, M_MPI3MR);
5408 		sc->host_tm_cmds.reply = NULL;
5409 	}
5410 
5411 	if (sc->log_data_buffer) {
5412 		free(sc->log_data_buffer, M_MPI3MR);
5413 		sc->log_data_buffer = NULL;
5414 	}
5415 
5416 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5417 		if (sc->dev_rmhs_cmds[i].reply) {
5418 			free(sc->dev_rmhs_cmds[i].reply, M_MPI3MR);
5419 			sc->dev_rmhs_cmds[i].reply = NULL;
5420 		}
5421 	}
5422 
5423 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5424 		if (sc->evtack_cmds[i].reply) {
5425 			free(sc->evtack_cmds[i].reply, M_MPI3MR);
5426 			sc->evtack_cmds[i].reply = NULL;
5427 		}
5428 	}
5429 
5430 	if (sc->removepend_bitmap) {
5431 		free(sc->removepend_bitmap, M_MPI3MR);
5432 		sc->removepend_bitmap = NULL;
5433 	}
5434 
5435 	if (sc->devrem_bitmap) {
5436 		free(sc->devrem_bitmap, M_MPI3MR);
5437 		sc->devrem_bitmap = NULL;
5438 	}
5439 
5440 	if (sc->evtack_cmds_bitmap) {
5441 		free(sc->evtack_cmds_bitmap, M_MPI3MR);
5442 		sc->evtack_cmds_bitmap = NULL;
5443 	}
5444 
5445 	/* Free Admin reply*/
5446 	if (sc->admin_reply && sc->admin_reply_phys) {
5447 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
5448 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
5449 				sc->admin_reply_dmamap);
5450 		sc->admin_reply = NULL;
5451 		if (sc->admin_reply_tag != NULL)
5452 			bus_dma_tag_destroy(sc->admin_reply_tag);
5453 	}
5454 
5455 	/* Free Admin request*/
5456 	if (sc->admin_req && sc->admin_req_phys) {
5457 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
5458 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
5459 				sc->admin_req_dmamap);
5460 		sc->admin_req = NULL;
5461 		if (sc->admin_req_tag != NULL)
5462 			bus_dma_tag_destroy(sc->admin_req_tag);
5463 	}
5464 	mpi3mr_free_ioctl_dma_memory(sc);
5465 
5466 }
5467 
5468 /**
5469  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5470  * @sc: Adapter instance reference
5471  * @cmdptr: Internal command tracker
5472  *
5473  * Complete an internal driver commands with state indicating it
5474  * is completed due to reset.
5475  *
5476  * Return: Nothing.
5477  */
5478 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc *sc,
5479 	struct mpi3mr_drvr_cmd *cmdptr)
5480 {
5481 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5482 		cmdptr->state |= MPI3MR_CMD_RESET;
5483 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5484 		if (cmdptr->is_waiting) {
5485 			complete(&cmdptr->completion);
5486 			cmdptr->is_waiting = 0;
5487 		} else if (cmdptr->callback)
5488 			cmdptr->callback(sc, cmdptr);
5489 	}
5490 }
5491 
5492 /**
5493  * mpi3mr_flush_drv_cmds - Flush internal driver commands
5494  * @sc: Adapter instance reference
5495  *
5496  * Flush all internal driver commands post reset
5497  *
5498  * Return: Nothing.
5499  */
5500 static void mpi3mr_flush_drv_cmds(struct mpi3mr_softc *sc)
5501 {
5502 	int i = 0;
5503 	struct mpi3mr_drvr_cmd *cmdptr;
5504 
5505 	cmdptr = &sc->init_cmds;
5506 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5507 
5508 	cmdptr = &sc->ioctl_cmds;
5509 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5510 
5511 	cmdptr = &sc->host_tm_cmds;
5512 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5513 
5514 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5515 		cmdptr = &sc->dev_rmhs_cmds[i];
5516 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5517 	}
5518 
5519 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5520 		cmdptr = &sc->evtack_cmds[i];
5521 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5522 	}
5523 
5524 	cmdptr = &sc->pel_cmds;
5525 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5526 
5527 	cmdptr = &sc->pel_abort_cmd;
5528 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5529 }
5530 
5531 
5532 /**
5533  * mpi3mr_memset_buffers - memset memory for a controller
5534  * @sc: Adapter instance reference
5535  *
5536  * clear all the memory allocated for a controller, typically
5537  * called post reset to reuse the memory allocated during the
5538  * controller init.
5539  *
5540  * Return: Nothing.
5541  */
5542 static void mpi3mr_memset_buffers(struct mpi3mr_softc *sc)
5543 {
5544 	U16 i;
5545 	struct mpi3mr_throttle_group_info *tg;
5546 
5547 	memset(sc->admin_req, 0, sc->admin_req_q_sz);
5548 	memset(sc->admin_reply, 0, sc->admin_reply_q_sz);
5549 
5550 	memset(sc->init_cmds.reply, 0, sc->reply_sz);
5551 	memset(sc->ioctl_cmds.reply, 0, sc->reply_sz);
5552 	memset(sc->host_tm_cmds.reply, 0, sc->reply_sz);
5553 	memset(sc->pel_cmds.reply, 0, sc->reply_sz);
5554 	memset(sc->pel_abort_cmd.reply, 0, sc->reply_sz);
5555 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5556 		memset(sc->dev_rmhs_cmds[i].reply, 0, sc->reply_sz);
5557 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5558 		memset(sc->evtack_cmds[i].reply, 0, sc->reply_sz);
5559 	memset(sc->removepend_bitmap, 0, sc->dev_handle_bitmap_sz);
5560 	memset(sc->devrem_bitmap, 0, sc->devrem_bitmap_sz);
5561 	memset(sc->evtack_cmds_bitmap, 0, sc->evtack_cmds_bitmap_sz);
5562 
5563 	for (i = 0; i < sc->num_queues; i++) {
5564 		sc->op_reply_q[i].qid = 0;
5565 		sc->op_reply_q[i].ci = 0;
5566 		sc->op_reply_q[i].num_replies = 0;
5567 		sc->op_reply_q[i].ephase = 0;
5568 		mpi3mr_atomic_set(&sc->op_reply_q[i].pend_ios, 0);
5569 		memset(sc->op_reply_q[i].q_base, 0, sc->op_reply_q[i].qsz);
5570 
5571 		sc->op_req_q[i].ci = 0;
5572 		sc->op_req_q[i].pi = 0;
5573 		sc->op_req_q[i].num_reqs = 0;
5574 		sc->op_req_q[i].qid = 0;
5575 		sc->op_req_q[i].reply_qid = 0;
5576 		memset(sc->op_req_q[i].q_base, 0, sc->op_req_q[i].qsz);
5577 	}
5578 
5579 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
5580 	if (sc->throttle_groups) {
5581 		tg = sc->throttle_groups;
5582 		for (i = 0; i < sc->num_io_throttle_group; i++, tg++) {
5583 			tg->id = 0;
5584 			tg->fw_qd = 0;
5585 			tg->modified_qd = 0;
5586 			tg->io_divert= 0;
5587 			tg->high = 0;
5588 			tg->low = 0;
5589 			mpi3mr_atomic_set(&tg->pend_large_data_sz, 0);
5590 		}
5591  	}
5592 }
5593 
5594 /**
5595  * mpi3mr_invalidate_devhandles -Invalidate device handles
5596  * @sc: Adapter instance reference
5597  *
5598  * Invalidate the device handles in the target device structures
5599  * . Called post reset prior to reinitializing the controller.
5600  *
5601  * Return: Nothing.
5602  */
5603 static void mpi3mr_invalidate_devhandles(struct mpi3mr_softc *sc)
5604 {
5605 	struct mpi3mr_target *target = NULL;
5606 
5607 	mtx_lock_spin(&sc->target_lock);
5608 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5609 		if (target) {
5610 			target->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
5611 			target->io_throttle_enabled = 0;
5612 			target->io_divert = 0;
5613 			target->throttle_group = NULL;
5614 		}
5615 	}
5616 	mtx_unlock_spin(&sc->target_lock);
5617 }
5618 
5619 /**
5620  * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
5621  * @sc: Adapter instance reference
5622  *
5623  * This is executed post controller reset to identify any
5624  * missing devices during reset and remove from the upper layers
5625  * or expose any newly detected device to the upper layers.
5626  *
5627  * Return: Nothing.
5628  */
5629 
5630 static void mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc *sc)
5631 {
5632 	struct mpi3mr_target *target = NULL;
5633 	struct mpi3mr_target *target_temp = NULL;
5634 
5635 	TAILQ_FOREACH_SAFE(target, &sc->cam_sc->tgt_list, tgt_next, target_temp) {
5636 		if (target->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
5637 			if (target->exposed_to_os)
5638 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
5639 			mpi3mr_remove_device_from_list(sc, target, true);
5640 		}
5641 	}
5642 
5643 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5644 		if ((target->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
5645 		    !target->is_hidden && !target->exposed_to_os) {
5646 			mpi3mr_add_device(sc, target->per_id);
5647 		}
5648 	}
5649 
5650 }
5651 
5652 static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
5653 {
5654 	int i;
5655 	struct mpi3mr_cmd *cmd = NULL;
5656 	union ccb *ccb = NULL;
5657 
5658 	for (i = 0; i < sc->max_host_ios; i++) {
5659 		cmd = sc->cmd_list[i];
5660 
5661 		if (cmd && cmd->ccb) {
5662 			if (cmd->callout_owner) {
5663 				ccb = (union ccb *)(cmd->ccb);
5664 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
5665 				mpi3mr_cmd_done(sc, cmd);
5666 			} else {
5667 				cmd->ccb = NULL;
5668 				mpi3mr_release_command(cmd);
5669 			}
5670 		}
5671 	}
5672 }
5673 /**
5674  * mpi3mr_clear_reset_history - Clear reset history
5675  * @sc: Adapter instance reference
5676  *
5677  * Write the reset history bit in IOC Status to clear the bit,
5678  * if it is already set.
5679  *
5680  * Return: Nothing.
5681  */
5682 static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
5683 {
5684 	U32 ioc_status;
5685 
5686 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5687 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
5688 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
5689 }
5690 
5691 /**
5692  * mpi3mr_set_diagsave - Set diag save bit for snapdump
5693  * @sc: Adapter reference
5694  *
5695  * Set diag save bit in IOC configuration register to enable
5696  * snapdump.
5697  *
5698  * Return: Nothing.
5699  */
5700 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc)
5701 {
5702 	U32 ioc_config;
5703 
5704 	ioc_config =
5705 	    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5706 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
5707 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
5708 }
5709 
5710 /**
5711  * mpi3mr_issue_reset - Issue reset to the controller
5712  * @sc: Adapter reference
5713  * @reset_type: Reset type
5714  * @reset_reason: Reset reason code
5715  *
5716  * Unlock the host diagnostic registers and write the specific
5717  * reset type to that, wait for reset acknowledgement from the
5718  * controller, if the reset is not successful retry for the
5719  * predefined number of times.
5720  *
5721  * Return: 0 on success, non-zero on failure.
5722  */
5723 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
5724 	U32 reset_reason)
5725 {
5726 	int retval = -1;
5727 	U8 unlock_retry_count = 0;
5728 	U32 host_diagnostic, ioc_status, ioc_config;
5729 	U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
5730 
5731 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
5732 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
5733 		return retval;
5734 	if (sc->unrecoverable)
5735 		return retval;
5736 
5737 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
5738 		retval = 0;
5739 		return retval;
5740 	}
5741 
5742 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s reset due to %s(0x%x)\n",
5743 	    mpi3mr_reset_type_name(reset_type),
5744 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
5745 
5746 	mpi3mr_clear_reset_history(sc);
5747 	do {
5748 		mpi3mr_dprint(sc, MPI3MR_INFO,
5749 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
5750 		    ++unlock_retry_count);
5751 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
5752 			mpi3mr_dprint(sc, MPI3MR_ERROR,
5753 			    "%s reset failed! due to host diag register unlock failure"
5754 			    "host_diagnostic(0x%08x)\n", mpi3mr_reset_type_name(reset_type),
5755 			    host_diagnostic);
5756 			sc->unrecoverable = 1;
5757 			return retval;
5758 		}
5759 
5760 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5761 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH);
5762 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5763 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST);
5764 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5765 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5766 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5767 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD);
5768 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5769 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH);
5770 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5771 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH);
5772 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5773 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH);
5774 
5775 		DELAY(1000); /* delay in usec */
5776 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
5777 		mpi3mr_dprint(sc, MPI3MR_INFO,
5778 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
5779 		    unlock_retry_count, host_diagnostic);
5780 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
5781 
5782 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
5783 	mpi3mr_regwrite(sc, MPI3_SYSIF_HOST_DIAG_OFFSET, host_diagnostic | reset_type);
5784 
5785 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
5786 		do {
5787 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5788 			if (ioc_status &
5789 			    MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
5790 				ioc_config =
5791 				    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5792 				if (mpi3mr_soft_reset_success(ioc_status,
5793 				    ioc_config)) {
5794 					mpi3mr_clear_reset_history(sc);
5795 					retval = 0;
5796 					break;
5797 				}
5798 			}
5799 			DELAY(100 * 1000);
5800 		} while (--timeout);
5801 	} else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
5802 		do {
5803 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5804 			if (mpi3mr_diagfault_success(sc, ioc_status)) {
5805 				retval = 0;
5806 				break;
5807 			}
5808 			DELAY(100 * 1000);
5809 		} while (--timeout);
5810 	}
5811 
5812 	mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5813 		MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5814 
5815 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5816 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5817 
5818 	mpi3mr_dprint(sc, MPI3MR_INFO,
5819 	    "IOC Status/Config after %s reset is (0x%x)/(0x%x)\n",
5820 	    !retval ? "successful":"failed", ioc_status,
5821 	    ioc_config);
5822 
5823 	if (retval)
5824 		sc->unrecoverable = 1;
5825 
5826 	return retval;
5827 }
5828 
5829 inline void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc)
5830 {
5831 	mtx_lock(&sc->fwevt_lock);
5832 	taskqueue_drain(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
5833 	taskqueue_block(sc->cam_sc->ev_tq);
5834 	mtx_unlock(&sc->fwevt_lock);
5835 	return;
5836 }
5837 
5838 /**
5839  * mpi3mr_soft_reset_handler - Reset the controller
5840  * @sc: Adapter instance reference
5841  * @reset_reason: Reset reason code
5842  * @snapdump: snapdump enable/disbale bit
5843  *
5844  * This is an handler for recovering controller by issuing soft
5845  * reset or diag fault reset. This is a blocking function and
5846  * when one reset is executed if any other resets they will be
5847  * blocked. All IOCTLs/IO will be blocked during the reset. If
5848  * controller reset is successful then the controller will be
5849  * reinitalized, otherwise the controller will be marked as not
5850  * recoverable
5851  *
5852  * Return: 0 on success, non-zero on failure.
5853  */
5854 int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
5855 	U32 reset_reason, bool snapdump)
5856 {
5857 	int retval = 0, i = 0;
5858 	enum mpi3mr_iocstate ioc_state;
5859 
5860 	mpi3mr_dprint(sc, MPI3MR_INFO, "soft reset invoked: reason code: %s\n",
5861 	    mpi3mr_reset_rc_name(reset_reason));
5862 
5863 	if ((reset_reason == MPI3MR_RESET_FROM_IOCTL) &&
5864 	     (sc->reset.ioctl_reset_snapdump != true))
5865 		snapdump = false;
5866 
5867 	mpi3mr_dprint(sc, MPI3MR_INFO,
5868 	    "soft_reset_handler: wait if diag save is in progress\n");
5869 	while (sc->diagsave_timeout)
5870 		DELAY(1000 * 1000);
5871 
5872 	ioc_state = mpi3mr_get_iocstate(sc);
5873 	if (ioc_state == MRIOC_STATE_UNRECOVERABLE) {
5874 		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is in unrecoverable state, exit\n");
5875 		sc->reset.type = MPI3MR_NO_RESET;
5876 		sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
5877 		sc->reset.status = -1;
5878 		sc->reset.ioctl_reset_snapdump = false;
5879 		return -1;
5880 	}
5881 
5882 	if (sc->reset_in_progress) {
5883 		mpi3mr_dprint(sc, MPI3MR_INFO, "reset is already in progress, exit\n");
5884 		return -1;
5885 	}
5886 
5887 	/* Pause IOs, drain and block the event taskqueue */
5888 	xpt_freeze_simq(sc->cam_sc->sim, 1);
5889 
5890 	mpi3mr_cleanup_event_taskq(sc);
5891 
5892 	sc->reset_in_progress = 1;
5893 	sc->block_ioctls = 1;
5894 
5895 	while (mpi3mr_atomic_read(&sc->pend_ioctls) && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
5896 		ioc_state = mpi3mr_get_iocstate(sc);
5897 		if (ioc_state == MRIOC_STATE_FAULT)
5898 			break;
5899 		i++;
5900 		if (!(i % 5)) {
5901 			mpi3mr_dprint(sc, MPI3MR_INFO,
5902 			    "[%2ds]waiting for IOCTL to be finished from %s\n", i, __func__);
5903 		}
5904 		DELAY(1000 * 1000);
5905 	}
5906 
5907 	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
5908 	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
5909 	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
5910 
5911 		mpi3mr_dprint(sc, MPI3MR_INFO, "Turn off events prior to reset\n");
5912 
5913 		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5914 			sc->event_masks[i] = -1;
5915 		mpi3mr_issue_event_notification(sc);
5916 	}
5917 
5918 	mpi3mr_disable_interrupts(sc);
5919 
5920 	if (snapdump)
5921 		mpi3mr_trigger_snapdump(sc, reset_reason);
5922 
5923 	retval = mpi3mr_issue_reset(sc,
5924 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
5925 	if (retval) {
5926 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to issue soft reset to the ioc\n");
5927 		goto out;
5928 	}
5929 
5930 	mpi3mr_flush_drv_cmds(sc);
5931 	mpi3mr_flush_io(sc);
5932 	mpi3mr_invalidate_devhandles(sc);
5933 	mpi3mr_memset_buffers(sc);
5934 
5935 	if (sc->prepare_for_reset) {
5936 		sc->prepare_for_reset = 0;
5937 		sc->prepare_for_reset_timeout_counter = 0;
5938 	}
5939 
5940 	retval = mpi3mr_initialize_ioc(sc, MPI3MR_INIT_TYPE_RESET);
5941 	if (retval) {
5942 		mpi3mr_dprint(sc, MPI3MR_ERROR, "reinit after soft reset failed: reason %d\n",
5943 		    reset_reason);
5944 		goto out;
5945 	}
5946 
5947 	DELAY((1000 * 1000) * 10);
5948 out:
5949 	if (!retval) {
5950 		sc->diagsave_timeout = 0;
5951 		sc->reset_in_progress = 0;
5952 		mpi3mr_rfresh_tgtdevs(sc);
5953 		sc->ts_update_counter = 0;
5954 		sc->block_ioctls = 0;
5955 		sc->pel_abort_requested = 0;
5956 		if (sc->pel_wait_pend) {
5957 			sc->pel_cmds.retry_count = 0;
5958 			mpi3mr_issue_pel_wait(sc, &sc->pel_cmds);
5959 			mpi3mr_app_send_aen(sc);
5960 		}
5961 	} else {
5962 		mpi3mr_issue_reset(sc,
5963 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
5964 		sc->unrecoverable = 1;
5965 		sc->reset_in_progress = 0;
5966 	}
5967 
5968 	mpi3mr_dprint(sc, MPI3MR_INFO, "Soft Reset: %s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
5969 
5970 	taskqueue_unblock(sc->cam_sc->ev_tq);
5971 	xpt_release_simq(sc->cam_sc->sim, 1);
5972 
5973 	sc->reset.type = MPI3MR_NO_RESET;
5974 	sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
5975 	sc->reset.status = retval;
5976 	sc->reset.ioctl_reset_snapdump = false;
5977 
5978 	return retval;
5979 }
5980 
5981 /**
5982  * mpi3mr_issue_ioc_shutdown - shutdown controller
5983  * @sc: Adapter instance reference
5984  *
5985  * Send shutodwn notification to the controller and wait for the
5986  * shutdown_timeout for it to be completed.
5987  *
5988  * Return: Nothing.
5989  */
5990 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_softc *sc)
5991 {
5992 	U32 ioc_config, ioc_status;
5993 	U8 retval = 1, retry = 0;
5994 	U32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
5995 
5996 	mpi3mr_dprint(sc, MPI3MR_INFO, "sending shutdown notification\n");
5997 	if (sc->unrecoverable) {
5998 		mpi3mr_dprint(sc, MPI3MR_ERROR,
5999 		    "controller is unrecoverable, shutdown not issued\n");
6000 		return;
6001 	}
6002 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6003 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6004 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
6005 		mpi3mr_dprint(sc, MPI3MR_ERROR, "shutdown already in progress\n");
6006 		return;
6007 	}
6008 
6009 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6010 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
6011 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
6012 
6013 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
6014 
6015 	if (sc->facts.shutdown_timeout)
6016 		timeout = sc->facts.shutdown_timeout * 10;
6017 
6018 	do {
6019 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6020 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6021 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
6022 			retval = 0;
6023 			break;
6024 		}
6025 
6026 		if (sc->unrecoverable)
6027 			break;
6028 
6029 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
6030 			mpi3mr_print_fault_info(sc);
6031 
6032 			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
6033 				break;
6034 
6035 			if (mpi3mr_issue_reset(sc,
6036 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6037 			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6038 				break;
6039 
6040 			ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6041 			ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
6042 			ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
6043 
6044 			mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
6045 
6046 			if (sc->facts.shutdown_timeout)
6047 				timeout = sc->facts.shutdown_timeout * 10;
6048 
6049 			retry++;
6050 		}
6051 
6052                 DELAY(100 * 1000);
6053 
6054 	} while (--timeout);
6055 
6056 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6057 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6058 
6059 	if (retval) {
6060 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6061 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
6062 			mpi3mr_dprint(sc, MPI3MR_ERROR,
6063 			    "shutdown still in progress after timeout\n");
6064 	}
6065 
6066 	mpi3mr_dprint(sc, MPI3MR_INFO,
6067 	    "ioc_status/ioc_config after %s shutdown is (0x%x)/(0x%x)\n",
6068 	    (!retval)?"successful":"failed", ioc_status,
6069 	    ioc_config);
6070 }
6071 
6072 /**
6073  * mpi3mr_cleanup_ioc - Cleanup controller
6074  * @sc: Adapter instance reference
6075 
6076  * controller cleanup handler, Message unit reset or soft reset
6077  * and shutdown notification is issued to the controller.
6078  *
6079  * Return: Nothing.
6080  */
6081 void mpi3mr_cleanup_ioc(struct mpi3mr_softc *sc)
6082 {
6083 	enum mpi3mr_iocstate ioc_state;
6084 
6085 	mpi3mr_dprint(sc, MPI3MR_INFO, "cleaning up the controller\n");
6086 	mpi3mr_disable_interrupts(sc);
6087 
6088 	ioc_state = mpi3mr_get_iocstate(sc);
6089 
6090 	if ((!sc->unrecoverable) && (!sc->reset_in_progress) &&
6091 	    (ioc_state == MRIOC_STATE_READY)) {
6092 		if (mpi3mr_mur_ioc(sc,
6093 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6094 			mpi3mr_issue_reset(sc,
6095 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6096 			    MPI3MR_RESET_FROM_MUR_FAILURE);
6097 		mpi3mr_issue_ioc_shutdown(sc);
6098 	}
6099 
6100 	mpi3mr_dprint(sc, MPI3MR_INFO, "controller cleanup completed\n");
6101 }
6102