xref: /freebsd/sys/dev/mpi3mr/mpi3mr.c (revision 3f3a1554)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016-2024, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/module.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/uio.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
61 #include <dev/pci/pci_private.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #include <cam/scsi/smp_all.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include "mpi3mr.h"
76 #include "mpi3mr_cam.h"
77 #include "mpi3mr_app.h"
78 
79 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
80 	U64 reply_dma);
81 static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc);
82 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
83 	struct mpi3mr_drvr_cmd *drvrcmd);
84 static void mpi3mr_flush_io(struct mpi3mr_softc *sc);
85 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
86 	U16 reset_reason);
87 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
88 	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc);
89 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
90 	struct mpi3mr_drvr_cmd *drv_cmd);
91 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
92 	struct mpi3mr_drvr_cmd *drv_cmd);
93 static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
94 	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx);
95 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc);
96 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc);
97 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code);
98 
99 void
mpi3mr_hexdump(void * buf,int sz,int format)100 mpi3mr_hexdump(void *buf, int sz, int format)
101 {
102         int i;
103         U32 *buf_loc = (U32 *)buf;
104 
105         for (i = 0; i < (sz / sizeof(U32)); i++) {
106                 if ((i % format) == 0) {
107                         if (i != 0)
108                                 printf("\n");
109                         printf("%08x: ", (i * 4));
110                 }
111                 printf("%08x ", buf_loc[i]);
112         }
113         printf("\n");
114 }
115 
116 void
init_completion(struct completion * completion)117 init_completion(struct completion *completion)
118 {
119 	completion->done = 0;
120 }
121 
/*
 * Signal a completion object so that pollers of 'done' fall through.
 */
void
complete(struct completion *completion)
{
	completion->done = 1;
	/*
	 * NOTE(review): the wakeup channel here is the address of this
	 * function, not the completion object. The waiters visible in this
	 * file either busy-poll 'done' (wait_for_completion_timeout) or
	 * sleep on &sc->tm_chan, so nothing sleeps on this channel —
	 * confirm intent before relying on the wakeup.
	 */
	wakeup(complete);
}
128 
/*
 * Busy-wait (1ms steps) for a completion to be signalled, up to
 * 'timeout' seconds. On expiry, log and force-complete the object.
 */
void wait_for_completion_timeout(struct completion *completion,
	    U32 timeout)
{
	U32 remaining_ms = timeout * 1000;

	while (remaining_ms != 0 && completion->done == 0) {
		DELAY(1000);
		remaining_ms--;
	}

	if (completion->done == 0) {
		printf("%s: Command is timedout\n", __func__);
		completion->done = 1;
	}
}
/*
 * Wait for a task-management completion, up to 'timeout' seconds.
 * Unlike wait_for_completion_timeout(), this sleeps on sc->tm_chan
 * (dropping sc->mpi3mr_mtx while asleep) instead of busy-waiting.
 * On expiry, log and force-complete the object.
 */
void wait_for_completion_timeout_tm(struct completion *completion,
	    U32 timeout, struct mpi3mr_softc *sc)
{
	U32 count = timeout * 1000;

	/*
	 * NOTE(review): each msleep() may block for up to 1s (1 * hz) but
	 * 'count' is decremented once per wakeup, so the effective bound is
	 * governed by how often tm_chan is signalled — confirm the intended
	 * timeout units.
	 */
	while ((completion->done == 0) && count) {
		msleep(&sc->tm_chan, &sc->mpi3mr_mtx, PRIBIO,
		       "TM command", 1 * hz);
		count--;
	}

	if (completion->done == 0) {
		printf("%s: Command is timedout\n", __func__);
		completion->done = 1;
	}
}
160 
161 
162 void
poll_for_command_completion(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * cmd,U16 wait)163 poll_for_command_completion(struct mpi3mr_softc *sc,
164        struct mpi3mr_drvr_cmd *cmd, U16 wait)
165 {
166 	int wait_time = wait * 1000;
167        while (wait_time) {
168                mpi3mr_complete_admin_cmd(sc);
169                if (cmd->state & MPI3MR_CMD_COMPLETE)
170                        break;
171 	       DELAY(1000);
172                wait_time--;
173        }
174 }
175 
176 /**
177  * mpi3mr_trigger_snapdump - triggers firmware snapdump
178  * @sc: Adapter instance reference
179  * @reason_code: reason code for the fault.
180  *
181  * This routine will trigger the snapdump and wait for it to
182  * complete or timeout before it returns.
183  * This will be called during initialization time faults/resets/timeouts
184  * before soft reset invocation.
185  *
186  * Return:  None.
187  */
static void
mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U16 reason_code)
{
	/* Poll budget: MPI3_SYSIF_DIAG_SAVE_TIMEOUT seconds in 100ms steps. */
	U32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	mpi3mr_dprint(sc, MPI3MR_INFO, "snapdump triggered: reason code: %s\n",
	    mpi3mr_reset_rc_name(reason_code));

	/* Arm diag-save, then fault the controller to capture the dump. */
	mpi3mr_set_diagsave(sc);
	mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
			   reason_code);

	/* Wait for the firmware to clear SAVE_IN_PROGRESS or give up. */
	do {
		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
                DELAY(100 * 1000);
	} while (--timeout);

	return;
}
209 
210 /**
211  * mpi3mr_check_rh_fault_ioc - check reset history and fault
212  * controller
213  * @sc: Adapter instance reference
214  * @reason_code, reason code for the fault.
215  *
216  * This routine will fault the controller with
217  * the given reason code if it is not already in the fault or
218  * not asynchronously reset. This will be used to handle
219  * initialization time faults/resets/timeouts as in those cases
220  * immediate soft reset invocation is not required.
221  *
222  * Return:  None.
223  */
static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U16 reason_code)
{
	U32 status;

	/* Nothing to do for a controller already declared dead. */
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is unrecoverable\n");
		return;
	}

	/* Already faulted or reset: just report the fault information. */
	status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	if (status & (MPI3_SYSIF_IOC_STATUS_RESET_HISTORY |
	    MPI3_SYSIF_IOC_STATUS_FAULT)) {
		mpi3mr_print_fault_info(sc);
		return;
	}

	/* Otherwise fault the controller ourselves to collect a snapdump. */
	mpi3mr_trigger_snapdump(sc, reason_code);
}
244 
/*
 * Translate a reply-buffer bus address back to its kernel virtual
 * address, or NULL when the address is zero or outside the pool.
 */
static void * mpi3mr_get_reply_virt_addr(struct mpi3mr_softc *sc,
    bus_addr_t phys_addr)
{
	if (phys_addr == 0)
		return NULL;

	if (phys_addr < sc->reply_buf_dma_min_address ||
	    phys_addr > sc->reply_buf_dma_max_address)
		return NULL;

	return sc->reply_buf + (phys_addr - sc->reply_buf_phys);
}
256 
/*
 * Translate a sense-buffer bus address back to its kernel virtual
 * address; NULL for a zero address. No range check is performed.
 */
static void * mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_softc *sc,
    bus_addr_t phys_addr)
{
	if (phys_addr == 0)
		return NULL;

	return sc->sense_buf + (phys_addr - sc->sense_buf_phys);
}
264 
/*
 * Return a consumed reply buffer to the reply free queue and notify
 * the controller via the host index register.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
    U64 reply_dma)
{
	U32 idx;

	mtx_lock_spin(&sc->reply_free_q_lock);
	idx = sc->reply_free_q_host_index;
	/* Advance the host index with wraparound. */
	if (idx == sc->reply_free_q_sz - 1)
		sc->reply_free_q_host_index = 0;
	else
		sc->reply_free_q_host_index = idx + 1;
	sc->reply_free_q[idx] = reply_dma;
	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
		sc->reply_free_q_host_index);
	mtx_unlock_spin(&sc->reply_free_q_lock);
}
280 
/*
 * Return a consumed sense buffer to the sense-buffer free queue and
 * notify the controller via the host index register.
 */
static void mpi3mr_repost_sense_buf(struct mpi3mr_softc *sc,
    U64 sense_buf_phys)
{
	U32 idx;

	mtx_lock_spin(&sc->sense_buf_q_lock);
	idx = sc->sense_buf_q_host_index;
	/* Advance the host index with wraparound. */
	if (idx == sc->sense_buf_q_sz - 1)
		sc->sense_buf_q_host_index = 0;
	else
		sc->sense_buf_q_host_index = idx + 1;
	sc->sense_buf_q[idx] = sense_buf_phys;
	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
		sc->sense_buf_q_host_index);
	mtx_unlock_spin(&sc->sense_buf_q_lock);
}
297 
/*
 * Set the io_divert flag to 'divert_value' on every target that belongs
 * to throttle group 'tg'. The walk over the target list is done under
 * the spin-held target_lock.
 */
void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_softc *sc,
	struct mpi3mr_throttle_group_info *tg, U8 divert_value)
{
	struct mpi3mr_target *target;

	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
		if (target->throttle_group == tg)
			target->io_divert = divert_value;
	}
	mtx_unlock_spin(&sc->target_lock);
}
310 
311 /**
312  * mpi3mr_submit_admin_cmd - Submit request to admin queue
313  * @sc: Adapter instance reference
314  * @admin_req: MPI3 request
315  * @admin_req_sz: Request size
316  *
317  * Post the MPI3 request into admin request queue and
318  * inform the controller, if the queue is full return
319  * appropriate error.
320  *
321  * Return: 0 on success, non-zero on failure.
322  */
/*
 * Copy 'admin_req' into the next free admin request queue slot and ring
 * the admin request PI doorbell. Returns 0 on success, -EFAULT when the
 * controller is unrecoverable, -EAGAIN when the queue is full.
 */
int mpi3mr_submit_admin_cmd(struct mpi3mr_softc *sc, void *admin_req,
    U16 admin_req_sz)
{
	U16 areq_pi = 0, areq_ci = 0, max_entries = 0;
	int retval = 0;
	U8 *areq_entry;

	mtx_lock_spin(&sc->admin_req_lock);
	areq_pi = sc->admin_req_pi;
	areq_ci = sc->admin_req_ci;
	max_entries = sc->num_admin_reqs;

	/*
	 * Fix: the original did 'return -EFAULT' here, leaving
	 * admin_req_lock held forever; route through 'out' instead.
	 */
	if (sc->unrecoverable) {
		retval = -EFAULT;
		goto out;
	}

	/* Queue is full when PI+1 (with wraparound) would land on CI. */
	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
					   (areq_pi == (max_entries - 1)))) {
		printf(IOCNAME "AdminReqQ full condition detected\n",
		    sc->name);
		retval = -EAGAIN;
		goto out;
	}

	/* Zero the whole frame so any bytes past admin_req_sz are clean. */
	areq_entry = (U8 *)sc->admin_req + (areq_pi *
						     MPI3MR_AREQ_FRAME_SZ);
	memset(areq_entry, 0, MPI3MR_AREQ_FRAME_SZ);
	memcpy(areq_entry, (U8 *)admin_req, admin_req_sz);

	if (++areq_pi == max_entries)
		areq_pi = 0;
	sc->admin_req_pi = areq_pi;

	/* Inform the controller of the new producer index. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);

out:
	mtx_unlock_spin(&sc->admin_req_lock);
	return retval;
}
360 
361 /**
362  * mpi3mr_check_req_qfull - Check request queue is full or not
363  * @op_req_q: Operational reply queue info
364  *
365  * Return: true when queue full, false otherwise.
366  */
367 static inline bool
mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue * op_req_q)368 mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue *op_req_q)
369 {
370 	U16 pi, ci, max_entries;
371 	bool is_qfull = false;
372 
373 	pi = op_req_q->pi;
374 	ci = op_req_q->ci;
375 	max_entries = op_req_q->num_reqs;
376 
377 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
378 		is_qfull = true;
379 
380 	return is_qfull;
381 }
382 
383 /**
384  * mpi3mr_submit_io - Post IO command to firmware
385  * @sc:		      Adapter instance reference
386  * @op_req_q:	      Operational Request queue reference
387  * @req:	      MPT request data
388  *
389  * This function submits IO command to firmware.
390  *
391  * Return: Nothing
392  */
int mpi3mr_submit_io(struct mpi3mr_softc *sc,
    struct mpi3mr_op_req_queue *op_req_q, U8 *req)
{
	U16 pi, max_entries;
	int retval = 0;
	U8 *req_entry;
	U16 req_sz = sc->facts.op_req_sz;
	struct mpi3mr_irq_context *irq_ctx;

	mtx_lock_spin(&op_req_q->q_lock);

	pi = op_req_q->pi;
	max_entries = op_req_q->num_reqs;
	if (mpi3mr_check_req_qfull(op_req_q)) {
		/*
		 * Queue looks full: drain completions for the paired reply
		 * queue (which advances op_req_q->ci) and re-check once.
		 */
		irq_ctx = &sc->irq_ctx[op_req_q->reply_qid - 1];
		mpi3mr_complete_io_cmd(sc, irq_ctx);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			printf(IOCNAME "OpReqQ full condition detected\n",
				sc->name);
			retval = -EBUSY;
			goto out;
		}
	}

	/*
	 * Zero the full slot (req_sz), then copy the caller's frame.
	 * NOTE(review): the copy length is MPI3MR_AREQ_FRAME_SZ, not
	 * req_sz — assumes every caller's 'req' buffer holds at least
	 * MPI3MR_AREQ_FRAME_SZ bytes and that frames never exceed the
	 * slot size; confirm against callers.
	 */
	req_entry = (U8 *)op_req_q->q_base + (pi * req_sz);
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_AREQ_FRAME_SZ);
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

	/* Account the in-flight IO against the paired reply queue. */
	mpi3mr_atomic_inc(&sc->op_reply_q[op_req_q->reply_qid - 1].pend_ios);

	/* Ring the per-queue PI doorbell to hand the frame to firmware. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(op_req_q->qid), op_req_q->pi);
	if (sc->mpi3mr_debug & MPI3MR_TRACE) {
		device_printf(sc->mpi3mr_dev, "IO submission: QID:%d PI:0x%x\n", op_req_q->qid, op_req_q->pi);
		mpi3mr_hexdump(req_entry, MPI3MR_AREQ_FRAME_SZ, 8);
	}

out:
	mtx_unlock_spin(&op_req_q->q_lock);
	return retval;
}
437 
/*
 * Fill one simple scatter/gather element at 'paddr' with the given
 * flags, length and DMA address.
 * NOTE(review): fields are stored in host byte order with no
 * htole conversion — presumably the hardware is little-endian like
 * the supported hosts; confirm for big-endian targets.
 */
inline void
mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
		     bus_addr_t dma_addr)
{
	Mpi3SGESimple_t *sgel = paddr;

	sgel->Flags = flags;
	sgel->Length = (length);
	sgel->Address = (U64)dma_addr;
}
448 
mpi3mr_build_zero_len_sge(void * paddr)449 void mpi3mr_build_zero_len_sge(void *paddr)
450 {
451 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
452 		MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_LIST);
453 
454 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
455 
456 }
457 
/* Mark interrupts as enabled for this adapter (software flag only). */
void mpi3mr_enable_interrupts(struct mpi3mr_softc *sc)
{
	sc->intr_enabled = 1;
}
462 
/* Mark interrupts as disabled for this adapter (software flag only). */
void mpi3mr_disable_interrupts(struct mpi3mr_softc *sc)
{
	sc->intr_enabled = 0;
}
467 
468 void
mpi3mr_memaddr_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)469 mpi3mr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
470 {
471 	bus_addr_t *addr;
472 
473 	addr = arg;
474 	*addr = segs[0].ds_addr;
475 }
476 
/**
 * mpi3mr_delete_op_reply_queue - delete operational reply queue
 * @sc: Adapter instance reference
 * @qid: operational reply queue id (1-based)
 *
 * Issue a DELETE_REPLY_QUEUE MPI request through the admin queue,
 * wait for it to complete, and on success release the queue's DMA
 * memory, map and tag.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
	Mpi3DeleteReplyQueueRequest_t delq_req;
	struct mpi3mr_op_reply_queue *op_reply_q;
	int retval = 0;


	op_reply_q = &sc->op_reply_q[qid - 1];

	if (!op_reply_q->qid)
	{
		retval = -1;
		printf(IOCNAME "Issue DelRepQ: called with invalid Reply QID\n",
		    sc->name);
		goto out;
	}

	memset(&delq_req, 0, sizeof(delq_req));

	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	/*
	 * Fix: removed a duplicated MPI3MR_CMD_PENDING check that was dead
	 * code and, had it ever fired, would have jumped to 'out' with
	 * init_cmds.completion.lock still held.
	 */
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	delq_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	delq_req.Function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.QueueID = qid;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &delq_req, sizeof(delq_req));
	if (retval) {
		printf(IOCNAME "Issue DelRepQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}
	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue DelRepQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump; the controller is declared dead. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		sc->unrecoverable = 1;

		retval = -1;
		goto out_unlock;
	}
	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue DelRepQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	sc->irq_ctx[qid - 1].op_reply_q = NULL;

	/* Firmware no longer owns the queue; release its DMA resources. */
	if (sc->op_reply_q[qid - 1].q_base_phys != 0)
		bus_dmamap_unload(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base_dmamap);
	if (sc->op_reply_q[qid - 1].q_base != NULL)
		bus_dmamem_free(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base, sc->op_reply_q[qid - 1].q_base_dmamap);
	if (sc->op_reply_q[qid - 1].q_base_tag != NULL)
		bus_dma_tag_destroy(sc->op_reply_q[qid - 1].q_base_tag);

	sc->op_reply_q[qid - 1].q_base = NULL;
	sc->op_reply_q[qid - 1].qid = 0;
out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	return retval;
}
563 
564 /**
565  * mpi3mr_create_op_reply_queue - create operational reply queue
566  * @sc: Adapter instance reference
567  * @qid: operational reply queue id
568  *
569  * Create operational reply queue by issuing MPI request
570  * through admin queue.
571  *
572  * Return:  0 on success, non-zero on failure.
573  */
static int mpi3mr_create_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
	Mpi3CreateReplyQueueRequest_t create_req;
	struct mpi3mr_op_reply_queue *op_reply_q;
	int retval = 0;
	char q_lock_name[32];

	op_reply_q = &sc->op_reply_q[qid - 1];

	/* Refuse to create a queue that is already active. */
	if (op_reply_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateRepQ: called for duplicate qid %d\n",
		    sc->name, op_reply_q->qid);
		return retval;
	}

	op_reply_q->ci = 0;
	/* A0 silicon supports only a reduced reply queue depth. */
	if (pci_get_revid(sc->mpi3mr_dev) == SAS4116_CHIP_REV_A0)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD_A0;
	else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;

	op_reply_q->qsz = op_reply_q->num_replies * sc->op_reply_sz;
	op_reply_q->ephase = 1;

	/*
	 * Allocate the queue lock and DMA memory only on first creation;
	 * on a re-create (e.g. after reset) the existing buffer is reused.
	 * NOTE(review): the ENOMEM returns below bypass the 'out' cleanup
	 * path by design — nothing to unwind at that point.
	 */
        if (!op_reply_q->q_base) {
		snprintf(q_lock_name, 32, "Reply Queue Lock[%d]", qid);
		mtx_init(&op_reply_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					sc->dma_loaddr,		/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_reply_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_reply_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_reply_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Operational reply DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_reply_q->q_base_tag, (void **)&op_reply_q->q_base,
		    BUS_DMA_NOWAIT, &op_reply_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
			return (ENOMEM);
		}
		bzero(op_reply_q->q_base, op_reply_q->qsz);
		/* Synchronous load (NOWAIT): callback fills q_base_phys. */
		bus_dmamap_load(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap, op_reply_q->q_base, op_reply_q->qsz,
		    mpi3mr_memaddr_cb, &op_reply_q->q_base_phys, BUS_DMA_NOWAIT);
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Reply queue ID: %d phys addr= %#016jx virt_addr: %pa size= %d\n",
		    qid, (uintmax_t)op_reply_q->q_base_phys, op_reply_q->q_base, op_reply_q->qsz);

		if (!op_reply_q->q_base)
		{
			retval = -1;
			printf(IOCNAME "CreateRepQ: memory alloc failed for qid %d\n",
			    sc->name, qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds tracker. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateRepQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.QueueID = qid;
	create_req.Flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
	create_req.MSIxIndex = sc->irq_ctx[qid - 1].msix_index;
	create_req.BaseAddress = (U64)op_reply_q->q_base_phys;
	create_req.Size = op_reply_q->num_replies;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateRepQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	  	MPI3MR_INTADMCMD_TIMEOUT);
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateRepQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump; the controller is declared dead. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateRepQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Queue is live: record it and wire it to its interrupt context. */
	op_reply_q->qid = qid;
	sc->irq_ctx[qid - 1].op_reply_q = op_reply_q;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, release whatever DMA resources were set up. */
	if (retval) {
		if (op_reply_q->q_base_phys != 0)
			bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base != NULL)
			bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_reply_q->q_base_tag);
		op_reply_q->q_base = NULL;
		op_reply_q->qid = 0;
	}

	return retval;
}
711 
712 /**
713  * mpi3mr_create_op_req_queue - create operational request queue
714  * @sc: Adapter instance reference
715  * @req_qid: operational request queue id
716  * @reply_qid: Reply queue ID
717  *
718  * Create operational request queue by issuing MPI request
719  * through admin queue.
720  *
721  * Return:  0 on success, non-zero on failure.
722  */
static int mpi3mr_create_op_req_queue(struct mpi3mr_softc *sc, U16 req_qid, U8 reply_qid)
{
	Mpi3CreateRequestQueueRequest_t create_req;
	struct mpi3mr_op_req_queue *op_req_q;
	int retval = 0;
	char q_lock_name[32];

	op_req_q = &sc->op_req_q[req_qid - 1];

	/* Refuse to create a queue that is already active. */
	if (op_req_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateReqQ: called for duplicate qid %d\n",
		    sc->name, op_req_q->qid);
		return retval;
	}

	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->num_reqs = MPI3MR_OP_REQ_Q_QD;
	op_req_q->qsz = op_req_q->num_reqs * sc->facts.op_req_sz;
	op_req_q->reply_qid = reply_qid;

	/*
	 * Allocate the queue lock and DMA memory only on first creation;
	 * on a re-create (e.g. after reset) the existing buffer is reused.
	 * NOTE(review): the ENOMEM returns below bypass the 'out' cleanup
	 * path by design — nothing to unwind at that point.
	 */
	if (!op_req_q->q_base) {
		snprintf(q_lock_name, 32, "Request Queue Lock[%d]", req_qid);
		mtx_init(&op_req_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					sc->dma_loaddr,		/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_req_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_req_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_req_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_req_q->q_base_tag, (void **)&op_req_q->q_base,
		    BUS_DMA_NOWAIT, &op_req_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
			return (ENOMEM);
		}

		bzero(op_req_q->q_base, op_req_q->qsz);

		/* Synchronous load (NOWAIT): callback fills q_base_phys. */
		bus_dmamap_load(op_req_q->q_base_tag, op_req_q->q_base_dmamap, op_req_q->q_base, op_req_q->qsz,
		    mpi3mr_memaddr_cb, &op_req_q->q_base_phys, BUS_DMA_NOWAIT);

		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Request QID: %d phys addr= %#016jx virt addr= %pa size= %d associated Reply QID: %d\n",
		    req_qid, (uintmax_t)op_req_q->q_base_phys, op_req_q->q_base, op_req_q->qsz, reply_qid);

		if (!op_req_q->q_base) {
			retval = -1;
			printf(IOCNAME "CreateReqQ: memory alloc failed for qid %d\n",
			    sc->name, req_qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds tracker. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateReqQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.QueueID = req_qid;
	create_req.Flags = 0;
	create_req.ReplyQueueID = reply_qid;
	create_req.BaseAddress = (U64)op_req_q->q_base_phys;
	create_req.Size = op_req_q->num_reqs;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateReqQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));

	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateReqQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump; the controller is declared dead. */
		mpi3mr_check_rh_fault_ioc(sc,
			MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateReqQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Queue is live: record its id. */
	op_req_q->qid = req_qid;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, release whatever DMA resources were set up. */
	if (retval) {
		if (op_req_q->q_base_phys != 0)
			bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
		if (op_req_q->q_base != NULL)
			bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
		if (op_req_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_req_q->q_base_tag);
		op_req_q->q_base = NULL;
		op_req_q->qid = 0;
	}
	return retval;
}
858 
859 /**
860  * mpi3mr_create_op_queues - create operational queues
861  * @sc: Adapter instance reference
862  *
863  * Create operational queues (request queues and reply queues).
864  * Return:  0 on success, non-zero on failure.
865  */
mpi3mr_create_op_queues(struct mpi3mr_softc * sc)866 static int mpi3mr_create_op_queues(struct mpi3mr_softc *sc)
867 {
868 	int retval = 0;
869 	U16 num_queues = 0, i = 0, qid;
870 
871 	num_queues = min(sc->facts.max_op_reply_q,
872 	    sc->facts.max_op_req_q);
873 	num_queues = min(num_queues, sc->msix_count);
874 
875 	/*
876 	 * During reset set the num_queues to the number of queues
877 	 * that was set before the reset.
878 	 */
879 	if (sc->num_queues)
880 		num_queues = sc->num_queues;
881 
882 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Trying to create %d Operational Q pairs\n",
883 	    num_queues);
884 
885 	if (!sc->op_req_q) {
886 		sc->op_req_q = malloc(sizeof(struct mpi3mr_op_req_queue) *
887 		    num_queues, M_MPI3MR, M_NOWAIT | M_ZERO);
888 
889 		if (!sc->op_req_q) {
890 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Request queue info\n");
891 			retval = -1;
892 			goto out_failed;
893 		}
894 	}
895 
896 	if (!sc->op_reply_q) {
897 		sc->op_reply_q = malloc(sizeof(struct mpi3mr_op_reply_queue) * num_queues,
898 			M_MPI3MR, M_NOWAIT | M_ZERO);
899 
900 		if (!sc->op_reply_q) {
901 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Reply queue info\n");
902 			retval = -1;
903 			goto out_failed;
904 		}
905 	}
906 
907 	sc->num_hosttag_op_req_q = (sc->max_host_ios + 1) / num_queues;
908 
909 	/*Operational Request and reply queue ID starts with 1*/
910 	for (i = 0; i < num_queues; i++) {
911 		qid = i + 1;
912 		if (mpi3mr_create_op_reply_queue(sc, qid)) {
913 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Reply queue %d\n",
914 			    qid);
915 			break;
916 		}
917 		if (mpi3mr_create_op_req_queue(sc, qid,
918 		    sc->op_reply_q[qid - 1].qid)) {
919 			mpi3mr_delete_op_reply_queue(sc, qid);
920 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Request queue %d\n",
921 			    qid);
922 			break;
923 		}
924 
925 	}
926 
927 	/* Not even one queue is created successfully*/
928         if (i == 0) {
929                 retval = -1;
930                 goto out_failed;
931         }
932 
933 	if (!sc->num_queues) {
934 		sc->num_queues = i;
935 	} else {
936 		if (num_queues != i) {
937 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Number of queues (%d) post reset are not same as"
938 					"queues allocated (%d) during driver init\n", i, num_queues);
939 			goto out_failed;
940 		}
941 	}
942 
943 	mpi3mr_dprint(sc, MPI3MR_INFO, "Successfully created %d Operational Queue pairs\n",
944 	    sc->num_queues);
945 	mpi3mr_dprint(sc, MPI3MR_INFO, "Request Queue QD: %d Reply queue QD: %d\n",
946 	    sc->op_req_q[0].num_reqs, sc->op_reply_q[0].num_replies);
947 
948 	return retval;
949 out_failed:
950 	if (sc->op_req_q) {
951 		free(sc->op_req_q, M_MPI3MR);
952 		sc->op_req_q = NULL;
953 	}
954 	if (sc->op_reply_q) {
955 		free(sc->op_reply_q, M_MPI3MR);
956 		sc->op_reply_q = NULL;
957 	}
958 	return retval;
959 }
960 
961 /**
962  * mpi3mr_setup_admin_qpair - Setup admin queue pairs
963  * @sc: Adapter instance reference
964  *
965  * Allocation and setup admin queues(request queues and reply queues).
966  * Return:  0 on success, non-zero on failure.
967  */
mpi3mr_setup_admin_qpair(struct mpi3mr_softc * sc)968 static int mpi3mr_setup_admin_qpair(struct mpi3mr_softc *sc)
969 {
970 	int retval = 0;
971 	U32 num_adm_entries = 0;
972 
973 	sc->admin_req_q_sz = MPI3MR_AREQQ_SIZE;
974 	sc->num_admin_reqs = sc->admin_req_q_sz / MPI3MR_AREQ_FRAME_SZ;
975 	sc->admin_req_ci = sc->admin_req_pi = 0;
976 
977 	sc->admin_reply_q_sz = MPI3MR_AREPQ_SIZE;
978 	sc->num_admin_replies = sc->admin_reply_q_sz/ MPI3MR_AREP_FRAME_SZ;
979 	sc->admin_reply_ci = 0;
980 	sc->admin_reply_ephase = 1;
981 
982 	if (!sc->admin_req) {
983 		/*
984 		 * We need to create the tag for the admin queue to get the
985 		 * iofacts to see how many bits the controller decodes.  Solve
986 		 * this chicken and egg problem by only doing lower 4GB DMA.
987 		 */
988 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
989 					4, 0,			/* algnmnt, boundary */
990 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
991 					BUS_SPACE_MAXADDR,	/* highaddr */
992 					NULL, NULL,		/* filter, filterarg */
993 					sc->admin_req_q_sz,	/* maxsize */
994 					1,			/* nsegments */
995 					sc->admin_req_q_sz,	/* maxsegsize */
996 					0,			/* flags */
997 					NULL, NULL,		/* lockfunc, lockarg */
998 					&sc->admin_req_tag)) {
999 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1000 			return (ENOMEM);
1001 		}
1002 
1003 		if (bus_dmamem_alloc(sc->admin_req_tag, (void **)&sc->admin_req,
1004 		    BUS_DMA_NOWAIT, &sc->admin_req_dmamap)) {
1005 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
1006 			return (ENOMEM);
1007 		}
1008 		bzero(sc->admin_req, sc->admin_req_q_sz);
1009 		bus_dmamap_load(sc->admin_req_tag, sc->admin_req_dmamap, sc->admin_req, sc->admin_req_q_sz,
1010 		    mpi3mr_memaddr_cb, &sc->admin_req_phys, BUS_DMA_NOWAIT);
1011 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Req queue phys addr= %#016jx size= %d\n",
1012 		    (uintmax_t)sc->admin_req_phys, sc->admin_req_q_sz);
1013 
1014 		if (!sc->admin_req)
1015 		{
1016 			retval = -1;
1017 			printf(IOCNAME "Memory alloc for AdminReqQ: failed\n",
1018 			    sc->name);
1019 			goto out_failed;
1020 		}
1021 	}
1022 
1023 	if (!sc->admin_reply) {
1024 		mtx_init(&sc->admin_reply_lock, "Admin Reply Queue Lock", NULL, MTX_SPIN);
1025 
1026 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1027 					4, 0,			/* algnmnt, boundary */
1028 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1029 					BUS_SPACE_MAXADDR,	/* highaddr */
1030 					NULL, NULL,		/* filter, filterarg */
1031 					sc->admin_reply_q_sz,	/* maxsize */
1032 					1,			/* nsegments */
1033 					sc->admin_reply_q_sz,	/* maxsegsize */
1034 					0,			/* flags */
1035 					NULL, NULL,		/* lockfunc, lockarg */
1036 					&sc->admin_reply_tag)) {
1037 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply DMA tag\n");
1038 			return (ENOMEM);
1039 		}
1040 
1041 		if (bus_dmamem_alloc(sc->admin_reply_tag, (void **)&sc->admin_reply,
1042 		    BUS_DMA_NOWAIT, &sc->admin_reply_dmamap)) {
1043 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
1044 			return (ENOMEM);
1045 		}
1046 		bzero(sc->admin_reply, sc->admin_reply_q_sz);
1047 		bus_dmamap_load(sc->admin_reply_tag, sc->admin_reply_dmamap, sc->admin_reply, sc->admin_reply_q_sz,
1048 		    mpi3mr_memaddr_cb, &sc->admin_reply_phys, BUS_DMA_NOWAIT);
1049 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Reply queue phys addr= %#016jx size= %d\n",
1050 		    (uintmax_t)sc->admin_reply_phys, sc->admin_req_q_sz);
1051 
1052 
1053 		if (!sc->admin_reply)
1054 		{
1055 			retval = -1;
1056 			printf(IOCNAME "Memory alloc for AdminRepQ: failed\n",
1057 			    sc->name);
1058 			goto out_failed;
1059 		}
1060 	}
1061 
1062 	num_adm_entries = (sc->num_admin_replies << 16) |
1063 				(sc->num_admin_reqs);
1064 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET, num_adm_entries);
1065 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET, sc->admin_req_phys);
1066 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET, sc->admin_reply_phys);
1067 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
1068 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, sc->admin_reply_ci);
1069 
1070 	return retval;
1071 
1072 out_failed:
1073 	/* Free Admin reply*/
1074 	if (sc->admin_reply_phys)
1075 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
1076 
1077 	if (sc->admin_reply != NULL)
1078 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
1079 		    sc->admin_reply_dmamap);
1080 
1081 	if (sc->admin_reply_tag != NULL)
1082 		bus_dma_tag_destroy(sc->admin_reply_tag);
1083 
1084 	/* Free Admin request*/
1085 	if (sc->admin_req_phys)
1086 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
1087 
1088 	if (sc->admin_req != NULL)
1089 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
1090 		    sc->admin_req_dmamap);
1091 
1092 	if (sc->admin_req_tag != NULL)
1093 		bus_dma_tag_destroy(sc->admin_req_tag);
1094 
1095 	return retval;
1096 }
1097 
1098 /**
1099  * mpi3mr_print_fault_info - Display fault information
1100  * @sc: Adapter instance reference
1101  *
1102  * Display the controller fault information if there is a
1103  * controller fault.
1104  *
1105  * Return: Nothing.
1106  */
mpi3mr_print_fault_info(struct mpi3mr_softc * sc)1107 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc)
1108 {
1109 	U32 ioc_status, code, code1, code2, code3;
1110 
1111 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1112 
1113 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1114 		code = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
1115 			MPI3_SYSIF_FAULT_CODE_MASK;
1116 		code1 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO0_OFFSET);
1117 		code2 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO1_OFFSET);
1118 		code3 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO2_OFFSET);
1119 		printf(IOCNAME "fault codes 0x%04x:0x%04x:0x%04x:0x%04x\n",
1120 		    sc->name, code, code1, code2, code3);
1121 	}
1122 }
1123 
mpi3mr_get_iocstate(struct mpi3mr_softc * sc)1124 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc)
1125 {
1126 	U32 ioc_status, ioc_control;
1127 	U8 ready, enabled;
1128 
1129 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1130 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1131 
1132 	if(sc->unrecoverable)
1133 		return MRIOC_STATE_UNRECOVERABLE;
1134 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1135 		return MRIOC_STATE_FAULT;
1136 
1137 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1138 	enabled = (ioc_control & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1139 
1140 	if (ready && enabled)
1141 		return MRIOC_STATE_READY;
1142 	if ((!ready) && (!enabled))
1143 		return MRIOC_STATE_RESET;
1144 	if ((!ready) && (enabled))
1145 		return MRIOC_STATE_BECOMING_READY;
1146 
1147 	return MRIOC_STATE_RESET_REQUESTED;
1148 }
1149 
mpi3mr_clear_resethistory(struct mpi3mr_softc * sc)1150 static inline void mpi3mr_clear_resethistory(struct mpi3mr_softc *sc)
1151 {
1152         U32 ioc_status;
1153 
1154 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1155         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1156 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
1157 
1158 }
1159 
1160 /**
1161  * mpi3mr_mur_ioc - Message unit Reset handler
1162  * @sc: Adapter instance reference
1163  * @reset_reason: Reset reason code
1164  *
1165  * Issue Message unit Reset to the controller and wait for it to
1166  * be complete.
1167  *
1168  * Return: 0 on success, -1 on failure.
1169  */
mpi3mr_mur_ioc(struct mpi3mr_softc * sc,U16 reset_reason)1170 static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U16 reset_reason)
1171 {
1172 	U32 ioc_config, timeout, ioc_status, scratch_pad0;
1173         int retval = -1;
1174 
1175         mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Message Unit Reset(MUR)\n");
1176         if (sc->unrecoverable) {
1177                 mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC is unrecoverable MUR not issued\n");
1178                 return retval;
1179         }
1180         mpi3mr_clear_resethistory(sc);
1181 
1182 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_FREEBSD <<
1183 			MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
1184 			(sc->facts.ioc_num <<
1185 			MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1186 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, scratch_pad0);
1187 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1188         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1189 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1190 
1191         timeout = MPI3MR_MUR_TIMEOUT * 10;
1192         do {
1193 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1194                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1195                         mpi3mr_clear_resethistory(sc);
1196 			ioc_config =
1197 				mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1198                         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1199                             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1200                             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
1201                                 retval = 0;
1202                                 break;
1203                         }
1204                 }
1205                 DELAY(100 * 1000);
1206         } while (--timeout);
1207 
1208 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1209 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1210 
1211         mpi3mr_dprint(sc, MPI3MR_INFO, "IOC Status/Config after %s MUR is (0x%x)/(0x%x)\n",
1212                 !retval ? "successful":"failed", ioc_status, ioc_config);
1213         return retval;
1214 }
1215 
1216 /**
1217  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1218  * @sc: Adapter instance reference
1219  *
1220  * Set Enable IOC bit in IOC configuration register and wait for
1221  * the controller to become ready.
1222  *
1223  * Return: 0 on success, appropriate error on failure.
1224  */
mpi3mr_bring_ioc_ready(struct mpi3mr_softc * sc)1225 static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc)
1226 {
1227         U32 ioc_config, timeout;
1228         enum mpi3mr_iocstate current_state;
1229 
1230 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1231         ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1232 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1233 
1234         timeout = sc->ready_timeout * 10;
1235         do {
1236                 current_state = mpi3mr_get_iocstate(sc);
1237                 if (current_state == MRIOC_STATE_READY)
1238                         return 0;
1239                 DELAY(100 * 1000);
1240         } while (--timeout);
1241 
1242         return -1;
1243 }
1244 
1245 static const struct {
1246 	enum mpi3mr_iocstate value;
1247 	char *name;
1248 } mrioc_states[] = {
1249 	{ MRIOC_STATE_READY, "ready" },
1250 	{ MRIOC_STATE_FAULT, "fault" },
1251 	{ MRIOC_STATE_RESET, "reset" },
1252 	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
1253 	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
1254 	{ MRIOC_STATE_COUNT, "Count" },
1255 };
1256 
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)1257 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
1258 {
1259 	int i;
1260 	char *name = NULL;
1261 
1262 	for (i = 0; i < MRIOC_STATE_COUNT; i++) {
1263 		if (mrioc_states[i].value == mrioc_state){
1264 			name = mrioc_states[i].name;
1265 			break;
1266 		}
1267 	}
1268 	return name;
1269 }
1270 
1271 /* Reset reason to name mapper structure*/
1272 static const struct {
1273 	enum mpi3mr_reset_reason value;
1274 	char *name;
1275 } mpi3mr_reset_reason_codes[] = {
1276 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
1277 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
1278 	{ MPI3MR_RESET_FROM_IOCTL, "application" },
1279 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
1280 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
1281 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
1282 	{ MPI3MR_RESET_FROM_SCSIIO_TIMEOUT, "SCSIIO timeout" },
1283 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
1284 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
1285 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
1286 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
1287 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
1288 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
1289 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
1290 	{
1291 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
1292 		"create request queue timeout"
1293 	},
1294 	{
1295 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
1296 		"create reply queue timeout"
1297 	},
1298 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
1299 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
1300 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
1301 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
1302 	{
1303 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
1304 		"component image activation timeout"
1305 	},
1306 	{
1307 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
1308 		"get package version timeout"
1309 	},
1310 	{
1311 		MPI3MR_RESET_FROM_PELABORT_TIMEOUT,
1312 		"persistent event log abort timeout"
1313 	},
1314 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
1315 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
1316 	{
1317 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
1318 		"diagnostic buffer post timeout"
1319 	},
1320 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" },
1321 	{ MPI3MR_RESET_REASON_COUNT, "Reset reason count" },
1322 };
1323 
1324 /**
1325  * mpi3mr_reset_rc_name - get reset reason code name
1326  * @reason_code: reset reason code value
1327  *
1328  * Map reset reason to an NULL terminated ASCII string
1329  *
1330  * Return: Name corresponding to reset reason value or NULL.
1331  */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1332 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1333 {
1334 	int i;
1335 	char *name = NULL;
1336 
1337 	for (i = 0; i < MPI3MR_RESET_REASON_COUNT; i++) {
1338 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1339 			name = mpi3mr_reset_reason_codes[i].name;
1340 			break;
1341 		}
1342 	}
1343 	return name;
1344 }
1345 
1346 #define MAX_RESET_TYPE 3
1347 /* Reset type to name mapper structure*/
1348 static const struct {
1349 	U16 reset_type;
1350 	char *name;
1351 } mpi3mr_reset_types[] = {
1352 	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
1353 	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
1354 	{ MAX_RESET_TYPE, "count"}
1355 };
1356 
1357 /**
1358  * mpi3mr_reset_type_name - get reset type name
1359  * @reset_type: reset type value
1360  *
1361  * Map reset type to an NULL terminated ASCII string
1362  *
1363  * Return: Name corresponding to reset type value or NULL.
1364  */
mpi3mr_reset_type_name(U16 reset_type)1365 static const char *mpi3mr_reset_type_name(U16 reset_type)
1366 {
1367 	int i;
1368 	char *name = NULL;
1369 
1370 	for (i = 0; i < MAX_RESET_TYPE; i++) {
1371 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1372 			name = mpi3mr_reset_types[i].name;
1373 			break;
1374 		}
1375 	}
1376 	return name;
1377 }
1378 
1379 /**
1380  * mpi3mr_soft_reset_success - Check softreset is success or not
1381  * @ioc_status: IOC status register value
1382  * @ioc_config: IOC config register value
1383  *
1384  * Check whether the soft reset is successful or not based on
1385  * IOC status and IOC config register values.
1386  *
1387  * Return: True when the soft reset is success, false otherwise.
1388  */
1389 static inline bool
mpi3mr_soft_reset_success(U32 ioc_status,U32 ioc_config)1390 mpi3mr_soft_reset_success(U32 ioc_status, U32 ioc_config)
1391 {
1392 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1393 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1394 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1395 		return true;
1396 	return false;
1397 }
1398 
1399 /**
1400  * mpi3mr_diagfault_success - Check diag fault is success or not
1401  * @sc: Adapter reference
1402  * @ioc_status: IOC status register value
1403  *
1404  * Check whether the controller hit diag reset fault code.
1405  *
1406  * Return: True when there is diag fault, false otherwise.
1407  */
mpi3mr_diagfault_success(struct mpi3mr_softc * sc,U32 ioc_status)1408 static inline bool mpi3mr_diagfault_success(struct mpi3mr_softc *sc,
1409 	U32 ioc_status)
1410 {
1411 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1412 		return false;
1413 	mpi3mr_print_fault_info(sc);
1414 	return true;
1415 }
1416 
1417 /**
1418  * mpi3mr_issue_iocfacts - Send IOC Facts
1419  * @sc: Adapter instance reference
1420  * @facts_data: Cached IOC facts data
1421  *
1422  * Issue IOC Facts MPI request through admin queue and wait for
1423  * the completion of it or time out.
1424  *
1425  * Return: 0 on success, non-zero on failures.
1426  */
mpi3mr_issue_iocfacts(struct mpi3mr_softc * sc,Mpi3IOCFactsData_t * facts_data)1427 static int mpi3mr_issue_iocfacts(struct mpi3mr_softc *sc,
1428     Mpi3IOCFactsData_t *facts_data)
1429 {
1430 	Mpi3IOCFactsRequest_t iocfacts_req;
1431 	bus_dma_tag_t data_tag = NULL;
1432 	bus_dmamap_t data_map = NULL;
1433 	bus_addr_t data_phys = 0;
1434 	void *data = NULL;
1435 	U32 data_len = sizeof(*facts_data);
1436 	int retval = 0;
1437 
1438 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1439                 	MPI3_SGE_FLAGS_DLAS_SYSTEM |
1440 			MPI3_SGE_FLAGS_END_OF_LIST);
1441 
1442 
1443 	/*
1444 	 * We can't use sc->dma_loaddr here.  We set those only after we get the
1445 	 * iocfacts.  So allocate in the lower 4GB.  The amount of data is tiny
1446 	 * and we don't do this that often, so any bouncing we might have to do
1447 	 * isn't a cause for concern.
1448 	 */
1449         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1450 				4, 0,			/* algnmnt, boundary */
1451 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1452 				BUS_SPACE_MAXADDR,	/* highaddr */
1453 				NULL, NULL,		/* filter, filterarg */
1454                                 data_len,		/* maxsize */
1455                                 1,			/* nsegments */
1456                                 data_len,		/* maxsegsize */
1457                                 0,			/* flags */
1458                                 NULL, NULL,		/* lockfunc, lockarg */
1459                                 &data_tag)) {
1460 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1461 		return (ENOMEM);
1462         }
1463 
1464         if (bus_dmamem_alloc(data_tag, (void **)&data,
1465 	    BUS_DMA_NOWAIT, &data_map)) {
1466 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
1467 			__func__, __LINE__);
1468 		return (ENOMEM);
1469         }
1470 
1471         bzero(data, data_len);
1472         bus_dmamap_load(data_tag, data_map, data, data_len,
1473 	    mpi3mr_memaddr_cb, &data_phys, BUS_DMA_NOWAIT);
1474 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts data phys addr= %#016jx size= %d\n",
1475 	    __func__, __LINE__, (uintmax_t)data_phys, data_len);
1476 
1477 	if (!data)
1478 	{
1479 		retval = -1;
1480 		printf(IOCNAME "Memory alloc for IOCFactsData: failed\n",
1481 		    sc->name);
1482 		goto out;
1483 	}
1484 
1485 	mtx_lock(&sc->init_cmds.completion.lock);
1486 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
1487 
1488 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1489 		retval = -1;
1490 		printf(IOCNAME "Issue IOCFacts: Init command is in use\n",
1491 		    sc->name);
1492 		mtx_unlock(&sc->init_cmds.completion.lock);
1493 		goto out;
1494 	}
1495 
1496 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1497 	sc->init_cmds.is_waiting = 1;
1498 	sc->init_cmds.callback = NULL;
1499 	iocfacts_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
1500 	iocfacts_req.Function = MPI3_FUNCTION_IOC_FACTS;
1501 
1502 	mpi3mr_add_sg_single(&iocfacts_req.SGL, sgl_flags, data_len,
1503 	    data_phys);
1504 
1505 	init_completion(&sc->init_cmds.completion);
1506 
1507 	retval = mpi3mr_submit_admin_cmd(sc, &iocfacts_req,
1508 	    sizeof(iocfacts_req));
1509 
1510 	if (retval) {
1511 		printf(IOCNAME "Issue IOCFacts: Admin Post failed\n",
1512 		    sc->name);
1513 		goto out_unlock;
1514 	}
1515 
1516 	wait_for_completion_timeout(&sc->init_cmds.completion,
1517 	    (MPI3MR_INTADMCMD_TIMEOUT));
1518 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1519 		printf(IOCNAME "Issue IOCFacts: command timed out\n",
1520 		    sc->name);
1521 		mpi3mr_check_rh_fault_ioc(sc,
1522 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
1523 		sc->unrecoverable = 1;
1524 		retval = -1;
1525 		goto out_unlock;
1526 	}
1527 
1528 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1529 	     != MPI3_IOCSTATUS_SUCCESS ) {
1530 		printf(IOCNAME "Issue IOCFacts: Failed IOCStatus(0x%04x) "
1531 		    " Loginfo(0x%08x) \n" , sc->name,
1532 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1533 		    sc->init_cmds.ioc_loginfo);
1534 		retval = -1;
1535 		goto out_unlock;
1536 	}
1537 
1538 	memcpy(facts_data, (U8 *)data, data_len);
1539 out_unlock:
1540 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1541 	mtx_unlock(&sc->init_cmds.completion.lock);
1542 
1543 out:
1544 	if (data_phys != 0)
1545 		bus_dmamap_unload(data_tag, data_map);
1546 	if (data != NULL)
1547 		bus_dmamem_free(data_tag, data, data_map);
1548 	if (data_tag != NULL)
1549 		bus_dma_tag_destroy(data_tag);
1550 	return retval;
1551 }
1552 
/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @sc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver, then derive dependent driver limits (max host
 * I/Os, I/O throttle thresholds, DMA mask).
 *
 * Return: 0 (there is currently no failure path).
 */
static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
    Mpi3IOCFactsData_t *facts_data)
{
	int retval = 0;
	U32 ioc_config, req_sz, facts_flags;
        struct mpi3mr_compimg_ver *fwver;

	/* Length field is in 4-byte units; a mismatch is logged but tolerated */
	if (le16toh(facts_data->IOCFactsDataLength) !=
	    (sizeof(*facts_data) / 4)) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data length mismatch "
		    " driver_sz(%ld) firmware_sz(%d) \n",
		    sizeof(*facts_data),
		    facts_data->IOCFactsDataLength);
	}

	/* Operational request entry size is encoded as a power of two */
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
        req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
                  MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);

	if (facts_data->IOCRequestFrameSize != (req_sz/4)) {
		 mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data reqFrameSize mismatch "
		    " hw_size(%d) firmware_sz(%d) \n" , req_sz/4,
		    facts_data->IOCRequestFrameSize);
	}

	memset(&sc->facts, 0, sizeof(sc->facts));

	/* Cache each facts field; most are read straight from the response */
	facts_flags = le32toh(facts_data->Flags);
	sc->facts.op_req_sz = req_sz;
	sc->op_reply_sz = 1 << ((ioc_config &
                                  MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
                                  MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	sc->facts.ioc_num = facts_data->IOCNumber;
        sc->facts.who_init = facts_data->WhoInit;
        sc->facts.max_msix_vectors = facts_data->MaxMSIxVectors;
	sc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	/* dma_mask is the number of address bits the controller decodes */
	sc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
        sc->facts.protocol_flags = facts_data->ProtocolFlags;
        sc->facts.mpi_version = (facts_data->MPIVersion.Word);
        sc->facts.max_reqs = (facts_data->MaxOutstandingRequests);
        sc->facts.product_id = (facts_data->ProductID);
	sc->facts.reply_sz = (facts_data->ReplyFrameSize) * 4;
        sc->facts.exceptions = (facts_data->IOCExceptions);
        sc->facts.max_perids = (facts_data->MaxPersistentID);
        sc->facts.max_vds = (facts_data->MaxVDs);
        sc->facts.max_hpds = (facts_data->MaxHostPDs);
        sc->facts.max_advhpds = (facts_data->MaxAdvHostPDs);
        sc->facts.max_raidpds = (facts_data->MaxRAIDPDs);
        sc->facts.max_nvme = (facts_data->MaxNVMe);
        sc->facts.max_pcieswitches =
                (facts_data->MaxPCIeSwitches);
        sc->facts.max_sasexpanders =
                (facts_data->MaxSASExpanders);
        sc->facts.max_sasinitiators =
                (facts_data->MaxSASInitiators);
        sc->facts.max_enclosures = (facts_data->MaxEnclosures);
        sc->facts.min_devhandle = (facts_data->MinDevHandle);
        sc->facts.max_devhandle = (facts_data->MaxDevHandle);
	sc->facts.max_op_req_q =
                (facts_data->MaxOperationalRequestQueues);
	sc->facts.max_op_reply_q =
                (facts_data->MaxOperationalReplyQueues);
        sc->facts.ioc_capabilities =
                (facts_data->IOCCapabilities);
        sc->facts.fw_ver.build_num =
                (facts_data->FWVersion.BuildNum);
        sc->facts.fw_ver.cust_id =
                (facts_data->FWVersion.CustomerID);
        sc->facts.fw_ver.ph_minor = facts_data->FWVersion.PhaseMinor;
        sc->facts.fw_ver.ph_major = facts_data->FWVersion.PhaseMajor;
        sc->facts.fw_ver.gen_minor = facts_data->FWVersion.GenMinor;
        sc->facts.fw_ver.gen_major = facts_data->FWVersion.GenMajor;
	/* Clamp the driver's MSI-x usage to what the controller supports */
        sc->max_msix_vectors = min(sc->max_msix_vectors,
            sc->facts.max_msix_vectors);
        sc->facts.sge_mod_mask = facts_data->SGEModifierMask;
        sc->facts.sge_mod_value = facts_data->SGEModifierValue;
        sc->facts.sge_mod_shift = facts_data->SGEModifierShift;
        sc->facts.shutdown_timeout =
                (facts_data->ShutdownTimeout);
	sc->facts.max_dev_per_tg = facts_data->MaxDevicesPerThrottleGroup;
	sc->facts.io_throttle_data_length =
	    facts_data->IOThrottleDataLength;
	sc->facts.max_io_throttle_group =
	    facts_data->MaxIOThrottleGroup;
	sc->facts.io_throttle_low = facts_data->IOThrottleLow;
	sc->facts.io_throttle_high = facts_data->IOThrottleHigh;

	/*Store in 512b block count*/
	if (sc->facts.io_throttle_data_length)
		sc->io_throttle_data_length =
		    (sc->facts.io_throttle_data_length * 2 * 4);
	else
		/* set the length to 1MB + 1K to disable throttle*/
		sc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;

	/* Thresholds converted to 512-byte blocks (facts values are in MiB) */
	sc->io_throttle_high = (sc->facts.io_throttle_high * 2 * 1024);
	sc->io_throttle_low = (sc->facts.io_throttle_low * 2 * 1024);

	fwver = &sc->facts.fw_ver;
	snprintf(sc->fw_version, sizeof(sc->fw_version),
	    "%d.%d.%d.%d.%05d-%05d",
	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
	    fwver->ph_minor, fwver->cust_id, fwver->build_num);

	/* NOTE(review): max_pds is printed below but not assigned in this
	 * function — presumably set elsewhere or always zero; confirm. */
	mpi3mr_dprint(sc, MPI3MR_INFO, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),"
            "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
	    sc->facts.ioc_num, sc->facts.max_op_req_q,
	    sc->facts.max_op_reply_q, sc->facts.max_devhandle,
            sc->facts.max_reqs, sc->facts.min_devhandle,
            sc->facts.max_pds, sc->facts.max_msix_vectors,
            sc->facts.max_perids);
        mpi3mr_dprint(sc, MPI3MR_INFO, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x\n",
            sc->facts.sge_mod_mask, sc->facts.sge_mod_value,
            sc->facts.sge_mod_shift);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d), io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
	    sc->facts.max_dev_per_tg, sc->facts.max_io_throttle_group,
	    sc->facts.io_throttle_data_length * 4,
	    sc->facts.io_throttle_high, sc->facts.io_throttle_low);

	/* Reserve slots for internal commands out of the request budget */
	sc->max_host_ios = sc->facts.max_reqs -
	    (MPI3MR_INTERNALCMDS_RESVD + 1);

	/*
	 * Set the DMA mask for the card.  dma_mask is the number of bits that
	 * can have bits set in them.  Translate this into bus_dma loaddr args.
	 * Add sanity for more bits than address space or other overflow
	 * situations.
	 */
	if (sc->facts.dma_mask == 0 ||
	    (sc->facts.dma_mask >= sizeof(bus_addr_t) * 8))
		sc->dma_loaddr = BUS_SPACE_MAXADDR;
	else
		sc->dma_loaddr = ~((1ull << sc->facts.dma_mask) - 1);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "dma_mask bits: %d loaddr 0x%jx\n",
	    sc->facts.dma_mask, sc->dma_loaddr);

	return retval;
}
1707 
mpi3mr_setup_reply_free_queues(struct mpi3mr_softc * sc)1708 static inline void mpi3mr_setup_reply_free_queues(struct mpi3mr_softc *sc)
1709 {
1710 	int i;
1711 	bus_addr_t phys_addr;
1712 
1713 	/* initialize Reply buffer Queue */
1714 	for (i = 0, phys_addr = sc->reply_buf_phys;
1715 	    i < sc->num_reply_bufs; i++, phys_addr += sc->reply_sz)
1716 		sc->reply_free_q[i] = phys_addr;
1717 	sc->reply_free_q[i] = (0);
1718 
1719 	/* initialize Sense Buffer Queue */
1720 	for (i = 0, phys_addr = sc->sense_buf_phys;
1721 	    i < sc->num_sense_bufs; i++, phys_addr += MPI3MR_SENSEBUF_SZ)
1722 		sc->sense_buf_q[i] = phys_addr;
1723 	sc->sense_buf_q[i] = (0);
1724 
1725 }
1726 
mpi3mr_reply_dma_alloc(struct mpi3mr_softc * sc)1727 static int mpi3mr_reply_dma_alloc(struct mpi3mr_softc *sc)
1728 {
1729 	U32 sz;
1730 
1731 	sc->num_reply_bufs = sc->facts.max_reqs + MPI3MR_NUM_EVTREPLIES;
1732 	sc->reply_free_q_sz = sc->num_reply_bufs + 1;
1733 	sc->num_sense_bufs = sc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
1734 	sc->sense_buf_q_sz = sc->num_sense_bufs + 1;
1735 
1736 	sz = sc->num_reply_bufs * sc->reply_sz;
1737 
1738 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1739 				16, 0,			/* algnmnt, boundary */
1740 				sc->dma_loaddr,		/* lowaddr */
1741 				BUS_SPACE_MAXADDR,	/* highaddr */
1742 				NULL, NULL,		/* filter, filterarg */
1743                                 sz,			/* maxsize */
1744                                 1,			/* nsegments */
1745                                 sz,			/* maxsegsize */
1746                                 0,			/* flags */
1747                                 NULL, NULL,		/* lockfunc, lockarg */
1748                                 &sc->reply_buf_tag)) {
1749 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1750 		return (ENOMEM);
1751         }
1752 
1753 	if (bus_dmamem_alloc(sc->reply_buf_tag, (void **)&sc->reply_buf,
1754 	    BUS_DMA_NOWAIT, &sc->reply_buf_dmamap)) {
1755 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1756 			__func__, __LINE__);
1757 		return (ENOMEM);
1758         }
1759 
1760 	bzero(sc->reply_buf, sz);
1761         bus_dmamap_load(sc->reply_buf_tag, sc->reply_buf_dmamap, sc->reply_buf, sz,
1762 	    mpi3mr_memaddr_cb, &sc->reply_buf_phys, BUS_DMA_NOWAIT);
1763 
1764 	sc->reply_buf_dma_min_address = sc->reply_buf_phys;
1765 	sc->reply_buf_dma_max_address = sc->reply_buf_phys + sz;
1766 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply buf (0x%p): depth(%d), frame_size(%d), "
1767 	    "pool_size(%d kB), reply_buf_dma(0x%llx)\n",
1768 	    sc->reply_buf, sc->num_reply_bufs, sc->reply_sz,
1769 	    (sz / 1024), (unsigned long long)sc->reply_buf_phys);
1770 
1771 	/* reply free queue, 8 byte align */
1772 	sz = sc->reply_free_q_sz * 8;
1773 
1774         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1775 				8, 0,			/* algnmnt, boundary */
1776 				sc->dma_loaddr,		/* lowaddr */
1777 				BUS_SPACE_MAXADDR,	/* highaddr */
1778 				NULL, NULL,		/* filter, filterarg */
1779                                 sz,			/* maxsize */
1780                                 1,			/* nsegments */
1781                                 sz,			/* maxsegsize */
1782                                 0,			/* flags */
1783                                 NULL, NULL,		/* lockfunc, lockarg */
1784                                 &sc->reply_free_q_tag)) {
1785 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply free queue DMA tag\n");
1786 		return (ENOMEM);
1787         }
1788 
1789         if (bus_dmamem_alloc(sc->reply_free_q_tag, (void **)&sc->reply_free_q,
1790 	    BUS_DMA_NOWAIT, &sc->reply_free_q_dmamap)) {
1791 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1792 			__func__, __LINE__);
1793 		return (ENOMEM);
1794         }
1795 
1796 	bzero(sc->reply_free_q, sz);
1797         bus_dmamap_load(sc->reply_free_q_tag, sc->reply_free_q_dmamap, sc->reply_free_q, sz,
1798 	    mpi3mr_memaddr_cb, &sc->reply_free_q_phys, BUS_DMA_NOWAIT);
1799 
1800 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply_free_q (0x%p): depth(%d), frame_size(%d), "
1801 	    "pool_size(%d kB), reply_free_q_dma(0x%llx)\n",
1802 	    sc->reply_free_q, sc->reply_free_q_sz, 8, (sz / 1024),
1803 	    (unsigned long long)sc->reply_free_q_phys);
1804 
1805 	/* sense buffer pool,  4 byte align */
1806 	sz = sc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
1807 
1808         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1809 				4, 0,			/* algnmnt, boundary */
1810 				sc->dma_loaddr,		/* lowaddr */
1811 				BUS_SPACE_MAXADDR,	/* highaddr */
1812 				NULL, NULL,		/* filter, filterarg */
1813                                 sz,			/* maxsize */
1814                                 1,			/* nsegments */
1815                                 sz,			/* maxsegsize */
1816                                 0,			/* flags */
1817                                 NULL, NULL,		/* lockfunc, lockarg */
1818                                 &sc->sense_buf_tag)) {
1819 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer DMA tag\n");
1820 		return (ENOMEM);
1821         }
1822 
1823 	if (bus_dmamem_alloc(sc->sense_buf_tag, (void **)&sc->sense_buf,
1824 	    BUS_DMA_NOWAIT, &sc->sense_buf_dmamap)) {
1825 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1826 			__func__, __LINE__);
1827 		return (ENOMEM);
1828         }
1829 
1830 	bzero(sc->sense_buf, sz);
1831         bus_dmamap_load(sc->sense_buf_tag, sc->sense_buf_dmamap, sc->sense_buf, sz,
1832 	    mpi3mr_memaddr_cb, &sc->sense_buf_phys, BUS_DMA_NOWAIT);
1833 
1834 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf (0x%p): depth(%d), frame_size(%d), "
1835 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1836 	    sc->sense_buf, sc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
1837 	    (sz / 1024), (unsigned long long)sc->sense_buf_phys);
1838 
1839 	/* sense buffer queue, 8 byte align */
1840 	sz = sc->sense_buf_q_sz * 8;
1841 
1842         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1843 				8, 0,			/* algnmnt, boundary */
1844 				sc->dma_loaddr,		/* lowaddr */
1845 				BUS_SPACE_MAXADDR,	/* highaddr */
1846 				NULL, NULL,		/* filter, filterarg */
1847                                 sz,			/* maxsize */
1848                                 1,			/* nsegments */
1849                                 sz,			/* maxsegsize */
1850                                 0,			/* flags */
1851                                 NULL, NULL,		/* lockfunc, lockarg */
1852                                 &sc->sense_buf_q_tag)) {
1853 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer Queue DMA tag\n");
1854 		return (ENOMEM);
1855         }
1856 
1857 	if (bus_dmamem_alloc(sc->sense_buf_q_tag, (void **)&sc->sense_buf_q,
1858 	    BUS_DMA_NOWAIT, &sc->sense_buf_q_dmamap)) {
1859 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1860 			__func__, __LINE__);
1861 		return (ENOMEM);
1862         }
1863 
1864 	bzero(sc->sense_buf_q, sz);
1865         bus_dmamap_load(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap, sc->sense_buf_q, sz,
1866 	    mpi3mr_memaddr_cb, &sc->sense_buf_q_phys, BUS_DMA_NOWAIT);
1867 
1868 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf_q (0x%p): depth(%d), frame_size(%d), "
1869 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1870 	    sc->sense_buf_q, sc->sense_buf_q_sz, 8, (sz / 1024),
1871 	    (unsigned long long)sc->sense_buf_q_phys);
1872 
1873 	return 0;
1874 }
1875 
mpi3mr_reply_alloc(struct mpi3mr_softc * sc)1876 static int mpi3mr_reply_alloc(struct mpi3mr_softc *sc)
1877 {
1878 	int retval = 0;
1879 	U32 i;
1880 
1881 	if (sc->init_cmds.reply)
1882 		goto post_reply_sbuf;
1883 
1884 	sc->init_cmds.reply = malloc(sc->reply_sz,
1885 		M_MPI3MR, M_NOWAIT | M_ZERO);
1886 
1887 	if (!sc->init_cmds.reply) {
1888 		printf(IOCNAME "Cannot allocate memory for init_cmds.reply\n",
1889 		    sc->name);
1890 		goto out_failed;
1891 	}
1892 
1893 	sc->ioctl_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
1894 	if (!sc->ioctl_cmds.reply) {
1895 		printf(IOCNAME "Cannot allocate memory for ioctl_cmds.reply\n",
1896 		    sc->name);
1897 		goto out_failed;
1898 	}
1899 
1900 	sc->host_tm_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
1901 	if (!sc->host_tm_cmds.reply) {
1902 		printf(IOCNAME "Cannot allocate memory for host_tm.reply\n",
1903 		    sc->name);
1904 		goto out_failed;
1905 	}
1906 	for (i=0; i<MPI3MR_NUM_DEVRMCMD; i++) {
1907 		sc->dev_rmhs_cmds[i].reply = malloc(sc->reply_sz,
1908 		    M_MPI3MR, M_NOWAIT | M_ZERO);
1909 		if (!sc->dev_rmhs_cmds[i].reply) {
1910 			printf(IOCNAME "Cannot allocate memory for"
1911 			    " dev_rmhs_cmd[%d].reply\n",
1912 			    sc->name, i);
1913 			goto out_failed;
1914 		}
1915 	}
1916 
1917 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
1918 		sc->evtack_cmds[i].reply = malloc(sc->reply_sz,
1919 			M_MPI3MR, M_NOWAIT | M_ZERO);
1920 		if (!sc->evtack_cmds[i].reply)
1921 			goto out_failed;
1922 	}
1923 
1924 	sc->dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
1925 
1926 	sc->removepend_bitmap = malloc(sc->dev_handle_bitmap_sz,
1927 	    M_MPI3MR, M_NOWAIT | M_ZERO);
1928 	if (!sc->removepend_bitmap) {
1929 		printf(IOCNAME "Cannot alloc memory for remove pend bitmap\n",
1930 		    sc->name);
1931 		goto out_failed;
1932 	}
1933 
1934 	sc->devrem_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_DEVRMCMD, 8);
1935 	sc->devrem_bitmap = malloc(sc->devrem_bitmap_sz,
1936 	    M_MPI3MR, M_NOWAIT | M_ZERO);
1937 	if (!sc->devrem_bitmap) {
1938 		printf(IOCNAME "Cannot alloc memory for dev remove bitmap\n",
1939 		    sc->name);
1940 		goto out_failed;
1941 	}
1942 
1943 	sc->evtack_cmds_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_EVTACKCMD, 8);
1944 
1945 	sc->evtack_cmds_bitmap = malloc(sc->evtack_cmds_bitmap_sz,
1946 		M_MPI3MR, M_NOWAIT | M_ZERO);
1947 	if (!sc->evtack_cmds_bitmap)
1948 		goto out_failed;
1949 
1950 	if (mpi3mr_reply_dma_alloc(sc)) {
1951 		printf(IOCNAME "func:%s line:%d DMA memory allocation failed\n",
1952 		    sc->name, __func__, __LINE__);
1953 		goto out_failed;
1954 	}
1955 
1956 post_reply_sbuf:
1957 	mpi3mr_setup_reply_free_queues(sc);
1958 	return retval;
1959 out_failed:
1960 	mpi3mr_cleanup_interrupts(sc);
1961 	mpi3mr_free_mem(sc);
1962 	retval = -1;
1963 	return retval;
1964 }
1965 
1966 static void
mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc * sc)1967 mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc *sc)
1968 {
1969 	int retval = 0;
1970 	void *fw_pkg_ver = NULL;
1971 	bus_dma_tag_t fw_pkg_ver_tag;
1972 	bus_dmamap_t fw_pkg_ver_map;
1973 	bus_addr_t fw_pkg_ver_dma;
1974 	Mpi3CIUploadRequest_t ci_upload;
1975 	Mpi3ComponentImageHeader_t *ci_header;
1976 	U32 fw_pkg_ver_len = sizeof(*ci_header);
1977 	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
1978 
1979 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1980 				4, 0,			/* algnmnt, boundary */
1981 				sc->dma_loaddr,		/* lowaddr */
1982 				BUS_SPACE_MAXADDR,	/* highaddr */
1983 				NULL, NULL,		/* filter, filterarg */
1984 				fw_pkg_ver_len,		/* maxsize */
1985 				1,			/* nsegments */
1986 				fw_pkg_ver_len,		/* maxsegsize */
1987 				0,			/* flags */
1988 				NULL, NULL,		/* lockfunc, lockarg */
1989 				&fw_pkg_ver_tag)) {
1990 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate fw package version request DMA tag\n");
1991 		return;
1992 	}
1993 
1994 	if (bus_dmamem_alloc(fw_pkg_ver_tag, (void **)&fw_pkg_ver, BUS_DMA_NOWAIT, &fw_pkg_ver_map)) {
1995 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d fw package version DMA mem alloc failed\n",
1996 			      __func__, __LINE__);
1997 		return;
1998 	}
1999 
2000 	bzero(fw_pkg_ver, fw_pkg_ver_len);
2001 
2002 	bus_dmamap_load(fw_pkg_ver_tag, fw_pkg_ver_map, fw_pkg_ver, fw_pkg_ver_len,
2003 	    mpi3mr_memaddr_cb, &fw_pkg_ver_dma, BUS_DMA_NOWAIT);
2004 
2005 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d fw package version phys addr= %#016jx size= %d\n",
2006 		      __func__, __LINE__, (uintmax_t)fw_pkg_ver_dma, fw_pkg_ver_len);
2007 
2008 	if (!fw_pkg_ver) {
2009 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Memory alloc for fw package version failed\n");
2010 		goto out;
2011 	}
2012 
2013 	memset(&ci_upload, 0, sizeof(ci_upload));
2014 	mtx_lock(&sc->init_cmds.completion.lock);
2015 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2016 		mpi3mr_dprint(sc, MPI3MR_INFO,"Issue CI Header Upload: command is in use\n");
2017 		mtx_unlock(&sc->init_cmds.completion.lock);
2018 		goto out;
2019 	}
2020 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2021 	sc->init_cmds.is_waiting = 1;
2022 	sc->init_cmds.callback = NULL;
2023 	ci_upload.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
2024 	ci_upload.Function = MPI3_FUNCTION_CI_UPLOAD;
2025 	ci_upload.MsgFlags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2026 	ci_upload.ImageOffset = MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET;
2027 	ci_upload.SegmentSize = MPI3_IMAGE_HEADER_SIZE;
2028 
2029 	mpi3mr_add_sg_single(&ci_upload.SGL, sgl_flags, fw_pkg_ver_len,
2030 	    fw_pkg_ver_dma);
2031 
2032 	init_completion(&sc->init_cmds.completion);
2033 	if ((retval = mpi3mr_submit_admin_cmd(sc, &ci_upload, sizeof(ci_upload)))) {
2034 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: Admin Post failed\n");
2035 		goto out_unlock;
2036 	}
2037 	wait_for_completion_timeout(&sc->init_cmds.completion,
2038 		(MPI3MR_INTADMCMD_TIMEOUT));
2039 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2040 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: command timed out\n");
2041 		sc->init_cmds.is_waiting = 0;
2042 		if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
2043 			mpi3mr_check_rh_fault_ioc(sc,
2044 				MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2045 		goto out_unlock;
2046 	}
2047 	if ((GET_IOC_STATUS(sc->init_cmds.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) {
2048 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2049 			      "Issue CI Header Upload: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
2050 			      GET_IOC_STATUS(sc->init_cmds.ioc_status), sc->init_cmds.ioc_loginfo);
2051 		goto out_unlock;
2052 	}
2053 
2054 	ci_header = (Mpi3ComponentImageHeader_t *) fw_pkg_ver;
2055 	mpi3mr_dprint(sc, MPI3MR_XINFO,
2056 		      "Issue CI Header Upload:EnvVariableOffset(0x%x) \
2057 		      HeaderSize(0x%x) Signature1(0x%x)\n",
2058 		      ci_header->EnvironmentVariableOffset,
2059 		      ci_header->HeaderSize,
2060 		      ci_header->Signature1);
2061 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Package Version: %02d.%02d.%02d.%02d\n",
2062 		      ci_header->ComponentImageVersion.GenMajor,
2063 		      ci_header->ComponentImageVersion.GenMinor,
2064 		      ci_header->ComponentImageVersion.PhaseMajor,
2065 		      ci_header->ComponentImageVersion.PhaseMinor);
2066 out_unlock:
2067 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2068 	mtx_unlock(&sc->init_cmds.completion.lock);
2069 
2070 out:
2071 	if (fw_pkg_ver_dma != 0)
2072 		bus_dmamap_unload(fw_pkg_ver_tag, fw_pkg_ver_map);
2073 	if (fw_pkg_ver)
2074 		bus_dmamem_free(fw_pkg_ver_tag, fw_pkg_ver, fw_pkg_ver_map);
2075 	if (fw_pkg_ver_tag)
2076 		bus_dma_tag_destroy(fw_pkg_ver_tag);
2077 
2078 }
2079 
2080 /**
2081  * mpi3mr_issue_iocinit - Send IOC Init
2082  * @sc: Adapter instance reference
2083  *
2084  * Issue IOC Init MPI request through admin queue and wait for
2085  * the completion of it or time out.
2086  *
2087  * Return: 0 on success, non-zero on failures.
2088  */
mpi3mr_issue_iocinit(struct mpi3mr_softc * sc)2089 static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
2090 {
2091 	Mpi3IOCInitRequest_t iocinit_req;
2092 	Mpi3DriverInfoLayout_t *drvr_info = NULL;
2093 	bus_dma_tag_t drvr_info_tag;
2094 	bus_dmamap_t drvr_info_map;
2095 	bus_addr_t drvr_info_phys;
2096 	U32 drvr_info_len = sizeof(*drvr_info);
2097 	int retval = 0;
2098 	struct timeval now;
2099 	uint64_t time_in_msec;
2100 
2101 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2102 				4, 0,			/* algnmnt, boundary */
2103 				sc->dma_loaddr,		/* lowaddr */
2104 				BUS_SPACE_MAXADDR,	/* highaddr */
2105 				NULL, NULL,		/* filter, filterarg */
2106                                 drvr_info_len,		/* maxsize */
2107                                 1,			/* nsegments */
2108                                 drvr_info_len,		/* maxsegsize */
2109                                 0,			/* flags */
2110                                 NULL, NULL,		/* lockfunc, lockarg */
2111                                 &drvr_info_tag)) {
2112 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
2113 		return (ENOMEM);
2114         }
2115 
2116 	if (bus_dmamem_alloc(drvr_info_tag, (void **)&drvr_info,
2117 	    BUS_DMA_NOWAIT, &drvr_info_map)) {
2118 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
2119 			__func__, __LINE__);
2120 		return (ENOMEM);
2121         }
2122 
2123 	bzero(drvr_info, drvr_info_len);
2124         bus_dmamap_load(drvr_info_tag, drvr_info_map, drvr_info, drvr_info_len,
2125 	    mpi3mr_memaddr_cb, &drvr_info_phys, BUS_DMA_NOWAIT);
2126 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts drvr_info phys addr= %#016jx size= %d\n",
2127 	    __func__, __LINE__, (uintmax_t)drvr_info_phys, drvr_info_len);
2128 
2129 	if (!drvr_info)
2130 	{
2131 		retval = -1;
2132 		printf(IOCNAME "Memory alloc for Driver Info failed\n",
2133 		    sc->name);
2134 		goto out;
2135 	}
2136 	drvr_info->InformationLength = (drvr_info_len);
2137 	strcpy(drvr_info->DriverSignature, "Broadcom");
2138 	strcpy(drvr_info->OsName, "FreeBSD");
2139 	strcpy(drvr_info->OsVersion, fmt_os_ver);
2140 	strcpy(drvr_info->DriverName, MPI3MR_DRIVER_NAME);
2141 	strcpy(drvr_info->DriverVersion, MPI3MR_DRIVER_VERSION);
2142 	strcpy(drvr_info->DriverReleaseDate, MPI3MR_DRIVER_RELDATE);
2143 	drvr_info->DriverCapabilities = 0;
2144 	memcpy((U8 *)&sc->driver_info, (U8 *)drvr_info, sizeof(sc->driver_info));
2145 
2146 	memset(&iocinit_req, 0, sizeof(iocinit_req));
2147 	mtx_lock(&sc->init_cmds.completion.lock);
2148 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2149 		retval = -1;
2150 		printf(IOCNAME "Issue IOCInit: Init command is in use\n",
2151 		    sc->name);
2152 		mtx_unlock(&sc->init_cmds.completion.lock);
2153 		goto out;
2154 	}
2155 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2156 	sc->init_cmds.is_waiting = 1;
2157 	sc->init_cmds.callback = NULL;
2158         iocinit_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
2159         iocinit_req.Function = MPI3_FUNCTION_IOC_INIT;
2160         iocinit_req.MPIVersion.Struct.Dev = MPI3_VERSION_DEV;
2161         iocinit_req.MPIVersion.Struct.Unit = MPI3_VERSION_UNIT;
2162         iocinit_req.MPIVersion.Struct.Major = MPI3_VERSION_MAJOR;
2163         iocinit_req.MPIVersion.Struct.Minor = MPI3_VERSION_MINOR;
2164         iocinit_req.WhoInit = MPI3_WHOINIT_HOST_DRIVER;
2165         iocinit_req.ReplyFreeQueueDepth = sc->reply_free_q_sz;
2166         iocinit_req.ReplyFreeQueueAddress =
2167                 sc->reply_free_q_phys;
2168         iocinit_req.SenseBufferLength = MPI3MR_SENSEBUF_SZ;
2169         iocinit_req.SenseBufferFreeQueueDepth =
2170                 sc->sense_buf_q_sz;
2171         iocinit_req.SenseBufferFreeQueueAddress =
2172                 sc->sense_buf_q_phys;
2173         iocinit_req.DriverInformationAddress = drvr_info_phys;
2174 
2175 	getmicrotime(&now);
2176 	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
2177 	iocinit_req.TimeStamp = htole64(time_in_msec);
2178 
2179 	iocinit_req.MsgFlags |= MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;
2180 
2181 	init_completion(&sc->init_cmds.completion);
2182 	retval = mpi3mr_submit_admin_cmd(sc, &iocinit_req,
2183 	    sizeof(iocinit_req));
2184 
2185 	if (retval) {
2186 		printf(IOCNAME "Issue IOCInit: Admin Post failed\n",
2187 		    sc->name);
2188 		goto out_unlock;
2189 	}
2190 
2191 	wait_for_completion_timeout(&sc->init_cmds.completion,
2192 	    (MPI3MR_INTADMCMD_TIMEOUT));
2193 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2194 		printf(IOCNAME "Issue IOCInit: command timed out\n",
2195 		    sc->name);
2196 		mpi3mr_check_rh_fault_ioc(sc,
2197 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2198 		sc->unrecoverable = 1;
2199 		retval = -1;
2200 		goto out_unlock;
2201 	}
2202 
2203 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2204 	     != MPI3_IOCSTATUS_SUCCESS ) {
2205 		printf(IOCNAME "Issue IOCInit: Failed IOCStatus(0x%04x) "
2206 		    " Loginfo(0x%08x) \n" , sc->name,
2207 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2208 		    sc->init_cmds.ioc_loginfo);
2209 		retval = -1;
2210 		goto out_unlock;
2211 	}
2212 
2213 out_unlock:
2214 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2215 	mtx_unlock(&sc->init_cmds.completion.lock);
2216 
2217 out:
2218 	if (drvr_info_phys != 0)
2219 		bus_dmamap_unload(drvr_info_tag, drvr_info_map);
2220 	if (drvr_info != NULL)
2221 		bus_dmamem_free(drvr_info_tag, drvr_info, drvr_info_map);
2222 	if (drvr_info_tag != NULL)
2223 		bus_dma_tag_destroy(drvr_info_tag);
2224 	return retval;
2225 }
2226 
2227 static void
mpi3mr_display_ioc_info(struct mpi3mr_softc * sc)2228 mpi3mr_display_ioc_info(struct mpi3mr_softc *sc)
2229 {
2230         int i = 0;
2231         char personality[16];
2232 
2233         switch (sc->facts.personality) {
2234         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
2235                 strcpy(personality, "Enhanced HBA");
2236                 break;
2237         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
2238                 strcpy(personality, "RAID");
2239                 break;
2240         default:
2241                 strcpy(personality, "Unknown");
2242                 break;
2243         }
2244 
2245 	mpi3mr_dprint(sc, MPI3MR_INFO, "Current Personality: %s\n", personality);
2246 
2247 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s\n", sc->fw_version);
2248 
2249         mpi3mr_dprint(sc, MPI3MR_INFO, "Protocol=(");
2250 
2251         if (sc->facts.protocol_flags &
2252             MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2253                 printf("Initiator");
2254                 i++;
2255         }
2256 
2257         if (sc->facts.protocol_flags &
2258             MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2259                 printf("%sTarget", i ? "," : "");
2260                 i++;
2261         }
2262 
2263         if (sc->facts.protocol_flags &
2264             MPI3_IOCFACTS_PROTOCOL_NVME) {
2265                 printf("%sNVMe attachment", i ? "," : "");
2266                 i++;
2267         }
2268         i = 0;
2269         printf("), ");
2270         printf("Capabilities=(");
2271 
2272         if (sc->facts.ioc_capabilities &
2273 	    MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED) {
2274                 printf("RAID");
2275                 i++;
2276         }
2277 
2278         printf(")\n");
2279 }
2280 
2281 /**
2282  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2283  * @sc: Adapter instance reference
2284  * @event: MPI event ID
2285  *
2286  * Un mask the specific event by resetting the event_mask
2287  * bitmap.
2288  *
2289  * Return: None.
2290  */
static void mpi3mr_unmask_events(struct mpi3mr_softc *sc, U16 event)
{
	U32 bit;

	/* Event IDs are tracked in four 32-bit mask words (0..127). */
	if (event >= 128)
		return;

	bit = 1U << (event & 31);
	sc->event_masks[event >> 5] &= ~bit;
}
2309 
mpi3mr_set_events_mask(struct mpi3mr_softc * sc)2310 static void mpi3mr_set_events_mask(struct mpi3mr_softc *sc)
2311 {
2312 	int i;
2313 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2314 		sc->event_masks[i] = -1;
2315 
2316         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_ADDED);
2317         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_INFO_CHANGED);
2318         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
2319 
2320         mpi3mr_unmask_events(sc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
2321 
2322         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
2323         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DISCOVERY);
2324         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
2325         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
2326 
2327         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
2328         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_ENUMERATION);
2329 
2330         mpi3mr_unmask_events(sc, MPI3_EVENT_PREPARE_FOR_RESET);
2331         mpi3mr_unmask_events(sc, MPI3_EVENT_CABLE_MGMT);
2332         mpi3mr_unmask_events(sc, MPI3_EVENT_ENERGY_PACK_CHANGE);
2333 }
2334 
2335 /**
2336  * mpi3mr_issue_event_notification - Send event notification
2337  * @sc: Adapter instance reference
2338  *
2339  * Issue event notification MPI request through admin queue and
2340  * wait for the completion of it or time out.
2341  *
2342  * Return: 0 on success, non-zero on failures.
2343  */
mpi3mr_issue_event_notification(struct mpi3mr_softc * sc)2344 int mpi3mr_issue_event_notification(struct mpi3mr_softc *sc)
2345 {
2346 	Mpi3EventNotificationRequest_t evtnotify_req;
2347 	int retval = 0;
2348 	U8 i;
2349 
2350 	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
2351 	mtx_lock(&sc->init_cmds.completion.lock);
2352 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2353 		retval = -1;
2354 		printf(IOCNAME "Issue EvtNotify: Init command is in use\n",
2355 		    sc->name);
2356 		mtx_unlock(&sc->init_cmds.completion.lock);
2357 		goto out;
2358 	}
2359 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2360 	sc->init_cmds.is_waiting = 1;
2361 	sc->init_cmds.callback = NULL;
2362 	evtnotify_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
2363 	evtnotify_req.Function = MPI3_FUNCTION_EVENT_NOTIFICATION;
2364 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2365 		evtnotify_req.EventMasks[i] =
2366 		    (sc->event_masks[i]);
2367 	init_completion(&sc->init_cmds.completion);
2368 	retval = mpi3mr_submit_admin_cmd(sc, &evtnotify_req,
2369 	    sizeof(evtnotify_req));
2370 	if (retval) {
2371 		printf(IOCNAME "Issue EvtNotify: Admin Post failed\n",
2372 		    sc->name);
2373 		goto out_unlock;
2374 	}
2375 
2376 	poll_for_command_completion(sc,
2377 				    &sc->init_cmds,
2378 				    (MPI3MR_INTADMCMD_TIMEOUT));
2379 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2380 		printf(IOCNAME "Issue EvtNotify: command timed out\n",
2381 		    sc->name);
2382 		mpi3mr_check_rh_fault_ioc(sc,
2383 		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
2384 		retval = -1;
2385 		goto out_unlock;
2386 	}
2387 
2388 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2389 	     != MPI3_IOCSTATUS_SUCCESS ) {
2390 		printf(IOCNAME "Issue EvtNotify: Failed IOCStatus(0x%04x) "
2391 		    " Loginfo(0x%08x) \n" , sc->name,
2392 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2393 		    sc->init_cmds.ioc_loginfo);
2394 		retval = -1;
2395 		goto out_unlock;
2396 	}
2397 
2398 out_unlock:
2399 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2400 	mtx_unlock(&sc->init_cmds.completion.lock);
2401 
2402 out:
2403 	return retval;
2404 }
2405 
2406 int
mpi3mr_register_events(struct mpi3mr_softc * sc)2407 mpi3mr_register_events(struct mpi3mr_softc *sc)
2408 {
2409 	int error;
2410 
2411 	mpi3mr_set_events_mask(sc);
2412 
2413 	error = mpi3mr_issue_event_notification(sc);
2414 
2415 	if (error) {
2416 		printf(IOCNAME "Failed to issue event notification %d\n",
2417 		    sc->name, error);
2418 	}
2419 
2420 	return error;
2421 }
2422 
2423 /**
2424  * mpi3mr_process_event_ack - Process event acknowledgment
2425  * @sc: Adapter instance reference
2426  * @event: MPI3 event ID
2427  * @event_ctx: Event context
2428  *
2429  * Send event acknowledgement through admin queue and wait for
2430  * it to complete.
2431  *
2432  * Return: 0 on success, non-zero on failures.
2433  */
int mpi3mr_process_event_ack(struct mpi3mr_softc *sc, U8 event,
	U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds is the shared slot for internal admin commands; its
	 * completion lock serializes access and is held across the whole
	 * submit/wait/status sequence. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtAck: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	/* Waiter-style completion: no callback, the reply path wakes us. */
	sc->init_cmds.callback = NULL;
	/* MPI3 wire format is little-endian. */
	evtack_req.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));
	if (retval) {
		printf(IOCNAME "Issue EvtAck: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* NOTE(review): unlike the other admin commands here, the
		 * timeout path does not call mpi3mr_check_rh_fault_ioc() —
		 * confirm this is intentional. */
		printf(IOCNAME "Issue EvtAck: command timed out\n",
		    sc->name);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtAck: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the shared command slot before dropping the lock. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2492 
2493 
mpi3mr_alloc_chain_bufs(struct mpi3mr_softc * sc)2494 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_softc *sc)
2495 {
2496 	int retval = 0;
2497 	U32 sz, i;
2498 	U16 num_chains;
2499 
2500 	num_chains = sc->max_host_ios;
2501 
2502 	sc->chain_buf_count = num_chains;
2503 	sz = sizeof(struct mpi3mr_chain) * num_chains;
2504 
2505 	sc->chain_sgl_list = malloc(sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2506 
2507 	if (!sc->chain_sgl_list) {
2508 		printf(IOCNAME "Cannot allocate memory for chain SGL list\n",
2509 		    sc->name);
2510 		retval = -1;
2511 		goto out_failed;
2512 	}
2513 
2514 	sz = MPI3MR_CHAINSGE_SIZE;
2515 
2516         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2517 				4096, 0,		/* algnmnt, boundary */
2518 				sc->dma_loaddr,		/* lowaddr */
2519 				BUS_SPACE_MAXADDR,	/* highaddr */
2520 				NULL, NULL,		/* filter, filterarg */
2521                                 sz,			/* maxsize */
2522                                 1,			/* nsegments */
2523                                 sz,			/* maxsegsize */
2524                                 0,			/* flags */
2525                                 NULL, NULL,		/* lockfunc, lockarg */
2526                                 &sc->chain_sgl_list_tag)) {
2527 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Chain buffer DMA tag\n");
2528 		return (ENOMEM);
2529         }
2530 
2531 	for (i = 0; i < num_chains; i++) {
2532 		if (bus_dmamem_alloc(sc->chain_sgl_list_tag, (void **)&sc->chain_sgl_list[i].buf,
2533 		    BUS_DMA_NOWAIT, &sc->chain_sgl_list[i].buf_dmamap)) {
2534 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
2535 				__func__, __LINE__);
2536 			return (ENOMEM);
2537 		}
2538 
2539 		bzero(sc->chain_sgl_list[i].buf, sz);
2540 		bus_dmamap_load(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap, sc->chain_sgl_list[i].buf, sz,
2541 		    mpi3mr_memaddr_cb, &sc->chain_sgl_list[i].buf_phys, BUS_DMA_NOWAIT);
2542 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d phys addr= %#016jx size= %d\n",
2543 		    __func__, __LINE__, (uintmax_t)sc->chain_sgl_list[i].buf_phys, sz);
2544 	}
2545 
2546 	sc->chain_bitmap_sz = MPI3MR_DIV_ROUND_UP(num_chains, 8);
2547 
2548 	sc->chain_bitmap = malloc(sc->chain_bitmap_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2549 	if (!sc->chain_bitmap) {
2550 		mpi3mr_dprint(sc, MPI3MR_INFO, "Cannot alloc memory for chain bitmap\n");
2551 		retval = -1;
2552 		goto out_failed;
2553 	}
2554 	return retval;
2555 
2556 out_failed:
2557 	for (i = 0; i < num_chains; i++) {
2558 		if (sc->chain_sgl_list[i].buf_phys != 0)
2559 			bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
2560 		if (sc->chain_sgl_list[i].buf != NULL)
2561 			bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf, sc->chain_sgl_list[i].buf_dmamap);
2562 	}
2563 	if (sc->chain_sgl_list_tag != NULL)
2564 		bus_dma_tag_destroy(sc->chain_sgl_list_tag);
2565 	return retval;
2566 }
2567 
mpi3mr_pel_alloc(struct mpi3mr_softc * sc)2568 static int mpi3mr_pel_alloc(struct mpi3mr_softc *sc)
2569 {
2570 	int retval = 0;
2571 
2572 	if (!sc->pel_cmds.reply) {
2573 		sc->pel_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2574 		if (!sc->pel_cmds.reply) {
2575 			printf(IOCNAME "Cannot allocate memory for pel_cmds.reply\n",
2576 			    sc->name);
2577 			goto out_failed;
2578 		}
2579 	}
2580 
2581 	if (!sc->pel_abort_cmd.reply) {
2582 		sc->pel_abort_cmd.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2583 		if (!sc->pel_abort_cmd.reply) {
2584 			printf(IOCNAME "Cannot allocate memory for pel_abort_cmd.reply\n",
2585 			    sc->name);
2586 			goto out_failed;
2587 		}
2588 	}
2589 
2590 	if (!sc->pel_seq_number) {
2591 		sc->pel_seq_number_sz = sizeof(Mpi3PELSeq_t);
2592 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,   /* parent */
2593 				 4, 0,                           /* alignment, boundary */
2594 				 sc->dma_loaddr,	         /* lowaddr */
2595 				 BUS_SPACE_MAXADDR,		 /* highaddr */
2596 				 NULL, NULL,                     /* filter, filterarg */
2597 				 sc->pel_seq_number_sz,		 /* maxsize */
2598 				 1,                              /* nsegments */
2599 				 sc->pel_seq_number_sz,          /* maxsegsize */
2600 				 0,                              /* flags */
2601 				 NULL, NULL,                     /* lockfunc, lockarg */
2602 				 &sc->pel_seq_num_dmatag)) {
2603 			 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create PEL seq number dma memory tag\n");
2604 			 retval = -ENOMEM;
2605 			 goto out_failed;
2606 		}
2607 
2608 		if (bus_dmamem_alloc(sc->pel_seq_num_dmatag, (void **)&sc->pel_seq_number,
2609 		    BUS_DMA_NOWAIT, &sc->pel_seq_num_dmamap)) {
2610 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate PEL seq number kernel buffer dma memory\n");
2611 			retval = -ENOMEM;
2612 			goto out_failed;
2613 		}
2614 
2615 		bzero(sc->pel_seq_number, sc->pel_seq_number_sz);
2616 
2617 		bus_dmamap_load(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap, sc->pel_seq_number,
2618 		    sc->pel_seq_number_sz, mpi3mr_memaddr_cb, &sc->pel_seq_number_dma, BUS_DMA_NOWAIT);
2619 
2620 		if (!sc->pel_seq_number) {
2621 			printf(IOCNAME "%s:%d Cannot load PEL seq number dma memory for size: %d\n", sc->name,
2622 				__func__, __LINE__, sc->pel_seq_number_sz);
2623 			retval = -ENOMEM;
2624 			goto out_failed;
2625 		}
2626 	}
2627 
2628 out_failed:
2629 	return retval;
2630 }
2631 
2632 /**
2633  * mpi3mr_validate_fw_update - validate IOCFacts post adapter reset
2634  * @sc: Adapter instance reference
2635  *
2636  * Return zero if the new IOCFacts is compatible with previous values
2637  * else return appropriate error
2638  */
2639 static int
mpi3mr_validate_fw_update(struct mpi3mr_softc * sc)2640 mpi3mr_validate_fw_update(struct mpi3mr_softc *sc)
2641 {
2642 	U16 dev_handle_bitmap_sz;
2643 	U8 *removepend_bitmap;
2644 
2645 	if (sc->facts.reply_sz > sc->reply_sz) {
2646 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2647 		    "Cannot increase reply size from %d to %d\n",
2648 		    sc->reply_sz, sc->reply_sz);
2649 		return -EPERM;
2650 	}
2651 
2652 	if (sc->num_io_throttle_group != sc->facts.max_io_throttle_group) {
2653 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2654 		    "max io throttle group doesn't match old(%d), new(%d)\n",
2655 		    sc->num_io_throttle_group,
2656 		    sc->facts.max_io_throttle_group);
2657 		return -EPERM;
2658 	}
2659 
2660 	if (sc->facts.max_op_reply_q < sc->num_queues) {
2661 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2662 		    "Cannot reduce number of operational reply queues from %d to %d\n",
2663 		    sc->num_queues,
2664 		    sc->facts.max_op_reply_q);
2665 		return -EPERM;
2666 	}
2667 
2668 	if (sc->facts.max_op_req_q < sc->num_queues) {
2669 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2670 		    "Cannot reduce number of operational request queues from %d to %d\n",
2671 		    sc->num_queues, sc->facts.max_op_req_q);
2672 		return -EPERM;
2673 	}
2674 
2675 	dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
2676 
2677 	if (dev_handle_bitmap_sz > sc->dev_handle_bitmap_sz) {
2678 		removepend_bitmap = realloc(sc->removepend_bitmap,
2679 		    dev_handle_bitmap_sz, M_MPI3MR, M_NOWAIT);
2680 
2681 		if (!removepend_bitmap) {
2682 			mpi3mr_dprint(sc, MPI3MR_ERROR,
2683 			    "failed to increase removepend_bitmap sz from: %d to %d\n",
2684 			    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2685 			return -ENOMEM;
2686 		}
2687 
2688 		memset(removepend_bitmap + sc->dev_handle_bitmap_sz, 0,
2689 		    dev_handle_bitmap_sz - sc->dev_handle_bitmap_sz);
2690 		sc->removepend_bitmap = removepend_bitmap;
2691 		mpi3mr_dprint(sc, MPI3MR_INFO,
2692 		    "increased dev_handle_bitmap_sz from %d to %d\n",
2693 		    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2694 		sc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
2695 	}
2696 
2697 	return 0;
2698 }
2699 
2700 /*
2701  * mpi3mr_initialize_ioc - Controller initialization
2702  * @dev: pointer to device struct
2703  *
2704  * This function allocates the controller wide resources and brings
2705  * the controller to operational state
2706  *
2707  * Return: 0 on success and proper error codes on failure
2708  */
int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
{
	int retval = 0;
	enum mpi3mr_iocstate ioc_state;
	U64 ioc_info;
	U32 ioc_status, ioc_control, i, timeout;
	Mpi3IOCFactsData_t facts_data;
	char str[32];
	U32 size;

	sc->cpu_count = mp_ncpus;

	/* Snapshot the system interface registers for the bring-up log. */
	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
	ioc_info = mpi3mr_regread64(sc, MPI3_SYSIF_IOC_INFO_LOW_OFFSET);

	mpi3mr_dprint(sc, MPI3MR_INFO, "SOD ioc_status: 0x%x ioc_control: 0x%x "
	    "ioc_info: 0x%lx\n", ioc_status, ioc_control, ioc_info);

	/* The timeout value is in 2sec unit, changing it to seconds */
	sc->ready_timeout =
	    ((ioc_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;

	ioc_state = mpi3mr_get_iocstate(sc);

	mpi3mr_dprint(sc, MPI3MR_INFO, "IOC state: %s   IOC ready timeout: %d\n",
	    mpi3mr_iocstate_name(ioc_state), sc->ready_timeout);

	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
		/*
		 * Fix: poll the IOC state every 100ms and stop as soon as the
		 * transition completes, instead of unconditionally sleeping
		 * for the whole window.  Using "while (timeout--)" also
		 * avoids the unsigned wrap of the original
		 * "do { } while (--timeout)" when ready_timeout is 0, which
		 * would spin for ~2^32 iterations.
		 */
		timeout = sc->ready_timeout * 10;
		while (timeout--) {
			DELAY(1000 * 100);
			ioc_state = mpi3mr_get_iocstate(sc);
			if (ioc_state != MRIOC_STATE_BECOMING_READY &&
			    ioc_state != MRIOC_STATE_RESET_REQUESTED)
				break;
		}

		mpi3mr_dprint(sc, MPI3MR_INFO,
			"IOC in %s state after waiting for reset time\n",
			mpi3mr_iocstate_name(ioc_state));
	}

	if (ioc_state == MRIOC_STATE_READY) {
		/* Message Unit Reset takes a READY IOC back to RESET cleanly. */
		retval = mpi3mr_mur_ioc(sc, MPI3MR_RESET_FROM_BRINGUP);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to MU reset IOC, error 0x%x\n",
				retval);
		}
		ioc_state = mpi3mr_get_iocstate(sc);
	}

	if (ioc_state != MRIOC_STATE_RESET) {
		mpi3mr_print_fault_info(sc);
		mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
		retval = mpi3mr_issue_reset(sc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
		    MPI3MR_RESET_FROM_BRINGUP);
		if (retval) {
			/* Fix: format was "0x%d" (hex prefix, decimal conversion) */
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s :Failed to soft reset IOC, error 0x%x\n",
			    __func__, retval);
			goto out_failed;
		}
	}

	ioc_state = mpi3mr_get_iocstate(sc);

	if (ioc_state != MRIOC_STATE_RESET) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot bring IOC to reset state\n");
		goto out_failed;
	}

	retval = mpi3mr_setup_admin_qpair(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup Admin queues, error 0x%x\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_bring_ioc_ready(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n",
		    retval);
		goto out_failed;
	}

	/* First-time init: a single vector is enough until IOCFacts tells us
	 * how many operational queues the firmware supports. */
	if (init_type == MPI3MR_INIT_TYPE_INIT) {
		retval = mpi3mr_alloc_interrupts(sc, 1);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error 0x%x\n",
			    retval);
			goto out_failed;
		}

		retval = mpi3mr_setup_irqs(sc);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error 0x%x\n",
			    retval);
			goto out_failed;
		}
	}

	mpi3mr_enable_interrupts(sc);

	/* One-time lock/tracker/list initialization; skipped on reset
	 * reinitialization since these objects survive a reset. */
	if (init_type == MPI3MR_INIT_TYPE_INIT) {
		mtx_init(&sc->mpi3mr_mtx, "SIM lock", NULL, MTX_DEF);
		mtx_init(&sc->io_lock, "IO lock", NULL, MTX_DEF);
		mtx_init(&sc->admin_req_lock, "Admin Request Queue lock", NULL, MTX_SPIN);
		mtx_init(&sc->reply_free_q_lock, "Reply free Queue lock", NULL, MTX_SPIN);
		mtx_init(&sc->sense_buf_q_lock, "Sense buffer Queue lock", NULL, MTX_SPIN);
		mtx_init(&sc->chain_buf_lock, "Chain buffer lock", NULL, MTX_SPIN);
		mtx_init(&sc->cmd_pool_lock, "Command pool lock", NULL, MTX_DEF);
		mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_DEF);
		mtx_init(&sc->target_lock, "Target lock", NULL, MTX_SPIN);
		mtx_init(&sc->reset_mutex, "Reset lock", NULL, MTX_DEF);

		mtx_init(&sc->init_cmds.completion.lock, "Init commands lock", NULL, MTX_DEF);
		sc->init_cmds.reply = NULL;
		sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
		sc->init_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->init_cmds.host_tag = MPI3MR_HOSTTAG_INITCMDS;

		mtx_init(&sc->ioctl_cmds.completion.lock, "IOCTL commands lock", NULL, MTX_DEF);
		sc->ioctl_cmds.reply = NULL;
		sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
		sc->ioctl_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->ioctl_cmds.host_tag = MPI3MR_HOSTTAG_IOCTLCMDS;

		mtx_init(&sc->pel_abort_cmd.completion.lock, "PEL Abort command lock", NULL, MTX_DEF);
		sc->pel_abort_cmd.reply = NULL;
		sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
		sc->pel_abort_cmd.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->pel_abort_cmd.host_tag = MPI3MR_HOSTTAG_PELABORT;

		mtx_init(&sc->host_tm_cmds.completion.lock, "TM commands lock", NULL, MTX_DEF);
		sc->host_tm_cmds.reply = NULL;
		sc->host_tm_cmds.state = MPI3MR_CMD_NOTUSED;
		sc->host_tm_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->host_tm_cmds.host_tag = MPI3MR_HOSTTAG_TMS;

		TAILQ_INIT(&sc->cmd_list_head);
		TAILQ_INIT(&sc->event_list);
		TAILQ_INIT(&sc->delayed_rmhs_list);
		TAILQ_INIT(&sc->delayed_evtack_cmds_list);

		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
			snprintf(str, 32, "Dev REMHS commands lock[%d]", i);
			mtx_init(&sc->dev_rmhs_cmds[i].completion.lock, str, NULL, MTX_DEF);
			sc->dev_rmhs_cmds[i].reply = NULL;
			sc->dev_rmhs_cmds[i].state = MPI3MR_CMD_NOTUSED;
			sc->dev_rmhs_cmds[i].dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			sc->dev_rmhs_cmds[i].host_tag = MPI3MR_HOSTTAG_DEVRMCMD_MIN
							    + i;
		}
	}

	retval = mpi3mr_issue_iocfacts(sc, &facts_data);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, retval: 0x%x\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_process_factsdata(sc, &facts_data);
	if (retval) {
		/* Fix: log message typo "failedi" */
		mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failed, retval: 0x%x\n",
		    retval);
		goto out_failed;
	}

	sc->num_io_throttle_group = sc->facts.max_io_throttle_group;
	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);

	if (init_type == MPI3MR_INIT_TYPE_RESET) {
		/* Post-reset facts must stay compatible with what the host
		 * allocated before the reset. */
		retval = mpi3mr_validate_fw_update(sc);
		if (retval)
			goto out_failed;
	} else {
		sc->reply_sz = sc->facts.reply_sz;
	}

	mpi3mr_display_ioc_info(sc);

	retval = mpi3mr_reply_alloc(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, retval: 0x%x\n",
		    retval);
		goto out_failed;
	}

	if (init_type == MPI3MR_INIT_TYPE_INIT) {
		retval = mpi3mr_alloc_chain_bufs(sc);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, retval: 0x%x\n",
			    retval);
			goto out_failed;
		}
	}

	retval = mpi3mr_issue_iocinit(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, retval: 0x%x\n",
		    retval);
		goto out_failed;
	}

	mpi3mr_print_fw_pkg_ver(sc);

	/* Hand all reply/sense buffers back to the firmware by publishing
	 * the host indexes. */
	sc->reply_free_q_host_index = sc->num_reply_bufs;
	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
		sc->reply_free_q_host_index);

	sc->sense_buf_q_host_index = sc->num_sense_bufs;

	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
		sc->sense_buf_q_host_index);

	if (init_type == MPI3MR_INIT_TYPE_INIT) {
		/* Re-provision the full MSI-x vector count now that the
		 * supported queue counts are known. */
		retval = mpi3mr_alloc_interrupts(sc, 0);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, retval: 0x%x\n",
			    retval);
			goto out_failed;
		}

		retval = mpi3mr_setup_irqs(sc);
		if (retval) {
			printf(IOCNAME "Failed to setup ISR, error: 0x%x\n",
			    sc->name, retval);
			goto out_failed;
		}

		mpi3mr_enable_interrupts(sc);

	} else
		mpi3mr_enable_interrupts(sc);

	retval = mpi3mr_create_op_queues(sc);

	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create operational queues, error: %d\n",
		    retval);
		goto out_failed;
	}

	if (!sc->throttle_groups && sc->num_io_throttle_group) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "allocating memory for throttle groups\n");
		size = sizeof(struct mpi3mr_throttle_group_info);
		sc->throttle_groups = (struct mpi3mr_throttle_group_info *)
					  malloc(sc->num_io_throttle_group *
					      size, M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->throttle_groups)
			goto out_failed;
	}

	if (init_type == MPI3MR_INIT_TYPE_RESET) {
		/* Events and port enable must be re-issued after a reset;
		 * the first-time path does this later from attach. */
		mpi3mr_dprint(sc, MPI3MR_INFO, "Re-register events\n");
		retval = mpi3mr_register_events(sc);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, retval: 0x%x\n",
			    retval);
			goto out_failed;
		}

		mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Port Enable\n");
		retval = mpi3mr_issue_port_enable(sc, 0);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, retval: 0x%x\n",
			    retval);
			goto out_failed;
		}
	}
	retval = mpi3mr_pel_alloc(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, retval: 0x%x\n",
		    retval);
		goto out_failed;
	}

	return retval;

out_failed:
	retval = -1;
	return retval;
}
2994 
mpi3mr_port_enable_complete(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * drvrcmd)2995 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
2996     struct mpi3mr_drvr_cmd *drvrcmd)
2997 {
2998 	drvrcmd->state = MPI3MR_CMD_NOTUSED;
2999 	drvrcmd->callback = NULL;
3000 	printf(IOCNAME "Completing Port Enable Request\n", sc->name);
3001 	sc->mpi3mr_flags |= MPI3MR_FLAGS_PORT_ENABLE_DONE;
3002 	mpi3mr_startup_decrement(sc->cam_sc);
3003 }
3004 
/**
 * mpi3mr_issue_port_enable - Send Port Enable to the firmware
 * @sc: Adapter instance reference
 * @async: non-zero to complete via callback, zero to block here
 *
 * Posts a Port Enable admin request using the init command tracker.
 * In synchronous mode the call waits up to MPI3MR_PORTENABLE_TIMEOUT
 * and escalates to a fault check on timeout.
 *
 * Return: 0 on success, -1 on failure.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_softc *sc, U8 async)
{
	Mpi3PortEnableRequest_t pe_req;
	int rc = 0;

	memset(&pe_req, 0, sizeof(pe_req));

	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		/* The single init tracker is busy; caller must retry later. */
		printf(IOCNAME "Issue PortEnable: Init command is in use\n", sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		rc = -1;
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		sc->init_cmds.is_waiting = 0;
		sc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		sc->init_cmds.is_waiting = 1;
		sc->init_cmds.callback = NULL;
		init_completion(&sc->init_cmds.completion);
	}

	pe_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	pe_req.Function = MPI3_FUNCTION_PORT_ENABLE;

	printf(IOCNAME "Sending Port Enable Request\n", sc->name);
	rc = mpi3mr_submit_admin_cmd(sc, &pe_req, sizeof(pe_req));
	if (rc != 0) {
		printf(IOCNAME "Issue PortEnable: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	if (!async) {
		wait_for_completion_timeout(&sc->init_cmds.completion,
		    MPI3MR_PORTENABLE_TIMEOUT);
		if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
			printf(IOCNAME "Issue PortEnable: command timed out\n",
			    sc->name);
			rc = -1;
			/* Timeout may indicate a firmware fault; check it. */
			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_PE_TIMEOUT);
			goto out_unlock;
		}
		mpi3mr_port_enable_complete(sc, &sc->init_cmds);
	}

out_unlock:
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	return rc;
}
3058 
/**
 * mpi3mr_watchdog_thread - Periodic controller health monitor
 * @arg: Adapter instance reference (struct mpi3mr_softc *)
 *
 * Kernel thread that wakes roughly once per second to check for
 * shutdown/unrecoverable flags, reset-history, firmware faults and
 * driver-triggered resets, invoking the soft reset handler as needed.
 * Note the lock choreography: sc->reset_mutex is held at the top of
 * every loop iteration (and while sleeping), but released for the
 * body where register reads and reset handling happen.
 */
void
mpi3mr_watchdog_thread(void *arg)
{
	struct mpi3mr_softc *sc;
	enum mpi3mr_iocstate ioc_state;
	U32 fault, host_diagnostic, ioc_status;

	sc = (struct mpi3mr_softc *)arg;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s\n", __func__);

	sc->watchdog_thread_active = 1;
	mtx_lock(&sc->reset_mutex);
	for (;;) {
		/* Exit conditions are checked with reset_mutex held. */
		if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
		    (sc->unrecoverable == 1)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "Exit due to %s from %s\n",
			   sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		mtx_unlock(&sc->reset_mutex);

		/* Firmware asked us to prepare for reset but never followed
		 * through within the allowed window: force a soft reset. */
		if ((sc->prepare_for_reset) &&
		    ((sc->prepare_for_reset_timeout_counter++) >=
		     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
			mpi3mr_soft_reset_handler(sc,
			    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
			goto sleep;
		}

		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);

		/* Reset-history bit set means the firmware reset itself. */
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
			mpi3mr_soft_reset_handler(sc, MPI3MR_RESET_FROM_FIRMWARE, 0);
			goto sleep;
		}

		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT) {
			fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
			    MPI3_SYSIF_FAULT_CODE_MASK;

			/* Give an in-progress diagnostic save time to finish
			 * before resetting the controller out from under it. */
			host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
			if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
				if (!sc->diagsave_timeout) {
					mpi3mr_print_fault_info(sc);
					mpi3mr_dprint(sc, MPI3MR_INFO,
						"diag save in progress\n");
				}
				if ((sc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
					goto sleep;
			}
			mpi3mr_print_fault_info(sc);
			sc->diagsave_timeout = 0;

			/* Faults that a host-initiated reset cannot clear. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) ||
			    (fault == MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED)) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
				    "Controller requires system power cycle or complete reset is needed,"
				    "fault code: 0x%x. marking controller as unrecoverable\n", fault);
				sc->unrecoverable = 1;
				break;
			}
			/* A reset is already underway (or pending); leave it
			 * to the in-flight handler and stop watching. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
			    || (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS)
			    || (sc->reset_in_progress))
				break;
			if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
			else
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_FAULT_WATCH, 0);

		}

		/* Driver-requested (deferred) soft reset trigger. */
		if (sc->reset.type == MPI3MR_TRIGGER_SOFT_RESET) {
			mpi3mr_print_fault_info(sc);
			mpi3mr_soft_reset_handler(sc, sc->reset.reason, 1);
		}
sleep:
		mtx_lock(&sc->reset_mutex);
		/*
		 * Sleep for 1 second if we're not exiting, then loop to top
		 * to poll exit status and hardware health.
		 */
		if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) == 0 &&
		    !sc->unrecoverable) {
			msleep(&sc->watchdog_chan, &sc->reset_mutex, PRIBIO,
			    "mpi3mr_watchdog", 1 * hz);
		}
	}
	mtx_unlock(&sc->reset_mutex);
	sc->watchdog_thread_active = 0;
	mpi3mr_kproc_exit(0);
}
3157 
/**
 * mpi3mr_display_event_data - Log a firmware event notification
 * @sc: Adapter instance reference
 * @event_rep: Event notification reply from the firmware
 *
 * Maps the event code to a human-readable description and prints it
 * at MPI3MR_EVENT debug level.  Events carrying interesting payload
 * (device add/info/status, SAS discovery, PCIe enumeration) decode a
 * few fields from EventData and return early; events with no handler
 * here (desc left NULL) are silently ignored.
 */
static void mpi3mr_display_event_data(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_rep)
{
	char *desc = NULL;
	U16 event;

	event = event_rep->Event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		/* Payload is a Device Page 0; print identifying fields. */
		Mpi3DevicePage0_t *event_data =
		    (Mpi3DevicePage0_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Added: Dev=0x%04x Form=0x%x Perst id: 0x%x\n",
			event_data->DevHandle, event_data->DeviceForm, event_data->PersistentID);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		Mpi3DevicePage0_t *event_data =
		    (Mpi3DevicePage0_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Info Changed: Dev=0x%04x Form=0x%x\n",
			event_data->DevHandle, event_data->DeviceForm);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		Mpi3EventDataDeviceStatusChange_t *event_data =
		    (Mpi3EventDataDeviceStatusChange_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Status Change: Dev=0x%04x RC=0x%x\n",
			event_data->DevHandle, event_data->ReasonCode);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		Mpi3EventDataSasDiscovery_t *event_data =
		    (Mpi3EventDataSasDiscovery_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "SAS Discovery: (%s)",
			(event_data->ReasonCode == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		/* DiscoveryStatus is only appended when event debugging is
		 * enabled, so the raw printf matches the dprint gating. */
		if (event_data->DiscoveryStatus &&
		    (sc->mpi3mr_debug & MPI3MR_EVENT)) {
			printf("discovery_status(0x%08x)",
			    event_data->DiscoveryStatus);

		}

		if (sc->mpi3mr_debug & MPI3MR_EVENT)
			printf("\n");
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		Mpi3EventDataPcieEnumeration_t *event_data =
			(Mpi3EventDataPcieEnumeration_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "PCIE Enumeration: (%s)",
			(event_data->ReasonCode ==
			    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" :
			    "stop");
		if (event_data->EnumerationStatus)
			mpi3mr_dprint(sc, MPI3MR_EVENT, "enumeration_status(0x%08x)",
			   event_data->EnumerationStatus);
		if (sc->mpi3mr_debug & MPI3MR_EVENT)
			printf("\n");
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	/* Unhandled event codes produce no output. */
	if (!desc)
		return;

	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s\n", desc);
}
3279 
3280 struct mpi3mr_target *
mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc * cam_sc,uint16_t per_id)3281 mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc *cam_sc,
3282     uint16_t per_id)
3283 {
3284 	struct mpi3mr_target *target = NULL;
3285 
3286 	mtx_lock_spin(&cam_sc->sc->target_lock);
3287 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3288 		if (target->per_id == per_id)
3289 			break;
3290 	}
3291 
3292 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3293 	return target;
3294 }
3295 
3296 struct mpi3mr_target *
mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc * cam_sc,uint16_t handle)3297 mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc *cam_sc,
3298     uint16_t handle)
3299 {
3300 	struct mpi3mr_target *target = NULL;
3301 
3302 	mtx_lock_spin(&cam_sc->sc->target_lock);
3303 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3304 		if (target->dev_handle == handle)
3305 			break;
3306 
3307 	}
3308 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3309 	return target;
3310 }
3311 
/**
 * mpi3mr_update_device - Refresh a target from Device Page 0
 * @sc: Adapter instance reference
 * @tgtdev: Target to update
 * @dev_pg0: Firmware Device Page 0 for this device
 * @is_added: true when called for a newly discovered device
 *
 * Copies identity, queue-depth, visibility and form-specific
 * (SAS/SATA, PCIe, virtual disk) attributes from the firmware page
 * into the driver's target structure.  Devices that are not usable
 * end devices are marked hidden so CAM will not expose them.
 */
void mpi3mr_update_device(struct mpi3mr_softc *sc,
    struct mpi3mr_target *tgtdev, Mpi3DevicePage0_t *dev_pg0,
    bool is_added)
{
	U16 flags = 0;

	tgtdev->per_id = (dev_pg0->PersistentID);
	tgtdev->dev_handle = (dev_pg0->DevHandle);
	tgtdev->dev_type = dev_pg0->DeviceForm;
	tgtdev->encl_handle = (dev_pg0->EnclosureHandle);
	tgtdev->parent_handle = (dev_pg0->ParentDevHandle);
	tgtdev->slot = (dev_pg0->Slot);
	/* NOTE(review): both tgtdev->qdepth (here) and tgtdev->q_depth (in
	 * the PCIe/VD paths below) exist as separate members — confirm the
	 * intended distinction against the softc header. */
	tgtdev->qdepth = (dev_pg0->QueueDepth);
	tgtdev->wwid = (dev_pg0->WWID);

	flags = (dev_pg0->Flags);
	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
	/* Throttling eligibility is latched only at add time. */
	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	/* Any access status other than the benign ones hides the device. */
	switch (dev_pg0->AccessStatus) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	/* Maximum WRITE SAME length in logical blocks (0 = no limit). */
	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->ws_len = 256;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->ws_len = 2048;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->ws_len = 0;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		Mpi3Device0SasSataFormat_t *sasinf =
		    &dev_pg0->DeviceSpecific.SasSataFormat;
		U16 dev_info = (sasinf->DeviceInfo);
		tgtdev->dev_spec.sassata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sassata_inf.sas_address =
		    (sasinf->SASAddress);
		/* Only SSP/STP-SATA end devices are exposed. */
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
			    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		Mpi3Device0PcieFormat_t *pcieinf =
		    &dev_pg0->DeviceSpecific.PcieFormat;
		U16 dev_info = (pcieinf->DeviceInfo);

		tgtdev->q_depth = dev_pg0->QueueDepth;
		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    (pcieinf->Capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* Detailed NVMe parameters are only valid when the device
		 * reports no access errors. */
		if (dev_pg0->AccessStatus == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    (pcieinf->MaximumDataTransferSize);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->PageSize;
			tgtdev->dev_spec.pcie_inf.reset_to =
				pcieinf->ControllerResetTO;
			tgtdev->dev_spec.pcie_inf.abort_to =
				pcieinf->NVMeAbortTO;
		}
		/* Cap the max data transfer size at 1 MiB. */
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);

		/* Hide anything that is neither an NVMe nor a SCSI-over-PCIe
		 * device. */
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;

		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		Mpi3Device0VdFormat_t *vdinf =
		    &dev_pg0->DeviceSpecific.VdFormat;
		struct mpi3mr_throttle_group_info *tg = NULL;

		tgtdev->dev_spec.vol_inf.state = vdinf->VdState;
		if (vdinf->VdState == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->dev_spec.vol_inf.tg_id = vdinf->IOThrottleGroup;
		/* Firmware thresholds are in 2048-unit granularity. */
		tgtdev->dev_spec.vol_inf.tg_high =
			vdinf->IOThrottleGroupHigh * 2048;
		tgtdev->dev_spec.vol_inf.tg_low =
			vdinf->IOThrottleGroupLow * 2048;
		if (vdinf->IOThrottleGroup < sc->num_io_throttle_group) {
			tg = sc->throttle_groups + vdinf->IOThrottleGroup;
			tg->id = vdinf->IOThrottleGroup;
			tg->high = tgtdev->dev_spec.vol_inf.tg_high;
			tg->low = tgtdev->dev_spec.vol_inf.tg_low;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vol_inf.tg = tg;
		tgtdev->throttle_group = tg;
		break;
	}
	default:
		goto out;
	}

out:
	return;
}
3439 
/**
 * mpi3mr_create_device - Create or refresh a target for a device
 * @sc: Adapter instance reference
 * @dev_pg0: Firmware Device Page 0 describing the device
 *
 * If a target with the same persistent ID already exists it is
 * refreshed in place; otherwise a new target is allocated, populated
 * and linked onto the CAM target list.
 *
 * Return: 0 on success, -1 on allocation failure.
 */
int mpi3mr_create_device(struct mpi3mr_softc *sc,
    Mpi3DevicePage0_t *dev_pg0)
{
	struct mpi3mr_target *target;
	U16 per_id;

	per_id = dev_pg0->PersistentID;

	/* Search for an existing target under the spin lock. */
	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
		if (target->per_id == per_id) {
			target->state = MPI3MR_DEV_CREATED;
			break;
		}
	}
	mtx_unlock_spin(&sc->target_lock);

	if (target != NULL) {
		mpi3mr_update_device(sc, target, dev_pg0, true);
		return (0);
	}

	target = malloc(sizeof(*target), M_MPI3MR, M_NOWAIT | M_ZERO);
	if (target == NULL)
		return (-1);

	target->exposed_to_os = 0;
	mpi3mr_update_device(sc, target, dev_pg0, true);

	mtx_lock_spin(&sc->target_lock);
	TAILQ_INSERT_TAIL(&sc->cam_sc->tgt_list, target, tgt_next);
	target->state = MPI3MR_DEV_CREATED;
	mtx_unlock_spin(&sc->target_lock);

	return (0);
}
3479 
3480 /**
3481  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
3482  * @sc: Adapter instance reference
3483  * @drv_cmd: Internal command tracker
3484  *
3485  * Issues a target reset TM to the firmware from the device
3486  * removal TM pend list or retry the removal handshake sequence
3487  * based on the IOU control request IOC status.
3488  *
3489  * Return: Nothing
3490  */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	/* Recover the device-removal command slot index from the host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_target *tgtdev = NULL;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/*
		 * IOU control failed: restart the full handshake (TM +
		 * IOU control) until the retry budget is exhausted.
		 */
		if (drv_cmd->retry_count < MPI3MR_DEVRMHS_RETRYCOUNT) {
			drv_cmd->retry_count++;
			mpi3mr_dprint(sc, MPI3MR_EVENT,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		/*
		 * Handshake done: mark every target bearing this device
		 * handle as removal-handshake-completed and clear the
		 * handle from the pending-removal bitmap.
		 */
		mtx_lock_spin(&sc->target_lock);
		TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
		       if (tgtdev->dev_handle == drv_cmd->dev_handle)
			       tgtdev->state = MPI3MR_DEV_REMOVE_HS_COMPLETED;
		}
		mtx_unlock_spin(&sc->target_lock);

		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_clear_bit(drv_cmd->dev_handle, sc->removepend_bitmap);
	}

	/*
	 * Reuse this command tracker for the first postponed removal
	 * handshake, if any, instead of releasing the slot.
	 */
	if (!TAILQ_EMPTY(&sc->delayed_rmhs_list)) {
		delayed_dev_rmhs = TAILQ_FIRST(&sc->delayed_rmhs_list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		TAILQ_REMOVE(&sc->delayed_rmhs_list, delayed_dev_rmhs, list);
		free(delayed_dev_rmhs, M_MPI3MR);
		return;
	}
	/* Nothing pending: release the tracker and its bitmap slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3550 
3551 /**
3552  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
3553  * @sc: Adapter instance reference
3554  * @drv_cmd: Internal command tracker
3555  *
3556  * Issues a target reset TM to the firmware from the device
3557  * removal TM pend list or issue IO Unit control request as
3558  * part of device removal or hidden acknowledgment handshake.
3559  *
3560  * Return: Nothing
3561  */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	Mpi3IoUnitControlRequest_t iou_ctrl;
	/* Recover the device-removal command slot index from the host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	Mpi3SCSITaskMgmtReply_t *tm_reply = NULL;
	int retval;

	/* A TM reply frame is only available when the firmware returned one. */
	if (drv_cmd->state & MPI3MR_CMD_REPLYVALID)
		tm_reply = (Mpi3SCSITaskMgmtReply_t *)drv_cmd->reply;

	if (tm_reply)
		printf(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    sc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32toh(tm_reply->TerminationCount));

	printf(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    sc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/*
	 * Second stage of the handshake: issue the IO Unit control
	 * request (remove or hidden-ack, per iou_rc), reusing this
	 * command tracker with a new completion callback.
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.Operation = drv_cmd->iou_rc;
	iou_ctrl.Param16[0] = htole16(drv_cmd->dev_handle);
	iou_ctrl.HostTag = htole16(drv_cmd->host_tag);
	iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
	if (retval) {
		printf(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    sc->name);
		goto out_failed;
	}

	return;
out_failed:
	/* Could not post the request: release the tracker and its slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3608 
3609 /**
3610  * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
3611  * @sc: Adapter instance reference
3612  * @handle: Device handle
3613  * @cmdparam: Internal command tracker
3614  * @iou_rc: IO Unit reason code
3615  *
3616  * Issues a target reset TM to the firmware or add it to a pend
3617  * list as part of device removal or hidden acknowledgment
3618  * handshake.
3619  *
3620  * Return: Nothing
3621  */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc)
{
	Mpi3SCSITaskMgmtRequest_t tm_req;
	int retval = 0;
	U16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	U8 retrycount = 5;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	/* A caller-supplied tracker means this is a retry/continuation. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Reserve a free device-removal command slot; loop a few times
	 * since another context can race for the same free bit.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx, sc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	/*
	 * No slot available: postpone the handshake on the delayed list;
	 * it is reissued when a prior handshake completes.  Allocation
	 * failure silently drops the request (best effort).
	 */
	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = malloc(sizeof(*delayed_dev_rmhs),M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!delayed_dev_rmhs)
			return;
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		TAILQ_INSERT_TAIL(&(sc->delayed_rmhs_list), delayed_dev_rmhs, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);


		return;
	}
	drv_cmd = &sc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	/* First stage of the handshake: target-reset TM for the handle. */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.DevHandle = htole16(handle);
	tm_req.TaskType = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.HostTag = htole16(drv_cmd->host_tag);
	tm_req.TaskHostTag = htole16(MPI3MR_HOSTTAG_INVALID);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Mark the handle removal-pending before posting the request. */
	mpi3mr_set_bit(handle, sc->removepend_bitmap);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Could not post the TM: release the tracker and its slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3699 
3700 /**
3701  * mpi3mr_complete_evt_ack - Event ack request completion
3702  * @sc: Adapter instance reference
3703  * @drv_cmd: Internal command tracker
3704  *
3705  * This is the completion handler for non blocking event
3706  * acknowledgment sent to the firmware and this will issue any
3707  * pending event acknowledgment request.
3708  *
3709  * Return: Nothing
3710  */
static void mpi3mr_complete_evt_ack(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	/* Recover the event-ack command slot index from the host tag. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
	struct delayed_evtack_node *delayed_evtack = NULL;

	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "%s: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", __func__,
		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    drv_cmd->ioc_loginfo);
	}

	/*
	 * Reuse this command tracker for the first postponed event ack,
	 * if any, instead of releasing the slot.
	 */
	if (!TAILQ_EMPTY(&sc->delayed_evtack_cmds_list)) {
		delayed_evtack = TAILQ_FIRST(&sc->delayed_evtack_cmds_list);
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "%s: processing delayed event ack for event %d\n",
		    __func__, delayed_evtack->event);
		mpi3mr_send_evt_ack(sc, delayed_evtack->event, drv_cmd,
		    delayed_evtack->event_ctx);
		TAILQ_REMOVE(&sc->delayed_evtack_cmds_list, delayed_evtack, list);
		free(delayed_evtack, M_MPI3MR);
		return;
	}
	/* Nothing pending: release the tracker and its bitmap slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
}
3739 
3740 /**
 * mpi3mr_send_evt_ack - Issue event acknowledgment request
3742  * @sc: Adapter instance reference
3743  * @event: MPI3 event id
3744  * @cmdparam: Internal command tracker
3745  * @event_ctx: Event context
3746  *
 * Issues an event acknowledgment request to the firmware if a
 * free command tracker is available; otherwise adds the request
 * to a pending list so that it is processed upon completion of
 * a prior event acknowledgment.
3751  *
3752  * Return: Nothing
3753  */
static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;
	U8 retrycount = 5;
	U16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_evtack_node *delayed_evtack = NULL;

	/* A caller-supplied tracker means a delayed ack is being reissued. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Reserve a free event-ack command slot; loop a few times since
	 * another context can race for the same free bit.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx,
			    sc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	/*
	 * No slot available: postpone the ack on the delayed list; it is
	 * issued when a prior ack completes.  Allocation failure silently
	 * drops the request (best effort).
	 */
	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = malloc(sizeof(*delayed_evtack),M_MPI3MR,
		     M_ZERO | M_NOWAIT);
		if (!delayed_evtack)
			return;
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		TAILQ_INSERT_TAIL(&(sc->delayed_evtack_cmds_list), delayed_evtack, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s : Event ack for event:%d is postponed\n",
		    __func__, event);
		return;
	}
	drv_cmd = &sc->evtack_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s: Command is in use\n", __func__);
		goto out;
	}
	/* Post the non-blocking EventAck request to the firmware. */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.HostTag = htole16(drv_cmd->host_tag);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));

	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Admin Post failed\n", __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Could not post the request: release the tracker and its slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
}
3820 
3821 /*
3822  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
3823  * @sc: Adapter instance reference
3824  * @event_reply: Event data
3825  *
3826  * Checks for the reason code and based on that either block I/O
3827  * to device, or unblock I/O to the device, or start the device
3828  * removal handshake with reason as remove with the firmware for
3829  * PCIe devices.
3830  *
3831  * Return: Nothing
3832  */
static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataPcieTopologyChangeList_t *topo_evt =
	    (Mpi3EventDataPcieTopologyChangeList_t *) event_reply->EventData;
	int i;
	U16 handle;
	U8 reason_code;
	struct mpi3mr_target *tgtdev = NULL;

	for (i = 0; i < topo_evt->NumEntries; i++) {
		handle = le16toh(topo_evt->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = topo_evt->PortEntry[i].PortStatus;
		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			/* Device is gone: drop any I/O block and start the
			 * firmware device-removal handshake. */
			if (tgtdev) {
				tgtdev->dev_removed = 1;
				tgtdev->dev_removedelay = 0;
				mpi3mr_atomic_set(&tgtdev->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while it is away. */
			if (tgtdev) {
				tgtdev->dev_removedelay = 1;
				mpi3mr_atomic_inc(&tgtdev->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			/* Device returned: undo the delayed-removal block. */
			if (tgtdev &&
			    tgtdev->dev_removedelay) {
				tgtdev->dev_removedelay = 0;
				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
					mpi3mr_atomic_dec(&tgtdev->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
		default:
			break;
		}
	}
}
3879 
3880 /**
3881  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
3882  * @sc: Adapter instance reference
3883  * @event_reply: Event data
3884  *
3885  * Checks for the reason code and based on that either block I/O
3886  * to device, or unblock I/O to the device, or start the device
3887  * removal handshake with reason as remove with the firmware for
3888  * SAS/SATA devices.
3889  *
3890  * Return: Nothing
3891  */
static void mpi3mr_sastopochg_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataSasTopologyChangeList_t *topo_evt =
	    (Mpi3EventDataSasTopologyChangeList_t *)event_reply->EventData;
	int i;
	U16 handle;
	U8 reason_code;
	struct mpi3mr_target *tgtdev = NULL;

	for (i = 0; i < topo_evt->NumEntries; i++) {
		handle = le16toh(topo_evt->PhyEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = topo_evt->PhyEntry[i].PhyStatus &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			/* Device is gone: drop any I/O block and start the
			 * firmware device-removal handshake. */
			if (tgtdev) {
				tgtdev->dev_removed = 1;
				tgtdev->dev_removedelay = 0;
				mpi3mr_atomic_set(&tgtdev->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while it is away. */
			if (tgtdev) {
				tgtdev->dev_removedelay = 1;
				mpi3mr_atomic_inc(&tgtdev->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			/* Device returned: undo the delayed-removal block. */
			if (tgtdev &&
			    tgtdev->dev_removedelay) {
				tgtdev->dev_removedelay = 0;
				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
					mpi3mr_atomic_dec(&tgtdev->block_io);
			}
			/*
			 * Explicit break added: the original fell through
			 * into the (empty) PHY_CHANGED/default cases, which
			 * was behaviorally harmless but warning-prone and
			 * inconsistent with mpi3mr_pcietopochg_evt_th().
			 */
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		default:
			break;
		}
	}
}
3939 /**
3940  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
3941  * @sc: Adapter instance reference
3942  * @event_reply: Event data
3943  *
3944  * Checks for the reason code and based on that either block I/O
3945  * to device, or unblock I/O to the device, or start the device
3946  * removal handshake with reason as remove/hide acknowledgment
3947  * with the firmware.
3948  *
3949  * Return: Nothing
3950  */
static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	U16 dev_handle = 0;
	/* Action flags decoded from the event reason code, applied below. */
	U8 ublock = 0, block = 0, hide = 0, uhide = 0, delete = 0, remove = 0;
	struct mpi3mr_target *tgtdev = NULL;
	Mpi3EventDataDeviceStatusChange_t *evtdata =
	    (Mpi3EventDataDeviceStatusChange_t *) event_reply->EventData;

	dev_handle = le16toh(evtdata->DevHandle);

	switch (evtdata->ReasonCode) {
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
		block = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		hide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		remove = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
		ublock = 1;
		break;
	default:
		break;
	}

	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);

	if (!tgtdev) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :target with dev_handle:0x%x not found\n",
		    __func__, dev_handle);
		return;
	}

	/* Reset started: block further I/O to the device. */
	if (block)
		mpi3mr_atomic_inc(&tgtdev->block_io);

	if (hide)
		tgtdev->is_hidden = hide;

	if (uhide) {
		tgtdev->is_hidden = 0;
		tgtdev->dev_removed = 0;
	}

	if (delete)
		tgtdev->dev_removed = 1;

	/* Reset complete: undo one block taken at reset start. */
	if (ublock) {
		if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
			mpi3mr_atomic_dec(&tgtdev->block_io);
	}

	/* Start the removal or hidden-ack handshake with the firmware. */
	if (remove) {
		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
					MPI3_CTRL_OP_REMOVE_DEVICE);
	}
	if (hide)
		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
					MPI3_CTRL_OP_HIDDEN_ACK);
}
4021 
4022 /**
4023  * mpi3mr_preparereset_evt_th - Prepareforreset evt tophalf
4024  * @sc: Adapter instance reference
4025  * @event_reply: Event data
4026  *
4027  * Blocks and unblocks host level I/O based on the reason code
4028  *
4029  * Return: Nothing
4030  */
static void mpi3mr_preparereset_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataPrepareForReset_t *evtdata =
	    (Mpi3EventDataPrepareForReset_t *)event_reply->EventData;

	if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_START) {
		/* Fixed misspelling "Recieved" in the log message. */
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Received PrepForReset Event with RC=START\n",
		    __func__);
		/*
		 * Ignore a duplicate START notification.  NOTE(review):
		 * this early return also skips the ack below — presumably
		 * duplicate STARTs are not ack-required; verify.
		 */
		if (sc->prepare_for_reset)
			return;
		sc->prepare_for_reset = 1;
		sc->prepare_for_reset_timeout_counter = 0;
	} else if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Received PrepForReset Event with RC=ABORT\n",
		    __func__);
		sc->prepare_for_reset = 0;
		sc->prepare_for_reset_timeout_counter = 0;
	}
	/* Acknowledge the event immediately if the firmware requires it. */
	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		mpi3mr_send_evt_ack(sc, event_reply->Event, NULL,
		    le32toh(event_reply->EventContext));
}
4055 
4056 /**
4057  * mpi3mr_energypackchg_evt_th - Energypackchange evt tophalf
4058  * @sc: Adapter instance reference
4059  * @event_reply: Event data
4060  *
4061  * Identifies the new shutdown timeout value and update.
4062  *
4063  * Return: Nothing
4064  */
static void mpi3mr_energypackchg_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataEnergyPackChange_t *evtdata =
	    (Mpi3EventDataEnergyPackChange_t *)event_reply->EventData;
	U16 shutdown_timeout = le16toh(evtdata->ShutdownTimeout);

	/*
	 * shutdown_timeout is unsigned, so the original "<= 0" test could
	 * only ever match zero; test for zero explicitly (also silences
	 * tautological-comparison compiler warnings).
	 */
	if (shutdown_timeout == 0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :Invalid Shutdown Timeout received = %d\n",
		    __func__, shutdown_timeout);
		return;
	}

	/* Adopt the new firmware-reported shutdown timeout. */
	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
	    __func__, sc->facts.shutdown_timeout, shutdown_timeout);
	sc->facts.shutdown_timeout = shutdown_timeout;
}
4084 
4085 /**
4086  * mpi3mr_cablemgmt_evt_th - Cable mgmt evt tophalf
4087  * @sc: Adapter instance reference
4088  * @event_reply: Event data
4089  *
 * Displays cable management event details.
4091  *
4092  * Return: Nothing
4093  */
static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataCableManagement_t *evtdata =
	    (Mpi3EventDataCableManagement_t *)event_reply->EventData;

	/* Purely informational: log the reported cable condition. */
	switch (evtdata->Status) {
	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
	{
		mpi3mr_dprint(sc, MPI3MR_INFO, "An active cable with ReceptacleID %d cannot be powered.\n"
		    "Devices connected to this cable are not detected.\n"
		    "This cable requires %d mW of power.\n",
		    evtdata->ReceptacleID,
		    le32toh(evtdata->ActiveCablePowerRequirement));
		break;
	}
	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
	{
		mpi3mr_dprint(sc, MPI3MR_INFO, "A cable with ReceptacleID %d is not running at optimal speed\n",
		    evtdata->ReceptacleID);
		break;
	}
	default:
		break;
	}
}
4120 
4121 /**
 * mpi3mr_process_events - Event top-half handler
4123  * @sc: Adapter instance reference
4124  * @event_reply: Event data
4125  *
4126  * Top half of event processing.
4127  *
4128  * Return: Nothing
4129  */
static void mpi3mr_process_events(struct mpi3mr_softc *sc,
    uintptr_t data, Mpi3EventNotificationReply_t *event_reply)
{
	U16 evt_type;
	/* ack_req: firmware requires an explicit EventAck;
	 * process_evt_bh: defer further handling to the event taskqueue. */
	bool ack_req = 0, process_evt_bh = 0;
	struct mpi3mr_fw_event_work *fw_event;
	U16 sz;

	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
		goto out;

	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->Event;

	/* Top-half handling per event type. */
	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
			(Mpi3DevicePage0_t *) event_reply->EventData;
		if (mpi3mr_create_device(sc, dev_pg0))
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			"%s :Failed to add device in the device add event\n",
			__func__);
		else
			process_evt_bh = 1;
		break;
	}

	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		/* The prepare-for-reset handler issues its own ack. */
		mpi3mr_preparereset_evt_th(sc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	case MPI3_EVENT_LOG_DATA:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(sc, event_reply);
		break;
	}

	/* Known events with no top-half work. */
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Event 0x%02x is not handled by driver\n",
		    __func__, evt_type);
		break;
	}

	if (process_evt_bh || ack_req) {
		/* Copy the event data and queue it for the bottom half
		 * (and/or the deferred acknowledgment). */
		fw_event = malloc(sizeof(struct mpi3mr_fw_event_work), M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!fw_event) {
			printf("%s: allocate failed for fw_event\n", __func__);
			return;
		}

		/* EventDataLength is in 4-byte (dword) units. */
		sz = le16toh(event_reply->EventDataLength) * 4;
		fw_event->event_data = malloc(sz, M_MPI3MR, M_ZERO|M_NOWAIT);

		if (!fw_event->event_data) {
			printf("%s: allocate failed for event_data\n", __func__);
			free(fw_event, M_MPI3MR);
			return;
		}

		bcopy(event_reply->EventData, fw_event->event_data, sz);
		fw_event->event = event_reply->Event;
		/* Track topology/enclosure events used for device mapping. */
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE ) &&
		    sc->track_mapping_events)
			sc->pending_map_events++;

		/*
		 * Events should be processed after Port enable is completed.
		 */
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ) &&
		    !(sc->mpi3mr_flags & MPI3MR_FLAGS_PORT_ENABLE_DONE))
			mpi3mr_startup_increment(sc->cam_sc);

		fw_event->send_ack = ack_req;
		fw_event->event_context = le32toh(event_reply->EventContext);
		fw_event->event_data_size = sz;
		fw_event->process_event = process_evt_bh;

		mtx_lock(&sc->fwevt_lock);
		TAILQ_INSERT_TAIL(&sc->cam_sc->ev_queue, fw_event, ev_link);
		taskqueue_enqueue(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
		mtx_unlock(&sc->fwevt_lock);

	}
out:
	return;
}
4262 
/**
 * mpi3mr_handle_events - Dispatch an event notification reply
 * @sc: Adapter instance reference
 * @data: opaque value passed through to event processing (the caller
 *        supplies the reply frame DMA address here)
 * @def_reply: Default reply frame carrying the event notification
 *
 * Records the IOC change count, logs the event data and hands the
 * reply off to the top-half event processor.
 */
static void mpi3mr_handle_events(struct mpi3mr_softc *sc, uintptr_t data,
    Mpi3DefaultReply_t *def_reply)
{
	Mpi3EventNotificationReply_t *event_reply;

	event_reply = (Mpi3EventNotificationReply_t *)def_reply;
	sc->change_count = event_reply->IOCChangeCount;

	mpi3mr_display_event_data(sc, event_reply);
	mpi3mr_process_events(sc, data, event_reply);
}
4274 
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0, idx;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc;
	Mpi3AddressReplyDescriptor_t *addr_desc;
	Mpi3SuccessReplyDescriptor_t *success_desc;
	Mpi3DefaultReply_t *def_reply = NULL;
	struct mpi3mr_drvr_cmd *cmdptr = NULL;
	Mpi3SCSIIOReply_t *scsi_reply;
	U8 *sense_buf = NULL;

	*reply_dma = 0;
	/* Extract host tag and status based on the descriptor flavor. */
	reply_desc_type = reply_desc->ReplyFlags &
			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		/* Log info is captured whenever masked status bits are set. */
		if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Descriptor points at a full reply frame in host memory. */
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		def_reply = mpi3mr_get_reply_virt_addr(sc, *reply_dma);
		if (def_reply == NULL)
			goto out;
		host_tag = def_reply->HostTag;
		ioc_status = def_reply->IOCStatus;
		if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
			ioc_loginfo = def_reply->IOCLogInfo;
		ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
		/* SCSI IO replies may carry sense data in a separate buffer. */
		if (def_reply->Function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (Mpi3SCSIIOReply_t *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
			    scsi_reply->SenseDataBufferAddress);
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		break;
	default:
		break;
	}
	/* Route the completion to the driver command tracker owning the tag. */
	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		cmdptr = &sc->init_cmds;
		break;
	case MPI3MR_HOSTTAG_IOCTLCMDS:
		cmdptr = &sc->ioctl_cmds;
		break;
	case MPI3MR_HOSTTAG_TMS:
		cmdptr = &sc->host_tm_cmds;
		wakeup((void *)&sc->tm_chan);
		break;
	case MPI3MR_HOSTTAG_PELABORT:
		cmdptr = &sc->pel_abort_cmd;
		break;
	case MPI3MR_HOSTTAG_PELWAIT:
		cmdptr = &sc->pel_cmds;
		break;
	case MPI3MR_HOSTTAG_INVALID:
		/* Unsolicited replies: only event notifications are handled. */
		if (def_reply && def_reply->Function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(sc, *reply_dma ,def_reply);
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Device-removal and event-ack trackers live in indexed tag ranges. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX ) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		cmdptr = &sc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		cmdptr = &sc->evtack_cmds[idx];
	}

	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			/* Publish status/reply/sense to the tracker, then
			 * wake the waiter or run the completion callback. */
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLYVALID;
				memcpy((U8 *)cmdptr->reply, (U8 *)def_reply,
				    sc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_senseprst = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSEBUF_SZ);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->completion);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(sc, cmdptr);
		}
	}
out:
	/* Return the consumed sense buffer to the firmware's free pool. */
	if (sense_buf != NULL)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
4391 
4392 /*
4393  * mpi3mr_complete_admin_cmd:	ISR routine for admin commands
4394  * @sc:				Adapter's soft instance
4395  *
4396  * This function processes admin command completions.
4397  */
/*
 * Drains the admin reply queue and processes each posted descriptor.
 * Returns the number of admin replies processed; 0 when the queue is
 * empty or another context already owns it.
 */
static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
{
	U32 exp_phase = sc->admin_reply_ephase;
	U32 adm_reply_ci = sc->admin_reply_ci;
	U32 num_adm_reply = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;
	U16 threshold_comps = 0;

	/*
	 * Single-consumer guard: only one context may drain the admin reply
	 * queue at a time; bail out if it is already being drained.
	 */
	mtx_lock_spin(&sc->admin_reply_lock);
	if (sc->admin_in_use == false) {
		sc->admin_in_use = true;
		mtx_unlock_spin(&sc->admin_reply_lock);
	} else {
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
		adm_reply_ci;

	/* A phase-bit mismatch means the firmware has posted nothing new. */
	if ((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		mtx_lock_spin(&sc->admin_reply_lock);
		sc->admin_in_use = false;
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	do {
		sc->admin_req_ci = reply_desc->RequestQueueCI;
		mpi3mr_process_admin_reply_desc(sc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_adm_reply++;
		/* Wrap the consumer index and flip the expected phase bit. */
		if (++adm_reply_ci == sc->num_admin_replies) {
			adm_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
			(Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
			    adm_reply_ci;
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;

		/*
		 * Periodically publish the consumer index so the firmware can
		 * reclaim reply slots while a long burst is being drained.
		 */
		if (++threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
			threshold_comps = 0;
		}
	} while (1);

	/* Final CI write, then persist the ring state for the next pass. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
	sc->admin_reply_ci = adm_reply_ci;
	sc->admin_reply_ephase = exp_phase;
	mtx_lock_spin(&sc->admin_reply_lock);
	sc->admin_in_use = false;
	mtx_unlock_spin(&sc->admin_reply_lock);
	return num_adm_reply;
}
4458 
/*
 * mpi3mr_cmd_done:	Complete an IO back to CAM and recycle the command
 * @sc:			Adapter's soft instance
 * @cmd:		Driver command whose ccb is being completed
 */
static void
mpi3mr_cmd_done(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
{
	/* Tear down the DMA mapping for this IO before completing the ccb. */
	mpi3mr_unmap_request(sc, cmd);

	mtx_lock(&sc->mpi3mr_mtx);
	/* Cancel the IO timeout callout if this command still owns one. */
	if (cmd->callout_owner) {
		callout_stop(&cmd->callout);
		cmd->callout_owner = false;
	}

	/* On an unrecoverable controller, report the device as gone. */
	if (sc->unrecoverable)
		mpi3mr_set_ccbstatus(cmd->ccb, CAM_DEV_NOT_THERE);

	xpt_done(cmd->ccb);
	cmd->ccb = NULL;
	mtx_unlock(&sc->mpi3mr_mtx);
	/* Return the command to the adapter's free pool. */
	mpi3mr_release_command(cmd);
}
4478 
/*
 * mpi3mr_process_op_reply_desc:	Process one operational reply descriptor
 * @sc:			Adapter's soft instance
 * @reply_desc:		Descriptor dequeued from an operational reply queue
 * @reply_dma:		Out parameter: DMA address of the reply frame, 0 if none
 *
 * Decodes the descriptor, performs IO-throttling accounting, maps the
 * firmware IOC/SCSI status onto a CAM ccb status and completes the
 * command.  Any sense buffer consumed by the reply is reposted to the
 * firmware before returning.
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc = NULL;
	Mpi3AddressReplyDescriptor_t *addr_desc = NULL;
	Mpi3SuccessReplyDescriptor_t *success_desc = NULL;
	Mpi3SCSIIOReply_t *scsi_reply = NULL;
	U8 *sense_buf = NULL;
	U8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	U32 xfer_count = 0, sense_count = 0, resp_data = 0;
	struct mpi3mr_cmd *cm = NULL;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpi3mr_cam_softc *cam_sc;
	U32 target_id;
	U8 *scsi_cdb;
	struct mpi3mr_target *target = NULL;
	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;
	U8 throttle_enabled_dev = 0;
	static int ratelimit;

	*reply_dma = 0;
	reply_desc_type = reply_desc->ReplyFlags &
			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		scsi_reply = mpi3mr_get_reply_virt_addr(sc,
		    *reply_dma);
		if (scsi_reply == NULL) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "scsi_reply is NULL, "
			    "this shouldn't happen, reply_desc: %p\n",
			    reply_desc);
			goto out;
		}

		host_tag = scsi_reply->HostTag;
		ioc_status = scsi_reply->IOCStatus;
		scsi_status = scsi_reply->SCSIStatus;
		scsi_state = scsi_reply->SCSIState;
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = scsi_reply->TransferCount;
		sense_count = scsi_reply->SenseCount;
		resp_data = scsi_reply->ResponseData;
		sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
		    scsi_reply->SenseDataBufferAddress);
		if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
			ioc_loginfo = scsi_reply->IOCLogInfo;
		ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Ran out of sense buffers\n");

		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		break;
	default:
		break;
	}

	/*
	 * NOTE(review): host_tag comes from the firmware and indexes
	 * cmd_list without a bounds check against max_host_ios — confirm
	 * the firmware cannot return an out-of-range tag here.
	 */
	cm = sc->cmd_list[host_tag];

	if (cm->state == MPI3MR_CMD_STATE_FREE)
		goto out;

	cam_sc = sc->cam_sc;
	ccb = cm->ccb;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;

	scsi_cdb = scsiio_cdb_ptr(csio);

	target = mpi3mr_find_target_by_per_id(cam_sc, target_id);
	if (sc->iot_enable) {
		data_len_blks = csio->dxfer_len >> 9;

		if (target) {
			tg = target->throttle_group;
			throttle_enabled_dev =
				target->io_throttle_enabled;
		}

		if ((data_len_blks >= sc->io_throttle_data_length) &&
		     throttle_enabled_dev) {
			/* Large IO completed: release its throttle budget. */
			mpi3mr_atomic_sub(&sc->pend_large_data_sz, data_len_blks);
			ioc_pend_data_len = mpi3mr_atomic_read(
			    &sc->pend_large_data_sz);
			if (tg) {
				mpi3mr_atomic_sub(&tg->pend_large_data_sz,
					data_len_blks);
				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
				/*
				 * NOTE(review): "ratelimit % 1000" is only true
				 * when ratelimit is a non-multiple of 1000, and
				 * ratelimit is only incremented inside this
				 * branch, so it stays 0 and these debug prints
				 * never fire.  Likely intended to be
				 * "% 1000 == 0" — confirm before changing, as
				 * it alters log volume only.
				 */
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"large vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
						    target->per_id,
						    target->dev_handle,
						    data_len_blks, ioc_pend_data_len,
						    tg_pend_data_len,
						    sc->io_throttle_low,
						    tg->low);
					ratelimit++;
				}
				/* Exit divert once both IOC and TG drain below their low watermarks. */
				if (tg->io_divert  && ((ioc_pend_data_len <=
				    sc->io_throttle_low) &&
				    (tg_pend_data_len <= tg->low))) {
					tg->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
						target->per_id, tg->id);
					mpi3mr_set_io_divert_for_all_vd_in_tg(
					    sc, tg, 0);
				}
			} else {
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "large pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
					    target->per_id,
					    target->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    sc->io_throttle_low);
					ratelimit++;
				}

				if (ioc_pend_data_len <= sc->io_throttle_low) {
					target->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"PD: Coming out of divert perst_id(%d)\n",
						target->per_id);
				}
			}
		/*
		 * Fix: target may be NULL when the per-id lookup above fails;
		 * the original dereferenced it unconditionally here.
		 */
		} else if (target && target->io_divert) {
			ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
			if (!tg) {
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
					    target->per_id,
					    target->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    sc->io_throttle_low);
					ratelimit++;
				}

				if ( ioc_pend_data_len <= sc->io_throttle_low) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"PD: Coming out of divert perst_id(%d)\n",
						target->per_id);
					target->io_divert = 0;
				}

			} else if (ioc_pend_data_len <= sc->io_throttle_low) {
				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
						    target->per_id,
						    target->dev_handle,
						    data_len_blks, ioc_pend_data_len,
						    tg_pend_data_len,
						    sc->io_throttle_low,
						    tg->low);
					ratelimit++;
				}
				if (tg->io_divert  && (tg_pend_data_len <= tg->low)) {
					tg->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
						target->per_id, tg->id);
					mpi3mr_set_io_divert_for_all_vd_in_tg(
					    sc, tg, 0);
				}

			}
		}
	}

	/* Fast path: a SUCCESS descriptor carries no error detail. */
	if (success_desc) {
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		goto out_success;
	}

	/*
	 * An underrun with no data moved and a busy-class SCSI status is
	 * reported as plain SUCCESS so the SCSI status drives the outcome.
	 */
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN
	    && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	/* Copy autosense data into the ccb when the firmware supplied it. */
	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count
	    && sense_buf) {
		int sense_len, returned_sense_len;

		returned_sense_len = min(le32toh(sense_count),
		    sizeof(struct scsi_sense_data));
		if (returned_sense_len < csio->sense_len)
			csio->sense_resid = csio->sense_len -
			    returned_sense_len;
		else
			csio->sense_resid = 0;

		sense_len = min(returned_sense_len,
		    csio->sense_len - csio->sense_resid);
		bzero(&csio->sense_data, sizeof(csio->sense_data));
		bcopy(sense_buf, &csio->sense_data, sense_len);
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	}

	/* Translate the firmware IOC status into a CAM ccb status. */
	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->targ->devinfo == 0)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		mpi3mr_set_ccbstatus(ccb, CAM_SCSI_BUSY);
		mpi3mr_dprint(sc, MPI3MR_TRACE,
		    "func: %s line:%d tgt %u Hosttag %u loginfo %x\n",
		    __func__, __LINE__,
		    target_id, cm->hosttag,
		    le32toh(scsi_reply->IOCLogInfo));
		mpi3mr_dprint(sc, MPI3MR_TRACE,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    scsi_reply->SCSIStatus, scsi_reply->SCSIState,
		    le32toh(xfer_count));
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpi3mr_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->length - le32toh(xfer_count);
		/* FALLTHROUGH */
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		if ((scsi_reply->IOCStatus & MPI3_IOCSTATUS_STATUS_MASK) ==
		    MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpi3mr_dprint(sc, MPI3MR_XINFO, "func: %s line: %d recovered error\n",  __func__, __LINE__);

		/* Completion failed at the transport level. */
		if (scsi_reply->SCSIState & (MPI3_SCSI_STATE_NO_SCSI_STATUS |
		    MPI3_SCSI_STATE_TERMINATED)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
			mpi3mr_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_TASK_ABORTED)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = scsi_reply->SCSIStatus;
		if (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_GOOD)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(scsi_reply->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		break;
	case MPI3_IOCSTATUS_INVALID_SGL:
		mpi3mr_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	default:
		csio->resid = cm->length;
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

out_success:
	/* Any non-success completion freezes the device queue for CAM. */
	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpi3mr_atomic_dec(&cm->targ->outstanding);
	mpi3mr_cmd_done(sc, cm);
	mpi3mr_dprint(sc, MPI3MR_TRACE, "Completion IO path :"
		" cdb[0]: %x targetid: 0x%x SMID: %x ioc_status: 0x%x ioc_loginfo: 0x%x scsi_status: 0x%x "
		"scsi_state: 0x%x response_data: 0x%x\n", scsi_cdb[0], target_id, host_tag,
		ioc_status, ioc_loginfo, scsi_status, scsi_state, resp_data);
	mpi3mr_atomic_dec(&sc->fw_outstanding);
out:

	/* Give the consumed sense buffer back to the firmware. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
4841 
4842 /*
4843  * mpi3mr_complete_io_cmd:	ISR routine for IO commands
4844  * @sc:				Adapter's soft instance
4845  * @irq_ctx:			Driver's internal per IRQ structure
4846  *
4847  * This function processes IO command completions.
4848  */
/*
 * Drains one operational reply queue and processes each posted descriptor.
 * Returns the number of IO replies processed; 0 when the queue is empty,
 * uninitialized, or already being drained by another context.
 */
int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
    struct mpi3mr_irq_context *irq_ctx)
{
	struct mpi3mr_op_reply_queue *op_reply_q = irq_ctx->op_reply_q;
	U32 exp_phase = op_reply_q->ephase;
	U32 reply_ci = op_reply_q->ci;
	U32 num_op_replies = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;
	U16 req_qid = 0, threshold_comps = 0;

	/*
	 * Single-consumer guard: only one context may drain this reply
	 * queue at a time; bail out if it is already being drained.
	 */
	mtx_lock_spin(&op_reply_q->q_lock);
	if (op_reply_q->in_use == false) {
		op_reply_q->in_use = true;
		mtx_unlock_spin(&op_reply_q->q_lock);
	} else {
		mtx_unlock_spin(&op_reply_q->q_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]:reply_desc: (%pa) reply_ci: %x"
		" reply_desc->ReplyFlags: 0x%x\n"
		"reply_q_base_phys: %#016jx reply_q_base: (%pa) exp_phase: %x\n",
		op_reply_q->qid, reply_desc, reply_ci, reply_desc->ReplyFlags, op_reply_q->q_base_phys,
		op_reply_q->q_base, exp_phase);

	/* Phase-bit mismatch means nothing new was posted; qid 0 is unused. */
	if (((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) || !op_reply_q->qid) {
		mtx_lock_spin(&op_reply_q->q_lock);
		op_reply_q->in_use = false;
		mtx_unlock_spin(&op_reply_q->q_lock);
		return 0;
	}

	do {
		/* Mirror the firmware's request-queue consumer index. */
		req_qid = reply_desc->RequestQueueID;
		sc->op_req_q[req_qid - 1].ci =
		    reply_desc->RequestQueueCI;

		mpi3mr_process_op_reply_desc(sc, reply_desc, &reply_dma);
		mpi3mr_atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_op_replies++;
		/* Wrap the consumer index and flip the expected phase bit. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;

		/*
		 * Periodically publish the consumer index so the firmware can
		 * reclaim reply slots while a long burst is being drained.
		 */
		if (++threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
			threshold_comps = 0;
		}

	} while (1);


	/* Final CI write, then persist the ring state for the next pass. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	mtx_lock_spin(&op_reply_q->q_lock);
	op_reply_q->in_use = false;
	mtx_unlock_spin(&op_reply_q->q_lock);
	return num_op_replies;
}
4920 
4921 /*
4922  * mpi3mr_isr:			Primary ISR function
 * @privdata:			Driver's internal per IRQ structure
 *
 * This is the driver's primary ISR function, called whenever an admin or IO
 * command completes.
4927  */
mpi3mr_isr(void * privdata)4928 void mpi3mr_isr(void *privdata)
4929 {
4930 	struct mpi3mr_irq_context *irq_ctx = (struct mpi3mr_irq_context *)privdata;
4931 	struct mpi3mr_softc *sc = irq_ctx->sc;
4932 	U16 msi_idx;
4933 
4934 	if (!irq_ctx)
4935 		return;
4936 
4937 	msi_idx = irq_ctx->msix_index;
4938 
4939 	if (!sc->intr_enabled)
4940 		return;
4941 
4942 	if (!msi_idx)
4943 		mpi3mr_complete_admin_cmd(sc);
4944 
4945 	if (irq_ctx->op_reply_q && irq_ctx->op_reply_q->qid) {
4946 		mpi3mr_complete_io_cmd(sc, irq_ctx);
4947 	}
4948 }
4949 
4950 /*
4951  * mpi3mr_alloc_requests - Allocates host commands
4952  * @sc: Adapter reference
4953  *
4954  * This function allocates controller supported host commands
4955  *
4956  * Return: 0 on success and proper error codes on failure
4957  */
4958 int
mpi3mr_alloc_requests(struct mpi3mr_softc * sc)4959 mpi3mr_alloc_requests(struct mpi3mr_softc *sc)
4960 {
4961 	struct mpi3mr_cmd *cmd;
4962 	int i, j, nsegs, ret;
4963 
4964 	nsegs = MPI3MR_SG_DEPTH;
4965 	ret = bus_dma_tag_create( sc->mpi3mr_parent_dmat,    /* parent */
4966 				1, 0,			/* algnmnt, boundary */
4967 				sc->dma_loaddr,		/* lowaddr */
4968 				BUS_SPACE_MAXADDR,	/* highaddr */
4969 				NULL, NULL,		/* filter, filterarg */
4970 				BUS_SPACE_MAXSIZE,	/* maxsize */
4971                                 nsegs,			/* nsegments */
4972 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4973                                 BUS_DMA_ALLOCNOW,	/* flags */
4974                                 busdma_lock_mutex,	/* lockfunc */
4975 				&sc->io_lock,	/* lockarg */
4976 				&sc->buffer_dmat);
4977 	if (ret) {
4978 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate buffer DMA tag ret: %d\n", ret);
4979 		return (ENOMEM);
4980         }
4981 
4982 	/*
4983 	 * sc->cmd_list is an array of struct mpi3mr_cmd pointers.
4984 	 * Allocate the dynamic array first and then allocate individual
4985 	 * commands.
4986 	 */
4987 	sc->cmd_list = malloc(sizeof(struct mpi3mr_cmd *) * sc->max_host_ios,
4988 	    M_MPI3MR, M_NOWAIT | M_ZERO);
4989 
4990 	if (!sc->cmd_list) {
4991 		device_printf(sc->mpi3mr_dev, "Cannot alloc memory for mpt_cmd_list.\n");
4992 		return (ENOMEM);
4993 	}
4994 
4995 	for (i = 0; i < sc->max_host_ios; i++) {
4996 		sc->cmd_list[i] = malloc(sizeof(struct mpi3mr_cmd),
4997 		    M_MPI3MR, M_NOWAIT | M_ZERO);
4998 		if (!sc->cmd_list[i]) {
4999 			for (j = 0; j < i; j++)
5000 				free(sc->cmd_list[j], M_MPI3MR);
5001 			free(sc->cmd_list, M_MPI3MR);
5002 			sc->cmd_list = NULL;
5003 			return (ENOMEM);
5004 		}
5005 	}
5006 
5007 	for (i = 1; i < sc->max_host_ios; i++) {
5008 		cmd = sc->cmd_list[i];
5009 		cmd->hosttag = i;
5010 		cmd->sc = sc;
5011 		cmd->state = MPI3MR_CMD_STATE_BUSY;
5012 		callout_init_mtx(&cmd->callout, &sc->mpi3mr_mtx, 0);
5013 		cmd->ccb = NULL;
5014 		TAILQ_INSERT_TAIL(&(sc->cmd_list_head), cmd, next);
5015 		if (bus_dmamap_create(sc->buffer_dmat, 0, &cmd->dmamap))
5016 			return ENOMEM;
5017 	}
5018 	return (0);
5019 }
5020 
5021 /*
 * mpi3mr_get_command:		Get a command structure from the free command pool
5023  * @sc:				Adapter soft instance
5024  * Return:			MPT command reference
5025  *
5026  * This function returns an MPT command to the caller.
5027  */
5028 struct mpi3mr_cmd *
mpi3mr_get_command(struct mpi3mr_softc * sc)5029 mpi3mr_get_command(struct mpi3mr_softc *sc)
5030 {
5031 	struct mpi3mr_cmd *cmd = NULL;
5032 
5033 	mtx_lock(&sc->cmd_pool_lock);
5034 	if (!TAILQ_EMPTY(&sc->cmd_list_head)) {
5035 		cmd = TAILQ_FIRST(&sc->cmd_list_head);
5036 		TAILQ_REMOVE(&sc->cmd_list_head, cmd, next);
5037 	} else {
5038 		goto out;
5039 	}
5040 
5041 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Get command SMID: 0x%x\n", cmd->hosttag);
5042 
5043 	memset((uint8_t *)&cmd->io_request, 0, MPI3MR_AREQ_FRAME_SZ);
5044 	cmd->data_dir = 0;
5045 	cmd->ccb = NULL;
5046 	cmd->targ = NULL;
5047 	cmd->state = MPI3MR_CMD_STATE_BUSY;
5048 	cmd->data = NULL;
5049 	cmd->length = 0;
5050 out:
5051 	mtx_unlock(&sc->cmd_pool_lock);
5052 	return cmd;
5053 }
5054 
5055 /*
5056  * mpi3mr_release_command:	Return a cmd to free command pool
5057  * input:			Command packet for return to free command pool
5058  *
5059  * This function returns an MPT command to the free command list.
5060  */
5061 void
mpi3mr_release_command(struct mpi3mr_cmd * cmd)5062 mpi3mr_release_command(struct mpi3mr_cmd *cmd)
5063 {
5064 	struct mpi3mr_softc *sc = cmd->sc;
5065 
5066 	mtx_lock(&sc->cmd_pool_lock);
5067 	TAILQ_INSERT_HEAD(&(sc->cmd_list_head), cmd, next);
5068 	cmd->state = MPI3MR_CMD_STATE_FREE;
5069 	cmd->req_qidx = 0;
5070 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Release command SMID: 0x%x\n", cmd->hosttag);
5071 	mtx_unlock(&sc->cmd_pool_lock);
5072 
5073 	return;
5074 }
5075 
5076  /**
5077  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
5078  * @sc: Adapter instance reference
5079  *
5080  * Free the DMA memory allocated for IOCTL handling purpose.
5081  *
5082  * Return: None
5083  */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc * sc)5084 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc *sc)
5085 {
5086 	U16 i;
5087 	struct dma_memory_desc *mem_desc;
5088 
5089 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5090 		mem_desc = &sc->ioctl_sge[i];
5091 		if (mem_desc->addr && mem_desc->dma_addr) {
5092 			bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5093 			bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5094 			mem_desc->addr = NULL;
5095 			if (mem_desc->tag != NULL)
5096 				bus_dma_tag_destroy(mem_desc->tag);
5097 		}
5098 	}
5099 
5100 	mem_desc = &sc->ioctl_chain_sge;
5101 	if (mem_desc->addr && mem_desc->dma_addr) {
5102 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5103 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5104 		mem_desc->addr = NULL;
5105 		if (mem_desc->tag != NULL)
5106 			bus_dma_tag_destroy(mem_desc->tag);
5107 	}
5108 
5109 	mem_desc = &sc->ioctl_resp_sge;
5110 	if (mem_desc->addr && mem_desc->dma_addr) {
5111 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5112 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5113 		mem_desc->addr = NULL;
5114 		if (mem_desc->tag != NULL)
5115 			bus_dma_tag_destroy(mem_desc->tag);
5116 	}
5117 
5118 	sc->ioctl_sges_allocated = false;
5119 }
5120 
5121 /**
5122  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
5123  * @sc: Adapter instance reference
5124  *
5125  * This function allocates dmaable memory required to handle the
5126  * application issued MPI3 IOCTL requests.
5127  *
5128  * Return: None
5129  */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc * sc)5130 void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc *sc)
5131 {
5132 	struct dma_memory_desc *mem_desc;
5133 	U16 i;
5134 
5135 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5136 		mem_desc = &sc->ioctl_sge[i];
5137 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
5138 
5139 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5140 					4, 0,			/* algnmnt, boundary */
5141 					sc->dma_loaddr,		/* lowaddr */
5142 					BUS_SPACE_MAXADDR,	/* highaddr */
5143 					NULL, NULL,		/* filter, filterarg */
5144 					mem_desc->size,		/* maxsize */
5145 					1,			/* nsegments */
5146 					mem_desc->size,		/* maxsegsize */
5147 					0,			/* flags */
5148 					NULL, NULL,		/* lockfunc, lockarg */
5149 					&mem_desc->tag)) {
5150 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5151 			goto out_failed;
5152 		}
5153 
5154 		if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5155 		    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5156 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
5157 			goto out_failed;
5158 		}
5159 		bzero(mem_desc->addr, mem_desc->size);
5160 		bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5161 		    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5162 
5163 		if (!mem_desc->addr)
5164 			goto out_failed;
5165 	}
5166 
5167 	mem_desc = &sc->ioctl_chain_sge;
5168 	mem_desc->size = MPI3MR_4K_PGSZ;
5169 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5170 				4, 0,			/* algnmnt, boundary */
5171 				sc->dma_loaddr,		/* lowaddr */
5172 				BUS_SPACE_MAXADDR,	/* highaddr */
5173 				NULL, NULL,		/* filter, filterarg */
5174 				mem_desc->size,		/* maxsize */
5175 				1,			/* nsegments */
5176 				mem_desc->size,		/* maxsegsize */
5177 				0,			/* flags */
5178 				NULL, NULL,		/* lockfunc, lockarg */
5179 				&mem_desc->tag)) {
5180 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5181 		goto out_failed;
5182 	}
5183 
5184 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5185 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5186 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
5187 		goto out_failed;
5188 	}
5189 	bzero(mem_desc->addr, mem_desc->size);
5190 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5191 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5192 
5193 	if (!mem_desc->addr)
5194 		goto out_failed;
5195 
5196 	mem_desc = &sc->ioctl_resp_sge;
5197 	mem_desc->size = MPI3MR_4K_PGSZ;
5198 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5199 				4, 0,			/* algnmnt, boundary */
5200 				sc->dma_loaddr,		/* lowaddr */
5201 				BUS_SPACE_MAXADDR,	/* highaddr */
5202 				NULL, NULL,		/* filter, filterarg */
5203 				mem_desc->size,		/* maxsize */
5204 				1,			/* nsegments */
5205 				mem_desc->size,		/* maxsegsize */
5206 				0,			/* flags */
5207 				NULL, NULL,		/* lockfunc, lockarg */
5208 				&mem_desc->tag)) {
5209 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5210 		goto out_failed;
5211 	}
5212 
5213 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5214 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5215 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5216 		goto out_failed;
5217 	}
5218 	bzero(mem_desc->addr, mem_desc->size);
5219 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5220 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5221 
5222 	if (!mem_desc->addr)
5223 		goto out_failed;
5224 
5225 	sc->ioctl_sges_allocated = true;
5226 
5227 	return;
5228 out_failed:
5229 	printf("cannot allocate DMA memory for the mpt commands"
5230 	    "  from the applications, application interface for MPT command is disabled\n");
5231 	mpi3mr_free_ioctl_dma_memory(sc);
5232 }
5233 
/**
 * mpi3mr_destory_mtx - Destroy all adapter-level mutexes
 * @sc: Adapter instance reference
 *
 * Tear down every mutex created during controller initialization:
 * the admin request/reply locks, the per-queue operational
 * request/reply locks, the internal driver command completion
 * locks and the remaining adapter-wide locks.  Every mtx_destroy()
 * is guarded by mtx_initialized(), so this is safe to call on a
 * partially initialized adapter (e.g. on an attach failure path).
 *
 * Return: Nothing.
 */
void
mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
{
	int i;
	struct mpi3mr_op_req_queue *op_req_q;
	struct mpi3mr_op_reply_queue *op_reply_q;

	/* Queue locks exist only if the corresponding queue memory does. */
	if (sc->admin_reply) {
		if (mtx_initialized(&sc->admin_reply_lock))
			mtx_destroy(&sc->admin_reply_lock);
	}

	if (sc->op_reply_q) {
		for(i = 0; i < sc->num_queues; i++) {
			op_reply_q = sc->op_reply_q + i;
			if (mtx_initialized(&op_reply_q->q_lock))
				mtx_destroy(&op_reply_q->q_lock);
		}
	}

	if (sc->op_req_q) {
		for(i = 0; i < sc->num_queues; i++) {
			op_req_q = sc->op_req_q + i;
			if (mtx_initialized(&op_req_q->q_lock))
				mtx_destroy(&op_req_q->q_lock);
		}
	}

	/* Internal (non-IO) driver command completion locks. */
	if (mtx_initialized(&sc->init_cmds.completion.lock))
		mtx_destroy(&sc->init_cmds.completion.lock);

	if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
		mtx_destroy(&sc->ioctl_cmds.completion.lock);

	if (mtx_initialized(&sc->host_tm_cmds.completion.lock))
		mtx_destroy(&sc->host_tm_cmds.completion.lock);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		if (mtx_initialized(&sc->dev_rmhs_cmds[i].completion.lock))
			mtx_destroy(&sc->dev_rmhs_cmds[i].completion.lock);
	}

	/* Remaining adapter-wide locks. */
	if (mtx_initialized(&sc->reset_mutex))
		mtx_destroy(&sc->reset_mutex);

	if (mtx_initialized(&sc->target_lock))
		mtx_destroy(&sc->target_lock);

	if (mtx_initialized(&sc->fwevt_lock))
		mtx_destroy(&sc->fwevt_lock);

	if (mtx_initialized(&sc->cmd_pool_lock))
		mtx_destroy(&sc->cmd_pool_lock);

	if (mtx_initialized(&sc->reply_free_q_lock))
		mtx_destroy(&sc->reply_free_q_lock);

	if (mtx_initialized(&sc->sense_buf_q_lock))
		mtx_destroy(&sc->sense_buf_q_lock);

	if (mtx_initialized(&sc->chain_buf_lock))
		mtx_destroy(&sc->chain_buf_lock);

	if (mtx_initialized(&sc->admin_req_lock))
		mtx_destroy(&sc->admin_req_lock);

	if (mtx_initialized(&sc->mpi3mr_mtx))
		mtx_destroy(&sc->mpi3mr_mtx);
}
5303 
5304 /**
5305  * mpi3mr_free_mem - Freeup adapter level data structures
5306  * @sc: Adapter reference
5307  *
5308  * Return: Nothing.
5309  */
5310 void
mpi3mr_free_mem(struct mpi3mr_softc * sc)5311 mpi3mr_free_mem(struct mpi3mr_softc *sc)
5312 {
5313 	int i;
5314 	struct mpi3mr_op_req_queue *op_req_q;
5315 	struct mpi3mr_op_reply_queue *op_reply_q;
5316 	struct mpi3mr_irq_context *irq_ctx;
5317 
5318 	if (sc->cmd_list) {
5319 		for (i = 0; i < sc->max_host_ios; i++) {
5320 			free(sc->cmd_list[i], M_MPI3MR);
5321 		}
5322 		free(sc->cmd_list, M_MPI3MR);
5323 		sc->cmd_list = NULL;
5324 	}
5325 
5326 	if (sc->pel_seq_number && sc->pel_seq_number_dma) {
5327 		bus_dmamap_unload(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap);
5328 		bus_dmamem_free(sc->pel_seq_num_dmatag, sc->pel_seq_number, sc->pel_seq_num_dmamap);
5329 		sc->pel_seq_number = NULL;
5330 		if (sc->pel_seq_num_dmatag != NULL)
5331 			bus_dma_tag_destroy(sc->pel_seq_num_dmatag);
5332 	}
5333 
5334 	if (sc->throttle_groups) {
5335 		free(sc->throttle_groups, M_MPI3MR);
5336 		sc->throttle_groups = NULL;
5337 	}
5338 
5339 	/* Free up operational queues*/
5340 	if (sc->op_req_q) {
5341 		for (i = 0; i < sc->num_queues; i++) {
5342 			op_req_q = sc->op_req_q + i;
5343 			if (op_req_q->q_base && op_req_q->q_base_phys) {
5344 				bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
5345 				bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
5346 				op_req_q->q_base = NULL;
5347 				if (op_req_q->q_base_tag != NULL)
5348 					bus_dma_tag_destroy(op_req_q->q_base_tag);
5349 			}
5350 		}
5351 		free(sc->op_req_q, M_MPI3MR);
5352 		sc->op_req_q = NULL;
5353 	}
5354 
5355 	if (sc->op_reply_q) {
5356 		for (i = 0; i < sc->num_queues; i++) {
5357 			op_reply_q = sc->op_reply_q + i;
5358 			if (op_reply_q->q_base && op_reply_q->q_base_phys) {
5359 				bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
5360 				bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
5361 				op_reply_q->q_base = NULL;
5362 				if (op_reply_q->q_base_tag != NULL)
5363 					bus_dma_tag_destroy(op_reply_q->q_base_tag);
5364 			}
5365 		}
5366 		free(sc->op_reply_q, M_MPI3MR);
5367 		sc->op_reply_q = NULL;
5368 	}
5369 
5370 	/* Free up chain buffers*/
5371 	if (sc->chain_sgl_list) {
5372 		for (i = 0; i < sc->chain_buf_count; i++) {
5373 			if (sc->chain_sgl_list[i].buf && sc->chain_sgl_list[i].buf_phys) {
5374 				bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
5375 				bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf,
5376 						sc->chain_sgl_list[i].buf_dmamap);
5377 				sc->chain_sgl_list[i].buf = NULL;
5378 			}
5379 		}
5380 		if (sc->chain_sgl_list_tag != NULL)
5381 			bus_dma_tag_destroy(sc->chain_sgl_list_tag);
5382 		free(sc->chain_sgl_list, M_MPI3MR);
5383 		sc->chain_sgl_list = NULL;
5384 	}
5385 
5386 	if (sc->chain_bitmap) {
5387 		free(sc->chain_bitmap, M_MPI3MR);
5388 		sc->chain_bitmap = NULL;
5389 	}
5390 
5391 	for (i = 0; i < sc->msix_count; i++) {
5392 		irq_ctx = sc->irq_ctx + i;
5393 		if (irq_ctx)
5394 			irq_ctx->op_reply_q = NULL;
5395 	}
5396 
5397 	/* Free reply_buf_tag */
5398 	if (sc->reply_buf && sc->reply_buf_phys) {
5399 		bus_dmamap_unload(sc->reply_buf_tag, sc->reply_buf_dmamap);
5400 		bus_dmamem_free(sc->reply_buf_tag, sc->reply_buf,
5401 				sc->reply_buf_dmamap);
5402 		sc->reply_buf = NULL;
5403 		if (sc->reply_buf_tag != NULL)
5404 			bus_dma_tag_destroy(sc->reply_buf_tag);
5405 	}
5406 
5407 	/* Free reply_free_q_tag */
5408 	if (sc->reply_free_q && sc->reply_free_q_phys) {
5409 		bus_dmamap_unload(sc->reply_free_q_tag, sc->reply_free_q_dmamap);
5410 		bus_dmamem_free(sc->reply_free_q_tag, sc->reply_free_q,
5411 				sc->reply_free_q_dmamap);
5412 		sc->reply_free_q = NULL;
5413 		if (sc->reply_free_q_tag != NULL)
5414 			bus_dma_tag_destroy(sc->reply_free_q_tag);
5415 	}
5416 
5417 	/* Free sense_buf_tag */
5418 	if (sc->sense_buf && sc->sense_buf_phys) {
5419 		bus_dmamap_unload(sc->sense_buf_tag, sc->sense_buf_dmamap);
5420 		bus_dmamem_free(sc->sense_buf_tag, sc->sense_buf,
5421 				sc->sense_buf_dmamap);
5422 		sc->sense_buf = NULL;
5423 		if (sc->sense_buf_tag != NULL)
5424 			bus_dma_tag_destroy(sc->sense_buf_tag);
5425 	}
5426 
5427 	/* Free sense_buf_q_tag */
5428 	if (sc->sense_buf_q && sc->sense_buf_q_phys) {
5429 		bus_dmamap_unload(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap);
5430 		bus_dmamem_free(sc->sense_buf_q_tag, sc->sense_buf_q,
5431 				sc->sense_buf_q_dmamap);
5432 		sc->sense_buf_q = NULL;
5433 		if (sc->sense_buf_q_tag != NULL)
5434 			bus_dma_tag_destroy(sc->sense_buf_q_tag);
5435 	}
5436 
5437 	/* Free up internal(non-IO) commands*/
5438 	if (sc->init_cmds.reply) {
5439 		free(sc->init_cmds.reply, M_MPI3MR);
5440 		sc->init_cmds.reply = NULL;
5441 	}
5442 
5443 	if (sc->ioctl_cmds.reply) {
5444 		free(sc->ioctl_cmds.reply, M_MPI3MR);
5445 		sc->ioctl_cmds.reply = NULL;
5446 	}
5447 
5448 	if (sc->pel_cmds.reply) {
5449 		free(sc->pel_cmds.reply, M_MPI3MR);
5450 		sc->pel_cmds.reply = NULL;
5451 	}
5452 
5453 	if (sc->pel_abort_cmd.reply) {
5454 		free(sc->pel_abort_cmd.reply, M_MPI3MR);
5455 		sc->pel_abort_cmd.reply = NULL;
5456 	}
5457 
5458 	if (sc->host_tm_cmds.reply) {
5459 		free(sc->host_tm_cmds.reply, M_MPI3MR);
5460 		sc->host_tm_cmds.reply = NULL;
5461 	}
5462 
5463 	if (sc->log_data_buffer) {
5464 		free(sc->log_data_buffer, M_MPI3MR);
5465 		sc->log_data_buffer = NULL;
5466 	}
5467 
5468 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5469 		if (sc->dev_rmhs_cmds[i].reply) {
5470 			free(sc->dev_rmhs_cmds[i].reply, M_MPI3MR);
5471 			sc->dev_rmhs_cmds[i].reply = NULL;
5472 		}
5473 	}
5474 
5475 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5476 		if (sc->evtack_cmds[i].reply) {
5477 			free(sc->evtack_cmds[i].reply, M_MPI3MR);
5478 			sc->evtack_cmds[i].reply = NULL;
5479 		}
5480 	}
5481 
5482 	if (sc->removepend_bitmap) {
5483 		free(sc->removepend_bitmap, M_MPI3MR);
5484 		sc->removepend_bitmap = NULL;
5485 	}
5486 
5487 	if (sc->devrem_bitmap) {
5488 		free(sc->devrem_bitmap, M_MPI3MR);
5489 		sc->devrem_bitmap = NULL;
5490 	}
5491 
5492 	if (sc->evtack_cmds_bitmap) {
5493 		free(sc->evtack_cmds_bitmap, M_MPI3MR);
5494 		sc->evtack_cmds_bitmap = NULL;
5495 	}
5496 
5497 	/* Free Admin reply*/
5498 	if (sc->admin_reply && sc->admin_reply_phys) {
5499 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
5500 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
5501 				sc->admin_reply_dmamap);
5502 		sc->admin_reply = NULL;
5503 		if (sc->admin_reply_tag != NULL)
5504 			bus_dma_tag_destroy(sc->admin_reply_tag);
5505 	}
5506 
5507 	/* Free Admin request*/
5508 	if (sc->admin_req && sc->admin_req_phys) {
5509 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
5510 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
5511 				sc->admin_req_dmamap);
5512 		sc->admin_req = NULL;
5513 		if (sc->admin_req_tag != NULL)
5514 			bus_dma_tag_destroy(sc->admin_req_tag);
5515 	}
5516 	mpi3mr_free_ioctl_dma_memory(sc);
5517 
5518 }
5519 
5520 /**
5521  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5522  * @sc: Adapter instance reference
5523  * @cmdptr: Internal command tracker
5524  *
5525  * Complete an internal driver commands with state indicating it
5526  * is completed due to reset.
5527  *
5528  * Return: Nothing.
5529  */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * cmdptr)5530 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc *sc,
5531 	struct mpi3mr_drvr_cmd *cmdptr)
5532 {
5533 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5534 		cmdptr->state |= MPI3MR_CMD_RESET;
5535 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5536 		if (cmdptr->is_waiting) {
5537 			complete(&cmdptr->completion);
5538 			cmdptr->is_waiting = 0;
5539 		} else if (cmdptr->callback)
5540 			cmdptr->callback(sc, cmdptr);
5541 	}
5542 }
5543 
5544 /**
5545  * mpi3mr_flush_drv_cmds - Flush internal driver commands
5546  * @sc: Adapter instance reference
5547  *
5548  * Flush all internal driver commands post reset
5549  *
5550  * Return: Nothing.
5551  */
mpi3mr_flush_drv_cmds(struct mpi3mr_softc * sc)5552 static void mpi3mr_flush_drv_cmds(struct mpi3mr_softc *sc)
5553 {
5554 	int i = 0;
5555 	struct mpi3mr_drvr_cmd *cmdptr;
5556 
5557 	cmdptr = &sc->init_cmds;
5558 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5559 
5560 	cmdptr = &sc->ioctl_cmds;
5561 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5562 
5563 	cmdptr = &sc->host_tm_cmds;
5564 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5565 
5566 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5567 		cmdptr = &sc->dev_rmhs_cmds[i];
5568 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5569 	}
5570 
5571 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5572 		cmdptr = &sc->evtack_cmds[i];
5573 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5574 	}
5575 
5576 	cmdptr = &sc->pel_cmds;
5577 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5578 
5579 	cmdptr = &sc->pel_abort_cmd;
5580 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5581 }
5582 
5583 
5584 /**
5585  * mpi3mr_memset_buffers - memset memory for a controller
5586  * @sc: Adapter instance reference
5587  *
5588  * clear all the memory allocated for a controller, typically
5589  * called post reset to reuse the memory allocated during the
5590  * controller init.
5591  *
5592  * Return: Nothing.
5593  */
mpi3mr_memset_buffers(struct mpi3mr_softc * sc)5594 static void mpi3mr_memset_buffers(struct mpi3mr_softc *sc)
5595 {
5596 	U16 i;
5597 	struct mpi3mr_throttle_group_info *tg;
5598 
5599 	memset(sc->admin_req, 0, sc->admin_req_q_sz);
5600 	memset(sc->admin_reply, 0, sc->admin_reply_q_sz);
5601 
5602 	memset(sc->init_cmds.reply, 0, sc->reply_sz);
5603 	memset(sc->ioctl_cmds.reply, 0, sc->reply_sz);
5604 	memset(sc->host_tm_cmds.reply, 0, sc->reply_sz);
5605 	memset(sc->pel_cmds.reply, 0, sc->reply_sz);
5606 	memset(sc->pel_abort_cmd.reply, 0, sc->reply_sz);
5607 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5608 		memset(sc->dev_rmhs_cmds[i].reply, 0, sc->reply_sz);
5609 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5610 		memset(sc->evtack_cmds[i].reply, 0, sc->reply_sz);
5611 	memset(sc->removepend_bitmap, 0, sc->dev_handle_bitmap_sz);
5612 	memset(sc->devrem_bitmap, 0, sc->devrem_bitmap_sz);
5613 	memset(sc->evtack_cmds_bitmap, 0, sc->evtack_cmds_bitmap_sz);
5614 
5615 	for (i = 0; i < sc->num_queues; i++) {
5616 		sc->op_reply_q[i].qid = 0;
5617 		sc->op_reply_q[i].ci = 0;
5618 		sc->op_reply_q[i].num_replies = 0;
5619 		sc->op_reply_q[i].ephase = 0;
5620 		mpi3mr_atomic_set(&sc->op_reply_q[i].pend_ios, 0);
5621 		memset(sc->op_reply_q[i].q_base, 0, sc->op_reply_q[i].qsz);
5622 
5623 		sc->op_req_q[i].ci = 0;
5624 		sc->op_req_q[i].pi = 0;
5625 		sc->op_req_q[i].num_reqs = 0;
5626 		sc->op_req_q[i].qid = 0;
5627 		sc->op_req_q[i].reply_qid = 0;
5628 		memset(sc->op_req_q[i].q_base, 0, sc->op_req_q[i].qsz);
5629 	}
5630 
5631 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
5632 	if (sc->throttle_groups) {
5633 		tg = sc->throttle_groups;
5634 		for (i = 0; i < sc->num_io_throttle_group; i++, tg++) {
5635 			tg->id = 0;
5636 			tg->fw_qd = 0;
5637 			tg->modified_qd = 0;
5638 			tg->io_divert= 0;
5639 			tg->high = 0;
5640 			tg->low = 0;
5641 			mpi3mr_atomic_set(&tg->pend_large_data_sz, 0);
5642 		}
5643  	}
5644 }
5645 
5646 /**
5647  * mpi3mr_invalidate_devhandles -Invalidate device handles
5648  * @sc: Adapter instance reference
5649  *
5650  * Invalidate the device handles in the target device structures
5651  * . Called post reset prior to reinitializing the controller.
5652  *
5653  * Return: Nothing.
5654  */
mpi3mr_invalidate_devhandles(struct mpi3mr_softc * sc)5655 static void mpi3mr_invalidate_devhandles(struct mpi3mr_softc *sc)
5656 {
5657 	struct mpi3mr_target *target = NULL;
5658 
5659 	mtx_lock_spin(&sc->target_lock);
5660 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5661 		if (target) {
5662 			target->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
5663 			target->io_throttle_enabled = 0;
5664 			target->io_divert = 0;
5665 			target->throttle_group = NULL;
5666 			target->ws_len = 0;
5667 		}
5668 	}
5669 	mtx_unlock_spin(&sc->target_lock);
5670 }
5671 
5672 /**
5673  * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
5674  * @sc: Adapter instance reference
5675  *
5676  * This is executed post controller reset to identify any
5677  * missing devices during reset and remove from the upper layers
5678  * or expose any newly detected device to the upper layers.
5679  *
5680  * Return: Nothing.
5681  */
5682 
mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc * sc)5683 static void mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc *sc)
5684 {
5685 	struct mpi3mr_target *target = NULL;
5686 	struct mpi3mr_target *target_temp = NULL;
5687 
5688 	TAILQ_FOREACH_SAFE(target, &sc->cam_sc->tgt_list, tgt_next, target_temp) {
5689 		if (target->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
5690 			if (target->exposed_to_os)
5691 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
5692 			mpi3mr_remove_device_from_list(sc, target, true);
5693 		} else if (target->is_hidden && target->exposed_to_os) {
5694 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
5695 		}
5696 	}
5697 
5698 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5699 		if ((target->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
5700 		    !target->is_hidden && !target->exposed_to_os) {
5701 			mpi3mr_add_device(sc, target->per_id);
5702 		}
5703 	}
5704 
5705 }
5706 
mpi3mr_flush_io(struct mpi3mr_softc * sc)5707 static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
5708 {
5709 	int i;
5710 	struct mpi3mr_cmd *cmd = NULL;
5711 	union ccb *ccb = NULL;
5712 
5713 	for (i = 0; i < sc->max_host_ios; i++) {
5714 		cmd = sc->cmd_list[i];
5715 
5716 		if (cmd && cmd->ccb) {
5717 			if (cmd->callout_owner) {
5718 				ccb = (union ccb *)(cmd->ccb);
5719 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
5720 				mpi3mr_atomic_dec(&sc->fw_outstanding);
5721 				mpi3mr_atomic_dec(&cmd->targ->outstanding);
5722 				mpi3mr_cmd_done(sc, cmd);
5723 			} else {
5724 				cmd->ccb = NULL;
5725 				mpi3mr_release_command(cmd);
5726 			}
5727 		}
5728 	}
5729 }
5730 /**
5731  * mpi3mr_clear_reset_history - Clear reset history
5732  * @sc: Adapter instance reference
5733  *
5734  * Write the reset history bit in IOC Status to clear the bit,
5735  * if it is already set.
5736  *
5737  * Return: Nothing.
5738  */
mpi3mr_clear_reset_history(struct mpi3mr_softc * sc)5739 static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
5740 {
5741 	U32 ioc_status;
5742 
5743 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5744 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
5745 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
5746 }
5747 
5748 /**
5749  * mpi3mr_set_diagsave - Set diag save bit for snapdump
5750  * @sc: Adapter reference
5751  *
5752  * Set diag save bit in IOC configuration register to enable
5753  * snapdump.
5754  *
5755  * Return: Nothing.
5756  */
mpi3mr_set_diagsave(struct mpi3mr_softc * sc)5757 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc)
5758 {
5759 	U32 ioc_config;
5760 
5761 	ioc_config =
5762 	    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5763 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
5764 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
5765 }
5766 
5767 /**
5768  * mpi3mr_issue_reset - Issue reset to the controller
5769  * @sc: Adapter reference
5770  * @reset_type: Reset type
5771  * @reset_reason: Reset reason code
5772  *
5773  * Unlock the host diagnostic registers and write the specific
5774  * reset type to that, wait for reset acknowledgement from the
5775  * controller, if the reset is not successful retry for the
5776  * predefined number of times.
5777  *
5778  * Return: 0 on success, non-zero on failure.
5779  */
mpi3mr_issue_reset(struct mpi3mr_softc * sc,U16 reset_type,U16 reset_reason)5780 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
5781 	U16 reset_reason)
5782 {
5783 	int retval = -1;
5784 	U8 unlock_retry_count = 0;
5785 	U32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
5786 	U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
5787 
5788 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
5789 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
5790 		return retval;
5791 	if (sc->unrecoverable)
5792 		return retval;
5793 
5794 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
5795 		retval = 0;
5796 		return retval;
5797 	}
5798 
5799 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s reset due to %s(0x%x)\n",
5800 	    mpi3mr_reset_type_name(reset_type),
5801 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
5802 
5803 	mpi3mr_clear_reset_history(sc);
5804 	do {
5805 		mpi3mr_dprint(sc, MPI3MR_INFO,
5806 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
5807 		    ++unlock_retry_count);
5808 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
5809 			mpi3mr_dprint(sc, MPI3MR_ERROR,
5810 			    "%s reset failed! due to host diag register unlock failure"
5811 			    "host_diagnostic(0x%08x)\n", mpi3mr_reset_type_name(reset_type),
5812 			    host_diagnostic);
5813 			sc->unrecoverable = 1;
5814 			return retval;
5815 		}
5816 
5817 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5818 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH);
5819 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5820 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST);
5821 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5822 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5823 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5824 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD);
5825 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5826 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH);
5827 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5828 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH);
5829 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5830 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH);
5831 
5832 		DELAY(1000); /* delay in usec */
5833 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
5834 		mpi3mr_dprint(sc, MPI3MR_INFO,
5835 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
5836 		    unlock_retry_count, host_diagnostic);
5837 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
5838 
5839 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_FREEBSD <<
5840 			MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
5841 			(sc->facts.ioc_num <<
5842 			MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
5843 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, scratch_pad0);
5844 	mpi3mr_regwrite(sc, MPI3_SYSIF_HOST_DIAG_OFFSET, host_diagnostic | reset_type);
5845 
5846 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
5847 		do {
5848 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5849 			if (ioc_status &
5850 			    MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
5851 				ioc_config =
5852 				    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5853 				if (mpi3mr_soft_reset_success(ioc_status,
5854 				    ioc_config)) {
5855 					mpi3mr_clear_reset_history(sc);
5856 					retval = 0;
5857 					break;
5858 				}
5859 			}
5860 			DELAY(100 * 1000);
5861 		} while (--timeout);
5862 	} else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
5863 		do {
5864 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5865 			if (mpi3mr_diagfault_success(sc, ioc_status)) {
5866 				retval = 0;
5867 				break;
5868 			}
5869 			DELAY(100 * 1000);
5870 		} while (--timeout);
5871 	}
5872 
5873 	mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5874 		MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5875 
5876 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5877 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5878 
5879 	mpi3mr_dprint(sc, MPI3MR_INFO,
5880 	    "IOC Status/Config after %s reset is (0x%x)/(0x%x)\n",
5881 	    !retval ? "successful":"failed", ioc_status,
5882 	    ioc_config);
5883 
5884 	if (retval)
5885 		sc->unrecoverable = 1;
5886 
5887 	return retval;
5888 }
5889 
mpi3mr_cleanup_event_taskq(struct mpi3mr_softc * sc)5890 inline void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc)
5891 {
5892 	/*
5893 	 * Block the taskqueue before draining.  This means any new tasks won't
5894 	 * be queued to the taskqueue worker thread.  But it doesn't stop the
5895 	 * current workers that are running.  taskqueue_drain waits for those
5896 	 * correctly in the case of thread backed taskqueues.  The while loop
5897 	 * ensures that all taskqueue threads have finished their current tasks.
5898 	 */
5899 	taskqueue_block(sc->cam_sc->ev_tq);
5900 	while (taskqueue_cancel(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task, NULL) != 0) {
5901 		taskqueue_drain(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
5902 	}
5903 }
5904 
5905 /**
5906  * mpi3mr_soft_reset_handler - Reset the controller
5907  * @sc: Adapter instance reference
5908  * @reset_reason: Reset reason code
5909  * @snapdump: snapdump enable/disbale bit
5910  *
5911  * This is an handler for recovering controller by issuing soft
5912  * reset or diag fault reset. This is a blocking function and
5913  * when one reset is executed if any other resets they will be
5914  * blocked. All IOCTLs/IO will be blocked during the reset. If
5915  * controller reset is successful then the controller will be
5916  * reinitalized, otherwise the controller will be marked as not
5917  * recoverable
5918  *
5919  * Return: 0 on success, non-zero on failure.
5920  */
mpi3mr_soft_reset_handler(struct mpi3mr_softc * sc,U16 reset_reason,bool snapdump)5921 int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
5922 	U16 reset_reason, bool snapdump)
5923 {
5924 	int retval = 0, i = 0;
5925 	enum mpi3mr_iocstate ioc_state;
5926 
5927 	mpi3mr_dprint(sc, MPI3MR_INFO, "soft reset invoked: reason code: %s\n",
5928 	    mpi3mr_reset_rc_name(reset_reason));
5929 
5930 	if ((reset_reason == MPI3MR_RESET_FROM_IOCTL) &&
5931 	     (sc->reset.ioctl_reset_snapdump != true))
5932 		snapdump = false;
5933 
5934 	mpi3mr_dprint(sc, MPI3MR_INFO,
5935 	    "soft_reset_handler: wait if diag save is in progress\n");
5936 	while (sc->diagsave_timeout)
5937 		DELAY(1000 * 1000);
5938 
5939 	ioc_state = mpi3mr_get_iocstate(sc);
5940 	if (ioc_state == MRIOC_STATE_UNRECOVERABLE) {
5941 		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is in unrecoverable state, exit\n");
5942 		sc->reset.type = MPI3MR_NO_RESET;
5943 		sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
5944 		sc->reset.status = -1;
5945 		sc->reset.ioctl_reset_snapdump = false;
5946 		return -1;
5947 	}
5948 
5949 	if (sc->reset_in_progress) {
5950 		mpi3mr_dprint(sc, MPI3MR_INFO, "reset is already in progress, exit\n");
5951 		return -1;
5952 	}
5953 
5954 	/* Pause IOs, drain and block the event taskqueue */
5955 	xpt_freeze_simq(sc->cam_sc->sim, 1);
5956 
5957 	mpi3mr_cleanup_event_taskq(sc);
5958 
5959 	sc->reset_in_progress = 1;
5960 	sc->block_ioctls = 1;
5961 
5962 	while (mpi3mr_atomic_read(&sc->pend_ioctls) && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
5963 		ioc_state = mpi3mr_get_iocstate(sc);
5964 		if (ioc_state == MRIOC_STATE_FAULT)
5965 			break;
5966 		i++;
5967 		if (!(i % 5)) {
5968 			mpi3mr_dprint(sc, MPI3MR_INFO,
5969 			    "[%2ds]waiting for IOCTL to be finished from %s\n", i, __func__);
5970 		}
5971 		DELAY(1000 * 1000);
5972 	}
5973 
5974 	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
5975 	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
5976 	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
5977 
5978 		mpi3mr_dprint(sc, MPI3MR_INFO, "Turn off events prior to reset\n");
5979 
5980 		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5981 			sc->event_masks[i] = -1;
5982 		mpi3mr_issue_event_notification(sc);
5983 	}
5984 
5985 	mpi3mr_disable_interrupts(sc);
5986 
5987 	if (snapdump)
5988 		mpi3mr_trigger_snapdump(sc, reset_reason);
5989 
5990 	retval = mpi3mr_issue_reset(sc,
5991 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
5992 	if (retval) {
5993 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to issue soft reset to the ioc\n");
5994 		goto out;
5995 	}
5996 
5997 	mpi3mr_flush_drv_cmds(sc);
5998 	mpi3mr_flush_io(sc);
5999 	mpi3mr_invalidate_devhandles(sc);
6000 	mpi3mr_memset_buffers(sc);
6001 
6002 	if (sc->prepare_for_reset) {
6003 		sc->prepare_for_reset = 0;
6004 		sc->prepare_for_reset_timeout_counter = 0;
6005 	}
6006 
6007 	retval = mpi3mr_initialize_ioc(sc, MPI3MR_INIT_TYPE_RESET);
6008 	if (retval) {
6009 		mpi3mr_dprint(sc, MPI3MR_ERROR, "reinit after soft reset failed: reason %d\n",
6010 		    reset_reason);
6011 		goto out;
6012 	}
6013 
6014 	DELAY((1000 * 1000) * 10);
6015 out:
6016 	if (!retval) {
6017 		sc->diagsave_timeout = 0;
6018 		sc->reset_in_progress = 0;
6019 		mpi3mr_rfresh_tgtdevs(sc);
6020 		sc->ts_update_counter = 0;
6021 		sc->block_ioctls = 0;
6022 		sc->pel_abort_requested = 0;
6023 		if (sc->pel_wait_pend) {
6024 			sc->pel_cmds.retry_count = 0;
6025 			mpi3mr_issue_pel_wait(sc, &sc->pel_cmds);
6026 			mpi3mr_app_send_aen(sc);
6027 		}
6028 	} else {
6029 		mpi3mr_issue_reset(sc,
6030 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
6031 		sc->unrecoverable = 1;
6032 		sc->reset_in_progress = 0;
6033 	}
6034 
6035 	mpi3mr_dprint(sc, MPI3MR_INFO, "Soft Reset: %s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
6036 
6037 	taskqueue_unblock(sc->cam_sc->ev_tq);
6038 	xpt_release_simq(sc->cam_sc->sim, 1);
6039 
6040 	sc->reset.type = MPI3MR_NO_RESET;
6041 	sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
6042 	sc->reset.status = retval;
6043 	sc->reset.ioctl_reset_snapdump = false;
6044 
6045 	return retval;
6046 }
6047 
6048 /**
6049  * mpi3mr_issue_ioc_shutdown - shutdown controller
6050  * @sc: Adapter instance reference
6051  *
6052  * Send shutodwn notification to the controller and wait for the
6053  * shutdown_timeout for it to be completed.
6054  *
6055  * Return: Nothing.
6056  */
mpi3mr_issue_ioc_shutdown(struct mpi3mr_softc * sc)6057 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_softc *sc)
6058 {
6059 	U32 ioc_config, ioc_status;
6060 	U8 retval = 1, retry = 0;
6061 	U32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
6062 
6063 	mpi3mr_dprint(sc, MPI3MR_INFO, "sending shutdown notification\n");
6064 	if (sc->unrecoverable) {
6065 		mpi3mr_dprint(sc, MPI3MR_ERROR,
6066 		    "controller is unrecoverable, shutdown not issued\n");
6067 		return;
6068 	}
6069 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6070 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6071 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
6072 		mpi3mr_dprint(sc, MPI3MR_ERROR, "shutdown already in progress\n");
6073 		return;
6074 	}
6075 
6076 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6077 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
6078 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
6079 
6080 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
6081 
6082 	if (sc->facts.shutdown_timeout)
6083 		timeout = sc->facts.shutdown_timeout * 10;
6084 
6085 	do {
6086 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6087 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6088 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
6089 			retval = 0;
6090 			break;
6091 		}
6092 
6093 		if (sc->unrecoverable)
6094 			break;
6095 
6096 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
6097 			mpi3mr_print_fault_info(sc);
6098 
6099 			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
6100 				break;
6101 
6102 			if (mpi3mr_issue_reset(sc,
6103 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6104 			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6105 				break;
6106 
6107 			ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6108 			ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
6109 			ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
6110 
6111 			mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
6112 
6113 			if (sc->facts.shutdown_timeout)
6114 				timeout = sc->facts.shutdown_timeout * 10;
6115 
6116 			retry++;
6117 		}
6118 
6119                 DELAY(100 * 1000);
6120 
6121 	} while (--timeout);
6122 
6123 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6124 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6125 
6126 	if (retval) {
6127 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
6128 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
6129 			mpi3mr_dprint(sc, MPI3MR_ERROR,
6130 			    "shutdown still in progress after timeout\n");
6131 	}
6132 
6133 	mpi3mr_dprint(sc, MPI3MR_INFO,
6134 	    "ioc_status/ioc_config after %s shutdown is (0x%x)/(0x%x)\n",
6135 	    (!retval)?"successful":"failed", ioc_status,
6136 	    ioc_config);
6137 }
6138 
6139 /**
6140  * mpi3mr_cleanup_ioc - Cleanup controller
6141  * @sc: Adapter instance reference
6142 
6143  * controller cleanup handler, Message unit reset or soft reset
6144  * and shutdown notification is issued to the controller.
6145  *
6146  * Return: Nothing.
6147  */
mpi3mr_cleanup_ioc(struct mpi3mr_softc * sc)6148 void mpi3mr_cleanup_ioc(struct mpi3mr_softc *sc)
6149 {
6150 	enum mpi3mr_iocstate ioc_state;
6151 
6152 	mpi3mr_dprint(sc, MPI3MR_INFO, "cleaning up the controller\n");
6153 	mpi3mr_disable_interrupts(sc);
6154 
6155 	ioc_state = mpi3mr_get_iocstate(sc);
6156 
6157 	if ((!sc->unrecoverable) && (!sc->reset_in_progress) &&
6158 	    (ioc_state == MRIOC_STATE_READY)) {
6159 		if (mpi3mr_mur_ioc(sc,
6160 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6161 			mpi3mr_issue_reset(sc,
6162 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6163 			    MPI3MR_RESET_FROM_MUR_FAILURE);
6164 		mpi3mr_issue_ioc_shutdown(sc);
6165 	}
6166 
6167 	mpi3mr_dprint(sc, MPI3MR_INFO, "controller cleanup completed\n");
6168 }
6169