xref: /freebsd/sys/dev/mrsas/mrsas.c (revision d0b2dbfa)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 #include <dev/mrsas/mrsas.h>
42 #include <dev/mrsas/mrsas_ioctl.h>
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 
47 #include <sys/sysctl.h>
48 #include <sys/types.h>
49 #include <sys/sysent.h>
50 #include <sys/kthread.h>
51 #include <sys/taskqueue.h>
52 #include <sys/smp.h>
53 #include <sys/endian.h>
54 
55 /*
56  * Function prototypes
57  */
58 static d_open_t mrsas_open;
59 static d_close_t mrsas_close;
60 static d_ioctl_t mrsas_ioctl;
61 static d_poll_t mrsas_poll;
62 
63 static void mrsas_ich_startup(void *arg);
64 static struct mrsas_mgmt_info mrsas_mgmt_info;
65 static struct mrsas_ident *mrsas_find_ident(device_t);
66 static int mrsas_setup_msix(struct mrsas_softc *sc);
67 static int mrsas_allocate_msix(struct mrsas_softc *sc);
68 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
69 static void mrsas_flush_cache(struct mrsas_softc *sc);
70 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
71 static void mrsas_ocr_thread(void *arg);
72 static int mrsas_get_map_info(struct mrsas_softc *sc);
73 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
74 static int mrsas_sync_map_info(struct mrsas_softc *sc);
75 static int mrsas_get_pd_list(struct mrsas_softc *sc);
76 static int mrsas_get_ld_list(struct mrsas_softc *sc);
77 static int mrsas_setup_irq(struct mrsas_softc *sc);
78 static int mrsas_alloc_mem(struct mrsas_softc *sc);
79 static int mrsas_init_fw(struct mrsas_softc *sc);
80 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
81 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
82 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
85 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
86 static int
87 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
88     struct mrsas_mfi_cmd *cmd_to_abort);
89 static void
90 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
91 static struct mrsas_softc *
92 mrsas_get_softc_instance(struct cdev *dev,
93     u_long cmd, caddr_t arg);
94 u_int32_t
95 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
97 u_int8_t
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99     struct mrsas_mfi_cmd *mfi_cmd);
100 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int	mrsas_init_adapter(struct mrsas_softc *sc);
103 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int	mrsas_ioc_init(struct mrsas_softc *sc);
107 int	mrsas_bus_scan(struct mrsas_softc *sc);
108 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int mrsas_reset_targets(struct mrsas_softc *sc);
114 int
115 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
116     struct mrsas_mfi_cmd *cmd);
117 int
118 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
119     int size);
120 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
121 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void	mrsas_disable_intr(struct mrsas_softc *sc);
125 void	mrsas_enable_intr(struct mrsas_softc *sc);
126 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
127 void	mrsas_free_mem(struct mrsas_softc *sc);
128 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
129 void	mrsas_isr(void *arg);
130 void	mrsas_teardown_intr(struct mrsas_softc *sc);
131 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
132 void	mrsas_kill_hba(struct mrsas_softc *sc);
133 void	mrsas_aen_handler(struct mrsas_softc *sc);
134 void
135 mrsas_write_reg(struct mrsas_softc *sc, int offset,
136     u_int32_t value);
137 void
138 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
139     u_int32_t req_desc_hi);
140 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
141 void
142 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
143     struct mrsas_mfi_cmd *cmd, u_int8_t status);
144 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
145 
146 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
147         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
148 
149 extern int mrsas_cam_attach(struct mrsas_softc *sc);
150 extern void mrsas_cam_detach(struct mrsas_softc *sc);
151 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
152 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
153 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
154 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
155 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
156 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
157 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
158 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
159 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
160 extern void mrsas_xpt_release(struct mrsas_softc *sc);
161 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
162 mrsas_get_request_desc(struct mrsas_softc *sc,
163     u_int16_t index);
164 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
165 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
166 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
167 void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
168 
169 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
170 	union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
171 	u_int32_t data_length, u_int8_t *sense);
172 void
173 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
174     u_int32_t req_desc_hi);
175 
176 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
177     "MRSAS Driver Parameters");
178 
179 /*
180  * PCI device struct and table
181  *
182  */
/*
 * One supported-controller identity.  Matched against the probed PCI IDs
 * by mrsas_find_ident(); a sub-ID of 0xffff acts as a wildcard there.
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* subsystem vendor ID; 0xffff matches any */
	uint16_t subdevice;	/* subsystem device ID; 0xffff matches any */
	const char *desc;	/* human-readable name passed to device_set_desc() */
}	MRSAS_CTLR_ID;
190 
/*
 * Supported-controller table scanned by mrsas_find_ident().  Terminated
 * by an all-zero sentinel entry (vendor == 0).
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	/* sentinel: terminates the mrsas_find_ident() scan */
	{0, 0, 0, 0, NULL}
};
215 
216 /*
217  * Character device entry points
218  *
219  */
/*
 * Character-device switch for the /dev/mrsas<unit> management node.
 * The ioctl entry is the path used by management applications; open and
 * close are stateless no-ops (see mrsas_open()/mrsas_close()).
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
228 
229 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
230 
/*
 * mrsas_open:	character-device open entry point.
 *
 * No per-open state is tracked, so opening the management node always
 * succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	return (0);
}
237 
/*
 * mrsas_close:	character-device close entry point.
 *
 * Nothing to release; closing always succeeds.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	return (0);
}
244 
245 u_int32_t
246 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
247 {
248 	u_int32_t i = 0, ret_val;
249 
250 	if (sc->is_aero) {
251 		do {
252 			ret_val = mrsas_read_reg(sc, offset);
253 			i++;
254 		} while(ret_val == 0 && i < 3);
255 	} else
256 		ret_val = mrsas_read_reg(sc, offset);
257 
258 	return ret_val;
259 }
260 
261 /*
262  * Register Read/Write Functions
263  *
264  */
265 void
266 mrsas_write_reg(struct mrsas_softc *sc, int offset,
267     u_int32_t value)
268 {
269 	bus_space_tag_t bus_tag = sc->bus_tag;
270 	bus_space_handle_t bus_handle = sc->bus_handle;
271 
272 	bus_space_write_4(bus_tag, bus_handle, offset, value);
273 }
274 
275 u_int32_t
276 mrsas_read_reg(struct mrsas_softc *sc, int offset)
277 {
278 	bus_space_tag_t bus_tag = sc->bus_tag;
279 	bus_space_handle_t bus_handle = sc->bus_handle;
280 
281 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
282 }
283 
284 /*
285  * Interrupt Disable/Enable/Clear Functions
286  *
287  */
/*
 * mrsas_disable_intr:	mask all controller interrupts.
 * input:	Adapter soft state
 *
 * Sets mask_interrupts first so the ISR ignores any event racing with the
 * mask write, then writes an all-ones mask to the outbound interrupt mask
 * register.  The trailing read-back flushes the posted PCI write.
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
298 
/*
 * mrsas_enable_intr:	enable the controller reply interrupt.
 * input:	Adapter soft state
 *
 * Clears mask_interrupts so the ISR processes events again, acknowledges
 * any stale bits in the outbound interrupt status register, then writes
 * ~MFI_FUSION_ENABLE_INTERRUPT_MASK to the mask register so only that
 * interrupt source is left unmasked.  Each write is followed by a dummy
 * read-back to flush the posted PCI write.
 */
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;

	sc->mask_interrupts = 0;
	/* Ack any pending status before unmasking. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
311 
312 static int
313 mrsas_clear_intr(struct mrsas_softc *sc)
314 {
315 	u_int32_t status;
316 
317 	/* Read received interrupt */
318 	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
319 
320 	/* Not our interrupt, so just return */
321 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
322 		return (0);
323 
324 	/* We got a reply interrupt */
325 	return (1);
326 }
327 
328 /*
329  * PCI Support Functions
330  *
331  */
332 static struct mrsas_ident *
333 mrsas_find_ident(device_t dev)
334 {
335 	struct mrsas_ident *pci_device;
336 
337 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
338 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
339 		    (pci_device->device == pci_get_device(dev)) &&
340 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
341 		    (pci_device->subvendor == 0xffff)) &&
342 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
343 		    (pci_device->subdevice == 0xffff)))
344 			return (pci_device);
345 	}
346 	return (NULL);
347 }
348 
349 static int
350 mrsas_probe(device_t dev)
351 {
352 	static u_int8_t first_ctrl = 1;
353 	struct mrsas_ident *id;
354 
355 	if ((id = mrsas_find_ident(dev)) != NULL) {
356 		if (first_ctrl) {
357 			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
358 			    MRSAS_VERSION);
359 			first_ctrl = 0;
360 		}
361 		device_set_desc(dev, id->desc);
362 		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
363 		return (-30);
364 	}
365 	return (ENXIO);
366 }
367 
/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:				Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.  Uses the device's sysctl
 * context/tree when available; otherwise creates a private context
 * under hw.mrsas.<unit>.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* No device-provided tree: build our own node under hw.mrsas. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	/*
	 * NOTE(review): the next two OID names contain a space
	 * ("stream detection", "SGE holes"), which is awkward for
	 * sysctl(8) users; kept as-is because renaming would change the
	 * user-visible interface.
	 */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
		&sc->drv_stream_detection, 0,
		"Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}
455 
456 /*
457  * mrsas_get_tunables:	get tunable parameters.
458  * input:				Adapter instance soft state
459  *
460  * Get tunable parameters. This will help to debug driver at boot time.
461  */
462 static void
463 mrsas_get_tunables(struct mrsas_softc *sc)
464 {
465 	char tmpstr[80];
466 
467 	/* XXX default to some debugging for now */
468 	sc->mrsas_debug =
469 		(MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
470 	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
471 	sc->mrsas_fw_fault_check_delay = 1;
472 	sc->reset_count = 0;
473 	sc->reset_in_progress = 0;
474 	sc->block_sync_cache = 0;
475 	sc->drv_stream_detection = 1;
476 
477 	/*
478 	 * Grab the global variables.
479 	 */
480 	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
481 
482 	/*
483 	 * Grab the global variables.
484 	 */
485 	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
486 
487 	/* Grab the unit-instance variables */
488 	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
489 	    device_get_unit(sc->mrsas_dev));
490 	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
491 }
492 
493 /*
494  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
495  * Used to get sequence number at driver load time.
496  * input:		Adapter soft state
497  *
498  * Allocates DMAable memory for the event log info internal command.
499  */
500 int
501 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
502 {
503 	int el_info_size;
504 
505 	/* Allocate get event log info command */
506 	el_info_size = sizeof(struct mrsas_evt_log_info);
507 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
508 	    1, 0,
509 	    BUS_SPACE_MAXADDR_32BIT,
510 	    BUS_SPACE_MAXADDR,
511 	    NULL, NULL,
512 	    el_info_size,
513 	    1,
514 	    el_info_size,
515 	    BUS_DMA_ALLOCNOW,
516 	    NULL, NULL,
517 	    &sc->el_info_tag)) {
518 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
519 		return (ENOMEM);
520 	}
521 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
522 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
523 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
524 		return (ENOMEM);
525 	}
526 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
527 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
528 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
529 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
530 		return (ENOMEM);
531 	}
532 	memset(sc->el_info_mem, 0, el_info_size);
533 	return (0);
534 }
535 
536 /*
537  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
538  * input:					Adapter soft state
539  *
540  * Deallocates memory for the event log info internal command.
541  */
542 void
543 mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
544 {
545 	if (sc->el_info_phys_addr)
546 		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
547 	if (sc->el_info_mem != NULL)
548 		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
549 	if (sc->el_info_tag != NULL)
550 		bus_dma_tag_destroy(sc->el_info_tag);
551 }
552 
/*
 *  mrsas_get_seq_num:	Get latest event sequence number
 *  @sc:				Adapter soft state
 *  @eli:				Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * Driver get the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 * Returns 0 on success, -ENOMEM on allocation failure, or the blocked
 * command's error code (ETIMEDOUT triggers an OCR request).
 */

static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer the firmware fills with the event log info. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame with one read SGE. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/*
	 * On timeout the firmware may still own the frame, so the command
	 * is deliberately NOT released here; an OCR is requested instead
	 * (presumably the reset path reclaims outstanding commands —
	 * NOTE(review): confirm against mrsas_ocr_thread).
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
616 
/*
 *  mrsas_register_aen:		Register for asynchronous event notification
 *  @sc:			Adapter soft state
 *  @seq_num:			Starting sequence number
 *  @class_locale:		Class of the event
 *
 *  This function subscribes for events beyond the @seq_num
 *  and type @class_locale.
 *
 *  Returns 0 on success (or when the existing registration already covers
 *  the request), ENOMEM when no MFI command is available, the abort error
 *  code if the previous AEN could not be cancelled, or 1 if issuing the
 *  new DCMD fails.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		/* class/locale of the registration currently in flight */
		prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Superset: union of locales, minimum of classes. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	/* mbox.w[0] = starting sequence, mbox.w[1] = class/locale word */
	dcmd->mbox.w[0] = htole32(seq_num);
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = htole32(curr_aen.word);
	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));

	/*
	 * NOTE(review): defensive re-check — aen_cmd was NULLed above after
	 * the abort, so this can only fire if another context installed a
	 * registration concurrently; in that case drop ours.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
733 
734 /*
735  * mrsas_start_aen:	Subscribes to AEN during driver load time
736  * @instance:		Adapter soft state
737  */
738 static int
739 mrsas_start_aen(struct mrsas_softc *sc)
740 {
741 	struct mrsas_evt_log_info eli;
742 	union mrsas_evt_class_locale class_locale;
743 
744 	/* Get the latest sequence number from FW */
745 
746 	memset(&eli, 0, sizeof(eli));
747 
748 	if (mrsas_get_seq_num(sc, &eli))
749 		return -1;
750 
751 	/* Register AEN with FW for latest sequence number plus 1 */
752 	class_locale.members.reserved = 0;
753 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
754 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
755 
756 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
757 	    class_locale.word);
758 
759 }
760 
/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:					adapter soft state
 *
 * For each of the sc->msix_vectors vectors obtained earlier, allocates
 * the IRQ resource and hooks mrsas_isr() with a per-vector context
 * (softc + MSI-x index).  Any failure tears down everything set up so
 * far via mrsas_teardown_intr().
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		/* MSI-x IRQ resource IDs start at 1. */
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	/* Release any vectors/handlers already set up. */
	mrsas_teardown_intr(sc);
	return (FAIL);
}
797 
798 /*
799  * mrsas_allocate_msix:		Setup MSI-x vectors
800  * @sc:						adapter soft state
801  */
802 static int
803 mrsas_allocate_msix(struct mrsas_softc *sc)
804 {
805 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
806 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
807 		    " of vectors\n", sc->msix_vectors);
808 	} else {
809 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
810 		goto irq_alloc_failed;
811 	}
812 	return SUCCESS;
813 
814 irq_alloc_failed:
815 	mrsas_teardown_intr(sc);
816 	return (FAIL);
817 }
818 
819 /*
820  * mrsas_attach:	PCI entry point
821  * input:			pointer to device struct
822  *
823  * Performs setup of PCI and registers, initializes mutexes and linked lists,
824  * registers interrupts and CAM, and initializes   the adapter/controller to
825  * its proper state.
826  */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	/* Start from a fully zeroed softc. */
	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/*
	 * Classify the controller family from the PCI device ID; this
	 * selects the register BAR and several code paths later on.
	 * Device IDs not listed here leave all family flags clear.
	 */
	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
		/* FALLTHROUGH -- configurable-secure Aero is driven like any Aero */
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		/* Non-secure Aero parts are not driven; report success without attaching. */
		device_printf(dev, "Adapter is in non-secure mode\n");
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero system registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Initialize linked lists for MPT and MFI command pools */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/*
	 * Error unwinding: the labels below run in reverse order of the
	 * setup steps; jumping to a label tears down only what has been
	 * set up so far (attach_fail skips the mtx_destroy calls because
	 * the mutexes are not initialized when register mapping fails).
	 */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
982 
983 /*
984  * Interrupt config hook
985  */
986 static void
987 mrsas_ich_startup(void *arg)
988 {
989 	int i = 0;
990 	struct mrsas_softc *sc = (struct mrsas_softc *)arg;
991 
992 	/*
993 	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
994 	 */
995 	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
996 	    IOCTL_SEMA_DESCRIPTION);
997 
998 	/* Create a /dev entry for mrsas controller. */
999 	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
1000 	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
1001 	    device_get_unit(sc->mrsas_dev));
1002 
1003 	if (device_get_unit(sc->mrsas_dev) == 0) {
1004 		make_dev_alias_p(MAKEDEV_CHECKNAME,
1005 		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
1006 		    "megaraid_sas_ioctl_node");
1007 	}
1008 	if (sc->mrsas_cdev)
1009 		sc->mrsas_cdev->si_drv1 = sc;
1010 
1011 	/*
1012 	 * Add this controller to mrsas_mgmt_info structure so that it can be
1013 	 * exported to management applications
1014 	 */
1015 	if (device_get_unit(sc->mrsas_dev) == 0)
1016 		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
1017 
1018 	mrsas_mgmt_info.count++;
1019 	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
1020 	mrsas_mgmt_info.max_index++;
1021 
1022 	/* Enable Interrupts */
1023 	mrsas_enable_intr(sc);
1024 
1025 	/* Call DCMD get_pd_info for all system PDs */
1026 	for (i = 0; i < MRSAS_MAX_PD; i++) {
1027 		if ((sc->target_list[i].target_id != 0xffff) &&
1028 			sc->pd_info_mem)
1029 			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
1030 	}
1031 
1032 	/* Initiate AEN (Asynchronous Event Notification) */
1033 	if (mrsas_start_aen(sc)) {
1034 		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
1035 		    "Further events from the controller will not be communicated.\n"
1036 		    "Either there is some problem in the controller"
1037 		    "or the controller does not support AEN.\n"
1038 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
1039 	}
1040 	if (sc->mrsas_ich.ich_arg != NULL) {
1041 		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
1042 		config_intrhook_disestablish(&sc->mrsas_ich);
1043 		sc->mrsas_ich.ich_arg = NULL;
1044 	}
1045 }
1046 
1047 /*
1048  * mrsas_detach:	De-allocates and teardown resources
1049  * input:			pointer to device struct
1050  *
1051  * This function is the entry point for device disconnect and detach.
1052  * It performs memory de-allocations, shutdown of the controller and various
1053  * teardown and destroy resource functions.
1054  */
1055 static int
1056 mrsas_detach(device_t dev)
1057 {
1058 	struct mrsas_softc *sc;
1059 	int i = 0;
1060 
1061 	sc = device_get_softc(dev);
1062 	sc->remove_in_progress = 1;
1063 
1064 	/* Destroy the character device so no other IOCTL will be handled */
1065 	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
1066 		destroy_dev(sc->mrsas_linux_emulator_cdev);
1067 	destroy_dev(sc->mrsas_cdev);
1068 
1069 	/*
1070 	 * Take the instance off the instance array. Note that we will not
1071 	 * decrement the max_index. We let this array be sparse array
1072 	 */
1073 	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
1074 		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
1075 			mrsas_mgmt_info.count--;
1076 			mrsas_mgmt_info.sc_ptr[i] = NULL;
1077 			break;
1078 		}
1079 	}
1080 
1081 	if (sc->ocr_thread_active)
1082 		wakeup(&sc->ocr_chan);
1083 	while (sc->reset_in_progress) {
1084 		i++;
1085 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1086 			mrsas_dprint(sc, MRSAS_INFO,
1087 			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1088 		}
1089 		pause("mr_shutdown", hz);
1090 	}
1091 	i = 0;
1092 	while (sc->ocr_thread_active) {
1093 		i++;
1094 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1095 			mrsas_dprint(sc, MRSAS_INFO,
1096 			    "[%2d]waiting for "
1097 			    "mrsas_ocr thread to quit ocr %d\n", i,
1098 			    sc->ocr_thread_active);
1099 		}
1100 		pause("mr_shutdown", hz);
1101 	}
1102 	mrsas_flush_cache(sc);
1103 	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1104 	mrsas_disable_intr(sc);
1105 
1106 	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
1107 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
1108 			free(sc->streamDetectByLD[i], M_MRSAS);
1109 		free(sc->streamDetectByLD, M_MRSAS);
1110 		sc->streamDetectByLD = NULL;
1111 	}
1112 
1113 	mrsas_cam_detach(sc);
1114 	mrsas_teardown_intr(sc);
1115 	mrsas_free_mem(sc);
1116 	mtx_destroy(&sc->sim_lock);
1117 	mtx_destroy(&sc->aen_lock);
1118 	mtx_destroy(&sc->pci_lock);
1119 	mtx_destroy(&sc->io_lock);
1120 	mtx_destroy(&sc->ioctl_lock);
1121 	mtx_destroy(&sc->mpt_cmd_pool_lock);
1122 	mtx_destroy(&sc->mfi_cmd_pool_lock);
1123 	mtx_destroy(&sc->raidmap_lock);
1124 	mtx_destroy(&sc->stream_lock);
1125 
1126 	/* Wait for all the semaphores to be released */
1127 	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
1128 		pause("mr_shutdown", hz);
1129 
1130 	/* Destroy the counting semaphore created for Ioctl */
1131 	sema_destroy(&sc->ioctl_count_sema);
1132 
1133 	if (sc->reg_res) {
1134 		bus_release_resource(sc->mrsas_dev,
1135 		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
1136 	}
1137 	if (sc->sysctl_tree != NULL)
1138 		sysctl_ctx_free(&sc->sysctl_ctx);
1139 
1140 	return (0);
1141 }
1142 
/*
 * mrsas_shutdown:	Shutdown entry point
 * input:			Device struct pointer
 *
 * System-shutdown hook.  Unless the kernel has panicked, waits a bounded
 * time (~15s) for a pending OCR/reset to finish before flushing the
 * controller cache, issuing the firmware shutdown DCMD and disabling
 * interrupts.  On panic the waits are skipped entirely.
 */
static int
mrsas_shutdown(device_t dev)
{
	struct mrsas_softc *sc;
	int i;

	sc = device_get_softc(dev);
	sc->remove_in_progress = 1;
	if (!KERNEL_PANICKED()) {
		if (sc->ocr_thread_active)
			wakeup(&sc->ocr_chan);
		i = 0;
		/* Bounded wait: at most 15 one-second pauses for OCR to finish. */
		while (sc->reset_in_progress && i < 15) {
			i++;
			if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
				mrsas_dprint(sc, MRSAS_INFO,
				    "[%2d]waiting for OCR to be finished "
				    "from %s\n", i, __func__);
			}
			pause("mr_shutdown", hz);
		}
		/* If OCR is still running, skip the firmware shutdown sequence. */
		if (sc->reset_in_progress) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "gave up waiting for OCR to be finished\n");
			return (0);
		}
	}

	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	return (0);
}
1176 
1177 /*
1178  * mrsas_free_mem:		Frees allocated memory
1179  * input:				Adapter instance soft state
1180  *
1181  * This function is called from mrsas_detach() to free previously allocated
1182  * memory.
1183  */
1184 void
1185 mrsas_free_mem(struct mrsas_softc *sc)
1186 {
1187 	int i;
1188 	u_int32_t max_fw_cmds;
1189 	struct mrsas_mfi_cmd *mfi_cmd;
1190 	struct mrsas_mpt_cmd *mpt_cmd;
1191 
1192 	/*
1193 	 * Free RAID map memory
1194 	 */
1195 	for (i = 0; i < 2; i++) {
1196 		if (sc->raidmap_phys_addr[i])
1197 			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1198 		if (sc->raidmap_mem[i] != NULL)
1199 			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1200 		if (sc->raidmap_tag[i] != NULL)
1201 			bus_dma_tag_destroy(sc->raidmap_tag[i]);
1202 
1203 		if (sc->ld_drv_map[i] != NULL)
1204 			free(sc->ld_drv_map[i], M_MRSAS);
1205 	}
1206 	for (i = 0; i < 2; i++) {
1207 		if (sc->jbodmap_phys_addr[i])
1208 			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1209 		if (sc->jbodmap_mem[i] != NULL)
1210 			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1211 		if (sc->jbodmap_tag[i] != NULL)
1212 			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1213 	}
1214 	/*
1215 	 * Free version buffer memory
1216 	 */
1217 	if (sc->verbuf_phys_addr)
1218 		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1219 	if (sc->verbuf_mem != NULL)
1220 		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1221 	if (sc->verbuf_tag != NULL)
1222 		bus_dma_tag_destroy(sc->verbuf_tag);
1223 
1224 	/*
1225 	 * Free sense buffer memory
1226 	 */
1227 	if (sc->sense_phys_addr)
1228 		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1229 	if (sc->sense_mem != NULL)
1230 		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1231 	if (sc->sense_tag != NULL)
1232 		bus_dma_tag_destroy(sc->sense_tag);
1233 
1234 	/*
1235 	 * Free chain frame memory
1236 	 */
1237 	if (sc->chain_frame_phys_addr)
1238 		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1239 	if (sc->chain_frame_mem != NULL)
1240 		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1241 	if (sc->chain_frame_tag != NULL)
1242 		bus_dma_tag_destroy(sc->chain_frame_tag);
1243 
1244 	/*
1245 	 * Free IO Request memory
1246 	 */
1247 	if (sc->io_request_phys_addr)
1248 		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1249 	if (sc->io_request_mem != NULL)
1250 		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1251 	if (sc->io_request_tag != NULL)
1252 		bus_dma_tag_destroy(sc->io_request_tag);
1253 
1254 	/*
1255 	 * Free Reply Descriptor memory
1256 	 */
1257 	if (sc->reply_desc_phys_addr)
1258 		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1259 	if (sc->reply_desc_mem != NULL)
1260 		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1261 	if (sc->reply_desc_tag != NULL)
1262 		bus_dma_tag_destroy(sc->reply_desc_tag);
1263 
1264 	/*
1265 	 * Free event detail memory
1266 	 */
1267 	if (sc->evt_detail_phys_addr)
1268 		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1269 	if (sc->evt_detail_mem != NULL)
1270 		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1271 	if (sc->evt_detail_tag != NULL)
1272 		bus_dma_tag_destroy(sc->evt_detail_tag);
1273 
1274 	/*
1275 	 * Free PD info memory
1276 	 */
1277 	if (sc->pd_info_phys_addr)
1278 		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
1279 	if (sc->pd_info_mem != NULL)
1280 		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
1281 	if (sc->pd_info_tag != NULL)
1282 		bus_dma_tag_destroy(sc->pd_info_tag);
1283 
1284 	/*
1285 	 * Free MFI frames
1286 	 */
1287 	if (sc->mfi_cmd_list) {
1288 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1289 			mfi_cmd = sc->mfi_cmd_list[i];
1290 			mrsas_free_frame(sc, mfi_cmd);
1291 		}
1292 	}
1293 	if (sc->mficmd_frame_tag != NULL)
1294 		bus_dma_tag_destroy(sc->mficmd_frame_tag);
1295 
1296 	/*
1297 	 * Free MPT internal command list
1298 	 */
1299 	max_fw_cmds = sc->max_fw_cmds;
1300 	if (sc->mpt_cmd_list) {
1301 		for (i = 0; i < max_fw_cmds; i++) {
1302 			mpt_cmd = sc->mpt_cmd_list[i];
1303 			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1304 			free(sc->mpt_cmd_list[i], M_MRSAS);
1305 		}
1306 		free(sc->mpt_cmd_list, M_MRSAS);
1307 		sc->mpt_cmd_list = NULL;
1308 	}
1309 	/*
1310 	 * Free MFI internal command list
1311 	 */
1312 
1313 	if (sc->mfi_cmd_list) {
1314 		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1315 			free(sc->mfi_cmd_list[i], M_MRSAS);
1316 		}
1317 		free(sc->mfi_cmd_list, M_MRSAS);
1318 		sc->mfi_cmd_list = NULL;
1319 	}
1320 	/*
1321 	 * Free request descriptor memory
1322 	 */
1323 	free(sc->req_desc, M_MRSAS);
1324 	sc->req_desc = NULL;
1325 
1326 	/*
1327 	 * Destroy parent tag
1328 	 */
1329 	if (sc->mrsas_parent_tag != NULL)
1330 		bus_dma_tag_destroy(sc->mrsas_parent_tag);
1331 
1332 	/*
1333 	 * Free ctrl_info memory
1334 	 */
1335 	if (sc->ctrl_info != NULL)
1336 		free(sc->ctrl_info, M_MRSAS);
1337 }
1338 
1339 /*
1340  * mrsas_teardown_intr:	Teardown interrupt
1341  * input:				Adapter instance soft state
1342  *
1343  * This function is called from mrsas_detach() to teardown and release bus
1344  * interrupt resourse.
1345  */
1346 void
1347 mrsas_teardown_intr(struct mrsas_softc *sc)
1348 {
1349 	int i;
1350 
1351 	if (!sc->msix_enable) {
1352 		if (sc->intr_handle[0])
1353 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1354 		if (sc->mrsas_irq[0] != NULL)
1355 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1356 			    sc->irq_id[0], sc->mrsas_irq[0]);
1357 		sc->intr_handle[0] = NULL;
1358 	} else {
1359 		for (i = 0; i < sc->msix_vectors; i++) {
1360 			if (sc->intr_handle[i])
1361 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1362 				    sc->intr_handle[i]);
1363 
1364 			if (sc->mrsas_irq[i] != NULL)
1365 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1366 				    sc->irq_id[i], sc->mrsas_irq[i]);
1367 
1368 			sc->intr_handle[i] = NULL;
1369 		}
1370 		pci_release_msi(sc->mrsas_dev);
1371 	}
1372 
1373 }
1374 
1375 /*
1376  * mrsas_suspend:	Suspend entry point
1377  * input:			Device struct pointer
1378  *
1379  * This function is the entry point for system suspend from the OS.
1380  */
1381 static int
1382 mrsas_suspend(device_t dev)
1383 {
1384 	/* This will be filled when the driver will have hibernation support */
1385 	return (0);
1386 }
1387 
1388 /*
1389  * mrsas_resume:	Resume entry point
1390  * input:			Device struct pointer
1391  *
1392  * This function is the entry point for system resume from the OS.
1393  */
1394 static int
1395 mrsas_resume(device_t dev)
1396 {
1397 	/* This will be filled when the driver will have hibernation support */
1398 	return (0);
1399 }
1400 
1401 /**
1402  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1403  *
1404  * This function will return softc instance based on cmd type.
1405  * In some case, application fire ioctl on required management instance and
1406  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1407  * case, else get the softc instance from host_no provided by application in
1408  * user data.
1409  */
1410 
1411 static struct mrsas_softc *
1412 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1413 {
1414 	struct mrsas_softc *sc = NULL;
1415 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1416 
1417 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1418 		sc = dev->si_drv1;
1419 	} else {
1420 		/*
1421 		 * get the Host number & the softc from data sent by the
1422 		 * Application
1423 		 */
1424 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1425 		if (sc == NULL)
1426 			printf("There is no Controller number %d\n",
1427 			    user_ioc->host_no);
1428 		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1429 			mrsas_dprint(sc, MRSAS_FAULT,
1430 			    "Invalid Controller number %d\n", user_ioc->host_no);
1431 	}
1432 
1433 	return sc;
1434 }
1435 
1436 /*
1437  * mrsas_ioctl:	IOCtl commands entry point.
1438  *
1439  * This function is the entry point for IOCtls from the OS.  It calls the
1440  * appropriate function for processing depending on the command received.
1441  */
1442 static int
1443 mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
1444     struct thread *td)
1445 {
1446 	struct mrsas_softc *sc;
1447 	int ret = 0, i = 0;
1448 	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;
1449 
1450 	switch (cmd) {
1451 	case MFIIO_PASSTHRU:
1452                 sc = (struct mrsas_softc *)(dev->si_drv1);
1453 		break;
1454 	default:
1455 		sc = mrsas_get_softc_instance(dev, cmd, arg);
1456 		break;
1457         }
1458 	if (!sc)
1459 		return ENOENT;
1460 
1461 	if (sc->remove_in_progress ||
1462 		(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
1463 		mrsas_dprint(sc, MRSAS_INFO,
1464 		    "Either driver remove or shutdown called or "
1465 			"HW is in unrecoverable critical error state.\n");
1466 		return ENOENT;
1467 	}
1468 	mtx_lock_spin(&sc->ioctl_lock);
1469 	if (!sc->reset_in_progress) {
1470 		mtx_unlock_spin(&sc->ioctl_lock);
1471 		goto do_ioctl;
1472 	}
1473 	mtx_unlock_spin(&sc->ioctl_lock);
1474 	while (sc->reset_in_progress) {
1475 		i++;
1476 		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1477 			mrsas_dprint(sc, MRSAS_INFO,
1478 			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
1479 		}
1480 		pause("mr_ioctl", hz);
1481 	}
1482 
1483 do_ioctl:
1484 	switch (cmd) {
1485 	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1486 #ifdef COMPAT_FREEBSD32
1487 	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1488 #endif
1489 		/*
1490 		 * Decrement the Ioctl counting Semaphore before getting an
1491 		 * mfi command
1492 		 */
1493 		sema_wait(&sc->ioctl_count_sema);
1494 
1495 		ret = mrsas_passthru(sc, (void *)arg, cmd);
1496 
1497 		/* Increment the Ioctl counting semaphore value */
1498 		sema_post(&sc->ioctl_count_sema);
1499 
1500 		break;
1501 	case MRSAS_IOC_SCAN_BUS:
1502 		ret = mrsas_bus_scan(sc);
1503 		break;
1504 
1505 	case MRSAS_IOC_GET_PCI_INFO:
1506 		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
1507 		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
1508 		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
1509 		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
1510 		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
1511 		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
1512 		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
1513 		    "pci device no: %d, pci function no: %d,"
1514 		    "pci domain ID: %d\n",
1515 		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
1516 		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
1517 		ret = 0;
1518 		break;
1519 
1520 	case MFIIO_PASSTHRU:
1521 		ret = mrsas_user_command(sc, (struct mfi_ioc_passthru *)arg);
1522 		break;
1523 
1524 	default:
1525 		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1526 		ret = ENOENT;
1527 	}
1528 
1529 	return (ret);
1530 }
1531 
1532 /*
1533  * mrsas_poll:	poll entry point for mrsas driver fd
1534  *
1535  * This function is the entry point for poll from the OS.  It waits for some AEN
1536  * events to be triggered from the controller and notifies back.
1537  */
1538 static int
1539 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1540 {
1541 	struct mrsas_softc *sc;
1542 	int revents = 0;
1543 
1544 	sc = dev->si_drv1;
1545 
1546 	if (poll_events & (POLLIN | POLLRDNORM)) {
1547 		if (sc->mrsas_aen_triggered) {
1548 			revents |= poll_events & (POLLIN | POLLRDNORM);
1549 		}
1550 	}
1551 	if (revents == 0) {
1552 		if (poll_events & (POLLIN | POLLRDNORM)) {
1553 			mtx_lock(&sc->aen_lock);
1554 			sc->mrsas_poll_waiting = 1;
1555 			selrecord(td, &sc->mrsas_select);
1556 			mtx_unlock(&sc->aen_lock);
1557 		}
1558 	}
1559 	return revents;
1560 }
1561 
1562 /*
1563  * mrsas_setup_irq:	Set up interrupt
1564  * input:			Adapter instance soft state
1565  *
1566  * This function sets up interrupts as a bus resource, with flags indicating
1567  * resource permitting contemporaneous sharing and for resource to activate
1568  * atomically.
1569  */
1570 static int
1571 mrsas_setup_irq(struct mrsas_softc *sc)
1572 {
1573 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1574 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1575 
1576 	else {
1577 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1578 		sc->irq_context[0].sc = sc;
1579 		sc->irq_context[0].MSIxIndex = 0;
1580 		sc->irq_id[0] = 0;
1581 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1582 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1583 		if (sc->mrsas_irq[0] == NULL) {
1584 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1585 			    "interrupt\n");
1586 			return (FAIL);
1587 		}
1588 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1589 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1590 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1591 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1592 			    "interrupt\n");
1593 			return (FAIL);
1594 		}
1595 	}
1596 	return (0);
1597 }
1598 
1599 /*
1600  * mrsas_isr:	ISR entry point
1601  * input:		argument pointer
1602  *
1603  * This function is the interrupt service routine entry point.  There are two
1604  * types of interrupts, state change interrupt and response interrupt.  If an
1605  * interrupt is not ours, we just return.
1606  */
1607 void
1608 mrsas_isr(void *arg)
1609 {
1610 	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1611 	struct mrsas_softc *sc = irq_context->sc;
1612 	int status = 0;
1613 
1614 	if (sc->mask_interrupts)
1615 		return;
1616 
1617 	if (!sc->msix_vectors) {
1618 		status = mrsas_clear_intr(sc);
1619 		if (!status)
1620 			return;
1621 	}
1622 	/* If we are resetting, bail */
1623 	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1624 		printf(" Entered into ISR when OCR is going active. \n");
1625 		mrsas_clear_intr(sc);
1626 		return;
1627 	}
1628 	/* Process for reply request and clear response interrupt */
1629 	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1630 		mrsas_clear_intr(sc);
1631 
1632 	return;
1633 }
1634 
1635 /*
1636  * mrsas_complete_cmd:	Process reply request
1637  * input:				Adapter instance soft state
1638  *
1639  * This function is called from mrsas_isr() to process reply request and clear
1640  * response interrupt. Processing of the reply request entails walking
1641  * through the reply descriptor array for the command request  pended from
1642  * Firmware.  We look at the Function field to determine the command type and
1643  * perform the appropriate action.  Before we return, we clear the response
1644  * interrupt.
1645  */
1646 int
1647 mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1648 {
1649 	Mpi2ReplyDescriptorsUnion_t *desc;
1650 	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1651 	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1652 	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
1653 	struct mrsas_mfi_cmd *cmd_mfi;
1654 	u_int8_t reply_descript_type, *sense;
1655 	u_int16_t smid, num_completed;
1656 	u_int8_t status, extStatus;
1657 	union desc_value desc_val;
1658 	PLD_LOAD_BALANCE_INFO lbinfo;
1659 	u_int32_t device_id, data_length;
1660 	int threshold_reply_count = 0;
1661 #if TM_DEBUG
1662 	MR_TASK_MANAGE_REQUEST *mr_tm_req;
1663 	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
1664 #endif
1665 
1666 	/* If we have a hardware error, not need to continue */
1667 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1668 		return (DONE);
1669 
1670 	desc = sc->reply_desc_mem;
1671 	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1672 	    + sc->last_reply_idx[MSIxIndex];
1673 
1674 	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1675 
1676 	desc_val.word = desc->Words;
1677 	num_completed = 0;
1678 
1679 	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1680 
1681 	/* Find our reply descriptor for the command and process */
1682 	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
1683 		smid = le16toh(reply_desc->SMID);
1684 		cmd_mpt = sc->mpt_cmd_list[smid - 1];
1685 		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1686 
1687 		status = scsi_io_req->RaidContext.raid_context.status;
1688 		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
1689 		sense = cmd_mpt->sense;
1690 		data_length = scsi_io_req->DataLength;
1691 
1692 		switch (scsi_io_req->Function) {
1693 		case MPI2_FUNCTION_SCSI_TASK_MGMT:
1694 #if TM_DEBUG
1695 			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
1696 			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
1697 			    &mr_tm_req->TmRequest;
1698 			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
1699 			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
1700 #endif
1701             wakeup_one((void *)&sc->ocr_chan);
1702             break;
1703 		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
1704 			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1705 			lbinfo = &sc->load_balance_info[device_id];
1706 			/* R1 load balancing for READ */
1707 			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1708 				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1709 				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1710 			}
1711 			/* Fall thru and complete IO */
1712 		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1713 			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
1714 				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
1715 				    extStatus, le32toh(data_length), sense);
1716 				mrsas_cmd_done(sc, cmd_mpt);
1717 				mrsas_atomic_dec(&sc->fw_outstanding);
1718 			} else {
1719 				/*
1720 				 * If the peer  Raid  1/10 fast path failed,
1721 				 * mark IO as failed to the scsi layer.
1722 				 * Overwrite the current status by the failed status
1723 				 * and make sure that if any command fails,
1724 				 * driver returns fail status to CAM.
1725 				 */
1726 				cmd_mpt->cmd_completed = 1;
1727 				r1_cmd = cmd_mpt->peer_cmd;
1728 				if (r1_cmd->cmd_completed) {
1729 					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
1730 						status = r1_cmd->io_request->RaidContext.raid_context.status;
1731 						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
1732 						data_length = r1_cmd->io_request->DataLength;
1733 						sense = r1_cmd->sense;
1734 					}
1735 					r1_cmd->ccb_ptr = NULL;
1736 					if (r1_cmd->callout_owner) {
1737 						callout_stop(&r1_cmd->cm_callout);
1738 						r1_cmd->callout_owner  = false;
1739 					}
1740 					mrsas_release_mpt_cmd(r1_cmd);
1741 					mrsas_atomic_dec(&sc->fw_outstanding);
1742 					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
1743 					    extStatus, le32toh(data_length), sense);
1744 					mrsas_cmd_done(sc, cmd_mpt);
1745 					mrsas_atomic_dec(&sc->fw_outstanding);
1746 				}
1747 			}
1748 			break;
1749 		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
1750 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1751 			/*
1752 			 * Make sure NOT TO release the mfi command from the called
1753 			 * function's context if it is fired with issue_polled call.
1754 			 * And also make sure that the issue_polled call should only be
1755 			 * used if INTERRUPT IS DISABLED.
1756 			 */
1757 			if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
1758 				mrsas_release_mfi_cmd(cmd_mfi);
1759 			else
1760 				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1761 			break;
1762 		}
1763 
1764 		sc->last_reply_idx[MSIxIndex]++;
1765 		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1766 			sc->last_reply_idx[MSIxIndex] = 0;
1767 
1768 		desc->Words = ~((uint64_t)0x00);	/* set it back to all
1769 							 * 0xFFFFFFFFs */
1770 		num_completed++;
1771 		threshold_reply_count++;
1772 
1773 		/* Get the next reply descriptor */
1774 		if (!sc->last_reply_idx[MSIxIndex]) {
1775 			desc = sc->reply_desc_mem;
1776 			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1777 		} else
1778 			desc++;
1779 
1780 		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1781 		desc_val.word = desc->Words;
1782 
1783 		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1784 
1785 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1786 			break;
1787 
1788 		/*
1789 		 * Write to reply post index after completing threshold reply
1790 		 * count and still there are more replies in reply queue
1791 		 * pending to be completed.
1792 		 */
1793 		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1794 			if (sc->msix_enable) {
1795 				if (sc->msix_combined)
1796 					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1797 					    ((MSIxIndex & 0x7) << 24) |
1798 					    sc->last_reply_idx[MSIxIndex]);
1799 				else
1800 					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1801 					    sc->last_reply_idx[MSIxIndex]);
1802 			} else
1803 				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1804 				    reply_post_host_index), sc->last_reply_idx[0]);
1805 
1806 			threshold_reply_count = 0;
1807 		}
1808 	}
1809 
1810 	/* No match, just return */
1811 	if (num_completed == 0)
1812 		return (DONE);
1813 
1814 	/* Clear response interrupt */
1815 	if (sc->msix_enable) {
1816 		if (sc->msix_combined) {
1817 			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1818 			    ((MSIxIndex & 0x7) << 24) |
1819 			    sc->last_reply_idx[MSIxIndex]);
1820 		} else
1821 			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1822 			    sc->last_reply_idx[MSIxIndex]);
1823 	} else
1824 		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1825 		    reply_post_host_index), sc->last_reply_idx[0]);
1826 
1827 	return (0);
1828 }
1829 
1830 /*
1831  * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1832  * input:						Adapter instance soft state
1833  *
1834  * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1835  * It checks the command status and maps the appropriate CAM status for the
1836  * CCB.
1837  */
1838 void
1839 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
1840     u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
1841 {
1842 	struct mrsas_softc *sc = cmd->sc;
1843 	u_int8_t *sense_data;
1844 
1845 	switch (status) {
1846 	case MFI_STAT_OK:
1847 		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1848 		break;
1849 	case MFI_STAT_SCSI_IO_FAILED:
1850 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1851 		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1852 		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
1853 		if (sense_data) {
1854 			/* For now just copy 18 bytes back */
1855 			memcpy(sense_data, sense, 18);
1856 			ccb_ptr->csio.sense_len = 18;
1857 			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1858 		}
1859 		break;
1860 	case MFI_STAT_LD_OFFLINE:
1861 	case MFI_STAT_DEVICE_NOT_FOUND:
1862 		if (ccb_ptr->ccb_h.target_lun)
1863 			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1864 		else
1865 			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1866 		break;
1867 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1868 		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1869 		break;
1870 	default:
1871 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1872 		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1873 		ccb_ptr->csio.scsi_status = status;
1874 	}
1875 	return;
1876 }
1877 
1878 /*
1879  * mrsas_alloc_mem:	Allocate DMAable memory
1880  * input:			Adapter instance soft state
1881  *
1882  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1883  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1884  * Kernel virtual address. Callback argument is physical memory address.
1885  */
1886 static int
1887 mrsas_alloc_mem(struct mrsas_softc *sc)
1888 {
1889 	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
1890 		evt_detail_size, count, pd_info_size;
1891 
1892 	/*
1893 	 * Allocate parent DMA tag
1894 	 */
1895 	if (bus_dma_tag_create(
1896 	    bus_get_dma_tag(sc->mrsas_dev),	/* parent */
1897 	    1,				/* alignment */
1898 	    0,				/* boundary */
1899 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1900 	    BUS_SPACE_MAXADDR,		/* highaddr */
1901 	    NULL, NULL,			/* filter, filterarg */
1902 	    BUS_SPACE_MAXSIZE,		/* maxsize */
1903 	    BUS_SPACE_UNRESTRICTED,	/* nsegments */
1904 	    BUS_SPACE_MAXSIZE,		/* maxsegsize */
1905 	    0,				/* flags */
1906 	    NULL, NULL,			/* lockfunc, lockarg */
1907 	    &sc->mrsas_parent_tag	/* tag */
1908 	    )) {
1909 		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1910 		return (ENOMEM);
1911 	}
1912 	/*
1913 	 * Allocate for version buffer
1914 	 */
1915 	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
1916 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1917 	    1, 0,
1918 	    BUS_SPACE_MAXADDR_32BIT,
1919 	    BUS_SPACE_MAXADDR,
1920 	    NULL, NULL,
1921 	    verbuf_size,
1922 	    1,
1923 	    verbuf_size,
1924 	    BUS_DMA_ALLOCNOW,
1925 	    NULL, NULL,
1926 	    &sc->verbuf_tag)) {
1927 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1928 		return (ENOMEM);
1929 	}
1930 	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1931 	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1932 		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1933 		return (ENOMEM);
1934 	}
1935 	bzero(sc->verbuf_mem, verbuf_size);
1936 	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1937 	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
1938 	    BUS_DMA_NOWAIT)) {
1939 		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1940 		return (ENOMEM);
1941 	}
1942 	/*
1943 	 * Allocate IO Request Frames
1944 	 */
1945 	io_req_size = sc->io_frames_alloc_sz;
1946 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1947 	    16, 0,
1948 	    BUS_SPACE_MAXADDR_32BIT,
1949 	    BUS_SPACE_MAXADDR,
1950 	    NULL, NULL,
1951 	    io_req_size,
1952 	    1,
1953 	    io_req_size,
1954 	    BUS_DMA_ALLOCNOW,
1955 	    NULL, NULL,
1956 	    &sc->io_request_tag)) {
1957 		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1958 		return (ENOMEM);
1959 	}
1960 	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1961 	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1962 		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1963 		return (ENOMEM);
1964 	}
1965 	bzero(sc->io_request_mem, io_req_size);
1966 	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1967 	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
1968 	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1969 		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1970 		return (ENOMEM);
1971 	}
1972 	/*
1973 	 * Allocate Chain Frames
1974 	 */
1975 	chain_frame_size = sc->chain_frames_alloc_sz;
1976 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
1977 	    4, 0,
1978 	    BUS_SPACE_MAXADDR_32BIT,
1979 	    BUS_SPACE_MAXADDR,
1980 	    NULL, NULL,
1981 	    chain_frame_size,
1982 	    1,
1983 	    chain_frame_size,
1984 	    BUS_DMA_ALLOCNOW,
1985 	    NULL, NULL,
1986 	    &sc->chain_frame_tag)) {
1987 		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1988 		return (ENOMEM);
1989 	}
1990 	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1991 	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1992 		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1993 		return (ENOMEM);
1994 	}
1995 	bzero(sc->chain_frame_mem, chain_frame_size);
1996 	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1997 	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1998 	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1999 		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
2000 		return (ENOMEM);
2001 	}
2002 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2003 	/*
2004 	 * Allocate Reply Descriptor Array
2005 	 */
2006 	reply_desc_size = sc->reply_alloc_sz * count;
2007 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2008 	    16, 0,
2009 	    BUS_SPACE_MAXADDR_32BIT,
2010 	    BUS_SPACE_MAXADDR,
2011 	    NULL, NULL,
2012 	    reply_desc_size,
2013 	    1,
2014 	    reply_desc_size,
2015 	    BUS_DMA_ALLOCNOW,
2016 	    NULL, NULL,
2017 	    &sc->reply_desc_tag)) {
2018 		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
2019 		return (ENOMEM);
2020 	}
2021 	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
2022 	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
2023 		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
2024 		return (ENOMEM);
2025 	}
2026 	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
2027 	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
2028 	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
2029 		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
2030 		return (ENOMEM);
2031 	}
2032 	/*
2033 	 * Allocate Sense Buffer Array.  Keep in lower 4GB
2034 	 */
2035 	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
2036 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2037 	    64, 0,
2038 	    BUS_SPACE_MAXADDR_32BIT,
2039 	    BUS_SPACE_MAXADDR,
2040 	    NULL, NULL,
2041 	    sense_size,
2042 	    1,
2043 	    sense_size,
2044 	    BUS_DMA_ALLOCNOW,
2045 	    NULL, NULL,
2046 	    &sc->sense_tag)) {
2047 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
2048 		return (ENOMEM);
2049 	}
2050 	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
2051 	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
2052 		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
2053 		return (ENOMEM);
2054 	}
2055 	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
2056 	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
2057 	    BUS_DMA_NOWAIT)) {
2058 		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
2059 		return (ENOMEM);
2060 	}
2061 
2062 	/*
2063 	 * Allocate for Event detail structure
2064 	 */
2065 	evt_detail_size = sizeof(struct mrsas_evt_detail);
2066 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2067 	    1, 0,
2068 	    BUS_SPACE_MAXADDR_32BIT,
2069 	    BUS_SPACE_MAXADDR,
2070 	    NULL, NULL,
2071 	    evt_detail_size,
2072 	    1,
2073 	    evt_detail_size,
2074 	    BUS_DMA_ALLOCNOW,
2075 	    NULL, NULL,
2076 	    &sc->evt_detail_tag)) {
2077 		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
2078 		return (ENOMEM);
2079 	}
2080 	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
2081 	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
2082 		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
2083 		return (ENOMEM);
2084 	}
2085 	bzero(sc->evt_detail_mem, evt_detail_size);
2086 	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
2087 	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
2088 	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
2089 		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
2090 		return (ENOMEM);
2091 	}
2092 
2093 	/*
2094 	 * Allocate for PD INFO structure
2095 	 */
2096 	pd_info_size = sizeof(struct mrsas_pd_info);
2097 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2098 	    1, 0,
2099 	    BUS_SPACE_MAXADDR_32BIT,
2100 	    BUS_SPACE_MAXADDR,
2101 	    NULL, NULL,
2102 	    pd_info_size,
2103 	    1,
2104 	    pd_info_size,
2105 	    BUS_DMA_ALLOCNOW,
2106 	    NULL, NULL,
2107 	    &sc->pd_info_tag)) {
2108 		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
2109 		return (ENOMEM);
2110 	}
2111 	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
2112 	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
2113 		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
2114 		return (ENOMEM);
2115 	}
2116 	bzero(sc->pd_info_mem, pd_info_size);
2117 	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
2118 	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
2119 	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
2120 		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
2121 		return (ENOMEM);
2122 	}
2123 
2124 	/*
2125 	 * Create a dma tag for data buffers; size will be the maximum
2126 	 * possible I/O size (280kB).
2127 	 */
2128 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2129 	    1,
2130 	    0,
2131 	    BUS_SPACE_MAXADDR,
2132 	    BUS_SPACE_MAXADDR,
2133 	    NULL, NULL,
2134 	    maxphys,
2135 	    sc->max_num_sge,		/* nsegments */
2136 	    maxphys,
2137 	    BUS_DMA_ALLOCNOW,
2138 	    busdma_lock_mutex,
2139 	    &sc->io_lock,
2140 	    &sc->data_tag)) {
2141 		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
2142 		return (ENOMEM);
2143 	}
2144 	return (0);
2145 }
2146 
2147 /*
2148  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
2149  * input:			callback argument, machine dependent type
2150  * 					that describes DMA segments, number of segments, error code
2151  *
2152  * This function is for the driver to receive mapping information resultant of
2153  * the bus_dmamap_load(). The information is actually not being used, but the
2154  * address is saved anyway.
2155  */
2156 void
2157 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2158 {
2159 	bus_addr_t *addr;
2160 
2161 	addr = arg;
2162 	*addr = segs[0].ds_addr;
2163 }
2164 
2165 /*
2166  * mrsas_setup_raidmap:	Set up RAID map.
2167  * input:				Adapter instance soft state
2168  *
2169  * Allocate DMA memory for the RAID maps and perform setup.
2170  */
2171 static int
2172 mrsas_setup_raidmap(struct mrsas_softc *sc)
2173 {
2174 	int i;
2175 
2176 	for (i = 0; i < 2; i++) {
2177 		sc->ld_drv_map[i] =
2178 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2179 		/* Do Error handling */
2180 		if (!sc->ld_drv_map[i]) {
2181 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2182 
2183 			if (i == 1)
2184 				free(sc->ld_drv_map[0], M_MRSAS);
2185 			/* ABORT driver initialization */
2186 			goto ABORT;
2187 		}
2188 	}
2189 
2190 	for (int i = 0; i < 2; i++) {
2191 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2192 		    4, 0,
2193 		    BUS_SPACE_MAXADDR_32BIT,
2194 		    BUS_SPACE_MAXADDR,
2195 		    NULL, NULL,
2196 		    sc->max_map_sz,
2197 		    1,
2198 		    sc->max_map_sz,
2199 		    BUS_DMA_ALLOCNOW,
2200 		    NULL, NULL,
2201 		    &sc->raidmap_tag[i])) {
2202 			device_printf(sc->mrsas_dev,
2203 			    "Cannot allocate raid map tag.\n");
2204 			return (ENOMEM);
2205 		}
2206 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2207 		    (void **)&sc->raidmap_mem[i],
2208 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2209 			device_printf(sc->mrsas_dev,
2210 			    "Cannot allocate raidmap memory.\n");
2211 			return (ENOMEM);
2212 		}
2213 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2214 
2215 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2216 		    sc->raidmap_mem[i], sc->max_map_sz,
2217 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2218 		    BUS_DMA_NOWAIT)) {
2219 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2220 			return (ENOMEM);
2221 		}
2222 		if (!sc->raidmap_mem[i]) {
2223 			device_printf(sc->mrsas_dev,
2224 			    "Cannot allocate memory for raid map.\n");
2225 			return (ENOMEM);
2226 		}
2227 	}
2228 
2229 	if (!mrsas_get_map_info(sc))
2230 		mrsas_sync_map_info(sc);
2231 
2232 	return (0);
2233 
2234 ABORT:
2235 	return (1);
2236 }
2237 
2238 /**
2239  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
2240  * @sc:				Adapter soft state
2241  *
2242  * Return 0 on success.
2243  */
2244 void
2245 megasas_setup_jbod_map(struct mrsas_softc *sc)
2246 {
2247 	int i;
2248 	uint32_t pd_seq_map_sz;
2249 
2250 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2251 	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2252 
2253 	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2254 		sc->use_seqnum_jbod_fp = 0;
2255 		return;
2256 	}
2257 	if (sc->jbodmap_mem[0])
2258 		goto skip_alloc;
2259 
2260 	for (i = 0; i < 2; i++) {
2261 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2262 		    4, 0,
2263 		    BUS_SPACE_MAXADDR_32BIT,
2264 		    BUS_SPACE_MAXADDR,
2265 		    NULL, NULL,
2266 		    pd_seq_map_sz,
2267 		    1,
2268 		    pd_seq_map_sz,
2269 		    BUS_DMA_ALLOCNOW,
2270 		    NULL, NULL,
2271 		    &sc->jbodmap_tag[i])) {
2272 			device_printf(sc->mrsas_dev,
2273 			    "Cannot allocate jbod map tag.\n");
2274 			return;
2275 		}
2276 		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2277 		    (void **)&sc->jbodmap_mem[i],
2278 		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2279 			device_printf(sc->mrsas_dev,
2280 			    "Cannot allocate jbod map memory.\n");
2281 			return;
2282 		}
2283 		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2284 
2285 		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2286 		    sc->jbodmap_mem[i], pd_seq_map_sz,
2287 		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2288 		    BUS_DMA_NOWAIT)) {
2289 			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2290 			return;
2291 		}
2292 		if (!sc->jbodmap_mem[i]) {
2293 			device_printf(sc->mrsas_dev,
2294 			    "Cannot allocate memory for jbod map.\n");
2295 			sc->use_seqnum_jbod_fp = 0;
2296 			return;
2297 		}
2298 	}
2299 
2300 skip_alloc:
2301 	if (!megasas_sync_pd_seq_num(sc, false) &&
2302 	    !megasas_sync_pd_seq_num(sc, true))
2303 		sc->use_seqnum_jbod_fp = 1;
2304 	else
2305 		sc->use_seqnum_jbod_fp = 0;
2306 
2307 	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2308 }
2309 
2310 /*
2311  * mrsas_init_fw:	Initialize Firmware
2312  * input:			Adapter soft state
2313  *
2314  * Calls transition_to_ready() to make sure Firmware is in operational state and
2315  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2316  * issues internal commands to get the controller info after the IOC_INIT
2317  * command response is received by Firmware.  Note:  code relating to
2318  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2319  * is left here as placeholder.
2320  */
2321 static int
2322 mrsas_init_fw(struct mrsas_softc *sc)
2323 {
2324 
2325 	int ret, loop, ocr = 0;
2326 	u_int32_t max_sectors_1;
2327 	u_int32_t max_sectors_2;
2328 	u_int32_t tmp_sectors;
2329 	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
2330 	int msix_enable = 0;
2331 	int fw_msix_count = 0;
2332 	int i, j;
2333 
2334 	/* Make sure Firmware is ready */
2335 	ret = mrsas_transition_to_ready(sc, ocr);
2336 	if (ret != SUCCESS) {
2337 		return (ret);
2338 	}
2339 	if (sc->is_ventura || sc->is_aero) {
2340 		scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
2341 #if VD_EXT_DEBUG
2342 		device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
2343 #endif
2344 		sc->maxRaidMapSize = ((scratch_pad_3 >>
2345 		    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
2346 		    MR_MAX_RAID_MAP_SIZE_MASK);
2347 	}
2348 	/* MSI-x index 0- reply post host index register */
2349 	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2350 	/* Check if MSI-X is supported while in ready state */
2351 	msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2352 
2353 	if (msix_enable) {
2354 		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2355 		    outbound_scratch_pad_2));
2356 
2357 		/* Check max MSI-X vectors */
2358 		if (sc->device_id == MRSAS_TBOLT) {
2359 			sc->msix_vectors = (scratch_pad_2
2360 			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2361 			fw_msix_count = sc->msix_vectors;
2362 		} else {
2363 			/* Invader/Fury supports 96 MSI-X vectors */
2364 			sc->msix_vectors = ((scratch_pad_2
2365 			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2366 			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2367 			fw_msix_count = sc->msix_vectors;
2368 
2369 			if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
2370 				((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
2371 				sc->msix_combined = true;
2372 			/*
2373 			 * Save 1-15 reply post index
2374 			 * address to local memory Index 0
2375 			 * is already saved from reg offset
2376 			 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
2377 			 */
2378 			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2379 			    loop++) {
2380 				sc->msix_reg_offset[loop] =
2381 				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2382 				    (loop * 0x10);
2383 			}
2384 		}
2385 
2386 		/* Don't bother allocating more MSI-X vectors than cpus */
2387 		sc->msix_vectors = min(sc->msix_vectors,
2388 		    mp_ncpus);
2389 
2390 		/* Allocate MSI-x vectors */
2391 		if (mrsas_allocate_msix(sc) == SUCCESS)
2392 			sc->msix_enable = 1;
2393 		else
2394 			sc->msix_enable = 0;
2395 
2396 		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2397 		    "Online CPU %d Current MSIX <%d>\n",
2398 		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2399 	}
2400 	/*
2401      * MSI-X host index 0 is common for all adapter.
2402      * It is used for all MPT based Adapters.
2403 	 */
2404 	if (sc->msix_combined) {
2405 		sc->msix_reg_offset[0] =
2406 		    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
2407 	}
2408 	if (mrsas_init_adapter(sc) != SUCCESS) {
2409 		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2410 		return (1);
2411 	}
2412 
2413 	if (sc->is_ventura || sc->is_aero) {
2414 		scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2415 		    outbound_scratch_pad_4));
2416 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
2417 			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
2418 
2419 		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
2420 	}
2421 
2422 	/* Allocate internal commands for pass-thru */
2423 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2424 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2425 		return (1);
2426 	}
2427 	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2428 	if (!sc->ctrl_info) {
2429 		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2430 		return (1);
2431 	}
2432 	/*
2433 	 * Get the controller info from FW, so that the MAX VD support
2434 	 * availability can be decided.
2435 	 */
2436 	if (mrsas_get_ctrl_info(sc)) {
2437 		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2438 		return (1);
2439 	}
2440 	sc->secure_jbod_support =
2441 	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2442 
2443 	if (sc->secure_jbod_support)
2444 		device_printf(sc->mrsas_dev, "FW supports SED \n");
2445 
2446 	if (sc->use_seqnum_jbod_fp)
2447 		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2448 
2449 	if (sc->support_morethan256jbod)
2450 		device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
2451 
2452 	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2453 		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2454 		    "There seems to be some problem in the controller\n"
2455 		    "Please contact to the SUPPORT TEAM if the problem persists\n");
2456 	}
2457 	megasas_setup_jbod_map(sc);
2458 
2459 	memset(sc->target_list, 0,
2460 		MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
2461 	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
2462 		sc->target_list[i].target_id = 0xffff;
2463 
2464 	/* For pass-thru, get PD/LD list and controller info */
2465 	memset(sc->pd_list, 0,
2466 	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2467 	if (mrsas_get_pd_list(sc) != SUCCESS) {
2468 		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2469 		return (1);
2470 	}
2471 	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2472 	if (mrsas_get_ld_list(sc) != SUCCESS) {
2473 		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2474 		return (1);
2475 	}
2476 
2477 	if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
2478 		sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
2479 						MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
2480 		if (!sc->streamDetectByLD) {
2481 			device_printf(sc->mrsas_dev,
2482 				"unable to allocate stream detection for pool of LDs\n");
2483 			return (1);
2484 		}
2485 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
2486 			sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
2487 			if (!sc->streamDetectByLD[i]) {
2488 				device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
2489 				for (j = 0; j < i; ++j)
2490 					free(sc->streamDetectByLD[j], M_MRSAS);
2491 				free(sc->streamDetectByLD, M_MRSAS);
2492 				sc->streamDetectByLD = NULL;
2493 				return (1);
2494 			}
2495 			memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
2496 			sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
2497 		}
2498 	}
2499 
2500 	/*
2501 	 * Compute the max allowed sectors per IO: The controller info has
2502 	 * two limits on max sectors. Driver should use the minimum of these
2503 	 * two.
2504 	 *
2505 	 * 1 << stripe_sz_ops.min = max sectors per strip
2506 	 *
2507 	 * Note that older firmwares ( < FW ver 30) didn't report information to
2508 	 * calculate max_sectors_1. So the number ended up as zero always.
2509 	 */
2510 	tmp_sectors = 0;
2511 	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2512 	    sc->ctrl_info->max_strips_per_io;
2513 	max_sectors_2 = sc->ctrl_info->max_request_size;
2514 	tmp_sectors = min(max_sectors_1, max_sectors_2);
2515 	sc->max_sectors_per_req = (sc->max_num_sge - 1) * MRSAS_PAGE_SIZE / 512;
2516 
2517 	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2518 		sc->max_sectors_per_req = tmp_sectors;
2519 
2520 	sc->disableOnlineCtrlReset =
2521 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2522 	sc->UnevenSpanSupport =
2523 	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2524 	if (sc->UnevenSpanSupport) {
2525 		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2526 		    sc->UnevenSpanSupport);
2527 
2528 		if (MR_ValidateMapInfo(sc))
2529 			sc->fast_path_io = 1;
2530 		else
2531 			sc->fast_path_io = 0;
2532 	}
2533 
2534 	device_printf(sc->mrsas_dev, "max_fw_cmds: %u  max_scsi_cmds: %u\n",
2535 		sc->max_fw_cmds, sc->max_scsi_cmds);
2536 	return (0);
2537 }
2538 
2539 /*
2540  * mrsas_init_adapter:	Initializes the adapter/controller
2541  * input:				Adapter soft state
2542  *
2543  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2544  * ROC/controller.  The FW register is read to determined the number of
2545  * commands that is supported.  All memory allocations for IO is based on
2546  * max_cmd.  Appropriate calculations are performed in this function.
2547  */
2548 int
2549 mrsas_init_adapter(struct mrsas_softc *sc)
2550 {
2551 	uint32_t status;
2552 	u_int32_t scratch_pad_2;
2553 	int ret;
2554 	int i = 0;
2555 
2556 	/* Read FW status register */
2557 	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2558 
2559 	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2560 
2561 	/* Decrement the max supported by 1, to correlate with FW */
2562 	sc->max_fw_cmds = sc->max_fw_cmds - 1;
2563 	sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;
2564 
2565 	/* Determine allocation size of command frames */
2566 	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
2567 	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
2568 	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2569 	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
2570 	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
2571 	scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2572 	    outbound_scratch_pad_2));
2573 
2574 	mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x,"
2575 	    "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x,"
2576 	    "sc->io_frames_alloc_sz 0x%x\n", __func__,
2577 	    sc->reply_q_depth, sc->request_alloc_sz,
2578 	    sc->reply_alloc_sz, sc->io_frames_alloc_sz);
2579 
2580 	/*
2581 	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2582 	 * Firmware support extended IO chain frame which is 4 time more
2583 	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2584 	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
2585 	 */
2586 	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2587 		sc->max_chain_frame_sz =
2588 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2589 		    * MEGASAS_1MB_IO;
2590 	else
2591 		sc->max_chain_frame_sz =
2592 		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2593 		    * MEGASAS_256K_IO;
2594 
2595 	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
2596 	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2597 	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2598 
2599 	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
2600 	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2601 
2602 	mrsas_dprint(sc, MRSAS_INFO,
2603 	    "max sge: 0x%x, max chain frame size: 0x%x, "
2604 	    "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
2605 	    sc->max_num_sge,
2606 	    sc->max_chain_frame_sz, sc->max_fw_cmds,
2607 	    sc->chain_frames_alloc_sz);
2608 
2609 	/* Used for pass thru MFI frame (DCMD) */
2610 	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2611 
2612 	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2613 	    sizeof(MPI2_SGE_IO_UNION)) / 16;
2614 
2615 	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2616 
2617 	for (i = 0; i < count; i++)
2618 		sc->last_reply_idx[i] = 0;
2619 
2620 	ret = mrsas_alloc_mem(sc);
2621 	if (ret != SUCCESS)
2622 		return (ret);
2623 
2624 	ret = mrsas_alloc_mpt_cmds(sc);
2625 	if (ret != SUCCESS)
2626 		return (ret);
2627 
2628 	ret = mrsas_ioc_init(sc);
2629 	if (ret != SUCCESS)
2630 		return (ret);
2631 
2632 	return (0);
2633 }
2634 
2635 /*
2636  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2637  * input:				Adapter soft state
2638  *
2639  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2640  */
2641 int
2642 mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2643 {
2644 	int ioc_init_size;
2645 
2646 	/* Allocate IOC INIT command */
2647 	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2648 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2649 	    1, 0,
2650 	    BUS_SPACE_MAXADDR_32BIT,
2651 	    BUS_SPACE_MAXADDR,
2652 	    NULL, NULL,
2653 	    ioc_init_size,
2654 	    1,
2655 	    ioc_init_size,
2656 	    BUS_DMA_ALLOCNOW,
2657 	    NULL, NULL,
2658 	    &sc->ioc_init_tag)) {
2659 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2660 		return (ENOMEM);
2661 	}
2662 	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2663 	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2664 		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2665 		return (ENOMEM);
2666 	}
2667 	bzero(sc->ioc_init_mem, ioc_init_size);
2668 	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2669 	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2670 	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2671 		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2672 		return (ENOMEM);
2673 	}
2674 	return (0);
2675 }
2676 
2677 /*
2678  * mrsas_free_ioc_cmd:	Allocates memory for IOC Init command
2679  * input:				Adapter soft state
2680  *
2681  * Deallocates memory of the IOC Init cmd.
2682  */
2683 void
2684 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2685 {
2686 	if (sc->ioc_init_phys_mem)
2687 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2688 	if (sc->ioc_init_mem != NULL)
2689 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2690 	if (sc->ioc_init_tag != NULL)
2691 		bus_dma_tag_destroy(sc->ioc_init_tag);
2692 }
2693 
2694 /*
2695  * mrsas_ioc_init:	Sends IOC Init command to FW
2696  * input:			Adapter soft state
2697  *
2698  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2699  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/*
	 * Scratch pad 2 advertises whether the FW can service SYNC_CACHE;
	 * skip the probe when the tunable has blocked sync-cache support.
	 */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/* The MPI2 IOC INIT message is placed 1KB into the IOC INIT buffer. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
	IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
	IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
	IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
	IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
	IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	/* The MFI init frame at offset 0 wraps the MPI2 message above. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;	/* sentinel; FW overwrites it on completion */
	init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* driver support Extended MSIX */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Hand the driver version string to FW via its physical address. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;

	/* Convert the assembled capability bitfield to little endian in place. */
	init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg);

	/* Point the init frame at the MPI2 message (the 1KB offset above). */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr);
	init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t));

	/* Submit the init frame itself as an MFA-type request descriptor. */
	req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem);
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		/* 0xFF still set means no response at all (timeout). */
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* Aero controllers may support 32-bit atomic request descriptors. */
	if (sc->is_aero) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->atomic_desc_support = (scratch_pad_2 &
			MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
		device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
			sc->atomic_desc_support ? "Yes" : "No");
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2808 
2809 /*
2810  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2811  * input:					Adapter instance soft state
2812  *
2813  * This function allocates the internal commands for IOs. Each command that is
2814  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2815  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2816  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2817  * max_fw_cmds.
2818  */
2819 int
2820 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2821 {
2822 	int i, j;
2823 	u_int32_t max_fw_cmds, count;
2824 	struct mrsas_mpt_cmd *cmd;
2825 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2826 	u_int32_t offset, chain_offset, sense_offset;
2827 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2828 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2829 
2830 	max_fw_cmds = sc->max_fw_cmds;
2831 
2832 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2833 	if (!sc->req_desc) {
2834 		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2835 		return (ENOMEM);
2836 	}
2837 	memset(sc->req_desc, 0, sc->request_alloc_sz);
2838 
2839 	/*
2840 	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2841 	 * Allocate the dynamic array first and then allocate individual
2842 	 * commands.
2843 	 */
2844 	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
2845 	    M_MRSAS, M_NOWAIT);
2846 	if (!sc->mpt_cmd_list) {
2847 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2848 		return (ENOMEM);
2849 	}
2850 	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
2851 	for (i = 0; i < max_fw_cmds; i++) {
2852 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2853 		    M_MRSAS, M_NOWAIT);
2854 		if (!sc->mpt_cmd_list[i]) {
2855 			for (j = 0; j < i; j++)
2856 				free(sc->mpt_cmd_list[j], M_MRSAS);
2857 			free(sc->mpt_cmd_list, M_MRSAS);
2858 			sc->mpt_cmd_list = NULL;
2859 			return (ENOMEM);
2860 		}
2861 	}
2862 
2863 	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2864 	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2865 	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2866 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2867 	sense_base = (u_int8_t *)sc->sense_mem;
2868 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2869 	for (i = 0; i < max_fw_cmds; i++) {
2870 		cmd = sc->mpt_cmd_list[i];
2871 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2872 		chain_offset = sc->max_chain_frame_sz * i;
2873 		sense_offset = MRSAS_SENSE_LEN * i;
2874 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2875 		cmd->index = i + 1;
2876 		cmd->ccb_ptr = NULL;
2877 		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2878 		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
2879 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2880 		cmd->sc = sc;
2881 		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2882 		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2883 		cmd->io_request_phys_addr = io_req_base_phys + offset;
2884 		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2885 		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2886 		cmd->sense = sense_base + sense_offset;
2887 		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2888 		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2889 			return (FAIL);
2890 		}
2891 		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2892 	}
2893 
2894 	/* Initialize reply descriptor array to 0xFFFFFFFF */
2895 	reply_desc = sc->reply_desc_mem;
2896 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2897 	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2898 		reply_desc->Words = MRSAS_ULONG_MAX;
2899 	}
2900 	return (0);
2901 }
2902 
2903 /*
 * mrsas_write_64bit_req_desc:	Writes 64 bit request descriptor to FW
2905  * input:			Adapter softstate
2906  * 				request descriptor address low
2907  * 				request descriptor address high
2908  */
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The descriptor is posted as two 32-bit register writes (low
	 * word, then high word).  pci_lock keeps concurrent submitters
	 * from interleaving their low/high pairs.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    le32toh(req_desc_lo));
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    le32toh(req_desc_hi));
	mtx_unlock(&sc->pci_lock);
}
2920 
2921 /*
2922  * mrsas_fire_cmd:	Sends command to FW
2923  * input:		Adapter softstate
2924  * 			request descriptor address low
2925  * 			request descriptor address high
2926  *
2927  * This functions fires the command to Firmware by writing to the
2928  * inbound_low_queue_port and inbound_high_queue_port.
2929  */
2930 void
2931 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2932     u_int32_t req_desc_hi)
2933 {
2934 	if (sc->atomic_desc_support)
2935 		mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2936 		    le32toh(req_desc_lo));
2937 	else
2938 		mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2939 }
2940 
2941 /*
2942  * mrsas_transition_to_ready:  Move FW to Ready state input:
2943  * Adapter instance soft state
2944  *
 * During initialization, the FW can potentially be in any one of several
 * possible states. If the FW is in the operational or waiting-for-handshake states,
2947  * driver must take steps to bring it to ready state. Otherwise, it has to
2948  * wait for the ready state.
2949  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state __unused;
	u_int32_t abs_state, curr_abs_state;

	/* The FW state is encoded in scratch pad 0, masked by MFI_STATE_MASK. */
	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Raw register value; used below to detect any transition. */
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			if (ocr) {
				/* During OCR, wait out the FAULT like other states. */
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* Acknowledge the boot message so FW can proceed. */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for FW to drop doorbell bit 0 (1 ms granularity). */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			/* Any change in the full register counts as progress. */
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
3055 
3056 /*
3057  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
3058  * input:				Adapter soft state
3059  *
3060  * This function removes an MFI command from the command list.
3061  */
3062 struct mrsas_mfi_cmd *
3063 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3064 {
3065 	struct mrsas_mfi_cmd *cmd = NULL;
3066 
3067 	mtx_lock(&sc->mfi_cmd_pool_lock);
3068 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3069 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3070 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3071 	}
3072 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3073 
3074 	return cmd;
3075 }
3076 
3077 /*
3078  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
3079  * input:				Adapter Context.
3080  *
3081  * This function will check FW status register and flag do_timeout_reset flag.
3082  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
3083  * trigger reset.
3084  */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;
	u_int8_t tm_target_reset_failed = 0;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/*
		 * Sleep for mrsas_fw_fault_check_delay seconds (or until
		 * explicitly woken via ocr_chan), then re-check FW health.
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		/* Read the current FW state from scratch pad 0. */
		fw_status = mrsas_read_reg_with_retries(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
			mrsas_atomic_read(&sc->target_reset_outstanding)) {
			/* First, freeze further IOs to come to the SIM */
			mrsas_xpt_freeze(sc);

			/* If this is an IO timeout then go for target reset */
			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
				device_printf(sc->mrsas_dev, "Initiating Target RESET "
				    "because of SCSI IO timeout!\n");

				/* Let the remaining IOs to complete */
				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
				      "mrsas_reset_targets", 5 * hz);

				/* Try to reset the target device */
				if (mrsas_reset_targets(sc) == FAIL)
					tm_target_reset_failed = 1;
			}

			/* If this is a DCMD timeout or FW fault,
			 * then go for controller reset
			 */
			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
				if (tm_target_reset_failed)
					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
					    "TM FAILURE!\n");
				else
					device_printf(sc->mrsas_dev, "Initiaiting OCR "
						"because of %s!\n", sc->do_timedout_reset ?
						"DCMD IO Timeout" : "FW fault");

				/* Flag in-progress state so IOCTLs back off. */
				mtx_lock_spin(&sc->ioctl_lock);
				sc->reset_in_progress = 1;
				mtx_unlock_spin(&sc->ioctl_lock);
				sc->reset_count++;

				/*
				 * Wait for the AEN task to be completed if it is running.
				 */
				mtx_unlock(&sc->sim_lock);
				taskqueue_drain(sc->ev_tq, &sc->ev_task);
				mtx_lock(&sc->sim_lock);

				/* Block AEN processing for the duration of the reset. */
				taskqueue_block(sc->ev_tq);
				/* Try to reset the controller */
				mrsas_reset_ctrl(sc, sc->do_timedout_reset);

				/* Clear all reset bookkeeping for the next cycle. */
				sc->do_timedout_reset = 0;
				sc->reset_in_progress = 0;
				tm_target_reset_failed = 0;
				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
				memset(sc->target_reset_pool, 0,
				    sizeof(sc->target_reset_pool));
				taskqueue_unblock(sc->ev_tq);
			}

			/* Now allow IOs to come to the SIM */
			 mrsas_xpt_release(sc);
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
3177 
3178 /*
3179  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
3180  * input:					Adapter Context.
3181  *
 * This function clears the reply descriptors so that, after OCR, the driver
 * and FW discard any stale completion history.
3184  */
3185 void
3186 mrsas_reset_reply_desc(struct mrsas_softc *sc)
3187 {
3188 	int i, count;
3189 	pMpi2ReplyDescriptorsUnion_t reply_desc;
3190 
3191 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3192 	for (i = 0; i < count; i++)
3193 		sc->last_reply_idx[i] = 0;
3194 
3195 	reply_desc = sc->reply_desc_mem;
3196 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3197 		reply_desc->Words = MRSAS_ULONG_MAX;
3198 	}
3199 }
3200 
3201 /*
3202  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
3203  * input:				Adapter Context.
3204  *
3205  * This function will run from thread context so that it can sleep. 1. Do not
3206  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3207  * to complete for 180 seconds. 3. If #2 does not find any outstanding
3208  * command Controller is in working state, so skip OCR. Otherwise, do
3209  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3210  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3211  * OCR, Re-fire Management command and move Controller to Operation state.
3212  */
3213 int
3214 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3215 {
3216 	int retval = SUCCESS, i, j, retry = 0;
3217 	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3218 	union ccb *ccb;
3219 	struct mrsas_mfi_cmd *mfi_cmd;
3220 	struct mrsas_mpt_cmd *mpt_cmd;
3221 	union mrsas_evt_class_locale class_locale;
3222 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3223 
3224 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3225 		device_printf(sc->mrsas_dev,
3226 		    "mrsas: Hardware critical error, returning FAIL.\n");
3227 		return FAIL;
3228 	}
3229 	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3230 	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3231 	mrsas_disable_intr(sc);
3232 	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3233 	    sc->mrsas_fw_fault_check_delay * hz);
3234 
3235 	/* First try waiting for commands to complete */
3236 	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3237 		mrsas_dprint(sc, MRSAS_OCR,
3238 		    "resetting adapter from %s.\n",
3239 		    __func__);
3240 		/* Now return commands back to the CAM layer */
3241 		mtx_unlock(&sc->sim_lock);
3242 		for (i = 0; i < sc->max_fw_cmds; i++) {
3243 			mpt_cmd = sc->mpt_cmd_list[i];
3244 
3245 			if (mpt_cmd->peer_cmd) {
3246 				mrsas_dprint(sc, MRSAS_OCR,
3247 				    "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3248 				    i, mpt_cmd, mpt_cmd->peer_cmd);
3249 			}
3250 
3251 			if (mpt_cmd->ccb_ptr) {
3252 				if (mpt_cmd->callout_owner) {
3253 					ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3254 					ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3255 					mrsas_cmd_done(sc, mpt_cmd);
3256 				} else {
3257 					mpt_cmd->ccb_ptr = NULL;
3258 					mrsas_release_mpt_cmd(mpt_cmd);
3259 				}
3260 			}
3261 		}
3262 
3263 		mrsas_atomic_set(&sc->fw_outstanding, 0);
3264 
3265 		mtx_lock(&sc->sim_lock);
3266 
3267 		status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3268 		    outbound_scratch_pad));
3269 		abs_state = status_reg & MFI_STATE_MASK;
3270 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
3271 		if (sc->disableOnlineCtrlReset ||
3272 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3273 			/* Reset not supported, kill adapter */
3274 			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3275 			mrsas_kill_hba(sc);
3276 			retval = FAIL;
3277 			goto out;
3278 		}
3279 		/* Now try to reset the chip */
3280 		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3281 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3282 			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
3283 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3284 			    MPI2_WRSEQ_1ST_KEY_VALUE);
3285 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3286 			    MPI2_WRSEQ_2ND_KEY_VALUE);
3287 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3288 			    MPI2_WRSEQ_3RD_KEY_VALUE);
3289 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3290 			    MPI2_WRSEQ_4TH_KEY_VALUE);
3291 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3292 			    MPI2_WRSEQ_5TH_KEY_VALUE);
3293 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3294 			    MPI2_WRSEQ_6TH_KEY_VALUE);
3295 
3296 			/* Check that the diag write enable (DRWE) bit is on */
3297 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3298 			    fusion_host_diag));
3299 			retry = 0;
3300 			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3301 				DELAY(100 * 1000);
3302 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3303 				    fusion_host_diag));
3304 				if (retry++ == 100) {
3305 					mrsas_dprint(sc, MRSAS_OCR,
3306 					    "Host diag unlock failed!\n");
3307 					break;
3308 				}
3309 			}
3310 			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3311 				continue;
3312 
3313 			/* Send chip reset command */
3314 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3315 			    host_diag | HOST_DIAG_RESET_ADAPTER);
3316 			DELAY(3000 * 1000);
3317 
3318 			/* Make sure reset adapter bit is cleared */
3319 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3320 			    fusion_host_diag));
3321 			retry = 0;
3322 			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3323 				DELAY(100 * 1000);
3324 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3325 				    fusion_host_diag));
3326 				if (retry++ == 1000) {
3327 					mrsas_dprint(sc, MRSAS_OCR,
3328 					    "Diag reset adapter never cleared!\n");
3329 					break;
3330 				}
3331 			}
3332 			if (host_diag & HOST_DIAG_RESET_ADAPTER)
3333 				continue;
3334 
3335 			abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3336 			    outbound_scratch_pad)) & MFI_STATE_MASK;
3337 			retry = 0;
3338 
3339 			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3340 				DELAY(100 * 1000);
3341 				abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3342 				    outbound_scratch_pad)) & MFI_STATE_MASK;
3343 			}
3344 			if (abs_state <= MFI_STATE_FW_INIT) {
3345 				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3346 				    " state = 0x%x\n", abs_state);
3347 				continue;
3348 			}
3349 			/* Wait for FW to become ready */
3350 			if (mrsas_transition_to_ready(sc, 1)) {
3351 				mrsas_dprint(sc, MRSAS_OCR,
3352 				    "mrsas: Failed to transition controller to ready.\n");
3353 				continue;
3354 			}
3355 			mrsas_reset_reply_desc(sc);
3356 			if (mrsas_ioc_init(sc)) {
3357 				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3358 				continue;
3359 			}
3360 			for (j = 0; j < sc->max_fw_cmds; j++) {
3361 				mpt_cmd = sc->mpt_cmd_list[j];
3362 				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3363 					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3364 					/* If not an IOCTL then release the command else re-fire */
3365 					if (!mfi_cmd->sync_cmd) {
3366 						mrsas_release_mfi_cmd(mfi_cmd);
3367 					} else {
3368 						req_desc = mrsas_get_request_desc(sc,
3369 						    mfi_cmd->cmd_id.context.smid - 1);
3370 						mrsas_dprint(sc, MRSAS_OCR,
3371 						    "Re-fire command DCMD opcode 0x%x index %d\n ",
3372 						    mfi_cmd->frame->dcmd.opcode, j);
3373 						if (!req_desc)
3374 							device_printf(sc->mrsas_dev,
3375 							    "Cannot build MPT cmd.\n");
3376 						else
3377 							mrsas_fire_cmd(sc, req_desc->addr.u.low,
3378 							    req_desc->addr.u.high);
3379 					}
3380 				}
3381 			}
3382 
3383 			/* Reset load balance info */
3384 			memset(sc->load_balance_info, 0,
3385 			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3386 
3387 			if (mrsas_get_ctrl_info(sc)) {
3388 				mrsas_kill_hba(sc);
3389 				retval = FAIL;
3390 				goto out;
3391 			}
3392 			if (!mrsas_get_map_info(sc))
3393 				mrsas_sync_map_info(sc);
3394 
3395 			megasas_setup_jbod_map(sc);
3396 
3397 			if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3398 				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3399 					memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3400 					sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3401 				}
3402 			}
3403 
3404 			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3405 			mrsas_enable_intr(sc);
3406 			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3407 
3408 			/* Register AEN with FW for last sequence number */
3409 			class_locale.members.reserved = 0;
3410 			class_locale.members.locale = MR_EVT_LOCALE_ALL;
3411 			class_locale.members.class = MR_EVT_CLASS_DEBUG;
3412 
3413 			mtx_unlock(&sc->sim_lock);
3414 			if (mrsas_register_aen(sc, sc->last_seq_num,
3415 			    class_locale.word)) {
3416 				device_printf(sc->mrsas_dev,
3417 				    "ERROR: AEN registration FAILED from OCR !!! "
3418 				    "Further events from the controller cannot be notified."
3419 				    "Either there is some problem in the controller"
3420 				    "or the controller does not support AEN.\n"
3421 				    "Please contact to the SUPPORT TEAM if the problem persists\n");
3422 			}
3423 			mtx_lock(&sc->sim_lock);
3424 
3425 			/* Adapter reset completed successfully */
3426 			device_printf(sc->mrsas_dev, "Reset successful\n");
3427 			retval = SUCCESS;
3428 			goto out;
3429 		}
3430 		/* Reset failed, kill the adapter */
3431 		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3432 		mrsas_kill_hba(sc);
3433 		retval = FAIL;
3434 	} else {
3435 		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3436 		mrsas_enable_intr(sc);
3437 		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3438 	}
3439 out:
3440 	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3441 	mrsas_dprint(sc, MRSAS_OCR,
3442 	    "Reset Exit with %d.\n", retval);
3443 	return retval;
3444 }
3445 
3446 /*
3447  * mrsas_kill_hba:	Kill HBA when OCR is not supported
3448  * input:			Adapter Context.
3449  *
3450  * This function will kill HBA when OCR is not supported.
3451  */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead before touching the hardware. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* NOTE(review): 1 s pause before MFI_STOP_ADP; rationale not documented here. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Wake any IOCTL waiters so they see the dead-adapter status. */
	mrsas_complete_outstanding_ioctls(sc);
}
3464 
3465 /**
3466  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3467  * input:			Controller softc
3468  *
3469  * Returns void
3470  */
3471 void
3472 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3473 {
3474 	int i;
3475 	struct mrsas_mpt_cmd *cmd_mpt;
3476 	struct mrsas_mfi_cmd *cmd_mfi;
3477 	u_int32_t count, MSIxIndex;
3478 
3479 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3480 	for (i = 0; i < sc->max_fw_cmds; i++) {
3481 		cmd_mpt = sc->mpt_cmd_list[i];
3482 
3483 		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3484 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3485 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3486 				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3487 					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3488 					    cmd_mpt->io_request->RaidContext.raid_context.status);
3489 			}
3490 		}
3491 	}
3492 }
3493 
3494 /*
3495  * mrsas_wait_for_outstanding:	Wait for outstanding commands
3496  * input:						Adapter Context.
3497  *
3498  * This function will wait for 180 seconds for outstanding commands to be
3499  * completed.
3500  */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	/* Poll once per second for up to MRSAS_RESET_WAIT_TIME seconds. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/* Drain completions on every reply queue (sim_lock dropped). */
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		/* DCMD timeouts skip the wait entirely; reset right away. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically log progress and help drain the reply queues. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);	/* 1 second between polls */
	}

	/* Commands still outstanding after the full wait => reset needed. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3559 
3560 /*
3561  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3562  * input:					Command packet for return to free cmd pool
3563  *
3564  * This function returns the MFI & MPT command to the command list.
3565  */
3566 void
3567 mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
3568 {
3569 	struct mrsas_softc *sc = cmd_mfi->sc;
3570 	struct mrsas_mpt_cmd *cmd_mpt;
3571 
3572 	mtx_lock(&sc->mfi_cmd_pool_lock);
3573 	/*
3574 	 * Release the mpt command (if at all it is allocated
3575 	 * associated with the mfi command
3576 	 */
3577 	if (cmd_mfi->cmd_id.context.smid) {
3578 		mtx_lock(&sc->mpt_cmd_pool_lock);
3579 		/* Get the mpt cmd from mfi cmd frame's smid value */
3580 		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
3581 		cmd_mpt->flags = 0;
3582 		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
3583 		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
3584 		mtx_unlock(&sc->mpt_cmd_pool_lock);
3585 	}
3586 	/* Release the mfi command */
3587 	cmd_mfi->ccb_ptr = NULL;
3588 	cmd_mfi->cmd_id.frame_count = 0;
3589 	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
3590 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3591 
3592 	return;
3593 }
3594 
3595 /*
3596  * mrsas_get_controller_info:	Returns FW's controller structure
3597  * input:						Adapter soft state
3598  * 								Controller information structure
3599  *
3600  * Issues an internal command (DCMD) to get the FW's controller structure. This
3601  * information is mainly used to find out the maximum IO transfer per command
3602  * supported by the FW.
3603  */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;	/* assume timeout until the DCMD completes */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* Temporary DMA buffer that FW fills with the controller info. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build a READ DCMD; 0xFF cmd_status is overwritten on completion. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	/*
	 * NOTE(review): flags is not wrapped in htole16() here, unlike e.g.
	 * mrsas_get_ld_map_info() — confirm whether this matters on
	 * big-endian hosts.
	 */
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info));

	/* With interrupts masked (early attach/OCR) we must poll instead. */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else {
		/* Copy out and byteswap the fields the driver consults. */
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
		le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties);
		le32_to_cpus(&sc->ctrl_info->adapterOperations2);
		le32_to_cpus(&sc->ctrl_info->adapterOperations3);
		le16_to_cpus(&sc->ctrl_info->adapterOperations4);
	}

	do_ocr = 0;		/* completed (or failed fast): no OCR needed */
	mrsas_update_ext_vd_details(sc);

	/* Cache the FW capability bits used by the fast-path/JBOD code. */
	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
	sc->support_morethan256jbod =
		sc->ctrl_info->adapterOperations4.supportPdMapTargetId;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/* A timed-out DCMD schedules an online controller reset (OCR). */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/*
	 * In the polled (interrupts masked) case the cmd is kept; the
	 * blocked path releases it here once the waiter has returned.
	 */
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3675 
3676 /*
3677  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3678  * input:
3679  *	sc - Controller's softc
3680 */
3681 static void
3682 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3683 {
3684 	u_int32_t ventura_map_sz = 0;
3685 	sc->max256vdSupport =
3686 		sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3687 
3688 	/* Below is additional check to address future FW enhancement */
3689 	if (sc->ctrl_info->max_lds > 64)
3690 		sc->max256vdSupport = 1;
3691 
3692 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3693 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3694 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3695 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3696 	if (sc->max256vdSupport) {
3697 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3698 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3699 	} else {
3700 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3701 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3702 	}
3703 
3704 	if (sc->maxRaidMapSize) {
3705 		ventura_map_sz = sc->maxRaidMapSize *
3706 		    MR_MIN_MAP_SIZE;
3707 		sc->current_map_sz = ventura_map_sz;
3708 		sc->max_map_sz = ventura_map_sz;
3709 	} else {
3710 		sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3711 		    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3712 		sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3713 		sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3714 		if (sc->max256vdSupport)
3715 			sc->current_map_sz = sc->new_map_sz;
3716 		else
3717 			sc->current_map_sz = sc->old_map_sz;
3718 	}
3719 
3720 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3721 #if VD_EXT_DEBUG
3722 	device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3723 	    sc->maxRaidMapSize);
3724 	device_printf(sc->mrsas_dev,
3725 	    "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3726 	    "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3727 	    "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3728 	    sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3729 	    sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3730 #endif
3731 }
3732 
3733 /*
3734  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3735  * input:						Adapter soft state
3736  *
3737  * Allocates DMAable memory for the controller info internal command.
3738  */
3739 int
3740 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3741 {
3742 	int ctlr_info_size;
3743 
3744 	/* Allocate get controller info command */
3745 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3746 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3747 	    1, 0,
3748 	    BUS_SPACE_MAXADDR_32BIT,
3749 	    BUS_SPACE_MAXADDR,
3750 	    NULL, NULL,
3751 	    ctlr_info_size,
3752 	    1,
3753 	    ctlr_info_size,
3754 	    BUS_DMA_ALLOCNOW,
3755 	    NULL, NULL,
3756 	    &sc->ctlr_info_tag)) {
3757 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3758 		return (ENOMEM);
3759 	}
3760 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3761 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3762 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3763 		return (ENOMEM);
3764 	}
3765 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3766 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3767 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3768 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3769 		return (ENOMEM);
3770 	}
3771 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3772 	return (0);
3773 }
3774 
3775 /*
3776  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3777  * input:						Adapter soft state
3778  *
3779  * Deallocates memory of the get controller info cmd.
3780  */
3781 void
3782 mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3783 {
3784 	if (sc->ctlr_info_phys_addr)
3785 		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3786 	if (sc->ctlr_info_mem != NULL)
3787 		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3788 	if (sc->ctlr_info_tag != NULL)
3789 		bus_dma_tag_destroy(sc->ctlr_info_tag);
3790 }
3791 
3792 /*
3793  * mrsas_issue_polled:	Issues a polling command
3794  * inputs:				Adapter soft state
3795  * 						Command packet to be issued
3796  *
3797  * This function is for posting of internal commands to Firmware.  MFI requires
 * the cmd_status to be set to 0xFF before posting.  The maximum wait time of
3799  * the poll response timer is 180 seconds.
3800  */
3801 int
3802 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3803 {
3804 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3805 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3806 	int i, retcode = SUCCESS;
3807 
3808 	frame_hdr->cmd_status = 0xFF;
3809 	frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
3810 
3811 	/* Issue the frame using inbound queue port */
3812 	if (mrsas_issue_dcmd(sc, cmd)) {
3813 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3814 		return (1);
3815 	}
3816 	/*
3817 	 * Poll response timer to wait for Firmware response.  While this
3818 	 * timer with the DELAY call could block CPU, the time interval for
3819 	 * this is only 1 millisecond.
3820 	 */
3821 	if (frame_hdr->cmd_status == 0xFF) {
3822 		for (i = 0; i < (max_wait * 1000); i++) {
3823 			if (frame_hdr->cmd_status == 0xFF)
3824 				DELAY(1000);
3825 			else
3826 				break;
3827 		}
3828 	}
3829 	if (frame_hdr->cmd_status == 0xFF) {
3830 		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3831 		    "seconds from %s\n", max_wait, __func__);
3832 		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3833 		    cmd->frame->dcmd.opcode);
3834 		retcode = ETIMEDOUT;
3835 	}
3836 	return (retcode);
3837 }
3838 
3839 /*
3840  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3841  * input:				Adapter soft state mfi cmd pointer
3842  *
3843  * This function is called by mrsas_issued_blocked_cmd() and
3844  * mrsas_issued_polled(), to build the MPT command and then fire the command
3845  * to Firmware.
3846  */
3847 int
3848 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3849 {
3850 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3851 
3852 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3853 	if (!req_desc) {
3854 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3855 		return (1);
3856 	}
3857 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3858 
3859 	return (0);
3860 }
3861 
3862 /*
3863  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3864  * input:				Adapter soft state mfi cmd to build
3865  *
3866  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3867  * command and prepares the MPT command to send to Firmware.
3868  */
3869 MRSAS_REQUEST_DESCRIPTOR_UNION *
3870 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3871 {
3872 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3873 	u_int16_t index;
3874 
3875 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3876 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3877 		return NULL;
3878 	}
3879 	index = cmd->cmd_id.context.smid;
3880 
3881 	req_desc = mrsas_get_request_desc(sc, index - 1);
3882 	if (!req_desc)
3883 		return NULL;
3884 
3885 	req_desc->addr.Words = 0;
3886 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3887 
3888 	req_desc->SCSIIO.SMID = htole16(index);
3889 
3890 	return (req_desc);
3891 }
3892 
3893 /*
3894  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3895  * input:						Adapter soft state mfi cmd pointer
3896  *
3897  * The MPT command and the io_request are setup as a passthru command. The SGE
3898  * chain address is set to frame_phys_addr of the MFI command.
3899  */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);	/* non-zero: caller treats as build failure */

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so the completion path can find the MFI command. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	/* On gen3/Ventura/Aero, clear the flags of the last main-msg SGE. */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	/* SGLOffset0 is expressed in 32-bit dwords. */
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* The chain element points at the MFI frame itself. */
	mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr);

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz);

	return (0);
}
3949 
3950 /*
3951  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3952  * input:					Adapter soft state Command to be issued
3953  *
3954  * This function waits on an event for the command to be returned from the ISR.
3955  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3956  * internal and ioctl commands.
3957  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;	/* 0xFF means "still pending" */

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): this stores the address of the local parameter
	 * `cmd`, which goes out of scope on return.  The actual sleep
	 * channel used by tsleep() below and wakeup_one() in
	 * mrsas_wakeup() is &sc->chan (the field's address), so the
	 * stored value appears vestigial — confirm before relying on it.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1s slices until completion (or timeout for non-IOCTL). */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}
	sc->chan = NULL;

	/* If still pending, report the timeout as ETIMEDOUT (overrides 1). */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
4003 
4004 /*
4005  * mrsas_complete_mptmfi_passthru:	Completes a command
4006  * input:	@sc:					Adapter soft state
4007  * 			@cmd:					Command to be completed
4008  * 			@status:				cmd completion status
4009  *
4010  * This function is called from mrsas_complete_cmd() after an interrupt is
4011  * received from Firmware, and io_request->Function is
4012  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
4013  */
4014 void
4015 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
4016     u_int8_t status)
4017 {
4018 	struct mrsas_header *hdr = &cmd->frame->hdr;
4019 	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
4020 
4021 	/* Reset the retry counter for future re-tries */
4022 	cmd->retry_for_fw_reset = 0;
4023 
4024 	if (cmd->ccb_ptr)
4025 		cmd->ccb_ptr = NULL;
4026 
4027 	switch (hdr->cmd) {
4028 	case MFI_CMD_INVALID:
4029 		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
4030 		break;
4031 	case MFI_CMD_PD_SCSI_IO:
4032 	case MFI_CMD_LD_SCSI_IO:
4033 		/*
4034 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
4035 		 * issued either through an IO path or an IOCTL path. If it
4036 		 * was via IOCTL, we will send it to internal completion.
4037 		 */
4038 		if (cmd->sync_cmd) {
4039 			cmd->sync_cmd = 0;
4040 			mrsas_wakeup(sc, cmd);
4041 			break;
4042 		}
4043 	case MFI_CMD_SMP:
4044 	case MFI_CMD_STP:
4045 	case MFI_CMD_DCMD:
4046 		/* Check for LD map update */
4047 		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
4048 		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
4049 			sc->fast_path_io = 0;
4050 			mtx_lock(&sc->raidmap_lock);
4051 			sc->map_update_cmd = NULL;
4052 			if (cmd_status != 0) {
4053 				if (cmd_status != MFI_STAT_NOT_FOUND)
4054 					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
4055 				else {
4056 					mrsas_release_mfi_cmd(cmd);
4057 					mtx_unlock(&sc->raidmap_lock);
4058 					break;
4059 				}
4060 			} else
4061 				sc->map_id++;
4062 			mrsas_release_mfi_cmd(cmd);
4063 			if (MR_ValidateMapInfo(sc))
4064 				sc->fast_path_io = 0;
4065 			else
4066 				sc->fast_path_io = 1;
4067 			mrsas_sync_map_info(sc);
4068 			mtx_unlock(&sc->raidmap_lock);
4069 			break;
4070 		}
4071 		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
4072 		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
4073 			sc->mrsas_aen_triggered = 0;
4074 		}
4075 		/* FW has an updated PD sequence */
4076 		if ((cmd->frame->dcmd.opcode ==
4077 		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
4078 		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
4079 			mtx_lock(&sc->raidmap_lock);
4080 			sc->jbod_seq_cmd = NULL;
4081 			mrsas_release_mfi_cmd(cmd);
4082 
4083 			if (cmd_status == MFI_STAT_OK) {
4084 				sc->pd_seq_map_id++;
4085 				/* Re-register a pd sync seq num cmd */
4086 				if (megasas_sync_pd_seq_num(sc, true))
4087 					sc->use_seqnum_jbod_fp = 0;
4088 			} else {
4089 				sc->use_seqnum_jbod_fp = 0;
4090 				device_printf(sc->mrsas_dev,
4091 				    "Jbod map sync failed, status=%x\n", cmd_status);
4092 			}
4093 			mtx_unlock(&sc->raidmap_lock);
4094 			break;
4095 		}
4096 		/* See if got an event notification */
4097 		if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
4098 			mrsas_complete_aen(sc, cmd);
4099 		else
4100 			mrsas_wakeup(sc, cmd);
4101 		break;
4102 	case MFI_CMD_ABORT:
4103 		/* Command issued to abort another cmd return */
4104 		mrsas_complete_abort(sc, cmd);
4105 		break;
4106 	default:
4107 		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
4108 		break;
4109 	}
4110 }
4111 
4112 /*
4113  * mrsas_wakeup:	Completes an internal command
4114  * input:			Adapter soft state
4115  * 					Command to be completed
4116  *
4117  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4118  * timer is started.  This function is called from
4119  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4120  * from the command wait.
4121  */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/* Propagate FW's completion status to the waiter. */
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* 0xFF is the "pending" sentinel; never hand it back as a status. */
	if (cmd->cmd_status == 0xFF)
		cmd->cmd_status = 0;

	/*
	 * NOTE(review): storing the address of the local `cmd` parameter
	 * looks vestigial — the wakeup channel actually used below (and
	 * by tsleep() in mrsas_issue_blocked_cmd()) is &sc->chan, the
	 * field's address. Confirm before changing.
	 */
	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}
4134 
4135 /*
4136  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
4137  * Adapter soft state Shutdown/Hibernate
4138  *
4139  * This function issues a DCMD internal command to Firmware to initiate shutdown
4140  * of the controller.
4141  */
static void
mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	/* Nothing to tell a dead controller. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
		return;
	}
	/* Abort any long-lived async commands before shutting down. */
	if (sc->aen_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
	if (sc->map_update_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
	if (sc->jbod_seq_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);

	/* Build a no-data DCMD carrying the caller-supplied opcode. */
	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	/*
	 * NOTE(review): opcode is stored without htole32(), unlike most
	 * other DCMD builders in this file — confirm whether callers pass
	 * a pre-swapped value or this only works on little-endian hosts.
	 */
	dcmd->opcode = opcode;

	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}
4182 
4183 /*
4184  * mrsas_flush_cache:         Requests FW to flush all its caches input:
4185  * Adapter soft state
4186  *
 * This function issues a DCMD internal command to Firmware to initiate
4188  * flushing of all caches.
4189  */
static void
mrsas_flush_cache(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	/* Nothing to flush on a dead controller. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
		return;
	}
	/* Build a no-data DCMD; the mailbox selects which caches to flush. */
	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	/*
	 * NOTE(review): opcode is not wrapped in htole32() here, unlike
	 * the other DCMD builders in this file — confirm big-endian
	 * behavior.
	 */
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	/* Flush both controller cache and on-disk drive caches. */
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}
4222 
/*
 * megasas_sync_pd_seq_num:	Fetch (or async-register for) the FW's PD
 *				sequence-number map.
 * input:	sc   - Adapter soft state
 *		pend - true: register an async "notify on change" command;
 *		       false: poll for the current map synchronously.
 */
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;	/* assume timeout until proven otherwise */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* One MR_PD_CFG_SEQ per physical device; the first is inline. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Ping-pong between the two pre-allocated jbod map buffers. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites on completion */
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(pd_seq_map_sz);
	dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);

	if (pend) {
		/*
		 * Async registration: FW completes this DCMD when the PD
		 * map changes; mrsas_complete_mptmfi_passthru() then
		 * releases the cmd and re-registers.
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			/*
			 * NOTE(review): on issue failure the cmd is not
			 * released and sc->jbod_seq_cmd keeps pointing at
			 * it — confirm whether this is intentional.
			 */
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = htole16(MFI_FRAME_DIR_READ);

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Sanity-check FW's count against the driver's static limit. */
	if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

	/*
	 * NOTE(review): the polled cmd does not appear to be released on
	 * this path — verify against the completion/reset code before
	 * treating it as a leak.
	 */
dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
4299 
4300 /*
4301  * mrsas_get_map_info:        Load and validate RAID map input:
4302  * Adapter instance soft state
4303  *
4304  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4305  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
4306  */
4307 static int
4308 mrsas_get_map_info(struct mrsas_softc *sc)
4309 {
4310 	uint8_t retcode = 0;
4311 
4312 	sc->fast_path_io = 0;
4313 	if (!mrsas_get_ld_map_info(sc)) {
4314 		retcode = MR_ValidateMapInfo(sc);
4315 		if (retcode == 0) {
4316 			sc->fast_path_io = 1;
4317 			return 0;
4318 		}
4319 	}
4320 	return 1;
4321 }
4322 
4323 /*
4324  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
4325  * Adapter instance soft state
4326  *
4327  * Issues an internal command (DCMD) to get the FW's controller PD list
4328  * structure.
4329  */
4330 static int
4331 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4332 {
4333 	int retcode = 0;
4334 	struct mrsas_mfi_cmd *cmd;
4335 	struct mrsas_dcmd_frame *dcmd;
4336 	void *map;
4337 	bus_addr_t map_phys_addr = 0;
4338 
4339 	cmd = mrsas_get_mfi_cmd(sc);
4340 	if (!cmd) {
4341 		device_printf(sc->mrsas_dev,
4342 		    "Cannot alloc for ld map info cmd.\n");
4343 		return 1;
4344 	}
4345 	dcmd = &cmd->frame->dcmd;
4346 
4347 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4348 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4349 	if (!map) {
4350 		device_printf(sc->mrsas_dev,
4351 		    "Failed to alloc mem for ld map info.\n");
4352 		mrsas_release_mfi_cmd(cmd);
4353 		return (ENOMEM);
4354 	}
4355 	memset(map, 0, sizeof(sc->max_map_sz));
4356 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4357 
4358 	dcmd->cmd = MFI_CMD_DCMD;
4359 	dcmd->cmd_status = 0xFF;
4360 	dcmd->sge_count = 1;
4361 	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4362 	dcmd->timeout = 0;
4363 	dcmd->pad_0 = 0;
4364 	dcmd->data_xfer_len = htole32(sc->current_map_sz);
4365 	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
4366 	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
4367 	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
4368 
4369 	retcode = mrsas_issue_polled(sc, cmd);
4370 	if (retcode == ETIMEDOUT)
4371 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4372 
4373 	return (retcode);
4374 }
4375 
4376 /*
4377  * mrsas_sync_map_info:        Get FW's ld_map structure input:
4378  * Adapter instance soft state
4379  *
4380  * Issues an internal command (DCMD) to get the FW's controller PD list
4381  * structure.
4382  */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	/* Current driver-side map is the source of the target/seq data. */
	map = sc->ld_drv_map[sc->map_id & 1];
	/*
	 * NOTE(review): ldCount is read without le32toh() here, unlike
	 * other FW-map field reads in this file — confirm endianness of
	 * the driver map representation.
	 */
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* The *other* raidmap buffer is used as the outgoing sync area. */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* One target-id/sequence-number record per logical drive. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	/* WRITE DCMD with the "pend" mailbox flag: FW completes it when
	 * the LD map changes; the completion handler then re-syncs. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites on completion */
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sc->current_map_sz);
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);

	/* Released later by mrsas_complete_mptmfi_passthru(). */
	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		/*
		 * NOTE(review): on issue failure the cmd is not released
		 * and map_update_cmd keeps pointing at it — confirm
		 * whether this is intentional.
		 */
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}
4441 
4442 /* Input:	dcmd.opcode		- MR_DCMD_PD_GET_INFO
4443   *		dcmd.mbox.s[0]		- deviceId for this physical drive
4444   *		dcmd.sge IN		- ptr to returned MR_PD_INFO structure
4445   * Desc:	Firmware return the physical drive info structure
4446   *
4447   */
static void
mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
{
	int retcode;
	u_int8_t do_ocr = 1;	/* assume timeout until the DCMD completes */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for get PD info cmd\n");
		return;
	}
	dcmd = &cmd->frame->dcmd;

	memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Mailbox word 0 selects which physical drive FW reports on. */
	dcmd->mbox.s[0] = htole16(device_id);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites on completion */
	dcmd->sge_count = 1;
	/*
	 * NOTE(review): flags is not wrapped in htole16() here, unlike
	 * e.g. mrsas_get_ld_map_info() — confirm big-endian behavior.
	 */
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info));
	dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info));

	/* With interrupts masked we must poll instead of sleeping. */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Record the drive's interface type (SAS/SATA/...) for this target. */
	sc->target_list[device_id].interface_type =
		le16toh(sc->pd_info_mem->state.ddf.pdType.intf);

	do_ocr = 0;

dcmd_timeout:

	/* A timed-out DCMD schedules an online controller reset (OCR). */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);
}
4501 
4502 /*
4503  * mrsas_add_target:				Add target ID of system PD/VD to driver's data structure.
4504  * sc:						Adapter's soft state
4505  * target_id:					Unique target id per controller(managed by driver)
4506  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4507  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4508  * return:					void
 * Description:					This function will be called whenever system PD or VD is created.
4510  */
4511 static void mrsas_add_target(struct mrsas_softc *sc,
4512 	u_int16_t target_id)
4513 {
4514 	sc->target_list[target_id].target_id = target_id;
4515 
4516 	device_printf(sc->mrsas_dev,
4517 		"%s created target ID: 0x%x\n",
4518 		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4519 		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4520 	/*
4521 	 * If interrupts are enabled, then only fire DCMD to get pd_info
4522 	 * for system PDs
4523 	 */
4524 	if (!sc->mask_interrupts && sc->pd_info_mem &&
4525 		(target_id < MRSAS_MAX_PD))
4526 		mrsas_get_pd_info(sc, target_id);
4527 
4528 }
4529 
4530 /*
4531  * mrsas_remove_target:			Remove target ID of system PD/VD from driver's data structure.
4532  * sc:						Adapter's soft state
4533  * target_id:					Unique target id per controller(managed by driver)
4534  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4535  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4536  * return:					void
 * Description:					This function will be called whenever system PD or VD is deleted
4538  */
4539 static void mrsas_remove_target(struct mrsas_softc *sc,
4540 	u_int16_t target_id)
4541 {
4542 	sc->target_list[target_id].target_id = 0xffff;
4543 	device_printf(sc->mrsas_dev,
4544 		"%s deleted target ID: 0x%x\n",
4545 		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4546 		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4547 }
4548 
4549 /*
4550  * mrsas_get_pd_list:           Returns FW's PD list structure input:
4551  * Adapter soft state
4552  *
4553  * Issues an internal command (DCMD) to get the FW's controller PD list
4554  * structure.  This information is mainly used to find out about system
4555  * supported by Firmware.
4556  */
4557 static int
4558 mrsas_get_pd_list(struct mrsas_softc *sc)
4559 {
4560 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4561 	u_int8_t do_ocr = 1;
4562 	struct mrsas_mfi_cmd *cmd;
4563 	struct mrsas_dcmd_frame *dcmd;
4564 	struct MR_PD_LIST *pd_list_mem;
4565 	struct MR_PD_ADDRESS *pd_addr;
4566 	bus_addr_t pd_list_phys_addr = 0;
4567 	struct mrsas_tmp_dcmd *tcmd;
4568 	u_int16_t dev_id;
4569 
4570 	cmd = mrsas_get_mfi_cmd(sc);
4571 	if (!cmd) {
4572 		device_printf(sc->mrsas_dev,
4573 		    "Cannot alloc for get PD list cmd\n");
4574 		return 1;
4575 	}
4576 	dcmd = &cmd->frame->dcmd;
4577 
4578 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4579 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4580 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4581 		device_printf(sc->mrsas_dev,
4582 		    "Cannot alloc dmamap for get PD list cmd\n");
4583 		mrsas_release_mfi_cmd(cmd);
4584 		mrsas_free_tmp_dcmd(tcmd);
4585 		free(tcmd, M_MRSAS);
4586 		return (ENOMEM);
4587 	} else {
4588 		pd_list_mem = tcmd->tmp_dcmd_mem;
4589 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4590 	}
4591 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4592 
4593 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4594 	dcmd->mbox.b[1] = 0;
4595 	dcmd->cmd = MFI_CMD_DCMD;
4596 	dcmd->cmd_status = 0xFF;
4597 	dcmd->sge_count = 1;
4598 	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4599 	dcmd->timeout = 0;
4600 	dcmd->pad_0 = 0;
4601 	dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4602 	dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
4603 	dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
4604 	dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4605 
4606 	if (!sc->mask_interrupts)
4607 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4608 	else
4609 		retcode = mrsas_issue_polled(sc, cmd);
4610 
4611 	if (retcode == ETIMEDOUT)
4612 		goto dcmd_timeout;
4613 
4614 	/* Get the instance PD list */
4615 	pd_count = MRSAS_MAX_PD;
4616 	pd_addr = pd_list_mem->addr;
4617 	if (le32toh(pd_list_mem->count) < pd_count) {
4618 		memset(sc->local_pd_list, 0,
4619 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4620 		for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
4621 			dev_id = le16toh(pd_addr->deviceId);
4622 			sc->local_pd_list[dev_id].tid = dev_id;
4623 			sc->local_pd_list[dev_id].driveType =
4624 			    le16toh(pd_addr->scsiDevType);
4625 			sc->local_pd_list[dev_id].driveState =
4626 			    MR_PD_STATE_SYSTEM;
4627 			if (sc->target_list[dev_id].target_id == 0xffff)
4628 				mrsas_add_target(sc, dev_id);
4629 			pd_addr++;
4630 		}
4631 		for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4632 			if ((sc->local_pd_list[pd_index].driveState !=
4633 				MR_PD_STATE_SYSTEM) &&
4634 				(sc->target_list[pd_index].target_id !=
4635 				0xffff)) {
4636 				mrsas_remove_target(sc, pd_index);
4637 			}
4638 		}
4639 		/*
4640 		 * Use mutext/spinlock if pd_list component size increase more than
4641 		 * 32 bit.
4642 		 */
4643 		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4644 		do_ocr = 0;
4645 	}
4646 dcmd_timeout:
4647 	mrsas_free_tmp_dcmd(tcmd);
4648 	free(tcmd, M_MRSAS);
4649 
4650 	if (do_ocr)
4651 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4652 
4653 	if (!sc->mask_interrupts)
4654 		mrsas_release_mfi_cmd(cmd);
4655 
4656 	return (retcode);
4657 }
4658 
4659 /*
4660  * mrsas_get_ld_list:           Returns FW's LD list structure input:
4661  * Adapter soft state
4662  *
4663  * Issues an internal command (DCMD) to get the FW's controller PD list
4664  * structure.  This information is mainly used to find out about supported by
4665  * the FW.
4666  */
4667 static int
4668 mrsas_get_ld_list(struct mrsas_softc *sc)
4669 {
4670 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4671 	u_int8_t do_ocr = 1;
4672 	struct mrsas_mfi_cmd *cmd;
4673 	struct mrsas_dcmd_frame *dcmd;
4674 	struct MR_LD_LIST *ld_list_mem;
4675 	bus_addr_t ld_list_phys_addr = 0;
4676 	struct mrsas_tmp_dcmd *tcmd;
4677 
4678 	cmd = mrsas_get_mfi_cmd(sc);
4679 	if (!cmd) {
4680 		device_printf(sc->mrsas_dev,
4681 		    "Cannot alloc for get LD list cmd\n");
4682 		return 1;
4683 	}
4684 	dcmd = &cmd->frame->dcmd;
4685 
4686 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4687 	ld_list_size = sizeof(struct MR_LD_LIST);
4688 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4689 		device_printf(sc->mrsas_dev,
4690 		    "Cannot alloc dmamap for get LD list cmd\n");
4691 		mrsas_release_mfi_cmd(cmd);
4692 		mrsas_free_tmp_dcmd(tcmd);
4693 		free(tcmd, M_MRSAS);
4694 		return (ENOMEM);
4695 	} else {
4696 		ld_list_mem = tcmd->tmp_dcmd_mem;
4697 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4698 	}
4699 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4700 
4701 	if (sc->max256vdSupport)
4702 		dcmd->mbox.b[0] = 1;
4703 
4704 	dcmd->cmd = MFI_CMD_DCMD;
4705 	dcmd->cmd_status = 0xFF;
4706 	dcmd->sge_count = 1;
4707 	dcmd->flags = MFI_FRAME_DIR_READ;
4708 	dcmd->timeout = 0;
4709 	dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
4710 	dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
4711 	dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
4712 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
4713 	dcmd->pad_0 = 0;
4714 
4715 	if (!sc->mask_interrupts)
4716 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4717 	else
4718 		retcode = mrsas_issue_polled(sc, cmd);
4719 
4720 	if (retcode == ETIMEDOUT)
4721 		goto dcmd_timeout;
4722 
4723 #if VD_EXT_DEBUG
4724 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4725 #endif
4726 
4727 	/* Get the instance LD list */
4728 	if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
4729 		sc->CurLdCount = le32toh(ld_list_mem->ldCount);
4730 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4731 		for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
4732 			ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4733 			drv_tgt_id = ids + MRSAS_MAX_PD;
4734 			if (ld_list_mem->ldList[ld_index].state != 0) {
4735 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4736 				if (sc->target_list[drv_tgt_id].target_id ==
4737 					0xffff)
4738 					mrsas_add_target(sc, drv_tgt_id);
4739 			} else {
4740 				if (sc->target_list[drv_tgt_id].target_id !=
4741 					0xffff)
4742 					mrsas_remove_target(sc,
4743 						drv_tgt_id);
4744 			}
4745 		}
4746 
4747 		do_ocr = 0;
4748 	}
4749 dcmd_timeout:
4750 	mrsas_free_tmp_dcmd(tcmd);
4751 	free(tcmd, M_MRSAS);
4752 
4753 	if (do_ocr)
4754 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4755 	if (!sc->mask_interrupts)
4756 		mrsas_release_mfi_cmd(cmd);
4757 
4758 	return (retcode);
4759 }
4760 
4761 /*
4762  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4763  * Adapter soft state Temp command Size of allocation
4764  *
4765  * Allocates DMAable memory for a temporary internal command. The allocated
4766  * memory is initialized to all zeros upon successful loading of the dma
4767  * mapped memory.
4768  */
4769 int
4770 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4771     struct mrsas_tmp_dcmd *tcmd, int size)
4772 {
4773 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
4774 	    1, 0,
4775 	    BUS_SPACE_MAXADDR_32BIT,
4776 	    BUS_SPACE_MAXADDR,
4777 	    NULL, NULL,
4778 	    size,
4779 	    1,
4780 	    size,
4781 	    BUS_DMA_ALLOCNOW,
4782 	    NULL, NULL,
4783 	    &tcmd->tmp_dcmd_tag)) {
4784 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4785 		return (ENOMEM);
4786 	}
4787 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4788 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4789 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4790 		return (ENOMEM);
4791 	}
4792 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4793 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4794 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4795 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4796 		return (ENOMEM);
4797 	}
4798 	memset(tcmd->tmp_dcmd_mem, 0, size);
4799 	return (0);
4800 }
4801 
4802 /*
4803  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4804  * temporary dcmd pointer
4805  *
4806  * Deallocates memory of the temporary command for use in the construction of
4807  * the internal DCMD.
4808  */
4809 void
4810 mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4811 {
4812 	if (tmp->tmp_dcmd_phys_addr)
4813 		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4814 	if (tmp->tmp_dcmd_mem != NULL)
4815 		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4816 	if (tmp->tmp_dcmd_tag != NULL)
4817 		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4818 }
4819 
4820 /*
4821  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4822  * Adapter soft state Previously issued cmd to be aborted
4823  *
4824  * This function is used to abort previously issued commands, such as AEN and
4825  * RAID map sync map commands.  The abort command is sent as a DCMD internal
4826  * command and subsequently the driver will wait for a return status.  The
4827  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4828  */
4829 static int
4830 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4831     struct mrsas_mfi_cmd *cmd_to_abort)
4832 {
4833 	struct mrsas_mfi_cmd *cmd;
4834 	struct mrsas_abort_frame *abort_fr;
4835 	u_int8_t retcode = 0;
4836 	unsigned long total_time = 0;
4837 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4838 
4839 	cmd = mrsas_get_mfi_cmd(sc);
4840 	if (!cmd) {
4841 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4842 		return (1);
4843 	}
4844 	abort_fr = &cmd->frame->abort;
4845 
4846 	/* Prepare and issue the abort frame */
4847 	abort_fr->cmd = MFI_CMD_ABORT;
4848 	abort_fr->cmd_status = 0xFF;
4849 	abort_fr->flags = 0;
4850 	abort_fr->abort_context = cmd_to_abort->index;
4851 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4852 	abort_fr->abort_mfi_phys_addr_hi = 0;
4853 
4854 	cmd->sync_cmd = 1;
4855 	cmd->cmd_status = 0xFF;
4856 
4857 	if (mrsas_issue_dcmd(sc, cmd)) {
4858 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4859 		return (1);
4860 	}
4861 	/* Wait for this cmd to complete */
4862 	sc->chan = (void *)&cmd;
4863 	while (1) {
4864 		if (cmd->cmd_status == 0xFF) {
4865 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4866 		} else
4867 			break;
4868 		total_time++;
4869 		if (total_time >= max_wait) {
4870 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4871 			retcode = 1;
4872 			break;
4873 		}
4874 	}
4875 
4876 	cmd->sync_cmd = 0;
4877 	mrsas_release_mfi_cmd(cmd);
4878 	return (retcode);
4879 }
4880 
4881 /*
4882  * mrsas_complete_abort:      Completes aborting a command input:
4883  * Adapter soft state Cmd that was issued to abort another cmd
4884  *
4885  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4886  * change after sending the command.  This function is called from
4887  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4888  */
4889 void
4890 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4891 {
4892 	if (cmd->sync_cmd) {
4893 		cmd->sync_cmd = 0;
4894 		cmd->cmd_status = 0;
4895 		sc->chan = (void *)&cmd;
4896 		wakeup_one((void *)&sc->chan);
4897 	}
4898 	return;
4899 }
4900 
4901 /*
4902  * mrsas_aen_handler:	AEN processing callback function from thread context
4903  * input:				Adapter soft state
4904  *
4905  * Asynchronous event handler
4906  */
4907 void
4908 mrsas_aen_handler(struct mrsas_softc *sc)
4909 {
4910 	union mrsas_evt_class_locale class_locale;
4911 	int doscan = 0;
4912 	u_int32_t seq_num;
4913  	int error, fail_aen = 0;
4914 
4915 	if (sc == NULL) {
4916 		printf("invalid instance!\n");
4917 		return;
4918 	}
4919 	if (sc->remove_in_progress || sc->reset_in_progress) {
4920 		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
4921 			__func__, __LINE__);
4922 		return;
4923 	}
4924 	if (sc->evt_detail_mem) {
4925 		switch (sc->evt_detail_mem->code) {
4926 		case MR_EVT_PD_INSERTED:
4927 			fail_aen = mrsas_get_pd_list(sc);
4928 			if (!fail_aen)
4929 				mrsas_bus_scan_sim(sc, sc->sim_1);
4930 			else
4931 				goto skip_register_aen;
4932 			break;
4933 		case MR_EVT_PD_REMOVED:
4934 			fail_aen = mrsas_get_pd_list(sc);
4935 			if (!fail_aen)
4936 				mrsas_bus_scan_sim(sc, sc->sim_1);
4937 			else
4938 				goto skip_register_aen;
4939 			break;
4940 		case MR_EVT_LD_OFFLINE:
4941 		case MR_EVT_CFG_CLEARED:
4942 		case MR_EVT_LD_DELETED:
4943 			mrsas_bus_scan_sim(sc, sc->sim_0);
4944 			break;
4945 		case MR_EVT_LD_CREATED:
4946 			fail_aen = mrsas_get_ld_list(sc);
4947 			if (!fail_aen)
4948 				mrsas_bus_scan_sim(sc, sc->sim_0);
4949 			else
4950 				goto skip_register_aen;
4951 			break;
4952 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4953 		case MR_EVT_FOREIGN_CFG_IMPORTED:
4954 		case MR_EVT_LD_STATE_CHANGE:
4955 			doscan = 1;
4956 			break;
4957 		case MR_EVT_CTRL_PROP_CHANGED:
4958 			fail_aen = mrsas_get_ctrl_info(sc);
4959 			if (fail_aen)
4960 				goto skip_register_aen;
4961 			break;
4962 		default:
4963 			break;
4964 		}
4965 	} else {
4966 		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4967 		return;
4968 	}
4969 	if (doscan) {
4970 		fail_aen = mrsas_get_pd_list(sc);
4971 		if (!fail_aen) {
4972 			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4973 			mrsas_bus_scan_sim(sc, sc->sim_1);
4974 		} else
4975 			goto skip_register_aen;
4976 
4977 		fail_aen = mrsas_get_ld_list(sc);
4978 		if (!fail_aen) {
4979 			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4980 			mrsas_bus_scan_sim(sc, sc->sim_0);
4981 		} else
4982 			goto skip_register_aen;
4983 	}
4984 	seq_num = sc->evt_detail_mem->seq_num + 1;
4985 
4986 	/* Register AEN with FW for latest sequence number plus 1 */
4987 	class_locale.members.reserved = 0;
4988 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
4989 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
4990 
4991 	if (sc->aen_cmd != NULL)
4992 		return;
4993 
4994 	mtx_lock(&sc->aen_lock);
4995 	error = mrsas_register_aen(sc, seq_num,
4996 	    class_locale.word);
4997 	mtx_unlock(&sc->aen_lock);
4998 
4999 	if (error)
5000 		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
5001 
5002 skip_register_aen:
5003 	return;
5004 
5005 }
5006 
5007 /*
5008  * mrsas_complete_aen:	Completes AEN command
5009  * input:				Adapter soft state
5010  * 						Cmd that was issued to abort another cmd
5011  *
5012  * This function will be called from ISR and will continue event processing from
5013  * thread context by enqueuing task in ev_tq (callback function
5014  * "mrsas_aen_handler").
5015  */
5016 void
5017 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
5018 {
5019 	/*
5020 	 * Don't signal app if it is just an aborted previously registered
5021 	 * aen
5022 	 */
5023 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
5024 		sc->mrsas_aen_triggered = 1;
5025 		mtx_lock(&sc->aen_lock);
5026 		if (sc->mrsas_poll_waiting) {
5027 			sc->mrsas_poll_waiting = 0;
5028 			selwakeup(&sc->mrsas_select);
5029 		}
5030 		mtx_unlock(&sc->aen_lock);
5031 	} else
5032 		cmd->abort_aen = 0;
5033 
5034 	sc->aen_cmd = NULL;
5035 	mrsas_release_mfi_cmd(cmd);
5036 
5037 	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
5038 
5039 	return;
5040 }
5041 
/* Device methods: newbus entry points implemented by this driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_shutdown, mrsas_shutdown),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}
};

/* newbus driver description: name, method table, per-device softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

/* Attach the driver to the PCI bus; the driver depends on CAM. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
5062