1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26  * Copyright 2020 RackTop Systems, Inc.
27  */
28 
29 #include <emlxs.h>
30 
31 
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_SLI4_C);
34 
35 static int		emlxs_sli4_init_extents(emlxs_hba_t *hba,
36 				MAILBOXQ *mbq);
37 static uint32_t		emlxs_sli4_read_status(emlxs_hba_t *hba);
38 
39 static int		emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
40 
41 static uint32_t		emlxs_sli4_read_sema(emlxs_hba_t *hba);
42 
43 static uint32_t		emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
44 
45 static void		emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys,
46 				boolean_t high);
47 
48 static void		emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid,
49 				uint_t posted, uint_t index);
50 
51 static void		emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid,
52 				uint_t count);
53 
54 static void		emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid,
55 				uint_t count);
56 
57 static void		emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid,
58 				uint32_t count, boolean_t arm);
59 static void		emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid,
60 				uint32_t count, boolean_t arm);
61 
62 static int		emlxs_sli4_create_queues(emlxs_hba_t *hba,
63 				MAILBOXQ *mbq);
64 static int		emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
65 				MAILBOXQ *mbq);
66 static int		emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
67 				MAILBOXQ *mbq);
68 
69 static int		emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
70 
71 static int		emlxs_sli4_map_hdw(emlxs_hba_t *hba);
72 
73 static void		emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
74 
75 static int32_t		emlxs_sli4_online(emlxs_hba_t *hba);
76 
77 static void		emlxs_sli4_offline(emlxs_hba_t *hba,
78 				uint32_t reset_requested);
79 
80 static uint32_t		emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
81 				uint32_t skip_post, uint32_t quiesce);
82 static void		emlxs_sli4_hba_kill(emlxs_hba_t *hba);
83 
84 static uint32_t		emlxs_sli4_hba_init(emlxs_hba_t *hba);
85 
86 static uint32_t		emlxs_sli4_bde_setup(emlxs_port_t *port,
87 				emlxs_buf_t *sbp);
88 
89 static void		emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
90 				CHANNEL *cp, IOCBQ *iocb_cmd);
91 static uint32_t		emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
92 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
93 static uint32_t		emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
94 				MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
95 #ifdef SFCT_SUPPORT
96 static uint32_t		emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
97 				emlxs_buf_t *cmd_sbp, int channel);
98 static uint32_t		emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
99 				emlxs_buf_t *sbp);
100 #endif /* SFCT_SUPPORT */
101 
102 static uint32_t		emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
103 				emlxs_buf_t *sbp, int ring);
104 static uint32_t		emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
105 				emlxs_buf_t *sbp);
106 static uint32_t		emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
107 				emlxs_buf_t *sbp);
108 static uint32_t		emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
109 				emlxs_buf_t *sbp);
110 static void		emlxs_sli4_poll_intr(emlxs_hba_t *hba);
111 static int32_t		emlxs_sli4_intx_intr(char *arg);
112 
113 #ifdef MSI_SUPPORT
114 static uint32_t		emlxs_sli4_msi_intr(char *arg1, char *arg2);
115 #endif /* MSI_SUPPORT */
116 
117 static void		emlxs_sli4_resource_free(emlxs_hba_t *hba);
118 
119 static int		emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
120 extern void		emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
121 
122 static XRIobj_t		*emlxs_sli4_alloc_xri(emlxs_port_t *port,
123 				emlxs_buf_t *sbp, RPIobj_t *rpip,
124 				uint32_t type);
125 static void		emlxs_sli4_enable_intr(emlxs_hba_t *hba);
126 
127 static void		emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
128 
129 static void		emlxs_sli4_timer(emlxs_hba_t *hba);
130 
131 static void		emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
132 
133 static void		emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba);
134 
135 static void		emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba);
136 
137 static void		emlxs_sli4_gpio_timer(void *arg);
138 
139 static void		emlxs_sli4_check_gpio(emlxs_hba_t *hba);
140 
141 static uint32_t	emlxs_sli4_fix_gpio(emlxs_hba_t *hba,
142 					uint8_t *pin, uint8_t *pinval);
143 
144 static uint32_t	emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq);
145 
146 static void		emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
147 
148 extern XRIobj_t		*emlxs_sli4_reserve_xri(emlxs_port_t *port,
149 				RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
150 static int		emlxs_check_hdw_ready(emlxs_hba_t *);
151 
152 static uint32_t		emlxs_sli4_reg_did(emlxs_port_t *port,
153 				uint32_t did, SERV_PARM *param,
154 				emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
155 				IOCBQ *iocbq);
156 
157 static uint32_t		emlxs_sli4_unreg_node(emlxs_port_t *port,
158 				emlxs_node_t *node, emlxs_buf_t *sbp,
159 				fc_unsol_buf_t *ubp, IOCBQ *iocbq);
160 
161 static void		emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
162 				CQE_ASYNC_t *cqe);
163 static void		emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
164 				CQE_ASYNC_t *cqe);
165 
166 
167 static uint16_t		emlxs_sli4_rqid_to_index(emlxs_hba_t *hba,
168 				uint16_t rqid);
169 static uint16_t		emlxs_sli4_wqid_to_index(emlxs_hba_t *hba,
170 				uint16_t wqid);
171 static uint16_t		emlxs_sli4_cqid_to_index(emlxs_hba_t *hba,
172 				uint16_t cqid);
173 
174 /* Define SLI4 API functions */
/* Define SLI4 API functions */
/*
 * SLI abstraction vtable for SLI4-mode adapters.  Initializers are
 * positional and must stay in the order declared by emlxs_sli_api_t;
 * the generic driver core dispatches through this table so SLI3 and
 * SLI4 hardware share one code path.
 */
emlxs_sli_api_t emlxs_sli4_api = {
	emlxs_sli4_map_hdw,		/* sli_map_hdw */
	emlxs_sli4_unmap_hdw,		/* sli_unmap_hdw */
	emlxs_sli4_online,		/* sli_online */
	emlxs_sli4_offline,		/* sli_offline */
	emlxs_sli4_hba_reset,		/* sli_hba_reset */
	emlxs_sli4_hba_kill,		/* sli_hba_kill */
	emlxs_sli4_issue_iocb_cmd,	/* sli_issue_iocb_cmd */
	emlxs_sli4_issue_mbox_cmd,	/* sli_issue_mbox_cmd */
#ifdef SFCT_SUPPORT
	emlxs_sli4_prep_fct_iocb,	/* sli_prep_fct_iocb (target mode) */
#else
	NULL,				/* no FC target support compiled in */
#endif /* SFCT_SUPPORT */
	emlxs_sli4_prep_fcp_iocb,	/* sli_prep_fcp_iocb */
	emlxs_sli4_prep_ip_iocb,	/* sli_prep_ip_iocb */
	emlxs_sli4_prep_els_iocb,	/* sli_prep_els_iocb */
	emlxs_sli4_prep_ct_iocb,	/* sli_prep_ct_iocb */
	emlxs_sli4_poll_intr,		/* sli_poll_intr */
	emlxs_sli4_intx_intr,		/* sli_intx_intr (legacy INTx) */
	emlxs_sli4_msi_intr,		/* sli_msi_intr (MSI/MSI-X) */
	emlxs_sli4_disable_intr,	/* sli_disable_intr */
	emlxs_sli4_timer,		/* sli_timer */
	emlxs_sli4_poll_erratt,		/* sli_poll_erratt */
	emlxs_sli4_reg_did,		/* sli_reg_did */
	emlxs_sli4_unreg_node		/* sli_unreg_node */
};
202 
203 
204 /* ************************************************************************** */
205 
206 static void
207 emlxs_sli4_set_default_params(emlxs_hba_t *hba)
208 {
209 	emlxs_port_t *port = &PPORT;
210 
211 	bzero((char *)&hba->sli.sli4.param, sizeof (sli_params_t));
212 
213 	hba->sli.sli4.param.ProtocolType = 0x3; /* FC/FCoE */
214 
215 	hba->sli.sli4.param.SliHint2 = 0;
216 	hba->sli.sli4.param.SliHint1 = 0;
217 	hba->sli.sli4.param.IfType = 0;
218 	hba->sli.sli4.param.SliFamily = 0;
219 	hba->sli.sli4.param.Revision = 0x4; /* SLI4 */
220 	hba->sli.sli4.param.FT = 0;
221 
222 	hba->sli.sli4.param.EqeCntMethod = 0x1; /* Bit pattern */
223 	hba->sli.sli4.param.EqPageSize = 0x1; /* 4096 */
224 	hba->sli.sli4.param.EqeSize = 0x1; /* 4 byte */
225 	hba->sli.sli4.param.EqPageCnt = 8;
226 	hba->sli.sli4.param.EqeCntMask = 0x1F; /* 256-4096 elements */
227 
228 	hba->sli.sli4.param.CqeCntMethod = 0x1; /* Bit pattern */
229 	hba->sli.sli4.param.CqPageSize = 0x1; /* 4096 */
230 	hba->sli.sli4.param.CQV = 0;
231 	hba->sli.sli4.param.CqeSize = 0x3; /* 16 byte */
232 	hba->sli.sli4.param.CqPageCnt = 4;
233 	hba->sli.sli4.param.CqeCntMask = 0x70; /* 256-1024 elements */
234 
235 	hba->sli.sli4.param.MqeCntMethod = 0x1; /* Bit pattern */
236 	hba->sli.sli4.param.MqPageSize = 0x1; /* 4096 */
237 	hba->sli.sli4.param.MQV = 0;
238 	hba->sli.sli4.param.MqPageCnt = 8;
239 	hba->sli.sli4.param.MqeCntMask = 0x0F; /* 16-128 elements */
240 
241 	hba->sli.sli4.param.WqeCntMethod = 0; /* Page Count */
242 	hba->sli.sli4.param.WqPageSize = 0x1; /* 4096 */
243 	hba->sli.sli4.param.WQV = 0;
244 	hba->sli.sli4.param.WqeSize = 0x5; /* 64 byte */
245 	hba->sli.sli4.param.WqPageCnt = 4;
246 	hba->sli.sli4.param.WqeCntMask = 0x10; /* 256 elements */
247 
248 	hba->sli.sli4.param.RqeCntMethod = 0; /* Page Count */
249 	hba->sli.sli4.param.RqPageSize = 0x1; /* 4096 */
250 	hba->sli.sli4.param.RQV = 0;
251 	hba->sli.sli4.param.RqeSize = 0x2; /* 8 byte */
252 	hba->sli.sli4.param.RqPageCnt = 8;
253 	hba->sli.sli4.param.RqDbWin = 1;
254 	hba->sli.sli4.param.RqeCntMask = 0x100; /* 4096 elements */
255 
256 	hba->sli.sli4.param.Loopback = 0xf; /* unsupported */
257 	hba->sli.sli4.param.PHWQ = 0;
258 	hba->sli.sli4.param.PHON = 0;
259 	hba->sli.sli4.param.TRIR = 0;
260 	hba->sli.sli4.param.TRTY = 0;
261 	hba->sli.sli4.param.TCCA = 0;
262 	hba->sli.sli4.param.MWQE = 0;
263 	hba->sli.sli4.param.ASSI = 0;
264 	hba->sli.sli4.param.TERP = 0;
265 	hba->sli.sli4.param.TGT  = 0;
266 	hba->sli.sli4.param.AREG = 0;
267 	hba->sli.sli4.param.FBRR = 0;
268 	hba->sli.sli4.param.SGLR = 1;
269 	hba->sli.sli4.param.HDRR = 1;
270 	hba->sli.sli4.param.EXT  = 0;
271 	hba->sli.sli4.param.FCOE = 1;
272 
273 	hba->sli.sli4.param.SgeLength = (64 * 1024);
274 	hba->sli.sli4.param.SglAlign = 0x7 /* 4096 */;
275 	hba->sli.sli4.param.SglPageSize = 0x1; /* 4096 */
276 	hba->sli.sli4.param.SglPageCnt = 2;
277 
278 	hba->sli.sli4.param.MinRqSize = 128;
279 	hba->sli.sli4.param.MaxRqSize = 2048;
280 
281 	hba->sli.sli4.param.RPIMax = 0x3ff;
282 	hba->sli.sli4.param.XRIMax = 0x3ff;
283 	hba->sli.sli4.param.VFIMax = 0xff;
284 	hba->sli.sli4.param.VPIMax = 0xff;
285 
286 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
287 	    "Default SLI4 parameters set.");
288 
289 } /* emlxs_sli4_set_default_params() */
290 
291 
292 /*
293  * emlxs_sli4_online()
294  *
295  * This routine will start initialization of the SLI4 HBA.
296  */
297 static int32_t
298 emlxs_sli4_online(emlxs_hba_t *hba)
299 {
300 	emlxs_port_t *port = &PPORT;
301 	emlxs_config_t *cfg;
302 	emlxs_vpd_t *vpd;
303 	MAILBOXQ *mbq = NULL;
304 	MAILBOX4 *mb  = NULL;
305 	MATCHMAP *mp  = NULL;
306 	uint32_t i;
307 	uint32_t j;
308 	uint32_t rval = 0;
309 	uint8_t *vpd_data;
310 	uint32_t sli_mode;
311 	uint8_t *outptr;
312 	uint32_t status;
313 	uint32_t fw_check;
314 	uint32_t kern_update = 0;
315 	emlxs_firmware_t hba_fw;
316 	emlxs_firmware_t *fw;
317 	uint16_t ssvid;
318 	char buf[64];
319 
320 	cfg = &CFG;
321 	vpd = &VPD;
322 
323 	sli_mode = EMLXS_HBA_SLI4_MODE;
324 	hba->sli_mode = sli_mode;
325 
326 	/* Set the fw_check flag */
327 	fw_check = cfg[CFG_FW_CHECK].current;
328 
329 	if ((fw_check & 0x04) ||
330 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
331 		kern_update = 1;
332 	}
333 
334 	hba->mbox_queue_flag = 0;
335 	hba->fc_edtov = FF_DEF_EDTOV;
336 	hba->fc_ratov = FF_DEF_RATOV;
337 	hba->fc_altov = FF_DEF_ALTOV;
338 	hba->fc_arbtov = FF_DEF_ARBTOV;
339 
340 	/* Networking not supported */
341 	if (cfg[CFG_NETWORK_ON].current) {
342 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
343 		    "Networking is not supported in SLI4, turning it off");
344 		cfg[CFG_NETWORK_ON].current = 0;
345 	}
346 
347 	hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
348 	if (hba->chan_count > MAX_CHANNEL) {
349 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
350 		    "Max channels exceeded, dropping num-wq from %d to 1",
351 		    cfg[CFG_NUM_WQ].current);
352 		cfg[CFG_NUM_WQ].current = 1;
353 		hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
354 	}
355 	hba->channel_fcp = 0; /* First channel */
356 
357 	/* Gen6 chips only support P2P topologies */
358 	if ((hba->model_info.chip == EMLXS_LANCERG6_CHIP) &&
359 	    cfg[CFG_TOPOLOGY].current != 2) {
360 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
361 		    "Loop topologies are not supported by this HBA. "
362 		    "Forcing topology to P2P.");
363 		cfg[CFG_TOPOLOGY].current = 2;
364 	}
365 
366 	/* Default channel for everything else is the last channel */
367 	hba->channel_ip = hba->chan_count - 1;
368 	hba->channel_els = hba->chan_count - 1;
369 	hba->channel_ct = hba->chan_count - 1;
370 
371 	hba->fc_iotag = 1;
372 	hba->io_count = 0;
373 	hba->channel_tx_count = 0;
374 
375 	/* Specific to ATTO G5 boards */
376 	if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
377 		/* Set hard-coded GPIO pins */
378 		if (hba->pci_function_number) {
379 			hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 27;
380 			hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 28;
381 			hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 29;
382 			hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 8;
383 		} else {
384 			hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 13;
385 			hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 25;
386 			hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 26;
387 			hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 12;
388 		}
389 	}
390 
391 	/* Initialize the local dump region buffer */
392 	bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
393 	hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
394 	hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
395 	hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
396 
397 	(void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
398 
399 	if (hba->sli.sli4.dump_region.virt == NULL) {
400 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
401 		    "Unable to allocate dump region buffer.");
402 
403 		return (ENOMEM);
404 	}
405 
406 	/*
407 	 * Get a buffer which will be used repeatedly for mailbox commands
408 	 */
409 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
410 
411 	mb = (MAILBOX4 *)mbq;
412 
413 reset:
414 	/* Reset & Initialize the adapter */
415 	if (emlxs_sli4_hba_init(hba)) {
416 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
417 		    "Unable to init hba.");
418 
419 		rval = EIO;
420 		goto failed1;
421 	}
422 
423 #ifdef FMA_SUPPORT
424 	/* Access handle validation */
425 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
426 	case SLI_INTF_IF_TYPE_2:
427 		if ((emlxs_fm_check_acc_handle(hba,
428 		    hba->pci_acc_handle) != DDI_FM_OK) ||
429 		    (emlxs_fm_check_acc_handle(hba,
430 		    hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK)) {
431 			EMLXS_MSGF(EMLXS_CONTEXT,
432 			    &emlxs_invalid_access_handle_msg, NULL);
433 
434 			rval = EIO;
435 			goto failed1;
436 		}
437 		break;
438 
439 	default :
440 		if ((emlxs_fm_check_acc_handle(hba,
441 		    hba->pci_acc_handle) != DDI_FM_OK) ||
442 		    (emlxs_fm_check_acc_handle(hba,
443 		    hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK) ||
444 		    (emlxs_fm_check_acc_handle(hba,
445 		    hba->sli.sli4.bar2_acc_handle) != DDI_FM_OK)) {
446 			EMLXS_MSGF(EMLXS_CONTEXT,
447 			    &emlxs_invalid_access_handle_msg, NULL);
448 
449 			rval = EIO;
450 			goto failed1;
451 		}
452 		break;
453 	}
454 #endif	/* FMA_SUPPORT */
455 
456 	/*
457 	 * Setup and issue mailbox READ REV command
458 	 */
459 	vpd->opFwRev = 0;
460 	vpd->postKernRev = 0;
461 	vpd->sli1FwRev = 0;
462 	vpd->sli2FwRev = 0;
463 	vpd->sli3FwRev = 0;
464 	vpd->sli4FwRev = 0;
465 
466 	vpd->postKernName[0] = 0;
467 	vpd->opFwName[0] = 0;
468 	vpd->sli1FwName[0] = 0;
469 	vpd->sli2FwName[0] = 0;
470 	vpd->sli3FwName[0] = 0;
471 	vpd->sli4FwName[0] = 0;
472 
473 	vpd->opFwLabel[0] = 0;
474 	vpd->sli1FwLabel[0] = 0;
475 	vpd->sli2FwLabel[0] = 0;
476 	vpd->sli3FwLabel[0] = 0;
477 	vpd->sli4FwLabel[0] = 0;
478 
479 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
480 
481 	emlxs_mb_get_sli4_params(hba, mbq);
482 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
483 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
484 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
485 		    mb->mbxCommand, mb->mbxStatus);
486 
487 		/* Set param defaults */
488 		emlxs_sli4_set_default_params(hba);
489 
490 	} else {
491 		/* Save parameters */
492 		bcopy((char *)&mb->un.varSLIConfig.payload,
493 		    (char *)&hba->sli.sli4.param, sizeof (sli_params_t));
494 
495 		emlxs_data_dump(port, "SLI_PARMS",
496 		    (uint32_t *)&hba->sli.sli4.param,
497 		    sizeof (sli_params_t), 0);
498 	}
499 
500 	/* Reuse mbq from previous mbox */
501 	bzero(mbq, sizeof (MAILBOXQ));
502 
503 	emlxs_mb_get_port_name(hba, mbq);
504 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
505 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
506 		    "Unable to get port names. Mailbox cmd=%x status=%x",
507 		    mb->mbxCommand, mb->mbxStatus);
508 
509 		bzero(hba->sli.sli4.port_name,
510 		    sizeof (hba->sli.sli4.port_name));
511 	} else {
512 		/* Save port names */
513 		bcopy((char *)&mb->un.varSLIConfig.payload,
514 		    (char *)&hba->sli.sli4.port_name,
515 		    sizeof (hba->sli.sli4.port_name));
516 	}
517 
518 	/* Reuse mbq from previous mbox */
519 	bzero(mbq, sizeof (MAILBOXQ));
520 
521 	emlxs_mb_read_rev(hba, mbq, 0);
522 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
523 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
524 		    "Unable to read rev. Mailbox cmd=%x status=%x",
525 		    mb->mbxCommand, mb->mbxStatus);
526 
527 		rval = EIO;
528 		goto failed1;
529 
530 	}
531 
532 	emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
533 	if (mb->un.varRdRev4.sliLevel != 4) {
534 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
535 		    "Invalid read rev Version for SLI4: 0x%x",
536 		    mb->un.varRdRev4.sliLevel);
537 
538 		rval = EIO;
539 		goto failed1;
540 	}
541 
542 	switch (mb->un.varRdRev4.dcbxMode) {
543 	case EMLXS_DCBX_MODE_CIN:	/* Mapped to nonFIP mode */
544 		hba->flag &= ~FC_FIP_SUPPORTED;
545 		break;
546 
547 	case EMLXS_DCBX_MODE_CEE:	/* Mapped to FIP mode */
548 		hba->flag |= FC_FIP_SUPPORTED;
549 		break;
550 
551 	default:
552 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
553 		    "Invalid read rev dcbx mode for SLI4: 0x%x",
554 		    mb->un.varRdRev4.dcbxMode);
555 
556 		rval = EIO;
557 		goto failed1;
558 	}
559 
560 	/* Set FC/FCoE mode */
561 	if (mb->un.varRdRev4.FCoE) {
562 		hba->sli.sli4.flag |= EMLXS_SLI4_FCOE_MODE;
563 	} else {
564 		hba->sli.sli4.flag &= ~EMLXS_SLI4_FCOE_MODE;
565 	}
566 
567 	/* Save information as VPD data */
568 	vpd->rBit = 1;
569 
570 	vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
571 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
572 
573 	vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
574 	bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
575 
576 	vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
577 	bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
578 
579 	vpd->biuRev = mb->un.varRdRev4.HwRev1;
580 	vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
581 	vpd->fcphLow = mb->un.varRdRev4.fcphLow;
582 	vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
583 	vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
584 
585 	/* Decode FW labels */
586 	if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
587 		bcopy(vpd->postKernName, vpd->sli4FwName, 16);
588 	}
589 	emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0,
590 	    sizeof (vpd->sli4FwName));
591 	emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0,
592 	    sizeof (vpd->opFwName));
593 	emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0,
594 	    sizeof (vpd->postKernName));
595 
596 	if (hba->model_info.chip == EMLXS_BE2_CHIP) {
597 		(void) strlcpy(vpd->sli4FwLabel, "be2.ufi",
598 		    sizeof (vpd->sli4FwLabel));
599 	} else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
600 		(void) strlcpy(vpd->sli4FwLabel, "be3.ufi",
601 		    sizeof (vpd->sli4FwLabel));
602 	} else if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
603 		(void) strlcpy(vpd->sli4FwLabel, "xe201.grp",
604 		    sizeof (vpd->sli4FwLabel));
605 	} else if (hba->model_info.chip == EMLXS_LANCERG6_CHIP) {
606 		(void) strlcpy(vpd->sli4FwLabel, "xe501.grp",
607 		    sizeof (vpd->sli4FwLabel));
608 	} else {
609 		(void) strlcpy(vpd->sli4FwLabel, "sli4.fw",
610 		    sizeof (vpd->sli4FwLabel));
611 	}
612 
613 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
614 	    "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
615 	    vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
616 	    vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
617 	    mb->un.varRdRev4.dcbxMode);
618 
619 	/* No key information is needed for SLI4 products */
620 
621 	/* Get adapter VPD information */
622 	vpd->port_index = (uint32_t)-1;
623 
624 	/* Reuse mbq from previous mbox */
625 	bzero(mbq, sizeof (MAILBOXQ));
626 
627 	emlxs_mb_dump_vpd(hba, mbq, 0);
628 	vpd_data = hba->sli.sli4.dump_region.virt;
629 
630 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
631 	    MBX_SUCCESS) {
632 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
633 		    "No VPD found. status=%x", mb->mbxStatus);
634 	} else {
635 		EMLXS_MSGF(EMLXS_CONTEXT,
636 		    &emlxs_init_debug_msg,
637 		    "VPD dumped. rsp_cnt=%d status=%x",
638 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
639 
640 		if (mb->un.varDmp4.rsp_cnt) {
641 			EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
642 			    0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
643 
644 #ifdef FMA_SUPPORT
645 			if (hba->sli.sli4.dump_region.dma_handle) {
646 				if (emlxs_fm_check_dma_handle(hba,
647 				    hba->sli.sli4.dump_region.dma_handle)
648 				    != DDI_FM_OK) {
649 					EMLXS_MSGF(EMLXS_CONTEXT,
650 					    &emlxs_invalid_dma_handle_msg,
651 					    "sli4_online: hdl=%p",
652 					    hba->sli.sli4.dump_region.
653 					    dma_handle);
654 					rval = EIO;
655 					goto failed1;
656 				}
657 			}
658 #endif /* FMA_SUPPORT */
659 
660 		}
661 	}
662 
663 	if (vpd_data[0]) {
664 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
665 		    mb->un.varDmp4.rsp_cnt);
666 
667 		/*
668 		 * If there is a VPD part number, and it does not
669 		 * match the current default HBA model info,
670 		 * replace the default data with an entry that
671 		 * does match.
672 		 *
673 		 * After emlxs_parse_vpd model holds the VPD value
674 		 * for V2 and part_num hold the value for PN. These
675 		 * 2 values are NOT necessarily the same.
676 		 */
677 
678 		rval = 0;
679 		if ((vpd->model[0] != 0) &&
680 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
681 
682 			/* First scan for a V2 match */
683 
684 			for (i = 1; i < emlxs_pci_model_count; i++) {
685 				if (strcmp(&vpd->model[0],
686 				    emlxs_pci_model[i].model) == 0) {
687 					bcopy(&emlxs_pci_model[i],
688 					    &hba->model_info,
689 					    sizeof (emlxs_model_t));
690 					rval = 1;
691 					break;
692 				}
693 			}
694 		}
695 
696 		if (!rval && (vpd->part_num[0] != 0) &&
697 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
698 
699 			/* Next scan for a PN match */
700 
701 			for (i = 1; i < emlxs_pci_model_count; i++) {
702 				if (strcmp(&vpd->part_num[0],
703 				    emlxs_pci_model[i].model) == 0) {
704 					bcopy(&emlxs_pci_model[i],
705 					    &hba->model_info,
706 					    sizeof (emlxs_model_t));
707 					break;
708 				}
709 			}
710 		}
711 
712 		/* HP CNA port indices start at 1 instead of 0 */
713 		if (hba->model_info.chip & EMLXS_BE_CHIPS) {
714 			ssvid = ddi_get16(hba->pci_acc_handle,
715 			    (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
716 
717 			if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
718 				vpd->port_index--;
719 			}
720 		}
721 
722 		/*
723 		 * Now lets update hba->model_info with the real
724 		 * VPD data, if any.
725 		 */
726 
727 		/*
728 		 * Replace the default model description with vpd data
729 		 */
730 		if (vpd->model_desc[0] != 0) {
731 			(void) strncpy(hba->model_info.model_desc,
732 			    vpd->model_desc,
733 			    (sizeof (hba->model_info.model_desc)-1));
734 		}
735 
736 		/* Replace the default model with vpd data */
737 		if (vpd->model[0] != 0) {
738 			(void) strncpy(hba->model_info.model, vpd->model,
739 			    (sizeof (hba->model_info.model)-1));
740 		}
741 
742 		/* Replace the default program types with vpd data */
743 		if (vpd->prog_types[0] != 0) {
744 			emlxs_parse_prog_types(hba, vpd->prog_types);
745 		}
746 	}
747 
748 	/*
749 	 * Since the adapter model may have changed with the vpd data
750 	 * lets double check if adapter is not supported
751 	 */
752 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
753 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
754 		    "Unsupported adapter found.  "
755 		    "Id:%d  Vendor id:0x%x  Device id:0x%x  SSDID:0x%x  "
756 		    "Model:%s", hba->model_info.id, hba->model_info.vendor_id,
757 		    hba->model_info.device_id, hba->model_info.ssdid,
758 		    hba->model_info.model);
759 
760 		rval = EIO;
761 		goto failed1;
762 	}
763 
764 	(void) strncpy(vpd->boot_version, vpd->sli4FwName,
765 	    (sizeof (vpd->boot_version)-1));
766 
767 	/* Get fcode version property */
768 	emlxs_get_fcode_version(hba);
769 
770 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
771 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
772 	    vpd->opFwRev, vpd->sli1FwRev);
773 
774 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
775 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
776 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
777 
778 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
779 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
780 
781 	/*
782 	 * If firmware checking is enabled and the adapter model indicates
783 	 * a firmware image, then perform firmware version check
784 	 */
785 	hba->fw_flag = 0;
786 	hba->fw_timer = 0;
787 
788 	if (((fw_check & 0x1) &&
789 	    (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
790 	    hba->model_info.fwid) ||
791 	    ((fw_check & 0x2) && hba->model_info.fwid)) {
792 
793 		/* Find firmware image indicated by adapter model */
794 		fw = NULL;
795 		for (i = 0; i < emlxs_fw_count; i++) {
796 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
797 				fw = &emlxs_fw_table[i];
798 				break;
799 			}
800 		}
801 
802 		/*
803 		 * If the image was found, then verify current firmware
804 		 * versions of adapter
805 		 */
806 		if (fw) {
807 			/* Obtain current firmware version info */
808 			if (hba->model_info.chip & EMLXS_BE_CHIPS) {
809 				(void) emlxs_be_read_fw_version(hba, &hba_fw);
810 			} else {
811 				hba_fw.kern = vpd->postKernRev;
812 				hba_fw.stub = vpd->opFwRev;
813 				hba_fw.sli1 = vpd->sli1FwRev;
814 				hba_fw.sli2 = vpd->sli2FwRev;
815 				hba_fw.sli3 = vpd->sli3FwRev;
816 				hba_fw.sli4 = vpd->sli4FwRev;
817 			}
818 
819 			if (!kern_update &&
820 			    ((fw->kern && (hba_fw.kern != fw->kern)) ||
821 			    (fw->stub && (hba_fw.stub != fw->stub)))) {
822 
823 				hba->fw_flag |= FW_UPDATE_NEEDED;
824 
825 			} else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
826 			    (fw->stub && (hba_fw.stub != fw->stub)) ||
827 			    (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
828 			    (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
829 			    (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
830 			    (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
831 
832 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
833 				    "Firmware update needed. "
834 				    "Updating. id=%d fw=%d",
835 				    hba->model_info.id, hba->model_info.fwid);
836 
837 #ifdef MODFW_SUPPORT
838 				/*
839 				 * Load the firmware image now
840 				 * If MODFW_SUPPORT is not defined, the
841 				 * firmware image will already be defined
842 				 * in the emlxs_fw_table
843 				 */
844 				emlxs_fw_load(hba, fw);
845 #endif /* MODFW_SUPPORT */
846 
847 				if (fw->image && fw->size) {
848 					uint32_t rc;
849 
850 					rc = emlxs_fw_download(hba,
851 					    (char *)fw->image, fw->size, 0);
852 					if ((rc != FC_SUCCESS) &&
853 					    (rc != EMLXS_REBOOT_REQUIRED)) {
854 						EMLXS_MSGF(EMLXS_CONTEXT,
855 						    &emlxs_init_msg,
856 						    "Firmware update failed.");
857 						hba->fw_flag |=
858 						    FW_UPDATE_NEEDED;
859 					}
860 #ifdef MODFW_SUPPORT
861 					/*
862 					 * Unload the firmware image from
863 					 * kernel memory
864 					 */
865 					emlxs_fw_unload(hba, fw);
866 #endif /* MODFW_SUPPORT */
867 
868 					fw_check = 0;
869 
870 					goto reset;
871 				}
872 
873 				hba->fw_flag |= FW_UPDATE_NEEDED;
874 
875 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
876 				    "Firmware image unavailable.");
877 			} else {
878 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
879 				    "Firmware update not needed.");
880 			}
881 		} else {
882 			/*
883 			 * This means either the adapter database is not
884 			 * correct or a firmware image is missing from the
885 			 * compile
886 			 */
887 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
888 			    "Firmware image unavailable. id=%d fw=%d",
889 			    hba->model_info.id, hba->model_info.fwid);
890 		}
891 	}
892 
893 	/* Reuse mbq from previous mbox */
894 	bzero(mbq, sizeof (MAILBOXQ));
895 
896 	emlxs_mb_dump_fcoe(hba, mbq, 0);
897 
898 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
899 	    MBX_SUCCESS) {
900 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
901 		    "No FCOE info found. status=%x", mb->mbxStatus);
902 	} else {
903 		EMLXS_MSGF(EMLXS_CONTEXT,
904 		    &emlxs_init_debug_msg,
905 		    "FCOE info dumped. rsp_cnt=%d status=%x",
906 		    mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
907 		(void) emlxs_parse_fcoe(hba,
908 		    (uint8_t *)hba->sli.sli4.dump_region.virt,
909 		    mb->un.varDmp4.rsp_cnt);
910 	}
911 
912 	/* Reuse mbq from previous mbox */
913 	bzero(mbq, sizeof (MAILBOXQ));
914 
915 	status = 0;
916 	if (port->flag & EMLXS_INI_ENABLED) {
917 		status |= SLI4_FEATURE_FCP_INITIATOR;
918 	}
919 	if (port->flag & EMLXS_TGT_ENABLED) {
920 		status |= SLI4_FEATURE_FCP_TARGET;
921 	}
922 	if (cfg[CFG_NPIV_ENABLE].current) {
923 		status |= SLI4_FEATURE_NPIV;
924 	}
925 	if (cfg[CFG_RQD_MODE].current) {
926 		status |= SLI4_FEATURE_RQD;
927 	}
928 	if (cfg[CFG_PERF_HINT].current) {
929 		if (hba->sli.sli4.param.PHON) {
930 			status |= SLI4_FEATURE_PERF_HINT;
931 		}
932 	}
933 
934 	emlxs_mb_request_features(hba, mbq, status);
935 
936 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
937 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
938 		    "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
939 		    mb->mbxCommand, mb->mbxStatus);
940 
941 		rval = EIO;
942 		goto failed1;
943 	}
944 	emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
945 
946 	/* Check to see if we get the features we requested */
947 	if (status != mb->un.varReqFeatures.featuresEnabled) {
948 
		/* Just report discrepancies, don't abort the attach */
950 
951 		outptr = (uint8_t *)emlxs_request_feature_xlate(
952 		    mb->un.varReqFeatures.featuresRequested);
953 		(void) strlcpy(buf, (char *)outptr, sizeof (buf));
954 
955 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
956 		    "REQUEST_FEATURES: wanted:%s  got:%s",
957 		    &buf[0], emlxs_request_feature_xlate(
958 		    mb->un.varReqFeatures.featuresEnabled));
959 
960 	}
961 
962 	if ((port->flag & EMLXS_INI_ENABLED) &&
963 	    !(mb->un.varReqFeatures.featuresEnabled &
964 	    SLI4_FEATURE_FCP_INITIATOR)) {
965 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
966 		    "Initiator mode not supported by adapter.");
967 
968 		rval = EIO;
969 
970 #ifdef SFCT_SUPPORT
971 		/* Check if we can fall back to just target mode */
972 		if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
973 		    (mb->un.varReqFeatures.featuresEnabled &
974 		    SLI4_FEATURE_FCP_TARGET) &&
975 		    (cfg[CFG_DTM_ENABLE].current == 1) &&
976 		    (cfg[CFG_TARGET_MODE].current == 1)) {
977 
978 			cfg[CFG_DTM_ENABLE].current = 0;
979 
980 			EMLXS_MSGF(EMLXS_CONTEXT,
981 			    &emlxs_init_failed_msg,
982 			    "Disabling dynamic target mode. "
983 			    "Enabling target mode only.");
984 
985 			/* This will trigger the driver to reattach */
986 			rval = EAGAIN;
987 		}
988 #endif /* SFCT_SUPPORT */
989 		goto failed1;
990 	}
991 
992 	if ((port->flag & EMLXS_TGT_ENABLED) &&
993 	    !(mb->un.varReqFeatures.featuresEnabled &
994 	    SLI4_FEATURE_FCP_TARGET)) {
995 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
996 		    "Target mode not supported by adapter.");
997 
998 		rval = EIO;
999 
1000 #ifdef SFCT_SUPPORT
1001 		/* Check if we can fall back to just initiator mode */
1002 		if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
1003 		    (mb->un.varReqFeatures.featuresEnabled &
1004 		    SLI4_FEATURE_FCP_INITIATOR) &&
1005 		    (cfg[CFG_DTM_ENABLE].current == 1) &&
1006 		    (cfg[CFG_TARGET_MODE].current == 0)) {
1007 
1008 			cfg[CFG_DTM_ENABLE].current = 0;
1009 
1010 			EMLXS_MSGF(EMLXS_CONTEXT,
1011 			    &emlxs_init_failed_msg,
1012 			    "Disabling dynamic target mode. "
1013 			    "Enabling initiator mode only.");
1014 
1015 			/* This will trigger the driver to reattach */
1016 			rval = EAGAIN;
1017 		}
1018 #endif /* SFCT_SUPPORT */
1019 		goto failed1;
1020 	}
1021 
1022 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
1023 		hba->flag |= FC_NPIV_ENABLED;
1024 	}
1025 
1026 	if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_PERF_HINT) {
1027 		hba->sli.sli4.flag |= EMLXS_SLI4_PHON;
1028 		if (hba->sli.sli4.param.PHWQ) {
1029 			hba->sli.sli4.flag |= EMLXS_SLI4_PHWQ;
1030 		}
1031 	}
1032 
1033 	/* Reuse mbq from previous mbox */
1034 	bzero(mbq, sizeof (MAILBOXQ));
1035 
1036 	emlxs_mb_read_config(hba, mbq);
1037 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1038 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1039 		    "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
1040 		    mb->mbxCommand, mb->mbxStatus);
1041 
1042 		rval = EIO;
1043 		goto failed1;
1044 	}
1045 	emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
1046 
1047 	/* Set default extents */
1048 	hba->sli.sli4.XRICount = mb->un.varRdConfig4.XRICount;
1049 	hba->sli.sli4.XRIExtCount = 1;
1050 	hba->sli.sli4.XRIExtSize = hba->sli.sli4.XRICount;
1051 	hba->sli.sli4.XRIBase[0] = mb->un.varRdConfig4.XRIBase;
1052 
1053 	hba->sli.sli4.RPICount = mb->un.varRdConfig4.RPICount;
1054 	hba->sli.sli4.RPIExtCount = 1;
1055 	hba->sli.sli4.RPIExtSize = hba->sli.sli4.RPICount;
1056 	hba->sli.sli4.RPIBase[0] = mb->un.varRdConfig4.RPIBase;
1057 
1058 	hba->sli.sli4.VPICount = mb->un.varRdConfig4.VPICount;
1059 	hba->sli.sli4.VPIExtCount = 1;
1060 	hba->sli.sli4.VPIExtSize = hba->sli.sli4.VPICount;
1061 	hba->sli.sli4.VPIBase[0] = mb->un.varRdConfig4.VPIBase;
1062 
1063 	hba->sli.sli4.VFICount = mb->un.varRdConfig4.VFICount;
1064 	hba->sli.sli4.VFIExtCount = 1;
1065 	hba->sli.sli4.VFIExtSize = hba->sli.sli4.VFICount;
1066 	hba->sli.sli4.VFIBase[0] = mb->un.varRdConfig4.VFIBase;
1067 
1068 	hba->sli.sli4.FCFICount = mb->un.varRdConfig4.FCFICount;
1069 
1070 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1071 	    "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1072 	    hba->sli.sli4.XRICount,
1073 	    hba->sli.sli4.RPICount,
1074 	    hba->sli.sli4.VPICount,
1075 	    hba->sli.sli4.VFICount,
1076 	    hba->sli.sli4.FCFICount);
1077 
1078 	if ((hba->sli.sli4.XRICount == 0) ||
1079 	    (hba->sli.sli4.RPICount == 0) ||
1080 	    (hba->sli.sli4.VPICount == 0) ||
1081 	    (hba->sli.sli4.VFICount == 0) ||
1082 	    (hba->sli.sli4.FCFICount == 0)) {
1083 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1084 		    "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1085 		    "vfi:%d fcfi:%d",
1086 		    hba->sli.sli4.XRICount,
1087 		    hba->sli.sli4.RPICount,
1088 		    hba->sli.sli4.VPICount,
1089 		    hba->sli.sli4.VFICount,
1090 		    hba->sli.sli4.FCFICount);
1091 
1092 		rval = EIO;
1093 		goto failed1;
1094 	}
1095 
1096 	if (mb->un.varRdConfig4.extents) {
1097 		if (emlxs_sli4_init_extents(hba, mbq)) {
1098 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1099 			    "Unable to initialize extents.");
1100 
1101 			rval = EIO;
1102 			goto failed1;
1103 		}
1104 	}
1105 
1106 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1107 	    "CONFIG: port_name:%c %c %c %c",
1108 	    hba->sli.sli4.port_name[0],
1109 	    hba->sli.sli4.port_name[1],
1110 	    hba->sli.sli4.port_name[2],
1111 	    hba->sli.sli4.port_name[3]);
1112 
1113 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1114 	    "CONFIG: ldv:%d link_type:%d link_number:%d",
1115 	    mb->un.varRdConfig4.ldv,
1116 	    mb->un.varRdConfig4.link_type,
1117 	    mb->un.varRdConfig4.link_number);
1118 
1119 	if (mb->un.varRdConfig4.ldv) {
1120 		hba->sli.sli4.link_number = mb->un.varRdConfig4.link_number;
1121 	} else {
1122 		hba->sli.sli4.link_number = (uint32_t)-1;
1123 	}
1124 
1125 	if (hba->sli.sli4.VPICount) {
1126 		hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
1127 	}
1128 
1129 	/* Set the max node count */
1130 	if (cfg[CFG_NUM_NODES].current > 0) {
1131 		hba->max_nodes =
1132 		    min(cfg[CFG_NUM_NODES].current,
1133 		    hba->sli.sli4.RPICount);
1134 	} else {
1135 		hba->max_nodes = hba->sli.sli4.RPICount;
1136 	}
1137 
1138 	/* Set the io throttle */
1139 	hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
1140 
1141 	/* Set max_iotag */
1142 	/* We add 1 in case all XRI's are non-zero */
1143 	hba->max_iotag = hba->sli.sli4.XRICount + 1;
1144 
1145 	if (cfg[CFG_NUM_IOTAGS].current) {
1146 		hba->max_iotag = min(hba->max_iotag,
1147 		    (uint16_t)cfg[CFG_NUM_IOTAGS].current);
1148 	}
1149 
1150 	/* Set out-of-range iotag base */
1151 	hba->fc_oor_iotag = hba->max_iotag;
1152 
1153 	/* Save the link speed capabilities */
1154 	vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
1155 	emlxs_process_link_speed(hba);
1156 
1157 	/*
1158 	 * Allocate some memory for buffers
1159 	 */
1160 	if (emlxs_mem_alloc_buffer(hba) == 0) {
1161 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1162 		    "Unable to allocate memory buffers.");
1163 
1164 		rval = ENOMEM;
1165 		goto failed1;
1166 	}
1167 
1168 	if (emlxs_sli4_resource_alloc(hba)) {
1169 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1170 		    "Unable to allocate resources.");
1171 
1172 		rval = ENOMEM;
1173 		goto failed2;
1174 	}
1175 	emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
1176 	emlxs_sli4_zero_queue_stat(hba);
1177 
1178 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1179 	if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
1180 		hba->fca_tran->fca_num_npivports = hba->vpi_max;
1181 	}
1182 #endif /* >= EMLXS_MODREV5 */
1183 
1184 	/* Reuse mbq from previous mbox */
1185 	bzero(mbq, sizeof (MAILBOXQ));
1186 
1187 	if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
1188 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1189 		    "Unable to post sgl pages.");
1190 
1191 		rval = EIO;
1192 		goto failed3;
1193 	}
1194 
1195 	/* Reuse mbq from previous mbox */
1196 	bzero(mbq, sizeof (MAILBOXQ));
1197 
1198 	if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
1199 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1200 		    "Unable to post header templates.");
1201 
1202 		rval = EIO;
1203 		goto failed3;
1204 	}
1205 
1206 	/*
1207 	 * Add our interrupt routine to kernel's interrupt chain & enable it
1208 	 * If MSI is enabled this will cause Solaris to program the MSI address
1209 	 * and data registers in PCI config space
1210 	 */
1211 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
1212 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1213 		    "Unable to add interrupt(s).");
1214 
1215 		rval = EIO;
1216 		goto failed3;
1217 	}
1218 
1219 	/* Reuse mbq from previous mbox */
1220 	bzero(mbq, sizeof (MAILBOXQ));
1221 
1222 	/* This MUST be done after EMLXS_INTR_ADD */
1223 	if (emlxs_sli4_create_queues(hba, mbq)) {
1224 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1225 		    "Unable to create queues.");
1226 
1227 		rval = EIO;
1228 		goto failed3;
1229 	}
1230 
1231 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
1232 
1233 	/* Get and save the current firmware version (based on sli_mode) */
1234 	emlxs_decode_firmware_rev(hba, vpd);
1235 
1236 
1237 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1238 
1239 	if (SLI4_FC_MODE) {
1240 		/* Reuse mbq from previous mbox */
1241 		bzero(mbq, sizeof (MAILBOXQ));
1242 
1243 		emlxs_mb_config_link(hba, mbq);
1244 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1245 		    MBX_SUCCESS) {
1246 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1247 			    "Unable to configure link. Mailbox cmd=%x "
1248 			    "status=%x",
1249 			    mb->mbxCommand, mb->mbxStatus);
1250 
1251 			rval = EIO;
1252 			goto failed3;
1253 		}
1254 	}
1255 
1256 	/* Reuse mbq from previous mbox */
1257 	bzero(mbq, sizeof (MAILBOXQ));
1258 
1259 	/*
1260 	 * We need to get login parameters for NID
1261 	 */
1262 	(void) emlxs_mb_read_sparam(hba, mbq);
1263 	mp = (MATCHMAP *)mbq->bp;
1264 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1265 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1266 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1267 		    mb->mbxCommand, mb->mbxStatus);
1268 
1269 		rval = EIO;
1270 		goto failed3;
1271 	}
1272 
1273 	/* Free the buffer since we were polling */
1274 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1275 	mp = NULL;
1276 
1277 	/* If no serial number in VPD data, then use the WWPN */
1278 	if (vpd->serial_num[0] == 0) {
1279 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1280 		for (i = 0; i < 12; i++) {
1281 			status = *outptr++;
1282 			j = ((status & 0xf0) >> 4);
1283 			if (j <= 9) {
1284 				vpd->serial_num[i] =
1285 				    (char)((uint8_t)'0' + (uint8_t)j);
1286 			} else {
1287 				vpd->serial_num[i] =
1288 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1289 			}
1290 
1291 			i++;
1292 			j = (status & 0xf);
1293 			if (j <= 9) {
1294 				vpd->serial_num[i] =
1295 				    (char)((uint8_t)'0' + (uint8_t)j);
1296 			} else {
1297 				vpd->serial_num[i] =
1298 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1299 			}
1300 		}
1301 
1302 		/*
1303 		 * Set port number and port index to zero
1304 		 * The WWN's are unique to each port and therefore port_num
1305 		 * must equal zero. This effects the hba_fru_details structure
1306 		 * in fca_bind_port()
1307 		 */
1308 		vpd->port_num[0] = 0;
1309 		vpd->port_index = 0;
1310 
1311 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1312 		    "CONFIG: WWPN: port_index=0");
1313 	}
1314 
1315 	/* Make final attempt to set a port index */
1316 	if (vpd->port_index == (uint32_t)-1) {
1317 		dev_info_t *p_dip;
1318 		dev_info_t *c_dip;
1319 
1320 		p_dip = ddi_get_parent(hba->dip);
1321 		c_dip = ddi_get_child(p_dip);
1322 
1323 		vpd->port_index = 0;
1324 		while (c_dip && (hba->dip != c_dip)) {
1325 			c_dip = ddi_get_next_sibling(c_dip);
1326 
1327 			if (strcmp(ddi_get_name(c_dip), "ethernet") == 0) {
1328 				continue;
1329 			}
1330 
1331 			vpd->port_index++;
1332 		}
1333 
1334 		EMLXS_MSGF(EMLXS_CONTEXT,
1335 		    &emlxs_init_debug_msg,
1336 		    "CONFIG: Device tree: port_index=%d",
1337 		    vpd->port_index);
1338 	}
1339 
1340 	if (vpd->port_num[0] == 0) {
1341 		if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1342 			(void) snprintf(vpd->port_num,
1343 			    (sizeof (vpd->port_num)-1),
1344 			    "%d", vpd->port_index);
1345 		}
1346 	}
1347 
1348 	if (vpd->id[0] == 0) {
1349 		(void) snprintf(vpd->id, (sizeof (vpd->id)-1),
1350 		    "%s %d",
1351 		    hba->model_info.model_desc, vpd->port_index);
1352 
1353 	}
1354 
1355 	if (vpd->manufacturer[0] == 0) {
1356 		(void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1357 		    (sizeof (vpd->manufacturer)-1));
1358 	}
1359 
1360 	if (vpd->part_num[0] == 0) {
1361 		(void) strncpy(vpd->part_num, hba->model_info.model,
1362 		    (sizeof (vpd->part_num)-1));
1363 	}
1364 
1365 	if (vpd->model_desc[0] == 0) {
1366 		(void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1367 		    "%s %d",
1368 		    hba->model_info.model_desc, vpd->port_index);
1369 	}
1370 
1371 	if (vpd->model[0] == 0) {
1372 		(void) strncpy(vpd->model, hba->model_info.model,
1373 		    (sizeof (vpd->model)-1));
1374 	}
1375 
1376 	if (vpd->prog_types[0] == 0) {
1377 		emlxs_build_prog_types(hba, vpd);
1378 	}
1379 
1380 	/* Create the symbolic names */
1381 	(void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1382 	    "%s %s FV%s DV%s %s",
1383 	    hba->model_info.manufacturer, hba->model_info.model,
1384 	    hba->vpd.fw_version, emlxs_version,
1385 	    (char *)utsname.nodename);
1386 
1387 	(void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1388 	    "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1389 	    hba->model_info.manufacturer,
1390 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1391 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1392 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1393 
1394 
1395 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1396 	emlxs_sli4_enable_intr(hba);
1397 
1398 	/* Check persist-linkdown */
1399 	if (cfg[CFG_PERSIST_LINKDOWN].current) {
1400 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1401 		goto done;
1402 	}
1403 
1404 #ifdef SFCT_SUPPORT
1405 	if ((port->mode == MODE_TARGET) &&
1406 	    !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1407 		goto done;
1408 	}
1409 #endif /* SFCT_SUPPORT */
1410 
1411 	/* Reuse mbq from previous mbox */
1412 	bzero(mbq, sizeof (MAILBOXQ));
1413 
1414 	/*
1415 	 * Interupts are enabled, start the timeout timers now.
1416 	 */
1417 	emlxs_timer_start(hba);
1418 
1419 	/*
1420 	 * Setup and issue mailbox INITIALIZE LINK command
1421 	 * At this point, the interrupt will be generated by the HW
1422 	 */
1423 	emlxs_mb_init_link(hba, mbq,
1424 	    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1425 
1426 	rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0);
1427 	if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1428 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1429 		    "Unable to initialize link. "
1430 		    "Mailbox cmd=%x status=%x",
1431 		    mb->mbxCommand, mb->mbxStatus);
1432 
1433 		rval = EIO;
1434 		goto failed4;
1435 	}
1436 
1437 	/* Wait for link to come up */
1438 	i = cfg[CFG_LINKUP_DELAY].current;
1439 	while (i && (hba->state < FC_LINK_UP)) {
1440 		/* Check for hardware error */
1441 		if (hba->state == FC_ERROR) {
1442 			EMLXS_MSGF(EMLXS_CONTEXT,
1443 			    &emlxs_init_failed_msg,
1444 			    "Adapter error.", mb->mbxCommand,
1445 			    mb->mbxStatus);
1446 
1447 			rval = EIO;
1448 			goto failed4;
1449 		}
1450 
1451 		BUSYWAIT_MS(1000);
1452 		i--;
1453 	}
1454 
1455 done:
1456 	/*
1457 	 * The leadville driver will now handle the FLOGI at the driver level
1458 	 */
1459 
1460 	if (mbq) {
1461 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1462 		mbq = NULL;
1463 		mb = NULL;
1464 	}
1465 
1466 	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1467 		emlxs_sli4_gpio_timer_start(hba);
1468 
1469 	return (0);
1470 
1471 failed4:
1472 	emlxs_timer_stop(hba);
1473 
1474 failed3:
1475 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1476 
1477 	if (mp) {
1478 		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1479 		mp = NULL;
1480 	}
1481 
1482 
1483 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1484 		(void) EMLXS_INTR_REMOVE(hba);
1485 	}
1486 
1487 	emlxs_sli4_resource_free(hba);
1488 
1489 failed2:
1490 	(void) emlxs_mem_free_buffer(hba);
1491 
1492 failed1:
1493 	if (mbq) {
1494 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1495 		mbq = NULL;
1496 		mb = NULL;
1497 	}
1498 
1499 	if (hba->sli.sli4.dump_region.virt) {
1500 		(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1501 	}
1502 
1503 	if (rval == 0) {
1504 		rval = EIO;
1505 	}
1506 
1507 	return (rval);
1508 
1509 } /* emlxs_sli4_online() */
1510 
1511 
/*
 * Take the adapter offline, reversing the work done by
 * emlxs_sli4_online().
 *
 * reset_requested - when non-zero, a full HBA reset is issued before
 *		     the adapter interface is shut down.
 *
 * Always frees the SLI resources, driver buffers and the host dump
 * region, even when the adapter was already interlocked (killed).
 */
static void
emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
	/* Reverse emlxs_sli4_online */

	/* Stop the GPIO LED timer if this model drives LEDs via GPIO */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
		emlxs_sli4_gpio_timer_stop(hba);

	/*
	 * If the adapter interface has already been interlocked
	 * (killed), skip the reset/kill steps and go straight to
	 * resource cleanup.
	 */
	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		mutex_exit(&EMLXS_PORT_LOCK);
		goto killed;
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	if (reset_requested) {
		(void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
	}

	/* Shutdown the adapter interface */
	emlxs_sli4_hba_kill(hba);

killed:

	/* Free SLI shared memory */
	emlxs_sli4_resource_free(hba);

	/* Free driver shared memory */
	(void) emlxs_mem_free_buffer(hba);

	/* Free the host dump region buffer */
	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);

} /* emlxs_sli4_offline() */
1546 
1547 
/*
 * Map the PCI BAR register spaces used to communicate with the HBA
 * and allocate the bootstrap mailbox DMA buffer.
 *
 * The register layout depends on the SLI interface type:
 *   if_type 0 - MPU semaphore in BAR1, doorbells in BAR2, error
 *		 registers in PCI config space.
 *   if_type 2 - everything in BAR0, including the SLIPORT
 *		 status/control/error registers.
 *
 * Returns 0 on success or ENOMEM on failure; on failure any partial
 * mappings are undone via emlxs_sli4_unmap_hdw().
 */
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t		*port = &PPORT;
	dev_info_t		*dip;
	ddi_device_acc_attr_t	dev_attr;
	int			status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar1_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar1_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR1 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar1_addr, &dev_attr,
				    &hba->sli.sli4.bar1_acc_handle);
				goto failed;
			}
		}

		if (hba->sli.sli4.bar2_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar2_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup BAR2 failed. status=%x",
				    status);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr +
		    CSR_MPU_EP_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);

		/* if_type 0 has no SLIPORT status/control registers */
		hba->sli.sli4.STATUS_reg_addr = 0;
		hba->sli.sli4.CNTL_reg_addr = 0;

		/* Error registers live in PCI config space on if_type 0 */
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET);

		hba->sli.sli4.PHYSDEV_reg_addr = 0;
		break;

	case SLI_INTF_IF_TYPE_2:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar0_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR0_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar0_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar0_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR0 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar0_addr, &dev_attr,
				    &hba->sli.sli4.bar0_acc_handle);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;

	case SLI_INTF_IF_TYPE_1:
	case SLI_INTF_IF_TYPE_3:
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_attach_failed_msg,
		    "Map hdw: Unsupported if_type %08x",
		    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK));

		goto failed;
	}

	/*
	 * Allocate the bootstrap mailbox DMA buffer (page aligned,
	 * with room for a mailbox extension appended).
	 */
	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO	*buf_info;
		MBUF_INFO	bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		/* Only the bootstrap area is zeroed; the extension is not */
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	/* Undo any partial mappings/allocations */
	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1720 
1721 
1722 /*ARGSUSED*/
1723 static void
1724 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1725 {
1726 	MBUF_INFO	bufinfo;
1727 	MBUF_INFO	*buf_info = &bufinfo;
1728 
1729 
1730 	if (hba->sli.sli4.bar0_acc_handle) {
1731 		ddi_regs_map_free(&hba->sli.sli4.bar0_acc_handle);
1732 		hba->sli.sli4.bar0_acc_handle = 0;
1733 	}
1734 
1735 	if (hba->sli.sli4.bar1_acc_handle) {
1736 		ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1737 		hba->sli.sli4.bar1_acc_handle = 0;
1738 	}
1739 
1740 	if (hba->sli.sli4.bar2_acc_handle) {
1741 		ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1742 		hba->sli.sli4.bar2_acc_handle = 0;
1743 	}
1744 
1745 	if (hba->sli.sli4.bootstrapmb.virt) {
1746 		bzero(buf_info, sizeof (MBUF_INFO));
1747 
1748 		if (hba->sli.sli4.bootstrapmb.phys) {
1749 			buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1750 			buf_info->data_handle =
1751 			    hba->sli.sli4.bootstrapmb.data_handle;
1752 			buf_info->dma_handle =
1753 			    hba->sli.sli4.bootstrapmb.dma_handle;
1754 			buf_info->flags = FC_MBUF_DMA;
1755 		}
1756 
1757 		buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1758 		buf_info->size = hba->sli.sli4.bootstrapmb.size;
1759 		emlxs_mem_free(hba, buf_info);
1760 
1761 		hba->sli.sli4.bootstrapmb.virt = NULL;
1762 	}
1763 
1764 	return;
1765 
1766 } /* emlxs_sli4_unmap_hdw() */
1767 
1768 
/*
 * Poll the hardware until it reports POST/ready, for up to ~30
 * seconds (30 x 1 second busy-waits).
 *
 * Returns:
 *   0 - hardware is ready
 *   1 - fatal/unrecoverable POST error (if_type 0), or the port is
 *	 ready but requires a reset (if_type 2)
 *   2 - unrecoverable port error (if_type 2)
 *   3 - unsupported interface type, or ready timeout
 */
static int
emlxs_check_hdw_ready(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t i = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion */
	while (i < 30) {

		switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
		case SLI_INTF_IF_TYPE_0:
			/* if_type 0 reports POST state via the semaphore reg */
			status = emlxs_sli4_read_sema(hba);

			/* Check to see if any errors occurred during init */
			if (status & ARM_POST_FATAL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "SEMA Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_UNRECOVERABLE_ERROR) ==
			    ARM_UNRECOVERABLE_ERROR) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_POST_MASK) == ARM_POST_READY) {
				/* ARM Ready !! */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "ARM Ready: status=%x", status);

				return (0);
			}
			break;

		case SLI_INTF_IF_TYPE_2:
			/* if_type 2 reports state via the SLIPORT status reg */
			status = emlxs_sli4_read_status(hba);

			if (status & SLI_STATUS_READY) {
				if (!(status & SLI_STATUS_ERROR)) {
					/* ARM Ready !! */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready: status=%x", status);

					return (0);
				}

				/* Ready but in error; collect error details */
				err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR1_reg_addr);
				err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR2_reg_addr);

				if (status & SLI_STATUS_RESET_NEEDED) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready (Reset Needed): "
					    "status=%x err1=%x "
					    "err2=%x",
					    status, err1, err2);

					return (1);
				}

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x err1=%x "
				    "err2=%x",
				    status, err1, err2);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (2);
			}

			break;

		default:
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			return (3);
		}

		BUSYWAIT_MS(1000);
		i++;
	}

	/* Timeout occurred */
	/* Read the error registers from wherever this if_type keeps them */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/*
	 * NOTE(review): 'status' still holds the value read in the
	 * last loop iteration.  For if_type 0 that is a semaphore
	 * value, so the SLI_STATUS_ERROR test below is only
	 * meaningful for if_type 2 -- confirm intent.
	 */
	if (status & SLI_STATUS_ERROR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
		    status, err1, err2);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: status=%x err1=%x err2=%x",
		    status, err1, err2);
	}

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (3);

} /* emlxs_check_hdw_ready() */
1901 
1902 
1903 static uint32_t
1904 emlxs_sli4_read_status(emlxs_hba_t *hba)
1905 {
1906 #ifdef FMA_SUPPORT
1907 	emlxs_port_t *port = &PPORT;
1908 #endif  /* FMA_SUPPORT */
1909 	uint32_t status;
1910 
1911 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1912 	case SLI_INTF_IF_TYPE_2:
1913 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1914 		    hba->sli.sli4.STATUS_reg_addr);
1915 #ifdef FMA_SUPPORT
1916 		/* Access handle validation */
1917 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1918 #endif  /* FMA_SUPPORT */
1919 		break;
1920 	default:
1921 		status = 0;
1922 		break;
1923 	}
1924 
1925 	return (status);
1926 
1927 } /* emlxs_sli4_read_status() */
1928 
1929 
1930 static uint32_t
1931 emlxs_sli4_read_sema(emlxs_hba_t *hba)
1932 {
1933 #ifdef FMA_SUPPORT
1934 	emlxs_port_t *port = &PPORT;
1935 #endif  /* FMA_SUPPORT */
1936 	uint32_t status;
1937 
1938 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1939 	case SLI_INTF_IF_TYPE_0:
1940 		status = ddi_get32(hba->sli.sli4.bar1_acc_handle,
1941 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
1942 #ifdef FMA_SUPPORT
1943 		/* Access handle validation */
1944 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1945 #endif  /* FMA_SUPPORT */
1946 		break;
1947 
1948 	case SLI_INTF_IF_TYPE_2:
1949 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1950 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
1951 #ifdef FMA_SUPPORT
1952 		/* Access handle validation */
1953 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1954 #endif  /* FMA_SUPPORT */
1955 		break;
1956 	default:
1957 		status = 0;
1958 		break;
1959 	}
1960 
1961 	return (status);
1962 
1963 } /* emlxs_sli4_read_sema() */
1964 
1965 
1966 static uint32_t
1967 emlxs_sli4_read_mbdb(emlxs_hba_t *hba)
1968 {
1969 #ifdef FMA_SUPPORT
1970 	emlxs_port_t *port = &PPORT;
1971 #endif  /* FMA_SUPPORT */
1972 	uint32_t status;
1973 
1974 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1975 	case SLI_INTF_IF_TYPE_0:
1976 		status = ddi_get32(hba->sli.sli4.bar2_acc_handle,
1977 		    hba->sli.sli4.MBDB_reg_addr);
1978 
1979 #ifdef FMA_SUPPORT
1980 		/* Access handle validation */
1981 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1982 #endif  /* FMA_SUPPORT */
1983 		break;
1984 
1985 	case SLI_INTF_IF_TYPE_2:
1986 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1987 		    hba->sli.sli4.MBDB_reg_addr);
1988 #ifdef FMA_SUPPORT
1989 		/* Access handle validation */
1990 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1991 #endif  /* FMA_SUPPORT */
1992 		break;
1993 	default:
1994 		status = 0;
1995 		break;
1996 	}
1997 
1998 	return (status);
1999 
2000 } /* emlxs_sli4_read_mbdb() */
2001 
2002 
2003 static void
2004 emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys, boolean_t high)
2005 {
2006 	uint32_t db;
2007 	uint_t shift;
2008 
2009 	/*
2010 	 * The bootstrap mailbox is posted as 2 x 30 bit values.
2011 	 * It is required to be 16 bit aligned, and the 2 low order
2012 	 * bits are used as flags.
2013 	 */
2014 	shift = high ? 32 : 2;
2015 
2016 	db = (uint32_t)(phys >> shift) & BMBX_ADDR;
2017 
2018 	if (high)
2019 		db |= BMBX_ADDR_HI;
2020 
2021 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2022 	case SLI_INTF_IF_TYPE_0:
2023 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2024 		    hba->sli.sli4.MBDB_reg_addr, db);
2025 		break;
2026 
2027 	case SLI_INTF_IF_TYPE_2:
2028 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2029 		    hba->sli.sli4.MBDB_reg_addr, db);
2030 		break;
2031 	}
2032 
2033 } /* emlxs_sli4_write_mbdb() */
2034 
2035 
2036 static void
2037 emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
2038     boolean_t arm)
2039 {
2040 	uint32_t	db;
2041 
2042 	/*
2043 	 * Add the qid to the doorbell. It is split into a low and
2044 	 * high component.
2045 	 */
2046 
2047 	/* Initialize with the low bits */
2048 	db = qid & EQ_DB_ID_LO_MASK;
2049 
2050 	/* drop the low bits */
2051 	qid >>= EQ_ID_LO_BITS;
2052 
2053 	/* Add the high bits */
2054 	db |= (qid << EQ_DB_ID_HI_SHIFT) & EQ_DB_ID_HI_MASK;
2055 
2056 	/*
2057 	 * Include the number of entries to be popped.
2058 	 */
2059 	db |= (count << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK;
2060 
2061 	/* The doorbell is for an event queue */
2062 	db |= EQ_DB_EVENT;
2063 
2064 	/* Arm if asked to do so */
2065 	if (arm)
2066 		db |= EQ_DB_CLEAR | EQ_DB_REARM;
2067 
2068 #ifdef DEBUG_FASTPATH
2069 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2070 	    "EQE: CLEAR db=%08x pops=%d", db, count);
2071 #endif /* DEBUG_FASTPATH */
2072 
2073 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2074 	case SLI_INTF_IF_TYPE_0:
2075 		/* The CQDB_reg_addr is also use for EQs */
2076 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2077 		    hba->sli.sli4.CQDB_reg_addr, db);
2078 		break;
2079 
2080 	case SLI_INTF_IF_TYPE_2:
2081 		/* The CQDB_reg_addr is also use for EQs */
2082 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2083 		    hba->sli.sli4.CQDB_reg_addr, db);
2084 		break;
2085 	}
2086 } /* emlxs_sli4_write_eqdb() */
2087 
2088 static void
2089 emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
2090     boolean_t arm)
2091 {
2092 	uint32_t	db;
2093 
2094 	/*
2095 	 * Add the qid to the doorbell. It is split into a low and
2096 	 * high component.
2097 	 */
2098 
2099 	/* Initialize with the low bits */
2100 	db = qid & CQ_DB_ID_LO_MASK;
2101 
2102 	/* drop the low bits */
2103 	qid >>= CQ_ID_LO_BITS;
2104 
2105 	/* Add the high bits */
2106 	db |= (qid << CQ_DB_ID_HI_SHIFT) & CQ_DB_ID_HI_MASK;
2107 
2108 	/*
2109 	 * Include the number of entries to be popped.
2110 	 */
2111 	db |= (count << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK;
2112 
2113 	/* Arm if asked to do so */
2114 	if (arm)
2115 		db |= CQ_DB_REARM;
2116 
2117 #ifdef DEBUG_FASTPATH
2118 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2119 	    "CQE: CLEAR db=%08x: pops=%d", db, count);
2120 #endif /* DEBUG_FASTPATH */
2121 
2122 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2123 	case SLI_INTF_IF_TYPE_0:
2124 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2125 		    hba->sli.sli4.CQDB_reg_addr, db);
2126 		break;
2127 
2128 	case SLI_INTF_IF_TYPE_2:
2129 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2130 		    hba->sli.sli4.CQDB_reg_addr, db);
2131 		break;
2132 	}
2133 } /* emlxs_sli4_write_cqdb() */
2134 
2135 
/*
 * Ring the receive queue doorbell: credit 'count' newly posted RQEs
 * to receive queue 'qid'.  The doorbell layout comes from the
 * emlxs_rqdbu_t union's bit-fields.
 */
static void
emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
{
	emlxs_rqdbu_t rqdb;

	rqdb.word = 0;
	rqdb.db.Qid = qid;
	rqdb.db.NumPosted = count;

	/* The doorbell register lives in a different BAR per if_type */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ddi_put32(hba->sli.sli4.bar2_acc_handle,
		    hba->sli.sli4.RQDB_reg_addr, rqdb.word);
		break;

	case SLI_INTF_IF_TYPE_2:
		ddi_put32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.RQDB_reg_addr, rqdb.word);
		break;
	}

} /* emlxs_sli4_write_rqdb() */
2158 
2159 
2160 static void
2161 emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2162 {
2163 	uint32_t db;
2164 
2165 	db = qid;
2166 	db |= (count << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK;
2167 
2168 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2169 	case SLI_INTF_IF_TYPE_0:
2170 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2171 		    hba->sli.sli4.MQDB_reg_addr, db);
2172 		break;
2173 
2174 	case SLI_INTF_IF_TYPE_2:
2175 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2176 		    hba->sli.sli4.MQDB_reg_addr, db);
2177 		break;
2178 	}
2179 
2180 } /* emlxs_sli4_write_mqdb() */
2181 
2182 
2183 static void
2184 emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid, uint_t posted,
2185     uint_t index)
2186 {
2187 	uint32_t db;
2188 
2189 	db = qid;
2190 	db |= (posted << WQ_DB_POST_SHIFT) & WQ_DB_POST_MASK;
2191 	db |= (index << WQ_DB_IDX_SHIFT) & WQ_DB_IDX_MASK;
2192 
2193 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2194 	case SLI_INTF_IF_TYPE_0:
2195 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2196 		    hba->sli.sli4.WQDB_reg_addr, db);
2197 		break;
2198 
2199 	case SLI_INTF_IF_TYPE_2:
2200 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2201 		    hba->sli.sli4.WQDB_reg_addr, db);
2202 		break;
2203 	}
2204 
2205 #ifdef DEBUG_FASTPATH
2206 	EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2207 	    "WQ RING: %08x", db);
2208 #endif /* DEBUG_FASTPATH */
2209 } /* emlxs_sli4_write_wqdb() */
2210 
2211 
/*
 * Poll the bootstrap mailbox doorbell until the port reports ready.
 *
 * tmo is the remaining timeout in 10ms ticks.  Returns the number of
 * ticks left (always non-zero) once BMBX_READY is observed; returns 0
 * on timeout, after logging the port error registers and moving the
 * HBA to the FC_ERROR state.
 */
static uint32_t
emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion, tmo is in 10ms ticks */
	while (tmo) {
		status = emlxs_sli4_read_mbdb(hba);

		/* Check to see if any errors occurred during init */
		if (status & BMBX_READY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "BMBX Ready: status=0x%x", status);

			return (tmo);
		}

		BUSYWAIT_MS(10);
		tmo--;
	}

	/* Timed out; the error register location depends on if_type */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/* Timeout occurred */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
	    "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
	    status, err1, err2);

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (0);

} /* emlxs_check_bootstrap_ready() */
2262 
2263 
/*
 * Execute the mailbox command currently staged in the bootstrap
 * mailbox buffer.
 *
 * The bootstrap mailbox physical address is posted to the doorbell in
 * two steps (high half first, then low half); the port must report
 * ready after each post before the sequence may continue.  Returns
 * the remaining timeout in 10ms ticks, or 0 on failure.
 */
static uint32_t
emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *iptr;

	/*
	 * This routine assumes the bootstrap mbox is loaded
	 * with the mailbox command to be executed.
	 *
	 * First, load the high 30 bits of bootstrap mailbox
	 */
	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_TRUE);

	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Load the low 30 bits of bootstrap mailbox */
	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_FALSE);

	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Trace the first words of the completed mailbox */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "BootstrapMB: %p Completed %08x %08x %08x",
	    hba->sli.sli4.bootstrapmb.virt,
	    *iptr, *(iptr+1), *(iptr+2));

	return (tmo);

} /* emlxs_issue_bootstrap_mb() */
2301 
2302 
/*
 * One-time initialization of the SLI4 bootstrap mailbox.
 *
 * Waits for the port to report the bootstrap mailbox ready and then
 * issues the special FW_INITIALIZE command through it.  On success the
 * FC_BOOTSTRAPMB_INIT flag is set so subsequent calls return
 * immediately.  Returns 0 on success, 1 on failure.
 */
static int
emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	uint32_t *iptr;
	uint32_t tmo;

	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
		return (0);  /* Already initialized */
	}

	/* NOTE: tmo is in 10ms ticks */
	tmo = emlxs_check_bootstrap_ready(hba, 3000);
	if (tmo == 0) {
		return (1);
	}

	/* Issue FW_INITIALIZE command */

	/* Special words to initialize bootstrap mbox MUST be little endian */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
	*iptr = LE_SWAP32(FW_INITIALIZE_WORD0);
	*(iptr+1) = LE_SWAP32(FW_INITIALIZE_WORD1);

	/* Flush the command words to the device before ringing the bell */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);

	emlxs_data_dump(port, "FW_INIT", (uint32_t *)iptr, 6, 0);
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (1);
	}

#ifdef FMA_SUPPORT
	/* Validate the DMA handle before trusting the completion */
	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "init_bootstrap_mb: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (1);
	}
#endif
	hba->flag |= FC_BOOTSTRAPMB_INIT;
	return (0);

} /* emlxs_init_bootstrap_mb() */
2355 
2356 
2357 
2358 
/*
 * Bring the SLI4 adapter to an initialized state.
 *
 * Resets the adapter, binds each channel to its work queue, resets all
 * virtual port (VPI) objects, establishes the node count limit,
 * initializes the bootstrap mailbox and records FCoE map / UE-mask
 * settings.  Returns 0 on success or non-zero on failure.
 */
static uint32_t
emlxs_sli4_hba_init(emlxs_hba_t *hba)
{
	int rc;
	uint16_t i;
	emlxs_port_t *vport;
	emlxs_config_t *cfg = &CFG;
	CHANNEL *cp;
	VPIobj_t *vpip;

	/* Restart the adapter */
	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	/* Bind each channel to its SLI4 work queue */
	for (i = 0; i < hba->chan_count; i++) {
		cp = &hba->chan[i];
		cp->iopath = (void *)&hba->sli.sli4.wq[i];
	}

	/* Initialize all the port objects */
	hba->vpi_max  = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;

		vpip = &vport->VPIobj;
		vpip->index = i;
		vpip->VPI = i;
		vpip->port = vport;
		vpip->state = VPI_STATE_OFFLINE;
		vport->vpip = vpip;
	}

	/* Set the max node count (config value, else default of 4096) */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else {
			hba->max_nodes = 4096;
		}
	}

	rc = emlxs_init_bootstrap_mb(hba);
	if (rc) {
		return (rc);
	}

	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;

	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_0) {
		/* Cache the UE MASK registers value for UE error detection */
		hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
		hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
	}

	return (0);

} /* emlxs_sli4_hba_init() */
2423 
2424 
/*
 * Reset the SLI4 adapter and re-initialize the driver's view of it.
 *
 * quiesce != 0 performs a quiesce-style reset (the adapter is not
 * killed first and the quiesce mailbox path is used).  The restart and
 * skip_post arguments are unused on SLI4.  Returns 0 on success, 1 on
 * failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
    uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	CHANNEL *cp;
	emlxs_config_t *cfg = &CFG;
	MAILBOXQ mboxq;
	uint32_t value;
	uint32_t i;
	uint32_t rc;
	uint16_t channelno;
	uint32_t status;
	uint32_t err1;
	uint32_t err2;
	uint8_t generate_event = 0;

	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* The reset mechanism differs per SLI interface type */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);

			/*
			 * Initalize Hardware that will be used to bring
			 * SLI4 online.
			 */
			rc = emlxs_init_bootstrap_mb(hba);
			if (rc) {
				return (rc);
			}
		}

		bzero((void *)&mboxq, sizeof (MAILBOXQ));
		emlxs_mb_resetport(hba, &mboxq);

		if (quiesce == 0) {
			if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				/* Timeout occurred */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Timeout: RESET");
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		} else {
			if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		}
		emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
		break;

	case SLI_INTF_IF_TYPE_2:
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);
		}

		rc = emlxs_check_hdw_ready(hba);
		if (rc > 1) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "Adapter not ready for reset.");
			return (1);
		}

		if (rc == 1) {
			err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* Don't generate an event if dump was forced */
			if ((err1 != 0x2) || (err2 != 0x2)) {
				generate_event = 1;
			}
		}

		/* Reset the port now */

		mutex_enter(&EMLXS_PORT_LOCK);
		value = SLI_CNTL_INIT_PORT;

		ddi_put32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.CNTL_reg_addr, value);
		mutex_exit(&EMLXS_PORT_LOCK);

		break;
	}

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];
		cp->hba = hba;
		cp->channelno = channelno;
	}

	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Specific to ATTO G5 boards */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
		/* Assume the boot driver enabled all LEDs */
		hba->gpio_current =
		    EMLXS_GPIO_LO | EMLXS_GPIO_HI | EMLXS_GPIO_ACT;
		hba->gpio_desired = 0;
		hba->gpio_bit = 0;
	}

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	/* If the pre-reset error state warranted it, log a dump event */
	if (generate_event) {
		status = emlxs_sli4_read_status(hba);
		if (status & SLI_STATUS_DUMP_IMAGE_PRESENT) {
			emlxs_log_dump_event(port, NULL, 0);
		}
	}

	return (0);

} /* emlxs_sli4_hba_reset */
2594 
2595 
/* SGL segment types passed to emlxs_pkt_to_sgl() in 'sgl_type' */
#define	SGL_CMD		0	/* command payload segment */
#define	SGL_RESP	1	/* response payload segment */
#define	SGL_DATA	2	/* data payload segment */
#define	SGL_LAST	0x80	/* flag: mark final SGE as end of list */
2600 
/*
 * Convert one payload region of a fc_packet_t (command, response or
 * data) into a chain of SGEs.
 *
 * sgl_type selects which packet cookies to walk (SGL_CMD, SGL_RESP or
 * SGL_DATA) and may be or'ed with SGL_LAST to mark the final SGE as
 * the end of the list.  Each DMA cookie is split into SGEs of at most
 * EMLXS_MAX_SGE_SIZE bytes; for data segments the SGE offset field
 * carries the running byte offset.
 *
 * Returns a pointer just past the last SGE written, or NULL if
 * sgl_type is unrecognized.  If pcnt is non-NULL it receives the total
 * byte count described by the SGEs.
 */
/*ARGSUSED*/
static ULP_SGE64 *
emlxs_pkt_to_sgl(emlxs_port_t *port, fc_packet_t *pkt, ULP_SGE64 *sge,
    uint32_t sgl_type, uint32_t *pcnt)
{
#ifdef DEBUG_SGE
	emlxs_hba_t *hba = HBA;
#endif /* DEBUG_SGE */
	ddi_dma_cookie_t *cp;
	uint_t i;
	uint_t last;
	int32_t	size;
	int32_t	sge_size;
	uint64_t sge_addr;
	int32_t	len;
	uint32_t cnt;
	uint_t cookie_cnt;
	ULP_SGE64 stage_sge;

	/* Separate the terminator flag from the segment type */
	last = sgl_type & SGL_LAST;
	sgl_type &= ~SGL_LAST;

	/* Pre-MODREV3 packets carry a single cookie by value */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	switch (sgl_type) {
	case SGL_CMD:
		cp = pkt->pkt_cmd_cookie;
		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = pkt->pkt_resp_cookie;
		cookie_cnt = pkt->pkt_resp_cookie_cnt;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = pkt->pkt_data_cookie;
		cookie_cnt = pkt->pkt_data_cookie_cnt;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}

#else
	switch (sgl_type) {
	case SGL_CMD:
		cp = &pkt->pkt_cmd_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = &pkt->pkt_resp_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = &pkt->pkt_data_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}
#endif	/* >= EMLXS_MODREV3 */

	/*
	 * Each SGE is staged locally and only copied (byte-swapped)
	 * into the SGL one iteration behind the loop, so the 'last'
	 * bit can still be set on the final entry after the loop.
	 */
	stage_sge.offset = 0;
	stage_sge.type = 0;
	stage_sge.last = 0;
	cnt = 0;
	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {

		sge_size = cp->dmac_size;
		sge_addr = cp->dmac_laddress;
		while (sge_size && size) {
			if (cnt) {
				/* Copy staged SGE before we build next one */
				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
				    (uint8_t *)sge, sizeof (ULP_SGE64));
				sge++;
			}
			/* Clip to the SGE size limit and remaining bytes */
			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
			len = MIN(size, len);

			stage_sge.addrHigh =
			    PADDR_HI(sge_addr);
			stage_sge.addrLow =
			    PADDR_LO(sge_addr);
			stage_sge.length = len;
			if (sgl_type == SGL_DATA) {
				stage_sge.offset = cnt;
			}
#ifdef DEBUG_SGE
			emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
			    4, 0);
#endif /* DEBUG_SGE */
			sge_addr += len;
			sge_size -= len;

			cnt += len;
			size -= len;
		}
	}

	/* Flush the final staged SGE, optionally marked as the last */
	if (last) {
		stage_sge.last = 1;
	}
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
	    sizeof (ULP_SGE64));

	sge++;

	if (pcnt) {
		*pcnt = cnt;
	}
	return (sge);

} /* emlxs_pkt_to_sgl */
2726 
2727 
2728 /*ARGSUSED*/
2729 uint32_t
2730 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2731 {
2732 	emlxs_hba_t *hba = HBA;
2733 	fc_packet_t *pkt;
2734 	XRIobj_t *xrip;
2735 	ULP_SGE64 *sge;
2736 	emlxs_wqe_t *wqe;
2737 	IOCBQ *iocbq;
2738 	ddi_dma_cookie_t *cp_cmd;
2739 	ddi_dma_cookie_t *cp_data;
2740 	uint64_t sge_addr;
2741 	uint32_t cmd_cnt;
2742 	uint32_t resp_cnt;
2743 
2744 	iocbq = (IOCBQ *) &sbp->iocbq;
2745 	wqe = &iocbq->wqe;
2746 	pkt = PRIV2PKT(sbp);
2747 	xrip = sbp->xrip;
2748 	sge = xrip->SGList->virt;
2749 
2750 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2751 	cp_cmd = pkt->pkt_cmd_cookie;
2752 	cp_data = pkt->pkt_data_cookie;
2753 #else
2754 	cp_cmd  = &pkt->pkt_cmd_cookie;
2755 	cp_data = &pkt->pkt_data_cookie;
2756 #endif	/* >= EMLXS_MODREV3 */
2757 
2758 	iocbq = &sbp->iocbq;
2759 	if (iocbq->flag & IOCB_FCP_CMD) {
2760 
2761 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2762 			return (1);
2763 		}
2764 
2765 		/* CMD payload */
2766 		sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
2767 		if (! sge) {
2768 			return (1);
2769 		}
2770 
2771 		/* DATA payload */
2772 		if (pkt->pkt_datalen != 0) {
2773 			/* RSP payload */
2774 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2775 			    SGL_RESP, &resp_cnt);
2776 			if (! sge) {
2777 				return (1);
2778 			}
2779 
2780 			/* Data payload */
2781 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2782 			    SGL_DATA | SGL_LAST, 0);
2783 			if (! sge) {
2784 				return (1);
2785 			}
2786 sgl_done:
2787 			if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
2788 				sge_addr = cp_data->dmac_laddress;
2789 				wqe->FirstData.addrHigh = PADDR_HI(sge_addr);
2790 				wqe->FirstData.addrLow = PADDR_LO(sge_addr);
2791 				wqe->FirstData.tus.f.bdeSize =
2792 				    cp_data->dmac_size;
2793 			}
2794 		} else {
2795 			/* RSP payload */
2796 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2797 			    SGL_RESP | SGL_LAST, &resp_cnt);
2798 			if (! sge) {
2799 				return (1);
2800 			}
2801 		}
2802 
2803 		wqe->un.FcpCmd.Payload.addrHigh =
2804 		    PADDR_HI(cp_cmd->dmac_laddress);
2805 		wqe->un.FcpCmd.Payload.addrLow =
2806 		    PADDR_LO(cp_cmd->dmac_laddress);
2807 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
2808 		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
2809 
2810 	} else {
2811 
2812 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2813 			/* CMD payload */
2814 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2815 			    SGL_CMD | SGL_LAST, &cmd_cnt);
2816 			if (! sge) {
2817 				return (1);
2818 			}
2819 		} else {
2820 			/* CMD payload */
2821 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2822 			    SGL_CMD, &cmd_cnt);
2823 			if (! sge) {
2824 				return (1);
2825 			}
2826 
2827 			/* RSP payload */
2828 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2829 			    SGL_RESP | SGL_LAST, &resp_cnt);
2830 			if (! sge) {
2831 				return (1);
2832 			}
2833 			wqe->un.GenReq.PayloadLength = cmd_cnt;
2834 		}
2835 
2836 		wqe->un.GenReq.Payload.addrHigh =
2837 		    PADDR_HI(cp_cmd->dmac_laddress);
2838 		wqe->un.GenReq.Payload.addrLow =
2839 		    PADDR_LO(cp_cmd->dmac_laddress);
2840 		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
2841 	}
2842 	return (0);
2843 } /* emlxs_sli4_bde_setup */
2844 
2845 
2846 
2847 
2848 #ifdef SFCT_SUPPORT
/*
 * Build the SGL and WQE payload descriptors for a target mode (SFCT)
 * I/O request.
 *
 * For TRECEIVE64 the first SGE points to a freshly allocated XFER_RDY
 * payload (freed later via iocbq->bp) followed by a SKIP SGE; for
 * TSEND64 the first two SGEs are SKIP entries.  The data buffer,
 * which must have exactly one sglist entry, is then split into data
 * SGEs of at most EMLXS_MAX_SGE_SIZE bytes each.  Returns 0 on
 * success, 1 on failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	IOCB *iocb;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	MATCHMAP *fct_mp;
	XRIobj_t *xrip;
	uint64_t sge_addr;
	uint32_t sge_size;
	uint32_t cnt;
	uint32_t len;
	uint32_t size;
	uint32_t *xrdy_vaddr;
	stmf_data_buf_t *dbuf;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	wqe = &iocbq->wqe;
	xrip = sbp->xrip;

	/* Nothing to do without a data buffer */
	if (!sbp->fct_buf) {
		return (0);
	}

	size = sbp->fct_buf->db_data_size;

	/*
	 * The hardware will automaticlly round up
	 * to multiple of 4.
	 *
	 * if (size & 3) {
	 *	size = (size + 3) & 0xfffffffc;
	 * }
	 */
	fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	sge = xrip->SGList->virt;

	if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {

		mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
		if (!mp || !mp->virt || !mp->phys) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
			    "fct_bde_setup: Cannot allocate XRDY memory");
			return (1);
		}
		/* Save the MATCHMAP info to free this memory later */
		iocbq->bp = mp;

		/* Point to XRDY payload */
		xrdy_vaddr = (uint32_t *)(mp->virt);

		/* Fill in burstsize in payload */
		*xrdy_vaddr++ = 0;
		*xrdy_vaddr++ = LE_SWAP32(size);
		*xrdy_vaddr = 0;

		/* First 2 SGEs are XRDY and SKIP */
		stage_sge.addrHigh = PADDR_HI(mp->phys);
		stage_sge.addrLow = PADDR_LO(mp->phys);
		stage_sge.length = EMLXS_XFER_RDY_SIZE;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 0;

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = EMLXS_XFER_RDY_SIZE;
		wqe->un.FcpCmd.PayloadLength = EMLXS_XFER_RDY_SIZE;

	} else {	/* CMD_FCP_TSEND64_CX */
		/* First 2 SGEs are SKIP */
		stage_sge.addrHigh = 0;
		stage_sge.addrLow = 0;
		stage_sge.length = 0;
		stage_sge.offset = 0;
		stage_sge.type = EMLXS_SGE_TYPE_SKIP;
		stage_sge.last = 0;

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = PADDR_HI(fct_mp->phys);
		wqe->un.FcpCmd.Payload.addrLow = PADDR_LO(fct_mp->phys);

		/* The BDE should match the contents of the first SGE payload */
		len = MIN(EMLXS_MAX_SGE_SIZE, size);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = len;

		/* The PayloadLength should be set to 0 for TSEND64. */
		wqe->un.FcpCmd.PayloadLength = 0;
	}

	dbuf = sbp->fct_buf;
	/*
	 * TotalTransferCount equals to Relative Offset field (Word 4)
	 * in both TSEND64 and TRECEIVE64 WQE.
	 */
	wqe->un.FcpCmd.TotalTransferCount = dbuf->db_relative_offset;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	/* Second SGE is always a SKIP entry */
	stage_sge.addrHigh = 0;
	stage_sge.addrLow = 0;
	stage_sge.length = 0;
	stage_sge.offset = 0;
	stage_sge.type = EMLXS_SGE_TYPE_SKIP;
	stage_sge.last = 0;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	sge_size = size;
	sge_addr = fct_mp->phys;
	cnt = 0;

	/*
	 * Build data SGEs.  Each SGE is staged and copied into place
	 * one iteration behind the loop so the 'last' bit can still
	 * be set on the final entry after the loop.
	 */
	while (sge_size) {
		if (cnt) {
			/* Copy staged SGE before we build next one */
			BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
			    (uint8_t *)sge, sizeof (ULP_SGE64));
			sge++;
		}

		len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);

		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = len;
		stage_sge.offset = cnt;
		stage_sge.type = EMLXS_SGE_TYPE_DATA;

		sge_addr += len;
		sge_size -= len;
		cnt += len;
	}

	stage_sge.last = 1;

	/* Performance hint: hand the port the first data SGE directly */
	if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
		wqe->FirstData.addrHigh = stage_sge.addrHigh;
		wqe->FirstData.addrLow = stage_sge.addrLow;
		wqe->FirstData.tus.f.bdeSize = stage_sge.length;
	}
	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));

	return (0);

} /* emlxs_sli4_fct_bde_setup */
3018 #endif /* SFCT_SUPPORT */
3019 
3020 
3021 static void
3022 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
3023 {
3024 	emlxs_port_t *port = &PPORT;
3025 	emlxs_buf_t *sbp;
3026 	uint32_t channelno;
3027 	int32_t throttle;
3028 	emlxs_wqe_t *wqe;
3029 	emlxs_wqe_t *wqeslot;
3030 	WQ_DESC_t *wq;
3031 	uint32_t flag;
3032 	uint16_t next_wqe;
3033 	off_t offset;
3034 #ifdef NODE_THROTTLE_SUPPORT
3035 	int32_t node_throttle;
3036 	NODELIST *marked_node = NULL;
3037 #endif /* NODE_THROTTLE_SUPPORT */
3038 
3039 
3040 	channelno = cp->channelno;
3041 	wq = (WQ_DESC_t *)cp->iopath;
3042 
3043 #ifdef DEBUG_FASTPATH
3044 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3045 	    "ISSUE WQE channel: %x  %p", channelno, wq);
3046 #endif /* DEBUG_FASTPATH */
3047 
3048 	throttle = 0;
3049 
3050 	/* Check if FCP ring and adapter is not ready */
3051 	/* We may use any ring for FCP_CMD */
3052 	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
3053 		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
3054 		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
3055 			emlxs_tx_put(iocbq, 1);
3056 			return;
3057 		}
3058 	}
3059 
3060 	/* Attempt to acquire CMD_RING lock */
3061 	if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
3062 		/* Queue it for later */
3063 		if (iocbq) {
3064 			if ((hba->io_count -
3065 			    hba->channel_tx_count) > 10) {
3066 				emlxs_tx_put(iocbq, 1);
3067 				return;
3068 			} else {
3069 
3070 				mutex_enter(&EMLXS_QUE_LOCK(channelno));
3071 			}
3072 		} else {
3073 			return;
3074 		}
3075 	}
3076 	/* EMLXS_QUE_LOCK acquired */
3077 
3078 	/* Throttle check only applies to non special iocb */
3079 	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
3080 		/* Check if HBA is full */
3081 		throttle = hba->io_throttle - hba->io_active;
3082 		if (throttle <= 0) {
3083 			/* Hitting adapter throttle limit */
3084 			/* Queue it for later */
3085 			if (iocbq) {
3086 				emlxs_tx_put(iocbq, 1);
3087 			}
3088 
3089 			goto busy;
3090 		}
3091 	}
3092 
3093 	/* Check to see if we have room for this WQE */
3094 	next_wqe = wq->host_index + 1;
3095 	if (next_wqe >= wq->max_index) {
3096 		next_wqe = 0;
3097 	}
3098 
3099 	if (next_wqe == wq->port_index) {
3100 		/* Queue it for later */
3101 		if (iocbq) {
3102 			emlxs_tx_put(iocbq, 1);
3103 		}
3104 		goto busy;
3105 	}
3106 
3107 	/*
3108 	 * We have a command ring slot available
3109 	 * Make sure we have an iocb to send
3110 	 */
3111 	if (iocbq) {
3112 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3113 
3114 		/* Check if the ring already has iocb's waiting */
3115 		if (cp->nodeq.q_first != NULL) {
3116 			/* Put the current iocbq on the tx queue */
3117 			emlxs_tx_put(iocbq, 0);
3118 
3119 			/*
3120 			 * Attempt to replace it with the next iocbq
3121 			 * in the tx queue
3122 			 */
3123 			iocbq = emlxs_tx_get(cp, 0);
3124 		}
3125 
3126 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3127 	} else {
3128 		iocbq = emlxs_tx_get(cp, 1);
3129 	}
3130 
3131 sendit:
3132 	/* Process each iocbq */
3133 	while (iocbq) {
3134 		sbp = iocbq->sbp;
3135 
3136 #ifdef NODE_THROTTLE_SUPPORT
3137 		if (sbp && sbp->node && sbp->node->io_throttle) {
3138 			node_throttle = sbp->node->io_throttle -
3139 			    sbp->node->io_active;
3140 			if (node_throttle <= 0) {
3141 				/* Node is busy */
3142 				/* Queue this iocb and get next iocb from */
3143 				/* channel */
3144 
3145 				if (!marked_node) {
3146 					marked_node = sbp->node;
3147 				}
3148 
3149 				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3150 				emlxs_tx_put(iocbq, 0);
3151 
3152 				if (cp->nodeq.q_first == marked_node) {
3153 					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3154 					goto busy;
3155 				}
3156 
3157 				iocbq = emlxs_tx_get(cp, 0);
3158 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3159 				continue;
3160 			}
3161 		}
3162 		marked_node = 0;
3163 #endif /* NODE_THROTTLE_SUPPORT */
3164 
3165 		wqe = &iocbq->wqe;
3166 #ifdef DEBUG_FASTPATH
3167 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3168 		    "ISSUE QID %d WQE iotag:%x xri:%d", wq->qid,
3169 		    wqe->RequestTag, wqe->XRITag);
3170 #endif /* DEBUG_FASTPATH */
3171 
3172 		if (sbp) {
3173 			/* If exchange removed after wqe was prep'ed, drop it */
3174 			if (!(sbp->xrip)) {
3175 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3176 				    "Xmit WQE iotag:%x xri:%d aborted",
3177 				    wqe->RequestTag, wqe->XRITag);
3178 
3179 				/* Get next iocb from the tx queue */
3180 				iocbq = emlxs_tx_get(cp, 1);
3181 				continue;
3182 			}
3183 
3184 			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
3185 
3186 				/* Perform delay */
3187 				if ((channelno == hba->channel_els) &&
3188 				    !(iocbq->flag & IOCB_FCP_CMD)) {
3189 					drv_usecwait(100000);
3190 				} else {
3191 					drv_usecwait(20000);
3192 				}
3193 			}
3194 
3195 			/* Check for ULP pkt request */
3196 			mutex_enter(&sbp->mtx);
3197 
3198 			if (sbp->node == NULL) {
3199 				/* Set node to base node by default */
3200 				iocbq->node = (void *)&port->node_base;
3201 				sbp->node = (void *)&port->node_base;
3202 			}
3203 
3204 			sbp->pkt_flags |= PACKET_IN_CHIPQ;
3205 			mutex_exit(&sbp->mtx);
3206 
3207 			atomic_inc_32(&hba->io_active);
3208 #ifdef NODE_THROTTLE_SUPPORT
3209 			if (sbp->node) {
3210 				atomic_inc_32(&sbp->node->io_active);
3211 			}
3212 #endif /* NODE_THROTTLE_SUPPORT */
3213 
3214 			sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
3215 #ifdef SFCT_SUPPORT
3216 #ifdef FCT_IO_TRACE
3217 			if (sbp->fct_cmd) {
3218 				emlxs_fct_io_trace(port, sbp->fct_cmd,
3219 				    EMLXS_FCT_IOCB_ISSUED);
3220 				emlxs_fct_io_trace(port, sbp->fct_cmd,
3221 				    icmd->ULPCOMMAND);
3222 			}
3223 #endif /* FCT_IO_TRACE */
3224 #endif /* SFCT_SUPPORT */
3225 			cp->hbaSendCmd_sbp++;
3226 			iocbq->channel = cp;
3227 		} else {
3228 			cp->hbaSendCmd++;
3229 		}
3230 
3231 		flag = iocbq->flag;
3232 
3233 		/*
3234 		 * At this point, we have a command ring slot available
3235 		 * and an iocb to send
3236 		 */
3237 		wq->release_depth--;
3238 		if (wq->release_depth == 0) {
3239 			wq->release_depth = WQE_RELEASE_DEPTH;
3240 			wqe->WQEC = 1;
3241 		}
3242 
3243 		HBASTATS.IocbIssued[channelno]++;
3244 		wq->num_proc++;
3245 
3246 		/* Send the iocb */
3247 		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
3248 		wqeslot += wq->host_index;
3249 
3250 		wqe->CQId = wq->cqid;
3251 		if (hba->sli.sli4.param.PHWQ) {
3252 			WQE_PHWQ_WQID(wqe, wq->qid);
3253 		}
3254 		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
3255 		    sizeof (emlxs_wqe_t));
3256 #ifdef DEBUG_WQE
3257 		emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
3258 #endif /* DEBUG_WQE */
3259 		offset = (off_t)((uint64_t)((unsigned long)
3260 		    wq->addr.virt) -
3261 		    (uint64_t)((unsigned long)
3262 		    hba->sli.sli4.slim2.virt));
3263 
3264 		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
3265 		    4096, DDI_DMA_SYNC_FORDEV);
3266 
3267 		/*
3268 		 * After this, the sbp / iocb / wqe should not be
3269 		 * accessed in the xmit path.
3270 		 */
3271 
3272 		/* Ring the WQ Doorbell */
3273 		emlxs_sli4_write_wqdb(hba, wq->qid, 1, wq->host_index);
3274 		wq->host_index = next_wqe;
3275 
3276 		if (!sbp) {
3277 			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
3278 		}
3279 
3280 		if (iocbq && (!(flag & IOCB_SPECIAL))) {
3281 			/* Check if HBA is full */
3282 			throttle = hba->io_throttle - hba->io_active;
3283 			if (throttle <= 0) {
3284 				goto busy;
3285 			}
3286 		}
3287 
3288 		/* Check to see if we have room for another WQE */
3289 		next_wqe++;
3290 		if (next_wqe >= wq->max_index) {
3291 			next_wqe = 0;
3292 		}
3293 
3294 		if (next_wqe == wq->port_index) {
3295 			/* Queue it for later */
3296 			goto busy;
3297 		}
3298 
3299 		/* Get the next iocb from the tx queue if there is one */
3300 		iocbq = emlxs_tx_get(cp, 1);
3301 	}
3302 
3303 	mutex_exit(&EMLXS_QUE_LOCK(channelno));
3304 
3305 	return;
3306 
3307 busy:
3308 	wq->num_busy++;
3309 	if (throttle <= 0) {
3310 		HBASTATS.IocbThrottled++;
3311 	} else {
3312 		HBASTATS.IocbRingFull[channelno]++;
3313 	}
3314 
3315 	mutex_exit(&EMLXS_QUE_LOCK(channelno));
3316 
3317 	return;
3318 
3319 } /* emlxs_sli4_issue_iocb_cmd() */
3320 
3321 
/*ARGSUSED*/
/*
 * Post one mailbox command to the SLI4 Mailbox Queue (MQ).
 *
 *   port - issuing port (used for logging context)
 *   mqe  - host-side MQ entry slot the command is copied into
 *   mb   - mailbox command to send; aliased below as MAILBOXQ/MAILBOX4
 *          to reach the queue bookkeeping and SLI4-specific fields
 *   tmo  - unused in this routine (timing is driven by the caller)
 *
 * Returns MBX_SUCCESS unconditionally; completion is delivered
 * asynchronously through the MQ completion path.
 */
static uint32_t
emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
    uint32_t tmo)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ	*mbq;
	MAILBOX4	*mb4;
	MATCHMAP	*mp;
	uint32_t	*iptr;
	off_t		offset;

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;

	/* Remember which MQ entry carries the active command so the */
	/* completion handler can locate it later */
	hba->mbox_mqe = (void *)mqe;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the mailbox area.
		 */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
		    4096, DDI_DMA_SYNC_FORDEV);

		if (mb->mbxCommand != MBX_HEARTBEAT) {
			emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
			    18, 0);
		}
	} else {
		/* SLI_CONFIG and non-embedded */

		/*
		 * If this is not embedded, the MQ area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* Swap the external payload to hardware byte order and */
		/* flush it to the device before posting the command */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* Offset of the MQ within the shared slim2 DMA region */
		/* (the MQ shares slim2's DMA handle) */
		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.mq.addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}

	/* Ring the MQ Doorbell */
	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MQ RING: Qid %04x", hba->sli.sli4.mq.qid);
	}

	emlxs_sli4_write_mqdb(hba, hba->sli.sli4.mq.qid, 1);

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_mq() */
3405 
3406 
/*ARGSUSED*/
/*
 * Issue a mailbox command through the SLI4 bootstrap mailbox and poll
 * for its completion.
 *
 *   hba - adapter instance
 *   mb  - mailbox command; aliased below as MAILBOXQ/MAILBOX4 to reach
 *         the queue bookkeeping and SLI4-specific fields
 *   tmo - completion timeout, in 10ms ticks
 *
 * The command (and, for non-embedded SLI_CONFIG, its external payload)
 * is byte-swapped and flushed to the device, then
 * emlxs_issue_bootstrap_mb() polls the hardware. On completion the
 * response is synced back and swapped into the caller's buffers.
 *
 * Returns MBX_SUCCESS, MBX_TIMEOUT, or (with FMA) MBXERR_DMA_ERROR.
 */
static uint32_t
emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
{
	emlxs_port_t	*port = &PPORT;
	MAILBOXQ	*mbq;
	MAILBOX4	*mb4;
	MATCHMAP	*mp = NULL;
	uint32_t	*iptr;
	int		nonembed = 0;	/* set for the FMA check below */

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;

	/* The bootstrap mailbox is the active command area */
	hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the bootstrap mailbox area.
		 */
		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
		emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
	} else {
		/*
		 * If this is not embedded, the bootstrap mailbox area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		nonembed = 1;
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* Swap the external payload to hardware byte order and */
		/* flush it to the device before posting the command */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys,
		    (uint32_t *)((uint8_t *)mp->virt));
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}


	/* NOTE: tmo is in 10ms ticks */
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (MBX_TIMEOUT);
	}

	/* Command completed: sync the response back from the device and */
	/* swap it into the caller's mailbox (and payload, if any) */
	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);

	} else {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORKERNEL);

		BE_SWAP32_BUFFER(mp->virt, mp->size);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

#ifdef FMA_SUPPORT
	/* Verify the DMA handles are still healthy before trusting data */
	if (nonembed && mp) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_issue_bootstrap: mp_hdl=%p",
			    mp->dma_handle);
			return (MBXERR_DMA_ERROR);
		}
	}

	if (emlxs_fm_check_dma_handle(hba,
	    hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "sli4_issue_bootstrap: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (MBXERR_DMA_ERROR);
	}
#endif

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_bootstrap() */
3535 
3536 
/*ARGSUSED*/
/*
 * Top-level SLI4 mailbox dispatch routine.
 *
 *   hba  - adapter instance
 *   mbq  - mailbox command queue entry (aliased as MAILBOX/MAILBOX4)
 *   flag - requested wait mode: MBX_NOWAIT, or anything else which is
 *          adjusted below to MBX_SLEEP (interrupts enabled) / MBX_POLL
 *   tmo  - requested timeout in seconds; raised to per-command minimums
 *
 * Enforces minimum timeouts, serializes against any mailbox already in
 * flight, then issues the command through the MQ (NOWAIT/SLEEP modes)
 * or the bootstrap mailbox (POLL mode).
 *
 * Returns MBX_SUCCESS, MBX_BUSY (queued for later), MBX_TIMEOUT,
 * MBX_HARDWARE_ERROR, MBX_NONEMBED_ERROR, or the command's mbxStatus.
 */
static uint32_t
emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t	*port;
	MAILBOX4	*mb4;
	MAILBOX		*mb;
	mbox_rsp_hdr_t	*hdr_rsp;
	MATCHMAP	*mp;
	uint32_t	*iptr;
	uint32_t	rc;
	uint32_t	i;
	uint32_t	tmo_local;

	/* Default to the physical port if no port was specified */
	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb4 = (MAILBOX4 *)mbq;
	mb = (MAILBOX *)mbq;

	mb->mbxStatus = MBX_SUCCESS;
	rc = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
	case MBX_DUMP_MEMORY:
	case MBX_WRITE_VPARMS:
	case MBX_ACCESS_VDATA:
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	case MBX_SLI_CONFIG: {
		mbox_req_hdr_t *hdr_req;

		/* SLI_CONFIG minimums depend on the embedded sub-opcode */
		hdr_req = (mbox_req_hdr_t *)
		    &mb4->un.varSLIConfig.be.un_hdr.hdr_req;

		if (hdr_req->subsystem == IOCTL_SUBSYSTEM_COMMON) {
			switch (hdr_req->opcode) {
			case COMMON_OPCODE_WRITE_OBJ:
			case COMMON_OPCODE_READ_OBJ:
			case COMMON_OPCODE_READ_OBJ_LIST:
			case COMMON_OPCODE_DELETE_OBJ:
			case COMMON_OPCODE_SET_BOOT_CFG:
			case COMMON_OPCODE_GET_PROFILE_CFG:
			case COMMON_OPCODE_SET_PROFILE_CFG:
			case COMMON_OPCODE_GET_PROFILE_LIST:
			case COMMON_OPCODE_SET_ACTIVE_PROFILE:
			case COMMON_OPCODE_GET_PROFILE_CAPS:
			case COMMON_OPCODE_GET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_FACTORY_PROFILE_CFG:
			case COMMON_OPCODE_SEND_ACTIVATION:
			case COMMON_OPCODE_RESET_LICENSES:
			case COMMON_OPCODE_SET_PHYSICAL_LINK_CFG_V1:
			case COMMON_OPCODE_GET_VPD_DATA:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else if (hdr_req->subsystem == IOCTL_SUBSYSTEM_FCOE) {
			switch (hdr_req->opcode) {
			case FCOE_OPCODE_SET_FCLINK_SETTINGS:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else {
			if (tmo < 30) {
				tmo = 30;
			}
		}

		/*
		 * Also: VENDOR_MANAGE_FFV  (0x13, 0x02) (not currently used)
		 */

		break;
	}
	default:
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* With interrupts we can sleep; otherwise we must poll */
		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	} else {
		/* Must have interrupts enabled to perform MBX_NOWAIT */
		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {

			mb->mbxStatus = MBX_HARDWARE_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s failed.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));

			return (MBX_HARDWARE_ERROR);
		}
	}

	/* Check for hardware error ; special case SLI_CONFIG */
	/* (a COMMON_OPCODE_RESET is still allowed through, since it is */
	/* the recovery path for a hardware error) */
	if ((hba->flag & FC_HARDWARE_ERROR) &&
	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
	    COMMON_OPCODE_RESET))) {
		mb->mbxStatus = MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Only one mailbox may be active at a time */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy.      %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/* Busy-wait (10ms steps) for the active mailbox to drain */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout.   %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethalStatus mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			BUSYWAIT_MS(10);
			mutex_enter(&EMLXS_PORT_LOCK);

			/* Check for hardware error ; special case SLI_CONFIG */
			if ((hba->flag & FC_HARDWARE_ERROR) &&
			    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
			    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
			    COMMON_OPCODE_RESET))) {
				mb->mbxStatus = MBX_HARDWARE_ERROR;

				mutex_exit(&EMLXS_PORT_LOCK);

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Hardware error reported. %s failed. "
				    "status=%x mb=%p",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    mb->mbxStatus, mb);

				return (MBX_HARDWARE_ERROR);
			}
		}
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	if (mb->mbxCommand == MBX_DOWN_LINK) {
		hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
	}

	mutex_exit(&EMLXS_PORT_LOCK);
	switch (flag) {

	case MBX_NOWAIT:
		/* Fire-and-forget through the MQ; completion is handled */
		/* by the interrupt path */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending.   %s: mb=%p NoWait. embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}

		/* Claim the next MQ entry and advance the host index */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		if (mbq->bp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "BDE virt %p phys %p size x%x",
			    ((MATCHMAP *)mbq->bp)->virt,
			    ((MATCHMAP *)mbq->bp)->phys,
			    ((MATCHMAP *)mbq->bp)->size);
			emlxs_data_dump(port, "DATA",
			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
		}
		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
		break;

	case MBX_POLL:
		/* Synchronous issue through the bootstrap mailbox */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending.   %s: mb=%p Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);

		/* Clean up the mailbox area */
		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout.   %s: mb=%p tmo=%x Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));

			/* Bootstrap timeouts are fatal to the adapter */
			hba->flag |= FC_MBOX_TIMEOUT;
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed.   %s: mb=%p status=%x Poll. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}

			/* Process the result */
			if (!(mbq->flag & MBQ_PASSTHRU)) {
				if (mbq->mbox_cmpl) {
					(void) (mbq->mbox_cmpl)(hba, mbq);
				}
			}

			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
		}

		/* Fold any non-embedded response error into mbxStatus */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		/* Attempt to send pending mailboxes */
		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
		if (mbq) {
			/* Attempt to send pending mailboxes */
			i =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
			}
		}
		break;

	case MBX_SLEEP:
		/* Issue through the MQ, then block on the completion cv */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending.   %s: mb=%p Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		/* Claim the next MQ entry and advance the host index */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);

		if (rc != MBX_SUCCESS) {
			break;
		}

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		/* Fold any non-embedded response error into mbxStatus */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout.   %s: mb=%p tmo=%x Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed.   %s: mb=%p status=%x Sleep. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}
		break;
	}

	return (rc);

} /* emlxs_sli4_issue_mbox_cmd() */
3932 
3933 
3934 
3935 /*ARGSUSED*/
3936 static uint32_t
3937 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3938     uint32_t tmo)
3939 {
3940 	emlxs_port_t	*port = &PPORT;
3941 	MAILBOX		*mb;
3942 	mbox_rsp_hdr_t	*hdr_rsp;
3943 	MATCHMAP	*mp;
3944 	uint32_t	rc;
3945 	uint32_t	tmo_local;
3946 
3947 	mb = (MAILBOX *)mbq;
3948 
3949 	mb->mbxStatus = MBX_SUCCESS;
3950 	rc = MBX_SUCCESS;
3951 
3952 	if (tmo < 30) {
3953 		tmo = 30;
3954 	}
3955 
3956 	/* Convert tmo seconds to 10 millisecond tics */
3957 	tmo_local = tmo * 100;
3958 
3959 	flag = MBX_POLL;
3960 
3961 	/* Check for hardware error */
3962 	if (hba->flag & FC_HARDWARE_ERROR) {
3963 		mb->mbxStatus = MBX_HARDWARE_ERROR;
3964 		return (MBX_HARDWARE_ERROR);
3965 	}
3966 
3967 	/* Initialize mailbox area */
3968 	emlxs_mb_init(hba, mbq, flag, tmo);
3969 
3970 	switch (flag) {
3971 
3972 	case MBX_POLL:
3973 
3974 		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
3975 
3976 		/* Clean up the mailbox area */
3977 		if (rc == MBX_TIMEOUT) {
3978 			hba->flag |= FC_MBOX_TIMEOUT;
3979 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
3980 			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3981 
3982 		} else {
3983 			/* Process the result */
3984 			if (!(mbq->flag & MBQ_PASSTHRU)) {
3985 				if (mbq->mbox_cmpl) {
3986 					(void) (mbq->mbox_cmpl)(hba, mbq);
3987 				}
3988 			}
3989 
3990 			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3991 		}
3992 
3993 		mp = (MATCHMAP *)mbq->nonembed;
3994 		if (mp) {
3995 			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
3996 			if (hdr_rsp->status) {
3997 				EMLXS_MSGF(EMLXS_CONTEXT,
3998 				    &emlxs_mbox_detail_msg,
3999 				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
4000 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
4001 				    hdr_rsp->status, hdr_rsp->extra_status);
4002 
4003 				mb->mbxStatus = MBX_NONEMBED_ERROR;
4004 			}
4005 		}
4006 		rc = mb->mbxStatus;
4007 
4008 		break;
4009 	}
4010 
4011 	return (rc);
4012 
4013 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
4014 
4015 
4016 
#ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * Prepare a target-mode (SFCT) command for the SLI4 work queue.
 *
 *   port    - issuing port
 *   cmd_sbp - driver buffer carrying the FCT command state
 *   channel - unused here; the channel comes from cmd_sbp (ARGSUSED)
 *
 * Three request types are handled, keyed off the prebuilt IOCB command:
 *   CMD_ABORT_XRI_CX   - build an abort WQE on a freshly allocated XRI
 *   CMD_FCP_TRSP64_CX  - build a FCP response (TRSP) WQE
 *   otherwise          - build a TSEND/TRECEIVE data-phase WQE from the
 *                        fct_cmd attached to cmd_sbp
 *
 * Returns IOERR_SUCCESS on success, FC_TRAN_BUSY when no exchange is
 * available, or 0xff after setting pkt state on a lookup/registration
 * failure.
 *
 * Fix: removed a duplicated "wqe = &iocbq->wqe;" assignment in the
 * abort path (the second assignment was dead code).
 */
extern uint32_t
emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	fc_packet_t *pkt;
	CHANNEL *cp;
	XRIobj_t *xrip;
	emlxs_node_t *ndlp;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	RPIobj_t *rpip;
	int32_t	sge_size;
	uint64_t sge_addr;
	uint32_t did;
	uint32_t timeout;

	ddi_dma_cookie_t *cp_cmd;

	pkt = PRIV2PKT(cmd_sbp);

	cp = (CHANNEL *)cmd_sbp->channel;

	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	did = cmd_sbp->did;
	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {

		ndlp = cmd_sbp->node;
		rpip = EMLXS_NODE_TO_RPI(port, ndlp);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, cmd_sbp, rpip,
		    EMLXS_XRI_SOL_BLS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "FCT Abort Request: xri=%d iotag=%d sbp=%p rxid=%x",
		    xrip->XRI, xrip->iotag, cmd_sbp, pkt->pkt_cmd_fhdr.rx_id);

		cmd_sbp->xrip = xrip;

		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/*
		 * Don't give the abort priority, we want the IOCB
		 * we are aborting to be processed first.
		 */
		iocbq->flag |= IOCB_SPECIAL;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->RequestTag = xrip->iotag;
		wqe->AbortTag = pkt->pkt_cmd_fhdr.rx_id;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = 0xffff;
		wqe->CmdType = WQE_TYPE_ABORT;

		/* Only send ABTS on the wire when the link is up */
		if (hba->state >= FC_LINK_UP) {
			wqe->un.Abort.IA = 0;
		} else {
			wqe->un.Abort.IA = 1;
		}

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		return (IOERR_SUCCESS);

	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {

		timeout = pkt->pkt_timeout;
		ndlp = cmd_sbp->node;
		if (!ndlp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to find rpi. did=0x%x", did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		xrip = emlxs_sli4_register_xri(port, cmd_sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to register xri %x. did=0x%x",
			    pkt->pkt_cmd_fhdr.rx_id, did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		cmd_sbp->iotag = xrip->iotag;
		cmd_sbp->channel = cp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
		cp_cmd = pkt->pkt_cmd_cookie;
#else
		cp_cmd  = &pkt->pkt_cmd_cookie;
#endif	/* >= EMLXS_MODREV3 */

		sge_size = pkt->pkt_cmdlen;
		/* Make size a multiple of 4 */
		if (sge_size & 3) {
			sge_size = (sge_size + 3) & 0xfffffffc;
		}
		sge_addr = cp_cmd->dmac_laddress;
		sge = xrip->SGList->virt;

		/* Build the single response SGE in host memory first */
		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = sge_size;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 1;

		/* Copy staged SGE into SGL */
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
		    (uint8_t *)sge, sizeof (ULP_SGE64));

		/* Words  0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
		wqe->un.FcpCmd.PayloadLength = sge_size;

		/*  Word  6 */
		wqe->ContextTag = ndlp->nlp_Rpi;
		wqe->XRITag = xrip->XRI;

		/*  Word  7 */
		wqe->Command  = iocb->ULPCOMMAND;
		wqe->Class = cmd_sbp->class;
		wqe->ContextType = WQE_RPI_CONTEXT;
		wqe->Timer = ((timeout > 0xff) ? 0 : timeout);

		/*  Word  8 */
		wqe->AbortTag = 0;

		/*  Word  9 */
		wqe->RequestTag = xrip->iotag;
		wqe->OXId = (uint16_t)xrip->rx_id;

		/*  Word  10 */
		if (xrip->flag & EMLXS_XRI_BUSY) {
			wqe->XC = 1;
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->QOSd = 1;
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/*  Word  11 */
		wqe->CmdType = WQE_TYPE_TRSP;
		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + timeout +
		    ((timeout > 0xff) ? 0 : 10);

		if (pkt->pkt_cmdlen) {
			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
			    DDI_DMA_SYNC_FORDEV);
		}

		return (IOERR_SUCCESS);
	}

	/* Data phase: build a TSEND/TRECEIVE WQE from the fct command */
	fct_cmd = cmd_sbp->fct_cmd;
	did = fct_cmd->cmd_rportid;
	dbuf = cmd_sbp->fct_buf;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	if (!ndlp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}


	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *) cp;

	wqe = &iocbq->wqe;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));

	xrip = cmd_sbp->xrip;
	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}

	if (emlxs_sli4_register_xri(port, cmd_sbp,
	    xrip->XRI, ndlp->nlp_DID) == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to register xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}
	cmd_sbp->iotag = xrip->iotag;
	cmd_sbp->channel = cp;

	/* Timeout: 2 * R_A_TOV (min 60s), or effectively disabled */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);


	iocb->ULPCT = 0;
	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
		wqe->CmdType = WQE_TYPE_TRECEIVE;		/* Word 11 */

	} else { /* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
		wqe->CmdType = WQE_TYPE_TSEND;			/* Word 11 */

		if ((dbuf->db_data_size >=
		    fct_task->task_expected_xfer_length)) {
			/* enable auto-rsp AP feature */
			wqe->AR = 0x1;
			iocb->ULPCT = 0x1; /* for cmpl */
		}
	}

	(void) emlxs_sli4_fct_bde_setup(port, cmd_sbp);

	/*  Word  6 */
	wqe->ContextTag = ndlp->nlp_Rpi;
	wqe->XRITag = xrip->XRI;

	/*  Word  7 */
	wqe->Command  = iocb->ULPCOMMAND;
	wqe->Class = cmd_sbp->class;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->Timer = ((timeout > 0xff) ? 0 : timeout);
	wqe->PU = 1;

	/*  Word  8 */
	wqe->AbortTag = 0;

	/*  Word  9 */
	wqe->RequestTag = xrip->iotag;
	wqe->OXId = (uint16_t)fct_cmd->cmd_oxid;

	/*  Word  10 */
	if (xrip->flag & EMLXS_XRI_BUSY) {
		wqe->XC = 1;
	}

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->QOSd = 1;
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}

	/*  Word  11 */
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

	/*  Word  12 */
	wqe->CmdSpecific = dbuf->db_data_size;

	return (IOERR_SUCCESS);

} /* emlxs_sli4_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
4343 
4344 
/*
 * emlxs_sli4_prep_fcp_iocb
 *
 * Build a SLI-4 work queue entry (WQE) for a solicited FCP command.
 *
 * Looks up the target's RPI from the node attached to the iocbq, allocates
 * an exchange (XRI), builds the scatter/gather list via
 * emlxs_sli4_bde_setup(), then fills in the WQE command, context, timer
 * and class fields.
 *
 * Returns:
 *	FC_SUCCESS	- WQE is ready to be posted to the work queue
 *	FC_TRAN_BUSY	- no XRI available or SGE setup failed (retryable)
 *	0xff		- no RPI found for the destination (packet state has
 *			  already been set to LOCAL_REJECT/INVALID_RPI)
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *node;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	/* Destination ID from the FC header (24-bit, little-endian swap) */
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[channel];

	iocbq = &sbp->iocbq;
	iocbq->channel = (void *) cp;
	iocbq->port = (void *) port;

	/* Start from clean WQE and IOCB images */
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* Find target node object */
	node = (NODELIST *)iocbq->node;
	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}

	sbp->channel = cp;
	/* Next allocate an Exchange for this command */
	xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
	    EMLXS_XRI_SOL_FCP_TYPE);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	sbp->bmp = NULL;
	iotag = sbp->iotag;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
	    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
#endif /* DEBUG_FASTPATH */

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	/* Build the SGL; on failure release the XRI we just allocated */
	if (emlxs_sli4_bde_setup(port, sbp)) {
		emlxs_sli4_free_xri(port, sbp, xrip, 1);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* DEBUG */
#ifdef DEBUG_FCP
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList->virt,
	    xrip->SGList->phys, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: SGL",
	    (uint32_t *)xrip->SGList->virt, 20, 0);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: CMD virt %p len %d:%d:%d",
	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
#endif /* DEBUG_FCP */

	/* Flush the SGL to the device before the WQE can reference it */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	/* if device is FCP-2 device, set the following bit */
	/* that says to run the FC-TAPE protocol. */
	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		wqe->ERP = 1;
	}

	/* Select the command opcode from the data direction */
	if (pkt->pkt_datalen == 0) {
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
		wqe->Command = CMD_FCP_ICMND64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
		wqe->Command = CMD_FCP_IREAD64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
		wqe->PU = PARM_XFER_CHECK;
	} else {
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
		wqe->Command = CMD_FCP_IWRITE64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
	}
	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}
	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timer field is 8 bits; timeouts > 0xff are sent as "no timeout" */
	wqe->Timer =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

	return (FC_SUCCESS);
} /* emlxs_sli4_prep_fcp_iocb() */
4485 
4486 
/*
 * emlxs_sli4_prep_ip_iocb
 *
 * IP/broadcast traffic preparation is not implemented on the SLI-4 path;
 * this stub unconditionally reports FC_TRAN_BUSY to the caller.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	return (FC_TRAN_BUSY);

} /* emlxs_sli4_prep_ip_iocb() */
4494 
4495 
/*
 * emlxs_sli4_prep_els_iocb
 *
 * Build a SLI-4 WQE for an ELS frame.  Two paths:
 *
 *   FC_PKT_OUTBOUND - an ELS *response* (XMIT_ELS_RSP64_CX) on an existing
 *	exchange located via emlxs_sli4_register_xri() using the rx_id from
 *	the FC header.
 *
 *   otherwise - an ELS *request* (ELS_REQUEST64_CR) on a freshly allocated
 *	XRI; the command-specific context (FCFI/VPI/RPI) is chosen from the
 *	ELS opcode.
 *
 * The command (and for requests, the response) payload descriptors are
 * staged in a local ULP_SGE64 and byte-swap-copied into the XRI's SGL.
 * For VPI-context commands a reserved RPI is obtained so the response can
 * be matched (emlxs_rpi_reserve_notify).
 *
 * Returns:
 *	FC_SUCCESS	- WQE ready to post
 *	FC_TRAN_BUSY	- no XRI available (retryable)
 *	0xff		- XRI/RPI lookup or reservation failed; pkt state
 *			  already set to LOCAL_REJECT
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	FCFIobj_t *fcfp;
	RPIobj_t *reserved_rpip = NULL;
	RPIobj_t *rpip = NULL;
	XRIobj_t *xrip;
	CHANNEL *cp;
	uint32_t did;
	uint32_t cmd;
	ULP_SGE64 stage_sge;	/* SGE staged locally, then swapped into SGL */
	ULP_SGE64 *sge;
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	emlxs_node_t *node;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));
	cp = &hba->chan[hba->channel_els];

	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->channel = cp;
	sbp->bmp = NULL;

	/* Cookie access differs by modrev: pointer vs embedded struct */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
#endif	/* >= EMLXS_MODREV3 */

	/* CMD payload */
	sge = &stage_sge;
	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
	sge->length = pkt->pkt_cmdlen;
	sge->offset = 0;
	sge->type = 0;

	/* ELS opcode is the first word of the command payload */
	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Initalize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response */

		sbp->xrip = 0;
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/*
		 * NOTE(review): ElsRsp and ElsCmd are views of the same
		 * union; PayloadLength is set through the ElsCmd view while
		 * the BDE is set through ElsRsp — presumably the fields do
		 * not overlap in the WQE layout; verify against the WQE
		 * structure definition.
		 */
		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
		wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;

		wqe->un.ElsRsp.RemoteId = did;
		wqe->PU = 0x3;
		wqe->OXId = xrip->rx_id;

		sge->last = 1;
		/* Now sge is fully staged */

		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		/* Fabric-directed responses use the VPI context */
		if (rpip->RPI == FABRIC_RPI) {
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
		} else {
			wqe->ContextTag = rpip->RPI;
			wqe->ContextType = WQE_RPI_CONTEXT;
		}

		/* ACC to a FLOGI carries service params and the F-port ID */
		if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
			wqe->un.ElsCmd.SP = 1;
			wqe->un.ElsCmd.LocalId = 0xFFFFFE;
		}

	} else {
		/* ELS Request */

		fcfp = port->vpip->vfip->fcfp;
		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_ELS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d rpi=%d",
		    xrip->XRI, xrip->iotag, rpip->RPI);

		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
		wqe->Command = CMD_ELS_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_ELS;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;

		wqe->un.ElsCmd.RemoteId = did;
		/* 8-bit Timer field; larger timeouts sent as "no timeout" */
		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

		/* setup for rsp */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPPU = 1;	/* Wd4 is relative offset */

		sge->last = 0;

		/* First SGL entry: command payload */
		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		wqe->un.ElsCmd.PayloadLength =
		    pkt->pkt_cmdlen; /* Byte offset of rsp data */

		/* RSP payload */
		sge = &stage_sge;
		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
		sge->length = pkt->pkt_rsplen;
		sge->offset = 0;
		sge->last = 1;
		/* Now sge is fully staged */

		/* Second SGL entry: response payload */
		sge = xrip->SGList->virt;
		sge++;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));
#ifdef DEBUG_ELS
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: SGLaddr virt %p phys %p",
		    xrip->SGList->virt, xrip->SGList->phys);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: PAYLOAD virt %p phys %p",
		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
		emlxs_data_dump(port, "ELS: SGL",
		    (uint32_t *)xrip->SGList->virt, 12, 0);
#endif /* DEBUG_ELS */

		/* Per-opcode context selection and ELSId tagging */
		switch (cmd) {
		case ELS_CMD_FLOGI:
			wqe->un.ElsCmd.SP = 1;

			if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
			    SLI_INTF_IF_TYPE_0) {
				wqe->ContextTag = fcfp->FCFI;
				wqe->ContextType = WQE_FCFI_CONTEXT;
			} else {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			}

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			if (hba->topology == TOPOLOGY_LOOP) {
				wqe->un.ElsCmd.LocalId = port->did;
			}

			wqe->ELSId = WQE_ELSID_FLOGI;
			break;
		case ELS_CMD_FDISC:
			wqe->un.ElsCmd.SP = 1;
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ELSId = WQE_ELSID_FDISC;
			break;
		case ELS_CMD_LOGO:
			if ((did == FABRIC_DID) &&
			    (hba->flag & FC_FIP_SUPPORTED)) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
			wqe->ELSId = WQE_ELSID_LOGO;
			break;
		case ELS_CMD_PLOGI:
			if (rpip->RPI == FABRIC_RPI) {
				if (hba->flag & FC_PT_TO_PT) {
					wqe->un.ElsCmd.SP = 1;
					wqe->un.ElsCmd.LocalId = port->did;
				}

				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_PLOGI;
			break;
		default:
			if (rpip->RPI == FABRIC_RPI) {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_CMD;
			break;
		}

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* VPI-context commands need a reserved RPI to match the response */
	if (wqe->ContextType == WQE_VPI_CONTEXT) {
		reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);

		if (!reserved_rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Store the reserved rpi */
		if (wqe->Command == CMD_ELS_REQUEST64_CR) {
			wqe->OXId = reserved_rpip->RPI;
		} else {
			wqe->CmdSpecific = reserved_rpip->RPI;
		}
	}

	/* Flush the SGL to the device before posting */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->XRITag = xrip->XRI;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_els_iocb() */
4833 
4834 
/*
 * emlxs_sli4_prep_ct_iocb
 *
 * Build a SLI-4 WQE for a CT (Common Transport) frame.  Two paths:
 *
 *   FC_PKT_OUTBOUND - a CT *response* (XMIT_SEQUENCE64_CR) on an existing
 *	exchange located via emlxs_sli4_register_xri() using the rx_id from
 *	the FC header.
 *
 *   otherwise - a CT *request* (GEN_REQUEST64_CR) on a freshly allocated
 *	XRI using the node's RPI.
 *
 * In both paths the SGL is built by emlxs_sli4_bde_setup() and synced for
 * the device before the common RPI-context/timer/class fields are filled.
 *
 * Returns:
 *	FC_SUCCESS	- WQE ready to post
 *	FC_TRAN_BUSY	- no XRI available or SGE setup failed (retryable)
 *	0xff		- XRI/RPI lookup failed; pkt state already set to
 *			  LOCAL_REJECT
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	NODELIST *node = NULL;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	cp = &hba->chan[hba->channel_ct];

	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->bmp = NULL;
	sbp->channel = cp;

	/* Initalize wqe */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response */

		sbp->xrip = 0;
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		if (!(hba->model_info.chip & EMLXS_BE_CHIPS)) {
			wqe->un.XmitSeq.Rsvd0 = 0; /* Word3 now reserved */
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
		wqe->LenLoc = 2;

		/* CT loopback responses keep the exchange open (xo) */
		if (((SLI_CT_REQUEST *) pkt->pkt_cmd)->CommandResponse.bits.
		    CmdRsp == (LE_SWAP16(SLI_CT_LOOPBACK))) {
			wqe->un.XmitSeq.xo = 1;
		} else {
			wqe->un.XmitSeq.xo = 0;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
			wqe->un.XmitSeq.ls = 1;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
			wqe->un.XmitSeq.si = 1;
		}

		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
		wqe->OXId = xrip->rx_id;
		wqe->XC = 0; /* xri_tag is a new exchange */
		/*
		 * NOTE(review): reads the bdeSize through the GenReq union
		 * view after bde_setup — presumably GenReq/XmitSeq overlay
		 * the same payload words; verify against the WQE layout.
		 */
		wqe->CmdSpecific = wqe->un.GenReq.Payload.tus.f.bdeSize;

	} else {
		/* CT Request */

		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			/*
			 * NOTE(review): this message dereferences node for
			 * nlp_Rpi; it assumes iocbq->node is non-NULL here —
			 * confirm against callers.
			 */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find rpi. did=0x%x rpi=%d",
			    did, node->nlp_Rpi);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_CT_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			emlxs_sli4_free_xri(port, sbp, xrip, 1);
			return (FC_TRAN_BUSY);
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_GEN_REQUEST64_CR;
		wqe->un.GenReq.la = 1;
		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;

#ifdef DEBUG_CT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: SGLaddr virt %p phys %p", xrip->SGList->virt,
		    xrip->SGList->phys);
		emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList->virt,
		    12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: CMD virt %p len %d:%d",
		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
		emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
		    20, 0);
#endif /* DEBUG_CT */

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* Setup for rsp */
	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->ULPPU = 1;	/* Wd4 is relative offset */

	/* Flush the SGL to the device before posting */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* 8-bit Timer field; larger timeouts sent as "no timeout" */
	wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_ct_iocb() */
5054 
5055 
5056 /*ARGSUSED*/
5057 static int
5058 emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
5059 {
5060 	uint32_t *ptr;
5061 	EQE_u eqe;
5062 	int rc = 0;
5063 	off_t offset;
5064 
5065 	mutex_enter(&EMLXS_PORT_LOCK);
5066 
5067 	ptr = eq->addr.virt;
5068 	ptr += eq->host_index;
5069 
5070 	offset = (off_t)((uint64_t)((unsigned long)
5071 	    eq->addr.virt) -
5072 	    (uint64_t)((unsigned long)
5073 	    hba->sli.sli4.slim2.virt));
5074 
5075 	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
5076 	    4096, DDI_DMA_SYNC_FORKERNEL);
5077 
5078 	eqe.word = *ptr;
5079 	eqe.word = BE_SWAP32(eqe.word);
5080 
5081 	if (eqe.word & EQE_VALID) {
5082 		rc = 1;
5083 	}
5084 
5085 	mutex_exit(&EMLXS_PORT_LOCK);
5086 
5087 	return (rc);
5088 
5089 } /* emlxs_sli4_read_eq */
5090 
5091 
5092 static void
5093 emlxs_sli4_poll_intr(emlxs_hba_t *hba)
5094 {
5095 	int rc = 0;
5096 	int i;
5097 	char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
5098 
5099 	/* Check attention bits once and process if required */
5100 
5101 	for (i = 0; i < hba->intr_count; i++) {
5102 		rc = emlxs_sli4_read_eq(hba, &hba->sli.sli4.eq[i]);
5103 		if (rc == 1) {
5104 			break;
5105 		}
5106 	}
5107 
5108 	if (rc != 1) {
5109 		return;
5110 	}
5111 
5112 	(void) emlxs_sli4_msi_intr((char *)hba,
5113 	    (char *)(unsigned long)arg[i]);
5114 
5115 	return;
5116 
5117 } /* emlxs_sli4_poll_intr() */
5118 
5119 
/*
 * emlxs_sli4_process_async_event
 *
 * Decode and dispatch an asynchronous event CQE.  Updates the HBA link
 * event tag and statistics, then switches on the event code:
 * FCoE link state, FIP (FCF discovery/loss/CVL), DCBX, Group 5 (QoS
 * link speed), native FC link attention, SLI port events (temperature,
 * misconfigured optics), VF and MR events.  Unknown codes are logged.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	uint8_t status;	/* per-link misconfig status (MISCONFIG_PORT only) */

	/*
	 * Save the event tag.  Note this reads the link view of the CQE
	 * union regardless of event_code; duplicate or skipped tags are
	 * counted as multi-events.
	 */
	if (hba->link_event_tag == cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	} else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	}
	hba->link_event_tag = cqe->un.link.event_tag;

	switch (cqe->event_code) {
	case ASYNC_EVENT_CODE_FCOE_LINK_STATE:
		HBASTATS.LinkEvent++;

		/* Only logical-up and both down transitions are acted on */
		switch (cqe->un.link.link_status) {
		case ASYNC_EVENT_PHYS_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_PHYS_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: Unknown link status=%d event=%x",
			    cqe->un.link.link_status, HBASTATS.LinkEvent);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_FCOE_FIP:
		/* FIP events drive the FCF state machine notifications */
		switch (cqe->un.fcoe.evt_type) {
		case ASYNC_EVENT_NEW_FCF_DISC:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_FOUND %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_found_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_FCF_TABLE_FULL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCFTAB_FULL %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_full_notify(port);
			break;
		case ASYNC_EVENT_FCF_DEAD:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_LOST %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_lost_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_VIRT_LINK_CLEAR:
			/* Clear Virtual Link: ref_index is a VPI here */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: CVL %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_cvl_notify(port,
			    emlxs_sli4_vpi_to_index(hba,
			    cqe->un.fcoe.ref_index));
			break;

		case ASYNC_EVENT_FCF_MODIFIED:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_CHANGED %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_changed_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: Unknown event type=%d",
			    cqe->un.fcoe.evt_type);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_DCBX:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "DCBX Async Event: type=%d. Not supported.",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_GRP_5:
		/* Group 5: only the QoS link speed update is consumed */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Group 5 Async Event: type=%d.", cqe->event_type);
		if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
			hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
		}
		break;
	case ASYNC_EVENT_CODE_FC_EVENT:
		/* Native FC link attention (shared or dedicated link) */
		switch (cqe->event_type) {
		case ASYNC_EVENT_FC_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		case ASYNC_EVENT_FC_SHARED_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Shared Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Unknown event. type=%d event=%x",
			    cqe->event_type, HBASTATS.LinkEvent);
		}
		break;
	case ASYNC_EVENT_CODE_PORT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "SLI Port Async Event: type=%d", cqe->event_type);

		switch (cqe->event_type) {
		case ASYNC_EVENT_PORT_OTEMP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "SLI Port Async Event: Temperature limit exceeded");
			cmn_err(CE_WARN,
			    "^%s%d: Temperature limit exceeded. Fibre channel "
			    "controller temperature %u degrees C",
			    DRIVER_NAME, hba->ddiinst,
			    BE_SWAP32(*(uint32_t *)cqe->un.port.link_status));
			break;

		case ASYNC_EVENT_PORT_NTEMP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "SLI Port Async Event: Temperature returned to "
			    "normal");
			cmn_err(CE_WARN,
			    "^%s%d: Temperature returned to normal",
			    DRIVER_NAME, hba->ddiinst);
			break;

		case ASYNC_EVENT_MISCONFIG_PORT:
			/*
			 * link_status is swapped in place, then the byte for
			 * this port's link number gives the optics status.
			 */
			*((uint32_t *)cqe->un.port.link_status) =
			    BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
			status =
			    cqe->un.port.link_status[hba->sli.sli4.link_number];

			switch (status) {
				case 0 :
				break;

				case 1 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media not "
				    "detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics faulted/incorrectly "
				    "installed/not installed - Reseat optics, "
				    "if issue not resolved, replace.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				case 2 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Wrong physical "
				    "media detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics of two types installed - "
				    "Remove one optic or install matching"
				    "pair of optics.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				case 3 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Unsupported "
				    "physical media detected");
				cmn_err(CE_WARN,
				    "^%s%d:  Incompatible optics - Replace "
				    "with compatible optics for card to "
				    "function.",
				    DRIVER_NAME, hba->ddiinst);
				break;

				default :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media "
				    "error, status=%x", status);
				cmn_err(CE_WARN,
				    "^%s%d: Misconfigured port: status=0x%x - "
				    "Check optics on card.",
				    DRIVER_NAME, hba->ddiinst, status);
				break;
			}
			break;
		}

		break;
	case ASYNC_EVENT_CODE_VF:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "VF Async Event: type=%d",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_MR:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MR Async Event: type=%d",
		    cqe->event_type);
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unknown Async Event: code=%d type=%d.",
		    cqe->event_code, cqe->event_type);
		break;
	}

} /* emlxs_sli4_process_async_event() */
5367 
5368 
5369 /*ARGSUSED*/
5370 static void
5371 emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
5372 {
5373 	emlxs_port_t *port = &PPORT;
5374 	MAILBOX4 *mb;
5375 	MATCHMAP *mbox_bp;
5376 	MATCHMAP *mbox_nonembed;
5377 	MAILBOXQ *mbq = NULL;
5378 	uint32_t size;
5379 	uint32_t *iptr;
5380 	int rc;
5381 	off_t offset;
5382 
5383 	if (cqe->consumed && !cqe->completed) {
5384 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5385 		    "CQ ENTRY: Mbox event. Entry consumed but not completed");
5386 		return;
5387 	}
5388 
5389 	mutex_enter(&EMLXS_PORT_LOCK);
5390 	switch (hba->mbox_queue_flag) {
5391 	case 0:
5392 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5393 		    "CQ ENTRY: Mbox event. No mailbox active.");
5394 
5395 		mutex_exit(&EMLXS_PORT_LOCK);
5396 		return;
5397 
5398 	case MBX_POLL:
5399 
5400 		/* Mark mailbox complete, this should wake up any polling */
5401 		/* threads. This can happen if interrupts are enabled while */
5402 		/* a polled mailbox command is outstanding. If we don't set */
5403 		/* MBQ_COMPLETED here, the polling thread may wait until */
5404 		/* timeout error occurs */
5405 
5406 		mutex_enter(&EMLXS_MBOX_LOCK);
5407 		mbq = (MAILBOXQ *)hba->mbox_mbq;
5408 		if (mbq) {
5409 			port = (emlxs_port_t *)mbq->port;
5410 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5411 			    "CQ ENTRY: Mbox event. Completing Polled command.");
5412 			mbq->flag |= MBQ_COMPLETED;
5413 		}
5414 		mutex_exit(&EMLXS_MBOX_LOCK);
5415 
5416 		mutex_exit(&EMLXS_PORT_LOCK);
5417 		return;
5418 
5419 	case MBX_SLEEP:
5420 	case MBX_NOWAIT:
5421 		/* Check mbox_timer, it acts as a service flag too */
5422 		/* The first to service the mbox queue will clear the timer */
5423 		if (hba->mbox_timer) {
5424 			hba->mbox_timer = 0;
5425 
5426 			mutex_enter(&EMLXS_MBOX_LOCK);
5427 			mbq = (MAILBOXQ *)hba->mbox_mbq;
5428 			mutex_exit(&EMLXS_MBOX_LOCK);
5429 		}
5430 
5431 		if (!mbq) {
5432 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5433 			    "Mailbox event. No service required.");
5434 			mutex_exit(&EMLXS_PORT_LOCK);
5435 			return;
5436 		}
5437 
5438 		mb = (MAILBOX4 *)mbq;
5439 		mutex_exit(&EMLXS_PORT_LOCK);
5440 		break;
5441 
5442 	default:
5443 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5444 		    "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
5445 		    hba->mbox_queue_flag);
5446 
5447 		mutex_exit(&EMLXS_PORT_LOCK);
5448 		return;
5449 	}
5450 
5451 	/* Set port context */
5452 	port = (emlxs_port_t *)mbq->port;
5453 
5454 	offset = (off_t)((uint64_t)((unsigned long)
5455 	    hba->sli.sli4.mq.addr.virt) -
5456 	    (uint64_t)((unsigned long)
5457 	    hba->sli.sli4.slim2.virt));
5458 
5459 	/* Now that we are the owner, DMA Sync entire MQ if needed */
5460 	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
5461 	    4096, DDI_DMA_SYNC_FORDEV);
5462 
5463 	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
5464 	    MAILBOX_CMD_SLI4_BSIZE);
5465 
5466 	if (mb->mbxCommand != MBX_HEARTBEAT) {
5467 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5468 		    "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
5469 		    mb->mbxStatus, mb->mbxCommand);
5470 
5471 		emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
5472 		    12, 0);
5473 	}
5474 
5475 	if (mb->mbxCommand == MBX_SLI_CONFIG) {
5476 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5477 		    "Mbox sge_cnt: %d length: %d embed: %d",
5478 		    mb->un.varSLIConfig.be.sge_cnt,
5479 		    mb->un.varSLIConfig.be.payload_length,
5480 		    mb->un.varSLIConfig.be.embedded);
5481 	}
5482 
5483 	/* Now sync the memory buffer if one was used */
5484 	if (mbq->bp) {
5485 		mbox_bp = (MATCHMAP *)mbq->bp;
5486 		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5487 		    DDI_DMA_SYNC_FORKERNEL);
5488 #ifdef FMA_SUPPORT
5489 		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
5490 		    != DDI_FM_OK) {
5491 			EMLXS_MSGF(EMLXS_CONTEXT,
5492 			    &emlxs_invalid_dma_handle_msg,
5493 			    "sli4_process_mbox_event: hdl=%p",
5494 			    mbox_bp->dma_handle);
5495 
5496 			mb->mbxStatus = MBXERR_DMA_ERROR;
5497 }
5498 #endif
5499 	}
5500 
5501 	/* Now sync the memory buffer if one was used */
5502 	if (mbq->nonembed) {
5503 		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
5504 		size = mbox_nonembed->size;
5505 		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
5506 		    DDI_DMA_SYNC_FORKERNEL);
5507 		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
5508 		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);
5509 
5510 #ifdef FMA_SUPPORT
5511 		if (emlxs_fm_check_dma_handle(hba,
5512 		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
5513 			EMLXS_MSGF(EMLXS_CONTEXT,
5514 			    &emlxs_invalid_dma_handle_msg,
5515 			    "sli4_process_mbox_event: hdl=%p",
5516 			    mbox_nonembed->dma_handle);
5517 
5518 			mb->mbxStatus = MBXERR_DMA_ERROR;
5519 		}
5520 #endif
5521 		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
5522 	}
5523 
5524 	/* Mailbox has been completely received at this point */
5525 
5526 	if (mb->mbxCommand == MBX_HEARTBEAT) {
5527 		hba->heartbeat_active = 0;
5528 		goto done;
5529 	}
5530 
5531 	if (hba->mbox_queue_flag == MBX_SLEEP) {
5532 		if (mb->mbxCommand != MBX_DOWN_LOAD
5533 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
5534 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5535 			    "Received.  %s: status=%x Sleep.",
5536 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
5537 			    mb->mbxStatus);
5538 		}
5539 	} else {
5540 		if (mb->mbxCommand != MBX_DOWN_LOAD
5541 		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
5542 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5543 			    "Completed. %s: status=%x",
5544 			    emlxs_mb_cmd_xlate(mb->mbxCommand),
5545 			    mb->mbxStatus);
5546 		}
5547 	}
5548 
5549 	/* Filter out passthru mailbox */
5550 	if (mbq->flag & MBQ_PASSTHRU) {
5551 		goto done;
5552 	}
5553 
5554 	if (mb->mbxStatus) {
5555 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5556 		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5557 		    (uint32_t)mb->mbxStatus);
5558 	}
5559 
5560 	if (mbq->mbox_cmpl) {
5561 		rc = (mbq->mbox_cmpl)(hba, mbq);
5562 
5563 		/* If mbox was retried, return immediately */
5564 		if (rc) {
5565 			return;
5566 		}
5567 	}
5568 
5569 done:
5570 
5571 	/* Clean up the mailbox area */
5572 	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);
5573 
5574 	/* Attempt to send pending mailboxes */
5575 	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5576 	if (mbq) {
5577 		/* Attempt to send pending mailboxes */
5578 		rc =  emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5579 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5580 			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
5581 		}
5582 	}
5583 	return;
5584 
5585 } /* emlxs_sli4_process_mbox_event() */
5586 
5587 
5588 /*ARGSUSED*/
5589 static void
5590 emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
5591 {
5592 #ifdef DEBUG_FASTPATH
5593 	emlxs_port_t *port = &PPORT;
5594 #endif /* DEBUG_FASTPATH */
5595 	IOCBQ *iocbq;
5596 	IOCB *iocb;
5597 	uint32_t *iptr;
5598 	fc_packet_t *pkt;
5599 	emlxs_wqe_t *wqe;
5600 
5601 	iocbq = &sbp->iocbq;
5602 	wqe = &iocbq->wqe;
5603 	iocb = &iocbq->iocb;
5604 
5605 #ifdef DEBUG_FASTPATH
5606 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5607 	    "CQE to IOCB: cmd:%x tag:%x xri:%d", wqe->Command,
5608 	    wqe->RequestTag, wqe->XRITag);
5609 #endif /* DEBUG_FASTPATH */
5610 
5611 	iocb->ULPSTATUS = cqe->Status;
5612 	iocb->un.ulpWord[4] = cqe->Parameter;
5613 	iocb->ULPIOTAG = cqe->RequestTag;
5614 	iocb->ULPCONTEXT = wqe->XRITag;
5615 
5616 	switch (wqe->Command) {
5617 
5618 	case CMD_FCP_ICMND64_CR:
5619 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
5620 		break;
5621 
5622 	case CMD_FCP_IREAD64_CR:
5623 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
5624 		iocb->ULPPU = PARM_XFER_CHECK;
5625 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
5626 			iocb->un.fcpi64.fcpi_parm =
5627 			    wqe->un.FcpCmd.TotalTransferCount -
5628 			    cqe->CmdSpecific;
5629 		}
5630 		break;
5631 
5632 	case CMD_FCP_IWRITE64_CR:
5633 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
5634 		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
5635 			if (wqe->un.FcpCmd.TotalTransferCount >
5636 			    cqe->CmdSpecific) {
5637 				iocb->un.fcpi64.fcpi_parm =
5638 				    wqe->un.FcpCmd.TotalTransferCount -
5639 				    cqe->CmdSpecific;
5640 			} else {
5641 				iocb->un.fcpi64.fcpi_parm = 0;
5642 			}
5643 		}
5644 		break;
5645 
5646 	case CMD_ELS_REQUEST64_CR:
5647 		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
5648 		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
5649 		if (iocb->ULPSTATUS == 0) {
5650 			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
5651 		}
5652 		if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
5653 			/* For LS_RJT, the driver populates the rsp buffer */
5654 			pkt = PRIV2PKT(sbp);
5655 			iptr = (uint32_t *)pkt->pkt_resp;
5656 			*iptr++ = ELS_CMD_LS_RJT;
5657 			*iptr = cqe->Parameter;
5658 		}
5659 		break;
5660 
5661 	case CMD_GEN_REQUEST64_CR:
5662 		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
5663 		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
5664 		break;
5665 
5666 	case CMD_XMIT_SEQUENCE64_CR:
5667 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
5668 		break;
5669 
5670 	case CMD_ABORT_XRI_CX:
5671 		iocb->ULPCONTEXT = wqe->AbortTag;
5672 		break;
5673 
5674 	case CMD_FCP_TRECEIVE64_CX:
5675 		/* free memory for XRDY */
5676 		if (iocbq->bp) {
5677 			emlxs_mem_buf_free(hba, iocbq->bp);
5678 			iocbq->bp = 0;
5679 		}
5680 
5681 		/*FALLTHROUGH*/
5682 
5683 	case CMD_FCP_TSEND64_CX:
5684 	case CMD_FCP_TRSP64_CX:
5685 	default:
5686 		iocb->ULPCOMMAND = wqe->Command;
5687 
5688 	}
5689 } /* emlxs_CQE_to_IOCB() */
5690 
5691 
/*
 * Flush every outstanding I/O in the fc_table, completing each with a
 * forged IOSTAT_LOCAL_REJECT / IOERR_SEQUENCE_TIMEOUT CQE.  Used when
 * the chip can no longer complete the commands itself.
 */
/*ARGSUSED*/
static void
emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t i;
	uint32_t trigger = 0;	/* set when work is deferred to a thread */
	CQE_CmplWQ_t cqe;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Claim this iotag under the lock, then drop the lock */
		/* while completing the I/O (completion may block) */
		hba->fc_table[i] = STALE_PACKET;
		hba->io_count--;
		sbp->iotag = 0;
		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Forge a timeout completion CQE for this command */
		cp = sbp->channel;
		bzero(&cqe, sizeof (CQE_CmplWQ_t));
		cqe.RequestTag = i;
		cqe.Status = IOSTAT_LOCAL_REJECT;
		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;

		cp->hbaCmplCmd_sbp++;

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_COMPLETE);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		/* Command was on-chip; drop the active I/O counts */
		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */
		}

		/* Copy entry to sbp's iocbq */
		iocbq = &sbp->iocbq;
		emlxs_CQE_to_IOCB(hba, &cqe, sbp);

		iocbq->next = NULL;

		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);

		if (!(sbp->pkt_flags &
		    (PACKET_POLLED | PACKET_ALLOCATED))) {
			/* Add the IOCB to the channel list */
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = iocbq;
				cp->rsp_tail = iocbq;
			} else {
				cp->rsp_tail->next = iocbq;
				cp->rsp_tail = iocbq;
			}
			mutex_exit(&cp->rsp_lock);
			trigger = 1;
		} else {
			/* Polled/driver-owned packet: complete inline */
			emlxs_proc_channel_event(hba, cp, iocbq);
		}
		/* Reacquire before testing the next fc_table slot */
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Kick the intr thread on every channel that has queued responses */
	if (trigger) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->rsp_head != NULL) {
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

} /* emlxs_sli4_hba_flush_chipq() */
5781 
5782 
5783 /*ARGSUSED*/
5784 static void
5785 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
5786     CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5787 {
5788 	emlxs_port_t *port = &PPORT;
5789 	CHANNEL *cp;
5790 	uint16_t request_tag;
5791 
5792 	request_tag = cqe->RequestTag;
5793 
5794 	/* 1 to 1 mapping between CQ and channel */
5795 	cp = cq->channelp;
5796 
5797 	cp->hbaCmplCmd++;
5798 
5799 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5800 	    "CQ ENTRY: OOR Cmpl: iotag=%d", request_tag);
5801 
5802 	emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 4, 0);
5803 
5804 } /* emlxs_sli4_process_oor_wqe_cmpl() */
5805 
5806 
5807 /*ARGSUSED*/
5808 static void
5809 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5810 {
5811 	emlxs_port_t *port = &PPORT;
5812 	CHANNEL *cp;
5813 	emlxs_buf_t *sbp;
5814 	IOCBQ *iocbq;
5815 	uint16_t request_tag;
5816 #ifdef SFCT_SUPPORT
5817 #ifdef FCT_IO_TRACE
5818 	fct_cmd_t *fct_cmd;
5819 	emlxs_buf_t *cmd_sbp;
5820 #endif /* FCT_IO_TRACE */
5821 #endif /* SFCT_SUPPORT */
5822 
5823 	request_tag = cqe->RequestTag;
5824 
5825 	/* 1 to 1 mapping between CQ and channel */
5826 	cp = cq->channelp;
5827 
5828 	mutex_enter(&EMLXS_FCTAB_LOCK);
5829 	sbp = hba->fc_table[request_tag];
5830 
5831 	if (!sbp) {
5832 		cp->hbaCmplCmd++;
5833 		mutex_exit(&EMLXS_FCTAB_LOCK);
5834 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5835 		    "CQ ENTRY: NULL sbp. iotag=%d. Dropping...",
5836 		    request_tag);
5837 		return;
5838 	}
5839 
5840 	if (sbp == STALE_PACKET) {
5841 		cp->hbaCmplCmd_sbp++;
5842 		mutex_exit(&EMLXS_FCTAB_LOCK);
5843 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5844 		    "CQ ENTRY: Stale sbp. iotag=%d. Dropping...", request_tag);
5845 		return;
5846 	}
5847 
5848 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
5849 		atomic_add_32(&hba->io_active, -1);
5850 #ifdef NODE_THROTTLE_SUPPORT
5851 		if (sbp->node) {
5852 			atomic_add_32(&sbp->node->io_active, -1);
5853 		}
5854 #endif /* NODE_THROTTLE_SUPPORT */
5855 	}
5856 
5857 	if (!(sbp->xrip)) {
5858 		cp->hbaCmplCmd++;
5859 		mutex_exit(&EMLXS_FCTAB_LOCK);
5860 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5861 		    "CQ ENTRY: NULL sbp xrip %p. iotag=%d. Dropping...",
5862 		    sbp, request_tag);
5863 		return;
5864 	}
5865 
5866 #ifdef DEBUG_FASTPATH
5867 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5868 	    "CQ ENTRY: process wqe compl");
5869 #endif /* DEBUG_FASTPATH */
5870 	cp->hbaCmplCmd_sbp++;
5871 
5872 	/* Copy entry to sbp's iocbq */
5873 	iocbq = &sbp->iocbq;
5874 	emlxs_CQE_to_IOCB(hba, cqe, sbp);
5875 
5876 	iocbq->next = NULL;
5877 
5878 	if (cqe->XB) {
5879 		/* Mark exchange as ABORT in progress */
5880 		sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
5881 		sbp->xrip->flag |= EMLXS_XRI_BUSY;
5882 
5883 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5884 		    "CQ ENTRY: XRI BUSY: iotag=%d xri=%d", request_tag,
5885 		    sbp->xrip->XRI);
5886 
5887 		emlxs_sli4_free_xri(port, sbp, 0, 0);
5888 	} else {
5889 		/* Exchange is no longer busy on-chip, free it */
5890 		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
5891 	}
5892 
5893 	mutex_exit(&EMLXS_FCTAB_LOCK);
5894 
5895 #ifdef SFCT_SUPPORT
5896 #ifdef FCT_IO_TRACE
5897 	fct_cmd = sbp->fct_cmd;
5898 	if (fct_cmd) {
5899 		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
5900 		mutex_enter(&cmd_sbp->fct_mtx);
5901 		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
5902 		mutex_exit(&cmd_sbp->fct_mtx);
5903 	}
5904 #endif /* FCT_IO_TRACE */
5905 #endif /* SFCT_SUPPORT */
5906 
5907 	/*
5908 	 * If this is NOT a polled command completion
5909 	 * or a driver allocated pkt, then defer pkt
5910 	 * completion.
5911 	 */
5912 	if (!(sbp->pkt_flags &
5913 	    (PACKET_POLLED | PACKET_ALLOCATED))) {
5914 		/* Add the IOCB to the channel list */
5915 		mutex_enter(&cp->rsp_lock);
5916 		if (cp->rsp_head == NULL) {
5917 			cp->rsp_head = iocbq;
5918 			cp->rsp_tail = iocbq;
5919 		} else {
5920 			cp->rsp_tail->next = iocbq;
5921 			cp->rsp_tail = iocbq;
5922 		}
5923 		mutex_exit(&cp->rsp_lock);
5924 
5925 		/* Delay triggering thread till end of ISR */
5926 		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
5927 	} else {
5928 		emlxs_proc_channel_event(hba, cp, iocbq);
5929 	}
5930 
5931 } /* emlxs_sli4_process_wqe_cmpl() */
5932 
5933 
5934 /*ARGSUSED*/
5935 static void
5936 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
5937     CQE_RelWQ_t *cqe)
5938 {
5939 	emlxs_port_t *port = &PPORT;
5940 	WQ_DESC_t *wq;
5941 	CHANNEL *cp;
5942 	uint32_t i;
5943 	uint16_t wqi;
5944 
5945 	wqi = emlxs_sli4_wqid_to_index(hba, (uint16_t)cqe->WQid);
5946 
5947 	/* Verify WQ index */
5948 	if (wqi == 0xffff) {
5949 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5950 		    "CQ ENTRY: Invalid WQid:%d. Dropping...",
5951 		    cqe->WQid);
5952 		return;
5953 	}
5954 
5955 	wq = &hba->sli.sli4.wq[wqi];
5956 
5957 #ifdef DEBUG_FASTPATH
5958 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5959 	    "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
5960 	    cqe->WQindex);
5961 #endif /* DEBUG_FASTPATH */
5962 
5963 	wq->port_index = cqe->WQindex;
5964 
5965 	/* Cmd ring may be available. Try sending more iocbs */
5966 	for (i = 0; i < hba->chan_count; i++) {
5967 		cp = &hba->chan[i];
5968 		if (wq == (WQ_DESC_t *)cp->iopath) {
5969 			emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
5970 		}
5971 	}
5972 
5973 } /* emlxs_sli4_process_release_wqe() */
5974 
5975 
5976 /*ARGSUSED*/
5977 emlxs_iocbq_t *
5978 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
5979 {
5980 	emlxs_queue_t *q;
5981 	emlxs_iocbq_t *iocbq;
5982 	emlxs_iocbq_t *prev;
5983 	fc_frame_hdr_t *fchdr2;
5984 	RXQ_DESC_t *rxq;
5985 
5986 	switch (fchdr->type) {
5987 	case 1: /* ELS */
5988 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5989 		break;
5990 	case 0x20: /* CT */
5991 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5992 		break;
5993 	default:
5994 		return (NULL);
5995 	}
5996 
5997 	mutex_enter(&rxq->lock);
5998 
5999 	q = &rxq->active;
6000 	iocbq  = (emlxs_iocbq_t *)q->q_first;
6001 	prev = NULL;
6002 
6003 	while (iocbq) {
6004 
6005 		fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
6006 
6007 		if ((fchdr2->s_id == fchdr->s_id) &&
6008 		    (fchdr2->ox_id == fchdr->ox_id) &&
6009 		    (fchdr2->seq_id == fchdr->seq_id)) {
6010 			/* Remove iocbq */
6011 			if (prev) {
6012 				prev->next = iocbq->next;
6013 			}
6014 			if (q->q_first == (uint8_t *)iocbq) {
6015 				q->q_first = (uint8_t *)iocbq->next;
6016 			}
6017 			if (q->q_last == (uint8_t *)iocbq) {
6018 				q->q_last = (uint8_t *)prev;
6019 			}
6020 			q->q_cnt--;
6021 
6022 			break;
6023 		}
6024 
6025 		prev  = iocbq;
6026 		iocbq = iocbq->next;
6027 	}
6028 
6029 	mutex_exit(&rxq->lock);
6030 
6031 	return (iocbq);
6032 
6033 } /* emlxs_sli4_rxq_get() */
6034 
6035 
6036 /*ARGSUSED*/
6037 void
6038 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
6039 {
6040 	emlxs_queue_t *q;
6041 	fc_frame_hdr_t *fchdr;
6042 	RXQ_DESC_t *rxq;
6043 
6044 	fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
6045 
6046 	switch (fchdr->type) {
6047 	case 1: /* ELS */
6048 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
6049 		break;
6050 	case 0x20: /* CT */
6051 		rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
6052 		break;
6053 	default:
6054 		return;
6055 	}
6056 
6057 	mutex_enter(&rxq->lock);
6058 
6059 	q = &rxq->active;
6060 
6061 	if (q->q_last) {
6062 		((emlxs_iocbq_t *)q->q_last)->next = iocbq;
6063 		q->q_cnt++;
6064 	} else {
6065 		q->q_first = (uint8_t *)iocbq;
6066 		q->q_cnt = 1;
6067 	}
6068 
6069 	q->q_last = (uint8_t *)iocbq;
6070 	iocbq->next = NULL;
6071 
6072 	mutex_exit(&rxq->lock);
6073 
6074 	return;
6075 
6076 } /* emlxs_sli4_rxq_put() */
6077 
6078 
6079 static void
6080 emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
6081 {
6082 	emlxs_hba_t *hba = HBA;
6083 
6084 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6085 	    "RQ POST: rqid=%d count=1", rqid);
6086 
6087 	/* Ring the RQ doorbell once to repost the RQ buffer */
6088 
6089 	emlxs_sli4_write_rqdb(hba, rqid, 1);
6090 
6091 } /* emlxs_sli4_rq_post() */
6092 
6093 
6094 /*ARGSUSED*/
6095 static void
6096 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
6097     CQE_UnsolRcv_t *cqe)
6098 {
6099 	emlxs_port_t *port = &PPORT;
6100 	emlxs_port_t *vport;
6101 	RQ_DESC_t *hdr_rq;
6102 	RQ_DESC_t *data_rq;
6103 	MBUF_INFO *hdr_mp;
6104 	MBUF_INFO *data_mp;
6105 	MATCHMAP *seq_mp;
6106 	uint32_t *data;
6107 	fc_frame_hdr_t fchdr;
6108 	uint16_t hdr_rqi;
6109 	uint32_t host_index;
6110 	emlxs_iocbq_t *iocbq = NULL;
6111 	emlxs_iocb_t *iocb;
6112 	emlxs_node_t *node = NULL;
6113 	uint32_t i;
6114 	uint32_t seq_len;
6115 	uint32_t seq_cnt;
6116 	uint32_t buf_type;
6117 	char label[32];
6118 	emlxs_wqe_t *wqe;
6119 	CHANNEL *cp;
6120 	XRIobj_t *xrip;
6121 	RPIobj_t *rpip = NULL;
6122 	uint32_t	cmd;
6123 	uint32_t posted = 0;
6124 	uint32_t abort = 1;
6125 	off_t offset;
6126 	uint32_t status;
6127 	uint32_t data_size;
6128 	uint16_t rqid;
6129 	uint32_t hdr_size;
6130 	fc_packet_t *pkt;
6131 	emlxs_buf_t *sbp;
6132 
6133 	if (cqe->Code == CQE_TYPE_UNSOL_RCV_V1) {
6134 		CQE_UnsolRcvV1_t *cqeV1 = (CQE_UnsolRcvV1_t *)cqe;
6135 
6136 		status	  = cqeV1->Status;
6137 		data_size = cqeV1->data_size;
6138 		rqid	  = cqeV1->RQid;
6139 		hdr_size  = cqeV1->hdr_size;
6140 	} else {
6141 		status	  = cqe->Status;
6142 		data_size = cqe->data_size;
6143 		rqid	  = cqe->RQid;
6144 		hdr_size  = cqe->hdr_size;
6145 	}
6146 
6147 	/* Validate the CQE */
6148 
6149 	/* Check status */
6150 	switch (status) {
6151 	case RQ_STATUS_SUCCESS: /* 0x10 */
6152 		break;
6153 
6154 	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
6155 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6156 		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
6157 		break;
6158 
6159 	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
6160 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6161 		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
6162 		return;
6163 
6164 	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
6165 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6166 		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
6167 		return;
6168 
6169 	default:
6170 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6171 		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
6172 		    status);
6173 		break;
6174 	}
6175 
6176 	/* Make sure there is a frame header */
6177 	if (hdr_size < sizeof (fc_frame_hdr_t)) {
6178 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6179 		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
6180 		return;
6181 	}
6182 
6183 	hdr_rqi = emlxs_sli4_rqid_to_index(hba, rqid);
6184 
6185 	/* Verify RQ index */
6186 	if (hdr_rqi == 0xffff) {
6187 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6188 		    "CQ ENTRY: Unsol Rcv: Invalid RQID:%d. Dropping...",
6189 		    rqid);
6190 		return;
6191 	}
6192 
6193 	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
6194 	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
6195 
6196 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6197 	    "CQ ENTRY: Unsol Rcv:%x rqid=%d,%d index=%d status=%x "
6198 	    "hdr_size=%d data_size=%d",
6199 	    cqe->Code, rqid, hdr_rqi, hdr_rq->host_index, status, hdr_size,
6200 	    data_size);
6201 
6202 	hdr_rq->num_proc++;
6203 
6204 	/* Update host index */
6205 	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
6206 	host_index = hdr_rq->host_index;
6207 	hdr_rq->host_index++;
6208 
6209 	if (hdr_rq->host_index >= hdr_rq->max_index) {
6210 		hdr_rq->host_index = 0;
6211 	}
6212 	data_rq->host_index = hdr_rq->host_index;
6213 	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
6214 
6215 	/* Get the next header rqb */
6216 	hdr_mp  = &hdr_rq->rqb[host_index];
6217 
6218 	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
6219 	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
6220 
6221 	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
6222 	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
6223 
6224 	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
6225 	    sizeof (fc_frame_hdr_t));
6226 
6227 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6228 	    "RQ HDR[%d]: rctl:%x type:%x "
6229 	    "sid:%x did:%x oxid:%x rxid:%x",
6230 	    host_index, fchdr.r_ctl, fchdr.type,
6231 	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
6232 
6233 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6234 	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
6235 	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
6236 	    fchdr.df_ctl, fchdr.ro);
6237 
6238 	/* Verify fc header type */
6239 	switch (fchdr.type) {
6240 	case 0: /* BLS */
6241 		if (fchdr.r_ctl != 0x81) {
6242 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6243 			    "RQ ENTRY: Unexpected FC rctl (0x%x) "
6244 			    "received. Dropping...",
6245 			    fchdr.r_ctl);
6246 
6247 			goto done;
6248 		}
6249 
6250 		/* Make sure there is no payload */
6251 		if (data_size != 0) {
6252 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6253 			    "RQ ENTRY: ABTS payload provided. Dropping...");
6254 
6255 			goto done;
6256 		}
6257 
6258 		buf_type = 0xFFFFFFFF;
6259 		(void) strlcpy(label, "ABTS", sizeof (label));
6260 		cp = &hba->chan[hba->channel_els];
6261 		break;
6262 
6263 	case 0x01: /* ELS */
6264 		/* Make sure there is a payload */
6265 		if (data_size == 0) {
6266 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6267 			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
6268 			    "Dropping...");
6269 
6270 			goto done;
6271 		}
6272 
6273 		buf_type = MEM_ELSBUF;
6274 		(void) strlcpy(label, "Unsol ELS", sizeof (label));
6275 		cp = &hba->chan[hba->channel_els];
6276 		break;
6277 
6278 	case 0x20: /* CT */
6279 		/* Make sure there is a payload */
6280 		if (data_size == 0) {
6281 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6282 			    "RQ ENTRY: Unsol Rcv: No CT payload provided. "
6283 			    "Dropping...");
6284 
6285 			goto done;
6286 		}
6287 
6288 		buf_type = MEM_CTBUF;
6289 		(void) strlcpy(label, "Unsol CT", sizeof (label));
6290 		cp = &hba->chan[hba->channel_ct];
6291 		break;
6292 
6293 	case 0x08: /* FCT */
6294 		/* Make sure there is a payload */
6295 		if (data_size == 0) {
6296 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6297 			    "RQ ENTRY: Unsol Rcv: No FCP payload provided. "
6298 			    "Dropping...");
6299 
6300 			goto done;
6301 		}
6302 
6303 		buf_type = MEM_FCTBUF;
6304 		(void) strlcpy(label, "Unsol FCT", sizeof (label));
6305 		cp = &hba->chan[hba->CHANNEL_FCT];
6306 		break;
6307 
6308 	default:
6309 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6310 		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
6311 		    fchdr.type);
6312 
6313 		goto done;
6314 	}
6315 	/* Fc Header is valid */
6316 
6317 	/* Check if this is an active sequence */
6318 	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
6319 
6320 	if (!iocbq) {
6321 		if (fchdr.type != 0) {
6322 			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
6323 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6324 				    "RQ ENTRY: %s: First of sequence not"
6325 				    " set.  Dropping...",
6326 				    label);
6327 
6328 				goto done;
6329 			}
6330 		}
6331 
6332 		if ((fchdr.type != 0) && (fchdr.seq_cnt != 0)) {
6333 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6334 			    "RQ ENTRY: %s: Sequence count not zero (%d).  "
6335 			    "Dropping...",
6336 			    label, fchdr.seq_cnt);
6337 
6338 			goto done;
6339 		}
6340 
6341 		/* Find vport */
6342 		for (i = 0; i < MAX_VPORTS; i++) {
6343 			vport = &VPORT(i);
6344 
6345 			if (vport->did == fchdr.d_id) {
6346 				port = vport;
6347 				break;
6348 			}
6349 		}
6350 
6351 		if (i == MAX_VPORTS) {
6352 			/* Allow unsol FLOGI & PLOGI for P2P */
6353 			if ((fchdr.type != 1 /* ELS*/) ||
6354 			    ((fchdr.d_id != FABRIC_DID) &&
6355 			    !(hba->flag & FC_PT_TO_PT))) {
6356 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6357 				    "RQ ENTRY: %s: Invalid did=%x. Dropping...",
6358 				    label, fchdr.d_id);
6359 
6360 				goto done;
6361 			}
6362 		}
6363 
6364 		/* Allocate an IOCBQ */
6365 		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba, MEM_IOCB);
6366 
6367 		if (!iocbq) {
6368 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6369 			    "RQ ENTRY: %s: Out of IOCB "
6370 			    "resources.  Dropping...",
6371 			    label);
6372 
6373 			goto done;
6374 		}
6375 
6376 		seq_mp = NULL;
6377 		if (fchdr.type != 0) {
6378 			/* Allocate a buffer */
6379 			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type);
6380 
6381 			if (!seq_mp) {
6382 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6383 				    "RQ ENTRY: %s: Out of buffer "
6384 				    "resources.  Dropping...",
6385 				    label);
6386 
6387 				goto done;
6388 			}
6389 
6390 			iocbq->bp = (uint8_t *)seq_mp;
6391 		}
6392 
6393 		node = (void *)emlxs_node_find_did(port, fchdr.s_id, 1);
6394 		if (node == NULL) {
6395 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6396 			    "RQ ENTRY: %s: Node not found. sid=%x",
6397 			    label, fchdr.s_id);
6398 		}
6399 
6400 		/* Initialize the iocbq */
6401 		iocbq->port = port;
6402 		iocbq->channel = cp;
6403 		iocbq->node = node;
6404 
6405 		iocb = &iocbq->iocb;
6406 		iocb->RXSEQCNT = 0;
6407 		iocb->RXSEQLEN = 0;
6408 
6409 		seq_len = 0;
6410 		seq_cnt = 0;
6411 
6412 	} else {
6413 
6414 		iocb = &iocbq->iocb;
6415 		port = iocbq->port;
6416 		node = (emlxs_node_t *)iocbq->node;
6417 
6418 		seq_mp = (MATCHMAP *)iocbq->bp;
6419 		seq_len = iocb->RXSEQLEN;
6420 		seq_cnt = iocb->RXSEQCNT;
6421 
6422 		/* Check sequence order */
6423 		if (fchdr.seq_cnt != seq_cnt) {
6424 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6425 			    "RQ ENTRY: %s: Out of order frame received "
6426 			    "(%d != %d).  Dropping...",
6427 			    label, fchdr.seq_cnt, seq_cnt);
6428 
6429 			goto done;
6430 		}
6431 	}
6432 
6433 	/* We now have an iocbq */
6434 
6435 	if (!port->vpip->vfip) {
6436 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6437 		    "RQ ENTRY: %s: No fabric connection. "
6438 		    "Dropping...",
6439 		    label);
6440 
6441 		goto done;
6442 	}
6443 
6444 	/* Save the frame data to our seq buffer */
6445 	if (data_size && seq_mp) {
6446 		/* Get the next data rqb */
6447 		data_mp = &data_rq->rqb[host_index];
6448 
6449 		offset = (off_t)((uint64_t)((unsigned long)
6450 		    data_mp->virt) -
6451 		    (uint64_t)((unsigned long)
6452 		    hba->sli.sli4.slim2.virt));
6453 
6454 		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
6455 		    data_size, DDI_DMA_SYNC_FORKERNEL);
6456 
6457 		data = (uint32_t *)data_mp->virt;
6458 
6459 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6460 		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
6461 		    host_index, data[0], data[1], data[2], data[3],
6462 		    data[4], data[5]);
6463 
6464 		/* Check sequence length */
6465 		if ((seq_len + data_size) > seq_mp->size) {
6466 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6467 			    "RQ ENTRY: %s: Sequence buffer overflow. "
6468 			    "(%d > %d). Dropping...",
6469 			    label, (seq_len + data_size), seq_mp->size);
6470 
6471 			goto done;
6472 		}
6473 
6474 		/* Copy data to local receive buffer */
6475 		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
6476 		    seq_len), data_size);
6477 
6478 		seq_len += data_size;
6479 	}
6480 
6481 	/* If this is not the last frame of sequence, queue it. */
6482 	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
6483 		/* Save sequence header */
6484 		if (seq_cnt == 0) {
6485 			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
6486 			    sizeof (fc_frame_hdr_t));
6487 		}
6488 
6489 		/* Update sequence info in iocb */
6490 		iocb->RXSEQCNT = seq_cnt + 1;
6491 		iocb->RXSEQLEN = seq_len;
6492 
6493 		/* Queue iocbq for next frame */
6494 		emlxs_sli4_rxq_put(hba, iocbq);
6495 
6496 		/* Don't free resources */
6497 		iocbq = NULL;
6498 
6499 		/* No need to abort */
6500 		abort = 0;
6501 
6502 		goto done;
6503 	}
6504 
6505 	emlxs_sli4_rq_post(port, hdr_rq->qid);
6506 	posted = 1;
6507 
6508 	/* End of sequence found. Process request now. */
6509 
6510 	if (seq_cnt > 0) {
6511 		/* Retrieve first frame of sequence */
6512 		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
6513 		    sizeof (fc_frame_hdr_t));
6514 
6515 		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
6516 	}
6517 
6518 	/* Build rcv iocb and process it */
6519 	switch (fchdr.type) {
6520 	case 0: /* BLS */
6521 
6522 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6523 		    "RQ ENTRY: %s: oxid:%x rxid %x sid:%x. Sending BLS ACC...",
6524 		    label, fchdr.ox_id, fchdr.rx_id, fchdr.s_id);
6525 
6526 		/* Try to send abort response */
6527 		if (!(pkt = emlxs_pkt_alloc(port, 0, 0, 0, KM_NOSLEEP))) {
6528 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6529 			    "RQ ENTRY: %s: Unable to alloc pkt. Dropping...",
6530 			    label);
6531 			goto done;
6532 		}
6533 
6534 		/* Setup sbp / iocb for driver initiated cmd */
6535 		sbp = PKT2PRIV(pkt);
6536 
6537 		/* Free the temporary iocbq */
6538 		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6539 
6540 		iocbq = (emlxs_iocbq_t *)&sbp->iocbq;
6541 		iocbq->port = port;
6542 		iocbq->channel = cp;
6543 		iocbq->node = node;
6544 
6545 		sbp->pkt_flags &= ~PACKET_ULP_OWNED;
6546 
6547 		if (node) {
6548 			sbp->node = node;
6549 			sbp->did  = node->nlp_DID;
6550 		}
6551 
6552 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
6553 
6554 		/* BLS ACC Response */
6555 		wqe = &iocbq->wqe;
6556 		bzero((void *)wqe, sizeof (emlxs_wqe_t));
6557 
6558 		iocbq->iocb.ULPCOMMAND = CMD_XMIT_BLS_RSP64_CX;
6559 		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
6560 		wqe->CmdType = WQE_TYPE_GEN;
6561 
6562 		wqe->un.BlsRsp.Payload0 = 0x80;
6563 		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
6564 
6565 		wqe->un.BlsRsp.OXId = fchdr.ox_id;
6566 		wqe->un.BlsRsp.RXId = fchdr.rx_id;
6567 
6568 		wqe->un.BlsRsp.SeqCntLow = 0;
6569 		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
6570 
6571 		wqe->un.BlsRsp.XO = ((fchdr.f_ctl & F_CTL_XCHG_CONTEXT)? 1:0);
6572 		wqe->un.BlsRsp.AR = 0;
6573 
6574 		rpip = EMLXS_NODE_TO_RPI(port, node);
6575 
6576 		if (rpip) {
6577 			wqe->ContextType = WQE_RPI_CONTEXT;
6578 			wqe->ContextTag = rpip->RPI;
6579 		} else {
6580 			wqe->ContextType = WQE_VPI_CONTEXT;
6581 			wqe->ContextTag = port->vpip->VPI;
6582 
6583 			rpip = emlxs_rpi_reserve_notify(port, fchdr.s_id, 0);
6584 
6585 			if (!rpip) {
6586 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6587 				    "RQ ENTRY: %s: Unable to alloc "
6588 				    "reserved RPI. Dropping...",
6589 				    label);
6590 
6591 				goto done;
6592 			}
6593 
6594 			/* Store the reserved rpi */
6595 			wqe->CmdSpecific = rpip->RPI;
6596 
6597 			wqe->un.BlsRsp.RemoteId = fchdr.s_id;
6598 			wqe->un.BlsRsp.LocalId = fchdr.d_id;
6599 		}
6600 
6601 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6602 			wqe->CCPE = 1;
6603 			wqe->CCP = fchdr.rsvd;
6604 		}
6605 
6606 		/* Allocate an exchange for this command */
6607 		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
6608 		    EMLXS_XRI_SOL_BLS_TYPE);
6609 
6610 		if (!xrip) {
6611 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6612 			    "RQ ENTRY: %s: Unable to alloc XRI. Dropping...",
6613 			    label);
6614 			goto done;
6615 		}
6616 
6617 		wqe->XRITag = xrip->XRI;
6618 		wqe->Class = CLASS3;
6619 		wqe->RequestTag = xrip->iotag;
6620 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
6621 
6622 		sbp->ticks = hba->timer_tics + 30;
6623 
6624 		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
6625 
6626 		/* The temporary iocbq has been freed already */
6627 		iocbq = NULL;
6628 
6629 		break;
6630 
6631 	case 1: /* ELS */
6632 		cmd = *((uint32_t *)seq_mp->virt);
6633 		cmd &= ELS_CMD_MASK;
6634 
6635 		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED)) {
6636 			uint32_t dropit = 1;
6637 
6638 			/* Allow for P2P handshaking */
6639 			switch (cmd) {
6640 			case ELS_CMD_FLOGI:
6641 				dropit = 0;
6642 				break;
6643 
6644 			case ELS_CMD_PLOGI:
6645 			case ELS_CMD_PRLI:
6646 				if (hba->flag & FC_PT_TO_PT) {
6647 					dropit = 0;
6648 				}
6649 				break;
6650 			}
6651 
6652 			if (dropit) {
6653 				EMLXS_MSGF(EMLXS_CONTEXT,
6654 				    &emlxs_sli_detail_msg,
6655 				    "RQ ENTRY: %s: Port not yet enabled. "
6656 				    "Dropping...",
6657 				    label);
6658 				goto done;
6659 			}
6660 		}
6661 
6662 		rpip = NULL;
6663 
6664 		if (cmd != ELS_CMD_LOGO) {
6665 			rpip = EMLXS_NODE_TO_RPI(port, node);
6666 		}
6667 
6668 		if (!rpip) {
6669 			/* Use the fabric rpi */
6670 			rpip = port->vpip->fabric_rpip;
6671 		}
6672 
6673 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6674 		    EMLXS_XRI_UNSOL_ELS_TYPE, fchdr.ox_id);
6675 
6676 		if (!xrip) {
6677 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6678 			    "RQ ENTRY: %s: Out of exchange "
6679 			    "resources.  Dropping...",
6680 			    label);
6681 
6682 			goto done;
6683 		}
6684 
6685 		/* Build CMD_RCV_ELS64_CX */
6686 		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
6687 		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
6688 		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
6689 		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
6690 		iocb->ULPBDECOUNT = 1;
6691 
6692 		iocb->un.rcvels64.remoteID = fchdr.s_id;
6693 		iocb->un.rcvels64.parmRo = fchdr.d_id;
6694 
6695 		iocb->ULPPU = 0x3;
6696 		iocb->ULPCONTEXT = xrip->XRI;
6697 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6698 		iocb->ULPCLASS = CLASS3;
6699 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6700 
6701 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6702 		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6703 		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6704 
6705 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6706 			iocb->unsli3.ext_rcv.ccpe = 1;
6707 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6708 		}
6709 
6710 		if (port->mode == MODE_INITIATOR) {
6711 			(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
6712 			    iocbq, seq_mp, seq_len);
6713 		}
6714 #ifdef SFCT_SUPPORT
6715 		else if (port->mode == MODE_TARGET) {
6716 			(void) emlxs_fct_handle_unsol_els(port, iocbq->channel,
6717 			    iocbq, seq_mp, seq_len);
6718 		}
6719 #endif /* SFCT_SUPPORT */
6720 		break;
6721 
6722 #ifdef SFCT_SUPPORT
6723 	case 8: /* FCT */
6724 		if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
6725 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6726 			    "RQ ENTRY: %s: Port not yet enabled. "
6727 			    "Dropping...",
6728 			    label);
6729 
6730 			goto done;
6731 		}
6732 
6733 		rpip = EMLXS_NODE_TO_RPI(port, node);
6734 
6735 		if (!rpip) {
6736 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6737 			    "RQ ENTRY: %s: Port not logged in. "
6738 			    "Dropping...",
6739 			    label);
6740 
6741 			goto done;
6742 		}
6743 
6744 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6745 		    EMLXS_XRI_UNSOL_FCP_TYPE, fchdr.ox_id);
6746 
6747 		if (!xrip) {
6748 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6749 			    "RQ ENTRY: %s: Out of exchange "
6750 			    "resources.  Dropping...",
6751 			    label);
6752 
6753 			goto done;
6754 		}
6755 
6756 		/* Build CMD_RCV_SEQUENCE64_CX */
6757 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6758 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
6759 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
6760 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6761 		iocb->ULPBDECOUNT = 1;
6762 
6763 		iocb->ULPPU = 0x3;
6764 		iocb->ULPCONTEXT = xrip->XRI;
6765 		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6766 		iocb->ULPCLASS = CLASS3;
6767 		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6768 
6769 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6770 		iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
6771 		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6772 
6773 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6774 			iocb->unsli3.ext_rcv.ccpe = 1;
6775 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6776 		}
6777 
6778 		/* pass xrip to FCT in the iocbq */
6779 		iocbq->sbp = xrip;
6780 
6781 		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
6782 		    seq_mp, seq_len);
6783 		break;
6784 #endif /* SFCT_SUPPORT */
6785 
6786 	case 0x20: /* CT */
6787 		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
6788 		    !(hba->flag & FC_LOOPBACK_MODE)) {
6789 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6790 			    "RQ ENTRY: %s: Port not yet enabled. "
6791 			    "Dropping...",
6792 			    label);
6793 
6794 			goto done;
6795 		}
6796 
6797 		if (!node) {
6798 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6799 			    "RQ ENTRY: %s: Node not found (did=%x).  "
6800 			    "Dropping...",
6801 			    label, fchdr.d_id);
6802 
6803 			goto done;
6804 		}
6805 
6806 		rpip = EMLXS_NODE_TO_RPI(port, node);
6807 
6808 		if (!rpip) {
6809 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6810 			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%d).  "
6811 			    "Dropping...",
6812 			    label, fchdr.d_id, node->nlp_Rpi);
6813 
6814 			goto done;
6815 		}
6816 
6817 		xrip = emlxs_sli4_reserve_xri(port, rpip,
6818 		    EMLXS_XRI_UNSOL_CT_TYPE, fchdr.ox_id);
6819 
6820 		if (!xrip) {
6821 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6822 			    "RQ ENTRY: %s: Out of exchange "
6823 			    "resources.  Dropping...",
6824 			    label);
6825 
6826 			goto done;
6827 		}
6828 
6829 		/* Build CMD_RCV_SEQ64_CX */
6830 		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6831 		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
6832 		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
6833 		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6834 		iocb->ULPBDECOUNT = 1;
6835 
6836 		iocb->un.rcvseq64.xrsqRo = 0;
6837 		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
6838 		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
6839 		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
6840 		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
6841 
6842 		iocb->ULPPU = 0x3;
6843 		iocb->ULPCONTEXT = xrip->XRI;
6844 		iocb->ULPIOTAG = rpip->RPI;
6845 		iocb->ULPCLASS = CLASS3;
6846 		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
6847 
6848 		iocb->unsli3.ext_rcv.seq_len = seq_len;
6849 		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6850 
6851 		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6852 			iocb->unsli3.ext_rcv.ccpe = 1;
6853 			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6854 		}
6855 
6856 		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
6857 		    iocbq, seq_mp, seq_len);
6858 
6859 		break;
6860 	}
6861 
6862 	/* Sequence handled, no need to abort */
6863 	abort = 0;
6864 
6865 done:
6866 
6867 	if (!posted) {
6868 		emlxs_sli4_rq_post(port, hdr_rq->qid);
6869 	}
6870 
6871 	if (abort) {
6872 		/* Send ABTS for this exchange */
6873 		/* !!! Currently, we have no implementation for this !!! */
6874 		abort = 0;
6875 	}
6876 
6877 	/* Return memory resources to pools */
6878 	if (iocbq) {
6879 		if (iocbq->bp) {
6880 			emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
6881 			iocbq->bp = 0;
6882 		}
6883 
6884 		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6885 	}
6886 
6887 #ifdef FMA_SUPPORT
6888 	if (emlxs_fm_check_dma_handle(hba,
6889 	    hba->sli.sli4.slim2.dma_handle)
6890 	    != DDI_FM_OK) {
6891 		EMLXS_MSGF(EMLXS_CONTEXT,
6892 		    &emlxs_invalid_dma_handle_msg,
6893 		    "sli4_process_unsol_rcv: hdl=%p",
6894 		    hba->sli.sli4.slim2.dma_handle);
6895 
6896 		emlxs_thread_spawn(hba, emlxs_restart_thread,
6897 		    0, 0);
6898 	}
6899 #endif
6900 	return;
6901 
6902 } /* emlxs_sli4_process_unsol_rcv() */
6903 
6904 
6905 /*ARGSUSED*/
6906 static void
6907 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
6908     CQE_XRI_Abort_t *cqe)
6909 {
6910 	emlxs_port_t *port = &PPORT;
6911 	XRIobj_t *xrip;
6912 
6913 	mutex_enter(&EMLXS_FCTAB_LOCK);
6914 
6915 	xrip = emlxs_sli4_find_xri(port, cqe->XRI);
6916 	if (xrip == NULL) {
6917 		/* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg, */
6918 		/*    "CQ ENTRY: process xri aborted ignored");  */
6919 
6920 		mutex_exit(&EMLXS_FCTAB_LOCK);
6921 		return;
6922 	}
6923 
6924 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6925 	    "CQ ENTRY: XRI Aborted: xri=%d IA=%d EO=%d BR=%d",
6926 	    cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
6927 
6928 	if (!(xrip->flag & EMLXS_XRI_BUSY)) {
6929 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6930 		    "CQ ENTRY: XRI Aborted: xri=%d flag=%x. Bad state.",
6931 		    xrip->XRI, xrip->flag);
6932 
6933 		mutex_exit(&EMLXS_FCTAB_LOCK);
6934 		return;
6935 	}
6936 
6937 	/* Exchange is no longer busy on-chip, free it */
6938 	emlxs_sli4_free_xri(port, 0, xrip, 0);
6939 
6940 	mutex_exit(&EMLXS_FCTAB_LOCK);
6941 
6942 	return;
6943 
6944 } /* emlxs_sli4_process_xri_aborted () */
6945 
6946 
/*ARGSUSED*/
/*
 * Drain and dispatch every valid entry on one completion queue.
 *
 * EMLXS_PORT_LOCK must be held on entry and is held again on exit, but
 * it is dropped around the dispatch of each individual CQE (the
 * handlers may sleep or take other locks).  The CQ doorbell is written
 * once at the end to release all consumed entries and re-arm the queue.
 */
static void
emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
{
	emlxs_port_t *port = &PPORT;
	CQE_u *cqe;
	CQE_u cq_entry;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	/* Start at the current host consumer index of the ring */
	cqe = (CQE_u *)cq->addr.virt;
	cqe += cq->host_index;

	/* Offset of this CQ's ring within the shared slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    cq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* Word 3 carries the valid bit; stop at the first invalid */
		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
		if (!(cq_entry.word[3] & CQE_VALID)) {
			break;
		}

		/* Snapshot the rest of the entry in host byte order */
		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);

#ifdef	DEBUG_CQE
		emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 6, 0);
#endif /* DEBUG_CQE */
		num_entries++;
		/* Clear the valid word so this slot is not re-processed */
		cqe->word[3] = 0;

		/* Advance the consumer index, wrapping at end of ring */
		cq->host_index++;
		if (cq->host_index >= cq->max_index) {
			cq->host_index = 0;
			cqe = (CQE_u *)cq->addr.virt;
		} else {
			cqe++;
		}
		/* Drop the port lock while dispatching this entry */
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Now handle specific cq type */
		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
			/* Mailbox / async-event CQ */
			if (cq_entry.cqAsyncEntry.async_evt) {
				emlxs_sli4_process_async_event(hba,
				    (CQE_ASYNC_t *)&cq_entry);
			} else {
				emlxs_sli4_process_mbox_event(hba,
				    (CQE_MBOX_t *)&cq_entry);
			}
		} else { /* EMLXS_CQ_TYPE_GROUP2 */
			/* I/O completion CQ; dispatch on the CQE code */
			switch (cq_entry.cqCmplEntry.Code) {
			case CQE_TYPE_WQ_COMPLETION:
				/* Out-of-range tags get special handling */
				if (cq_entry.cqCmplEntry.RequestTag <
				    hba->max_iotag) {
					emlxs_sli4_process_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				} else {
					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				}
				break;
			case CQE_TYPE_RELEASE_WQE:
				emlxs_sli4_process_release_wqe(hba, cq,
				    (CQE_RelWQ_t *)&cq_entry);
				break;
			case CQE_TYPE_UNSOL_RCV:
			case CQE_TYPE_UNSOL_RCV_V1:
				emlxs_sli4_process_unsol_rcv(hba, cq,
				    (CQE_UnsolRcv_t *)&cq_entry);
				break;
			case CQE_TYPE_XRI_ABORTED:
				emlxs_sli4_process_xri_aborted(hba, cq,
				    (CQE_XRI_Abort_t *)&cq_entry);
				break;
			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "Invalid CQ entry %d: %08x %08x %08x %08x",
				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
				    cq_entry.word[1], cq_entry.word[2],
				    cq_entry.word[3]);
				break;
			}
		}

		/* Re-acquire before examining the next entry */
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Number of times this routine gets called for this CQ */
	cq->isr_count++;

	/* num_entries is the number of CQEs we process in this specific CQ */
	cq->num_proc += num_entries;
	if (cq->max_proc < num_entries)
		cq->max_proc = num_entries;

	/* Release consumed entries to the hardware and re-arm the CQ */
	emlxs_sli4_write_cqdb(hba, cq->qid, num_entries, B_TRUE);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_cq() */
7055 
7056 
/*ARGSUSED*/
/*
 * Drain one event queue: for each valid EQE, locate the completion
 * queue it references and process that CQ.  Finally trigger any
 * channel threads that requested it and re-arm the EQ doorbell.
 *
 * EMLXS_PORT_LOCK must be held on entry and exit.  intr_busy_cnt is
 * incremented for the duration so teardown paths (e.g. hba_kill) can
 * wait for interrupt processing to drain.
 */
static void
emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *ptr;
	CHANNEL *cp;
	EQE_u eqe;
	uint32_t i;
	uint16_t cqi;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	hba->intr_busy_cnt ++;

	/* Start at the current host consumer index of the EQ ring */
	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* Offset of this EQ's ring within the shared slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* EQEs are single words; stop at the first invalid one */
		eqe.word = *ptr;
		eqe.word = BE_SWAP32(eqe.word);

		if (!(eqe.word & EQE_VALID)) {
			break;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE00: %08x", eqe.word);
#endif /* DEBUG_FASTPATH */

		/* Clear the entry and advance, wrapping at end of ring */
		*ptr = 0;
		num_entries++;
		eq->host_index++;
		if (eq->host_index >= eq->max_index) {
			eq->host_index = 0;
			ptr = eq->addr.virt;
		} else {
			ptr++;
		}

		/* Map the hardware CQ id to our cq[] array index */
		cqi = emlxs_sli4_cqid_to_index(hba, eqe.entry.CQId);

		/* Verify CQ index */
		if (cqi == 0xffff) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "EQE: Invalid CQid: %d. Dropping...",
			    eqe.entry.CQId);
			continue;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE: CQIndex:%x cqid:%x", cqi, eqe.entry.CQId);
#endif /* DEBUG_FASTPATH */

		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[cqi]);
	}

	/* Number of times the ISR for this EQ gets called */
	eq->isr_count++;

	/* num_entries is the number of EQEs we process in this specific ISR */
	eq->num_proc += num_entries;
	if (eq->max_proc < num_entries) {
		eq->max_proc = num_entries;
	}

	/* Kick any channel threads that deferred work to us */
	if (num_entries != 0) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

	/* Release consumed entries to the hardware and re-arm the EQ */
	emlxs_sli4_write_eqdb(hba, eq->qid, num_entries, B_TRUE);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

	hba->intr_busy_cnt --;

} /* emlxs_sli4_process_eq() */
7153 
7154 
7155 #ifdef MSI_SUPPORT
7156 /*ARGSUSED*/
7157 static uint32_t
7158 emlxs_sli4_msi_intr(char *arg1, char *arg2)
7159 {
7160 	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
7161 #ifdef DEBUG_FASTPATH
7162 	emlxs_port_t *port = &PPORT;
7163 #endif /* DEBUG_FASTPATH */
7164 	uint16_t msgid;
7165 	int rc;
7166 
7167 #ifdef DEBUG_FASTPATH
7168 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7169 	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
7170 #endif /* DEBUG_FASTPATH */
7171 
7172 	/* Check for legacy interrupt handling */
7173 	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
7174 		rc = emlxs_sli4_intx_intr(arg1);
7175 		return (rc);
7176 	}
7177 
7178 	/* Get MSI message id */
7179 	msgid = (uint16_t)((unsigned long)arg2);
7180 
7181 	/* Validate the message id */
7182 	if (msgid >= hba->intr_count) {
7183 		msgid = 0;
7184 	}
7185 	mutex_enter(&EMLXS_PORT_LOCK);
7186 
7187 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7188 		mutex_exit(&EMLXS_PORT_LOCK);
7189 		return (DDI_INTR_UNCLAIMED);
7190 	}
7191 
7192 	/* The eq[] index == the MSI vector number */
7193 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
7194 
7195 	mutex_exit(&EMLXS_PORT_LOCK);
7196 	return (DDI_INTR_CLAIMED);
7197 
7198 } /* emlxs_sli4_msi_intr() */
7199 #endif /* MSI_SUPPORT */
7200 
7201 
7202 /*ARGSUSED*/
7203 static int
7204 emlxs_sli4_intx_intr(char *arg)
7205 {
7206 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
7207 #ifdef DEBUG_FASTPATH
7208 	emlxs_port_t *port = &PPORT;
7209 #endif /* DEBUG_FASTPATH */
7210 
7211 #ifdef DEBUG_FASTPATH
7212 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7213 	    "intxINTR arg:%p", arg);
7214 #endif /* DEBUG_FASTPATH */
7215 
7216 	mutex_enter(&EMLXS_PORT_LOCK);
7217 
7218 	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7219 		mutex_exit(&EMLXS_PORT_LOCK);
7220 		return (DDI_INTR_UNCLAIMED);
7221 	}
7222 
7223 	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
7224 
7225 	mutex_exit(&EMLXS_PORT_LOCK);
7226 	return (DDI_INTR_CLAIMED);
7227 } /* emlxs_sli4_intx_intr() */
7228 
7229 
/*
 * Place the adapter in the interlocked (killed) state.
 *
 * Waits (up to ~1 second, polling every 100us) for any in-flight
 * mailbox command and interrupt processing to drain before setting
 * FC_INTERLOCKED.  If the adapter stays busy, the kill is abandoned
 * and the flag is left clear.
 */
static void
emlxs_sli4_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t j;

	mutex_enter(&EMLXS_PORT_LOCK);
	/* Already interlocked: just record the killed state and return */
	if (hba->flag & FC_INTERLOCKED) {
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/*
	 * Poll for the mailbox and interrupt paths to go idle.  The
	 * lock is dropped during each delay so those paths can make
	 * progress; the condition is re-checked under the lock.
	 */
	j = 0;
	while (j++ < 10000) {
		if ((hba->mbox_queue_flag == 0) &&
		    (hba->intr_busy_cnt == 0)) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		BUSYWAIT_US(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Timed out: adapter still busy, abandon the kill */
	if ((hba->mbox_queue_flag != 0) || (hba->intr_busy_cnt > 0)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Board kill failed. Adapter busy, %d, %d.",
		    hba->mbox_queue_flag, hba->intr_busy_cnt);
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_sli4_hba_kill() */
7272 
7273 
7274 extern void
7275 emlxs_sli4_hba_reset_all(emlxs_hba_t *hba, uint32_t flag)
7276 {
7277 	emlxs_port_t *port = &PPORT;
7278 	uint32_t value;
7279 
7280 	mutex_enter(&EMLXS_PORT_LOCK);
7281 
7282 	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_2) {
7283 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7284 		    "Reset All failed. Invalid Operation.");
7285 		mutex_exit(&EMLXS_PORT_LOCK);
7286 		return;
7287 	}
7288 
7289 	/* Issue a Firmware Reset All Request */
7290 	if (flag) {
7291 		value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL | SLI_PHYDEV_DD;
7292 	} else {
7293 		value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL;
7294 	}
7295 
7296 	ddi_put32(hba->sli.sli4.bar0_acc_handle,
7297 	    hba->sli.sli4.PHYSDEV_reg_addr, value);
7298 
7299 	mutex_exit(&EMLXS_PORT_LOCK);
7300 
7301 } /* emlxs_sli4_hba_reset_all() */
7302 
7303 
7304 static void
7305 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
7306 {
7307 	emlxs_config_t *cfg = &CFG;
7308 	int i;
7309 	int num_cq;
7310 
7311 	hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
7312 
7313 	num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
7314 	    EMLXS_CQ_OFFSET_WQ;
7315 
7316 	/* ARM EQ / CQs */
7317 	for (i = 0; i < num_cq; i++) {
7318 		emlxs_sli4_write_cqdb(hba, hba->sli.sli4.cq[i].qid, 0, B_TRUE);
7319 	}
7320 
7321 	for (i = 0; i < hba->intr_count; i++) {
7322 		emlxs_sli4_write_eqdb(hba, hba->sli.sli4.eq[i].qid, 0, B_TRUE);
7323 	}
7324 } /* emlxs_sli4_enable_intr() */
7325 
7326 
7327 static void
7328 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
7329 {
7330 	if (att) {
7331 		return;
7332 	}
7333 
7334 	hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7335 
7336 	/* Short of reset, we cannot disable interrupts */
7337 } /* emlxs_sli4_disable_intr() */
7338 
/*
 * Tear down all SLI4 resources backed by the slim2 DMA area: the XRI
 * table and its SGL buffers, every EQ/CQ/WQ/RXQ/RQ descriptor and its
 * lock, the MQ, and finally the slim2 buffer itself.  Safe to call
 * when already freed (detected via slim2.virt == NULL).
 */
static void
emlxs_sli4_resource_free(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	MBUF_INFO	*buf_info;
	uint32_t	i;

	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt == 0) {
		/* Already free */
		return;
	}

	emlxs_fcf_fini(hba);

	/*
	 * The header template memory lives inside slim2, so only the
	 * descriptor is cleared here; the backing DMA is freed below.
	 */
	buf_info = &hba->sli.sli4.HeaderTmplate;
	if (buf_info->virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	if (hba->sli.sli4.XRIp) {
		XRIobj_t	*xrip;

		/* The in-use list should be empty (self-referencing) now */
		if ((hba->sli.sli4.XRIinuse_f !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
		    (hba->sli.sli4.XRIinuse_b !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "XRIs in use during free!: %p %p != %p\n",
			    hba->sli.sli4.XRIinuse_f,
			    hba->sli.sli4.XRIinuse_b,
			    &hba->sli.sli4.XRIinuse_f);
		}

		/* Return each XRI's scatter-gather list to its pool */
		xrip = hba->sli.sli4.XRIp;
		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
			xrip->XRI = emlxs_sli4_index_to_xri(hba, i);

			if (xrip->XRI != 0)
				emlxs_mem_put(hba, xrip->SGSeg, xrip->SGList);

			xrip++;
		}

		kmem_free(hba->sli.sli4.XRIp,
		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
		hba->sli.sli4.XRIp = NULL;

		/* Reset the free list to empty (self-referencing) */
		hba->sli.sli4.XRIfree_f =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.xrif_count = 0;
	}

	/* Destroy per-queue locks and invalidate all queue ids */
	for (i = 0; i < hba->intr_count; i++) {
		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
		hba->sli.sli4.eq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_CQS; i++) {
		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
		hba->sli.sli4.cq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_WQS; i++) {
		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
		hba->sli.sli4.wq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
	}
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		mutex_destroy(&hba->sli.sli4.rq[i].lock);
		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
		hba->sli.sli4.rq[i].qid = 0xffff;
	}

	/* Free the MQ */
	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));

	/* Free the slim2 DMA area that backed all of the above */
	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt) {
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	/* GPIO lock */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
		mutex_destroy(&hba->gpio_lock);

} /* emlxs_sli4_resource_free() */
7432 
7433 static int
7434 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
7435 {
7436 	emlxs_port_t	*port = &PPORT;
7437 	emlxs_config_t	*cfg = &CFG;
7438 	MBUF_INFO	*buf_info;
7439 	int		num_eq;
7440 	int		num_wq;
7441 	uint16_t	i;
7442 	uint32_t	j;
7443 	uint32_t	k;
7444 	uint16_t	cq_depth;
7445 	uint32_t	cq_size;
7446 	uint32_t	word;
7447 	XRIobj_t	*xrip;
7448 	RQE_t		*rqe;
7449 	MBUF_INFO	*rqb;
7450 	uint64_t	phys;
7451 	uint64_t	tmp_phys;
7452 	char		*virt;
7453 	char		*tmp_virt;
7454 	void		*data_handle;
7455 	void		*dma_handle;
7456 	int32_t		size;
7457 	off_t		offset;
7458 	uint32_t	count = 0;
7459 	uint32_t	hddr_size = 0;
7460 	uint32_t	align;
7461 	uint32_t	iotag;
7462 	uint32_t	mseg;
7463 
7464 	buf_info = &hba->sli.sli4.slim2;
7465 	if (buf_info->virt) {
7466 		/* Already allocated */
7467 		return (0);
7468 	}
7469 
7470 	emlxs_fcf_init(hba);
7471 
7472 	switch (hba->sli.sli4.param.CQV) {
7473 	case 0:
7474 		cq_depth = CQ_DEPTH;
7475 		break;
7476 	case 2:
7477 	default:
7478 		cq_depth = CQ_DEPTH_V2;
7479 		break;
7480 	}
7481 	cq_size = (cq_depth * CQE_SIZE);
7482 
7483 	/* EQs - 1 per Interrupt vector */
7484 	num_eq = hba->intr_count;
7485 
7486 	/* CQs  - number of WQs + 1 for RQs + 1 for mbox/async events */
7487 	num_wq = cfg[CFG_NUM_WQ].current * num_eq;
7488 
7489 	/* Calculate total dmable memory we need */
7490 	/* WARNING: make sure each section is aligned on 4K boundary */
7491 
7492 	/* EQ */
7493 	count += num_eq * 4096;
7494 
7495 	/* CQ */
7496 	count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;
7497 
7498 	/* WQ */
7499 	count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
7500 
7501 	/* MQ */
7502 	count +=  EMLXS_MAX_MQS * 4096;
7503 
7504 	/* RQ */
7505 	count +=  EMLXS_MAX_RQS * 4096;
7506 
7507 	/* RQB/E */
7508 	count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
7509 	count += (4096 - (count%4096)); /* Ensure 4K alignment */
7510 
7511 	/* RPI Header Templates */
7512 	if (hba->sli.sli4.param.HDRR) {
7513 		/* Bytes per extent */
7514 		j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);
7515 
7516 		/* Pages required per extent (page == 4096 bytes) */
7517 		k = (j/4096) + ((j%4096)? 1:0);
7518 
7519 		/* Total size */
7520 		hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);
7521 
7522 		count += hddr_size;
7523 	}
7524 
7525 	/* Allocate slim2 for SLI4 */
7526 	buf_info = &hba->sli.sli4.slim2;
7527 	buf_info->size = count;
7528 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7529 	buf_info->align = ddi_ptob(hba->dip, 1L);
7530 
7531 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7532 	    "Allocating memory for slim2: %d", count);
7533 
7534 	(void) emlxs_mem_alloc(hba, buf_info);
7535 
7536 	if (buf_info->virt == NULL) {
7537 		EMLXS_MSGF(EMLXS_CONTEXT,
7538 		    &emlxs_init_failed_msg,
7539 		    "Unable to allocate internal memory for SLI4: %d",
7540 		    count);
7541 		goto failed;
7542 	}
7543 	bzero(buf_info->virt, buf_info->size);
7544 	EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
7545 	    buf_info->size, DDI_DMA_SYNC_FORDEV);
7546 
7547 	/* Assign memory to Head Template, EQ, CQ, WQ, RQ and MQ */
7548 	data_handle = buf_info->data_handle;
7549 	dma_handle = buf_info->dma_handle;
7550 	phys = buf_info->phys;
7551 	virt = (char *)buf_info->virt;
7552 
7553 	/* Allocate space for queues */
7554 
7555 	/* EQ */
7556 	size = 4096;
7557 	for (i = 0; i < num_eq; i++) {
7558 		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7559 
7560 		buf_info = &hba->sli.sli4.eq[i].addr;
7561 		buf_info->size = size;
7562 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7563 		buf_info->align = ddi_ptob(hba->dip, 1L);
7564 		buf_info->phys = phys;
7565 		buf_info->virt = (void *)virt;
7566 		buf_info->data_handle = data_handle;
7567 		buf_info->dma_handle = dma_handle;
7568 
7569 		phys += size;
7570 		virt += size;
7571 
7572 		hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
7573 		hba->sli.sli4.eq[i].qid = 0xffff;
7574 
7575 		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, NULL,
7576 		    MUTEX_DRIVER, NULL);
7577 	}
7578 
7579 
7580 	/* CQ */
7581 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7582 		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7583 
7584 		buf_info = &hba->sli.sli4.cq[i].addr;
7585 		buf_info->size = cq_size;
7586 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7587 		buf_info->align = ddi_ptob(hba->dip, 1L);
7588 		buf_info->phys = phys;
7589 		buf_info->virt = (void *)virt;
7590 		buf_info->data_handle = data_handle;
7591 		buf_info->dma_handle = dma_handle;
7592 
7593 		phys += cq_size;
7594 		virt += cq_size;
7595 
7596 		hba->sli.sli4.cq[i].max_index = cq_depth;
7597 		hba->sli.sli4.cq[i].qid = 0xffff;
7598 	}
7599 
7600 
7601 	/* WQ */
7602 	size = 4096 * EMLXS_NUM_WQ_PAGES;
7603 	for (i = 0; i < num_wq; i++) {
7604 		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
7605 
7606 		buf_info = &hba->sli.sli4.wq[i].addr;
7607 		buf_info->size = size;
7608 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7609 		buf_info->align = ddi_ptob(hba->dip, 1L);
7610 		buf_info->phys = phys;
7611 		buf_info->virt = (void *)virt;
7612 		buf_info->data_handle = data_handle;
7613 		buf_info->dma_handle = dma_handle;
7614 
7615 		phys += size;
7616 		virt += size;
7617 
7618 		hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
7619 		hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
7620 		hba->sli.sli4.wq[i].qid = 0xFFFF;
7621 	}
7622 
7623 
7624 	/* MQ */
7625 	size = 4096;
7626 	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7627 
7628 	buf_info = &hba->sli.sli4.mq.addr;
7629 	buf_info->size = size;
7630 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7631 	buf_info->align = ddi_ptob(hba->dip, 1L);
7632 	buf_info->phys = phys;
7633 	buf_info->virt = (void *)virt;
7634 	buf_info->data_handle = data_handle;
7635 	buf_info->dma_handle = dma_handle;
7636 
7637 	phys += size;
7638 	virt += size;
7639 
7640 	hba->sli.sli4.mq.max_index = MQ_DEPTH;
7641 
7642 
7643 	/* RXQ */
7644 	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7645 		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7646 
7647 		mutex_init(&hba->sli.sli4.rxq[i].lock, NULL, MUTEX_DRIVER,
7648 		    NULL);
7649 	}
7650 
7651 
7652 	/* RQ */
7653 	size = 4096;
7654 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7655 		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7656 
7657 		buf_info = &hba->sli.sli4.rq[i].addr;
7658 		buf_info->size = size;
7659 		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7660 		buf_info->align = ddi_ptob(hba->dip, 1L);
7661 		buf_info->phys = phys;
7662 		buf_info->virt = (void *)virt;
7663 		buf_info->data_handle = data_handle;
7664 		buf_info->dma_handle = dma_handle;
7665 
7666 		phys += size;
7667 		virt += size;
7668 
7669 		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
7670 		hba->sli.sli4.rq[i].qid = 0xFFFF;
7671 
7672 		mutex_init(&hba->sli.sli4.rq[i].lock, NULL, MUTEX_DRIVER, NULL);
7673 	}
7674 
7675 
7676 	/* RQB/E */
7677 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7678 		size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
7679 		tmp_phys = phys;
7680 		tmp_virt = virt;
7681 
7682 		/* Initialize the RQEs */
7683 		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
7684 		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
7685 			phys = tmp_phys;
7686 			virt = tmp_virt;
7687 			for (k = 0; k < RQB_COUNT; k++) {
7688 				word = PADDR_HI(phys);
7689 				rqe->AddrHi = BE_SWAP32(word);
7690 
7691 				word = PADDR_LO(phys);
7692 				rqe->AddrLo = BE_SWAP32(word);
7693 
7694 				rqb = &hba->sli.sli4.rq[i].
7695 				    rqb[k + (j * RQB_COUNT)];
7696 				rqb->size = size;
7697 				rqb->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
7698 				rqb->align = ddi_ptob(hba->dip, 1L);
7699 				rqb->phys = phys;
7700 				rqb->virt = (void *)virt;
7701 				rqb->data_handle = data_handle;
7702 				rqb->dma_handle = dma_handle;
7703 
7704 				phys += size;
7705 				virt += size;
7706 #ifdef DEBUG_RQE
7707 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7708 				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p iotag=%d",
7709 				    i, j, k, mp, mp->tag);
7710 #endif /* DEBUG_RQE */
7711 
7712 				rqe++;
7713 			}
7714 		}
7715 
7716 		offset = (off_t)((uint64_t)((unsigned long)
7717 		    hba->sli.sli4.rq[i].addr.virt) -
7718 		    (uint64_t)((unsigned long)
7719 		    hba->sli.sli4.slim2.virt));
7720 
7721 		/* Sync the RQ buffer list */
7722 		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
7723 		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
7724 	}
7725 
7726 	/* 4K Alignment */
7727 	align = (4096 - (phys%4096));
7728 	phys += align;
7729 	virt += align;
7730 
7731 	/* RPI Header Templates */
7732 	if (hba->sli.sli4.param.HDRR) {
7733 		buf_info = &hba->sli.sli4.HeaderTmplate;
7734 		bzero(buf_info, sizeof (MBUF_INFO));
7735 		buf_info->size = hddr_size;
7736 		buf_info->flags = FC_MBUF_DMA;
7737 		buf_info->align = ddi_ptob(hba->dip, 1L);
7738 		buf_info->phys = phys;
7739 		buf_info->virt = (void *)virt;
7740 		buf_info->data_handle = data_handle;
7741 		buf_info->dma_handle = dma_handle;
7742 	}
7743 
7744 	/* SGL */
7745 
7746 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7747 	    "Allocating memory for %d SGLs: %d/%d",
7748 	    hba->sli.sli4.XRICount, sizeof (XRIobj_t), size);
7749 
7750 	/* Initialize double linked lists */
7751 	hba->sli.sli4.XRIinuse_f =
7752 	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7753 	hba->sli.sli4.XRIinuse_b =
7754 	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7755 	hba->sli.sli4.xria_count = 0;
7756 
7757 	hba->sli.sli4.XRIfree_f =
7758 	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7759 	hba->sli.sli4.XRIfree_b =
7760 	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7761 	hba->sli.sli4.xrif_count = 0;
7762 
7763 	switch (hba->sli.sli4.mem_sgl_size) {
7764 	case 1024:
7765 		mseg = MEM_SGL1K;
7766 		break;
7767 	case 2048:
7768 		mseg = MEM_SGL2K;
7769 		break;
7770 	case 4096:
7771 		mseg = MEM_SGL4K;
7772 		break;
7773 	default:
7774 		EMLXS_MSGF(EMLXS_CONTEXT,
7775 		    &emlxs_init_failed_msg,
7776 		    "Unsupported SGL Size: %d", hba->sli.sli4.mem_sgl_size);
7777 		goto failed;
7778 	}
7779 
7780 	hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
7781 	    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
7782 
7783 	xrip = hba->sli.sli4.XRIp;
7784 	iotag = 1;
7785 
7786 	for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7787 		xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7788 
7789 		/* We don't use XRI==0, since it also represents an */
7790 		/* uninitialized exchange */
7791 		if (xrip->XRI == 0) {
7792 			xrip++;
7793 			continue;
7794 		}
7795 
7796 		xrip->iotag = iotag++;
7797 		xrip->sge_count =
7798 		    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
7799 
7800 		/* Add xrip to end of free list */
7801 		xrip->_b = hba->sli.sli4.XRIfree_b;
7802 		hba->sli.sli4.XRIfree_b->_f = xrip;
7803 		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7804 		hba->sli.sli4.XRIfree_b = xrip;
7805 		hba->sli.sli4.xrif_count++;
7806 
7807 		/* Allocate SGL for this xrip */
7808 		xrip->SGSeg = mseg;
7809 		xrip->SGList = emlxs_mem_get(hba, xrip->SGSeg);
7810 
7811 		if (xrip->SGList == NULL) {
7812 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
7813 			    "Unable to allocate memory for SGL %d", i);
7814 			goto failed;
7815 		}
7816 
7817 		EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
7818 		    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
7819 
7820 		xrip++;
7821 	}
7822 
7823 	/* GPIO lock */
7824 	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
7825 		mutex_init(&hba->gpio_lock, NULL, MUTEX_DRIVER, NULL);
7826 
7827 #ifdef FMA_SUPPORT
7828 	if (hba->sli.sli4.slim2.dma_handle) {
7829 		if (emlxs_fm_check_dma_handle(hba,
7830 		    hba->sli.sli4.slim2.dma_handle)
7831 		    != DDI_FM_OK) {
7832 			EMLXS_MSGF(EMLXS_CONTEXT,
7833 			    &emlxs_invalid_dma_handle_msg,
7834 			    "sli4_resource_alloc: hdl=%p",
7835 			    hba->sli.sli4.slim2.dma_handle);
7836 			goto failed;
7837 		}
7838 	}
7839 #endif /* FMA_SUPPORT */
7840 
7841 	return (0);
7842 
7843 failed:
7844 
7845 	(void) emlxs_sli4_resource_free(hba);
7846 	return (ENOMEM);
7847 
7848 } /* emlxs_sli4_resource_alloc */
7849 
7850 
7851 extern void
7852 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba)
7853 {
7854 	uint32_t i;
7855 	uint32_t num_wq;
7856 	emlxs_config_t	*cfg = &CFG;
7857 	clock_t		time;
7858 
7859 	/* EQ */
7860 	for (i = 0; i < hba->intr_count; i++) {
7861 		hba->sli.sli4.eq[i].num_proc = 0;
7862 		hba->sli.sli4.eq[i].max_proc = 0;
7863 		hba->sli.sli4.eq[i].isr_count = 0;
7864 	}
7865 	num_wq = cfg[CFG_NUM_WQ].current * hba->intr_count;
7866 	/* CQ */
7867 	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7868 		hba->sli.sli4.cq[i].num_proc = 0;
7869 		hba->sli.sli4.cq[i].max_proc = 0;
7870 		hba->sli.sli4.cq[i].isr_count = 0;
7871 	}
7872 	/* WQ */
7873 	for (i = 0; i < num_wq; i++) {
7874 		hba->sli.sli4.wq[i].num_proc = 0;
7875 		hba->sli.sli4.wq[i].num_busy = 0;
7876 	}
7877 	/* RQ */
7878 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
7879 		hba->sli.sli4.rq[i].num_proc = 0;
7880 	}
7881 	(void) drv_getparm(LBOLT, &time);
7882 	hba->sli.sli4.que_stat_timer = (uint32_t)time;
7883 
7884 } /* emlxs_sli4_zero_queue_stat */
7885 
7886 
/*
 * emlxs_sli4_reserve_xri
 *
 * Reserve an exchange (XRI) for later use without binding a packet to
 * it.  The XRI is taken from the head of the free list, marked
 * EMLXS_XRI_RESERVED, associated with rpip/rx_id, and moved to the end
 * of the inuse list.  The iotag's fc_table slot is intentionally left
 * empty; emlxs_sli4_register_xri() fills it in later.
 *
 * Returns the reserved XRIobj_t, or NULL if no free XRI is available
 * or the XRI's iotag slot is still occupied by an active packet.
 * Acquires and releases EMLXS_FCTAB_LOCK internally.
 */
extern XRIobj_t *
emlxs_sli4_reserve_xri(emlxs_port_t *port,  RPIobj_t *rpip, uint32_t type,
    uint16_t rx_id)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t	*xrip;
	uint16_t	iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* An empty list points back at its own anchor */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "Unable to reserve XRI. type=%d",
		    type);

		return (NULL);
	}

	iotag = xrip->iotag;

	/* iotag 0 is invalid; a non-NULL, non-stale fc_table entry */
	/* means the iotag still belongs to an outstanding command */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to reserve iotag. type=%d",
		    type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = EMLXS_XRI_RESERVED;
	xrip->sbp = NULL;

	/* Account the exchange against its RPI */
	xrip->rpip = rpip;
	xrip->rx_id = rx_id;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);
	return (xrip);

} /* emlxs_sli4_reserve_xri() */
7952 
7953 
/*
 * emlxs_sli4_unreserve_xri
 *
 * Return a reserved (but never registered) XRI to the free list.
 *
 *	xri	exchange id to release
 *	lock	nonzero if the caller does NOT already hold
 *		EMLXS_FCTAB_LOCK and this routine should acquire it
 *
 * Returns 0 if the XRI was freed (or was already free),
 * 1 if the XRI is active (not flagged reserved) and was left alone.
 */
extern uint32_t
emlxs_sli4_unreserve_xri(emlxs_port_t *port, uint16_t xri, uint32_t lock)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	xrip = emlxs_sli4_find_xri(port, xri);

	if (!xrip || xrip->state == XRI_STATE_FREE) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d already freed.", xri);
		return (0);
	}

	/* Flush this unsolicited ct command */
	if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
		(void) emlxs_flush_ct_event(port, xrip->rx_id);
	}

	/* Without the RESERVED flag the exchange carries an active */
	/* command; refuse to free it */
	if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d in use. type=%d",
		    xrip->XRI, xrip->type);
		return (1);
	}

	/* A reserved XRI should have no packet in its fc_table slot; */
	/* if one is there, drop it and fix the io_count */
	if (xrip->iotag &&
	    (hba->fc_table[xrip->iotag] != NULL) &&
	    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "sli4_unreserve_xri:%d  sbp dropped:%p type=%d",
		    xrip->XRI, hba->fc_table[xrip->iotag], xrip->type);

		hba->fc_table[xrip->iotag] = NULL;
		hba->io_count--;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "sli4_unreserve_xri:%d unreserved. type=%d",
	    xrip->XRI, xrip->type);

	xrip->state = XRI_STATE_FREE;
	xrip->type = 0;

	/* Drop the RPI reference(s) taken at reserve time */
	if (xrip->rpip) {
		xrip->rpip->xri_count--;
		xrip->rpip = NULL;
	}

	if (xrip->reserved_rpip) {
		xrip->reserved_rpip->xri_count--;
		xrip->reserved_rpip = NULL;
	}

	/* Take it off inuse list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xria_count--;

	/* Add it to end of free list */
	xrip->_b = hba->sli.sli4.XRIfree_b;
	hba->sli.sli4.XRIfree_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b = xrip;
	hba->sli.sli4.xrif_count++;

	if (lock) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
	}

	return (0);

} /* emlxs_sli4_unreserve_xri() */
8041 
8042 
/*
 * emlxs_sli4_register_xri
 *
 * Bind a packet (sbp) to a previously reserved XRI: install the packet
 * in fc_table[iotag], clear the EMLXS_XRI_RESERVED flag, and, if the
 * exchange was reserved under the temporary FABRIC_RPI, migrate it to
 * the node's real RPI now that one may be registered for 'did'.
 *
 * Returns the bound XRIobj_t, or NULL if the XRI cannot be found, is
 * not in the reserved state, or its iotag slot is already occupied.
 * Acquires and releases EMLXS_FCTAB_LOCK internally.
 */
XRIobj_t *
emlxs_sli4_register_xri(emlxs_port_t *port, emlxs_buf_t *sbp, uint16_t xri,
    uint32_t did)
{
	emlxs_hba_t *hba = HBA;
	uint16_t	iotag;
	XRIobj_t	*xrip;
	emlxs_node_t	*node;
	RPIobj_t	*rpip;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Prefer the XRI already attached to the packet, if any */
	xrip = sbp->xrip;
	if (!xrip) {
		xrip = emlxs_sli4_find_xri(port, xri);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "sli4_register_xri:%d XRI not found.", xri);

			mutex_exit(&EMLXS_FCTAB_LOCK);
			return (NULL);
		}
	}

	/* Only a reserved, allocated exchange may be registered */
	if ((xrip->state == XRI_STATE_FREE) ||
	    !(xrip->flag & EMLXS_XRI_RESERVED)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid XRI. xrip=%p "
		    "state=%x flag=%x",
		    xrip->XRI, xrip, xrip->state, xrip->flag);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag slot must be empty (or stale) before we claim it */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid fc_table entry. "
		    "iotag=%d entry=%p",
		    xrip->XRI, iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	/* Cross-link packet and exchange */
	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->flag &= ~EMLXS_XRI_RESERVED;
	xrip->sbp = sbp;

	/* If we did not have a registered RPI when we reserved */
	/* this exchange, check again now. */
	if (xrip->rpip && (xrip->rpip->RPI == FABRIC_RPI)) {
		node = emlxs_node_find_did(port, did, 1);
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip && (rpip->RPI != FABRIC_RPI)) {
			/* Move the XRI to the new RPI */
			xrip->rpip->xri_count--;
			xrip->rpip = rpip;
			rpip->xri_count++;
		}
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_register_xri() */
8123 
8124 
/* Performs both reserve and register functions for XRI */
/*
 * Takes a free XRI, binds sbp into fc_table by the XRI's iotag, marks
 * the exchange allocated (no RESERVED flag), charges it to rpip, and
 * moves it from the free list to the inuse list.
 *
 * Returns the allocated XRIobj_t, or NULL if the free list is empty
 * or the iotag slot is busy.  Acquires and releases EMLXS_FCTAB_LOCK.
 */
static XRIobj_t *
emlxs_sli4_alloc_xri(emlxs_port_t *port, emlxs_buf_t *sbp, RPIobj_t *rpip,
    uint32_t type)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t	*xrip;
	uint16_t	iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* An empty list points back at its own anchor */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		return (NULL);
	}

	/* Get the iotag by registering the packet */
	iotag = xrip->iotag;

	/* iotag 0 is invalid; a non-NULL, non-stale fc_table entry */
	/* means the iotag still belongs to an outstanding command */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to alloc iotag:(0x%x)(%p) type=%d",
		    iotag, hba->fc_table[iotag], type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	/* Cross-link packet and exchange */
	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = 0;
	xrip->sbp = sbp;

	/* Account the exchange against its RPI */
	xrip->rpip = rpip;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_alloc_xri() */
8194 
8195 
8196 /* EMLXS_FCTAB_LOCK must be held to enter */
8197 extern XRIobj_t *
8198 emlxs_sli4_find_xri(emlxs_port_t *port, uint16_t xri)
8199 {
8200 	emlxs_hba_t *hba = HBA;
8201 	XRIobj_t	*xrip;
8202 
8203 	xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
8204 	while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
8205 		if ((xrip->state >= XRI_STATE_ALLOCATED) &&
8206 		    (xrip->XRI == xri)) {
8207 			return (xrip);
8208 		}
8209 		xrip = xrip->_f;
8210 	}
8211 
8212 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8213 	    "Unable to find XRI x%x", xri);
8214 
8215 	return (NULL);
8216 
8217 } /* emlxs_sli4_find_xri() */
8218 
8219 
8220 
8221 
/*
 * emlxs_sli4_free_xri
 *
 * Release an exchange and/or detach and clean up its packet.
 *
 *	sbp	packet to detach; may be NULL (free the XRI only)
 *	xrip	exchange to return to the free list; may be NULL
 *		(clean up the packet only)
 *	lock	nonzero if this routine should acquire/release
 *		EMLXS_FCTAB_LOCK
 */
extern void
emlxs_sli4_free_xri(emlxs_port_t *port, emlxs_buf_t *sbp, XRIobj_t *xrip,
    uint8_t lock)
{
	emlxs_hba_t *hba = HBA;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	if (xrip) {
		/* Guard against a double free */
		if (xrip->state == XRI_STATE_FREE) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI:%x, Already freed. type=%d",
			    xrip->XRI, xrip->type);
			return;
		}

		/* Flush any pending unsolicited ct event for this exchange */
		if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
			(void) emlxs_flush_ct_event(port, xrip->rx_id);
		}

		/* Clear the iotag's fc_table slot if it holds a live packet */
		if (xrip->iotag &&
		    (hba->fc_table[xrip->iotag] != NULL) &&
		    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
			hba->fc_table[xrip->iotag] = NULL;
			hba->io_count--;
		}

		xrip->state = XRI_STATE_FREE;
		xrip->type  = 0;
		xrip->flag  = 0;

		/* Drop the RPI reference(s) */
		if (xrip->rpip) {
			xrip->rpip->xri_count--;
			xrip->rpip = NULL;
		}

		if (xrip->reserved_rpip) {
			xrip->reserved_rpip->xri_count--;
			xrip->reserved_rpip = NULL;
		}

		/* Take it off inuse list */
		(xrip->_b)->_f = xrip->_f;
		(xrip->_f)->_b = xrip->_b;
		xrip->_f = NULL;
		xrip->_b = NULL;
		hba->sli.sli4.xria_count--;

		/* Add it to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;
	}

	if (sbp) {
		/* Skip packets already completed or owned by the ULP */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI: sbp invalid. sbp=%p flags=%x xri=%d",
			    sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
			return;
		}

		/* Diagnostic only; cleanup proceeds using sbp->iotag */
		if (xrip && (xrip->iotag != sbp->iotag)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "sbp/iotag mismatch %p iotag:%d %d", sbp,
			    sbp->iotag, xrip->iotag);
		}

		if (sbp->iotag) {
			if (sbp == hba->fc_table[sbp->iotag]) {
				hba->fc_table[sbp->iotag] = NULL;
				hba->io_count--;

				if (sbp->xrip) {
					/* Exchange is still reserved */
					sbp->xrip->flag |= EMLXS_XRI_RESERVED;
				}
			}
			sbp->iotag = 0;
		}

		/* Detach the packet from the exchange we just freed */
		if (xrip) {
			sbp->xrip = 0;
		}

		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		/* Clean up the sbp */
		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;
		}

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
		}

		mutex_exit(&sbp->mtx);
	} else {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}
	}

} /* emlxs_sli4_free_xri() */
8343 
8344 
/*
 * emlxs_sli4_post_sgl_pages
 *
 * Register each XRI's pre-allocated SGL page with the adapter using
 * non-embedded FCOE_OPCODE_CFG_POST_SGL_PAGES mailbox commands.
 * XRIs are walked extent by extent, batching as many per command as
 * the non-embedded payload buffer allows.
 *
 * Returns 0 on success, EIO on buffer allocation or mailbox failure.
 */
static int
emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	XRIobj_t	*xrip;
	MATCHMAP	*mp;
	mbox_req_hdr_t	*hdr_req;
	uint32_t	i;
	uint32_t	cnt;
	uint32_t	xri_cnt;
	uint32_t	j;
	uint32_t	size;
	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;

	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/* The request is too large to embed; use a separate DMA buffer */
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x  ",
		    mb->mbxCommand);
		return (EIO);
	}
	mbq->nonembed = (void *)mp;

	/*
	 * Signifies a non embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 0;
	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;

	/* Request header sits at the head of the buffer, payload follows */
	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl =
	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);

	xrip = hba->sli.sli4.XRIp;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.XRIExtCount; j++) {
		cnt = hba->sli.sli4.XRIExtSize;
		while (cnt) {
			/* XRI 0 is never used; it has no SGL to post */
			if (xrip->XRI == 0) {
				cnt--;
				xrip++;
				continue;
			}

			/* Rebuild the request for each batch */
			bzero((void *) hdr_req, mp->size);
			size = mp->size - IOCTL_HEADER_SZ;

			mb->un.varSLIConfig.be.payload_length =
			    mp->size;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
			    IOCTL_SUBSYSTEM_FCOE;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
			    FCOE_OPCODE_CFG_POST_SGL_PAGES;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;

			hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
			hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
			hdr_req->timeout = 0;
			hdr_req->req_length = size;

			post_sgl->params.request.xri_count = 0;
			post_sgl->params.request.xri_start = xrip->XRI;

			/* Max XRIs per command, limited by payload space */
			xri_cnt = (size -
			    sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
			    sizeof (FCOE_SGL_PAGES);

			/* Fill one page entry per XRI in this batch */
			for (i = 0; (i < xri_cnt) && cnt; i++) {
				post_sgl->params.request.xri_count++;
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrLow =
				    PADDR_LO(xrip->SGList->phys);
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrHigh =
				    PADDR_HI(xrip->SGList->phys);

				cnt--;
				xrip++;
			}

			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Unable to POST_SGL. Mailbox cmd=%x "
				    "status=%x XRI cnt:%d start:%d",
				    mb->mbxCommand, mb->mbxStatus,
				    post_sgl->params.request.xri_count,
				    post_sgl->params.request.xri_start);
				emlxs_mem_buf_free(hba, mp);
				mbq->nonembed = NULL;
				return (EIO);
			}
		}
	}

	emlxs_mem_buf_free(hba, mp);
	mbq->nonembed = NULL;
	return (0);

} /* emlxs_sli4_post_sgl_pages() */
8452 
8453 
/*
 * emlxs_sli4_post_hdr_tmplates
 *
 * Post the RPI header template area to the adapter, one embedded
 * FCOE_OPCODE_POST_HDR_TEMPLATES mailbox command per RPI extent.
 * The template memory itself was carved out of slim2 during
 * resource allocation.
 *
 * Returns 0 on success (or when the adapter does not support header
 * templates, param.HDRR clear), EIO on mailbox failure.
 */
static int
emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4	*mb = (MAILBOX4 *)mbq;
	emlxs_port_t	*port = &PPORT;
	uint32_t	j;
	uint32_t	k;
	uint64_t	addr;
	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
	uint16_t	num_pages;

	if (!(hba->sli.sli4.param.HDRR)) {
		return (0);
	}

	/* Bytes per extent */
	j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

	/* Pages required per extent (page == 4096 bytes) */
	num_pages = (j/4096) + ((j%4096)? 1:0);

	/* Physical base of the template area; advanced as pages are posted */
	addr = hba->sli.sli4.HeaderTmplate.phys;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.RPIExtCount; j++) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->bp = NULL;
		mbq->mbox_cmpl = NULL;

		/*
		 * Signifies an embedded command
		 */
		mb->un.varSLIConfig.be.embedded = 1;

		mb->mbxCommand = MBX_SLI_CONFIG;
		mb->mbxOwner = OWN_HOST;
		mb->un.varSLIConfig.be.payload_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
		    IOCTL_SUBSYSTEM_FCOE;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
		    FCOE_OPCODE_POST_HDR_TEMPLATES;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);

		post_hdr =
		    (IOCTL_FCOE_POST_HDR_TEMPLATES *)
		    &mb->un.varSLIConfig.payload;
		post_hdr->params.request.num_pages = num_pages;
		post_hdr->params.request.rpi_offset = hba->sli.sli4.RPIBase[j];

		/* One 4K page address per entry */
		for (k = 0; k < num_pages; k++) {
			post_hdr->params.request.pages[k].addrLow =
			    PADDR_LO(addr);
			post_hdr->params.request.pages[k].addrHigh =
			    PADDR_HI(addr);
			addr += 4096;
		}

		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x "
			    "status=%x ",
			    mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
	}

	return (0);

} /* emlxs_sli4_post_hdr_tmplates() */
8528 
8529 
8530 static int
8531 emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
8532 {
8533 	MAILBOX4	*mb = (MAILBOX4 *)mbq;
8534 	emlxs_port_t	*port = &PPORT;
8535 	emlxs_config_t	*cfg = &CFG;
8536 	IOCTL_COMMON_EQ_CREATE *eq;
8537 	IOCTL_COMMON_CQ_CREATE *cq;
8538 	IOCTL_FCOE_WQ_CREATE *wq;
8539 	IOCTL_FCOE_RQ_CREATE *rq;
8540 	IOCTL_COMMON_MQ_CREATE *mq;
8541 	IOCTL_COMMON_MQ_CREATE_EXT *mq_ext;
8542 	uint16_t i, j;
8543 	uint16_t num_cq, total_cq;
8544 	uint16_t num_wq, total_wq;
8545 
8546 	/*
8547 	 * The first CQ is reserved for ASYNC events,
8548 	 * the second is reserved for unsol rcv, the rest
8549 	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
8550 	 */
8551 
8552 	total_cq = 0;
8553 	total_wq = 0;
8554 
8555 	/* Create EQ's */
8556 	for (i = 0; i < hba->intr_count; i++) {
8557 		emlxs_mb_eq_create(hba, mbq, i);
8558 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8559 		    MBX_SUCCESS) {
8560 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
8561 			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
8562 			    i, mb->mbxCommand, mb->mbxStatus);
8563 			return (EIO);
8564 		}
8565 		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
8566 		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
8567 		hba->sli.sli4.eq[i].lastwq = total_wq;
8568 		hba->sli.sli4.eq[i].msix_vector = i;
8569 
8570 		emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
8571 		num_wq = cfg[CFG_NUM_WQ].current;
8572 		num_cq = num_wq;
8573 		if (i == 0) {
8574 			/* One for RQ handling, one for mbox/event handling */
8575 			num_cq += EMLXS_CQ_OFFSET_WQ;
8576 		}
8577 
8578 		/* Create CQ's */
8579 		for (j = 0; j < num_cq; j++) {
8580 			/* Reuse mbq from previous mbox */
8581 			bzero(mbq, sizeof (MAILBOXQ));
8582 
8583 			hba->sli.sli4.cq[total_cq].eqid =
8584 			    hba->sli.sli4.eq[i].qid;
8585 
8586 			emlxs_mb_cq_create(hba, mbq, total_cq);
8587 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8588 			    MBX_SUCCESS) {
8589 				EMLXS_MSGF(EMLXS_CONTEXT,
8590 				    &emlxs_init_failed_msg, "Unable to Create "
8591 				    "CQ %d: Mailbox cmd=%x status=%x ",
8592 				    total_cq, mb->mbxCommand, mb->mbxStatus);
8593 				return (EIO);
8594 			}
8595 			cq = (IOCTL_COMMON_CQ_CREATE *)
8596 			    &mb->un.varSLIConfig.payload;
8597 			hba->sli.sli4.cq[total_cq].qid =
8598 			    cq->params.response.CQId;
8599 
8600 			switch (total_cq) {
8601 			case EMLXS_CQ_MBOX:
8602 				/* First CQ is for async event handling */
8603 				hba->sli.sli4.cq[total_cq].type =
8604 				    EMLXS_CQ_TYPE_GROUP1;
8605 				break;
8606 
8607 			case EMLXS_CQ_RCV:
8608 				/* Second CQ is for unsol receive handling */
8609 				hba->sli.sli4.cq[total_cq].type =
8610 				    EMLXS_CQ_TYPE_GROUP2;
8611 				break;
8612 
8613 			default:
8614 				/* Setup CQ to channel mapping */
8615 				hba->sli.sli4.cq[total_cq].type =
8616 				    EMLXS_CQ_TYPE_GROUP2;
8617 				hba->sli.sli4.cq[total_cq].channelp =
8618 				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
8619 				break;
8620 			}
8621 			emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb,
8622 			    18, 0);
8623 			total_cq++;
8624 		}
8625 
8626 		/* Create WQ's */
8627 		for (j = 0; j < num_wq; j++) {
8628 			/* Reuse mbq from previous mbox */
8629 			bzero(mbq, sizeof (MAILBOXQ));
8630 
8631 			hba->sli.sli4.wq[total_wq].cqid =
8632 			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;
8633 
8634 			emlxs_mb_wq_create(hba, mbq, total_wq);
8635 			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8636 			    MBX_SUCCESS) {
8637 				EMLXS_MSGF(EMLXS_CONTEXT,
8638 				    &emlxs_init_failed_msg, "Unable to Create "
8639 				    "WQ %d: Mailbox cmd=%x status=%x ",
8640 				    total_wq, mb->mbxCommand, mb->mbxStatus);
8641 				return (EIO);
8642 			}
8643 			wq = (IOCTL_FCOE_WQ_CREATE *)
8644 			    &mb->un.varSLIConfig.payload;
8645 			hba->sli.sli4.wq[total_wq].qid =
8646 			    wq->params.response.WQId;
8647 
8648 			hba->sli.sli4.wq[total_wq].cqid =
8649 			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
8650 			emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb,
8651 			    18, 0);
8652 			total_wq++;
8653 		}
8654 		hba->last_msiid = i;
8655 	}
8656 
8657 	/* We assume 1 RQ pair will handle ALL incoming data */
8658 	/* Create RQs */
8659 	for (i = 0; i < EMLXS_MAX_RQS; i++) {
8660 		/* Personalize the RQ */
8661 		switch (i) {
8662 		case 0:
8663 			hba->sli.sli4.rq[i].cqid =
8664 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
8665 			break;
8666 		case 1:
8667 			hba->sli.sli4.rq[i].cqid =
8668 			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
8669 			break;
8670 		default:
8671 			hba->sli.sli4.rq[i].cqid = 0xffff;
8672 		}
8673 
8674 		/* Reuse mbq from previous mbox */
8675 		bzero(mbq, sizeof (MAILBOXQ));
8676 
8677 		emlxs_mb_rq_create(hba, mbq, i);
8678 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8679 		    MBX_SUCCESS) {
8680 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
8681 			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
8682 			    i, mb->mbxCommand, mb->mbxStatus);
8683 			return (EIO);
8684 		}
8685 
8686 		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
8687 		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
8688 		emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);
8689 
8690 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8691 		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
8692 		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);
8693 
8694 		/* Initialize the host_index */
8695 		hba->sli.sli4.rq[i].host_index = 0;
8696 
8697 		/* If Data queue was just created, */
8698 		/* then post buffers using the header qid */
8699 		if ((i & 0x1)) {
8700 			/* Ring the RQ doorbell to post buffers */
8701 
8702 			emlxs_sli4_write_rqdb(hba, hba->sli.sli4.rq[i-1].qid,
8703 			    RQB_COUNT);
8704 
8705 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8706 			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
8707 			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
8708 		}
8709 	}
8710 
8711 	/* Create MQ */
8712 
8713 	/* Personalize the MQ */
8714 	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;
8715 
8716 	/* Reuse mbq from previous mbox */
8717 	bzero(mbq, sizeof (MAILBOXQ));
8718 
8719 	emlxs_mb_mq_create_ext(hba, mbq);
8720 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8721 	    MBX_SUCCESS) {
8722 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
8723 		    "Unable to Create MQ_EXT %d: Mailbox cmd=%x status=%x ",
8724 		    i, mb->mbxCommand, mb->mbxStatus);
8725 
8726 		/* Reuse mbq from previous mbox */
8727 		bzero(mbq, sizeof (MAILBOXQ));
8728 
8729 		emlxs_mb_mq_create(hba, mbq);
8730 		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8731 		    MBX_SUCCESS) {
8732 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
8733 			    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
8734 			    i, mb->mbxCommand, mb->mbxStatus);
8735 			return (EIO);
8736 		}
8737 
8738 		mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
8739 		hba->sli.sli4.mq.qid = mq->params.response.MQId;
8740 		return (0);
8741 	}
8742 
8743 	mq_ext = (IOCTL_COMMON_MQ_CREATE_EXT *)&mb->un.varSLIConfig.payload;
8744 	hba->sli.sli4.mq.qid = mq_ext->params.response.MQId;
8745 	return (0);
8746 
8747 } /* emlxs_sli4_create_queues() */
8748 
8749 
8750 extern void
8751 emlxs_sli4_timer(emlxs_hba_t *hba)
8752 {
8753 	/* Perform SLI4 level timer checks */
8754 
8755 	emlxs_fcf_timer_notify(hba);
8756 
8757 	emlxs_sli4_timer_check_mbox(hba);
8758 
8759 	return;
8760 
8761 } /* emlxs_sli4_timer() */
8762 
8763 
/*
 * Check whether the active mailbox command has timed out.
 *
 * Called from emlxs_sli4_timer().  If hba->mbox_timer is set and
 * hba->timer_tics has reached it, the offending command is logged,
 * the HBA is marked FC_MBOX_TIMEOUT and moved to FC_ERROR, all
 * mailbox waiters are woken via emlxs_mb_fini(), and an adapter
 * shutdown thread is spawned.
 */
static void
emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;

	/* Timeout detection can be disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* The first to service the mbox queue will clear the timer */
	hba->mbox_timer = 0;

	/* Snapshot the active mailbox command, if any */
	if (hba->mbox_queue_flag) {
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		/* Log the timed-out command, tagged by how it was issued */
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	return;

} /* emlxs_sli4_timer_check_mbox() */
8840 
8841 static void
8842 emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba)
8843 {
8844 	mutex_enter(&hba->gpio_lock);
8845 
8846 	if (!hba->gpio_timer) {
8847 		hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
8848 		    drv_usectohz(100000));
8849 	}
8850 
8851 	mutex_exit(&hba->gpio_lock);
8852 
8853 } /* emlxs_sli4_gpio_timer_start() */
8854 
8855 static void
8856 emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba)
8857 {
8858 	mutex_enter(&hba->gpio_lock);
8859 
8860 	if (hba->gpio_timer) {
8861 		(void) untimeout(hba->gpio_timer);
8862 		hba->gpio_timer = 0;
8863 	}
8864 
8865 	mutex_exit(&hba->gpio_lock);
8866 
8867 	delay(drv_usectohz(300000));
8868 } /* emlxs_sli4_gpio_timer_stop() */
8869 
8870 static void
8871 emlxs_sli4_gpio_timer(void *arg)
8872 {
8873 	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
8874 
8875 	mutex_enter(&hba->gpio_lock);
8876 
8877 	if (hba->gpio_timer) {
8878 		emlxs_sli4_check_gpio(hba);
8879 		hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
8880 		    drv_usectohz(100000));
8881 	}
8882 
8883 	mutex_exit(&hba->gpio_lock);
8884 } /* emlxs_sli4_gpio_timer() */
8885 
/*
 * Recompute the desired LED GPIO state from link/IO activity and, if it
 * differs from the current state, issue a GPIO write mailbox command to
 * correct one pin.  Further pins are corrected by the mailbox completion
 * handler (emlxs_sli4_fix_gpio_mbcmpl).
 *
 * Called from emlxs_sli4_gpio_timer() with hba->gpio_lock held.
 */
static void
emlxs_sli4_check_gpio(emlxs_hba_t *hba)
{
	hba->gpio_desired = 0;

	/* Build the desired LED mask: activity plus speed indication */
	if (hba->flag & FC_GPIO_LINK_UP) {
		if (hba->io_active)
			hba->gpio_desired |= EMLXS_GPIO_ACT;

		/* This is model specific to ATTO gen5 lancer cards */

		switch (hba->linkspeed) {
			case LA_4GHZ_LINK:
				hba->gpio_desired |= EMLXS_GPIO_LO;
				break;

			case LA_8GHZ_LINK:
				hba->gpio_desired |= EMLXS_GPIO_HI;
				break;

			case LA_16GHZ_LINK:
				hba->gpio_desired |=
				    EMLXS_GPIO_LO | EMLXS_GPIO_HI;
				break;
		}
	}

	if (hba->gpio_current != hba->gpio_desired) {
		emlxs_port_t *port = &PPORT;
		uint8_t pin;
		uint8_t pinval;
		MAILBOXQ *mbq;
		uint32_t rval;

		/* fix_gpio picks one pin and records it in hba->gpio_bit */
		if (!emlxs_sli4_fix_gpio(hba, &pin, &pinval))
			return;

		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to allocate GPIO mailbox.");

			/* Abandon the pending bit so a later pass retries */
			hba->gpio_bit = 0;
			return;
		}

		emlxs_mb_gpio_write(hba, mbq, pin, pinval);
		mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;

		rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);

		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to start GPIO mailbox.");

			hba->gpio_bit = 0;
			emlxs_mem_put(hba, MEM_MBOX, mbq);
			return;
		}
	}
} /* emlxs_sli4_check_gpio */
8946 
/*
 * Select the next GPIO pin whose state differs from the desired state.
 *
 * Returns 1 with *pin set to the hardware pin number and *pinval set to
 * the value to write; returns 0 when there is nothing to do or a GPIO
 * mailbox command is already outstanding.  Side effect: records the
 * chosen bit in hba->gpio_bit for the completion handler to commit.
 * Callers in this file hold hba->gpio_lock.
 */
static uint32_t
emlxs_sli4_fix_gpio(emlxs_hba_t *hba, uint8_t *pin, uint8_t *pinval)
{
	uint8_t dif = hba->gpio_desired ^ hba->gpio_current;
	uint8_t bit;
	uint8_t i;

	/* Get out if there are no pins to set or a GPIO request is pending */

	if (dif == 0 || hba->gpio_bit)
		return (0);

	/* Fix one pin at a time */

	bit = dif & -dif;	/* isolate the lowest differing bit */
	hba->gpio_bit = bit;
	dif = hba->gpio_current ^ bit;	/* current state with that bit fixed */

	/* Walk the bit down to position 0, tracking the pin index */
	for (i = EMLXS_GPIO_PIN_LO; bit > 1; ++i) {
		dif >>= 1;
		bit >>= 1;
	}

	/* Pins are active low so invert the bit value */

	*pin = hba->gpio_pin[i];
	*pinval = ~dif & bit;

	return (1);
} /* emlxs_sli4_fix_gpio */
8977 
8978 static uint32_t
8979 emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
8980 {
8981 	MAILBOX *mb;
8982 	uint8_t pin;
8983 	uint8_t pinval;
8984 
8985 	mb = (MAILBOX *)mbq;
8986 
8987 	mutex_enter(&hba->gpio_lock);
8988 
8989 	if (mb->mbxStatus == 0)
8990 		hba->gpio_current ^= hba->gpio_bit;
8991 
8992 	hba->gpio_bit = 0;
8993 
8994 	if (emlxs_sli4_fix_gpio(hba, &pin, &pinval)) {
8995 		emlxs_port_t *port = &PPORT;
8996 		MAILBOXQ *mbq;
8997 		uint32_t rval;
8998 
8999 		/*
9000 		 * We're not using the mb_retry routine here because for some
9001 		 * reason it doesn't preserve the completion routine. Just let
9002 		 * this mbox cmd fail to start here and run when the mailbox
9003 		 * is no longer busy.
9004 		 */
9005 
9006 		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
9007 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9008 			    "Unable to allocate GPIO mailbox.");
9009 
9010 			hba->gpio_bit = 0;
9011 			goto done;
9012 		}
9013 
9014 		emlxs_mb_gpio_write(hba, mbq, pin, pinval);
9015 		mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;
9016 
9017 		rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
9018 
9019 		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
9020 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9021 			    "Unable to start GPIO mailbox.");
9022 
9023 			hba->gpio_bit = 0;
9024 			emlxs_mem_put(hba, MEM_MBOX, mbq);
9025 			goto done;
9026 		}
9027 	}
9028 
9029 done:
9030 	mutex_exit(&hba->gpio_lock);
9031 
9032 	return (0);
9033 }
9034 
9035 extern void
9036 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
9037 {
9038 	void *msg;
9039 
9040 	if (!port || !str || !iptr || !cnt) {
9041 		return;
9042 	}
9043 
9044 	if (err) {
9045 		msg = &emlxs_sli_err_msg;
9046 	} else {
9047 		msg = &emlxs_sli_detail_msg;
9048 	}
9049 
9050 	if (cnt) {
9051 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9052 		    "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
9053 		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
9054 	}
9055 	if (cnt > 6) {
9056 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9057 		    "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
9058 		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
9059 	}
9060 	if (cnt > 12) {
9061 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9062 		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
9063 		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
9064 	}
9065 	if (cnt > 18) {
9066 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9067 		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
9068 		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
9069 	}
9070 	if (cnt > 24) {
9071 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9072 		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
9073 		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
9074 	}
9075 	if (cnt > 30) {
9076 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9077 		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
9078 		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
9079 	}
9080 	if (cnt > 36) {
9081 		EMLXS_MSGF(EMLXS_CONTEXT, msg,
9082 		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
9083 		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
9084 	}
9085 
9086 } /* emlxs_data_dump() */
9087 
9088 
/*
 * Dump the adapter's unrecoverable-error (UE) registers to the log.
 * 'str' prefixes the message so the call site can be identified.
 *
 * The registers read depend on the SLI interface type:
 *   IF_TYPE_0: UE low/high via ERR1/ERR2 plus the two PCICFG
 *              online registers.
 *   IF_TYPE_2: port STATUS plus ERR1/ERR2 via BAR0.
 * NOTE(review): other interface types are silently ignored, and the
 * FMA check below validates pci_acc_handle even on the IF_TYPE_2 path
 * (which reads through bar0_acc_handle) -- confirm intent.
 */
extern void
emlxs_ue_dump(emlxs_hba_t *hba, char *str)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t on1;
	uint32_t on2;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		on1 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
		on2 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
		    ue_l, ue_h, on1, on2);
		break;

	case SLI_INTF_IF_TYPE_2:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: status:%08x err1:%08x err2:%08x", str,
		    status, ue_l, ue_h);

		break;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_ue_dump() */
9138 
9139 
/*
 * Poll the adapter error registers for an unrecoverable error.
 *
 * For IF_TYPE_0, any unmasked UE bit (or a previously latched
 * EMLXS_SLI4_HW_ERROR flag) is fatal.  For IF_TYPE_2, the STATUS
 * register distinguishes reset-needed (error=1, restart the adapter)
 * from fatal (error=2, shut the adapter down).  No-op if a hardware
 * error has already been declared.
 */
static void
emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t error = 0;

	/* Already in error state; nothing more to detect */
	if (hba->flag & FC_HARDWARE_ERROR) {
		return;
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Any unmasked UE bit is treated as fatal */
		if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
		    (~hba->sli.sli4.ue_mask_hi & ue_h) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
			    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
			    "maskHigh:%08x flag:%08x",
			    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
			    hba->sli.sli4.ue_mask_hi, hba->sli.sli4.flag);

			error = 2;
		}
		break;

	case SLI_INTF_IF_TYPE_2:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		if ((status & SLI_STATUS_ERROR) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* 1 = recoverable via reset, 2 = fatal */
			error = (status & SLI_STATUS_RESET_NEEDED)? 1:2;

			if (error == 1) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_hardware_error_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			}
		}
		break;
	}

	if (error == 2) {
		/* Fatal: flush outstanding I/O and shut the adapter down */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	} else if (error == 1) {
		/* Reset needed: flush outstanding I/O and restart */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_restart_thread, 0, 0);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli4_poll_erratt() */
9223 
9224 
9225 static uint32_t
9226 emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
9227     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
9228 {
9229 	emlxs_hba_t	*hba = HBA;
9230 	NODELIST	*node;
9231 	RPIobj_t	*rpip;
9232 	uint32_t	rval;
9233 
9234 	/* Check for invalid node ids to register */
9235 	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
9236 		return (1);
9237 	}
9238 
9239 	if (did & 0xff000000) {
9240 		return (1);
9241 	}
9242 
9243 	/* We don't register our own did */
9244 	if ((did == port->did) && (!(hba->flag & FC_LOOPBACK_MODE))) {
9245 		return (1);
9246 	}
9247 
9248 	if (did != FABRIC_DID) {
9249 		if ((rval = emlxs_mb_check_sparm(hba, param))) {
9250 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
9251 			    "Invalid service parameters. did=%06x rval=%d", did,
9252 			    rval);
9253 
9254 			return (1);
9255 		}
9256 	}
9257 
9258 	/* Check if the node limit has been reached */
9259 	if (port->node_count >= hba->max_nodes) {
9260 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
9261 		    "Limit reached. did=%06x count=%d", did,
9262 		    port->node_count);
9263 
9264 		return (1);
9265 	}
9266 
9267 	node = emlxs_node_find_did(port, did, 1);
9268 	rpip = EMLXS_NODE_TO_RPI(port, node);
9269 
9270 	rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
9271 	    (void *)ubp, (void *)iocbq);
9272 
9273 	return (rval);
9274 
9275 } /* emlxs_sli4_reg_did() */
9276 
9277 
9278 static uint32_t
9279 emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
9280     emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
9281 {
9282 	RPIobj_t	*rpip;
9283 	uint32_t	rval;
9284 
9285 	if (!node) {
9286 		/* Unreg all nodes */
9287 		(void) emlxs_sli4_unreg_all_nodes(port);
9288 		return (1);
9289 	}
9290 
9291 	/* Check for base node */
9292 	if (node == &port->node_base) {
9293 		/* Just flush base node */
9294 		(void) emlxs_tx_node_flush(port, &port->node_base,
9295 		    0, 0, 0);
9296 
9297 		(void) emlxs_chipq_node_flush(port, 0,
9298 		    &port->node_base, 0);
9299 
9300 		port->did = 0;
9301 
9302 		/* Return now */
9303 		return (1);
9304 	}
9305 
9306 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9307 	    "unreg_node:%p did=%x rpi=%d",
9308 	    node, node->nlp_DID, node->nlp_Rpi);
9309 
9310 	rpip = EMLXS_NODE_TO_RPI(port, node);
9311 
9312 	if (!rpip) {
9313 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9314 		    "unreg_node:%p did=%x rpi=%d. RPI not found.",
9315 		    node, node->nlp_DID, node->nlp_Rpi);
9316 
9317 		emlxs_node_rm(port, node);
9318 		return (1);
9319 	}
9320 
9321 	rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
9322 	    (void *)iocbq);
9323 
9324 	return (rval);
9325 
9326 } /* emlxs_sli4_unreg_node() */
9327 
9328 
/*
 * Unregister every node currently in the port's node table.
 *
 * Pass 1 tags each node under the table lock.  Pass 2 then repeatedly
 * finds one tagged node, clears its tag, drops the lock (since
 * unregistering may block and may modify the table), and unregisters
 * it -- restarting the scan each time so the table is never walked
 * while unlocked.  The tag ensures nodes added after pass 1 are not
 * processed.  Always returns 0.
 */
extern uint32_t
emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
{
	NODELIST	*nlp;
	int		i;
	uint32_t	found;

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	found = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			found = 1;
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	/* Empty table; nothing to do */
	if (!found) {
		return (0);
	}

	for (;;) {
		/* Find one still-tagged node, clearing its tag */
		rw_enter(&port->node_rwlock, RW_READER);
		found = 0;
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if (!nlp->nlp_tag) {
					nlp = nlp->nlp_list_next;
					continue;
				}
				nlp->nlp_tag = 0;
				found = 1;
				break;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* All tagged nodes processed */
		if (!found) {
			break;
		}

		/* Unregister outside the lock; may block */
		(void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
	}

	return (0);

} /* emlxs_sli4_unreg_all_nodes() */
9385 
9386 
9387 static void
9388 emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9389 {
9390 	emlxs_port_t *port = &PPORT;
9391 
9392 	/* Handle link down */
9393 	if ((cqe->un.link.link_status == ASYNC_EVENT_LOGICAL_LINK_DOWN) ||
9394 	    (cqe->un.link.link_status == ASYNC_EVENT_PHYS_LINK_DOWN)) {
9395 		(void) emlxs_fcf_linkdown_notify(port);
9396 
9397 		mutex_enter(&EMLXS_PORT_LOCK);
9398 		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9399 		mutex_exit(&EMLXS_PORT_LOCK);
9400 		return;
9401 	}
9402 
9403 	/* Link is up */
9404 
9405 	/* Set linkspeed */
9406 	switch (cqe->un.link.port_speed) {
9407 	case PHY_1GHZ_LINK:
9408 		hba->linkspeed = LA_1GHZ_LINK;
9409 		break;
9410 	case PHY_10GHZ_LINK:
9411 		hba->linkspeed = LA_10GHZ_LINK;
9412 		break;
9413 	default:
9414 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9415 		    "sli4_handle_fcoe_link_event: Unknown link speed=%x.",
9416 		    cqe->un.link.port_speed);
9417 		hba->linkspeed = 0;
9418 		break;
9419 	}
9420 
9421 	/* Set qos_linkspeed */
9422 	hba->qos_linkspeed = cqe->un.link.qos_link_speed;
9423 
9424 	/* Set topology */
9425 	hba->topology = TOPOLOGY_PT_PT;
9426 
9427 	mutex_enter(&EMLXS_PORT_LOCK);
9428 	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9429 	mutex_exit(&EMLXS_PORT_LOCK);
9430 
9431 	(void) emlxs_fcf_linkup_notify(port);
9432 
9433 	return;
9434 
9435 } /* emlxs_sli4_handle_fcoe_link_event()  */
9436 
9437 
9438 static void
9439 emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9440 {
9441 	emlxs_port_t *port = &PPORT;
9442 
9443 	/* Handle link down */
9444 	if (cqe->un.fc.att_type == ATT_TYPE_LINK_DOWN) {
9445 		(void) emlxs_fcf_linkdown_notify(port);
9446 
9447 		mutex_enter(&EMLXS_PORT_LOCK);
9448 		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9449 		mutex_exit(&EMLXS_PORT_LOCK);
9450 		return;
9451 	}
9452 
9453 	/* Link is up */
9454 
9455 	/* Set linkspeed */
9456 	switch (cqe->un.fc.port_speed) {
9457 	case 1:
9458 		hba->linkspeed = LA_1GHZ_LINK;
9459 		break;
9460 	case 2:
9461 		hba->linkspeed = LA_2GHZ_LINK;
9462 		break;
9463 	case 4:
9464 		hba->linkspeed = LA_4GHZ_LINK;
9465 		break;
9466 	case 8:
9467 		hba->linkspeed = LA_8GHZ_LINK;
9468 		break;
9469 	case 10:
9470 		hba->linkspeed = LA_10GHZ_LINK;
9471 		break;
9472 	case 16:
9473 		hba->linkspeed = LA_16GHZ_LINK;
9474 		break;
9475 	case 32:
9476 		hba->linkspeed = LA_32GHZ_LINK;
9477 		break;
9478 	default:
9479 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9480 		    "sli4_handle_fc_link_att: Unknown link speed=%x.",
9481 		    cqe->un.fc.port_speed);
9482 		hba->linkspeed = 0;
9483 		break;
9484 	}
9485 
9486 	/* Set qos_linkspeed */
9487 	hba->qos_linkspeed = cqe->un.fc.link_speed;
9488 
9489 	/* Set topology */
9490 	hba->topology = cqe->un.fc.topology;
9491 
9492 	mutex_enter(&EMLXS_PORT_LOCK);
9493 	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9494 	mutex_exit(&EMLXS_PORT_LOCK);
9495 
9496 	(void) emlxs_fcf_linkup_notify(port);
9497 
9498 	return;
9499 
9500 } /* emlxs_sli4_handle_fc_link_att() */
9501 
9502 
9503 static int
9504 emlxs_sli4_init_extents(emlxs_hba_t *hba, MAILBOXQ *mbq)
9505 {
9506 	emlxs_port_t *port = &PPORT;
9507 	MAILBOX4 *mb4;
9508 	IOCTL_COMMON_EXTENTS *ep;
9509 	uint32_t i;
9510 	uint32_t ExtentCnt;
9511 
9512 	if (!(hba->sli.sli4.param.EXT)) {
9513 		return (0);
9514 	}
9515 
9516 	mb4 = (MAILBOX4 *) mbq;
9517 
9518 	/* Discover XRI Extents */
9519 	bzero(mbq, sizeof (MAILBOXQ));
9520 	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_XRI);
9521 
9522 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9523 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9524 		    "Unable to discover XRI extents.  Mailbox cmd=%x status=%x",
9525 		    mb4->mbxCommand, mb4->mbxStatus);
9526 
9527 		return (EIO);
9528 	}
9529 
9530 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9531 	hba->sli.sli4.XRIExtSize = ep->params.response.ExtentSize;
9532 	ExtentCnt = ep->params.response.ExtentCnt;
9533 
9534 	/* Allocate XRI Extents */
9535 	bzero(mbq, sizeof (MAILBOXQ));
9536 	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_XRI, ExtentCnt);
9537 
9538 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9539 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9540 		    "Unable to allocate XRI extents.  Mailbox cmd=%x status=%x",
9541 		    mb4->mbxCommand, mb4->mbxStatus);
9542 
9543 		return (EIO);
9544 	}
9545 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9546 
9547 	bcopy((uint8_t *)ep->params.response.RscId,
9548 	    (uint8_t *)hba->sli.sli4.XRIBase,
9549 	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9550 
9551 	hba->sli.sli4.XRIExtCount = ep->params.response.ExtentCnt;
9552 	hba->sli.sli4.XRICount = hba->sli.sli4.XRIExtCount *
9553 	    hba->sli.sli4.XRIExtSize;
9554 
9555 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9556 	    "XRI Ext: size=%d cnt=%d/%d",
9557 	    hba->sli.sli4.XRIExtSize,
9558 	    hba->sli.sli4.XRIExtCount, ExtentCnt);
9559 
9560 	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9561 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9562 		    "XRI Ext%d: %d, %d, %d, %d", i,
9563 		    hba->sli.sli4.XRIBase[i],
9564 		    hba->sli.sli4.XRIBase[i+1],
9565 		    hba->sli.sli4.XRIBase[i+2],
9566 		    hba->sli.sli4.XRIBase[i+3]);
9567 	}
9568 
9569 
9570 	/* Discover RPI Extents */
9571 	bzero(mbq, sizeof (MAILBOXQ));
9572 	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_RPI);
9573 
9574 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9575 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9576 		    "Unable to discover RPI extents.  Mailbox cmd=%x status=%x",
9577 		    mb4->mbxCommand, mb4->mbxStatus);
9578 
9579 		return (EIO);
9580 	}
9581 
9582 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9583 	hba->sli.sli4.RPIExtSize = ep->params.response.ExtentSize;
9584 	ExtentCnt = ep->params.response.ExtentCnt;
9585 
9586 	/* Allocate RPI Extents */
9587 	bzero(mbq, sizeof (MAILBOXQ));
9588 	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_RPI, ExtentCnt);
9589 
9590 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9591 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9592 		    "Unable to allocate RPI extents.  Mailbox cmd=%x status=%x",
9593 		    mb4->mbxCommand, mb4->mbxStatus);
9594 
9595 		return (EIO);
9596 	}
9597 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9598 
9599 	bcopy((uint8_t *)ep->params.response.RscId,
9600 	    (uint8_t *)hba->sli.sli4.RPIBase,
9601 	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9602 
9603 	hba->sli.sli4.RPIExtCount = ep->params.response.ExtentCnt;
9604 	hba->sli.sli4.RPICount = hba->sli.sli4.RPIExtCount *
9605 	    hba->sli.sli4.RPIExtSize;
9606 
9607 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9608 	    "RPI Ext: size=%d cnt=%d/%d",
9609 	    hba->sli.sli4.RPIExtSize,
9610 	    hba->sli.sli4.RPIExtCount, ExtentCnt);
9611 
9612 	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9613 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9614 		    "RPI Ext%d: %d, %d, %d, %d", i,
9615 		    hba->sli.sli4.RPIBase[i],
9616 		    hba->sli.sli4.RPIBase[i+1],
9617 		    hba->sli.sli4.RPIBase[i+2],
9618 		    hba->sli.sli4.RPIBase[i+3]);
9619 	}
9620 
9621 
9622 	/* Discover VPI Extents */
9623 	bzero(mbq, sizeof (MAILBOXQ));
9624 	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VPI);
9625 
9626 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9627 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9628 		    "Unable to discover VPI extents.  Mailbox cmd=%x status=%x",
9629 		    mb4->mbxCommand, mb4->mbxStatus);
9630 
9631 		return (EIO);
9632 	}
9633 
9634 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9635 	hba->sli.sli4.VPIExtSize = ep->params.response.ExtentSize;
9636 	ExtentCnt = ep->params.response.ExtentCnt;
9637 
9638 	/* Allocate VPI Extents */
9639 	bzero(mbq, sizeof (MAILBOXQ));
9640 	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VPI, ExtentCnt);
9641 
9642 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9643 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9644 		    "Unable to allocate VPI extents.  Mailbox cmd=%x status=%x",
9645 		    mb4->mbxCommand, mb4->mbxStatus);
9646 
9647 		return (EIO);
9648 	}
9649 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9650 
9651 	bcopy((uint8_t *)ep->params.response.RscId,
9652 	    (uint8_t *)hba->sli.sli4.VPIBase,
9653 	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9654 
9655 	hba->sli.sli4.VPIExtCount = ep->params.response.ExtentCnt;
9656 	hba->sli.sli4.VPICount = hba->sli.sli4.VPIExtCount *
9657 	    hba->sli.sli4.VPIExtSize;
9658 
9659 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9660 	    "VPI Ext: size=%d cnt=%d/%d",
9661 	    hba->sli.sli4.VPIExtSize,
9662 	    hba->sli.sli4.VPIExtCount, ExtentCnt);
9663 
9664 	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9665 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9666 		    "VPI Ext%d: %d, %d, %d, %d", i,
9667 		    hba->sli.sli4.VPIBase[i],
9668 		    hba->sli.sli4.VPIBase[i+1],
9669 		    hba->sli.sli4.VPIBase[i+2],
9670 		    hba->sli.sli4.VPIBase[i+3]);
9671 	}
9672 
9673 	/* Discover VFI Extents */
9674 	bzero(mbq, sizeof (MAILBOXQ));
9675 	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VFI);
9676 
9677 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9678 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9679 		    "Unable to discover VFI extents.  Mailbox cmd=%x status=%x",
9680 		    mb4->mbxCommand, mb4->mbxStatus);
9681 
9682 		return (EIO);
9683 	}
9684 
9685 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9686 	hba->sli.sli4.VFIExtSize = ep->params.response.ExtentSize;
9687 	ExtentCnt = ep->params.response.ExtentCnt;
9688 
9689 	/* Allocate VFI Extents */
9690 	bzero(mbq, sizeof (MAILBOXQ));
9691 	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VFI, ExtentCnt);
9692 
9693 	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9694 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9695 		    "Unable to allocate VFI extents.  Mailbox cmd=%x status=%x",
9696 		    mb4->mbxCommand, mb4->mbxStatus);
9697 
9698 		return (EIO);
9699 	}
9700 	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9701 
9702 	bcopy((uint8_t *)ep->params.response.RscId,
9703 	    (uint8_t *)hba->sli.sli4.VFIBase,
9704 	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9705 
9706 	hba->sli.sli4.VFIExtCount = ep->params.response.ExtentCnt;
9707 	hba->sli.sli4.VFICount = hba->sli.sli4.VFIExtCount *
9708 	    hba->sli.sli4.VFIExtSize;
9709 
9710 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9711 	    "VFI Ext: size=%d cnt=%d/%d",
9712 	    hba->sli.sli4.VFIExtSize,
9713 	    hba->sli.sli4.VFIExtCount, ExtentCnt);
9714 
9715 	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9716 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9717 		    "VFI Ext%d: %d, %d, %d, %d", i,
9718 		    hba->sli.sli4.VFIBase[i],
9719 		    hba->sli.sli4.VFIBase[i+1],
9720 		    hba->sli.sli4.VFIBase[i+2],
9721 		    hba->sli.sli4.VFIBase[i+3]);
9722 	}
9723 
9724 	return (0);
9725 
9726 } /* emlxs_sli4_init_extents() */
9727 
9728 
9729 extern uint32_t
9730 emlxs_sli4_index_to_rpi(emlxs_hba_t *hba, uint32_t index)
9731 {
9732 	uint32_t i;
9733 	uint32_t j;
9734 	uint32_t rpi;
9735 
9736 	i = index / hba->sli.sli4.RPIExtSize;
9737 	j = index % hba->sli.sli4.RPIExtSize;
9738 	rpi = hba->sli.sli4.RPIBase[i] + j;
9739 
9740 	return (rpi);
9741 
9742 } /* emlxs_sli4_index_to_rpi */
9743 
9744 
9745 extern uint32_t
9746 emlxs_sli4_rpi_to_index(emlxs_hba_t *hba, uint32_t rpi)
9747 {
9748 	uint32_t i;
9749 	uint32_t lo;
9750 	uint32_t hi;
9751 	uint32_t index = hba->sli.sli4.RPICount;
9752 
9753 	for (i = 0; i < hba->sli.sli4.RPIExtCount; i++) {
9754 		lo = hba->sli.sli4.RPIBase[i];
9755 		hi = lo + hba->sli.sli4.RPIExtSize;
9756 
9757 		if ((rpi < hi) && (rpi >= lo)) {
9758 			index = (i * hba->sli.sli4.RPIExtSize) + (rpi - lo);
9759 			break;
9760 		}
9761 	}
9762 
9763 	return (index);
9764 
9765 } /* emlxs_sli4_rpi_to_index */
9766 
9767 
9768 extern uint32_t
9769 emlxs_sli4_index_to_xri(emlxs_hba_t *hba, uint32_t index)
9770 {
9771 	uint32_t i;
9772 	uint32_t j;
9773 	uint32_t xri;
9774 
9775 	i = index / hba->sli.sli4.XRIExtSize;
9776 	j = index % hba->sli.sli4.XRIExtSize;
9777 	xri = hba->sli.sli4.XRIBase[i] + j;
9778 
9779 	return (xri);
9780 
9781 } /* emlxs_sli4_index_to_xri */
9782 
9783 
9784 
9785 
9786 extern uint32_t
9787 emlxs_sli4_index_to_vpi(emlxs_hba_t *hba, uint32_t index)
9788 {
9789 	uint32_t i;
9790 	uint32_t j;
9791 	uint32_t vpi;
9792 
9793 	i = index / hba->sli.sli4.VPIExtSize;
9794 	j = index % hba->sli.sli4.VPIExtSize;
9795 	vpi = hba->sli.sli4.VPIBase[i] + j;
9796 
9797 	return (vpi);
9798 
9799 } /* emlxs_sli4_index_to_vpi */
9800 
9801 
9802 extern uint32_t
9803 emlxs_sli4_vpi_to_index(emlxs_hba_t *hba, uint32_t vpi)
9804 {
9805 	uint32_t i;
9806 	uint32_t lo;
9807 	uint32_t hi;
9808 	uint32_t index = hba->sli.sli4.VPICount;
9809 
9810 	for (i = 0; i < hba->sli.sli4.VPIExtCount; i++) {
9811 		lo = hba->sli.sli4.VPIBase[i];
9812 		hi = lo + hba->sli.sli4.VPIExtSize;
9813 
9814 		if ((vpi < hi) && (vpi >= lo)) {
9815 			index = (i * hba->sli.sli4.VPIExtSize) + (vpi - lo);
9816 			break;
9817 		}
9818 	}
9819 
9820 	return (index);
9821 
9822 } /* emlxs_sli4_vpi_to_index */
9823 
9824 
9825 
9826 
9827 extern uint32_t
9828 emlxs_sli4_index_to_vfi(emlxs_hba_t *hba, uint32_t index)
9829 {
9830 	uint32_t i;
9831 	uint32_t j;
9832 	uint32_t vfi;
9833 
9834 	i = index / hba->sli.sli4.VFIExtSize;
9835 	j = index % hba->sli.sli4.VFIExtSize;
9836 	vfi = hba->sli.sli4.VFIBase[i] + j;
9837 
9838 	return (vfi);
9839 
9840 } /* emlxs_sli4_index_to_vfi */
9841 
9842 
9843 static uint16_t
9844 emlxs_sli4_rqid_to_index(emlxs_hba_t *hba, uint16_t rqid)
9845 {
9846 	uint16_t i;
9847 
9848 	if (rqid < 0xffff) {
9849 		for (i = 0; i < EMLXS_MAX_RQS; i++) {
9850 			if (hba->sli.sli4.rq[i].qid == rqid) {
9851 				return (i);
9852 			}
9853 		}
9854 	}
9855 
9856 	return (0xffff);
9857 
9858 } /* emlxs_sli4_rqid_to_index */
9859 
9860 
9861 static uint16_t
9862 emlxs_sli4_wqid_to_index(emlxs_hba_t *hba, uint16_t wqid)
9863 {
9864 	uint16_t i;
9865 
9866 	if (wqid < 0xffff) {
9867 		for (i = 0; i < EMLXS_MAX_WQS; i++) {
9868 			if (hba->sli.sli4.wq[i].qid == wqid) {
9869 				return (i);
9870 			}
9871 		}
9872 	}
9873 
9874 	return (0xffff);
9875 
9876 } /* emlxs_sli4_wqid_to_index */
9877 
9878 
9879 static uint16_t
9880 emlxs_sli4_cqid_to_index(emlxs_hba_t *hba, uint16_t cqid)
9881 {
9882 	uint16_t i;
9883 
9884 	if (cqid < 0xffff) {
9885 		for (i = 0; i < EMLXS_MAX_CQS; i++) {
9886 			if (hba->sli.sli4.cq[i].qid == cqid) {
9887 				return (i);
9888 			}
9889 		}
9890 	}
9891 
9892 	return (0xffff);
9893 
9894 } /* emlxs_sli4_cqid_to_index */
9895