/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/**
 * @file
 * Defines and implements the Hardware Abstraction Layer (HW).
 * All interaction with the hardware is performed through the HW, which abstracts
 * the details of the underlying SLI-4 implementation.
 */

/**
 * @defgroup devInitShutdown Device Initialization and Shutdown
 * @defgroup domain Domain Functions
 * @defgroup port Port Functions
 * @defgroup node Remote Node Functions
 * @defgroup io IO Functions
 * @defgroup interrupt Interrupt handling
 * @defgroup os OS Required Functions
 */

#include "ocs.h"
#include "ocs_os.h"
#include "ocs_hw.h"
#include "ocs_hw_queues.h"

#define OCS_HW_MQ_DEPTH	128
#define OCS_HW_READ_FCF_SIZE	4096
#define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS	256
#define OCS_HW_WQ_TIMER_PERIOD_MS	500

/* Values used for setting the auto xfer rdy parameters */
#define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT		0 /* 512 bytes */
#define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT	TRUE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT	FALSE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT	0
#define OCS_HW_REQUE_XRI_REGTAG			65534
/* Max command and response buffer lengths -- arbitrary at the moment */
#define OCS_HW_DMTF_CLP_CMD_MAX	256
#define OCS_HW_DMTF_CLP_RSP_MAX	256

/* HW global data */
ocs_hw_global_t hw_global;
static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
static int32_t ocs_hw_cb_link(void *, void *);
static int32_t ocs_hw_cb_fip(void *, void *);
static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
static int32_t ocs_hw_flush(ocs_hw_t *);
static int32_t ocs_hw_command_cancel(ocs_hw_t *);
static int32_t ocs_hw_io_cancel(ocs_hw_t *);
static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);

static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
static void ocs_hw_io_free_internal(void *arg);
static void ocs_hw_io_free_port_owned(void *arg);
static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);

/* HW domain database operations */
static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);

/* Port state machine */
static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);

/* Domain state machine */
static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);

/* BZ 161832 */
static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);

/* WQE timeouts */
static void target_wqe_timer_cb(void *arg);
static void shutdown_target_wqe_timer(ocs_hw_t *hw);

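/**
 * @brief Add an IO to the active WQE timeout list.
 *
 * @par Description
 * If target WQE timeouts are being emulated and the IO has a timeout,
 * record the submit time and add the IO to the timed WQE list that the
 * periodic target WQE timer scans.
 *
 * @param hw Hardware context.
 * @param io IO being submitted.
 */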
static inline void
ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
		/*
		 * Active WQE list currently only used for
		 * target WQE timeouts.
		 */
		ocs_lock(&hw->io_lock);
			ocs_list_add_tail(&hw->io_timed_wqe, io);
			io->submit_ticks = ocs_get_os_ticks();
		ocs_unlock(&hw->io_lock);
	}
}

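/**
 * @brief Remove an IO from the active WQE timeout list.
 *
 * @param hw Hardware context.
 * @param io IO being completed or cancelled.
 */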
static inline void
ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (hw->config.emulate_tgt_wqe_timeout) {
		/*
		 * If target wqe timeouts are enabled,
		 * remove from active wqe list.
		 */
		ocs_lock(&hw->io_lock);
			if (ocs_list_on_list(&io->wqe_link)) {
				ocs_list_remove(&hw->io_timed_wqe, io);
			}
		ocs_unlock(&hw->io_lock);
	}
}

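/**
 * @brief Check whether an IO type originates an exchange.
 *
 * @param io_type IO type (OCS_HW_IO_xxx).
 *
 * @return Returns 1 if the IO type is an exchange originator, otherwise 0.
 */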
static uint8_t
ocs_hw_iotype_is_originator(uint16_t io_type)
{
	switch (io_type) {
	case OCS_HW_IO_INITIATOR_READ:
	case OCS_HW_IO_INITIATOR_WRITE:
	case OCS_HW_IO_INITIATOR_NODATA:
	case OCS_HW_FC_CT:
	case OCS_HW_ELS_REQ:
		return 1;
	default:
		return 0;
	}
}

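/**
 * @brief Determine whether a WCQE completion requires an abort.
 *
 * @par Description
 * Examines the WCQE completion status, extended status, and XB flag to
 * decide whether the exchange remains active on the chip and must be
 * explicitly aborted.
 *
 * @param status Completion status from the WCQE.
 * @param ext Extended status from the WCQE.
 * @param xb Exchange busy (XB) flag from the WCQE.
 *
 * @return Returns TRUE if an abort is needed, otherwise FALSE.
 */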
static uint8_t
ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
{
	/* if exchange not active, nothing to abort */
	if (!xb) {
		return FALSE;
	}
	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
		switch (ext) {
		/* exceptions where abort is not needed */
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
			return FALSE;
		default:
			break;
		}
	}
	return TRUE;
}

/**
 * @brief Determine the number of chutes on the device.
 *
 * @par Description
 * Some devices require queue resources allocated per protocol processor
 * (chute). This function returns the number of chutes on this device.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns the number of chutes on the device used for protocol processing.
 */
static uint32_t
ocs_hw_get_num_chutes(ocs_hw_t *hw)
{
	uint32_t num_chutes = 1;

	if (sli_get_is_dual_ulp_capable(&hw->sli) &&
	    sli_get_is_ulp_enabled(&hw->sli, 0) &&
	    sli_get_is_ulp_enabled(&hw->sli, 1)) {
		num_chutes = 2;
	}
	return num_chutes;
}

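/**
 * @brief Initialize the HW link state fields to their "unknown" values.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */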
static ocs_hw_rtn_e
ocs_hw_link_event_init(ocs_hw_t *hw)
{
	ocs_hw_assert(hw);

	hw->link.status = SLI_LINK_STATUS_MAX;
	hw->link.topology = SLI_LINK_TOPO_NONE;
	hw->link.medium = SLI_LINK_MEDIUM_MAX;
	hw->link.speed = 0;
	hw->link.loop_map = NULL;
	hw->link.fc_id = UINT32_MAX;

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup devInitShutdown
 * @brief If this is physical port 0, then read the max dump size.
 *
 * @par Description
 * Queries the FW for the maximum dump size.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static ocs_hw_rtn_e
ocs_hw_read_max_dump_size(ocs_hw_t *hw)
{
	uint8_t	buf[SLI4_BMBX_SIZE];
	uint8_t	bus, dev, func;
	int	rc;

	/* Lancer only */
	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
		ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Make sure the FW is new enough to support this command. If the FW
	 * is too old, the FW will UE.
	 */
	if (hw->workaround.disable_dump_loc) {
		ocs_log_test(hw->os, "FW version is too old for this feature\n");
		return OCS_HW_RTN_ERROR;
	}

	/* Attempt to determine the dump size for function 0 only. */
	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
	if (func == 0) {
		if (sli_cmd_common_set_dump_location(&hw->sli, buf,
							SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
			sli4_res_common_set_dump_location_t *rsp =
				(sli4_res_common_set_dump_location_t *)
				(buf + offsetof(sli4_cmd_sli_config_t,
						payload.embed));

			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_test(hw->os, "set dump location command failed\n");
				return rc;
			} else {
				hw->dump_size = rsp->buffer_length;
				ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
			}
		}
	}
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @ingroup devInitShutdown
 * @brief Set up the Hardware Abstraction Layer module.
 *
 * @par Description
 * Performs the one-time setup needed to configure the hardware.
 *
 * @param hw Hardware context allocated by the caller.
 * @param os Device abstraction.
 * @param port_type Protocol type of port, such as FC or NIC.
 *
 * @todo Why is port_type a parameter?
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
{
	uint32_t i;
	char prop_buf[32];

	if (hw == NULL) {
		ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->hw_setup_called) {
		/*
		 * Set up the run-time workarounds. This is done on every call
		 * to ocs_hw_setup() to allow for hw_war_version.
		 */
		ocs_hw_workaround_setup(hw);
		return OCS_HW_RTN_SUCCESS;
	}

	/*
	 * ocs_hw_init() relies on NULL pointers indicating that a structure
	 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
	 * free/realloc that memory.
	 */
	ocs_memset(hw, 0, sizeof(ocs_hw_t));

	hw->hw_setup_called = TRUE;

	hw->os = os;

	ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
	ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
	ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
	hw->cmd_head_count = 0;

	ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
	ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));

	ocs_atomic_init(&hw->io_alloc_failed_count, 0);

	hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
	hw->config.dif_seed = 0;
	hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
	hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;

	if (sli_setup(&hw->sli, hw->os, port_type)) {
		ocs_log_err(hw->os, "SLI setup failed\n");
		return OCS_HW_RTN_ERROR;
	}

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	ocs_hw_link_event_init(hw);

	sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
	sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);

	/*
	 * Set all the queue sizes to the maximum allowed. These values may
	 * be changed later by the adjust and workaround functions.
	 */
	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
		hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
	}

	/*
	 * The RQ assignment for RQ pair mode.
	 */
	hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
	hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
	if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
		hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
	}

	/* By default, enable initiator-only auto-ABTS emulation */
	hw->config.i_only_aab = TRUE;

	/* Set up the run-time workarounds */
	ocs_hw_workaround_setup(hw);

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;
	}

	/* Must be done after the workaround setup */
	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
		(void)ocs_hw_read_max_dump_size(hw);
	}

	/* Calculate the number of WQs required. */
	ocs_hw_adjust_wqs(hw);

	/* Set the default DIF mode */
	if (!sli_is_dif_inline_capable(&hw->sli)) {
		ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
		hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
	}
	/* Workaround: BZ 161832 */
	if (hw->workaround.use_dif_sec_xri) {
		ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
	}

	/*
	 * Figure out the starting and max ULP to spread the WQs across the
	 * ULPs.
	 */
	if (sli_get_is_dual_ulp_capable(&hw->sli)) {
		if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
		    sli_get_is_ulp_enabled(&hw->sli, 1)) {
			hw->ulp_start = 0;
			hw->ulp_max   = 1;
		} else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
			hw->ulp_start = 0;
			hw->ulp_max   = 0;
		} else {
			hw->ulp_start = 1;
			hw->ulp_max   = 1;
		}
	} else {
		if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
			hw->ulp_start = 0;
			hw->ulp_max   = 0;
		} else {
			hw->ulp_start = 1;
			hw->ulp_max   = 1;
		}
	}
	ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
		hw->ulp_start, hw->ulp_max);
	hw->config.queue_topology = hw_global.queue_topology_string;

	hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);

	hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
	hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
	hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
	hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
	hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];

	/* Verify the qtop configuration against the driver-supported configuration */
	if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
		ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
				OCE_HW_MAX_NUM_MRQ_PAIRS);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
		ocs_log_crit(hw->os, "Max supported EQs = %d\n",
				OCS_HW_MAX_NUM_EQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
		ocs_log_crit(hw->os, "Max supported CQs = %d\n",
				OCS_HW_MAX_NUM_CQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
		ocs_log_crit(hw->os, "Max supported WQs = %d\n",
				OCS_HW_MAX_NUM_WQ);
		return OCS_HW_RTN_ERROR;
	}

	if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
		ocs_log_crit(hw->os, "Max supported MQs = %d\n",
				OCS_HW_MAX_NUM_MQ);
		return OCS_HW_RTN_ERROR;
	}

	return OCS_HW_RTN_SUCCESS;
}
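
/*
 * Usage sketch (not part of this driver): a typical bring-up pairs
 * ocs_hw_setup() with ocs_hw_init(), and ocs_hw_teardown() on the way down.
 * The SLI4_PORT_TYPE_FC enumerator is assumed from sli4.h.
 *
 *	ocs_hw_t hw;
 *
 *	if (ocs_hw_setup(&hw, os, SLI4_PORT_TYPE_FC) == OCS_HW_RTN_SUCCESS &&
 *	    ocs_hw_init(&hw) == OCS_HW_RTN_SUCCESS) {
 *		... submit IOs and service the queues ...
 *		ocs_hw_teardown(&hw);
 *	}
 */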

/**
 * @ingroup devInitShutdown
 * @brief Allocate memory structures to prepare for the device operation.
 *
 * @par Description
 * Allocates memory structures needed by the device and prepares the device
 * for operation.
 * @n @n @b Note: This function may be called more than once (for example, at
 * initialization and then after a reset), but the size of the internal resources
 * may not be changed without tearing down the HW (ocs_hw_teardown()).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_init(ocs_hw_t *hw)
{
	ocs_hw_rtn_e	rc;
	uint32_t	i = 0;
	uint8_t		buf[SLI4_BMBX_SIZE];
	uint32_t	max_rpi;
	int		rem_count;
	int		written_size = 0;
	uint32_t	count;
	char		prop_buf[32];
	uint32_t	ramdisc_blocksize = 512;
	uint32_t	q_count = 0;

	/*
	 * Make sure the command lists are empty. If this is start-of-day,
	 * they'll be empty since they were just initialized in ocs_hw_setup.
	 * If we've just gone through a reset, the command and command pending
	 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
	 */
	ocs_lock(&hw->cmd_lock);
		if (!ocs_list_empty(&hw->cmd_head)) {
			ocs_log_test(hw->os, "command found on cmd list\n");
			ocs_unlock(&hw->cmd_lock);
			return OCS_HW_RTN_ERROR;
		}
		if (!ocs_list_empty(&hw->cmd_pending)) {
			ocs_log_test(hw->os, "command found on pending list\n");
			ocs_unlock(&hw->cmd_lock);
			return OCS_HW_RTN_ERROR;
		}
	ocs_unlock(&hw->cmd_lock);

	/* Free the RQ buffers if previously allocated */
	ocs_hw_rx_free(hw);

	/*
	 * The IO queues must be initialized here for the reset case. The
	 * ocs_hw_init_io() function will re-add the IOs to the free list.
	 * The cmd_head list should be OK since we free all entries in
	 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
	 */

	/* If we are in this function due to a reset, there may be stale items
	 * on lists that need to be removed.  Clean them up.
	 */
	rem_count = 0;
	if (ocs_list_valid(&hw->io_wait_free)) {
		while ((!ocs_list_empty(&hw->io_wait_free))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_wait_free);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
		}
	}
	rem_count = 0;
	if (ocs_list_valid(&hw->io_inuse)) {
		while ((!ocs_list_empty(&hw->io_inuse))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_inuse);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
		}
	}
	rem_count = 0;
	if (ocs_list_valid(&hw->io_free)) {
		while ((!ocs_list_empty(&hw->io_free))) {
			rem_count++;
			ocs_list_remove_head(&hw->io_free);
		}
		if (rem_count > 0) {
			ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
		}
	}
	if (ocs_list_valid(&hw->io_port_owned)) {
		while ((!ocs_list_empty(&hw->io_port_owned))) {
			ocs_list_remove_head(&hw->io_port_owned);
		}
	}
	ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
	ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);

	/* If MRQ is not required, make sure we don't request the feature. */
	if (hw->config.n_rq == 1) {
		hw->sli.config.features.flag.mrqp = FALSE;
	}

	if (sli_init(&hw->sli)) {
		ocs_log_err(hw->os, "SLI failed to initialize\n");
		return OCS_HW_RTN_ERROR;
	}

	/*
	 * Enable the auto xfer rdy feature if requested.
	 */
	hw->auto_xfer_rdy_enabled = FALSE;
	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
	    hw->config.auto_xfer_rdy_size > 0) {
		if (hw->config.esoc) {
			if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
				ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
			}
			written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
		} else {
			written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
		}
		if (written_size) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "config auto xfer rdy failed\n");
				return rc;
			}
		}
		hw->auto_xfer_rdy_enabled = TRUE;

		if (hw->config.auto_xfer_rdy_t10_enable) {
			rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
				return rc;
			}
		}
	}

	if (hw->sliport_healthcheck) {
		rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Enabling SLI port health check failed\n");
			return rc;
		}
	}

	/*
	 * Set the FDT transfer hint; only works on Lancer.
	 */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
		/*
		 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
		 * devices with legacy firmware that do not support the OCS_HW_FDT_XFER_HINT feature.
		 */
		ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
	}

	/*
	 * Verify that we have not exceeded any queue sizes.
	 */
	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
					OCS_HW_MAX_NUM_EQ);
	if (hw->config.n_eq > q_count) {
		ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
			    hw->config.n_eq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
					OCS_HW_MAX_NUM_CQ);
	if (hw->config.n_cq > q_count) {
		ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
			    hw->config.n_cq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
					OCS_HW_MAX_NUM_MQ);
	if (hw->config.n_mq > q_count) {
		ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
			    hw->config.n_mq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
					OCS_HW_MAX_NUM_RQ);
	if (hw->config.n_rq > q_count) {
		ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
			    hw->config.n_rq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
					OCS_HW_MAX_NUM_WQ);
	if (hw->config.n_wq > q_count) {
		ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
			    hw->config.n_wq, q_count);
		return OCS_HW_RTN_ERROR;
	}

	/* Zero the hashes */
	ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
	ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
			OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
	ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
			OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
	ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
			OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);

	rc = ocs_hw_init_queues(hw, hw->qtop);
	if (rc != OCS_HW_RTN_SUCCESS) {
		return rc;
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
	if (i) {
		ocs_dma_t payload_memory;

		rc = OCS_HW_RTN_ERROR;

		if (hw->rnode_mem.size) {
			ocs_dma_free(hw->os, &hw->rnode_mem);
		}

		if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
			ocs_log_err(hw->os, "remote node memory allocation fail\n");
			return OCS_HW_RTN_NO_MEMORY;
		}

		payload_memory.size = 0;
		if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
					&hw->rnode_mem, UINT16_MAX, &payload_memory)) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

			if (payload_memory.size != 0) {
				/* The command was non-embedded - need to free the DMA buffer */
				ocs_dma_free(hw->os, &payload_memory);
			}
		}

		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "header template registration failed\n");
			return rc;
		}
	}

	/* Allocate and post RQ buffers */
	rc = ocs_hw_rx_allocate(hw);
	if (rc) {
		ocs_log_err(hw->os, "rx_allocate failed\n");
		return rc;
	}

	/* Populate hw->seq_pool, used for unsolicited sequence objects */
	if (hw->seq_pool == NULL) {
		uint32_t count = 0;
		uint32_t i;

		/* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
		for (i = 0; i < hw->hw_rq_count; i++) {
			count += hw->hw_rq[i]->entry_count;
		}

		hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
		if (hw->seq_pool == NULL) {
			ocs_log_err(hw->os, "malloc seq_pool failed\n");
			return OCS_HW_RTN_NO_MEMORY;
		}
	}

	if (ocs_hw_rx_post(hw)) {
		ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
	}

	/* Allocate rpi_ref if not previously allocated */
	if (hw->rpi_ref == NULL) {
		hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
					  OCS_M_ZERO | OCS_M_NOWAIT);
		if (hw->rpi_ref == NULL) {
			ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
			return OCS_HW_RTN_NO_MEMORY;
		}
	}

	for (i = 0; i < max_rpi; i++) {
		ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
		ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
	}

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;
	}

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	/* Register an FCFI to allow unsolicited frames to be routed to the driver */
	if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
		if (hw->hw_mrq_count) {
			ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
				return rc;
			}

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
				return rc;
			}
		} else {
			sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];

			ocs_log_debug(hw->os, "using REG_FCFI standard\n");

			/* Set the filter match/mask values from hw's filter_def values */
			for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
				rq_cfg[i].rq_id = 0xffff;
				rq_cfg[i].r_ctl_mask =	(uint8_t)  hw->config.filter_def[i];
				rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
				rq_cfg[i].type_mask =	(uint8_t) (hw->config.filter_def[i] >> 16);
				rq_cfg[i].type_match =	(uint8_t) (hw->config.filter_def[i] >> 24);
			}

			/*
			 * Update the rq_ids of the FCF configuration (don't update more than the number
			 * of rq_cfg elements).
			 */
			for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
				hw_rq_t *rq = hw->hw_rq[i];
				uint32_t j;
				for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
					uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
					if (mask & (1U << j)) {
						rq_cfg[j].rq_id = rq->hdr->id;
						ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
							j, hw->config.filter_def[j], i, rq->hdr->id);
					}
				}
			}

			rc = OCS_HW_RTN_ERROR;

			if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
				rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
			}

			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "FCFI registration failed\n");
				return rc;
			}
			hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
		}
	}

	/*
	 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
	 * thus the pool allocation size of 64k).
	 */
	rc = ocs_hw_reqtag_init(hw);
	if (rc) {
		ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
		return rc;
	}

	rc = ocs_hw_setup_io(hw);
	if (rc) {
		ocs_log_err(hw->os, "IO allocation failure\n");
		return rc;
	}

	rc = ocs_hw_init_io(hw);
	if (rc) {
		ocs_log_err(hw->os, "IO initialization failure\n");
		return rc;
	}

	ocs_queue_history_init(hw->os, &hw->q_hist);

905 	hw->linkcfg = OCS_HW_LINKCFG_NA;
906 	ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);
907 
908 	/* if lancer ethernet, ethernet ports need to be enabled */
909 	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
910 	    (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
911 		if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
912 			/* log warning but continue */
913 			ocs_log_err(hw->os, "Failed to set ethernet license\n");
914 		}
915 	}
916 
917 	/* Set the DIF seed - only for lancer right now */
918 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
919 	    ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
920 		ocs_log_err(hw->os, "Failed to set DIF seed value\n");
921 		return rc;
922 	}
923 
924 	/* Set the DIF mode - skyhawk only */
925 	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
926 	    sli_get_dif_capable(&hw->sli)) {
927 		rc = ocs_hw_set_dif_mode(hw);
928 		if (rc != OCS_HW_RTN_SUCCESS) {
929 			ocs_log_err(hw->os, "Failed to set DIF mode value\n");
930 			return rc;
931 		}
932 	}

	/*
	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries.
	 */
	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
	}

	/*
	 * Initialize the RQ hash.
	 */
	for (i = 0; i < hw->rq_count; i++) {
		ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
	}

	/*
	 * Initialize the WQ hash.
	 */
	for (i = 0; i < hw->wq_count; i++) {
		ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
	}

	/*
	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries.
	 */
	for (i = 0; i < hw->cq_count; i++) {
		ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
		sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
	}

	/* Record the fact that the queues are functional */
	hw->state = OCS_HW_STATE_ACTIVE;

	/* Note: Must be after the IOs are set up and the state is active */
	if (ocs_hw_rqpair_init(hw)) {
		ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
	}

	/* Finally, kick off the periodic timer to check for timed-out target WQEs */
	if (hw->config.emulate_tgt_wqe_timeout) {
		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
				OCS_HW_WQ_TIMER_PERIOD_MS);
	}

	/*
	 * Allocate HW IOs for send frame: one for each class 1 WQ, or if there
	 * are none of those, one for WQ[0].
	 */
	if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
		for (i = 0; i < count; i++) {
			hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
			wq->send_frame_io = ocs_hw_io_alloc(hw);
			if (wq->send_frame_io == NULL) {
				ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
			}
		}
	} else {
		hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
		if (hw->hw_wq[0]->send_frame_io == NULL) {
			ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
		}
	}

	/* Initialize the send frame sequence id */
	ocs_atomic_init(&hw->send_frame_seq_id, 0);

	/* Initialize the watchdog timer if enabled by the user */
	hw->expiration_logged = 0;
	if (hw->watchdog_timeout) {
		if ((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
			ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
		} else if (!ocs_hw_config_watchdog_timer(hw)) {
			ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds\n", hw->watchdog_timeout);
		}
	}

	if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
		ocs_log_err(hw->os, "domain node memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
		ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;
	}

	if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
				SLI4_MIN_LOOP_MAP_BYTES, 4)) {
		ocs_log_err(hw->os, "Loop dma alloc failed size:%d\n", hw->loop_map.size);
	}

	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Configure Multi-RQ.
 *
 * @param hw	Hardware context allocated by the caller.
 * @param mode	SLI4_CMD_REG_FCFI_SET_MRQ_MODE to set the MRQ filters, or
 *		SLI4_CMD_REG_FCFI_SET_FCFI_MODE to set the FCFI index.
 * @param vlanid    Valid in FCFI mode.
 * @param fcf_index Valid in FCFI mode.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
static int32_t
ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
{
	uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
	hw_rq_t *rq;
	sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
	uint32_t i, j;
	sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
	int32_t rc;

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		goto issue_cmd;
	}

	/* Set the filter match/mask values from hw's filter_def values */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_filter[i].rq_id = 0xffff;
		rq_filter[i].r_ctl_mask  = (uint8_t)  hw->config.filter_def[i];
		rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
		rq_filter[i].type_mask   = (uint8_t) (hw->config.filter_def[i] >> 16);
		rq_filter[i].type_match  = (uint8_t) (hw->config.filter_def[i] >> 24);
	}

	/* Accumulate counts for each filter type used, and build the rq_ids[] list */
	for (i = 0; i < hw->hw_rq_count; i++) {
		rq = hw->hw_rq[i];
		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
			if (rq->filter_mask & (1U << j)) {
				if (rq_filter[j].rq_id != 0xffff) {
					/* Already used. Bail out if this is not the RQ set case. */
					if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
						ocs_log_err(hw->os, "Wrong queue topology.\n");
						return OCS_HW_RTN_ERROR;
					}
					continue;
				}

				if (rq->is_mrq) {
					rq_filter[j].rq_id = rq->base_mrq_id;
					mrq_bitmask |= (1U << j);
				} else {
					rq_filter[j].rq_id = rq->hdr->id;
				}
			}
		}
	}

issue_cmd:
	/* Invoke REG_FCFI_MRQ */
	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
				 buf,					/* buf */
				 SLI4_BMBX_SIZE,			/* size */
				 mode,					/* mode */
				 fcf_index,				/* fcf_index */
				 vlanid,				/* vlan_id */
				 hw->config.rq_selection_policy,	/* RQ selection policy */
				 mrq_bitmask,				/* MRQ bitmask */
				 hw->hw_mrq_count,			/* num_mrqs */
				 rq_filter);				/* RQ filter */
	if (rc == 0) {
		ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
		return OCS_HW_RTN_ERROR;
	}

	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

	rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;

	if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
		ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
			    rsp->hdr.command, rsp->hdr.status);
		return OCS_HW_RTN_ERROR;
	}

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		hw->fcf_indicator = rsp->fcfi;
	}
	return 0;
}

/**
 * @brief Callback function for getting linkcfg during HW initialization.
 *
 * @param status Status of the linkcfg get operation.
 * @param value Link configuration enum to which the link configuration is set.
 * @param arg Callback argument (ocs_hw_t *).
 *
 * @return None.
 */
static void
ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
{
	ocs_hw_t *hw = (ocs_hw_t *)arg;
	if (status == 0) {
		hw->linkcfg = (ocs_hw_linkcfg_e)value;
	} else {
		hw->linkcfg = OCS_HW_LINKCFG_NA;
	}
	ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
}

/**
 * @ingroup devInitShutdown
 * @brief Tear down the Hardware Abstraction Layer module.
 *
 * @par Description
 * Frees memory structures needed by the device, and shuts down the device. Does
 * not free the HW context memory (which is done by the caller).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_teardown(ocs_hw_t *hw)
{
	uint32_t	i = 0;
	uint32_t	iters = 10; /* XXX */
	uint32_t	max_rpi;
	uint32_t	destroy_queues;
	uint32_t	free_memory;

	if (!hw) {
		ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;
	}

	destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
	free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);

	/* Shut down the target WQE timer */
	shutdown_target_wqe_timer(hw);

	/* Cancel the watchdog timer if enabled */
	if (hw->watchdog_timeout) {
		hw->watchdog_timeout = 0;
		ocs_hw_config_watchdog_timer(hw);
	}

	/* Cancel the SLI port health check */
	if (hw->sliport_healthcheck) {
		hw->sliport_healthcheck = 0;
		ocs_hw_config_sli_port_health_check(hw, 0, 0);
	}

	if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;

		ocs_hw_flush(hw);

		/* If there are outstanding commands, wait for them to complete */
		while (!ocs_list_empty(&hw->cmd_head) && iters) {
			ocs_udelay(10000);
			ocs_hw_flush(hw);
			iters--;
		}

		if (ocs_list_empty(&hw->cmd_head)) {
			ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
		} else {
			ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
		}

		/* Cancel any remaining commands */
		ocs_hw_command_cancel(hw);
	} else {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
	}

	ocs_lock_free(&hw->cmd_lock);

	/* Free the unregistered RPI if the workaround is in force */
	if (hw->workaround.use_unregistered_rpi) {
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
	}

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	if (hw->rpi_ref) {
		for (i = 0; i < max_rpi; i++) {
			if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
				ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
						i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
			}
		}
		ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
		hw->rpi_ref = NULL;
	}

	ocs_dma_free(hw->os, &hw->rnode_mem);

	if (hw->io) {
		for (i = 0; i < hw->config.n_io; i++) {
			if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
			    (hw->io[i]->sgl->virt != NULL)) {
				if (hw->io[i]->is_port_owned) {
					ocs_lock_free(&hw->io[i]->axr_lock);
				}
				ocs_dma_free(hw->os, hw->io[i]->sgl);
			}
			ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
			hw->io[i] = NULL;
		}
		ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
		hw->wqe_buffs = NULL;
		ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
		hw->io = NULL;
	}

	ocs_dma_free(hw->os, &hw->xfer_rdy);
	ocs_dma_free(hw->os, &hw->dump_sges);
	ocs_dma_free(hw->os, &hw->loop_map);

	ocs_lock_free(&hw->io_lock);
	ocs_lock_free(&hw->io_abort_lock);

	for (i = 0; i < hw->wq_count; i++) {
		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->rq_count; i++) {
		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->mq_count; i++) {
		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->cq_count; i++) {
		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
	}

	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
	}

	ocs_hw_qtop_free(hw->qtop);

	/* Free the RQ buffers */
	ocs_hw_rx_free(hw);

	hw_queue_teardown(hw);

	ocs_hw_rqpair_teardown(hw);

	if (sli_teardown(&hw->sli)) {
		ocs_log_err(hw->os, "SLI teardown failed\n");
	}

	ocs_queue_history_free(&hw->q_hist);

	/* Record the fact that the queues are non-functional */
	hw->state = OCS_HW_STATE_UNINITIALIZED;

	/* Free the sequence free pool */
	ocs_array_free(hw->seq_pool);
	hw->seq_pool = NULL;

	/* Free the hw_wq_callback pool */
	ocs_pool_free(hw->wq_reqtag_pool);

	ocs_dma_free(hw->os, &hw->domain_dmem);
	ocs_dma_free(hw->os, &hw->fcf_dmem);
	/* Mark HW setup as not having been called */
	hw->hw_setup_called = FALSE;

	return OCS_HW_RTN_SUCCESS;
}

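/**
 * @ingroup devInitShutdown
 * @brief Reset the HW and the underlying SLI port.
 *
 * @par Description
 * Attempts to complete or cancel outstanding mailbox commands, issues the
 * requested function-level or firmware reset, cleans up the command and IO
 * lists, and resets all queues. On return, the HW state is
 * OCS_HW_STATE_QUEUES_ALLOCATED and ocs_hw_init() must be called before the
 * HW can be used again.
 *
 * @param hw Hardware context allocated by the caller.
 * @param reset Reset type (OCS_HW_RESET_FUNCTION or OCS_HW_RESET_FIRMWARE).
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */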
ocs_hw_rtn_e
ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
{
	uint32_t	i;
	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
	uint32_t	iters;
	ocs_hw_state_e	prev_state = hw->state;

	if (hw->state != OCS_HW_STATE_ACTIVE) {
		ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
	}

	hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;

	/* Shut down the target WQE timer */
	shutdown_target_wqe_timer(hw);

	ocs_hw_flush(hw);

	/*
	 * If a mailbox command requiring a DMA is outstanding (e.g. SFP/DDM),
	 * then the FW will UE when the reset is issued. So attempt to complete
	 * all mailbox commands.
	 */
	iters = 10;
	while (!ocs_list_empty(&hw->cmd_head) && iters) {
		ocs_udelay(10000);
		ocs_hw_flush(hw);
		iters--;
	}

	if (ocs_list_empty(&hw->cmd_head)) {
		ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
	} else {
		ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
	}

	/* Reset the chip */
	switch (reset) {
	case OCS_HW_RESET_FUNCTION:
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	case OCS_HW_RESET_FIRMWARE:
		ocs_log_debug(hw->os, "issuing firmware reset\n");
		if (sli_fw_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_fw_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		/*
		 * Because the FW reset leaves the FW in a non-running state,
		 * follow that with a regular reset.
		 */
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		}
		break;
	default:
		ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
		hw->state = prev_state;
		return OCS_HW_RTN_ERROR;
	}

	/* Not safe to walk command/io lists unless they've been initialized */
	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		ocs_hw_command_cancel(hw);

		/* Clean up the inuse list, the free list, and the wait free list */
		ocs_hw_io_cancel(hw);

		ocs_memset(hw->domains, 0, sizeof(hw->domains));
		ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

		ocs_hw_link_event_init(hw);

		ocs_lock(&hw->io_lock);
			/* The io lists should be empty, but remove any that didn't get cleaned up. */
			while (!ocs_list_empty(&hw->io_timed_wqe)) {
				ocs_list_remove_head(&hw->io_timed_wqe);
			}
			/* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */

			while (!ocs_list_empty(&hw->io_free)) {
				ocs_list_remove_head(&hw->io_free);
			}
			while (!ocs_list_empty(&hw->io_wait_free)) {
				ocs_list_remove_head(&hw->io_wait_free);
			}

			/* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
			ocs_hw_reqtag_reset(hw);

		ocs_unlock(&hw->io_lock);
	}

	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		for (i = 0; i < hw->wq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->wq[i]);
		}

		for (i = 0; i < hw->rq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->rq[i]);
		}

		for (i = 0; i < hw->hw_rq_count; i++) {
			hw_rq_t *rq = hw->hw_rq[i];
			if (rq->rq_tracker != NULL) {
				uint32_t j;

				for (j = 0; j < rq->entry_count; j++) {
					rq->rq_tracker[j] = NULL;
				}
			}
		}

		for (i = 0; i < hw->mq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->mq[i]);
		}

		for (i = 0; i < hw->cq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->cq[i]);
		}

		for (i = 0; i < hw->eq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->eq[i]);
		}

		/* Free the RQ buffers */
		ocs_hw_rx_free(hw);

		/* Tear down the HW queue topology */
		hw_queue_teardown(hw);
	} else {
		/* Free the RQ buffers */
		ocs_hw_rx_free(hw);
	}

	/*
	 * Re-apply the run-time workarounds after clearing the SLI config
	 * fields in sli_reset.
	 */
	ocs_hw_workaround_setup(hw);
	hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;

	return rc;
}

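/**
 * @brief Return the number of event queues allocated by the HW.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns the EQ count.
 */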
int32_t
ocs_hw_get_num_eq(ocs_hw_t *hw)
{
	return hw->eq_count;
}

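/**
 * @brief Check whether the firmware has reported a watchdog timeout.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns TRUE if the SLIPORT error registers indicate a watchdog
 * timer expiration, otherwise FALSE.
 */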
static int32_t
ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
{
	/*
	 * The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf.
	 * No further explanation is given in the document.
	 */
	return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
		sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
}

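/**
 * @brief Read a HW property value.
 *
 * @param hw Hardware context allocated by the caller.
 * @param prop Property to query (OCS_HW_xxx).
 * @param value Pointer to the location where the property value is returned.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */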
1478 ocs_hw_rtn_e
1479 ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
1480 {
1481 	ocs_hw_rtn_e		rc = OCS_HW_RTN_SUCCESS;
1482 	int32_t			tmp;
1483 
1484 	if (!value) {
1485 		return OCS_HW_RTN_ERROR;
1486 	}
1487 
1488 	*value = 0;
1489 
1490 	switch (prop) {
1491 	case OCS_HW_N_IO:
1492 		*value = hw->config.n_io;
1493 		break;
1494 	case OCS_HW_N_SGL:
1495 		*value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
1496 		break;
1497 	case OCS_HW_MAX_IO:
1498 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
1499 		break;
1500 	case OCS_HW_MAX_NODES:
1501 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1502 		break;
1503 	case OCS_HW_MAX_RQ_ENTRIES:
1504 		*value = hw->num_qentries[SLI_QTYPE_RQ];
1505 		break;
1506 	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1507 		*value = hw->config.rq_default_buffer_size;
1508 		break;
1509 	case OCS_HW_AUTO_XFER_RDY_CAPABLE:
1510 		*value = sli_get_auto_xfer_rdy_capable(&hw->sli);
1511 		break;
1512 	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1513 		*value = hw->config.auto_xfer_rdy_xri_cnt;
1514 		break;
1515 	case OCS_HW_AUTO_XFER_RDY_SIZE:
1516 		*value = hw->config.auto_xfer_rdy_size;
1517 		break;
1518 	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1519 		switch (hw->config.auto_xfer_rdy_blk_size_chip) {
1520 		case 0:
1521 			*value = 512;
1522 			break;
1523 		case 1:
1524 			*value = 1024;
1525 			break;
1526 		case 2:
1527 			*value = 2048;
1528 			break;
1529 		case 3:
1530 			*value = 4096;
1531 			break;
1532 		case 4:
1533 			*value = 520;
1534 			break;
1535 		default:
1536 			*value = 0;
1537 			rc = OCS_HW_RTN_ERROR;
1538 			break;
1539 		}
1540 		break;
1541 	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1542 		*value = hw->config.auto_xfer_rdy_t10_enable;
1543 		break;
1544 	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1545 		*value = hw->config.auto_xfer_rdy_p_type;
1546 		break;
1547 	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1548 		*value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
1549 		break;
1550 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1551 		*value = hw->config.auto_xfer_rdy_app_tag_valid;
1552 		break;
1553 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1554 		*value = hw->config.auto_xfer_rdy_app_tag_value;
1555 		break;
1556 	case OCS_HW_MAX_SGE:
1557 		*value = sli_get_max_sge(&hw->sli);
1558 		break;
1559 	case OCS_HW_MAX_SGL:
1560 		*value = sli_get_max_sgl(&hw->sli);
1561 		break;
1562 	case OCS_HW_TOPOLOGY:
1563 		/*
1564 		 * Infer link.status based on link.speed.
1565 		 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
1566 		 */
1567 		if (hw->link.speed == 0) {
1568 			*value = OCS_HW_TOPOLOGY_NONE;
1569 			break;
1570 		}
1571 		switch (hw->link.topology) {
1572 		case SLI_LINK_TOPO_NPORT:
1573 			*value = OCS_HW_TOPOLOGY_NPORT;
1574 			break;
1575 		case SLI_LINK_TOPO_LOOP:
1576 			*value = OCS_HW_TOPOLOGY_LOOP;
1577 			break;
1578 		case SLI_LINK_TOPO_NONE:
1579 			*value = OCS_HW_TOPOLOGY_NONE;
1580 			break;
1581 		default:
1582 			ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
1583 			rc = OCS_HW_RTN_ERROR;
1584 			break;
1585 		}
1586 		break;
1587 	case OCS_HW_CONFIG_TOPOLOGY:
1588 		*value = hw->config.topology;
1589 		break;
1590 	case OCS_HW_LINK_SPEED:
1591 		*value = hw->link.speed;
1592 		break;
1593 	case OCS_HW_LINK_CONFIG_SPEED:
1594 		switch (hw->config.speed) {
1595 		case FC_LINK_SPEED_10G:
1596 			*value = 10000;
1597 			break;
1598 		case FC_LINK_SPEED_AUTO_16_8_4:
1599 			*value = 0;
1600 			break;
1601 		case FC_LINK_SPEED_2G:
1602 			*value = 2000;
1603 			break;
1604 		case FC_LINK_SPEED_4G:
1605 			*value = 4000;
1606 			break;
1607 		case FC_LINK_SPEED_8G:
1608 			*value = 8000;
1609 			break;
1610 		case FC_LINK_SPEED_16G:
1611 			*value = 16000;
1612 			break;
1613 		case FC_LINK_SPEED_32G:
1614 			*value = 32000;
1615 			break;
1616 		default:
1617 			ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
1618 			rc = OCS_HW_RTN_ERROR;
1619 			break;
1620 		}
1621 		break;
1622 	case OCS_HW_IF_TYPE:
1623 		*value = sli_get_if_type(&hw->sli);
1624 		break;
1625 	case OCS_HW_SLI_REV:
1626 		*value = sli_get_sli_rev(&hw->sli);
1627 		break;
1628 	case OCS_HW_SLI_FAMILY:
1629 		*value = sli_get_sli_family(&hw->sli);
1630 		break;
1631 	case OCS_HW_DIF_CAPABLE:
1632 		*value = sli_get_dif_capable(&hw->sli);
1633 		break;
1634 	case OCS_HW_DIF_SEED:
1635 		*value = hw->config.dif_seed;
1636 		break;
1637 	case OCS_HW_DIF_MODE:
1638 		*value = hw->config.dif_mode;
1639 		break;
1640 	case OCS_HW_DIF_MULTI_SEPARATE:
1641 		/* Lancer supports multiple DIF separates */
1642 		if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
1643 			*value = TRUE;
1644 		} else {
1645 			*value = FALSE;
1646 		}
1647 		break;
1648 	case OCS_HW_DUMP_MAX_SIZE:
1649 		*value = hw->dump_size;
1650 		break;
1651 	case OCS_HW_DUMP_READY:
1652 		*value = sli_dump_is_ready(&hw->sli);
1653 		break;
1654 	case OCS_HW_DUMP_PRESENT:
1655 		*value = sli_dump_is_present(&hw->sli);
1656 		break;
1657 	case OCS_HW_RESET_REQUIRED:
1658 		tmp = sli_reset_required(&hw->sli);
1659 		if (tmp < 0) {
1660 			rc = OCS_HW_RTN_ERROR;
1661 		} else {
1662 			*value = tmp;
1663 		}
1664 		break;
1665 	case OCS_HW_FW_ERROR:
1666 		*value = sli_fw_error_status(&hw->sli);
1667 		break;
1668 	case OCS_HW_FW_READY:
1669 		*value = sli_fw_ready(&hw->sli);
1670 		break;
1671 	case OCS_HW_FW_TIMED_OUT:
1672 		*value = ocs_hw_get_fw_timed_out(hw);
1673 		break;
1674 	case OCS_HW_HIGH_LOGIN_MODE:
1675 		*value = sli_get_hlm_capable(&hw->sli);
1676 		break;
1677 	case OCS_HW_PREREGISTER_SGL:
1678 		*value = sli_get_sgl_preregister_required(&hw->sli);
1679 		break;
1680 	case OCS_HW_HW_REV1:
1681 		*value = sli_get_hw_revision(&hw->sli, 0);
1682 		break;
1683 	case OCS_HW_HW_REV2:
1684 		*value = sli_get_hw_revision(&hw->sli, 1);
1685 		break;
1686 	case OCS_HW_HW_REV3:
1687 		*value = sli_get_hw_revision(&hw->sli, 2);
1688 		break;
1689 	case OCS_HW_LINKCFG:
1690 		*value = hw->linkcfg;
1691 		break;
1692 	case OCS_HW_ETH_LICENSE:
1693 		*value = hw->eth_license;
1694 		break;
1695 	case OCS_HW_LINK_MODULE_TYPE:
1696 		*value = sli_get_link_module_type(&hw->sli);
1697 		break;
1698 	case OCS_HW_NUM_CHUTES:
1699 		*value = ocs_hw_get_num_chutes(hw);
1700 		break;
1701 	case OCS_HW_DISABLE_AR_TGT_DIF:
1702 		*value = hw->workaround.disable_ar_tgt_dif;
1703 		break;
1704 	case OCS_HW_EMULATE_I_ONLY_AAB:
1705 		*value = hw->config.i_only_aab;
1706 		break;
1707 	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
1708 		*value = hw->config.emulate_tgt_wqe_timeout;
1709 		break;
1710 	case OCS_HW_VPD_LEN:
1711 		*value = sli_get_vpd_len(&hw->sli);
1712 		break;
1713 	case OCS_HW_SGL_CHAINING_CAPABLE:
1714 		*value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
1715 		break;
1716 	case OCS_HW_SGL_CHAINING_ALLOWED:
1717 		/*
1718 		 * SGL Chaining is allowed in the following cases:
1719 		 *   1. Lancer with host SGL Lists
1720 		 *   2. Skyhawk with pre-registered SGL Lists
1721 		 */
1722 		*value = FALSE;
1723 		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1724 		    !sli_get_sgl_preregister(&hw->sli) &&
1725 		    SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
1726 			*value = TRUE;
1727 		}
1728 
1729 		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1730 		    sli_get_sgl_preregister(&hw->sli) &&
1731 		    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
1732 			(SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
1733 			*value = TRUE;
1734 		}
1735 		break;
1736 	case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
1737 		/* Only lancer supports host allocated SGL Chaining buffers. */
1738 		*value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1739 			  (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
1740 		break;
1741 	case OCS_HW_SEND_FRAME_CAPABLE:
1742 		if (hw->workaround.ignore_send_frame) {
1743 			*value = 0;
1744 		} else {
1745 			/* Only lancer is capable */
1746 			*value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
1747 		}
1748 		break;
1749 	case OCS_HW_RQ_SELECTION_POLICY:
1750 		*value = hw->config.rq_selection_policy;
1751 		break;
1752 	case OCS_HW_RR_QUANTA:
1753 		*value = hw->config.rr_quanta;
1754 		break;
1755 	case OCS_HW_MAX_VPORTS:
1756 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
1757 		break;
1758 	default:
1759 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1760 		rc = OCS_HW_RTN_ERROR;
1761 	}
1762 
1763 	return rc;
1764 }
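
/*
 * Editor's example (illustrative sketch, not part of the driver): read a
 * read-only property through ocs_hw_get(). The property and return codes are
 * from this file; the surrounding caller is hypothetical.
 *
 *	uint32_t max_sge = 0;
 *
 *	if (ocs_hw_get(hw, OCS_HW_MAX_SGE, &max_sge) == OCS_HW_RTN_SUCCESS) {
 *		ocs_log_debug(hw->os, "max SGE %d\n", max_sge);
 *	}
 */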
1765 
1766 void *
1767 ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
1768 {
1769 	void	*rc = NULL;
1770 
1771 	switch (prop) {
1772 	case OCS_HW_WWN_NODE:
1773 		rc = sli_get_wwn_node(&hw->sli);
1774 		break;
1775 	case OCS_HW_WWN_PORT:
1776 		rc = sli_get_wwn_port(&hw->sli);
1777 		break;
1778 	case OCS_HW_VPD:
1779 		/* make sure VPD length is non-zero */
1780 		if (sli_get_vpd_len(&hw->sli)) {
1781 			rc = sli_get_vpd(&hw->sli);
1782 		}
1783 		break;
1784 	case OCS_HW_FW_REV:
1785 		rc = sli_get_fw_name(&hw->sli, 0);
1786 		break;
1787 	case OCS_HW_FW_REV2:
1788 		rc = sli_get_fw_name(&hw->sli, 1);
1789 		break;
1790 	case OCS_HW_IPL:
1791 		rc = sli_get_ipl_name(&hw->sli);
1792 		break;
1793 	case OCS_HW_PORTNUM:
1794 		rc = sli_get_portnum(&hw->sli);
1795 		break;
1796 	case OCS_HW_BIOS_VERSION_STRING:
1797 		rc = sli_get_bios_version_string(&hw->sli);
1798 		break;
1799 	default:
1800 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1801 	}
1802 
1803 	return rc;
1804 }
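
/*
 * Editor's example (sketch): fetch the port WWN bytes via ocs_hw_get_ptr().
 * The destination shown is hypothetical; the copy length mirrors the
 * sli_wwpn usage in ocs_hw_port_alloc() below.
 *
 *	uint8_t *wwpn = ocs_hw_get_ptr(hw, OCS_HW_WWN_PORT);
 *
 *	if (wwpn != NULL) {
 *		ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
 *	}
 */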
1805 
1806 
1807 
1808 ocs_hw_rtn_e
1809 ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
1810 {
1811 	ocs_hw_rtn_e		rc = OCS_HW_RTN_SUCCESS;
1812 
1813 	switch (prop) {
1814 	case OCS_HW_N_IO:
1815 		if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
1816 		    value == 0) {
1817 			ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
1818 					value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
1819 			rc = OCS_HW_RTN_ERROR;
1820 		} else {
1821 			hw->config.n_io = value;
1822 		}
1823 		break;
1824 	case OCS_HW_N_SGL:
1825 		value += SLI4_SGE_MAX_RESERVED;
1826 		if (value > sli_get_max_sgl(&hw->sli)) {
1827 			ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
1828 					value, sli_get_max_sgl(&hw->sli));
1829 			rc = OCS_HW_RTN_ERROR;
1830 		} else {
1831 			hw->config.n_sgl = value;
1832 		}
1833 		break;
1834 	case OCS_HW_TOPOLOGY:
1835 		if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
1836 				(value != OCS_HW_TOPOLOGY_AUTO)) {
1837 			ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
1838 					value, sli_get_medium(&hw->sli));
1839 			rc = OCS_HW_RTN_ERROR;
1840 			break;
1841 		}
1842 
1843 		switch (value) {
1844 		case OCS_HW_TOPOLOGY_AUTO:
1845 			if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
1846 				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
1847 			} else {
1848 				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
1849 			}
1850 			break;
1851 		case OCS_HW_TOPOLOGY_NPORT:
1852 			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
1853 			break;
1854 		case OCS_HW_TOPOLOGY_LOOP:
1855 			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
1856 			break;
1857 		default:
1858 			ocs_log_test(hw->os, "unsupported topology %#x\n", value);
1859 			rc = OCS_HW_RTN_ERROR;
1860 		}
1861 		hw->config.topology = value;
1862 		break;
1863 	case OCS_HW_LINK_SPEED:
1864 		if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
1865 			switch (value) {
1866 			case 0: 	/* Auto-speed negotiation */
1867 			case 10000:	/* FCoE speed */
1868 				hw->config.speed = FC_LINK_SPEED_10G;
1869 				break;
1870 			default:
1871 				ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
1872 						value, sli_get_medium(&hw->sli));
1873 				rc = OCS_HW_RTN_ERROR;
1874 			}
1875 			break;
1876 		}
1877 
1878 		switch (value) {
1879 		case 0:		/* Auto-speed negotiation */
1880 			hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
1881 			break;
1882 		case 2000:	/* FC speeds */
1883 			hw->config.speed = FC_LINK_SPEED_2G;
1884 			break;
1885 		case 4000:
1886 			hw->config.speed = FC_LINK_SPEED_4G;
1887 			break;
1888 		case 8000:
1889 			hw->config.speed = FC_LINK_SPEED_8G;
1890 			break;
1891 		case 16000:
1892 			hw->config.speed = FC_LINK_SPEED_16G;
1893 			break;
1894 		case 32000:
1895 			hw->config.speed = FC_LINK_SPEED_32G;
1896 			break;
1897 		default:
1898 			ocs_log_test(hw->os, "unsupported speed %d\n", value);
1899 			rc = OCS_HW_RTN_ERROR;
1900 		}
1901 		break;
1902 	case OCS_HW_DIF_SEED:
1903 		/* Set the DIF seed - only for lancer right now */
1904 		if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
1905 			ocs_log_test(hw->os, "DIF seed not supported for this device\n");
1906 			rc = OCS_HW_RTN_ERROR;
1907 		} else {
1908 			hw->config.dif_seed = value;
1909 		}
1910 		break;
1911 	case OCS_HW_DIF_MODE:
1912 		switch (value) {
1913 		case OCS_HW_DIF_MODE_INLINE:
1914 			/*
1915 			 *  Make sure we support inline DIF.
1916 			 *
1917 			 * Note: Having both bits clear means that we have old
1918 			 *	FW that doesn't set the bits.
1919 			 */
1920 			if (sli_is_dif_inline_capable(&hw->sli)) {
1921 				hw->config.dif_mode = value;
1922 			} else {
1923 				ocs_log_test(hw->os, "chip does not support DIF inline\n");
1924 				rc = OCS_HW_RTN_ERROR;
1925 			}
1926 			break;
1927 		case OCS_HW_DIF_MODE_SEPARATE:
1928 			/* Make sure we support DIF separates. */
1929 			if (sli_is_dif_separate_capable(&hw->sli)) {
1930 				hw->config.dif_mode = value;
1931 			} else {
1932 				ocs_log_test(hw->os, "chip does not support DIF separate\n");
1933 				rc = OCS_HW_RTN_ERROR;
1934 			}
1935 		}
1936 		break;
1937 	case OCS_HW_RQ_PROCESS_LIMIT: {
1938 		hw_rq_t *rq;
1939 		uint32_t i;
1940 
1941 		/* For each hw_rq object, set its parent CQ limit value */
1942 		for (i = 0; i < hw->hw_rq_count; i++) {
1943 			rq = hw->hw_rq[i];
1944 			hw->cq[rq->cq->instance].proc_limit = value;
1945 		}
1946 		break;
1947 	}
1948 	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1949 		hw->config.rq_default_buffer_size = value;
1950 		break;
1951 	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1952 		hw->config.auto_xfer_rdy_xri_cnt = value;
1953 		break;
1954 	case OCS_HW_AUTO_XFER_RDY_SIZE:
1955 		hw->config.auto_xfer_rdy_size = value;
1956 		break;
1957 	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1958 		switch (value) {
1959 		case 512:
1960 			hw->config.auto_xfer_rdy_blk_size_chip = 0;
1961 			break;
1962 		case 1024:
1963 			hw->config.auto_xfer_rdy_blk_size_chip = 1;
1964 			break;
1965 		case 2048:
1966 			hw->config.auto_xfer_rdy_blk_size_chip = 2;
1967 			break;
1968 		case 4096:
1969 			hw->config.auto_xfer_rdy_blk_size_chip = 3;
1970 			break;
1971 		case 520:
1972 			hw->config.auto_xfer_rdy_blk_size_chip = 4;
1973 			break;
1974 		default:
1975 			ocs_log_err(hw->os, "Invalid block size %d\n",
1976 				    value);
1977 			rc = OCS_HW_RTN_ERROR;
1978 		}
1979 		break;
1980 	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1981 		hw->config.auto_xfer_rdy_t10_enable = value;
1982 		break;
1983 	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1984 		hw->config.auto_xfer_rdy_p_type = value;
1985 		break;
1986 	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1987 		hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
1988 		break;
1989 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1990 		hw->config.auto_xfer_rdy_app_tag_valid = value;
1991 		break;
1992 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1993 		hw->config.auto_xfer_rdy_app_tag_value = value;
1994 		break;
1995 	case OCS_ESOC:
1996 		hw->config.esoc = value;
1997 		break;
1998 	case OCS_HW_HIGH_LOGIN_MODE:
1999 		rc = sli_set_hlm(&hw->sli, value);
2000 		break;
2001 	case OCS_HW_PREREGISTER_SGL:
2002 		rc = sli_set_sgl_preregister(&hw->sli, value);
2003 		break;
2004 	case OCS_HW_ETH_LICENSE:
2005 		hw->eth_license = value;
2006 		break;
2007 	case OCS_HW_EMULATE_I_ONLY_AAB:
2008 		hw->config.i_only_aab = value;
2009 		break;
2010 	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
2011 		hw->config.emulate_tgt_wqe_timeout = value;
2012 		break;
2013 	case OCS_HW_BOUNCE:
2014 		hw->config.bounce = value;
2015 		break;
2016 	case OCS_HW_RQ_SELECTION_POLICY:
2017 		hw->config.rq_selection_policy = value;
2018 		break;
2019 	case OCS_HW_RR_QUANTA:
2020 		hw->config.rr_quanta = value;
2021 		break;
2022 	default:
2023 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2024 		rc = OCS_HW_RTN_ERROR;
2025 	}
2026 
2027 	return rc;
2028 }
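
/*
 * Editor's example (sketch): configure an FC arbitrated-loop topology and an
 * 8G link speed before bringing the link up. Both values map to cases handled
 * in ocs_hw_set() above; the call sequence itself is hypothetical.
 *
 *	if (ocs_hw_set(hw, OCS_HW_TOPOLOGY, OCS_HW_TOPOLOGY_LOOP) != OCS_HW_RTN_SUCCESS ||
 *	    ocs_hw_set(hw, OCS_HW_LINK_SPEED, 8000) != OCS_HW_RTN_SUCCESS) {
 *		ocs_log_err(hw->os, "link configuration failed\n");
 *	}
 */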
2029 
2030 
2031 ocs_hw_rtn_e
2032 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
2033 {
2034 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2035 
2036 	switch (prop) {
2037 	case OCS_HW_WAR_VERSION:
2038 		hw->hw_war_version = value;
2039 		break;
2040 	case OCS_HW_FILTER_DEF: {
2041 		char *p = value;
2042 		uint32_t idx = 0;
2043 
2044 		for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
2045 			hw->config.filter_def[idx] = 0;
2046 		}
2047 
2048 		for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
2049 			hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
2050 			p = ocs_strchr(p, ',');
2051 			if (p != NULL) {
2052 				p++;
2053 			}
2054 		}
2055 
2056 		break;
2057 	}
2058 	default:
2059 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2060 		rc = OCS_HW_RTN_ERROR;
2061 		break;
2062 	}
2063 	return rc;
2064 }
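
/*
 * Editor's example (sketch): OCS_HW_FILTER_DEF accepts a comma-separated
 * string of numeric filter definitions (parsed with ocs_strtoul(), base 0,
 * so hex is accepted); entries beyond those supplied remain zero. The string
 * below is illustrative only.
 *
 *	ocs_hw_set_ptr(hw, OCS_HW_FILTER_DEF, "0x01,0x02,0x04");
 */
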
2065 /**
2066  * @ingroup interrupt
2067  * @brief Check for the events associated with the interrupt vector.
2068  *
2069  * @param hw Hardware context.
2070  * @param vector Zero-based interrupt vector number.
2071  *
2072  * @return Returns 0 if events are pending, a non-zero value if the EQ is empty (in which case it is re-armed), or -1 if the parameters are invalid.
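 *
 * @par Example
 * A minimal interrupt-filter sketch (editor's illustration; the caller and
 * the 1000 msec time budget are assumptions, not from this file):
 * @code
 * if (ocs_hw_event_check(hw, vector) == 0) {
 *         // EQEs are pending; process completions within the time budget
 *         ocs_hw_process(hw, vector, 1000);
 * }
 * @endcode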
2073  */
2074 int32_t
2075 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
2076 {
2077 	int32_t rc = 0;
2078 
2079 	if (!hw) {
2080 		ocs_log_err(NULL, "HW context NULL?!?\n");
2081 		return -1;
2082 	}
2083 
2084 	if (vector >= hw->eq_count) {
2085 		ocs_log_err(hw->os, "invalid vector %d, max %d\n",
2086 				vector, hw->eq_count);
2087 		return -1;
2088 	}
2089 
2090 	/*
2091 	 * The caller should disable interrupts if they wish to prevent us
2092 	 * from processing during a shutdown. The following states are defined:
2093 	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
2094 	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2095 	 *                                    queues are cleared.
2096 	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
2097 	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2098 	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2099 	 *                                        completions.
2100 	 */
2101 	if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
2102 		rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);
2103 
2104 		/* Re-arm queue if there are no entries */
2105 		if (rc != 0) {
2106 			sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
2107 		}
2108 	}
2109 	return rc;
2110 }
2111 
2112 void
2113 ocs_hw_unsol_process_bounce(void *arg)
2114 {
2115 	ocs_hw_sequence_t *seq = arg;
2116 	ocs_hw_t *hw = seq->hw;
2117 
2118 	ocs_hw_assert(hw != NULL);
2119 	ocs_hw_assert(hw->callback.unsolicited != NULL);
2120 
2121 	hw->callback.unsolicited(hw->args.unsolicited, seq);
2122 }
2123 
2124 int32_t
2125 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
2126 {
2127 	hw_eq_t *eq;
2128 	int32_t rc = 0;
2129 
2130 	CPUTRACE("");
2131 
2132 	/*
2133 	 * The caller should disable interrupts if they wish to prevent us
2134 	 * from processing during a shutdown. The following states are defined:
2135 	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
2136 	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2137 	 *                                    queues are cleared.
2138 	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
2139 	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2140 	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2141 	 *                                        completions.
2142 	 */
2143 	if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
2144 		return 0;
2145 	}
2146 
2147 	/* Get pointer to hw_eq_t */
2148 	eq = hw->hw_eq[vector];
2149 
2150 	OCS_STAT(eq->use_count++);
2151 
2152 	rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
2153 
2154 	return rc;
2155 }
2156 
2157 /**
2158  * @ingroup interrupt
2159  * @brief Process events associated with an EQ.
2160  *
2161  * @par Description
2162  * Loop termination:
2163  * @n @n Without a mechanism to terminate the completion processing loop, it
2164  * is possible under some workload conditions for the loop to never terminate,
2165  * or at least to run longer than the OS tolerates an interrupt handler or
2166  * kernel thread holding a CPU without yielding.
2167  * @n @n The approach taken here is to periodically check how long we have
2168  * been in this processing loop; if the elapsed time exceeds
2169  * @c max_isr_time_msec, the loop is terminated and ocs_hw_eq_process()
2170  * returns.
2171  *
2172  * @param hw Hardware context.
2173  * @param eq Pointer to HW EQ object.
2174  * @param max_isr_time_msec Maximum time in msec to stay in this function.
2175  *
2176  * @return Returns 0 on success, or a non-zero value on failure.
2177  */
2178 int32_t
2179 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
2180 {
2181 	uint8_t		eqe[sizeof(sli4_eqe_t)] = { 0 };
2182 	uint32_t	done = FALSE;
2183 	uint32_t	tcheck_count;
2184 	time_t		tstart;
2185 	time_t		telapsed;
2186 
2187 	tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2188 	tstart = ocs_msectime();
2189 
2190 	CPUTRACE("");
2191 
2192 	while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
2193 		uint16_t	cq_id = 0;
2194 		int32_t		rc;
2195 
2196 		rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2197 		if (unlikely(rc)) {
2198 			if (rc > 0) {
2199 				uint32_t i;
2200 
2201 				/*
2202 				 * Received a sentinel EQE indicating the EQ is full.
2203 				 * Process all CQs
2204 				 */
2205 				for (i = 0; i < hw->cq_count; i++) {
2206 					ocs_hw_cq_process(hw, hw->hw_cq[i]);
2207 				}
2208 				continue;
2209 			} else {
2210 				return rc;
2211 			}
2212 		} else {
2213 			int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
2214 			if (likely(index >= 0)) {
2215 				ocs_hw_cq_process(hw, hw->hw_cq[index]);
2216 			} else {
2217 				ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2218 			}
2219 		}
2220 
2221 
2222 		if (eq->queue->n_posted > (eq->queue->posted_limit)) {
2223 			sli_queue_arm(&hw->sli, eq->queue, FALSE);
2224 		}
2225 
2226 		if (tcheck_count && (--tcheck_count == 0)) {
2227 			tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2228 			telapsed = ocs_msectime() - tstart;
2229 			if (telapsed >= max_isr_time_msec) {
2230 				done = TRUE;
2231 			}
2232 		}
2233 	}
2234 	sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
2235 
2236 	return 0;
2237 }
2238 
2239 /**
2240  * @brief Submit queued (pending) mbx commands.
2241  *
2242  * @par Description
2243  * Submit queued mailbox commands.
2244  * --- Assumes that hw->cmd_lock is held ---
2245  *
2246  * @param hw Hardware context.
2247  *
2248  * @return Returns 0 on success, or a negative error code value on failure.
2249  */
2250 static int32_t
2251 ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
2252 {
2253 	ocs_command_ctx_t *ctx;
2254 	int32_t rc = 0;
2255 
2256 	/* Assumes lock held */
2257 
2258 	/* Only submit MQE if there's room */
2259 	while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
2260 		ctx = ocs_list_remove_head(&hw->cmd_pending);
2261 		if (ctx == NULL) {
2262 			break;
2263 		}
2264 		ocs_list_add_tail(&hw->cmd_head, ctx);
2265 		hw->cmd_head_count++;
2266 		if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
2267 			ocs_log_test(hw->os, "sli_queue_write failed\n");
2268 			rc = -1;
2269 			break;
2270 		}
2271 	}
2272 	return rc;
2273 }
2274 
2275 /**
2276  * @ingroup io
2277  * @brief Issue a SLI command.
2278  *
2279  * @par Description
2280  * Send a mailbox command to the hardware, and either wait for a completion
2281  * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
2282  *
2283  * @param hw Hardware context.
2284  * @param cmd Buffer containing a formatted command and results.
2285  * @param opts Command options:
2286  *  - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
2287  *  - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
2288  * @param cb Function callback used for asynchronous mode. May be NULL.
2289  * @n Prototype is <tt>(*cb)(void *arg, uint8_t *cmd)</tt>.
2290  * @n @n @b Note: If the
2291  * callback function pointer is NULL, the results of the command are silently
2292  * discarded, allowing this pointer to exist solely on the stack.
2293  * @param arg Argument passed to an asynchronous callback.
2294  *
2295  * @return Returns 0 on success, or a non-zero value on failure.
2296  */
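 *
 * @par Example
 * A polled-mode sketch (editor's illustration): format a CONFIG_LINK request
 * and busy-wait for its completion. The stack buffer and error handling are
 * hypothetical; sli_cmd_config_link() is used as in ocs_hw_port_control().
 * @code
 * uint8_t buf[SLI4_BMBX_SIZE];
 *
 * if (sli_cmd_config_link(&hw->sli, buf, sizeof(buf)) &&
 *     (ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL) == OCS_HW_RTN_SUCCESS)) {
 *         // buf now holds the mailbox response
 * }
 * @endcode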
2297 ocs_hw_rtn_e
2298 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
2299 {
2300 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2301 
2302 	/*
2303 	 * If the chip is in an error state (UE'd) then reject this mailbox
2304 	 *  command.
2305 	 */
2306 	if (sli_fw_error_status(&hw->sli) > 0) {
2307 		uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
2308 		uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
2309 		if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
2310 			hw->expiration_logged = 1;
2311 			ocs_log_crit(hw->os, "Emulex: Heartbeat expired after %d seconds\n",
2312 					hw->watchdog_timeout);
2313 		}
2314 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2315 		ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
2316 			sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
2317 			err1, err2);
2318 
2319 		return OCS_HW_RTN_ERROR;
2320 	}
2321 
2322 	if (OCS_CMD_POLL == opts) {
2323 
2324 		ocs_lock(&hw->cmd_lock);
2325 		if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
2326 			/*
2327 			 * Can't issue a bootstrap mailbox command while other
2328 			 * mailbox-queue commands are pending, as this
2329 			 * interaction is undefined
2330 			 */
2331 			rc = OCS_HW_RTN_ERROR;
2332 		} else {
2333 			void *bmbx = hw->sli.bmbx.virt;
2334 
2335 			ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
2336 			ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
2337 
2338 			if (sli_bmbx_command(&hw->sli) == 0) {
2339 				rc = OCS_HW_RTN_SUCCESS;
2340 				ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
2341 			}
2342 		}
2343 		ocs_unlock(&hw->cmd_lock);
2344 	} else if (OCS_CMD_NOWAIT == opts) {
2345 		ocs_command_ctx_t	*ctx = NULL;
2346 
2347 		ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
2348 		if (!ctx) {
2349 			ocs_log_err(hw->os, "can't allocate command context\n");
2350 			return OCS_HW_RTN_NO_RESOURCES;
2351 		}
2352 
2353 		if (hw->state != OCS_HW_STATE_ACTIVE) {
2354 			ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
2355 			ocs_free(hw->os, ctx, sizeof(*ctx));
2356 			return OCS_HW_RTN_ERROR;
2357 		}
2358 
2359 		if (cb) {
2360 			ctx->cb = cb;
2361 			ctx->arg = arg;
2362 		}
2363 		ctx->buf = cmd;
2364 		ctx->ctx = hw;
2365 
2366 		ocs_lock(&hw->cmd_lock);
2367 
2368 			/* Add to pending list */
2369 			ocs_list_add_tail(&hw->cmd_pending, ctx);
2370 
2371 			/* Submit as much of the pending list as we can */
2372 			if (ocs_hw_cmd_submit_pending(hw) == 0) {
2373 				rc = OCS_HW_RTN_SUCCESS;
2374 			}
2375 
2376 		ocs_unlock(&hw->cmd_lock);
2377 	}
2378 
2379 	return rc;
2380 }
2381 
2382 /**
2383  * @ingroup devInitShutdown
2384  * @brief Register a callback for the given event.
2385  *
2386  * @param hw Hardware context.
2387  * @param which Event of interest.
2388  * @param func Function to call when the event occurs.
2389  * @param arg Argument passed to the callback function.
2390  *
2391  * @return Returns 0 on success, or a non-zero value on failure.
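 *
 * @par Example
 * Registering an unsolicited-frame handler (editor's sketch; the handler
 * name, its return type, and the context pointer are assumptions, while the
 * argument list mirrors how hw->callback.unsolicited is invoked below):
 * @code
 * static int32_t my_unsol_cb(void *arg, ocs_hw_sequence_t *seq);
 *
 * ocs_hw_callback(hw, OCS_HW_CB_UNSOLICITED, my_unsol_cb, my_ctx);
 * @endcode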
2392  */
2393 ocs_hw_rtn_e
2394 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
2395 {
2396 
2397 	if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
2398 		ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
2399 			    hw, which, func);
2400 		return OCS_HW_RTN_ERROR;
2401 	}
2402 
2403 	switch (which) {
2404 	case OCS_HW_CB_DOMAIN:
2405 		hw->callback.domain = func;
2406 		hw->args.domain = arg;
2407 		break;
2408 	case OCS_HW_CB_PORT:
2409 		hw->callback.port = func;
2410 		hw->args.port = arg;
2411 		break;
2412 	case OCS_HW_CB_UNSOLICITED:
2413 		hw->callback.unsolicited = func;
2414 		hw->args.unsolicited = arg;
2415 		break;
2416 	case OCS_HW_CB_REMOTE_NODE:
2417 		hw->callback.rnode = func;
2418 		hw->args.rnode = arg;
2419 		break;
2420 	case OCS_HW_CB_BOUNCE:
2421 		hw->callback.bounce = func;
2422 		hw->args.bounce = arg;
2423 		break;
2424 	default:
2425 		ocs_log_test(hw->os, "unknown callback %#x\n", which);
2426 		return OCS_HW_RTN_ERROR;
2427 	}
2428 
2429 	return OCS_HW_RTN_SUCCESS;
2430 }
2431 
2432 /**
2433  * @ingroup port
2434  * @brief Allocate a port object.
2435  *
2436  * @par Description
2437  * This function allocates a VPI object for the port and stores it in the
2438  * indicator field of the port object.
2439  *
2440  * @param hw Hardware context.
2441  * @param sport SLI port object used to connect to the domain.
2442  * @param domain Domain object associated with this port (may be NULL).
2443  * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
2444  *
2445  * @return Returns 0 on success, or a non-zero value on failure.
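 *
 * @par Example
 * Allocating a virtual port (editor's sketch; assumes the caller already has
 * a domain and a big-endian WWPN byte array):
 * @code
 * if (ocs_hw_port_alloc(hw, sport, domain, wwpn) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "port allocation failed\n");
 * }
 * @endcode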
2446  */
2447 ocs_hw_rtn_e
2448 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
2449 		uint8_t *wwpn)
2450 {
2451 	uint8_t	*cmd = NULL;
2452 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2453 	uint32_t index;
2454 
2455 	sport->indicator = UINT32_MAX;
2456 	sport->hw = hw;
2457 	sport->ctx.app = sport;
2458 	sport->sm_free_req_pending = 0;
2459 
2460 	/*
2461 	 * Check if the chip is in an error state (UE'd) before proceeding.
2462 	 */
2463 	if (sli_fw_error_status(&hw->sli) > 0) {
2464 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2465 		return OCS_HW_RTN_ERROR;
2466 	}
2467 
2468 	if (wwpn) {
2469 		ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
2470 	}
2471 
2472 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
2473 		ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
2474 		return OCS_HW_RTN_ERROR;
2475 	}
2476 
2477 	if (domain != NULL) {
2478 		ocs_sm_function_t	next = NULL;
2479 
2480 		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2481 		if (!cmd) {
2482 			ocs_log_err(hw->os, "command memory allocation failed\n");
2483 			rc = OCS_HW_RTN_NO_MEMORY;
2484 			goto ocs_hw_port_alloc_out;
2485 		}
2486 
2487 		/* If the WWPN is NULL, fetch the default WWPN and WWNN before
2488 		 * initializing the VPI
2489 		 */
2490 		if (!wwpn) {
2491 			next = __ocs_hw_port_alloc_read_sparm64;
2492 		} else {
2493 			next = __ocs_hw_port_alloc_init_vpi;
2494 		}
2495 
2496 		ocs_sm_transition(&sport->ctx, next, cmd);
2497 	} else if (!wwpn) {
2498 		/* This is the convention for the HW, not SLI */
2499 		ocs_log_test(hw->os, "need WWN for physical port\n");
2500 		rc = OCS_HW_RTN_ERROR;
2501 	} else {
2502 		/* domain NULL and wwpn non-NULL */
2503 		ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
2504 	}
2505 
2506 ocs_hw_port_alloc_out:
2507 	if (rc != OCS_HW_RTN_SUCCESS) {
2508 		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2509 
2510 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
2511 	}
2512 
2513 	return rc;
2514 }
2515 
2516 /**
2517  * @ingroup port
2518  * @brief Attach a physical/virtual SLI port to a domain.
2519  *
2520  * @par Description
2521  * This function registers a previously-allocated VPI with the
2522  * device.
2523  *
2524  * @param hw Hardware context.
2525  * @param sport Pointer to the SLI port object.
2526  * @param fc_id Fibre Channel ID to associate with this port.
2527  *
2528  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
2529  */
2530 ocs_hw_rtn_e
2531 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
2532 {
2533 	uint8_t	*buf = NULL;
2534 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2535 
2536 	if (!hw || !sport) {
2537 		ocs_log_err(hw ? hw->os : NULL,
2538 			"bad parameter(s) hw=%p sport=%p\n", hw,
2539 			sport);
2540 		return OCS_HW_RTN_ERROR;
2541 	}
2542 
2543 	/*
2544 	 * Check if the chip is in an error state (UE'd) before proceeding.
2545 	 */
2546 	if (sli_fw_error_status(&hw->sli) > 0) {
2547 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2548 		return OCS_HW_RTN_ERROR;
2549 	}
2550 
2551 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2552 	if (!buf) {
2553 		ocs_log_err(hw->os, "no buffer for command\n");
2554 		return OCS_HW_RTN_NO_MEMORY;
2555 	}
2556 
2557 	sport->fc_id = fc_id;
2558 	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
2559 	return rc;
2560 }
2561 
2562 /**
2563  * @brief Called when the port control command completes.
2564  *
2565  * @par Description
2566  * We only need to free the mailbox command buffer.
2567  *
2568  * @param hw Hardware context.
2569  * @param status Status field from the mbox completion.
2570  * @param mqe Mailbox response structure.
2571  * @param arg Callback argument (ignored; this handler only frees the mailbox buffer).
2572  *
2573  * @return Returns 0.
2574  */
2575 static int32_t
2576 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
2577 {
2578 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2579 	return 0;
2580 }
2581 
2582 /**
2583  * @ingroup port
2584  * @brief Control a port (initialize, shutdown, or set link configuration).
2585  *
2586  * @par Description
2587  * This function controls a port depending on the @c ctrl parameter:
2588  * - @b OCS_HW_PORT_INIT -
2589  * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
2590  * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
2591  * .
2592  * - @b OCS_HW_PORT_SHUTDOWN -
2593  * Issues the DOWN_LINK command for the specified port.
2594  * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
2595  * .
2596  * - @b OCS_HW_PORT_SET_LINK_CONFIG -
2597  * Sets the link configuration.
2598  *
2599  * @param hw Hardware context.
2600  * @param ctrl Specifies the operation:
2601  * - OCS_HW_PORT_INIT
2602  * - OCS_HW_PORT_SHUTDOWN
2603  * - OCS_HW_PORT_SET_LINK_CONFIG
2604  *
2605  * @param value Operation-specific value.
2606  * - OCS_HW_PORT_INIT - Selective reset AL_PA
2607  * - OCS_HW_PORT_SHUTDOWN - N/A
2608  * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
2609  *
2610  * @param cb Callback function, whose use depends on the operation:
2611  * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2612  * are handled by the OCS_HW_CB_DOMAIN callbacks).
2613  * - OCS_HW_PORT_SET_LINK_CONFIG - invoked after the linkcfg mailbox
2614  * command completes.
2615  *
2616  * @param arg Argument passed to the callback:
2617  * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2618  * are handled by the OCS_HW_CB_DOMAIN callbacks).
2619  * - OCS_HW_PORT_SET_LINK_CONFIG - passed to the callback when the linkcfg
2620  * mailbox command completes.
2621  *
2622  * @return Returns 0 on success, or a non-zero value on failure.
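 *
 * @par Example
 * Bringing the link up after configuration (editor's sketch; link-up is
 * reported asynchronously through the OCS_HW_CB_DOMAIN callback, so no
 * completion callback is passed here):
 * @code
 * if (ocs_hw_port_control(hw, OCS_HW_PORT_INIT, 0, NULL, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "port init failed\n");
 * }
 * @endcode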
2623  */
2624 ocs_hw_rtn_e
2625 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
2626 {
2627 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2628 
2629 	switch (ctrl) {
2630 	case OCS_HW_PORT_INIT:
2631 	{
2632 		uint8_t	*init_link;
2633 		uint32_t speed = 0;
2634 		uint8_t reset_alpa = 0;
2635 
2636 		if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
2637 			uint8_t	*cfg_link;
2638 
2639 			cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2640 			if (cfg_link == NULL) {
2641 				ocs_log_err(hw->os, "no buffer for command\n");
2642 				return OCS_HW_RTN_NO_MEMORY;
2643 			}
2644 
2645 			if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
2646 				rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
2647 							ocs_hw_cb_port_control, NULL);
2648 			}
2649 
2650 			if (rc != OCS_HW_RTN_SUCCESS) {
2651 				ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
2652 				ocs_log_err(hw->os, "CONFIG_LINK failed\n");
2653 				break;
2654 			}
2655 			speed = hw->config.speed;
2656 			reset_alpa = (uint8_t)(value & 0xff);
2657 		} else {
2658 			speed = FC_LINK_SPEED_10G;
2659 		}
2660 
2661 		/*
2662 		 * Bring link up, unless FW version is not supported
2663 		 */
2664 		if (hw->workaround.fw_version_too_low) {
2665 			if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
2666 				ocs_log_err(hw->os, "Cannot bring up link.  Please update firmware to %s or later (current version is %s)\n",
2667 					OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
2668 			} else {
2669 				ocs_log_err(hw->os, "Cannot bring up link.  Please update firmware to %s or later (current version is %s)\n",
2670 					OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
2671 			}
2672 
2673 			return OCS_HW_RTN_ERROR;
2674 		}
2675 
2676 		rc = OCS_HW_RTN_ERROR;
2677 
2678 		/* Allocate a new buffer for the init_link command */
2679 		init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2680 		if (init_link == NULL) {
2681 			ocs_log_err(hw->os, "no buffer for command\n");
2682 			return OCS_HW_RTN_NO_MEMORY;
2683 		}
2684 
2685 
2686 		if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
2687 			rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
2688 						ocs_hw_cb_port_control, NULL);
2689 		}
2690 		/* Free buffer on error, since no callback is coming */
2691 		if (rc != OCS_HW_RTN_SUCCESS) {
2692 			ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
2693 			ocs_log_err(hw->os, "INIT_LINK failed\n");
2694 		}
2695 		break;
2696 	}
2697 	case OCS_HW_PORT_SHUTDOWN:
2698 	{
2699 		uint8_t	*down_link;
2700 
2701 		down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2702 		if (down_link == NULL) {
2703 			ocs_log_err(hw->os, "no buffer for command\n");
2704 			return OCS_HW_RTN_NO_MEMORY;
2705 		}
2706 		if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
2707 			rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
2708 						ocs_hw_cb_port_control, NULL);
2709 		}
2710 		/* Free buffer on error, since no callback is coming */
2711 		if (rc != OCS_HW_RTN_SUCCESS) {
2712 			ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
2713 			ocs_log_err(hw->os, "DOWN_LINK failed\n");
2714 		}
2715 		break;
2716 	}
2717 	case OCS_HW_PORT_SET_LINK_CONFIG:
2718 		rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
2719 		break;
2720 	default:
2721 		ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
2722 		break;
2723 	}
2724 
2725 	return rc;
2726 }
2727 
2728 
2729 /**
2730  * @ingroup port
2731  * @brief Free port resources.
2732  *
2733  * @par Description
2734  * Issue the UNREG_VPI command to free the assigned VPI context.
2735  *
2736  * @param hw Hardware context.
2737  * @param sport SLI port object used to connect to the domain.
2738  *
2739  * @return Returns 0 on success, or a non-zero value on failure.
2740  */
2741 ocs_hw_rtn_e
2742 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
2743 {
2744 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
2745 
2746 	if (!hw || !sport) {
2747 		ocs_log_err(hw ? hw->os : NULL,
2748 			"bad parameter(s) hw=%p sport=%p\n", hw,
2749 			sport);
2750 		return OCS_HW_RTN_ERROR;
2751 	}
2752 
2753 	/*
2754 	 * Check if the chip is in an error state (UE'd) before proceeding.
2755 	 */
2756 	if (sli_fw_error_status(&hw->sli) > 0) {
2757 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2758 		return OCS_HW_RTN_ERROR;
2759 	}
2760 
2761 	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
2762 	return rc;
2763 }
2764 
2765 /**
2766  * @ingroup domain
2767  * @brief Allocate a fabric domain object.
2768  *
2769  * @par Description
2770  * This function starts a series of commands needed to connect to the domain, including
2771  *   - REG_FCFI
2772  *   - INIT_VFI
2773  *   - READ_SPARMS
2774  *   .
2775  * @b Note: Not all SLI interface types use all of the above commands.
2776  * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
2777  * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
2778  *
2779  * @param hw Hardware context.
2780  * @param domain Pointer to the domain object.
2781  * @param fcf FCF index.
2782  * @param vlan VLAN ID.
2783  *
2784  * @return Returns 0 on success, or a non-zero value on failure.
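 *
 * @par Example
 * Starting domain bring-up (editor's sketch; the FCF index and VLAN ID come
 * from FCF discovery, which is outside this excerpt):
 * @code
 * if (ocs_hw_domain_alloc(hw, domain, fcf_index, vlan_id) == OCS_HW_RTN_SUCCESS) {
 *         // result arrives later as OCS_HW_DOMAIN_ALLOC_OK or _FAIL
 * }
 * @endcode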
2785  */
2786 ocs_hw_rtn_e
2787 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
2788 {
2789 	uint8_t		*cmd = NULL;
2790 	uint32_t	index;
2791 
2792 	if (!hw || !domain || !domain->sport) {
2793 		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
2794 				hw, domain, domain ? domain->sport : NULL);
2795 		return OCS_HW_RTN_ERROR;
2796 	}
2797 
2798 	/*
2799 	 * Check if the chip is in an error state (UE'd) before proceeding.
2800 	 */
2801 	if (sli_fw_error_status(&hw->sli) > 0) {
2802 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2803 		return OCS_HW_RTN_ERROR;
2804 	}
2805 
2806 	cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2807 	if (!cmd) {
2808 		ocs_log_err(hw->os, "command memory allocation failed\n");
2809 		return OCS_HW_RTN_NO_MEMORY;
2810 	}
2811 
2812 	domain->dma = hw->domain_dmem;
2813 
2814 	domain->hw = hw;
2815 	domain->sm.app = domain;
2816 	domain->fcf = fcf;
2817 	domain->fcf_indicator = UINT32_MAX;
2818 	domain->vlan_id = vlan;
2819 	domain->indicator = UINT32_MAX;
2820 
2821 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
2822 		ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
2823 
2824 		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2825 
2826 		return OCS_HW_RTN_ERROR;
2827 	}
2828 
2829 	ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
2830 	return OCS_HW_RTN_SUCCESS;
2831 }
2832 
2833 /**
2834  * @ingroup domain
2835  * @brief Attach a SLI port to a domain.
2836  *
2837  * @param hw Hardware context.
2838  * @param domain Pointer to the domain object.
2839  * @param fc_id Fibre Channel ID to associate with this port.
2840  *
2841  * @return Returns 0 on success, or a non-zero value on failure.
2842  */
2843 ocs_hw_rtn_e
2844 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
2845 {
2846 	uint8_t	*buf = NULL;
2847 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2848 
2849 	if (!hw || !domain) {
2850 		ocs_log_err(hw ? hw->os : NULL,
2851 			"bad parameter(s) hw=%p domain=%p\n",
2852 			hw, domain);
2853 		return OCS_HW_RTN_ERROR;
2854 	}
2855 
2856 	/*
2857 	 * Check if the chip is in an error state (UE'd) before proceeding.
2858 	 */
2859 	if (sli_fw_error_status(&hw->sli) > 0) {
2860 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2861 		return OCS_HW_RTN_ERROR;
2862 	}
2863 
2864 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2865 	if (!buf) {
2866 		ocs_log_err(hw->os, "no buffer for command\n");
2867 		return OCS_HW_RTN_NO_MEMORY;
2868 	}
2869 
2870 	domain->sport->fc_id = fc_id;
2871 	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
2872 	return rc;
2873 }
2874 
2875 /**
2876  * @ingroup domain
2877  * @brief Free a fabric domain object.
2878  *
2879  * @par Description
2880  * Free both the driver and SLI port resources associated with the domain.
2881  *
2882  * @param hw Hardware context.
2883  * @param domain Pointer to the domain object.
2884  *
2885  * @return Returns 0 on success, or a non-zero value on failure.
2886  */
2887 ocs_hw_rtn_e
2888 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
2889 {
2890 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
2891 
2892 	if (!hw || !domain) {
2893 		ocs_log_err(hw ? hw->os : NULL,
2894 			"bad parameter(s) hw=%p domain=%p\n",
2895 			hw, domain);
2896 		return OCS_HW_RTN_ERROR;
2897 	}
2898 
2899 	/*
2900 	 * Check if the chip is in an error state (UE'd) before proceeding.
2901 	 */
2902 	if (sli_fw_error_status(&hw->sli) > 0) {
2903 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2904 		return OCS_HW_RTN_ERROR;
2905 	}
2906 
2907 	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
2908 	return rc;
2909 }
2910 
2911 /**
2912  * @ingroup domain
2913  * @brief Free a fabric domain object.
2914  *
2915  * @par Description
2916  * Free the driver resources associated with the domain. The difference between
2917  * this call and ocs_hw_domain_free() is that this call assumes resources no longer
2918  * exist on the SLI port, due to a reset or after some error conditions.
2919  *
2920  * @param hw Hardware context.
2921  * @param domain Pointer to the domain object.
2922  *
2923  * @return Returns 0 on success, or a non-zero value on failure.
2924  */
2925 ocs_hw_rtn_e
2926 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
2927 {
2928 	if (!hw || !domain) {
2929 		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
2930 		return OCS_HW_RTN_ERROR;
2931 	}
2932 
2933 	sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
2934 
2935 	return OCS_HW_RTN_SUCCESS;
2936 }
2937 
2938 /**
2939  * @ingroup node
2940  * @brief Allocate a remote node object.
2941  *
2942  * @param hw Hardware context.
2943  * @param rnode Allocated remote node object to initialize.
2944  * @param fc_addr FC address of the remote node.
2945  * @param sport SLI port used to connect to remote node.
2946  *
2947  * @return Returns 0 on success, or a non-zero value on failure.
2948  */
2949 ocs_hw_rtn_e
2950 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
2951 		ocs_sli_port_t *sport)
2952 {
2953 	/* Check for invalid indicator */
2954 	if (UINT32_MAX != rnode->indicator) {
2955 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
2956 				fc_addr, rnode->indicator);
2957 		return OCS_HW_RTN_ERROR;
2958 	}
2959 
2960 	/*
2961 	 * Check if the chip is in an error state (UE'd) before proceeding.
2962 	 */
2963 	if (sli_fw_error_status(&hw->sli) > 0) {
2964 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2965 		return OCS_HW_RTN_ERROR;
2966 	}
2967 
2968 	/* NULL SLI port indicates an unallocated remote node */
2969 	rnode->sport = NULL;
2970 
2971 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
2972 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
2973 				fc_addr);
2974 		return OCS_HW_RTN_ERROR;
2975 	}
2976 
2977 	rnode->fc_id = fc_addr;
2978 	rnode->sport = sport;
2979 
2980 	return OCS_HW_RTN_SUCCESS;
2981 }
2982 
2983 /**
2984  * @ingroup node
2985  * @brief Update a remote node object with the remote port's service parameters.
2986  *
2987  * @param hw Hardware context.
2988  * @param rnode Allocated remote node object to initialize.
2989  * @param sparms DMA buffer containing the remote port's service parameters.
2990  *
2991  * @return Returns 0 on success, or a non-zero value on failure.
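 *
 * @par Example
 * A typical allocate-then-attach flow (editor's sketch; @c sparms is a DMA
 * buffer holding the remote port's service parameters, obtained by the
 * caller):
 * @code
 * if (ocs_hw_node_alloc(hw, rnode, fc_addr, sport) == OCS_HW_RTN_SUCCESS) {
 *         rc = ocs_hw_node_attach(hw, rnode, sparms);
 *         // OCS_HW_RTN_SUCCESS_SYNC means the node was already attached (HLM)
 * }
 * @endcode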
2992  */
2993 ocs_hw_rtn_e
2994 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
2995 {
2996 	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
2997 	uint8_t		*buf = NULL;
2998 	uint32_t	count = 0;
2999 
3000 	if (!hw || !rnode || !sparms) {
3001 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
3002 			    hw, rnode, sparms);
3003 		return OCS_HW_RTN_ERROR;
3004 	}
3005 
3006 	/*
3007 	 * Check if the chip is in an error state (UE'd) before proceeding.
3008 	 */
3009 	if (sli_fw_error_status(&hw->sli) > 0) {
3010 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3011 		return OCS_HW_RTN_ERROR;
3012 	}
3013 
3014 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3015 	if (!buf) {
3016 		ocs_log_err(hw->os, "no buffer for command\n");
3017 		return OCS_HW_RTN_NO_MEMORY;
3018 	}
3019 
3020 	/*
3021 	 * If the attach count is non-zero, this RPI has already been registered.
3022 	 * Otherwise, register the RPI
3023 	 */
3024 	if (rnode->index == UINT32_MAX) {
3025 		ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
3026 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3027 		return OCS_HW_RTN_ERROR;
3028 	}
3029 	count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3030 	if (count) {
3031 		/*
3032 		 * Can't attach multiple FC_ID's to a node unless High Login
3033 		 * Mode is enabled
3034 		 */
3035 		if (sli_get_hlm(&hw->sli) == FALSE) {
3036 			ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
3037 					sli_get_hlm(&hw->sli), count);
3038 			rc = OCS_HW_RTN_SUCCESS;
3039 		} else {
3040 			rnode->node_group = TRUE;
3041 			rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
3042 			rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
3043 		}
3044 	} else {
3045 		rnode->node_group = FALSE;
3046 
3047 		ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
3048 		if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
3049 					rnode->indicator, rnode->sport->indicator,
3050 					sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
3051 			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
3052 					ocs_hw_cb_node_attach, rnode);
3053 		}
3054 	}
3055 
3056 	if (count || rc) {
3057 		if (rc < OCS_HW_RTN_SUCCESS) {
3058 			ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3059 			ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
3060 		}
3061 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3062 	}
3063 
3064 	return rc;
3065 }
3066 
3067 /**
3068  * @ingroup node
3069  * @brief Free a remote node resource.
3070  *
3071  * @param hw Hardware context.
3072  * @param rnode Remote node object to free.
3073  *
3074  * @return Returns 0 on success, or a non-zero value on failure.
3075  */
3076 ocs_hw_rtn_e
3077 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3078 {
3079 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
3080 
3081 	if (!hw || !rnode) {
3082 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3083 			    hw, rnode);
3084 		return OCS_HW_RTN_ERROR;
3085 	}
3086 
3087 	if (rnode->sport) {
3088 		if (!rnode->attached) {
3089 			if (rnode->indicator != UINT32_MAX) {
3090 				if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3091 					ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
3092 						    rnode->indicator, rnode->fc_id);
3093 					rc = OCS_HW_RTN_ERROR;
3094 				} else {
3095 					rnode->node_group = FALSE;
3096 					rnode->indicator = UINT32_MAX;
3097 					rnode->index = UINT32_MAX;
3098 					rnode->free_group = FALSE;
3099 				}
3100 			}
3101 		} else {
3102 			ocs_log_err(hw->os, "Error: rnode is still attached\n");
3103 			rc = OCS_HW_RTN_ERROR;
3104 		}
3105 	}
3106 
3107 	return rc;
3108 }
3109 
3110 
3111 /**
3112  * @ingroup node
3113  * @brief Detach a remote node object, unregistering its RPI.
3114  *
3115  * @param hw Hardware context.
3116  * @param rnode Remote node object to free.
3117  *
3118  * @return Returns 0 on success, or a non-zero value on failure.
3119  */
3120 ocs_hw_rtn_e
3121 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3122 {
3123 	uint8_t	*buf = NULL;
3124 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS_SYNC;
3125 	uint32_t	index = UINT32_MAX;
3126 
3127 	if (!hw || !rnode) {
3128 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3129 			    hw, rnode);
3130 		return OCS_HW_RTN_ERROR;
3131 	}
3132 
3133 	/*
3134 	 * Check if the chip is in an error state (UE'd) before proceeding.
3135 	 */
3136 	if (sli_fw_error_status(&hw->sli) > 0) {
3137 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3138 		return OCS_HW_RTN_ERROR;
3139 	}
3140 
3141 	index = rnode->index;
3142 
3143 	if (rnode->sport) {
3144 		uint32_t	count = 0;
3145 		uint32_t	fc_id;
3146 
3147 		if (!rnode->attached) {
3148 			return OCS_HW_RTN_SUCCESS_SYNC;
3149 		}
3150 
3151 		buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3152 		if (!buf) {
3153 			ocs_log_err(hw->os, "no buffer for command\n");
3154 			return OCS_HW_RTN_NO_MEMORY;
3155 		}
3156 
3157 		count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
3158 
3159 		if (count <= 1) {
3160 			/* There are no other references to this RPI
3161 			 * so unregister it and free the resource. */
3162 			fc_id = UINT32_MAX;
3163 			rnode->node_group = FALSE;
3164 			rnode->free_group = TRUE;
3165 		} else {
3166 			if (sli_get_hlm(&hw->sli) == FALSE) {
3167 				ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
3168 						count);
3169 			}
3170 			fc_id = rnode->fc_id & 0x00ffffff;
3171 		}
3172 
3173 		rc = OCS_HW_RTN_ERROR;
3174 
3175 		if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
3176 					SLI_RSRC_FCOE_RPI, fc_id)) {
3177 			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
3178 		}
3179 
3180 		if (rc != OCS_HW_RTN_SUCCESS) {
3181 			ocs_log_err(hw->os, "UNREG_RPI failed\n");
3182 			ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3183 			rc = OCS_HW_RTN_ERROR;
3184 		}
3185 	}
3186 
3187 	return rc;
3188 }
3189 
3190 /**
3191  * @ingroup node
3192  * @brief Free all remote node objects.
3193  *
3194  * @param hw Hardware context.
3195  *
3196  * @return Returns 0 on success, or a non-zero value on failure.
3197  */
3198 ocs_hw_rtn_e
3199 ocs_hw_node_free_all(ocs_hw_t *hw)
3200 {
3201 	uint8_t	*buf = NULL;
3202 	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
3203 
3204 	if (!hw) {
3205 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
3206 		return OCS_HW_RTN_ERROR;
3207 	}
3208 
3209 	/*
3210 	 * Check if the chip is in an error state (UE'd) before proceeding.
3211 	 */
3212 	if (sli_fw_error_status(&hw->sli) > 0) {
3213 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3214 		return OCS_HW_RTN_ERROR;
3215 	}
3216 
3217 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3218 	if (!buf) {
3219 		ocs_log_err(hw->os, "no buffer for command\n");
3220 		return OCS_HW_RTN_NO_MEMORY;
3221 	}
3222 
3223 	if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
3224 				SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
3225 		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
3226 				NULL);
3227 	}
3228 
3229 	if (rc != OCS_HW_RTN_SUCCESS) {
3230 		ocs_log_err(hw->os, "UNREG_RPI failed\n");
3231 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3232 		rc = OCS_HW_RTN_ERROR;
3233 	}
3234 
3235 	return rc;
3236 }
3237 
3238 ocs_hw_rtn_e
3239 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3240 {
3241 
3242 	if (!hw || !ngroup) {
3243 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3244 				hw, ngroup);
3245 		return OCS_HW_RTN_ERROR;
3246 	}
3247 
3248 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
3249 				&ngroup->index)) {
3250 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
3251 				ngroup->indicator);
3252 		return OCS_HW_RTN_ERROR;
3253 	}
3254 
3255 	return OCS_HW_RTN_SUCCESS;
3256 }
3257 
3258 ocs_hw_rtn_e
3259 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
3260 {
3261 
3262 	if (!hw || !ngroup || !rnode) {
3263 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
3264 			    hw, ngroup, rnode);
3265 		return OCS_HW_RTN_ERROR;
3266 	}
3267 
3268 	if (rnode->attached) {
3269 		ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
3270 			    rnode->indicator, rnode->fc_id);
3271 		return OCS_HW_RTN_ERROR;
3272 	}
3273 
3274 	if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3275 		ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3276 				rnode->indicator);
3277 		return OCS_HW_RTN_ERROR;
3278 	}
3279 
3280 	rnode->indicator = ngroup->indicator;
3281 	rnode->index = ngroup->index;
3282 
3283 	return OCS_HW_RTN_SUCCESS;
3284 }
3285 
3286 ocs_hw_rtn_e
3287 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3288 {
3289 	int	ref;
3290 
3291 	if (!hw || !ngroup) {
3292 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3293 				hw, ngroup);
3294 		return OCS_HW_RTN_ERROR;
3295 	}
3296 
3297 	ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
3298 	if (ref) {
3299 		/* The reference count is unexpectedly non-zero; release the RPI anyway */
3300 		ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
3301 				ref, ngroup->indicator);
3302 
3303 		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
3304 			ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3305 				    ngroup->indicator);
3306 			return OCS_HW_RTN_ERROR;
3307 		}
3308 
3309 		ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
3310 	}
3311 
3312 	ngroup->indicator = UINT32_MAX;
3313 	ngroup->index = UINT32_MAX;
3314 
3315 	return OCS_HW_RTN_SUCCESS;
3316 }
3317 
3318 /**
3319  * @brief Initialize IO fields on each free call.
3320  *
3321  * @n @b Note: This is done on each free call (as opposed to each
3322  * alloc call) because port-owned XRIs are not
3323  * allocated with ocs_hw_io_alloc() but are freed with this
3324  * function.
3325  *
3326  * @param io Pointer to HW IO.
3327  */
3328 static inline void
3329 ocs_hw_init_free_io(ocs_hw_io_t *io)
3330 {
3331 	/*
3332 	 * Set io->done to NULL, to avoid any callbacks, should
3333 	 * a completion be received for one of these IOs
3334 	 */
3335 	io->done = NULL;
3336 	io->abort_done = NULL;
3337 	io->status_saved = 0;
3338 	io->abort_in_progress = FALSE;
3339 	io->port_owned_abort_count = 0;
3340 	io->rnode = NULL;
3341 	io->type = 0xFFFF;
3342 	io->wq = NULL;
3343 	io->ul_io = NULL;
3344 	io->tgt_wqe_timeout = 0;
3345 }
3346 
3347 /**
3348  * @ingroup io
3349  * @brief Lockless allocate a HW IO object.
3350  *
3351  * @par Description
3352  * Assume that hw->ocs_lock is held. This function is only used if
3353  * use_dif_sec_xri workaround is being used.
3354  *
3355  * @param hw Hardware context.
3356  *
3357  * @return Returns a pointer to an object on success, or NULL on failure.
3358  */
3359 static inline ocs_hw_io_t *
3360 _ocs_hw_io_alloc(ocs_hw_t *hw)
3361 {
3362 	ocs_hw_io_t	*io = NULL;
3363 
3364 	if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
3365 		ocs_list_add_tail(&hw->io_inuse, io);
3366 		io->state = OCS_HW_IO_STATE_INUSE;
3367 		io->quarantine = FALSE;
3368 		io->quarantine_first_phase = TRUE;
3369 		io->abort_reqtag = UINT32_MAX;
3370 		ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
3371 	} else {
3372 		ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
3373 	}
3374 
3375 	return io;
3376 }

3377 /**
3378  * @ingroup io
3379  * @brief Allocate a HW IO object.
3380  *
3381  * @par Description
3382  * @n @b Note: This function applies to non-port owned XRIs
3383  * only.
3384  *
3385  * @param hw Hardware context.
3386  *
3387  * @return Returns a pointer to an object on success, or NULL on failure.
3388  */
3389 ocs_hw_io_t *
3390 ocs_hw_io_alloc(ocs_hw_t *hw)
3391 {
3392 	ocs_hw_io_t	*io = NULL;
3393 
3394 	ocs_lock(&hw->io_lock);
3395 		io = _ocs_hw_io_alloc(hw);
3396 	ocs_unlock(&hw->io_lock);
3397 
3398 	return io;
3399 }
3400 
3401 /**
3402  * @ingroup io
3403  * @brief Allocate/Activate a port owned HW IO object.
3404  *
3405  * @par Description
3406  * This function is called by the transport layer when an XRI is
3407  * allocated by the SLI-Port. This will "activate" the HW IO
3408  * associated with the XRI received from the SLI-Port to mirror
3409  * the state of the XRI.
3410  * @n @n @b Note: This function applies to port owned XRIs only.
3411  *
3412  * @param hw Hardware context.
3413  * @param io Pointer HW IO to activate/allocate.
3414  *
3415  * @return Returns a pointer to an object on success, or NULL on failure.
3416  */
3417 ocs_hw_io_t *
3418 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
3419 {
3420 	if (ocs_ref_read_count(&io->ref) > 0) {
3421 		ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
3422 		return NULL;
3423 	}
3424 
3425 	if (io->wq != NULL) {
3426 		ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
3427 		return NULL;
3428 	}
3429 
3430 	ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3431 	io->xbusy = TRUE;
3432 
3433 	return io;
3434 }
3435 
3436 /**
3437  * @ingroup io
3438  * @brief When an IO is freed, depending on the exchange busy flag, and other
3439  * workarounds, move it to the correct list.
3440  *
3441  * @par Description
3442  * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
3443  * from the busy or wait_free list.
3444  *
3445  * @param hw Hardware context.
3446  * @param io Pointer to the IO object to move.
3447  */
3448 static void
3449 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
3450 {
3451 	if (io->xbusy) {
3452 		/* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
3453 		ocs_list_add_tail(&hw->io_wait_free, io);
3454 		io->state = OCS_HW_IO_STATE_WAIT_FREE;
3455 	} else {
3456 		/* IO not busy, add to free list */
3457 		ocs_list_add_tail(&hw->io_free, io);
3458 		io->state = OCS_HW_IO_STATE_FREE;
3459 	}
3460 
3461 	/* BZ 161832 workaround */
3462 	if (hw->workaround.use_dif_sec_xri) {
3463 		ocs_hw_check_sec_hio_list(hw);
3464 	}
3465 }
3466 
3467 /**
3468  * @ingroup io
3469  * @brief Free a HW IO object. Perform cleanup common to
3470  * port and host-owned IOs.
3471  *
3472  * @param hw Hardware context.
3473  * @param io Pointer to the HW IO object.
3474  */
3475 static inline void
3476 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
3477 {
3478 	/* initialize IO fields */
3479 	ocs_hw_init_free_io(io);
3480 
3481 	/* Restore default SGL */
3482 	ocs_hw_io_restore_sgl(hw, io);
3483 }
3484 
3485 /**
3486  * @ingroup io
3487  * @brief Free a HW IO object associated with a port-owned XRI.
3488  *
3489  * @param arg Pointer to the HW IO object.
3490  */
3491 static void
3492 ocs_hw_io_free_port_owned(void *arg)
3493 {
3494 	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3495 	ocs_hw_t *hw = io->hw;
3496 
3497 	/*
3498 	 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
3499 	 * waiting for buffers.
3500 	 */
3501 	if (io->auto_xfer_rdy_dnrx) {
3502 		ocs_lock(&hw->io_lock);
3503 			/* take a reference count because we still own the IO until the buffer is posted */
3504 			ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3505 			ocs_list_add_tail(&hw->io_port_dnrx, io);
3506 		ocs_unlock(&hw->io_lock);
3507 	}
3508 
3509 	/* perform common cleanup */
3510 	ocs_hw_io_free_common(hw, io);
3511 }
3512 
3513 /**
3514  * @ingroup io
3515  * @brief Free a previously-allocated HW IO object. Called when
3516  * IO refcount goes to zero (host-owned IOs only).
3517  *
3518  * @param arg Pointer to the HW IO object.
3519  */
3520 static void
3521 ocs_hw_io_free_internal(void *arg)
3522 {
3523 	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3524 	ocs_hw_t *hw = io->hw;
3525 
3526 	/* perform common cleanup */
3527 	ocs_hw_io_free_common(hw, io);
3528 
3529 	ocs_lock(&hw->io_lock);
3530 		/* remove from in-use list */
3531 		ocs_list_remove(&hw->io_inuse, io);
3532 		ocs_hw_io_free_move_correct_list(hw, io);
3533 	ocs_unlock(&hw->io_lock);
3534 }
3535 
3536 /**
3537  * @ingroup io
3538  * @brief Free a previously-allocated HW IO object.
3539  *
3540  * @par Description
3541  * @n @b Note: This function applies to port and host owned XRIs.
3542  *
3543  * @param hw Hardware context.
3544  * @param io Pointer to the HW IO object.
3545  *
3546  * @return Returns a non-zero value if HW IO was freed, 0 if references
3547  * on the IO still exist, or a negative value if an error occurred.
3548  */
3549 int32_t
3550 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
3551 {
3552 	/* just put refcount */
3553 	if (ocs_ref_read_count(&io->ref) <= 0) {
3554 		ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
3555 			    io->indicator, io->reqtag);
3556 		return -1;
3557 	}
3558 
3559 	return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
3560 }
3561 
3562 /**
3563  * @ingroup io
3564  * @brief Check if given HW IO is in-use
3565  *
3566  * @par Description
3567  * This function returns TRUE if the given HW IO has been
3568  * allocated and is in-use, and FALSE otherwise. It applies to
3569  * port and host owned XRIs.
3570  *
3571  * @param hw Hardware context.
3572  * @param io Pointer to the HW IO object.
3573  *
3574  * @return TRUE if an IO is in use, or FALSE otherwise.
3575  */
3576 uint8_t
3577 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
3578 {
3579 	return (ocs_ref_read_count(&io->ref) > 0);
3580 }
3581 
3582 /**
3583  * @brief Write a HW IO to a work queue.
3584  *
3585  * @par Description
3586  * A HW IO is written to a work queue.
3587  *
3588  * @param wq Pointer to work queue.
3589  * @param wqe Pointer to WQ entry.
3590  *
3591  * @n @b Note: Assumes the SLI-4 queue lock is held.
3592  *
3593  * @return Returns 0 on success, or a negative error code value on failure.
3594  */
3595 static int32_t
3596 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3597 {
3598 	int32_t rc;
3599 	int32_t queue_rc;
3600 
3601 	/* Every so often, set the wqec bit to generate consumed completions */
3602 	if (wq->wqec_count) {
3603 		wq->wqec_count--;
3604 	}
3605 	if (wq->wqec_count == 0) {
3606 		sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3607 		genwqe->wqec = 1;
3608 		wq->wqec_count = wq->wqec_set_count;
3609 	}
3610 
3611 	/* Decrement WQ free count */
3612 	wq->free_count--;
3613 
3614 	queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3615 
3616 	if (queue_rc < 0) {
3617 		rc = -1;
3618 	} else {
3619 		rc = 0;
3620 		ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3621 	}
3622 
3623 	return rc;
3624 }
3625 
3626 /**
3627  * @brief Write a HW IO to a work queue.
3628  *
3629  * @par Description
3630  * A HW IO is written to a work queue.
3631  *
3632  * @param wq Pointer to work queue.
3633  * @param wqe Pointer to WQ entry.
3634  *
3635  * @n @b Note: Takes the SLI-4 queue lock.
3636  *
3637  * @return Returns 0 on success, or a negative error code value on failure.
3638  */
3639 int32_t
3640 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3641 {
3642 	int32_t rc = 0;
3643 
3644 	sli_queue_lock(wq->queue);
3645 		if ( ! ocs_list_empty(&wq->pending_list)) {
3646 			ocs_list_add_tail(&wq->pending_list, wqe);
3647 			OCS_STAT(wq->wq_pending_count++;)
3648 			while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3649 				rc = _hw_wq_write(wq, wqe);
3650 				if (rc < 0) {
3651 					break;
3652 				}
3653 				if (wqe->abort_wqe_submit_needed) {
3654 					wqe->abort_wqe_submit_needed = 0;
3655 					sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3656 							wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT );
3657 					ocs_list_add_tail(&wq->pending_list, wqe);
3658 					OCS_STAT(wq->wq_pending_count++;)
3659 				}
3660 			}
3661 		} else {
3662 			if (wq->free_count > 0) {
3663 				rc = _hw_wq_write(wq, wqe);
3664 			} else {
3665 				ocs_list_add_tail(&wq->pending_list, wqe);
3666 				OCS_STAT(wq->wq_pending_count++;)
3667 			}
3668 		}
3669 
3670 	sli_queue_unlock(wq->queue);
3671 
3672 	return rc;
3674 }
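
/**
 * @par Example
 * A minimal sketch of the submit pattern used throughout this file (see
 * ocs_hw_srrs_send() and ocs_hw_io_send()); everything other than the HW
 * APIs themselves is an assumption.
 *
 * @code
 * if (io->wq == NULL) {
 *         io->wq = ocs_hw_queue_next_wq(hw, io);  // late-bind a WQ
 * }
 * ocs_hw_add_io_timed_wqe(hw, io);                // track before submitting
 * if (hw_wq_write(io->wq, &io->wqe) < 0) {
 *         ocs_hw_remove_io_timed_wqe(hw, io);     // undo tracking on failure
 * }
 * @endcode
 */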
3675 
3676 /**
3677  * @brief Update free count and submit any pending HW IOs
3678  *
3679  * @par Description
3680  * The WQ free count is updated, and any pending HW IOs are submitted that
3681  * will fit in the queue.
3682  *
3683  * @param wq Pointer to work queue.
3684  * @param update_free_count Value added to WQs free count.
3685  *
3686  * @return None.
3687  */
3688 static void
3689 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
3690 {
3691 	ocs_hw_wqe_t *wqe;
3692 
3693 	sli_queue_lock(wq->queue);
3694 
3695 		/* Update free count with value passed in */
3696 		wq->free_count += update_free_count;
3697 
3698 		while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3699 			_hw_wq_write(wq, wqe);
3700 
3701 			if (wqe->abort_wqe_submit_needed) {
3702 				wqe->abort_wqe_submit_needed = 0;
3703 				sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3704 						wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
3705 				ocs_list_add_tail(&wq->pending_list, wqe);
3706 				OCS_STAT(wq->wq_pending_count++;)
3707 			}
3708 		}
3709 
3710 	sli_queue_unlock(wq->queue);
3711 }
3712 
3713 /**
3714  * @brief Check for IOs waiting on the BZ 161832 workaround.
3715  *
3716  * @par Description
3717  * Checks hw->sec_hio_wait_list; if an IO is waiting for a secondary HW IO,
3718  * try to allocate one and dispatch the IO.
3719  *
3720  * @n @b Note: hw->io_lock MUST be taken when called.
3721  *
3722  * @param hw Hardware context.
3723  *
3724  * @return None.
3725  */
3726 static void
3727 ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
3728 {
3729 	ocs_hw_io_t *io;
3730 	ocs_hw_io_t *sec_io;
3731 	int rc = 0;
3732 
3733 	while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
3734 		uint16_t flags;
3735 
3736 		sec_io = _ocs_hw_io_alloc(hw);
3737 		if (sec_io == NULL) {
3738 			break;
3739 		}
3740 
3741 		io = ocs_list_remove_head(&hw->sec_hio_wait_list);
3742 		ocs_list_add_tail(&hw->io_inuse, io);
3743 		io->state = OCS_HW_IO_STATE_INUSE;
3744 		io->sec_hio = sec_io;
3745 
3746 		/* mark secondary XRI for second and subsequent data phase as quarantine */
3747 		if (io->xbusy) {
3748 			sec_io->quarantine = TRUE;
3749 		}
3750 
3751 		flags = io->sec_iparam.fcp_tgt.flags;
3752 		if (io->xbusy) {
3753 			flags |= SLI4_IO_CONTINUATION;
3754 		} else {
3755 			flags &= ~SLI4_IO_CONTINUATION;
3756 		}
3757 
3758 		io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
3759 
3760 		/* Complete (continue) TRECV IO */
3761 		if (io->xbusy) {
3762 			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3763 				io->first_data_sge,
3764 				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
3765 				io->reqtag, SLI4_CQ_DEFAULT,
3766 				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3767 				flags,
3768 				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3769 					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3770 					break;
3771 			}
3772 		} else {
3773 			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3774 				io->first_data_sge,
3775 				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
3776 				io->reqtag, SLI4_CQ_DEFAULT,
3777 				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3778 				flags,
3779 				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
3780 				io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3781 					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3782 					break;
3783 			}
3784 		}
3785 
3786 		if (io->wq == NULL) {
3787 			io->wq = ocs_hw_queue_next_wq(hw, io);
3788 			ocs_hw_assert(io->wq != NULL);
3789 		}
3790 		io->xbusy = TRUE;
3791 
3792 		/*
3793 		 * Add IO to active io wqe list before submitting, in case the
3794 		 * wcqe processing preempts this thread.
3795 		 */
3796 		ocs_hw_add_io_timed_wqe(hw, io);
3797 		rc = hw_wq_write(io->wq, &io->wqe);
3798 		if (rc >= 0) {
3799 			/* non-negative return is success */
3800 			rc = 0;
3801 		} else {
3802 			/* failed to write wqe, remove from active wqe list */
3803 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3804 			io->xbusy = FALSE;
3805 			ocs_hw_remove_io_timed_wqe(hw, io);
3806 		}
3807 	}
3808 }
3809 
3810 /**
3811  * @ingroup io
3812  * @brief Send a Single Request/Response Sequence (SRRS).
3813  *
3814  * @par Description
3815  * This routine supports communication sequences consisting of a single
3816  * request and single response between two endpoints. Examples include:
3817  *  - Sending an ELS request.
3818  *  - Sending an ELS response - To send an ELS response, the caller must provide
3819  * the OX_ID from the received request.
3820  *  - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3821  * the caller must provide the R_CTL, TYPE, and DF_CTL
3822  * values to place in the FC frame header.
3823  *  .
3824  * @n @b Note: The caller is expected to provide both send and receive
3825  * buffers for requests. In the case of sending a response, no receive buffer
3826  * is necessary and the caller may pass in a NULL pointer.
3827  *
3828  * @param hw Hardware context.
3829  * @param type Type of sequence (ELS request/response, FC-CT).
3830  * @param io Previously-allocated HW IO object.
3831  * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3832  * @param len Length, in bytes, of data to send.
3833  * @param receive Optional DMA memory to hold a response.
3834  * @param rnode Destination of data (that is, a remote node).
3835  * @param iparam IO parameters (ELS response and FC-CT).
3836  * @param cb Function call upon completion of sending the data (may be NULL).
3837  * @param arg Argument to pass to IO completion function.
3838  *
3839  * @return Returns 0 on success, or a non-zero on failure.
3840  */
3841 ocs_hw_rtn_e
3842 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
3843 		  ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
3844 		  ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
3845 		  ocs_hw_srrs_cb_t cb, void *arg)
3846 {
3847 	sli4_sge_t	*sge = NULL;
3848 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
3849 	uint16_t	local_flags = 0;
3850 
3851 	if (!hw || !io || !rnode || !iparam) {
3852 		ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
3853 			    hw, io, send, receive, rnode, iparam);
3854 		return OCS_HW_RTN_ERROR;
3855 	}
3856 
3857 	if (hw->state != OCS_HW_STATE_ACTIVE) {
3858 		ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
3859 		return OCS_HW_RTN_ERROR;
3860 	}
3861 
3862 	if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
3863 		/* We must set the XC bit for port owned XRIs */
3864 		local_flags |= SLI4_IO_CONTINUATION;
3865 	}
3866 	io->rnode = rnode;
3867 	io->type  = type;
3868 	io->done = cb;
3869 	io->arg  = arg;
3870 
3871 	sge = io->sgl->virt;
3872 
3873 	/* clear both SGE */
3874 	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
3875 
3876 	if (send) {
3877 		sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
3878 		sge[0].buffer_address_low  = ocs_addr32_lo(send->phys);
3879 		sge[0].sge_type = SLI4_SGE_TYPE_DATA;
3880 		sge[0].buffer_length = len;
3881 	}
3882 
3883 	if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
3884 		sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
3885 		sge[1].buffer_address_low  = ocs_addr32_lo(receive->phys);
3886 		sge[1].sge_type = SLI4_SGE_TYPE_DATA;
3887 		sge[1].buffer_length = receive->size;
3888 		sge[1].last = TRUE;
3889 	} else {
3890 		sge[0].last = TRUE;
3891 	}
3892 
3893 	switch (type) {
3894 	case OCS_HW_ELS_REQ:
3895 		if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
3896 							*((uint8_t *)(send->virt)), /* req_type */
3897 							len, receive->size,
3898 							iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
3899 			ocs_log_err(hw->os, "REQ WQE error\n");
3900 			rc = OCS_HW_RTN_ERROR;
3901 		}
3902 		break;
3903 	case OCS_HW_ELS_RSP:
3904 		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3905 					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3906 					   iparam->els.ox_id,
3907 							rnode, local_flags, UINT32_MAX)) {
3908 			ocs_log_err(hw->os, "RSP WQE error\n");
3909 			rc = OCS_HW_RTN_ERROR;
3910 		}
3911 		break;
3912 	case OCS_HW_ELS_RSP_SID:
3913 		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3914 					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3915 					   iparam->els_sid.ox_id,
3916 							rnode, local_flags, iparam->els_sid.s_id)) {
3917 			ocs_log_err(hw->os, "RSP (SID) WQE error\n");
3918 			rc = OCS_HW_RTN_ERROR;
3919 		}
3920 		break;
3921 	case OCS_HW_FC_CT:
3922 		if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3923 					  receive->size, iparam->fc_ct.timeout, io->indicator,
3924 					  io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
3925 					  iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
3926 			ocs_log_err(hw->os, "GEN WQE error\n");
3927 			rc = OCS_HW_RTN_ERROR;
3928 		}
3929 		break;
3930 	case OCS_HW_FC_CT_RSP:
3931 		if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3932 					  iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
3933 					  io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
3934 					  iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
3935 			ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
3936 			rc = OCS_HW_RTN_ERROR;
3937 		}
3938 		break;
3939 	case OCS_HW_BLS_ACC:
3940 	case OCS_HW_BLS_RJT:
3941 	{
3942 		sli_bls_payload_t	bls;
3943 
3944 		if (OCS_HW_BLS_ACC == type) {
3945 			bls.type = SLI_BLS_ACC;
3946 			ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
3947 		} else {
3948 			bls.type = SLI_BLS_RJT;
3949 			ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
3950 		}
3951 
3952 		bls.ox_id = iparam->bls.ox_id;
3953 		bls.rx_id = iparam->bls.rx_id;
3954 
3955 		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3956 					   io->indicator, io->reqtag,
3957 					   SLI4_CQ_DEFAULT,
3958 					   rnode, UINT32_MAX)) {
3959 			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
3960 			rc = OCS_HW_RTN_ERROR;
3961 		}
3962 		break;
3963 	}
3964 	case OCS_HW_BLS_ACC_SID:
3965 	{
3966 		sli_bls_payload_t	bls;
3967 
3968 		bls.type = SLI_BLS_ACC;
3969 		ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
3970 
3971 		bls.ox_id = iparam->bls_sid.ox_id;
3972 		bls.rx_id = iparam->bls_sid.rx_id;
3973 
3974 		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3975 					   io->indicator, io->reqtag,
3976 					   SLI4_CQ_DEFAULT,
3977 					   rnode, iparam->bls_sid.s_id)) {
3978 			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
3979 			rc = OCS_HW_RTN_ERROR;
3980 		}
3981 		break;
3982 	}
3983 	case OCS_HW_BCAST:
3984 		if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3985 					iparam->bcast.timeout, io->indicator, io->reqtag,
3986 					SLI4_CQ_DEFAULT, rnode,
3987 					iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
3988 			ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
3989 			rc = OCS_HW_RTN_ERROR;
3990 		}
3991 		break;
3992 	default:
3993 		ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
3994 		rc = OCS_HW_RTN_ERROR;
3995 	}
3996 
3997 	if (OCS_HW_RTN_SUCCESS == rc) {
3998 		if (io->wq == NULL) {
3999 			io->wq = ocs_hw_queue_next_wq(hw, io);
4000 			ocs_hw_assert(io->wq != NULL);
4001 		}
4002 		io->xbusy = TRUE;
4003 
4004 		/*
4005 		 * Add IO to active io wqe list before submitting, in case the
4006 		 * wcqe processing preempts this thread.
4007 		 */
4008 		OCS_STAT(io->wq->use_count++);
4009 		ocs_hw_add_io_timed_wqe(hw, io);
4010 		rc = hw_wq_write(io->wq, &io->wqe);
4011 		if (rc >= 0) {
4012 			/* non-negative return is success */
4013 			rc = 0;
4014 		} else {
4015 			/* failed to write wqe, remove from active wqe list */
4016 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4017 			io->xbusy = FALSE;
4018 			ocs_hw_remove_io_timed_wqe(hw, io);
4019 		}
4020 	}
4021 
4022 	return rc;
4023 }
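
/**
 * @par Example
 * A hedged sketch of issuing an ELS request with ocs_hw_srrs_send(). The
 * DMA buffers, the timeout value, and the completion handler are
 * illustrative assumptions.
 *
 * @code
 * ocs_hw_io_param_t iparam = {0};
 * ocs_hw_rtn_e rc;
 *
 * iparam.els.timeout = 30;                       // seconds; assumed policy
 * rc = ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io,
 *                       &els_req_dma, req_len,   // send buffer and length
 *                       &els_rsp_dma,            // receive buffer (required for requests)
 *                       rnode, &iparam,
 *                       my_els_done, my_arg);    // assumed callback and argument
 * @endcode
 */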
4024 
4025 /**
4026  * @ingroup io
4027  * @brief Send a read, write, or response IO.
4028  *
4029  * @par Description
4030  * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
4031  * as a target or initiator. Examples include:
4032  *  - Sending read data and good response (target).
4033  *  - Sending a response (target with no data or after receiving write data).
4034  *  .
4035  * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
4036  * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
4037  *
4038  * @param hw Hardware context.
4039  * @param type Type of IO (target read, target response, and so on).
4040  * @param io Previously-allocated HW IO object.
4041  * @param len Length, in bytes, of data to send.
4042  * @param iparam IO parameters.
4043  * @param rnode Destination of data (that is, a remote node).
4044  * @param cb Function call upon completion of sending data (may be NULL).
4045  * @param arg Argument to pass to IO completion function.
4046  *
4047  * @return Returns 0 on success, or a non-zero value on failure.
4048  *
4049  * @todo
4050  *  - Support specifying a relative offset.
4051  *  - Use a WQ other than 0.
4052  */
4053 ocs_hw_rtn_e
4054 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4055 		uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4056 		void *cb, void *arg)
4057 {
4058 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
4059 	uint32_t	rpi;
4060 	uint8_t		send_wqe = TRUE;
4061 
4062 	CPUTRACE("");
4063 
4064 	if (!hw || !io || !rnode || !iparam) {
4065 		ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4066 			    hw, io, iparam, rnode);
4067 		return OCS_HW_RTN_ERROR;
4068 	}
4069 
4070 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4071 		ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4072 		return OCS_HW_RTN_ERROR;
4073 	}
4074 
4075 	rpi = rnode->indicator;
4076 
4077 	if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4078 		rpi = hw->workaround.unregistered_rid;
4079 		ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4080 	}
4081 
4082 	/*
4083 	 * Save state needed during later stages
4084 	 */
4085 	io->rnode = rnode;
4086 	io->type  = type;
4087 	io->done  = cb;
4088 	io->arg   = arg;
4089 
4090 	/*
4091 	 * Format the work queue entry used to send the IO
4092 	 */
4093 	switch (type) {
4094 	case OCS_HW_IO_INITIATOR_READ:
4095 		/*
4096 		 * If use_dif_quarantine workaround is in effect, and dif_separates then mark the
4097 		 * initiator read IO for quarantine
4098 		 */
4099 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4100 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4101 			io->quarantine = TRUE;
4102 		}
4103 
4104 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4105 				iparam->fcp_ini.rsp);
4106 
4107 		if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4108 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4109 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4110 					iparam->fcp_ini.timeout)) {
4111 			ocs_log_err(hw->os, "IREAD WQE error\n");
4112 			rc = OCS_HW_RTN_ERROR;
4113 		}
4114 		break;
4115 	case OCS_HW_IO_INITIATOR_WRITE:
4116 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4117 				iparam->fcp_ini.rsp);
4118 
4119 		if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4120 					 len, iparam->fcp_ini.first_burst,
4121 					 io->indicator, io->reqtag,
4122 					SLI4_CQ_DEFAULT, rpi, rnode,
4123 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4124 					iparam->fcp_ini.timeout)) {
4125 			ocs_log_err(hw->os, "IWRITE WQE error\n");
4126 			rc = OCS_HW_RTN_ERROR;
4127 		}
4128 		break;
4129 	case OCS_HW_IO_INITIATOR_NODATA:
4130 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4131 				iparam->fcp_ini.rsp);
4132 
4133 		if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4134 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4135 					rpi, rnode, iparam->fcp_ini.timeout)) {
4136 			ocs_log_err(hw->os, "ICMND WQE error\n");
4137 			rc = OCS_HW_RTN_ERROR;
4138 		}
4139 		break;
4140 	case OCS_HW_IO_TARGET_WRITE: {
4141 		uint16_t flags = iparam->fcp_tgt.flags;
4142 		fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4143 
4144 		/*
4145 		 * Fill in the XFER_RDY for IF_TYPE 0 devices
4146 		 */
4147 		*((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4148 		*((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4149 		*((uint32_t *)xfer->rsvd) = 0;
4150 
4151 		if (io->xbusy) {
4152 			flags |= SLI4_IO_CONTINUATION;
4153 		} else {
4154 			flags &= ~SLI4_IO_CONTINUATION;
4155 		}
4156 
4157 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4158 
4159 		/*
4160 		 * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO
4161 		 * then mark the target write IO for quarantine
4162 		 */
4163 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4164 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4165 			io->quarantine = TRUE;
4166 		}
4167 
4168 		/*
4169 		 * BZ 161832 Workaround:
4170 		 * Check for use_dif_sec_xri workaround.  Note, even though the first dataphase
4171 		 * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the
4172  * potential for deadlock where all XRIs are allocated as primaries to IOs that
4173 		 * are on hw->sec_hio_wait_list.   If this secondary XRI is not for the first
4174 		 * data phase, it is marked for quarantine.
4175 		 */
4176 		if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4177 
4178 			/*
4179 			 * If we have allocated a chained SGL for skyhawk, then
4180 			 * we can re-use this for the sec_hio.
4181 			 */
4182 			if (io->ovfl_io != NULL) {
4183 				io->sec_hio = io->ovfl_io;
4184 				io->sec_hio->quarantine = TRUE;
4185 			} else {
4186 				io->sec_hio = ocs_hw_io_alloc(hw);
4187 			}
4188 			if (io->sec_hio == NULL) {
4189 				/* Failed to allocate, so save full request context and put
4190 				 * this IO on the wait list
4191 				 */
4192 				io->sec_iparam = *iparam;
4193 				io->sec_len = len;
4194 				ocs_lock(&hw->io_lock);
4195 					ocs_list_remove(&hw->io_inuse,  io);
4196 					ocs_list_add_tail(&hw->sec_hio_wait_list, io);
4197 					io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
4198 					hw->sec_hio_wait_count++;
4199 				ocs_unlock(&hw->io_lock);
4200 				send_wqe = FALSE;
4201 				/* Done */
4202 				break;
4203 			}
4204 			/* We quarantine the secondary IO if this is the second or subsequent data phase */
4205 			if (io->xbusy) {
4206 				io->sec_hio->quarantine = TRUE;
4207 			}
4208 		}
4209 
4210 		/*
4211 		 * If not the first data phase, and io->sec_hio has been allocated, then issue
4212 		 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
4213 		 */
4214 		if (io->xbusy && (io->sec_hio != NULL)) {
4215 			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4216 						   iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
4217 						   io->reqtag, SLI4_CQ_DEFAULT,
4218 						   iparam->fcp_tgt.ox_id, rpi, rnode,
4219 						   flags,
4220 						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4221 						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4222 				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4223 				rc = OCS_HW_RTN_ERROR;
4224 			}
4225 		} else {
4226 			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4227 						   iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4228 						   SLI4_CQ_DEFAULT,
4229 						   iparam->fcp_tgt.ox_id, rpi, rnode,
4230 						   flags,
4231 						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4232 						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4233 				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4234 				rc = OCS_HW_RTN_ERROR;
4235 			}
4236 		}
4237 		break;
4238 	}
4239 	case OCS_HW_IO_TARGET_READ: {
4240 		uint16_t flags = iparam->fcp_tgt.flags;
4241 
4242 		if (io->xbusy) {
4243 			flags |= SLI4_IO_CONTINUATION;
4244 		} else {
4245 			flags &= ~SLI4_IO_CONTINUATION;
4246 		}
4247 
4248 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4249 		if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4250 					iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4251 					SLI4_CQ_DEFAULT,
4252 					iparam->fcp_tgt.ox_id, rpi, rnode,
4253 					flags,
4254 					iparam->fcp_tgt.dif_oper,
4255 					iparam->fcp_tgt.blk_size,
4256 					iparam->fcp_tgt.cs_ctl,
4257 					iparam->fcp_tgt.app_id)) {
4258 			ocs_log_err(hw->os, "TSEND WQE error\n");
4259 			rc = OCS_HW_RTN_ERROR;
4260 		} else if (hw->workaround.retain_tsend_io_length) {
4261 			io->length = len;
4262 		}
4263 		break;
4264 	}
4265 	case OCS_HW_IO_TARGET_RSP: {
4266 		uint16_t flags = iparam->fcp_tgt.flags;
4267 
4268 		if (io->xbusy) {
4269 			flags |= SLI4_IO_CONTINUATION;
4270 		} else {
4271 			flags &= ~SLI4_IO_CONTINUATION;
4272 		}
4273 
4274 		/* post a new auto xfer ready buffer */
4275 		if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
4276 			if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
4277 				flags |= SLI4_IO_DNRX;
4278 			}
4279 		}
4280 
4281 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4282 		if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
4283 					&io->def_sgl,
4284 					len,
4285 					io->indicator, io->reqtag,
4286 					SLI4_CQ_DEFAULT,
4287 					iparam->fcp_tgt.ox_id,
4288 					rpi, rnode,
4289 					flags, iparam->fcp_tgt.cs_ctl,
4290 					io->is_port_owned,
4291 					iparam->fcp_tgt.app_id)) {
4292 			ocs_log_err(hw->os, "TRSP WQE error\n");
4293 			rc = OCS_HW_RTN_ERROR;
4294 		}
4295 
4296 		break;
4297 	}
4298 	default:
4299 		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4300 		rc = OCS_HW_RTN_ERROR;
4301 	}
4302 
4303 	if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
4304 		if (io->wq == NULL) {
4305 			io->wq = ocs_hw_queue_next_wq(hw, io);
4306 			ocs_hw_assert(io->wq != NULL);
4307 		}
4308 
4309 		io->xbusy = TRUE;
4310 
4311 		/*
4312 		 * Add IO to active io wqe list before submitting, in case the
4313 		 * wcqe processing preempts this thread.
4314 		 */
4315 		OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
4316 		OCS_STAT(io->wq->use_count++);
4317 		ocs_hw_add_io_timed_wqe(hw, io);
4318 		rc = hw_wq_write(io->wq, &io->wqe);
4319 		if (rc >= 0) {
4320 			/* non-negative return is success */
4321 			rc = 0;
4322 		} else {
4323 			/* failed to write wqe, remove from active wqe list */
4324 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4325 			io->xbusy = FALSE;
4326 			ocs_hw_remove_io_timed_wqe(hw, io);
4327 		}
4328 	}
4329 
4330 	return rc;
4331 }
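
/**
 * @par Example
 * A hedged sketch of a target read (sending data to the initiator) built
 * from the SGE helpers below; the buffer, its length, the OX_ID, and the
 * completion callback are assumptions.
 *
 * @code
 * ocs_hw_io_param_t iparam = {0};
 * ocs_hw_rtn_e rc;
 *
 * ocs_hw_io_init_sges(hw, io, OCS_HW_IO_TARGET_READ);  // writes the SKIP SGEs
 * ocs_hw_io_add_sge(hw, io, data_phys, data_len);      // payload to send
 *
 * iparam.fcp_tgt.ox_id    = ox_id;                     // from the received command
 * iparam.fcp_tgt.offset   = 0;                         // relative offset
 * iparam.fcp_tgt.dif_oper = OCS_HW_DIF_OPER_DISABLED;  // no protection data
 * rc = ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, io, data_len,
 *                     &iparam, rnode, my_io_done, my_arg);
 * @endcode
 */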
4332 
4333 /**
4334  * @brief Send a raw frame
4335  *
4336  * @par Description
4337  * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent.
4338  *
4339  * @param hw Pointer to HW object.
4340  * @param hdr Pointer to a little endian formatted FC header.
4341  * @param sof Value to use as the frame SOF.
4342  * @param eof Value to use as the frame EOF.
4343  * @param payload Pointer to payload DMA buffer.
4344  * @param ctx Pointer to caller provided send frame context.
4345  * @param callback Callback function.
4346  * @param arg Callback function argument.
4347  *
4348  * @return Returns 0 on success, or a negative error code value on failure.
4349  */
4350 ocs_hw_rtn_e
4351 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
4352 		   ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
4353 {
4354 	int32_t rc;
4355 	ocs_hw_wqe_t *wqe;
4356 	uint32_t xri;
4357 	hw_wq_t *wq;
4358 
4359 	wqe = &ctx->wqe;
4360 
4361 	/* populate the callback object */
4362 	ctx->hw = hw;
4363 
4364 	/* Fetch and populate request tag */
4365 	ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
4366 	if (ctx->wqcb == NULL) {
4367 		ocs_log_err(hw->os, "can't allocate request tag\n");
4368 		return OCS_HW_RTN_NO_RESOURCES;
4369 	}
4370 
4371 	/* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
4372 	wq = ocs_varray_iter_next(hw->wq_class_array[1]);
4373 	if (wq == NULL) {
4374 		wq = hw->hw_wq[0];
4375 	}
4376 
4377 	/* Set XRI and RX_ID in the header based on which WQ and send_frame_io we are using */
4378 	xri = wq->send_frame_io->indicator;
4379 
4380 	/* Build the send frame WQE */
4381 	rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
4382 				payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
4383 	if (rc) {
4384 		ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
4385 		return OCS_HW_RTN_ERROR;
4386 	}
4387 
4388 	/* Write to WQ */
4389 	rc = hw_wq_write(wq, wqe);
4390 	if (rc) {
4391 		ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
4392 		return OCS_HW_RTN_ERROR;
4393 	}
4394 
4395 	OCS_STAT(wq->use_count++);
4396 
4397 	return OCS_HW_RTN_SUCCESS;
4398 }
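
/**
 * @par Example
 * A hedged sketch of sending a raw frame. The header, SOF/EOF code values,
 * payload buffer, and callback are assumptions supplied by the caller; the
 * context must remain valid until the callback runs, since it holds the WQE.
 *
 * @code
 * static ocs_hw_send_frame_context_t ctx;
 *
 * rc = ocs_hw_send_frame(hw, &hdr, sof, eof, &payload_dma, &ctx,
 *                        my_frame_done, my_arg);
 * @endcode
 */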
4399 
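/**
 * @ingroup io
 * @brief Register a temporary SGL with an IO.
 *
 * @par Description
 * Attaches a caller-provided overflow SGL to the IO. This is only valid
 * when the SLI port does not use pre-registered SGLs.
 *
 * @param hw Hardware context.
 * @param io Previously-allocated HW IO object.
 * @param sgl DMA memory holding the SGL.
 * @param sgl_count Number of entries in the SGL.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */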
4400 ocs_hw_rtn_e
4401 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
4402 {
4403 	if (sli_get_sgl_preregister(&hw->sli)) {
4404 		ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
4405 		return OCS_HW_RTN_ERROR;
4406 	}
4407 	io->ovfl_sgl = sgl;
4408 	io->ovfl_sgl_count = sgl_count;
4409 	io->ovfl_io = NULL;
4410 
4411 	return OCS_HW_RTN_SUCCESS;
4412 }
4413 
4414 static void
4415 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4416 {
4417 	/* Restore the default */
4418 	io->sgl = &io->def_sgl;
4419 	io->sgl_count = io->def_sgl_count;
4420 
4421 	/*
4422 	 * For skyhawk, we need to free the IO allocated for the chained
4423 	 * SGL. For all devices, clear the overflow fields on the IO.
4424 	 *
4425 	 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
4426 	 *       the chained SGLs. If so, then we clear the ovfl_io field
4427 	 *       when the sec_hio is freed.
4428 	 */
4429 	if (io->ovfl_io != NULL) {
4430 		ocs_hw_io_free(hw, io->ovfl_io);
4431 		io->ovfl_io = NULL;
4432 	}
4433 
4434 	/* Clear the overflow SGL */
4435 	io->ovfl_sgl = NULL;
4436 	io->ovfl_sgl_count = 0;
4437 	io->ovfl_lsp = NULL;
4438 }
4439 
4440 /**
4441  * @ingroup io
4442  * @brief Initialize the scatter gather list entries of an IO.
4443  *
4444  * @param hw Hardware context.
4445  * @param io Previously-allocated HW IO object.
4446  * @param type Type of IO (target read, target response, and so on).
4447  *
4448  * @return Returns 0 on success, or a non-zero value on failure.
4449  */
4450 ocs_hw_rtn_e
4451 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
4452 {
4453 	sli4_sge_t	*data = NULL;
4454 	uint32_t	i = 0;
4455 	uint32_t	skips = 0;
4456 
4457 	if (!hw || !io) {
4458 		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
4459 			    hw, io);
4460 		return OCS_HW_RTN_ERROR;
4461 	}
4462 
4463 	/* Clear / reset the scatter-gather list */
4464 	io->sgl = &io->def_sgl;
4465 	io->sgl_count = io->def_sgl_count;
4466 	io->first_data_sge = 0;
4467 
4468 	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
4469 	io->n_sge = 0;
4470 	io->sge_offset = 0;
4471 
4472 	io->type = type;
4473 
4474 	data = io->sgl->virt;
4475 
4476 	/*
4477 	 * Some IO types have underlying hardware requirements on the order
4478 	 * of SGEs. Process all special entries here.
4479 	 */
4480 	switch (type) {
4481 	case OCS_HW_IO_INITIATOR_READ:
4482 	case OCS_HW_IO_INITIATOR_WRITE:
4483 	case OCS_HW_IO_INITIATOR_NODATA:
4484 		/*
4485 		 * No skips, 2 special for initiator I/Os
4486 		 * The addresses and length are written later
4487 		 */
4488 		/* setup command pointer */
4489 		data->sge_type = SLI4_SGE_TYPE_DATA;
4490 		data++;
4491 
4492 		/* setup response pointer */
4493 		data->sge_type = SLI4_SGE_TYPE_DATA;
4494 
4495 		if (OCS_HW_IO_INITIATOR_NODATA == type) {
4496 			data->last = TRUE;
4497 		}
4498 		data++;
4499 
4500 		io->n_sge = 2;
4501 		break;
4502 	case OCS_HW_IO_TARGET_WRITE:
4503 #define OCS_TARGET_WRITE_SKIPS	2
4504 		skips = OCS_TARGET_WRITE_SKIPS;
4505 
4506 		/* populate host resident XFER_RDY buffer */
4507 		data->sge_type = SLI4_SGE_TYPE_DATA;
4508 		data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
4509 		data->buffer_address_low  = ocs_addr32_lo(io->xfer_rdy.phys);
4510 		data->buffer_length = io->xfer_rdy.size;
4511 		data++;
4512 
4513 		skips--;
4514 
4515 		io->n_sge = 1;
4516 		break;
4517 	case OCS_HW_IO_TARGET_READ:
4518 		/*
4519 		 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
4520 		 */
4521 #define OCS_TARGET_READ_SKIPS	2
4522 		skips = OCS_TARGET_READ_SKIPS;
4523 		break;
4524 	case OCS_HW_IO_TARGET_RSP:
4525 		/*
4526 		 * No skips, etc. for FCP_TRSP64
4527 		 */
4528 		break;
4529 	default:
4530 		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4531 		return OCS_HW_RTN_ERROR;
4532 	}
4533 
4534 	/*
4535 	 * Write skip entries
4536 	 */
4537 	for (i = 0; i < skips; i++) {
4538 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4539 		data++;
4540 	}
4541 
4542 	io->n_sge += skips;
4543 
4544 	/*
4545 	 * Set last
4546 	 */
4547 	data->last = TRUE;
4548 
4549 	return OCS_HW_RTN_SUCCESS;
4550 }
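
/**
 * @par Example
 * For @c OCS_HW_IO_TARGET_WRITE the code above produces this default SGL
 * layout (data entries are appended later with ocs_hw_io_add_sge()):
 *
 * @code
 * sge[0]  DATA - host resident XFER_RDY buffer
 * sge[1]  SKIP - reserved by the WQE format
 * sge[2+] DATA - payload, added by the caller
 * @endcode
 */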
4551 
4552 /**
4553  * @ingroup io
4554  * @brief Add a T10 PI seed scatter gather list entry.
4555  *
4556  * @param hw Hardware context.
4557  * @param io Previously-allocated HW IO object.
4558  * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
4559  *
4560  * @return Returns 0 on success, or a non-zero value on failure.
4561  */
4562 ocs_hw_rtn_e
4563 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
4564 {
4565 	sli4_sge_t	*data = NULL;
4566 	sli4_diseed_sge_t *dif_seed;
4567 
4568 	/* If no dif_info, or dif_oper is disabled, then just return success */
4569 	if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
4570 		return OCS_HW_RTN_SUCCESS;
4571 	}
4572 
4573 	if (!hw || !io) {
4574 		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
4575 			    hw, io, dif_info);
4576 		return OCS_HW_RTN_ERROR;
4577 	}
4578 
4579 	data = io->sgl->virt;
4580 	data += io->n_sge;
4581 
4582 	/* If we are doing T10 DIF add the DIF Seed SGE */
4583 	ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
4584 	dif_seed = (sli4_diseed_sge_t *)data;
4585 	dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
4586 	dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
4587 	dif_seed->app_tag_repl = dif_info->app_tag_repl;
4588 	dif_seed->repl_app_tag = dif_info->repl_app_tag;
4589 	if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
4590 		dif_seed->atrt = dif_info->disable_app_ref_ffff;
4591 		dif_seed->at = dif_info->disable_app_ffff;
4592 	}
4593 	dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
4594 	/* Workaround for SKH (BZ157233) */
4595 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4596 		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
4597 		dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
4598 	}
4599 
4600 	dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
4601 	dif_seed->dif_blk_size = dif_info->blk_size;
4602 	dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
4603 	dif_seed->check_app_tag = dif_info->check_app_tag;
4604 	dif_seed->check_ref_tag = dif_info->check_ref_tag;
4605 	dif_seed->check_crc = dif_info->check_guard;
4606 	dif_seed->new_ref_tag = dif_info->repl_ref_tag;
4607 
4608 	switch(dif_info->dif_oper) {
4609 	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
4610 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4611 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4612 		break;
4613 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
4614 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4615 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4616 		break;
4617 	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
4618 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4619 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4620 		break;
4621 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
4622 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4623 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4624 		break;
4625 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
4626 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4627 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4628 		break;
4629 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
4630 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4631 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4632 		break;
4633 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
4634 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4635 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4636 		break;
4637 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
4638 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4639 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4640 		break;
4641 	case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
4642 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4643 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4644 		break;
4645 	default:
4646 		ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
4647 			    dif_info->dif_oper);
4648 		return OCS_HW_RTN_ERROR;
4649 	}
4650 
4651 	/*
4652 	 * Set last, clear previous last
4653 	 */
4654 	data->last = TRUE;
4655 	if (io->n_sge) {
4656 		data[-1].last = FALSE;
4657 	}
4658 
4659 	io->n_sge++;
4660 
4661 	return OCS_HW_RTN_SUCCESS;
4662 }
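
/**
 * @par Example
 * A hedged sketch of building a DIF-separate SGL: the DISEED entry is added
 * once, ahead of the data and protection entries. The operation, block-size
 * encoding, and buffer addresses are assumptions.
 *
 * @code
 * ocs_hw_dif_info_t dif = {0};
 *
 * dif.dif_oper = OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC;
 * dif.blk_size = blk_size_code;                // SLI block-size encoding, assumed
 * ocs_hw_io_add_seed_sge(hw, io, &dif);        // DISEED first
 * ocs_hw_io_add_sge(hw, io, data_phys, 4096);  // data buffer
 * ocs_hw_io_add_dif_sge(hw, io, dif_phys);     // protection data for the block(s)
 * @endcode
 */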
4663 
4664 static ocs_hw_rtn_e
4665 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4666 {
4667 	sli4_lsp_sge_t *lsp;
4668 
4669 	/* fail if we're already pointing to the overflow SGL */
4670 	if (io->sgl == io->ovfl_sgl) {
4671 		return OCS_HW_RTN_ERROR;
4672 	}
4673 
4674 	/*
4675 	 * For skyhawk, we can use another SGL to extend the SGL list. The
4676 	 * chained entry must not be in the first 4 entries.
4677 	 *
4678 	 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4679 	 */
4680 	if (sli_get_sgl_preregister(&hw->sli) &&
4681 	    io->def_sgl_count > 4 &&
4682 	    io->ovfl_io == NULL &&
4683 	    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4684 		(SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4685 		io->ovfl_io = ocs_hw_io_alloc(hw);
4686 		if (io->ovfl_io != NULL) {
4687 			/*
4688 			 * Note: We can't call ocs_hw_io_register_sgl() here
4689 			 * because it checks that SGLs are not pre-registered
4690 			 * and for skyhawk, preregistered SGLs are required.
4691 			 */
4692 			io->ovfl_sgl = &io->ovfl_io->def_sgl;
4693 			io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4694 		}
4695 	}
4696 
4697 	/* fail if we don't have an overflow SGL registered */
4698 	if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) {
4699 		return OCS_HW_RTN_ERROR;
4700 	}
4701 
4702 	/*
4703 	 * Overflow: we need to put a link SGE in the last location of the current
4704 	 * SGL, after copying the last SGE to the overflow SGL.
4705 	 */
4706 
4707 	((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4708 
4709 	lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4710 	ocs_memset(lsp, 0, sizeof(*lsp));
4711 
4712 	if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4713 	    (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4714 		sli_skh_chain_sge_build(&hw->sli,
4715 					(sli4_sge_t*)lsp,
4716 					io->ovfl_io->indicator,
4717 					0, /* frag_num */
4718 					0); /* offset */
4719 	} else {
4720 		lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4721 		lsp->buffer_address_low  = ocs_addr32_lo(io->ovfl_sgl->phys);
4722 		lsp->sge_type = SLI4_SGE_TYPE_LSP;
4723 		lsp->last = 0;
4724 		io->ovfl_lsp = lsp;
4725 		io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4726 	}
4727 
4728 	/* Update the current SGL pointer, and n_sgl */
4729 	io->sgl = io->ovfl_sgl;
4730 	io->sgl_count = io->ovfl_sgl_count;
4731 	io->n_sge = 1;
4732 
4733 	return OCS_HW_RTN_SUCCESS;
4734 }
4735 
4736 /**
4737  * @ingroup io
4738  * @brief Add a scatter gather list entry to an IO.
4739  *
4740  * @param hw Hardware context.
4741  * @param io Previously-allocated HW IO object.
4742  * @param addr Physical address.
4743  * @param length Length of memory pointed to by @c addr.
4744  *
4745  * @return Returns 0 on success, or a non-zero value on failure.
4746  */
4747 ocs_hw_rtn_e
4748 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
4749 {
4750 	sli4_sge_t	*data = NULL;
4751 
4752 	if (!hw || !io || !addr || !length) {
4753 		ocs_log_err(hw ? hw->os : NULL,
4754 			    "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4755 			    hw, io, addr, length);
4756 		return OCS_HW_RTN_ERROR;
4757 	}
4758 
4759 	if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4760 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4761 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4762 			return OCS_HW_RTN_ERROR;
4763 		}
4764 	}
4765 
4766 	if (length > sli_get_max_sge(&hw->sli)) {
4767 		ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4768 			    length, sli_get_max_sge(&hw->sli));
4769 		return OCS_HW_RTN_ERROR;
4770 	}
4771 
4772 	data = io->sgl->virt;
4773 	data += io->n_sge;
4774 
4775 	data->sge_type = SLI4_SGE_TYPE_DATA;
4776 	data->buffer_address_high = ocs_addr32_hi(addr);
4777 	data->buffer_address_low  = ocs_addr32_lo(addr);
4778 	data->buffer_length = length;
4779 	data->data_offset = io->sge_offset;
4780 	/*
4781 	 * Always assume this is the last entry and mark as such.
4782 	 * If this is not the first entry unset the "last SGE"
4783 	 * indication for the previous entry
4784 	 */
4785 	data->last = TRUE;
4786 	if (io->n_sge) {
4787 		data[-1].last = FALSE;
4788 	}
4789 
4790 	/* Set first_data_sge if not previously set */
4791 	if (io->first_data_sge == 0) {
4792 		io->first_data_sge = io->n_sge;
4793 	}
4794 
4795 	io->sge_offset += length;
4796 	io->n_sge++;
4797 
4798 	/* Update the linked segment length (only executed after overflow has begun) */
4799 	if (io->ovfl_lsp != NULL) {
4800 		io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4801 	}
4802 
4803 	return OCS_HW_RTN_SUCCESS;
4804 }
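
/**
 * @par Example
 * A minimal sketch of filling an SGL from a scatter list; overflow into a
 * chained SGL is handled transparently by ocs_hw_io_overflow_sgl() above.
 * The scatter array is an assumption.
 *
 * @code
 * uint32_t i;
 *
 * for (i = 0; i < nsegs; i++) {
 *         if (ocs_hw_io_add_sge(hw, io, segs[i].phys, segs[i].len) != OCS_HW_RTN_SUCCESS) {
 *                 break;  // SGL full and no overflow SGL available
 *         }
 * }
 * @endcode
 */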
4805 
4806 /**
4807  * @ingroup io
4808  * @brief Add a T10 DIF scatter gather list entry to an IO.
4809  *
4810  * @param hw Hardware context.
4811  * @param io Previously-allocated HW IO object.
4812  * @param addr DIF physical address.
4813  *
4814  * @return Returns 0 on success, or a non-zero value on failure.
4815  */
4816 ocs_hw_rtn_e
4817 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4818 {
4819 	sli4_dif_sge_t	*data = NULL;
4820 
4821 	if (!hw || !io || !addr) {
4822 		ocs_log_err(hw ? hw->os : NULL,
4823 			    "bad parameter hw=%p io=%p addr=%lx\n",
4824 			    hw, io, addr);
4825 		return OCS_HW_RTN_ERROR;
4826 	}
4827 
4828 	if ((io->n_sge + 1) > hw->config.n_sgl) {
4829 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4830 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4831 			return OCS_HW_RTN_ERROR;
4832 		}
4833 	}
4834 
4835 	data = io->sgl->virt;
4836 	data += io->n_sge;
4837 
4838 	data->sge_type = SLI4_SGE_TYPE_DIF;
4839 	/* Workaround for SKH (BZ157233) */
4840 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4841 		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4842 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4843 	}
4844 
4845 	data->buffer_address_high = ocs_addr32_hi(addr);
4846 	data->buffer_address_low  = ocs_addr32_lo(addr);
4847 
4848 	/*
4849 	 * Always assume this is the last entry and mark as such.
4850 	 * If this is not the first entry unset the "last SGE"
4851 	 * indication for the previous entry
4852 	 */
4853 	data->last = TRUE;
4854 	if (io->n_sge) {
4855 		data[-1].last = FALSE;
4856 	}
4857 
4858 	io->n_sge++;
4859 
4860 	return OCS_HW_RTN_SUCCESS;
4861 }
4862 
4863 /**
4864  * @ingroup io
4865  * @brief Abort a previously-started IO.
4866  *
4867  * @param hw Hardware context.
4868  * @param io_to_abort The IO to abort.
4869  * @param send_abts Boolean to have the hardware automatically
4870  * generate an ABTS.
4871  * @param cb Function call upon completion of the abort (may be NULL).
4872  * @param arg Argument to pass to abort completion function.
4873  *
4874  * @return Returns 0 on success, or a non-zero value on failure.
4875  */
4876 ocs_hw_rtn_e
4877 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
4878 {
4879 	sli4_abort_type_e atype = SLI_ABORT_MAX;
4880 	uint32_t	id = 0, mask = 0;
4881 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
4882 	hw_wq_callback_t *wqcb;
4883 
4884 	if (!hw || !io_to_abort) {
4885 		ocs_log_err(hw ? hw->os : NULL,
4886 			    "bad parameter hw=%p io=%p\n",
4887 			    hw, io_to_abort);
4888 		return OCS_HW_RTN_ERROR;
4889 	}
4890 
4891 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4892 		ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
4893 			    hw->state);
4894 		return OCS_HW_RTN_ERROR;
4895 	}
4896 
4897 	/* take a reference on IO being aborted */
4898 	if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
4899 		/* command no longer active */
4900 		ocs_log_test(hw ? hw->os : NULL,
4901 				"io not active xri=0x%x tag=0x%x\n",
4902 				io_to_abort->indicator, io_to_abort->reqtag);
4903 		return OCS_HW_RTN_IO_NOT_ACTIVE;
4904 	}
4905 
4906 	/* non-port owned XRI checks */
4907 	/* Must have a valid WQ reference */
4908 	if (io_to_abort->wq == NULL) {
4909 		ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
4910 				io_to_abort->indicator);
4911 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4912 		return OCS_HW_RTN_IO_NOT_ACTIVE;
4913 	}
4914 
4915 	/* Validation checks complete; now check to see if already being aborted */
4916 	ocs_lock(&hw->io_abort_lock);
4917 		if (io_to_abort->abort_in_progress) {
4918 			ocs_unlock(&hw->io_abort_lock);
4919 			ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4920 			ocs_log_debug(hw ? hw->os : NULL,
4921 				"io already being aborted xri=0x%x tag=0x%x\n",
4922 				io_to_abort->indicator, io_to_abort->reqtag);
4923 			return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
4924 		}
4925 
4926 		/*
4927 		 * This IO is not already being aborted. Set flag so we won't try to
4928 		 * abort it again. After all, we only have one abort_done callback.
4929 		 */
4930 		io_to_abort->abort_in_progress = 1;
4931 	ocs_unlock(&hw->io_abort_lock);
4932 
4933 	/*
4934 	 * If we got here, the possibilities are:
4935 	 * - host owned xri
4936 	 *	- io_to_abort->wq_index != UINT32_MAX
4937 	 *		- submit ABORT_WQE to same WQ
4938 	 * - port owned xri:
4939 	 *	- rxri: io_to_abort->wq_index == UINT32_MAX
4940 	 *		- submit ABORT_WQE to any WQ
4941 	 *	- non-rxri
4942 	 *		- io_to_abort->index != UINT32_MAX
4943 	 *			- submit ABORT_WQE to same WQ
4944 	 *		- io_to_abort->index == UINT32_MAX
4945 	 *			- submit ABORT_WQE to any WQ
4946 	 */
4947 	io_to_abort->abort_done = cb;
4948 	io_to_abort->abort_arg  = arg;
4949 
4950 	atype = SLI_ABORT_XRI;
4951 	id = io_to_abort->indicator;
4952 
4953 	/* Allocate a request tag for the abort portion of this IO */
4954 	wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
4955 	if (wqcb == NULL) {
4956 		ocs_log_err(hw->os, "can't allocate request tag\n");
		ocs_lock(&hw->io_abort_lock);
			io_to_abort->abort_in_progress = 0;
		ocs_unlock(&hw->io_abort_lock);
		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4957 		return OCS_HW_RTN_NO_RESOURCES;
4958 	}
4959 	io_to_abort->abort_reqtag = wqcb->instance_index;
4960 
4961 	/*
4962 	 * If the wqe is on the pending list, then set this wqe to be
4963 	 * aborted when the IO's wqe is removed from the list.
4964 	 */
4965 	if (io_to_abort->wq != NULL) {
4966 		sli_queue_lock(io_to_abort->wq->queue);
4967 			if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4968 				io_to_abort->wqe.abort_wqe_submit_needed = 1;
4969 				io_to_abort->wqe.send_abts = send_abts;
4970 				io_to_abort->wqe.id = id;
4971 				io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4972 				sli_queue_unlock(io_to_abort->wq->queue);
4973 				return OCS_HW_RTN_SUCCESS;
4974 			}
4975 		sli_queue_unlock(io_to_abort->wq->queue);
4976 	}
4977 
4978 	if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4979 			  io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4980 		ocs_log_err(hw->os, "ABORT WQE error\n");
4981 		io_to_abort->abort_reqtag = UINT32_MAX;
4982 		ocs_hw_reqtag_free(hw, wqcb);
4983 		rc = OCS_HW_RTN_ERROR;
4984 	}
4985 
4986 	if (OCS_HW_RTN_SUCCESS == rc) {
4987 		if (io_to_abort->wq == NULL) {
4988 			io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4989 			ocs_hw_assert(io_to_abort->wq != NULL);
4990 		}
4991 		/* ABORT_WQE does not actually utilize an XRI on the Port,
4992 		 * therefore, keep xbusy as-is to track the exchange's state,
4993 		 * not the ABORT_WQE's state
4994 		 */
4995 		rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
4996 		if (rc >= 0) {
4997 			/* non-negative return is success */
4998 			rc = 0;
4999 			/* can't abort an abort so skip adding to timed wqe list */
5000 		}
5001 	}
5002 
5003 	if (OCS_HW_RTN_SUCCESS != rc) {
5004 		ocs_lock(&hw->io_abort_lock);
5005 			io_to_abort->abort_in_progress = 0;
5006 		ocs_unlock(&hw->io_abort_lock);
5007 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
5008 	}
5009 	return rc;
5010 }
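
/**
 * @par Example
 * A hedged sketch of aborting an in-flight exchange; the decision to send
 * an ABTS and the completion handler are assumptions.
 *
 * @code
 * ocs_hw_rtn_e rc;
 *
 * rc = ocs_hw_io_abort(hw, io, TRUE, my_abort_done, my_arg);  // TRUE = send ABTS
 * if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
 *         // an earlier abort already owns the abort_done callback
 * }
 * @endcode
 */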
5011 
5012 /**
5013  * @ingroup io
5014  * @brief Return the OX_ID/RX_ID of the IO.
5015  *
5016  * @param hw Hardware context.
5017  * @param io HW IO object.
5018  *
5019  * @return Returns X_ID on success, or -1 on failure.
5020  */
5021 int32_t
5022 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5023 {
5024 	if (!hw || !io) {
5025 		ocs_log_err(hw ? hw->os : NULL,
5026 			    "bad parameter hw=%p io=%p\n", hw, io);
5027 		return -1;
5028 	}
5029 
5030 	return io->indicator;
5031 }
5032 
5034 typedef struct ocs_hw_fw_write_cb_arg {
5035 	ocs_hw_fw_cb_t cb;
5036 	void *arg;
5037 } ocs_hw_fw_write_cb_arg_t;
5038 
5039 typedef struct ocs_hw_sfp_cb_arg {
5040 	ocs_hw_sfp_cb_t cb;
5041 	void *arg;
5042 	ocs_dma_t payload;
5043 } ocs_hw_sfp_cb_arg_t;
5044 
5045 typedef struct ocs_hw_temp_cb_arg {
5046 	ocs_hw_temp_cb_t cb;
5047 	void *arg;
5048 } ocs_hw_temp_cb_arg_t;
5049 
5050 typedef struct ocs_hw_link_stat_cb_arg {
5051 	ocs_hw_link_stat_cb_t cb;
5052 	void *arg;
5053 } ocs_hw_link_stat_cb_arg_t;
5054 
5055 typedef struct ocs_hw_host_stat_cb_arg {
5056 	ocs_hw_host_stat_cb_t cb;
5057 	void *arg;
5058 } ocs_hw_host_stat_cb_arg_t;
5059 
5060 typedef struct ocs_hw_dump_get_cb_arg {
5061 	ocs_hw_dump_get_cb_t cb;
5062 	void *arg;
5063 	void *mbox_cmd;
5064 } ocs_hw_dump_get_cb_arg_t;
5065 
5066 typedef struct ocs_hw_dump_clear_cb_arg {
5067 	ocs_hw_dump_clear_cb_t cb;
5068 	void *arg;
5069 	void *mbox_cmd;
5070 } ocs_hw_dump_clear_cb_arg_t;
5071 
5072 /**
5073  * @brief Write a portion of a firmware image to the device.
5074  *
5075  * @par Description
5076  * Calls the correct firmware write function based on the device type.
5077  *
5078  * @param hw Hardware context.
5079  * @param dma DMA structure containing the firmware image chunk.
5080  * @param size Size of the firmware image chunk.
5081  * @param offset Offset, in bytes, from the beginning of the firmware image.
5082  * @param last True if this is the last chunk of the image.
5083  * Causes the image to be committed to flash.
5084  * @param cb Pointer to a callback function that is called when the command completes.
5085  * The callback function prototype is
5086  * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5087  * @param arg Pointer to be passed to the callback function.
5088  *
5089  * @return Returns 0 on success, or a non-zero value on failure.
5090  */
5091 ocs_hw_rtn_e
5092 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5093 {
5094 	if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5095 		return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5096 	} else {
5097 		/* Firmware write is not supported for BE3/Skyhawk */
5098 		return OCS_HW_RTN_ERROR;
5099 	}
5100 }
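
/**
 * @par Example
 * A hedged sketch of a chunked firmware download; the chunk sizing, DMA
 * buffer reuse, and completion synchronization are assumptions.
 *
 * @code
 * uint32_t offset, chunk;
 *
 * for (offset = 0; offset < image_len; offset += chunk) {
 *         chunk = MIN(image_len - offset, dma.size);
 *         // caller copies the next chunk of the image into dma here
 *         rc = ocs_hw_firmware_write(hw, &dma, chunk, offset,
 *                                    (offset + chunk) == image_len,  // last chunk commits
 *                                    my_fw_done, my_arg);
 *         if (rc != OCS_HW_RTN_SUCCESS) {
 *                 break;
 *         }
 *         // wait for my_fw_done before reusing dma (assumed synchronization)
 * }
 * @endcode
 */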
5101 
5102 /**
5103  * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5104  *
5105  * @par Description
5106  * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5107  * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5108  * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
5109  * and to signal the caller that the write has completed.
5110  *
5111  * @param hw Hardware context.
5112  * @param dma DMA structure containing the firmware image chunk.
5113  * @param size Size of the firmware image chunk.
5114  * @param offset Offset, in bytes, from the beginning of the firmware image.
5115  * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5116  * @param cb Pointer to a callback function that is called when the command completes.
5117  * The callback function prototype is
5118  * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5119  * @param arg Pointer to be passed to the callback function.
5120  *
5121  * @return Returns 0 on success, or a non-zero value on failure.
5122  */
5123 ocs_hw_rtn_e
5124 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5125 {
5126 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5127 	uint8_t *mbxdata;
5128 	ocs_hw_fw_write_cb_arg_t *cb_arg;
5129 	int noc = 0;	/* No Commit bit - set to 1 for testing */
5130 
5131 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5132 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5133 		return OCS_HW_RTN_ERROR;
5134 	}
5135 
5136 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5137 	if (mbxdata == NULL) {
5138 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5139 		return OCS_HW_RTN_NO_MEMORY;
5140 	}
5141 
5142 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5143 	if (cb_arg == NULL) {
5144 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5145 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5146 		return OCS_HW_RTN_NO_MEMORY;
5147 	}
5148 
5149 	cb_arg->cb = cb;
5150 	cb_arg->arg = arg;
5151 
5152 	if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5153 			size, offset, "/prg/", dma)) {
5154 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5155 	}
5156 
5157 	if (rc != OCS_HW_RTN_SUCCESS) {
5158 		ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5159 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5160 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5161 	}
5162 
5163 	return rc;
5164 
5165 }
5166 
5167 /**
5168  * @brief Called when the WRITE OBJECT command completes.
5169  *
5170  * @par Description
5171  * Get the number of bytes actually written out of the response, free the mailbox
5172  * that was malloc'd by ocs_hw_firmware_write_lancer(),
5173  * then call the callback and pass the status and bytes written.
5174  *
5175  * @param hw Hardware context.
5176  * @param status Status field from the mbox completion.
5177  * @param mqe Mailbox response structure.
5178  * @param arg Pointer to the callback argument (ocs_hw_fw_write_cb_arg_t) that holds the callback used to signal the caller that the command is done.
5179  * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t change_status, void *arg)</tt>.
5180  *
5181  * @return Returns 0.
5182  */
5183 static int32_t
5184 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5185 {
5186 
5187 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5188 	sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5189 	ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5190 	uint32_t bytes_written;
5191 	uint16_t mbox_status;
5192 	uint32_t change_status;
5193 
5194 	bytes_written = wr_obj_rsp->actual_write_length;
5195 	mbox_status = mbox_rsp->hdr.status;
5196 	change_status = wr_obj_rsp->change_status;
5197 
5198 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5199 
5200 	if (cb_arg) {
5201 		if (cb_arg->cb) {
5202 			if ((status == 0) && mbox_status) {
5203 				status = mbox_status;
5204 			}
5205 			cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5206 		}
5207 
5208 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5209 	}
5210 
5211 	return 0;
5212 
5213 }
5214 
5215 /**
5216  * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5217  *
5218  * @par Description
5219  * Get the number of bytes read out of the response, free the mailbox that was malloc'd
5220  * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes read.
5221  *
5222  * @param hw Hardware context.
5223  * @param status Status field from the mbox completion.
5224  * @param mqe Mailbox response structure.
5225  * @param arg Pointer to the callback argument (ocs_hw_sfp_cb_arg_t) that holds the callback used to signal the caller that the command is done.
5226  * The callback function prototype is
5227  * <tt>void cb(void *os, int32_t status, uint32_t bytes_read, uint32_t *data, void *arg)</tt>.
5228  *
5229  * @return Returns 0.
5230  */
5231 static int32_t
5232 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5233 {
5234 
5235 	ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5236 	ocs_dma_t *payload = NULL;
5237 	sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5238 	uint32_t bytes_written;
5239 
5240 	if (cb_arg) {
5241 		payload = &(cb_arg->payload);
5242 		if (cb_arg->cb) {
5243 			mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5244 			bytes_written = mbox_rsp->hdr.response_length;
5245 			if ((status == 0) && mbox_rsp->hdr.status) {
5246 				status = mbox_rsp->hdr.status;
5247 			}
5248 			cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5249 		}
5250 
5251 		ocs_dma_free(hw->os, &cb_arg->payload);
5252 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5253 	}
5254 
5255 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5256 	return 0;
5257 }
5258 
5259 /**
5260  * @ingroup io
5261  * @brief Function to retrieve the SFP information.
5262  *
5263  * @param hw Hardware context.
5264  * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5265  * @param cb Function to call upon completion of the command (may be NULL).
5266  * @param arg Argument to pass to IO completion function.
5267  *
5268  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5269  */
5270 ocs_hw_rtn_e
5271 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
5272 {
5273 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5274 	ocs_hw_sfp_cb_arg_t *cb_arg;
5275 	uint8_t *mbxdata;
5276 
5277 	/* mbxdata holds the header of the command */
5278 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5279 	if (mbxdata == NULL) {
5280 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5281 		return OCS_HW_RTN_NO_MEMORY;
5282 	}
5283 
5284 	/* cb_arg holds the data that will be passed to the callback on completion */
5285 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
5286 	if (cb_arg == NULL) {
5287 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5288 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5289 		return OCS_HW_RTN_NO_MEMORY;
5290 	}
5291 
5292 	cb_arg->cb = cb;
5293 	cb_arg->arg = arg;
5294 
5295 	/* payload holds the non-embedded portion */
5296 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
5297 			  OCS_MIN_DMA_ALIGNMENT)) {
5298 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
5299 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5300 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5301 		return OCS_HW_RTN_NO_MEMORY;
5302 	}
5303 
5304 	/* Send the HW command */
5305 	if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
5306 	    &cb_arg->payload)) {
5307 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
5308 	}
5309 
5310 	if (rc != OCS_HW_RTN_SUCCESS) {
5311 		ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
5312 				rc);
5313 		ocs_dma_free(hw->os, &cb_arg->payload);
5314 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5315 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5316 	}
5317 
5318 	return rc;
5319 }
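
/*
 * Illustrative usage sketch -- not part of the driver. Requests SFP page
 * 0xa0 (the base identification page) and logs the result. The handler
 * signature mirrors the ocs_hw_sfp_cb_t invocation in ocs_hw_cb_sfp()
 * above (hw->os is passed as the first argument); example_* names are
 * hypothetical.
 */
static void
example_sfp_done(void *os, int32_t status, uint32_t bytes_read,
		 uint32_t *page_data, void *arg)
{
	if (status == 0) {
		ocs_log_debug(os, "read %d bytes of SFP page data at %p\n",
			      bytes_read, page_data);
	}
}

static ocs_hw_rtn_e
example_read_sfp_base_page(ocs_hw_t *hw)
{
	return ocs_hw_get_sfp(hw, 0xa0, example_sfp_done, NULL);
}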
5320 
5321 /**
5322  * @brief Function to retrieve the temperature information.
5323  *
5324  * @param hw Hardware context.
5325  * @param cb Function to call upon completion of the command (may be NULL).
5326  * @param arg Argument to pass to IO completion function.
5327  *
5328  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5329  */
5330 ocs_hw_rtn_e
5331 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
5332 {
5333 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5334 	ocs_hw_temp_cb_arg_t *cb_arg;
5335 	uint8_t *mbxdata;
5336 
5337 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5338 	if (mbxdata == NULL) {
5339 		ocs_log_err(hw->os, "failed to malloc mbox");
5340 		return OCS_HW_RTN_NO_MEMORY;
5341 	}
5342 
5343 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
5344 	if (cb_arg == NULL) {
5345 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5346 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5347 		return OCS_HW_RTN_NO_MEMORY;
5348 	}
5349 
5350 	cb_arg->cb = cb;
5351 	cb_arg->arg = arg;
5352 
5353 	if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5354 				SLI4_WKI_TAG_SAT_TEM)) {
5355 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
5356 	}
5357 
5358 	if (rc != OCS_HW_RTN_SUCCESS) {
5359 		ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
5360 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5361 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5362 	}
5363 
5364 	return rc;
5365 }
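
/*
 * Illustrative usage sketch -- not part of the driver. The handler
 * signature mirrors the ocs_hw_temp_cb_t invocation in ocs_hw_cb_temp()
 * below; example_* names are hypothetical.
 */
static void
example_temp_done(int32_t status, uint32_t curr_temp,
		  uint32_t crit_temp_thrshld, uint32_t warn_temp_thrshld,
		  uint32_t norm_temp_thrshld, uint32_t fan_off_thrshld,
		  uint32_t fan_on_thrshld, void *arg)
{
	uint32_t *temp_out = arg;

	/* report only the current temperature back to the caller */
	if (status == 0) {
		*temp_out = curr_temp;
	}
}

static ocs_hw_rtn_e
example_query_temperature(ocs_hw_t *hw, uint32_t *temp_out)
{
	return ocs_hw_get_temperature(hw, example_temp_done, temp_out);
}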
5366 
5367 /**
5368  * @brief Called when the DUMP command completes.
5369  *
5370  * @par Description
5371  * Get the temperature data out of the response, free the mailbox that was malloc'd
5372  * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
5373  *
5374  * @param hw Hardware context.
5375  * @param status Status field from the mbox completion.
5376  * @param mqe Mailbox response structure.
5377  * @param arg Pointer to the callback argument (ocs_hw_temp_cb_arg_t) that holds the callback used to signal the caller that the command is done.
5378  * The callback function prototype is defined by ocs_hw_temp_cb_t.
5379  *
5380  * @return Returns 0.
5381  */
5382 static int32_t
5383 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5384 {
5385 
5386 	sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
5387 	ocs_hw_temp_cb_arg_t *cb_arg = arg;
5388 	uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
5389 	uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6*/
5390 	uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
5391 	uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
5392 	uint32_t fan_off_thrshld = mbox_rsp->resp_data[4];   /* word 9 */
5393 	uint32_t fan_on_thrshld = mbox_rsp->resp_data[5];    /* word 10 */
5394 
5395 	if (cb_arg) {
5396 		if (cb_arg->cb) {
5397 			if ((status == 0) && mbox_rsp->hdr.status) {
5398 				status = mbox_rsp->hdr.status;
5399 			}
5400 			cb_arg->cb(status,
5401 				   curr_temp,
5402 				   crit_temp_thrshld,
5403 				   warn_temp_thrshld,
5404 				   norm_temp_thrshld,
5405 				   fan_off_thrshld,
5406 				   fan_on_thrshld,
5407 				   cb_arg->arg);
5408 		}
5409 
5410 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5411 	}
5412 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5413 
5414 	return 0;
5415 }
5416 
5417 /**
5418  * @brief Function to retrieve the link statistics.
5419  *
5420  * @param hw Hardware context.
5421  * @param req_ext_counters If TRUE, then the extended counters will be requested.
5422  * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
5423  * @param clear_all_counters If TRUE, the counters will be cleared.
5424  * @param cb Function to call upon completion of the command (may be NULL).
5425  * @param arg Argument to pass to IO completion function.
5426  *
5427  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5428  */
5429 ocs_hw_rtn_e
5430 ocs_hw_get_link_stats(ocs_hw_t *hw,
5431 			uint8_t req_ext_counters,
5432 			uint8_t clear_overflow_flags,
5433 			uint8_t clear_all_counters,
5434 			ocs_hw_link_stat_cb_t cb,
5435 			void *arg)
5436 {
5437 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5438 	ocs_hw_link_stat_cb_arg_t *cb_arg;
5439 	uint8_t *mbxdata;
5440 
5441 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5442 	if (mbxdata == NULL) {
5443 		ocs_log_err(hw->os, "failed to malloc mbox");
5444 		return OCS_HW_RTN_NO_MEMORY;
5445 	}
5446 
5447 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
5448 	if (cb_arg == NULL) {
5449 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5450 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5451 		return OCS_HW_RTN_NO_MEMORY;
5452 	}
5453 
5454 	cb_arg->cb = cb;
5455 	cb_arg->arg = arg;
5456 
5457 	if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5458 				    req_ext_counters,
5459 				    clear_overflow_flags,
5460 				    clear_all_counters)) {
5461 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
5462 	}
5463 
5464 	if (rc != OCS_HW_RTN_SUCCESS) {
5465 		ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
5466 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5467 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5468 	}
5469 
5470 	return rc;
5471 }
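
/*
 * Illustrative usage sketch -- not part of the driver. Requests the
 * extended counters without clearing anything, then picks the CRC error
 * counter out of the completion. The handler signature mirrors the
 * ocs_hw_link_stat_cb_t invocation in ocs_hw_cb_link_stat() below;
 * example_* names are hypothetical.
 */
static void
example_link_stat_done(int32_t status, uint32_t num_counters,
		       ocs_hw_link_stat_counts_t *counts, void *arg)
{
	uint32_t *crc_errors = arg;

	/* skip the value if the hardware flagged the counter as overflowed */
	if ((status == 0) && !counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow) {
		*crc_errors = counts[OCS_HW_LINK_STAT_CRC_COUNT].counter;
	}
}

static ocs_hw_rtn_e
example_query_crc_errors(ocs_hw_t *hw, uint32_t *crc_errors)
{
	return ocs_hw_get_link_stats(hw, TRUE, FALSE, FALSE,
				     example_link_stat_done, crc_errors);
}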
5472 
5473 /**
5474  * @brief Called when the READ_LINK_STAT command completes.
5475  *
5476  * @par Description
5477  * Get the counters out of the response, free the mailbox that was malloc'd
5478  * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
5479  *
5480  * @param hw Hardware context.
5481  * @param status Status field from the mbox completion.
5482  * @param mqe Mailbox response structure.
5483  * @param arg Pointer to the callback argument (ocs_hw_link_stat_cb_arg_t) that holds the callback used to signal the caller that the command is done.
5484  * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
5485  *
5486  * @return Returns 0.
5487  */
5488 static int32_t
5489 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5490 {
5491 
5492 	sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
5493 	ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
5494 	ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
5495 	uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);
5496 
5497 	ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
5498 		   OCS_HW_LINK_STAT_MAX);
5499 
5500 	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
5501 	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
5502 	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
5503 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
5504 	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
5505 	counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
5506 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
5507 	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
5508 	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
5509 	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
5510 	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
5511 	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
5512 	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
5513 	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
5514 	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
5515 	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
5516 	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
5517 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
5518 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
5519 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
5520 
5521 	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
5522 	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
5523 	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
5524 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
5525 	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
5526 	counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
5527 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
5528 	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
5529 	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
5530 	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
5531 	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
5532 	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
5533 	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
5534 	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
5535 	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
5536 	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
5537 	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
5538 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
5539 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
5540 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
5541 
5542 	if (cb_arg) {
5543 		if (cb_arg->cb) {
5544 			if ((status == 0) && mbox_rsp->hdr.status) {
5545 				status = mbox_rsp->hdr.status;
5546 			}
5547 			cb_arg->cb(status,
5548 				   num_counters,
5549 				   counts,
5550 				   cb_arg->arg);
5551 		}
5552 
5553 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5554 	}
5555 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5556 
5557 	return 0;
5558 }
5559 
5560 /**
5561  * @brief Function to retrieve the link and host statistics.
5562  *
5563  * @param hw Hardware context.
5564  * @param cc Clear counters; if TRUE, all counters will be cleared.
5565  * @param cb Function to call upon completion of receiving the data.
5566  * @param arg Argument to pass to the completion callback (for example, a pointer to an FC host statistics structure).
5567  *
5568  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5569  */
5570 ocs_hw_rtn_e
5571 ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
5572 {
5573 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5574 	ocs_hw_host_stat_cb_arg_t *cb_arg;
5575 	uint8_t *mbxdata;
5576 
5577 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
5578 	if (mbxdata == NULL) {
5579 		ocs_log_err(hw->os, "failed to malloc mbox");
5580 		return OCS_HW_RTN_NO_MEMORY;
5581 	}
5582 
5583 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
5584 	if (cb_arg == NULL) {
5585 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5586 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5587 		return OCS_HW_RTN_NO_MEMORY;
5588 	}
5589 
5590 	cb_arg->cb = cb;
5591 	cb_arg->arg = arg;
5592 
5593 	/* Send the HW command to get the host stats */
5594 	if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
5595 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
5596 	}
5597 
5598 	if (rc != OCS_HW_RTN_SUCCESS) {
5599 		ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
5600 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5601 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5602 	}
5603 
5604 	return rc;
5605 }
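
/*
 * Illustrative usage sketch -- not part of the driver. Reads the host
 * statistics without clearing them (cc == 0) and returns the transmitted
 * kilobyte count. The handler signature mirrors the ocs_hw_host_stat_cb_t
 * invocation in ocs_hw_cb_host_stat() below; example_* names are
 * hypothetical.
 */
static void
example_host_stat_done(int32_t status, uint32_t num_counters,
		       ocs_hw_host_stat_counts_t *counts, void *arg)
{
	uint32_t *tx_kbytes = arg;

	if (status == 0) {
		*tx_kbytes = counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
	}
}

static ocs_hw_rtn_e
example_query_tx_kbytes(ocs_hw_t *hw, uint32_t *tx_kbytes)
{
	return ocs_hw_get_host_stats(hw, 0, example_host_stat_done, tx_kbytes);
}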
5606 
5607 
5608 /**
5609  * @brief Called when the READ_STATUS command completes.
5610  *
5611  * @par Description
5612  * Get the counters out of the response, free the mailbox that was malloc'd
5613  * by ocs_hw_get_host_stats(), then call the callback and pass
5614  * the status and data.
5615  *
5616  * @param hw Hardware context.
5617  * @param status Status field from the mbox completion.
5618  * @param mqe Mailbox response structure.
5619  * @param arg Pointer to the callback argument (ocs_hw_host_stat_cb_arg_t) that holds the callback used to signal the caller that the command is done.
5620  * The callback function prototype is defined by
5621  * ocs_hw_host_stat_cb_t.
5622  *
5623  * @return Returns 0.
5624  */
5625 static int32_t
5626 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5627 {
5628 
5629 	sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
5630 	ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
5631 	ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
5632 	uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
5633 
5634 	ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
5635 		   OCS_HW_HOST_STAT_MAX);
5636 
5637 	counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
5638 	counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
5639 	counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
5640 	counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
5641 	counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
5642 	counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
5643 	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
5644 	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
5645 	counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
5646 	counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
5647 	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
5648 	counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
5649 	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
5650 	counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
5651 
5652 
5653 	if (cb_arg) {
5654 		if (cb_arg->cb) {
5655 			if ((status == 0) && mbox_rsp->hdr.status) {
5656 				status = mbox_rsp->hdr.status;
5657 			}
5658 			cb_arg->cb(status,
5659 				   num_counters,
5660 				   counts,
5661 				   cb_arg->arg);
5662 		}
5663 
5664 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5665 	}
5666 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5667 
5668 	return 0;
5669 }
5670 
5671 /**
5672  * @brief HW link configuration enum to the CLP string value mapping.
5673  *
5674  * This structure provides a mapping from the ocs_hw_linkcfg_e
5675  * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5676  * control) to the CLP string that is used
5677  * in the DMTF_CLP_CMD mailbox command.
5678  */
5679 typedef struct ocs_hw_linkcfg_map_s {
5680 	ocs_hw_linkcfg_e linkcfg;
5681 	const char *clp_str;
5682 } ocs_hw_linkcfg_map_t;
5683 
5684 /**
5685  * @brief Mapping from the HW linkcfg enum to the CLP command value
5686  * string.
5687  */
5688 static ocs_hw_linkcfg_map_t linkcfg_map[] = {
5689 	{OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
5690 	{OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
5691 	{OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
5692 	{OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
5693 	{OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
5694 	{OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
5695 	{OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
5696 
5697 /**
5698  * @brief HW link configuration enum to Skyhawk link config ID mapping.
5699  *
5700  * This structure provides a mapping from the ocs_hw_linkcfg_e
5701  * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5702  * control) to the link config ID numbers used by Skyhawk
5703  */
5704 typedef struct ocs_hw_skyhawk_linkcfg_map_s {
5705 	ocs_hw_linkcfg_e linkcfg;
5706 	uint32_t	config_id;
5707 } ocs_hw_skyhawk_linkcfg_map_t;
5708 
5709 /**
5710  * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
5711  */
5712 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
5713 	{OCS_HW_LINKCFG_4X10G, 0x0a},
5714 	{OCS_HW_LINKCFG_1X40G, 0x09},
5715 };
5716 
5717 /**
5718  * @brief Helper function for getting the HW linkcfg enum from the CLP
5719  * string value
5720  *
5721  * @param clp_str CLP string value from OEMELX_LinkConfig.
5722  *
5723  * @return Returns the HW linkcfg enum corresponding to clp_str.
5724  */
5725 static ocs_hw_linkcfg_e
5726 ocs_hw_linkcfg_from_clp(const char *clp_str)
5727 {
5728 	uint32_t i;
5729 	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5730 		if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
5731 			return linkcfg_map[i].linkcfg;
5732 		}
5733 	}
5734 	return OCS_HW_LINKCFG_NA;
5735 }
5736 
5737 /**
5738  * @brief Helper function for getting the CLP string value from the HW
5739  * linkcfg enum.
5740  *
5741  * @param linkcfg HW linkcfg enum.
5742  *
5743  * @return Returns the OEMELX_LinkConfig CLP string value corresponding to
5744  * given linkcfg.
5745  */
5746 static const char *
5747 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5748 {
5749 	uint32_t i;
5750 	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5751 		if (linkcfg_map[i].linkcfg == linkcfg) {
5752 			return linkcfg_map[i].clp_str;
5753 		}
5754 	}
5755 	return NULL;
5756 }
5757 
5758 /**
5759  * @brief Helper function for getting a Skyhawk link config ID from the HW
5760  * linkcfg enum.
5761  *
5762  * @param linkcfg HW linkcfg enum.
5763  *
5764  * @return Returns the Skyhawk link config ID corresponding to
5765  * given linkcfg.
5766  */
5767 static uint32_t
5768 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5769 {
5770 	uint32_t i;
5771 	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5772 		if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
5773 			return skyhawk_linkcfg_map[i].config_id;
5774 		}
5775 	}
5776 	return 0;
5777 }
5778 
5779 /**
5780  * @brief Helper function for getting the HW linkcfg enum from a
5781  * Skyhawk config ID.
5782  *
5783  * @param config_id Skyhawk link config ID.
5784  *
5785  * @return Returns the HW linkcfg enum corresponding to config_id.
5786  */
5787 static ocs_hw_linkcfg_e
5788 ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
5789 {
5790 	uint32_t i;
5791 	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5792 		if (skyhawk_linkcfg_map[i].config_id == config_id) {
5793 			return skyhawk_linkcfg_map[i].linkcfg;
5794 		}
5795 	}
5796 	return OCS_HW_LINKCFG_NA;
5797 }
5798 
5799 /**
5800  * @brief Link configuration callback argument.
5801  */
5802 typedef struct ocs_hw_linkcfg_cb_arg_s {
5803 	ocs_hw_port_control_cb_t cb;
5804 	void *arg;
5805 	uint32_t opts;
5806 	int32_t status;
5807 	ocs_dma_t dma_cmd;
5808 	ocs_dma_t dma_resp;
5809 	uint32_t result_len;
5810 } ocs_hw_linkcfg_cb_arg_t;
5811 
5812 /**
5813  * @brief Set link configuration.
5814  *
5815  * @param hw Hardware context.
5816  * @param value Link configuration enum to which the link configuration is
5817  * set.
5818  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5819  * @param cb Callback function to invoke following mbx command.
5820  * @param arg Callback argument.
5821  *
5822  * @return Returns OCS_HW_RTN_SUCCESS on success.
5823  */
5824 static ocs_hw_rtn_e
5825 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5826 {
5827 	if (!sli_link_is_configurable(&hw->sli)) {
5828 		ocs_log_debug(hw->os, "Function not supported\n");
5829 		return OCS_HW_RTN_ERROR;
5830 	}
5831 
5832 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
5833 		return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
5834 	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
5835 		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
5836 		return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
5837 	} else {
5838 		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
5839 		return OCS_HW_RTN_ERROR;
5840 	}
5841 }
5842 
5843 /**
5844  * @brief Set link configuration for Lancer
5845  *
5846  * @param hw Hardware context.
5847  * @param value Link configuration enum to which the link configuration is
5848  * set.
5849  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5850  * @param cb Callback function to invoke following mbx command.
5851  * @param arg Callback argument.
5852  *
5853  * @return Returns OCS_HW_RTN_SUCCESS on success.
5854  */
5855 static ocs_hw_rtn_e
5856 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5857 {
5858 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
5859 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
5860 	const char *value_str = NULL;
5861 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5862 
5863 	/* translate ocs_hw_linkcfg_e to CLP string */
5864 	value_str = ocs_hw_clp_from_linkcfg(value);
5865 
5866 	/* allocate memory for callback argument */
5867 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
5868 	if (cb_arg == NULL) {
5869 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5870 		return OCS_HW_RTN_NO_MEMORY;
5871 	}
5872 
5873 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
5874 	/* allocate DMA for command  */
5875 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
5876 		ocs_log_err(hw->os, "malloc failed\n");
5877 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5878 		return OCS_HW_RTN_NO_MEMORY;
5879 	}
5880 	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
5881 	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
5882 
5883 	/* allocate DMA for response */
5884 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
5885 		ocs_log_err(hw->os, "malloc failed\n");
5886 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5887 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5888 		return OCS_HW_RTN_NO_MEMORY;
5889 	}
5890 	cb_arg->cb = cb;
5891 	cb_arg->arg = arg;
5892 	cb_arg->opts = opts;
5893 
5894 	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
5895 					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
5896 
5897 	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
5898 		/* if failed, or polling, free memory here; if success and not
5899 		 * polling, will free in callback function
5900 		 */
5901 		if (rc) {
5902 			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
5903 					(char *)cb_arg->dma_cmd.virt);
5904 		}
5905 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5906 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
5907 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5908 	}
5909 	return rc;
5910 }
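
/*
 * For illustration: with value == OCS_HW_LINKCFG_2X16G, the lookup in
 * linkcfg_map above yields "ELX_2x16G", so the CLP command placed in
 * dma_cmd is
 *
 *	set / OEMELX_LinkConfig=ELX_2x16G
 *
 * and the response is parsed by ocs_hw_linkcfg_dmtf_clp_cb().
 */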
5911 
5912 /**
5913  * @brief Callback for ocs_hw_set_linkcfg_skyhawk
5914  *
5915  * @param hw Hardware context.
5916  * @param status Status from the SET_RECONFIG_LINK_ID command.
5917  * @param mqe Mailbox response structure.
5918  * @param arg Pointer to a callback argument.
5919  *
5920  * @return none
5921  */
5922 static void
5923 ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5924 {
5925 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
5926 
5927 	if (status) {
5928 		ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
5929 	}
5930 
5931 	/* invoke callback */
5932 	if (cb_arg->cb) {
5933 		cb_arg->cb(status, 0, cb_arg->arg);
5934 	}
5935 
5936 	/* if polling, will free memory in calling function */
5937 	if (cb_arg->opts != OCS_CMD_POLL) {
5938 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5939 	}
5940 }
5941 
5942 /**
5943  * @brief Set link configuration for a Skyhawk
5944  *
5945  * @param hw Hardware context.
5946  * @param value Link configuration enum to which the link configuration is
5947  * set.
5948  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5949  * @param cb Callback function to invoke following mbx command.
5950  * @param arg Callback argument.
5951  *
5952  * @return Returns OCS_HW_RTN_SUCCESS on success.
5953  */
5954 static ocs_hw_rtn_e
5955 ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5956 {
5957 	uint8_t *mbxdata;
5958 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
5959 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5960 	uint32_t config_id;
5961 
5962 	config_id = ocs_hw_config_id_from_linkcfg(value);
5963 
5964 	if (config_id == 0) {
5965 		ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
5966 		return OCS_HW_RTN_ERROR;
5967 	}
5968 
5969 	/* mbxdata holds the header of the command */
5970 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5971 	if (mbxdata == NULL) {
5972 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5973 		return OCS_HW_RTN_NO_MEMORY;
5974 	}
5975 
5976 	/* cb_arg holds the data that will be passed to the callback on completion */
5977 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
5978 	if (cb_arg == NULL) {
5979 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5980 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5981 		return OCS_HW_RTN_NO_MEMORY;
5982 	}
5983 
5984 	cb_arg->cb = cb;
5985 	cb_arg->arg = arg;
5986 
5987 	if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
5988 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
5989 	}
5990 
5991 	if (rc != OCS_HW_RTN_SUCCESS) {
5992 		ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
5993 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5994 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5995 	} else if (opts == OCS_CMD_POLL) {
5996 		/* if we're polling we have to call the callback here. */
5997 		ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
5998 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5999 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6000 	} else {
6001 		/* We weren't polling, so the callback got called */
6002 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6003 	}
6004 
6005 	return rc;
6006 }
6007 
6008 /**
6009  * @brief Get link configuration.
6010  *
6011  * @param hw Hardware context.
6012  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6013  * @param cb Callback function to invoke following mbx command.
6014  * @param arg Callback argument.
6015  *
6016  * @return Returns OCS_HW_RTN_SUCCESS on success.
6017  */
6018 static ocs_hw_rtn_e
6019 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6020 {
6021 	if (!sli_link_is_configurable(&hw->sli)) {
6022 		ocs_log_debug(hw->os, "Function not supported\n");
6023 		return OCS_HW_RTN_ERROR;
6024 	}
6025 
6026 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
6027 		return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
6028 	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
6029 		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
6030 		return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
6031 	} else {
6032 		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
6033 		return OCS_HW_RTN_ERROR;
6034 	}
6035 }
6036 
6037 /**
6038  * @brief Get link configuration for a Lancer
6039  *
6040  * @param hw Hardware context.
6041  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6042  * @param cb Callback function to invoke following mbx command.
6043  * @param arg Callback argument.
6044  *
6045  * @return Returns OCS_HW_RTN_SUCCESS on success.
6046  */
6047 static ocs_hw_rtn_e
6048 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6049 {
6050 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6051 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
6052 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6053 
6054 	/* allocate memory for callback argument */
6055 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6056 	if (cb_arg == NULL) {
6057 		ocs_log_err(hw->os, "failed to malloc cb_arg");
6058 		return OCS_HW_RTN_NO_MEMORY;
6059 	}
6060 
6061 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");
6062 
6063 	/* allocate DMA for command  */
6064 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6065 		ocs_log_err(hw->os, "malloc failed\n");
6066 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6067 		return OCS_HW_RTN_NO_MEMORY;
6068 	}
6069 
6070 	/* copy CLP command to DMA command */
6071 	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6072 	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
6073 
6074 	/* allocate DMA for response */
6075 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6076 		ocs_log_err(hw->os, "malloc failed\n");
6077 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6078 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6079 		return OCS_HW_RTN_NO_MEMORY;
6080 	}
6081 	cb_arg->cb = cb;
6082 	cb_arg->arg = arg;
6083 	cb_arg->opts = opts;
6084 
6085 	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
6086 					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
6087 
6088 	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6089 		/* if failed or polling, free memory here; if not polling and success,
6090 		 * will free in callback function
6091 		 */
6092 		if (rc) {
6093 			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
6094 					(char *)cb_arg->dma_cmd.virt);
6095 		}
6096 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6097 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
6098 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6099 	}
6100 	return rc;
6101 }
6102 
6103 
6104 /**
6105  * @brief Get the link configuration callback.
6106  *
6107  * @param hw Hardware context.
6108  * @param status Status from the RECONFIG_GET_LINK_INFO command.
6109  * @param mqe Mailbox response structure.
6110  * @param arg Pointer to a callback argument.
6111  *
6112  * @return none
6113  */
6114 static void
6115 ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6116 {
6117 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6118 	sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
6119 	ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;
6120 
6121 	if (status) {
6122 		ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
6123 	} else {
6124 		/* Call was successful */
6125 		value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
6126 	}
6127 
6128 	/* invoke callback */
6129 	if (cb_arg->cb) {
6130 		cb_arg->cb(status, value, cb_arg->arg);
6131 	}
6132 
6133 	/* if polling, will free memory in calling function */
6134 	if (cb_arg->opts != OCS_CMD_POLL) {
6135 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6136 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6137 	}
6138 }
6139 
6140 /**
6141  * @brief Get link configuration for a Skyhawk.
6142  *
6143  * @param hw Hardware context.
6144  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6145  * @param cb Callback function to invoke following mbx command.
6146  * @param arg Callback argument.
6147  *
6148  * @return Returns OCS_HW_RTN_SUCCESS on success.
6149  */
6150 static ocs_hw_rtn_e
6151 ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6152 {
6153 	uint8_t *mbxdata;
6154 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
6155 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6156 
6157 	/* mbxdata holds the header of the command */
6158 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6159 	if (mbxdata == NULL) {
6160 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6161 		return OCS_HW_RTN_NO_MEMORY;
6162 	}
6163 
6164 	/* cb_arg holds the data that will be passed to the callback on completion */
6165 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
6166 	if (cb_arg == NULL) {
6167 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6168 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6169 		return OCS_HW_RTN_NO_MEMORY;
6170 	}
6171 
6172 	cb_arg->cb = cb;
6173 	cb_arg->arg = arg;
6174 	cb_arg->opts = opts;
6175 
6176 	/* dma_cmd holds the non-embedded portion */
6177 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
6178 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
6179 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6180 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6181 		return OCS_HW_RTN_NO_MEMORY;
6182 	}
6183 
6184 	if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
6185 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
6186 	}
6187 
6188 	if (rc != OCS_HW_RTN_SUCCESS) {
6189 		ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
6190 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6191 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6192 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6193 	} else if (opts == OCS_CMD_POLL) {
6194 		/* if we're polling we have to call the callback here. */
6195 		ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
6196 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6197 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6198 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6199 	} else {
6200 		/* We weren't polling, so the callback got called */
6201 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6202 	}
6203 
6204 	return rc;
6205 }
6206 
6207 /**
6208  * @brief Sets the DIF seed value.
6209  *
6210  * @param hw Hardware context.
6211  *
6212  * @return Returns OCS_HW_RTN_SUCCESS on success.
6213  */
6214 static ocs_hw_rtn_e
6215 ocs_hw_set_dif_seed(ocs_hw_t *hw)
6216 {
6217 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6218 	uint8_t buf[SLI4_BMBX_SIZE];
6219 	sli4_req_common_set_features_dif_seed_t seed_param;
6220 
6221 	ocs_memset(&seed_param, 0, sizeof(seed_param));
6222 	seed_param.seed = hw->config.dif_seed;
6223 
6224 	/* send set_features command */
6225 	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6226 					SLI4_SET_FEATURES_DIF_SEED,
6227 					4,
6228 					(uint32_t*)&seed_param)) {
6229 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6230 		if (rc) {
6231 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6232 		} else {
6233 			ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
6234 					hw->config.dif_seed);
6235 		}
6236 	} else {
6237 		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6238 		rc = OCS_HW_RTN_ERROR;
6239 	}
6240 	return rc;
6241 }
6242 
6243 
6244 /**
6245  * @brief Sets the DIF mode value.
6246  *
6247  * @param hw Hardware context.
6248  *
6249  * @return Returns OCS_HW_RTN_SUCCESS on success.
6250  */
6251 static ocs_hw_rtn_e
6252 ocs_hw_set_dif_mode(ocs_hw_t *hw)
6253 {
6254 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6255 	uint8_t buf[SLI4_BMBX_SIZE];
6256 	sli4_req_common_set_features_t10_pi_mem_model_t mode_param;
6257 
6258 	ocs_memset(&mode_param, 0, sizeof(mode_param));
6259 	mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);
6260 
6261 	/* send set_features command */
6262 	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6263 					SLI4_SET_FEATURES_DIF_MEMORY_MODE,
6264 					sizeof(mode_param),
6265 					(uint32_t*)&mode_param)) {
6266 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6267 		if (rc) {
6268 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6269 		} else {
6270 			ocs_log_test(hw->os, "DIF mode set to %s\n",
6271 				(hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
6272 		}
6273 	} else {
6274 		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6275 		rc = OCS_HW_RTN_ERROR;
6276 	}
6277 	return rc;
6278 }
6279 
6280 static void
6281 ocs_hw_watchdog_timer_cb(void *arg)
6282 {
6283 	ocs_hw_t *hw = (ocs_hw_t *)arg;
6284 
6285 	ocs_hw_config_watchdog_timer(hw);
6286 	return;
6287 }
6288 
6289 static void
6290 ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6291 {
6292 	uint16_t timeout = hw->watchdog_timeout;
6293 
6294 	if (status != 0) {
6295 		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
6296 	} else {
6297 		if (timeout != 0) {
6298 			/* keep the callback 500 ms ahead of the timeout to keep the heartbeat alive */
6299 			ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500));
6300 		} else {
6301 			ocs_del_timer(&hw->watchdog_timer);
6302 		}
6303 	}
6304 
6305 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6306 	return;
6307 }
6308 
6309 /**
6310  * @brief Set configuration parameters for watchdog timer feature.
6311  *
6312  * @param hw Hardware context.
6313  * @param timeout Timeout for watchdog timer in seconds
6314  *
6315  * @return Returns OCS_HW_RTN_SUCCESS on success.
6316  */
6317 static ocs_hw_rtn_e
6318 ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
6319 {
6320 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6321 	uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
6322 
6323 	if (!buf) {
6324 		ocs_log_err(hw->os, "no buffer for command\n");
6325 		return OCS_HW_RTN_NO_MEMORY;
6326 	}
6327 
6328 	sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
6329 	rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
6330 	if (rc) {
6331 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
6332 		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
6333 	}
6334 	return rc;
6335 }
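
/*
 * Illustrative usage sketch -- not part of the driver. Arms the watchdog
 * with a 60 second timeout; ocs_hw_cb_cfg_watchdog() then re-arms the
 * timer 500 ms before each expiry (at 59500 ms here) so the heartbeat
 * stays alive. example_arm_watchdog is a hypothetical helper.
 */
static ocs_hw_rtn_e
example_arm_watchdog(ocs_hw_t *hw)
{
	hw->watchdog_timeout = 60;	/* seconds; 0 would disable the timer */
	return ocs_hw_config_watchdog_timer(hw);
}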
6336 
6337 /**
6338  * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
6339  *
6340  * @param hw Hardware context.
6341  * @param buf Pointer to a mailbox buffer area.
6342  *
6343  * @return Returns OCS_HW_RTN_SUCCESS on success.
6344  */
6345 static ocs_hw_rtn_e
6346 ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
6347 {
6348 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6349 	sli4_req_common_set_features_xfer_rdy_t10pi_t param;
6350 
6351 	ocs_memset(&param, 0, sizeof(param));
6352 	param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
6353 	param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
6354 	param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
6355 	param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
6356 	param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
6357 
6358 	switch (hw->config.auto_xfer_rdy_p_type) {
6359 	case 1:
6360 		param.p_type = 0;
6361 		break;
6362 	case 3:
6363 		param.p_type = 2;
6364 		break;
6365 	default:
6366 		ocs_log_err(hw->os, "unsupported p_type %d\n",
6367 			hw->config.auto_xfer_rdy_p_type);
6368 		return OCS_HW_RTN_ERROR;
6369 	}
6370 
6371 	/* build the set_features command */
6372 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6373 				    SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
6374 				    sizeof(param),
6375 				    &param);
6376 
6377 
6378 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6379 	if (rc) {
6380 		ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6381 	} else {
6382 		ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
6383 				param.rtc, param.atv, param.p_type,
6384 				param.app_tag, param.blk_size);
6385 	}
6386 
6387 	return rc;
6388 }
6389 
6390 
6391 /**
6392  * @brief Enable or disable the SLI port health check feature.
6393  *
6394  * @param hw Hardware context.
6395  * @param query If 1, query the current enabled/disabled state of the health check feature.
6396  * @param enable If 1, enable the health check; if 0, disable it.
6399  *
6400  * @return Returns OCS_HW_RTN_SUCCESS on success.
6401  */
6402 static ocs_hw_rtn_e
6403 ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
6404 {
6405 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6406 	uint8_t buf[SLI4_BMBX_SIZE];
6407 	sli4_req_common_set_features_health_check_t param;
6408 
6409 	ocs_memset(&param, 0, sizeof(param));
6410 	param.hck = enable;
6411 	param.qry = query;
6412 
6413 	/* build the set_features command */
6414 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6415 				    SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
6416 				    sizeof(param),
6417 				    &param);
6418 
6419 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6420 	if (rc) {
6421 		ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6422 	} else {
6423 		ocs_log_test(hw->os, "SLI Port Health Check is enabled \n");
6424 	}
6425 
6426 	return rc;
6427 }
6428 
6429 /**
6430  * @brief Set the FDT transfer hint feature.
6431  *
6432  * @param hw Hardware context.
6433  * @param fdt_xfer_hint Size, in bytes, at which read requests are segmented.
6434  *
6435  * @return Returns OCS_HW_RTN_SUCCESS on success.
6436  */
6437 static ocs_hw_rtn_e
6438 ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
6439 {
6440 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6441 	uint8_t buf[SLI4_BMBX_SIZE];
6442 	sli4_req_common_set_features_set_fdt_xfer_hint_t param;
6443 
6444 	ocs_memset(&param, 0, sizeof(param));
6445 	param.fdt_xfer_hint = fdt_xfer_hint;
6446 	/* build the set_features command */
6447 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6448 				    SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
6449 				    sizeof(param),
6450 				    &param);
6451 
6452 
6453 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6454 	if (rc) {
6455 		ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
6456 	} else {
6457 		ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint);
6458 	}
6459 
6460 	return rc;
6461 }
6462 
6463 /**
6464  * @brief Called when the DMTF CLP command issued for the link configuration completes.
6465  *
6466  * @param hw Hardware context.
6467  * @param status Status from the DMTF CLP command.
6468  * @param result_len Length, in bytes, of the DMTF CLP result.
6469  * @param arg Pointer to a callback argument.
6470  *
6471  * @return None.
6472  */
6473 static void
6474 ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
6475 {
6476 	int32_t rval;
6477 	char retdata_str[64];
6478 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6479 	ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;
6480 
6481 	if (status) {
6482 		ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
6483 	} else {
6484 		/* parse CLP response to get return data */
6485 		rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
6486 						  sizeof(retdata_str),
6487 						  cb_arg->dma_resp.virt,
6488 						  result_len);
6489 
6490 		if (rval <= 0) {
6491 			ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
6492 		} else {
6493 			/* translate string into hw enum */
6494 			linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
6495 		}
6496 	}
6497 
6498 	/* invoke callback */
6499 	if (cb_arg->cb) {
6500 		cb_arg->cb(status, linkcfg, cb_arg->arg);
6501 	}
6502 
6503 	/* if polling, will free memory in calling function */
6504 	if (cb_arg->opts != OCS_CMD_POLL) {
6505 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6506 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
6507 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6508 	}
6509 }
6510 
6511 /**
6512  * @brief Set the Lancer dump location
6513  * @par Description
6514  * This function tells a Lancer chip to use a specific DMA
6515  * buffer as a dump location rather than the internal flash.
6516  *
6517  * @param hw Hardware context.
6518  * @param num_buffers The number of DMA buffers to hold the dump (1..n).
6519  * @param dump_buffers DMA buffers to hold the dump.
 * @param fdb If non-zero, register the buffers as a function-level dump buffer; if zero, set the chip-level dump location (valid only on PCI function 0).
6520  *
6521  * @return Returns OCS_HW_RTN_SUCCESS on success.
6522  */
6523 ocs_hw_rtn_e
6524 ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
6525 {
6526 	uint8_t bus, dev, func;
6527 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6528 	uint8_t	buf[SLI4_BMBX_SIZE];
6529 
6530 	/*
6531 	 * Make sure the FW is new enough to support this command. If the FW
6532 	 * is too old, the FW will UE.
6533 	 */
6534 	if (hw->workaround.disable_dump_loc) {
6535 		ocs_log_test(hw->os, "FW version is too old for this feature\n");
6536 		return OCS_HW_RTN_ERROR;
6537 	}
6538 
6539 	/* This command is only valid for physical port 0 */
6540 	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
6541 	if (fdb == 0 && func != 0) {
6542 		ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
6543 			     func);
6544 		return OCS_HW_RTN_ERROR;
6545 	}
6546 
6547 	/*
6548 	 * If a single buffer is used, it may be passed as-is to the chip. For multiple buffers,
6549 	 * we must allocate an SGL list and then pass the address of the list to the chip.
6550 	 */
6551 	if (num_buffers > 1) {
6552 		uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
6553 		sli4_sge_t *sge;
6554 		uint32_t i;
6555 
6556 		if (hw->dump_sges.size < sge_size) {
6557 			ocs_dma_free(hw->os, &hw->dump_sges);
6558 			if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
6559 				ocs_log_err(hw->os, "SGE DMA allocation failed\n");
6560 				return OCS_HW_RTN_NO_MEMORY;
6561 			}
6562 		}
6563 		/* build the SGE list */
6564 		ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
6565 		hw->dump_sges.len = sge_size;
6566 		sge = hw->dump_sges.virt;
6567 		for (i = 0; i < num_buffers; i++) {
6568 			sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
6569 			sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
6570 			sge[i].last = (i == num_buffers - 1 ? 1 : 0);
6571 			sge[i].buffer_length = dump_buffers[i].size;
6572 		}
6573 		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6574 						      SLI4_BMBX_SIZE, FALSE, TRUE,
6575 						      &hw->dump_sges, fdb);
6576 	} else {
6577 		dump_buffers->len = dump_buffers->size;
6578 		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6579 						      SLI4_BMBX_SIZE, FALSE, FALSE,
6580 						      dump_buffers, fdb);
6581 	}
6582 
6583 	if (rc) {
6584 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
6585 				     NULL, NULL);
6586 		if (rc) {
6587 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
6588 				rc);
6589 		}
6590 	} else {
6591 		ocs_log_err(hw->os,
6592 			"sli_cmd_common_set_dump_location failed\n");
6593 		rc = OCS_HW_RTN_ERROR;
6594 	}
6595 
6596 	return rc;
6597 }
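
/*
 * Illustrative usage sketch -- not part of the driver. Registers two
 * caller-allocated DMA buffers as the chip-level dump destination
 * (fdb == 0, so this must run on PCI function 0). Because num_buffers
 * is greater than 1, ocs_hw_set_dump_location() builds an SGE list in
 * hw->dump_sges and hands that to the chip. example_* names are
 * hypothetical.
 */
static ocs_hw_rtn_e
example_register_dump_buffers(ocs_hw_t *hw, ocs_dma_t *bufs)
{
	return ocs_hw_set_dump_location(hw, 2, bufs, 0);
}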
6598 
6599 
6600 /**
6601  * @brief Set the Ethernet license.
6602  *
6603  * @par Description
6604  * This function sends the appropriate mailbox command (DMTF
6605  * CLP) to set the Ethernet license to the given license value.
6606  * Since it is used during ocs_hw_init(), the mailbox
6607  * command is sent via polling (the BMBX route).
6608  *
6609  * @param hw Hardware context.
6610  * @param license 32-bit license value.
6611  *
6612  * @return Returns OCS_HW_RTN_SUCCESS on success.
6613  */
6614 static ocs_hw_rtn_e
6615 ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
6616 {
6617 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6618 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6619 	ocs_dma_t dma_cmd;
6620 	ocs_dma_t dma_resp;
6621 
6622 	/* only for lancer right now */
6623 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6624 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6625 		return OCS_HW_RTN_ERROR;
6626 	}
6627 
6628 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
6629 	/* allocate DMA for command  */
6630 	if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6631 		ocs_log_err(hw->os, "malloc failed\n");
6632 		return OCS_HW_RTN_NO_MEMORY;
6633 	}
6634 	ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6635 	ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));
6636 
6637 	/* allocate DMA for response */
6638 	if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6639 		ocs_log_err(hw->os, "malloc failed\n");
6640 		ocs_dma_free(hw->os, &dma_cmd);
6641 		return OCS_HW_RTN_NO_MEMORY;
6642 	}
6643 
6644 	/* send DMTF CLP command mbx and poll */
6645 	if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
6646 		ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
6647 		rc = OCS_HW_RTN_ERROR;
6648 	}
6649 
6650 	ocs_dma_free(hw->os, &dma_cmd);
6651 	ocs_dma_free(hw->os, &dma_resp);
6652 	return rc;
6653 }
6654 
6655 /**
6656  * @brief Callback argument structure for the DMTF CLP commands.
6657  */
6658 typedef struct ocs_hw_clp_cb_arg_s {
6659 	ocs_hw_dmtf_clp_cb_t cb;
6660 	ocs_dma_t *dma_resp;
6661 	int32_t status;
6662 	uint32_t opts;
6663 	void *arg;
6664 } ocs_hw_clp_cb_arg_t;
6665 
6666 /**
6667  * @brief Execute the DMTF CLP command.
6668  *
6669  * @param hw Hardware context.
6670  * @param dma_cmd DMA buffer containing the CLP command.
6671  * @param dma_resp DMA buffer that will contain the response (if successful).
6672  * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
6673  * @param cb Callback function.
6674  * @param arg Callback argument.
6675  *
6676  * @return Returns OCS_HW_RTN_SUCCESS on success, or a non-zero
6677  * error code on failure.
6678  */
6679 static ocs_hw_rtn_e
6680 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
6681 {
6682 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6683 	ocs_hw_clp_cb_arg_t *cb_arg;
6684 	uint8_t *mbxdata;
6685 
6686 	/* allocate DMA for mailbox */
6687 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6688 	if (mbxdata == NULL) {
6689 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6690 		return OCS_HW_RTN_NO_MEMORY;
6691 	}
6692 
6693 	/* allocate memory for callback argument */
6694 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6695 	if (cb_arg == NULL) {
6696 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6697 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6698 		return OCS_HW_RTN_NO_MEMORY;
6699 	}
6700 
6701 	cb_arg->cb = cb;
6702 	cb_arg->arg = arg;
6703 	cb_arg->dma_resp = dma_resp;
6704 	cb_arg->opts = opts;
6705 
6706 	/* Send the HW command */
6707 	if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6708 				      dma_cmd, dma_resp)) {
6709 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);
6710 
6711 		if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
6712 			/* if we're polling, copy response and invoke callback to
6713 			 * parse result */
6714 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6715 			ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);
6716 
6717 			/* set rc to resulting or "parsed" status */
6718 			rc = cb_arg->status;
6719 		}
6720 
6721 		/* if failed, or polling, free memory here */
6722 		if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6723 			if (rc != OCS_HW_RTN_SUCCESS) {
6724 				ocs_log_test(hw->os, "ocs_hw_command failed\n");
6725 			}
6726 			ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6727 			ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6728 		}
6729 	} else {
6730 		ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
6731 		rc = OCS_HW_RTN_ERROR;
6732 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6733 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6734 	}
6735 
6736 	return rc;
6737 }
6738 
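/*
 * Example (illustrative sketch): issuing a CLP command asynchronously
 * with OCS_CMD_NOWAIT instead of polling. The callback prototype is
 * assumed from the cb_arg->cb() invocation in ocs_hw_dmtf_clp_cb()
 * below; "example_clp_done" is hypothetical.
 *
 * @code
 * static void
 * example_clp_done(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
 * {
 * 	ocs_dma_t *dma_resp = arg;
 *
 * 	if (status == 0) {
 * 		// dma_resp->virt now holds result_len bytes of CLP response text
 * 		ocs_log_debug(hw->os, "CLP response: %.*s\n",
 * 			      (int)result_len, (char *)dma_resp->virt);
 * 	}
 * }
 *
 * // With dma_cmd/dma_resp allocated as in ocs_hw_set_eth_license() above
 * // (both must stay allocated until the callback runs):
 * // ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_NOWAIT,
 * //			     example_clp_done, &dma_resp);
 * @endcode
 */
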
6739 
6740 /**
6741  * @brief Called when the DMTF CLP command completes.
6742  *
6743  * @param hw Hardware context.
6744  * @param status Status field from the mbox completion.
6745  * @param mqe Mailbox response structure.
6746  * @param arg Pointer to a callback argument.
6747  *
6748  * @return None.
6749  *
6750  */
6751 static void
6752 ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6753 {
6754 	int32_t cb_status = 0;
6755 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6756 	sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
6757 	ocs_hw_clp_cb_arg_t *cb_arg = arg;
6758 	uint32_t result_len = 0;
6759 	int32_t stat_len;
6760 	char stat_str[8];
6761 
6762 	/* there are several status codes here, check them all and condense
6763 	 * into a single callback status
6764 	 */
6765 	if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
6766 		ocs_log_debug(hw->os, "status=x%x/x%x/x%x  addl=x%x clp=x%x detail=x%x\n",
6767 			status,
6768 			mbox_rsp->hdr.status,
6769 			clp_rsp->hdr.status,
6770 			clp_rsp->hdr.additional_status,
6771 			clp_rsp->clp_status,
6772 			clp_rsp->clp_detailed_status);
6773 		if (status) {
6774 			cb_status = status;
6775 		} else if (mbox_rsp->hdr.status) {
6776 			cb_status = mbox_rsp->hdr.status;
6777 		} else {
6778 			cb_status = clp_rsp->clp_status;
6779 		}
6780 	} else {
6781 		result_len = clp_rsp->resp_length;
6782 	}
6783 
6784 	if (cb_status) {
6785 		goto ocs_hw_cb_dmtf_clp_done;
6786 	}
6787 
6788 	if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
6789 		ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
6790 			     cb_arg->dma_resp->size, result_len);
6791 		cb_status = -1;
6792 		goto ocs_hw_cb_dmtf_clp_done;
6793 	}
6794 
6795 	/* parse CLP response to get status */
6796 	stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
6797 					      sizeof(stat_str),
6798 					      cb_arg->dma_resp->virt,
6799 					      result_len);
6800 
6801 	if (stat_len <= 0) {
6802 		ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
6803 		cb_status = -1;
6804 		goto ocs_hw_cb_dmtf_clp_done;
6805 	}
6806 
6807 	if (ocs_strcmp(stat_str, "0") != 0) {
6808 		ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
6809 		cb_status = -1;
6810 		goto ocs_hw_cb_dmtf_clp_done;
6811 	}
6812 
6813 ocs_hw_cb_dmtf_clp_done:
6814 
6815 	/* save status in cb_arg for callers with NULL cb's + polling */
6816 	cb_arg->status = cb_status;
6817 	if (cb_arg->cb) {
6818 		cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
6819 	}
6820 	/* if polling, caller will free memory */
6821 	if (cb_arg->opts != OCS_CMD_POLL) {
6822 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6823 		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6824 	}
6825 }
6826 
6827 /**
6828  * @brief Parse the CLP result and get the value corresponding to the given
6829  * keyword.
6830  *
6831  * @param hw Hardware context.
6832  * @param keyword CLP keyword for which the value is returned.
6833  * @param value Location to which the resulting value is copied.
6834  * @param value_len Length of the value parameter.
6835  * @param resp Pointer to the response buffer that is searched
6836  * for the keyword and value.
6837  * @param resp_len Length of response buffer passed in.
6838  *
6839  * @return Returns the number of bytes written to the value
6840  * buffer on success, or a negative value on failure.
6841  */
6842 static int32_t
6843 ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
6844 {
6845 	char *start = NULL;
6846 	char *end = NULL;
6847 
6848 	/* look for specified keyword in string */
6849 	start = ocs_strstr(resp, keyword);
6850 	if (start == NULL) {
6851 		ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
6852 			     keyword);
6853 		return -1;
6854 	}
6855 
6856 	/* now look for '=' and go one past */
6857 	start = ocs_strchr(start, '=');
6858 	if (start == NULL) {
6859 		ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
6860 			     keyword);
6861 		return -1;
6862 	}
6863 	start++;
6864 
6865 	/* \r\n terminates value */
6866 	end = ocs_strstr(start, "\r\n");
6867 	if (end == NULL) {
6868 		ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
6869 			     keyword);
6870 		return -1;
6871 	}
6872 
6873 	/* make sure given result array is big enough */
6874 	if ((end - start + 1) > value_len) {
6875 		ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
6876 			     value_len, (end-start));
6877 		return -1;
6878 	}
6879 
6880 	ocs_strncpy(value, start, (end - start));
6881 	value[end-start] = '\0';
6882 	return (end-start+1);
6883 }
6884 
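/*
 * Example (illustrative): extracting the "status" keyword from a CLP
 * response of the form "status=0\r\n" with the parser above.
 *
 * @code
 * const char resp[] = "status=0\r\n";
 * char stat_str[8];
 * int32_t n;
 *
 * n = ocs_hw_clp_resp_get_value(hw, "status", stat_str, sizeof(stat_str),
 * 				 resp, sizeof(resp) - 1);
 * // On success n == 2 (one value byte plus the NUL terminator) and
 * // stat_str contains "0".
 * @endcode
 */
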
6885 /**
6886  * @brief Cause chip to enter an unrecoverable error state.
6887  *
6888  * @par Description
6889  * Cause chip to enter an unrecoverable error state. This is
6890  * used when detecting unexpected FW behavior so that the FW can be
6891  * halted by the driver as soon as the error is detected.
6892  *
6893  * @param hw Hardware context.
6894  * @param dump Generate dump as part of reset.
6895  *
6896  * @return Returns 0 on success, or a non-zero value on failure.
6897  *
6898  */
6899 ocs_hw_rtn_e
6900 ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
6901 {
6902 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6903 
6904 	if (sli_raise_ue(&hw->sli, dump) != 0) {
6905 		rc = OCS_HW_RTN_ERROR;
6906 	} else {
6907 		if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
6908 			hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
6909 		}
6910 	}
6911 
6912 	return rc;
6913 }
6914 
6915 /**
6916  * @brief Called when the OBJECT_GET command completes.
6917  *
6918  * @par Description
6919  * Get the number of bytes actually written out of the response, free the mailbox
6920  * that was malloc'd by ocs_hw_dump_get(), then call the callback
6921  * and pass the status and bytes read.
6922  *
6923  * @param hw Hardware context.
6924  * @param status Status field from the mbox completion.
6925  * @param mqe Mailbox response structure.
6926  * @param arg Pointer to a callback function that signals the caller that the command is done.
6927  * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_read)</tt>.
6928  *
6929  * @return Returns 0.
6930  */
6931 static int32_t
6932 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6933 {
6934 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6935 	sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
6936 	ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
6937 	uint32_t bytes_read;
6938 	uint8_t eof;
6939 
6940 	bytes_read = rd_obj_rsp->actual_read_length;
6941 	eof = rd_obj_rsp->eof;
6942 
6943 	if (cb_arg) {
6944 		if (cb_arg->cb) {
6945 			if ((status == 0) && mbox_rsp->hdr.status) {
6946 				status = mbox_rsp->hdr.status;
6947 			}
6948 			cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
6949 		}
6950 
6951 		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
6952 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
6953 	}
6954 
6955 	return 0;
6956 }
6957 
6958 
6959 /**
6960  * @brief Read a dump image to the host.
6961  *
6962  * @par Description
6963  * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
6964  * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
6965  * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
6966  * and signal the caller that the read has completed.
6967  *
6968  * @param hw Hardware context.
6969  * @param dma DMA structure to transfer the dump chunk into.
6970  * @param size Size of the dump chunk.
6971  * @param offset Offset, in bytes, from the beginning of the dump.
6972  * @param cb Pointer to a callback function that is called when the command completes.
6973  * The callback function prototype is
6974  * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
6975  * @param arg Pointer to be passed to the callback function.
6976  *
6977  * @return Returns 0 on success, or a non-zero value on failure.
6978  */
6979 ocs_hw_rtn_e
6980 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
6981 {
6982 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6983 	uint8_t *mbxdata;
6984 	ocs_hw_dump_get_cb_arg_t *cb_arg;
6985 	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
6986 
6987 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6988 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6989 		return OCS_HW_RTN_ERROR;
6990 	}
6991 
6992 	if (1 != sli_dump_is_present(&hw->sli)) {
6993 		ocs_log_test(hw->os, "No dump is present\n");
6994 		return OCS_HW_RTN_ERROR;
6995 	}
6996 
6997 	if (1 == sli_reset_required(&hw->sli)) {
6998 		ocs_log_test(hw->os, "device reset required\n");
6999 		return OCS_HW_RTN_ERROR;
7000 	}
7001 
7002 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7003 	if (mbxdata == NULL) {
7004 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7005 		return OCS_HW_RTN_NO_MEMORY;
7006 	}
7007 
7008 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
7009 	if (cb_arg == NULL) {
7010 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7011 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7012 		return OCS_HW_RTN_NO_MEMORY;
7013 	}
7014 
7015 	cb_arg->cb = cb;
7016 	cb_arg->arg = arg;
7017 	cb_arg->mbox_cmd = mbxdata;
7018 
7019 	if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7020 			size, offset, "/dbg/dump.bin", dma)) {
7021 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
7022 		if (rc == 0 && opts == OCS_CMD_POLL) {
7023 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7024 			rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
7025 		}
7026 	}
7027 
7028 	if (rc != OCS_HW_RTN_SUCCESS) {
7029 		ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
7030 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7031 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
7032 	}
7033 
7034 	return rc;
7035 }
7036 
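/*
 * Example (illustrative sketch): reading the first dump chunk into a
 * pre-allocated DMA buffer with ocs_hw_dump_get() above. A real
 * consumer would advance the offset by bytes_read and reissue the call
 * until eof is set; "example_dump_chunk_done" is hypothetical.
 *
 * @code
 * static void
 * example_dump_chunk_done(int32_t status, uint32_t bytes_read,
 * 			   uint8_t eof, void *arg)
 * {
 * 	ocs_dma_t *dma = arg;
 *
 * 	if (status == 0) {
 * 		// consume bytes_read bytes from dma->virt; if !eof,
 * 		// request the next chunk at offset += bytes_read
 * 	}
 * }
 *
 * // Read up to dma.size bytes starting at offset 0:
 * // ocs_hw_dump_get(hw, &dma, dma.size, 0, example_dump_chunk_done, &dma);
 * @endcode
 */
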
7037 /**
7038  * @brief Called when the OBJECT_DELETE command completes.
7039  *
7040  * @par Description
7041  * Free the mailbox that was malloc'd
7042  * by ocs_hw_dump_clear(), then call the callback and pass the status.
7043  *
7044  * @param hw Hardware context.
7045  * @param status Status field from the mbox completion.
7046  * @param mqe Mailbox response structure.
7047  * @param arg Pointer to a callback function that signals the caller that the command is done.
7048  * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
7049  *
7050  * @return Returns 0.
7051  */
7052 static int32_t
7053 ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
7054 {
7055 	ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
7056 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7057 
7058 	if (cb_arg) {
7059 		if (cb_arg->cb) {
7060 			if ((status == 0) && mbox_rsp->hdr.status) {
7061 				status = mbox_rsp->hdr.status;
7062 			}
7063 			cb_arg->cb(status, cb_arg->arg);
7064 		}
7065 
7066 		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
7067 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7068 	}
7069 
7070 	return 0;
7071 }
7072 
7073 /**
7074  * @brief Clear a dump image from the device.
7075  *
7076  * @par Description
7077  * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
7078  * the dump, then sends the command with ocs_hw_command(). On completion,
7079  * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
7080  * and to signal the caller that the write has completed.
7081  *
7082  * @param hw Hardware context.
7083  * @param cb Pointer to a callback function that is called when the command completes.
7084  * The callback function prototype is
7085  * <tt>void cb(int32_t status, void *arg)</tt>.
7086  * @param arg Pointer to be passed to the callback function.
7087  *
7088  * @return Returns 0 on success, or a non-zero value on failure.
7089  */
7090 ocs_hw_rtn_e
7091 ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
7092 {
7093 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7094 	uint8_t *mbxdata;
7095 	ocs_hw_dump_clear_cb_arg_t *cb_arg;
7096 	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
7097 
7098 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
7099 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
7100 		return OCS_HW_RTN_ERROR;
7101 	}
7102 
7103 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7104 	if (mbxdata == NULL) {
7105 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7106 		return OCS_HW_RTN_NO_MEMORY;
7107 	}
7108 
7109 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
7110 	if (cb_arg == NULL) {
7111 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7112 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7113 		return OCS_HW_RTN_NO_MEMORY;
7114 	}
7115 
7116 	cb_arg->cb = cb;
7117 	cb_arg->arg = arg;
7118 	cb_arg->mbox_cmd = mbxdata;
7119 
7120 	if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7121 			"/dbg/dump.bin")) {
7122 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
7123 		if (rc == 0 && opts == OCS_CMD_POLL) {
7124 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7125 			rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
7126 		}
7127 	}
7128 
7129 	if (rc != OCS_HW_RTN_SUCCESS) {
7130 		ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
7131 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7132 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7133 	}
7134 
7135 	return rc;
7136 }
7137 
7138 typedef struct ocs_hw_get_port_protocol_cb_arg_s {
7139 	ocs_get_port_protocol_cb_t cb;
7140 	void *arg;
7141 	uint32_t pci_func;
7142 	ocs_dma_t payload;
7143 } ocs_hw_get_port_protocol_cb_arg_t;
7144 
7145 /**
7146  * @brief Called for the completion of get_port_protocol for a
7147  *        user request.
7148  *
7149  * @param hw Hardware context.
7150  * @param status The status from the MQE.
7151  * @param mqe Pointer to mailbox command buffer.
7152  * @param arg Pointer to a callback argument.
7153  *
7154  * @return Returns 0 on success, or a non-zero value on failure.
7155  */
7156 static int32_t
7157 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status,
7158 			    uint8_t *mqe, void *arg)
7159 {
7160 	ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg;
7161 	ocs_dma_t *payload = &(cb_arg->payload);
7162 	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7163 	ocs_hw_port_protocol_e port_protocol;
7164 	int num_descriptors;
7165 	sli4_resource_descriptor_v1_t *desc_p;
7166 	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7167 	int i;
7168 
7169 	port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7170 
7171 	num_descriptors = response->desc_count;
7172 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7173 	for (i=0; i<num_descriptors; i++) {
7174 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7175 			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7176 			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7177 				switch(pcie_desc_p->pf_type) {
7178 				case 0x02:
7179 					port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
7180 					break;
7181 				case 0x04:
7182 					port_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
7183 					break;
7184 				case 0x10:
7185 					port_protocol = OCS_HW_PORT_PROTOCOL_FC;
7186 					break;
7187 				default:
7188 					port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7189 					break;
7190 				}
7191 			}
7192 		}
7193 
7194 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7195 	}
7196 
7197 	if (cb_arg->cb) {
7198 		cb_arg->cb(status, port_protocol, cb_arg->arg);
7199 
7200 	}
7201 
7202 	ocs_dma_free(hw->os, &cb_arg->payload);
7203 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7204 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7205 
7206 	return 0;
7207 }
7208 
7209 /**
7210  * @ingroup io
7211  * @brief  Get the current port protocol.
7212  * @par Description
7213  * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox.  When the
7214  * command completes the provided mgmt callback function is
7215  * called.
7216  *
7217  * @param hw Hardware context.
7218  * @param pci_func PCI function to query for current protocol.
7219  * @param cb Callback function to be called when the command completes.
7220  * @param ul_arg An argument that is passed to the callback function.
7221  *
7222  * @return
7223  * - OCS_HW_RTN_SUCCESS on success.
7224  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7225  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7226  *   context.
7227  * - OCS_HW_RTN_ERROR on any other error.
7228  */
7229 ocs_hw_rtn_e
7230 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
7231 	ocs_get_port_protocol_cb_t cb, void* ul_arg)
7232 {
7233 	uint8_t *mbxdata;
7234 	ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
7235 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7236 
7237 	/* Only supported on Skyhawk */
7238 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7239 		return OCS_HW_RTN_ERROR;
7240 	}
7241 
7242 	/* mbxdata holds the header of the command */
7243 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7244 	if (mbxdata == NULL) {
7245 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7246 		return OCS_HW_RTN_NO_MEMORY;
7247 	}
7248 
7249 
7250 	/* cb_arg holds the data that will be passed to the callback on completion */
7251 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7252 	if (cb_arg == NULL) {
7253 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7254 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7255 		return OCS_HW_RTN_NO_MEMORY;
7256 	}
7257 
7258 	cb_arg->cb = cb;
7259 	cb_arg->arg = ul_arg;
7260 	cb_arg->pci_func = pci_func;
7261 
7262 	/* dma_mem holds the non-embedded portion */
7263 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7264 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7265 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7266 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7267 		return OCS_HW_RTN_NO_MEMORY;
7268 	}
7269 
7270 	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7271 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
7272 	}
7273 
7274 	if (rc != OCS_HW_RTN_SUCCESS) {
7275 		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7276 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7277 		ocs_dma_free(hw->os, &cb_arg->payload);
7278 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7279 	}
7280 
7281 	return rc;
7282 
7283 }
7284 
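/*
 * Example (illustrative sketch): querying the protocol configured for
 * PCI function 0. The ocs_get_port_protocol_cb_t prototype is assumed
 * from the cb_arg->cb() invocation in ocs_hw_get_port_protocol_cb()
 * above; "example_protocol_cb" is hypothetical.
 *
 * @code
 * static void
 * example_protocol_cb(int32_t status, ocs_hw_port_protocol_e protocol, void *arg)
 * {
 * 	ocs_hw_t *hw = arg;
 *
 * 	if (status == 0) {
 * 		ocs_log_debug(hw->os, "pf 0 protocol: %s\n",
 * 			      (protocol == OCS_HW_PORT_PROTOCOL_FC) ? "FC" :
 * 			      (protocol == OCS_HW_PORT_PROTOCOL_FCOE) ? "FCoE" :
 * 			      (protocol == OCS_HW_PORT_PROTOCOL_ISCSI) ? "iSCSI" :
 * 			      "other");
 * 	}
 * }
 *
 * // ocs_hw_get_port_protocol(hw, 0, example_protocol_cb, hw);
 * @endcode
 */
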
7285 typedef struct ocs_hw_set_port_protocol_cb_arg_s {
7286 	ocs_set_port_protocol_cb_t cb;
7287 	void *arg;
7288 	ocs_dma_t payload;
7289 	uint32_t new_protocol;
7290 	uint32_t pci_func;
7291 } ocs_hw_set_port_protocol_cb_arg_t;
7292 
7293 /**
7294  * @brief Called for the completion of set_port_protocol for a
7295  *        user request.
7296  *
7297  * @par Description
7298  * This is the second of two callbacks for the set_port_protocol
7299  * function. The set operation is a read-modify-write. This
7300  * callback is called when the write (SET_PROFILE_CONFIG)
7301  * completes.
7302  *
7303  * @param hw Hardware context.
7304  * @param status The status from the MQE.
7305  * @param mqe Pointer to mailbox command buffer.
7306  * @param arg Pointer to a callback argument.
7307  *
7308  * @return 0 on success, non-zero otherwise
7309  */
7310 static int32_t
7311 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7312 {
7313 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7314 
7315 	if (cb_arg->cb) {
7316 		cb_arg->cb( status, cb_arg->arg);
7317 	}
7318 
7319 	ocs_dma_free(hw->os, &(cb_arg->payload));
7320 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7321 	ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7322 
7323 	return 0;
7324 }
7325 
7326 /**
7327  * @brief Called for the completion of set_port_protocol for a
7328  *        user request.
7329  *
7330  * @par Description
7331  * This is the first of two callbacks for the set_port_protocol
7332  * function.  The set operation is a read-modify-write.  This
7333  * callback is called when the read completes
7334  * (GET_PROFILE_CONFIG).  It updates the resource
7335  * descriptors, then queues the write (SET_PROFILE_CONFIG).
7336  *
7337  * On entry there are three memory areas that were allocated by
7338  * ocs_hw_set_port_protocol.  If a failure is detected in this
7339  * function those need to be freed.  If this function succeeds
7340  * it allocates three more areas.
7341  *
7342  * @param hw Hardware context.
7343  * @param status The status from the MQE
7344  * @param mqe Pointer to mailbox command buffer.
7345  * @param arg Pointer to a callback argument.
7346  *
7347  * @return Returns 0 on success, or a non-zero value otherwise.
7348  */
7349 static int32_t
7350 ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7351 {
7352 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7353 	ocs_dma_t *payload = &(cb_arg->payload);
7354 	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7355 	int num_descriptors;
7356 	sli4_resource_descriptor_v1_t *desc_p;
7357 	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7358 	int i;
7359 	ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
7360 	ocs_hw_port_protocol_e new_protocol;
7361 	uint8_t *dst;
7362 	sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
7363 	uint8_t *mbxdata;
7364 	int pci_descriptor_count;
7365 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7366 	int num_fcoe_ports = 0;
7367 	int num_iscsi_ports = 0;
7368 
7369 	new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
7370 
7371 	num_descriptors = response->desc_count;
7372 
7373 	/* Count PCI descriptors */
7374 	pci_descriptor_count = 0;
7375 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7376 	for (i=0; i<num_descriptors; i++) {
7377 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7378 			++pci_descriptor_count;
7379 		}
7380 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7381 	}
7382 
7383 	/* mbxdata holds the header of the command */
7384 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7385 	if (mbxdata == NULL) {
7386 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7387 		return OCS_HW_RTN_NO_MEMORY;
7388 	}
7389 
7390 
7391 	/* cb_arg holds the data that will be passed to the callback on completion */
7392 	new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7393 	if (new_cb_arg == NULL) {
7394 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7395 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7396 		return OCS_HW_RTN_NO_MEMORY;
7397 	}
7398 
7399 	new_cb_arg->cb = cb_arg->cb;
7400 	new_cb_arg->arg = cb_arg->arg;
7401 
7402 	/* Allocate memory for the descriptors we're going to send.  This is
7403 	 * one for each PCI descriptor plus one ISAP descriptor. */
7404 	if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
7405 			  (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
7406 			  sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
7407 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7408 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7409 		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7410 		return OCS_HW_RTN_NO_MEMORY;
7411 	}
7412 
7413 	sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7414 						   &new_cb_arg->payload,
7415 						   0, pci_descriptor_count+1, 1);
7416 
7417 	/* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
7418 	dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
7419 
7420 	/* Loop over all descriptors.  If the descriptor is a PCIe descriptor, copy it
7421 	 * to the SET_PROFILE_CONFIG command to be written back.  If it's the descriptor
7422 	 * that we're trying to change also set its pf_type.
7423 	 */
7424 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7425 	for (i=0; i<num_descriptors; i++) {
7426 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7427 			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7428 			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7429 				/* This is the PCIe descriptor for this OCS instance.
7430 				 * Update it with the new pf_type */
7431 				switch(new_protocol) {
7432 				case OCS_HW_PORT_PROTOCOL_FC:
7433 					pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
7434 					break;
7435 				case OCS_HW_PORT_PROTOCOL_FCOE:
7436 					pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
7437 					break;
7438 				case OCS_HW_PORT_PROTOCOL_ISCSI:
7439 					pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
7440 					break;
7441 				default:
7442 					pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
7443 					break;
7444 				}
7445 
7446 			}
7447 
7448 			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
7449 				++num_fcoe_ports;
7450 			}
7451 			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
7452 				++num_iscsi_ports;
7453 			}
7454 			ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
7455 			dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
7456 		}
7457 
7458 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7459 	}
7460 
7461 	/* Create an ISAP resource descriptor */
7462 	isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
7463 	isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
7464 	isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
7465 	if (num_iscsi_ports > 0) {
7466 		isap_desc_p->iscsi_tgt = 1;
7467 		isap_desc_p->iscsi_ini = 1;
7468 		isap_desc_p->iscsi_dif = 1;
7469 	}
7470 	if (num_fcoe_ports > 0) {
7471 		isap_desc_p->fcoe_tgt = 1;
7472 		isap_desc_p->fcoe_ini = 1;
7473 		isap_desc_p->fcoe_dif = 1;
7474 	}
7475 
7476 	/* At this point we're done with the memory allocated by ocs_hw_set_port_protocol */
7477 	ocs_dma_free(hw->os, &cb_arg->payload);
7478 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7479 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7480 
7481 
7482 	/* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
7483 	rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
7484 	if (rc) {
7485 		ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
7486 		/* Call the upper level callback to report a failure */
7487 		if (new_cb_arg->cb) {
7488 			new_cb_arg->cb( rc, new_cb_arg->arg);
7489 		}
7490 
7491 		/* Free the memory allocated by this function */
7492 		ocs_dma_free(hw->os, &new_cb_arg->payload);
7493 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7494 		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7495 	}
7496 
7497 
7498 	return rc;
7499 }
7500 
7501 /**
7502  * @ingroup io
7503  * @brief  Set the port protocol.
7504  * @par Description
7505  * Setting the port protocol is a read-modify-write operation.
7506  * This function submits a GET_PROFILE_CONFIG command to read
7507  * the current settings.  The callback function will modify the
7508  * settings and issue the write.
7509  *
7510  * On successful completion this function will have allocated
7511  * two regular memory areas and one dma area which will need to
7512  * get freed later in the callbacks.
7513  *
7514  * @param hw Hardware context.
7515  * @param new_protocol New protocol to use.
7516  * @param pci_func PCI function to configure.
7517  * @param cb Callback function to be called when the command completes.
7518  * @param ul_arg An argument that is passed to the callback function.
7519  *
7520  * @return
7521  * - OCS_HW_RTN_SUCCESS on success.
7522  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7523  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7524  *   context.
7525  * - OCS_HW_RTN_ERROR on any other error.
7526  */
7527 ocs_hw_rtn_e
7528 ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
7529 		uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
7530 {
7531 	uint8_t *mbxdata;
7532 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
7533 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7534 
7535 	/* Only supported on Skyhawk */
7536 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7537 		return OCS_HW_RTN_ERROR;
7538 	}
7539 
7540 	/* mbxdata holds the header of the command */
7541 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7542 	if (mbxdata == NULL) {
7543 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7544 		return OCS_HW_RTN_NO_MEMORY;
7545 	}
7546 
7547 
7548 	/* cb_arg holds the data that will be passed to the callback on completion */
7549 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7550 	if (cb_arg == NULL) {
7551 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7552 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7553 		return OCS_HW_RTN_NO_MEMORY;
7554 	}
7555 
7556 	cb_arg->cb = cb;
7557 	cb_arg->arg = ul_arg;
7558 	cb_arg->new_protocol = new_protocol;
7559 	cb_arg->pci_func = pci_func;
7560 
7561 	/* dma_mem holds the non-embedded portion */
7562 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7563 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7564 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7565 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7566 		return OCS_HW_RTN_NO_MEMORY;
7567 	}
7568 
7569 	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7570 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
7571 	}
7572 
7573 	if (rc != OCS_HW_RTN_SUCCESS) {
7574 		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7575 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7576 		ocs_dma_free(hw->os, &cb_arg->payload);
7577 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7578 	}
7579 
7580 	return rc;
7581 }
7582 
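/*
 * Example (illustrative sketch): switching PCI function 0 to FC. The
 * completion callback only receives the final status; the
 * read-modify-write described above happens inside the two internal
 * callbacks. "example_set_done" is hypothetical.
 *
 * @code
 * static void
 * example_set_done(int32_t status, void *arg)
 * {
 * 	ocs_hw_t *hw = arg;
 *
 * 	ocs_log_debug(hw->os, "set port protocol completed, status=%d\n",
 * 		      status);
 * }
 *
 * // ocs_hw_set_port_protocol(hw, OCS_HW_PORT_PROTOCOL_FC, 0,
 * //			    example_set_done, hw);
 * @endcode
 */
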
7583 typedef struct ocs_hw_get_profile_list_cb_arg_s {
7584 	ocs_get_profile_list_cb_t cb;
7585 	void *arg;
7586 	ocs_dma_t payload;
7587 } ocs_hw_get_profile_list_cb_arg_t;
7588 
7589 /**
7590  * @brief Called for the completion of get_profile_list for a
7591  *        user request.
7592  * @par Description
7593  * This function is called when the COMMON_GET_PROFILE_LIST
7594  * mailbox completes.  The response will be in
7595  * cb_arg->payload.virt.  This function parses the
7596  * response and creates an ocs_hw_profile_list, then calls the
7597  * mgmt_cb callback function and passes that list to it.
7598  *
7599  * @param hw Hardware context.
7600  * @param status The status from the MQE
7601  * @param mqe Pointer to mailbox command buffer.
7602  * @param arg Pointer to a callback argument.
7603  *
7604  * @return Returns 0 on success, or a non-zero value on failure.
7605  */
7606 static int32_t
7607 ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7608 {
7609 	ocs_hw_profile_list_t *list;
7610 	ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
7611 	ocs_dma_t *payload = &(cb_arg->payload);
7612 	sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
7613 	int i;
7614 	int num_descriptors;
7615 
7616 	list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
	if (list == NULL) {
		ocs_log_err(hw->os, "failed to malloc list\n");
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
		return -1;
	}
7617 	list->num_descriptors = response->profile_descriptor_count;
7618 
7619 	num_descriptors = list->num_descriptors;
7620 	if (num_descriptors > OCS_HW_MAX_PROFILES) {
7621 		num_descriptors = OCS_HW_MAX_PROFILES;
7622 	}
7623 
7624 	for (i=0; i<num_descriptors; i++) {
7625 		list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
7626 		list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
7627 		ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
7628 	}
7629 
7630 	if (cb_arg->cb) {
7631 		cb_arg->cb(status, list, cb_arg->arg);
7632 	} else {
7633 		ocs_free(hw->os, list, sizeof(*list));
7634 	}
7635 
7636 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7637 	ocs_dma_free(hw->os, &cb_arg->payload);
7638 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7639 
7640 	return 0;
7641 }
7642 
7643 /**
7644  * @ingroup io
7645  * @brief  Get a list of available profiles.
7646  * @par Description
7647  * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox.  When the
7648  * command completes the provided mgmt callback function is
7649  * called.
7650  *
7651  * @param hw Hardware context.
7652  * @param cb Callback function to be called when the
7653  *      	  command completes.
7654  * @param ul_arg An argument that is passed to the callback
7655  *      	 function.
7656  *
7657  * @return
7658  * - OCS_HW_RTN_SUCCESS on success.
7659  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7660  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7661  *   context.
7662  * - OCS_HW_RTN_ERROR on any other error.
7663  */
7664 ocs_hw_rtn_e
7665 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg)
7666 {
7667 	uint8_t *mbxdata;
7668 	ocs_hw_get_profile_list_cb_arg_t *cb_arg;
7669 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7670 
7671 	/* Only supported on Skyhawk */
7672 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7673 		return OCS_HW_RTN_ERROR;
7674 	}
7675 
7676 	/* mbxdata holds the header of the command */
7677 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7678 	if (mbxdata == NULL) {
7679 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7680 		return OCS_HW_RTN_NO_MEMORY;
7681 	}
7682 
7683 
7684 	/* cb_arg holds the data that will be passed to the callback on completion */
7685 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT);
7686 	if (cb_arg == NULL) {
7687 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7688 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7689 		return OCS_HW_RTN_NO_MEMORY;
7690 	}
7691 
7692 	cb_arg->cb = cb;
7693 	cb_arg->arg = ul_arg;
7694 
7695 	/* dma_mem holds the non-embedded portion */
7696 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) {
7697 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7698 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7699 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7700 		return OCS_HW_RTN_NO_MEMORY;
7701 	}
7702 
7703 	if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) {
7704 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg);
7705 	}
7706 
7707 	if (rc != OCS_HW_RTN_SUCCESS) {
7708 		ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n");
7709 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7710 		ocs_dma_free(hw->os, &cb_arg->payload);
7711 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7712 	}
7713 
7714 	return rc;
7715 }
7716 
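/*
 * Example (illustrative sketch): listing the available profiles. When
 * a callback is supplied, ocs_hw_get_profile_list_cb() above hands the
 * allocated ocs_hw_profile_list_t to the callback, which then owns it
 * and must free it. "example_profile_list_cb" is hypothetical; the
 * descriptor fields are those populated by the completion handler.
 *
 * @code
 * static void
 * example_profile_list_cb(int32_t status, ocs_hw_profile_list_t *list, void *arg)
 * {
 * 	ocs_hw_t *hw = arg;
 * 	uint32_t i;
 *
 * 	if (status == 0) {
 * 		// only the first OCS_HW_MAX_PROFILES descriptors are copied
 * 		for (i = 0; i < list->num_descriptors &&
 * 		     i < OCS_HW_MAX_PROFILES; i++) {
 * 			ocs_log_debug(hw->os, "profile %d: %s\n",
 * 				      list->descriptors[i].profile_id,
 * 				      list->descriptors[i].profile_description);
 * 		}
 * 	}
 * 	ocs_free(hw->os, list, sizeof(*list));
 * }
 *
 * // ocs_hw_get_profile_list(hw, example_profile_list_cb, hw);
 * @endcode
 */
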
7717 typedef struct ocs_hw_get_active_profile_cb_arg_s {
7718 	ocs_get_active_profile_cb_t cb;
7719 	void *arg;
7720 } ocs_hw_get_active_profile_cb_arg_t;
7721 
7722 /**
7723  * @brief Called for the completion of get_active_profile for a
7724  *        user request.
7725  *
7726  * @param hw Hardware context.
7727  * @param status The status from the MQE
7728  * @param mqe Pointer to mailbox command buffer.
7729  * @param arg Pointer to a callback argument.
7730  *
7731  * @return Returns 0 on success, or a non-zero value on failure.
7732  */
7733 static int32_t
7734 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7735 {
7736 	ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg;
7737 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7738 	sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed;
7739 	uint32_t active_profile;
7740 
7741 	active_profile = response->active_profile_id;
7742 
7743 	if (cb_arg->cb) {
7744 		cb_arg->cb(status, active_profile, cb_arg->arg);
7745 	}
7746 
7747 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7748 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7749 
7750 	return 0;
7751 }
7752 
7753 /**
7754  * @ingroup io
7755  * @brief  Get the currently active profile.
7756  * @par Description
7757  * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
7758  * command completes the provided mgmt callback function is
7759  * called.
7760  *
7761  * @param hw Hardware context.
7762  * @param cb Callback function to be called when the
7763  *	     command completes.
7764  * @param ul_arg An argument that is passed to the callback
7765  *      	 function.
7766  *
7767  * @return
7768  * - OCS_HW_RTN_SUCCESS on success.
7769  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7770  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7771  *   context.
7772  * - OCS_HW_RTN_ERROR on any other error.
7773  */
7774 int32_t
7775 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg)
7776 {
7777 	uint8_t *mbxdata;
7778 	ocs_hw_get_active_profile_cb_arg_t *cb_arg;
7779 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7780 
7781 	/* Only supported on Skyhawk */
7782 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7783 		return OCS_HW_RTN_ERROR;
7784 	}
7785 
7786 	/* mbxdata holds the header of the command */
7787 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7788 	if (mbxdata == NULL) {
7789 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7790 		return OCS_HW_RTN_NO_MEMORY;
7791 	}
7792 
7793 	/* cb_arg holds the data that will be passed to the callback on completion */
7794 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT);
7795 	if (cb_arg == NULL) {
7796 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7797 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7798 		return OCS_HW_RTN_NO_MEMORY;
7799 	}
7800 
7801 	cb_arg->cb = cb;
7802 	cb_arg->arg = ul_arg;
7803 
7804 	if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7805 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg);
7806 	}
7807 
7808 	if (rc != OCS_HW_RTN_SUCCESS) {
7809 		ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n");
7810 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7811 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7812 	}
7813 
7814 	return rc;
7815 }
7816 
7817 typedef struct ocs_hw_get_nvparms_cb_arg_s {
7818 	ocs_get_nvparms_cb_t cb;
7819 	void *arg;
7820 } ocs_hw_get_nvparms_cb_arg_t;
7821 
7822 /**
7823  * @brief Called for the completion of get_nvparms for a
7824  *        user request.
7825  *
7826  * @param hw Hardware context.
7827  * @param status The status from the MQE.
7828  * @param mqe Pointer to mailbox command buffer.
7829  * @param arg Pointer to a callback argument.
7830  *
7831  * @return 0 on success, non-zero otherwise
7832  */
7833 static int32_t
7834 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7835 {
7836 	ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg;
7837 	sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe;
7838 
7839 	if (cb_arg->cb) {
7840 		cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa,
7841 				mbox_rsp->preferred_d_id, cb_arg->arg);
7842 	}
7843 
7844 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7845 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7846 
7847 	return 0;
7848 }
7849 
7850 /**
7851  * @ingroup io
7852  * @brief  Read non-volatile parms.
7853  * @par Description
7854  * Issues a SLI-4 READ_NVPARMS mailbox. When the
7855  * command completes the provided mgmt callback function is
7856  * called.
7857  *
7858  * @param hw Hardware context.
7859  * @param cb Callback function to be called when the
7860  *	  command completes.
7861  * @param ul_arg An argument that is passed to the callback
7862  *	  function.
7863  *
7864  * @return
7865  * - OCS_HW_RTN_SUCCESS on success.
7866  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7867  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7868  *   context.
7869  * - OCS_HW_RTN_ERROR on any other error.
7870  */
7871 int32_t
7872 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg)
7873 {
7874 	uint8_t *mbxdata;
7875 	ocs_hw_get_nvparms_cb_arg_t *cb_arg;
7876 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7877 
7878 	/* mbxdata holds the header of the command */
7879 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7880 	if (mbxdata == NULL) {
7881 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7882 		return OCS_HW_RTN_NO_MEMORY;
7883 	}
7884 
7885 	/* cb_arg holds the data that will be passed to the callback on completion */
7886 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT);
7887 	if (cb_arg == NULL) {
7888 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7889 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7890 		return OCS_HW_RTN_NO_MEMORY;
7891 	}
7892 
7893 	cb_arg->cb = cb;
7894 	cb_arg->arg = ul_arg;
7895 
7896 	if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7897 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg);
7898 	}
7899 
7900 	if (rc != OCS_HW_RTN_SUCCESS) {
7901 		ocs_log_test(hw->os, "READ_NVPARMS failed\n");
7902 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7903 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7904 	}
7905 
7906 	return rc;
7907 }
7908 
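/*
 * Example (illustrative sketch): reading the non-volatile WWPN/WWNN.
 * The callback prototype is assumed from the cb_arg->cb() invocation
 * in ocs_hw_get_nvparms_cb() above (wwpn/wwnn as 8-byte big-endian
 * arrays); "example_nvparms_cb" is hypothetical.
 *
 * @code
 * static void
 * example_nvparms_cb(int32_t status, uint8_t *wwpn, uint8_t *wwnn,
 * 		      uint8_t hard_alpa, uint32_t preferred_d_id, void *arg)
 * {
 * 	ocs_hw_t *hw = arg;
 *
 * 	if (status == 0) {
 * 		ocs_log_debug(hw->os,
 * 		    "wwpn %02x%02x%02x%02x%02x%02x%02x%02x hard_alpa=%d\n",
 * 		    wwpn[0], wwpn[1], wwpn[2], wwpn[3],
 * 		    wwpn[4], wwpn[5], wwpn[6], wwpn[7], hard_alpa);
 * 	}
 * }
 *
 * // ocs_hw_get_nvparms(hw, example_nvparms_cb, hw);
 * @endcode
 */
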
7909 typedef struct ocs_hw_set_nvparms_cb_arg_s {
7910 	ocs_set_nvparms_cb_t cb;
7911 	void *arg;
7912 } ocs_hw_set_nvparms_cb_arg_t;
7913 
7914 /**
7915  * @brief Called for the completion of set_nvparms for a
7916  *        user request.
7917  *
7918  * @param hw Hardware context.
7919  * @param status The status from the MQE.
7920  * @param mqe Pointer to mailbox command buffer.
7921  * @param arg Pointer to a callback argument.
7922  *
7923  * @return Returns 0 on success, or a non-zero value on failure.
7924  */
7925 static int32_t
7926 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7927 {
7928 	ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg;
7929 
7930 	if (cb_arg->cb) {
7931 		cb_arg->cb(status, cb_arg->arg);
7932 	}
7933 
7934 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7935 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7936 
7937 	return 0;
7938 }
7939 
7940 /**
7941  * @ingroup io
7942  * @brief  Write non-volatile parms.
7943  * @par Description
7944  * Issues a SLI-4 WRITE_NVPARMS mailbox. When the
7945  * command completes the provided mgmt callback function is
7946  * called.
7947  *
7948  * @param hw Hardware context.
7949  * @param cb Callback function to be called when the
7950  *	  command completes.
7951  * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
7952  * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
7953  * @param hard_alpa A hard AL_PA address setting used during loop
7954  * initialization. If no hard AL_PA is required, set to 0.
7955  * @param preferred_d_id A preferred D_ID address setting
7956  * that may be overridden with the CONFIG_LINK mailbox command.
7957  * If there is no preference, set to 0.
7958  * @param ul_arg An argument that is passed to the callback
7959  *	  function.
7960  *
7961  * @return
7962  * - OCS_HW_RTN_SUCCESS on success.
7963  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7964  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7965  *   context.
7966  * - OCS_HW_RTN_ERROR on any other error.
7967  */
7968 int32_t
7969 ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
7970 		uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
7971 {
7972 	uint8_t *mbxdata;
7973 	ocs_hw_set_nvparms_cb_arg_t *cb_arg;
7974 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7975 
7976 	/* mbxdata holds the header of the command */
7977 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7978 	if (mbxdata == NULL) {
7979 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7980 		return OCS_HW_RTN_NO_MEMORY;
7981 	}
7982 
7983 	/* cb_arg holds the data that will be passed to the callback on completion */
7984 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
7985 	if (cb_arg == NULL) {
7986 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7987 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7988 		return OCS_HW_RTN_NO_MEMORY;
7989 	}
7990 
7991 	cb_arg->cb = cb;
7992 	cb_arg->arg = ul_arg;
7993 
7994 	if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
7995 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
7996 	}
7997 
7998 	if (rc != OCS_HW_RTN_SUCCESS) {
7999 		ocs_log_test(hw->os, "SET_NVPARMS failed\n");
8000 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8001 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
8002 	}
8003 
8004 	return rc;
8005 }
8006 
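/*
 * Example (illustrative sketch): writing a new WWPN/WWNN pair with no
 * hard AL_PA and no preferred D_ID. The WWN byte values are
 * placeholders; "example_set_nvparms_done" is hypothetical.
 *
 * @code
 * static void
 * example_set_nvparms_done(int32_t status, void *arg)
 * {
 * 	ocs_hw_t *hw = arg;
 *
 * 	ocs_log_debug(hw->os, "write nvparms status=%d\n", status);
 * }
 *
 * uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90, 0xfa, 0x00, 0x00, 0x01 };
 * uint8_t wwnn[8] = { 0x20, 0x00, 0x00, 0x90, 0xfa, 0x00, 0x00, 0x01 };
 *
 * // ocs_hw_set_nvparms(hw, example_set_nvparms_done, wwpn, wwnn,
 * //		      0, 0, hw);
 * @endcode
 */
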
8007 
8008 
8009 /**
8010  * @brief Called to obtain the count for the specified type.
8011  *
8012  * @param hw Hardware context.
8013  * @param io_count_type IO count type (inuse, free, wait_free).
8014  *
8015  * @return Returns the number of IOs on the specified list type.
8016  */
8017 uint32_t
8018 ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
8019 {
8020 	ocs_hw_io_t *io = NULL;
8021 	uint32_t count = 0;
8022 
8023 	ocs_lock(&hw->io_lock);
8024 
8025 	switch (io_count_type) {
8026 	case OCS_HW_IO_INUSE_COUNT :
8027 		ocs_list_foreach(&hw->io_inuse, io) {
8028 			count++;
8029 		}
8030 		break;
8031 	case OCS_HW_IO_FREE_COUNT :
8032 		 ocs_list_foreach(&hw->io_free, io) {
8033 			 count++;
8034 		 }
8035 		 break;
8036 	case OCS_HW_IO_WAIT_FREE_COUNT :
8037 		 ocs_list_foreach(&hw->io_wait_free, io) {
8038 			 count++;
8039 		 }
8040 		 break;
8041 	case OCS_HW_IO_PORT_OWNED_COUNT:
8042 		 ocs_list_foreach(&hw->io_port_owned, io) {
8043 			 count++;
8044 		 }
8045 		 break;
8046 	case OCS_HW_IO_N_TOTAL_IO_COUNT :
8047 		count = hw->config.n_io;
8048 		break;
8049 	}
8050 
8051 	ocs_unlock(&hw->io_lock);
8052 
8053 	return count;
8054 }
8055 
8056 /**
8057  * @brief Called to obtain the count of produced RQs.
8058  *
8059  * @param hw Hardware context.
8060  *
8061  * @return Returns the number of RQs produced.
8062  */
8063 uint32_t
8064 ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
8065 {
8066 	uint32_t count = 0;
8067 	uint32_t i;
8068 	uint32_t j;
8069 
8070 	for (i = 0; i < hw->hw_rq_count; i++) {
8071 		hw_rq_t *rq = hw->hw_rq[i];
8072 		if (rq->rq_tracker != NULL) {
8073 			for (j = 0; j < rq->entry_count; j++) {
8074 				if (rq->rq_tracker[j] != NULL) {
8075 					count++;
8076 				}
8077 			}
8078 		}
8079 	}
8080 
8081 	return count;
8082 }
8083 
8084 typedef struct ocs_hw_set_active_profile_cb_arg_s {
8085 	ocs_set_active_profile_cb_t cb;
8086 	void *arg;
8087 } ocs_hw_set_active_profile_cb_arg_t;
8088 
8089 /**
8090  * @brief Called for the completion of set_active_profile for a
8091  *        user request.
8092  *
8093  * @param hw Hardware context.
8094  * @param status The status from the MQE
8095  * @param mqe Pointer to mailbox command buffer.
8096  * @param arg Pointer to a callback argument.
8097  *
8098  * @return Returns 0 on success, or a non-zero value on failure.
8099  */
8100 static int32_t
8101 ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
8102 {
8103 	ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
8104 
8105 	if (cb_arg->cb) {
8106 		cb_arg->cb(status, cb_arg->arg);
8107 	}
8108 
8109 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
8110 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8111 
8112 	return 0;
8113 }
8114 
8115 /**
8116  * @ingroup io
8117  * @brief  Set the currently active profile.
8118  * @par Description
8119  * Issues a SLI4 COMMON_SET_ACTIVE_PROFILE mailbox. When the
8120  * command completes the provided mgmt callback function is
8121  * called.
8122  *
8123  * @param hw Hardware context.
8124  * @param profile_id Profile ID to activate.
8125  * @param cb Callback function to be called when the command completes.
8126  * @param ul_arg An argument that is passed to the callback function.
8127  *
8128  * @return
8129  * - OCS_HW_RTN_SUCCESS on success.
8130  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
8131  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
8132  *   context.
8133  * - OCS_HW_RTN_ERROR on any other error.
8134  */
8135 int32_t
8136 ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
8137 {
8138 	uint8_t *mbxdata;
8139 	ocs_hw_set_active_profile_cb_arg_t *cb_arg;
8140 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
8141 
8142 	/* Only supported on Skyhawk */
8143 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
8144 		return OCS_HW_RTN_ERROR;
8145 	}
8146 
8147 	/* mbxdata holds the header of the command */
8148 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
8149 	if (mbxdata == NULL) {
8150 		ocs_log_err(hw->os, "failed to malloc mbox\n");
8151 		return OCS_HW_RTN_NO_MEMORY;
8152 	}
8153 
8154 
8155 	/* cb_arg holds the data that will be passed to the callback on completion */
8156 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
8157 	if (cb_arg == NULL) {
8158 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
8159 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8160 		return OCS_HW_RTN_NO_MEMORY;
8161 	}
8162 
8163 	cb_arg->cb = cb;
8164 	cb_arg->arg = ul_arg;
8165 
8166 	if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
8167 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
8168 	}
8169 
8170 	if (rc != OCS_HW_RTN_SUCCESS) {
8171 		ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
8172 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8173 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8174 	}
8175 
8176 	return rc;
8177 }
8178 
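/*
 * Example (illustrative sketch): activating profile 2. Note that an
 * activated profile typically takes effect only after the firmware is
 * next reset (an assumption about this hardware family; verify against
 * the SLI specification). "example_activate_done" is hypothetical.
 *
 * @code
 * static void
 * example_activate_done(int32_t status, void *arg)
 * {
 * 	ocs_hw_t *hw = arg;
 *
 * 	ocs_log_debug(hw->os, "set active profile status=%d\n", status);
 * }
 *
 * // ocs_hw_set_active_profile(hw, example_activate_done, 2, hw);
 * @endcode
 */
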
8179 
8180 
8181 /*
8182  * Private functions
8183  */
8184 
8185 /**
8186  * @brief Update the queue hash with the ID and index.
8187  *
8188  * @param hash Pointer to hash table.
8189  * @param id ID that was created.
8190  * @param index The index into the hash object.
8191  */
8192 static void
8193 ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
8194 {
8195 	uint32_t	hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
8196 
8197 	/*
8198 	 * Since the hash is always bigger than the number of queues, we
8199 	 * never have to worry about an infinite loop.
8200 	 */
8201 	while(hash[hash_index].in_use) {
8202 		hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8203 	}
8204 
8205 	/* not used, claim the entry */
8206 	hash[hash_index].id = id;
8207 	hash[hash_index].in_use = 1;
8208 	hash[hash_index].index = index;
8209 }
8210 
8211 /**
8212  * @brief Find index given queue ID.
8213  *
8214  * @param hash Pointer to hash table.
8215  * @param id ID to find.
8216  *
8217  * @return Returns the index into the HW cq array or -1 if not found.
8218  */
8219 int32_t
8220 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
8221 {
8222 	int32_t	rc = -1;
8223 	int32_t	index = id & (OCS_HW_Q_HASH_SIZE - 1);
8224 
8225 	/*
8226 	 * Since the hash is always bigger than the maximum number of Qs, we
8227 	 * never have to worry about an infinite loop. We will always find an
8228 	 * unused entry.
8229 	 */
8230 	do {
8231 		if (hash[index].in_use &&
8232 		    hash[index].id == id) {
8233 			rc = hash[index].index;
8234 		} else {
8235 			index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8236 		}
8237 	} while(rc == -1 && hash[index].in_use);
8238 
8239 	return rc;
8240 }
8241 
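/*
 * Example (illustrative): behavior of the open-addressed hash above.
 * The id-to-slot mask requires OCS_HW_Q_HASH_SIZE to be a power of
 * two; two IDs that share the low bits collide, and linear probing
 * resolves the collision.
 *
 * @code
 * ocs_queue_hash_t hash[OCS_HW_Q_HASH_SIZE];
 *
 * ocs_memset(hash, 0, sizeof(hash));
 * ocs_hw_queue_hash_add(hash, 5, 0);				// lands in slot 5
 * ocs_hw_queue_hash_add(hash, 5 + OCS_HW_Q_HASH_SIZE, 1);	// probes to slot 6
 *
 * // ocs_hw_queue_hash_find(hash, 5) == 0
 * // ocs_hw_queue_hash_find(hash, 5 + OCS_HW_Q_HASH_SIZE) == 1
 * @endcode
 */
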
8242 static int32_t
8243 ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain)
8244 {
8245 	int32_t		rc = OCS_HW_RTN_ERROR;
8246 	uint16_t	fcfi = UINT16_MAX;
8247 
8248 	if ((hw == NULL) || (domain == NULL)) {
8249 		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8250 				hw, domain);
8251 		return OCS_HW_RTN_ERROR;
8252 	}
8253 
8254 	fcfi = domain->fcf_indicator;
8255 
8256 	if (fcfi < SLI4_MAX_FCFI) {
8257 		uint16_t	fcf_index = UINT16_MAX;
8258 
8259 		ocs_log_debug(hw->os, "adding domain %p @ %#x\n",
8260 				domain, fcfi);
8261 		hw->domains[fcfi] = domain;
8262 
8263 		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8264 		if (hw->workaround.override_fcfi) {
8265 			if (hw->first_domain_idx < 0) {
8266 				hw->first_domain_idx = fcfi;
8267 			}
8268 		}
8269 
8270 		fcf_index = domain->fcf;
8271 
8272 		if (fcf_index < SLI4_MAX_FCF_INDEX) {
8273 			ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n",
8274 				      fcf_index, fcfi);
8275 			hw->fcf_index_fcfi[fcf_index] = fcfi;
8276 			rc = OCS_HW_RTN_SUCCESS;
8277 		} else {
8278 			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8279 				     fcf_index, SLI4_MAX_FCF_INDEX);
8280 			hw->domains[fcfi] = NULL;
8281 		}
8282 	} else {
8283 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8284 				fcfi, SLI4_MAX_FCFI);
8285 	}
8286 
8287 	return rc;
8288 }
8289 
8290 static int32_t
8291 ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain)
8292 {
8293 	int32_t		rc = OCS_HW_RTN_ERROR;
8294 	uint16_t	fcfi = UINT16_MAX;
8295 
8296 	if ((hw == NULL) || (domain == NULL)) {
8297 		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8298 				hw, domain);
8299 		return OCS_HW_RTN_ERROR;
8300 	}
8301 
8302 	fcfi = domain->fcf_indicator;
8303 
8304 	if (fcfi < SLI4_MAX_FCFI) {
8305 		uint16_t	fcf_index = UINT16_MAX;
8306 
8307 		ocs_log_debug(hw->os, "deleting domain %p @ %#x\n",
8308 				domain, fcfi);
8309 
8310 		if (domain != hw->domains[fcfi]) {
8311 			ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n",
8312 				     domain, hw->domains[fcfi]);
8313 			return OCS_HW_RTN_ERROR;
8314 		}
8315 
8316 		hw->domains[fcfi] = NULL;
8317 
8318 		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8319 		if (hw->workaround.override_fcfi) {
8320 			if (hw->first_domain_idx == fcfi) {
8321 				hw->first_domain_idx = -1;
8322 			}
8323 		}
8324 
8325 		fcf_index = domain->fcf;
8326 
8327 		if (fcf_index < SLI4_MAX_FCF_INDEX) {
8328 			if (hw->fcf_index_fcfi[fcf_index] == fcfi) {
8329 				hw->fcf_index_fcfi[fcf_index] = 0;
8330 				rc = OCS_HW_RTN_SUCCESS;
8331 			} else {
8332 				ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n",
8333 					     hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
8334 			}
8335 		} else {
8336 			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8337 				     fcf_index, SLI4_MAX_FCF_INDEX);
8338 		}
8339 	} else {
8340 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8341 				fcfi, SLI4_MAX_FCFI);
8342 	}
8343 
8344 	return rc;
8345 }
8346 
8347 ocs_domain_t *
8348 ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
8349 {
8350 
8351 	if (hw == NULL) {
8352 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8353 		return NULL;
8354 	}
8355 
8356 	if (fcfi < SLI4_MAX_FCFI) {
8357 		return hw->domains[fcfi];
8358 	} else {
8359 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8360 				fcfi, SLI4_MAX_FCFI);
8361 		return NULL;
8362 	}
8363 }
8364 
8365 static ocs_domain_t *
8366 ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
8367 {
8368 
8369 	if (hw == NULL) {
8370 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8371 		return NULL;
8372 	}
8373 
8374 	if (fcf_index < SLI4_MAX_FCF_INDEX) {
8375 		return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
8376 	} else {
8377 		ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8378 			     fcf_index, SLI4_MAX_FCF_INDEX);
8379 		return NULL;
8380 	}
8381 }
8382 
8383 /**
8384  * @brief Quarantine an IO by taking a reference count and adding it to the
8385  *        quarantine list. When the IO is popped from the list, the
8386  *        count is released and the IO may be freed if no other
8387  *        references remain.
8388  *
8389  *        @n @b Note: BZ 160124 - If this is a target write or an initiator read using
8390  *        DIF, then we must add the XRI to a quarantine list until we receive
8391  *        4 more completions of this same type.
8392  *
8393  * @param hw Hardware context.
8394  * @param wq Pointer to the WQ associated with the IO object to quarantine.
8395  * @param io Pointer to the io object to quarantine.
8396  */
8397 static void
8398 ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
8399 {
8400 	ocs_quarantine_info_t *q_info = &wq->quarantine_info;
8401 	uint32_t	index;
8402 	ocs_hw_io_t	*free_io = NULL;
8403 
8404 	/* return if the QX bit was clear */
8405 	if (!io->quarantine) {
8406 		return;
8407 	}
8408 
8409 	/* increment the IO refcount to prevent it from being freed before the quarantine is over */
8410 	if (ocs_ref_get_unless_zero(&io->ref) == 0) {
8411 		/* command no longer active */
8412 		ocs_log_debug(hw ? hw->os : NULL,
8413 			      "io not active xri=0x%x tag=0x%x\n",
8414 			      io->indicator, io->reqtag);
8415 		return;
8416 	}
8417 
8418 	sli_queue_lock(wq->queue);
8419 		index = q_info->quarantine_index;
8420 		free_io = q_info->quarantine_ios[index];
8421 		q_info->quarantine_ios[index] = io;
8422 		q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
8423 	sli_queue_unlock(wq->queue);
8424 
8425 	if (free_io != NULL) {
8426 		ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */
8427 	}
8428 }
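
/**
 * Minimal model of the quarantine ring used above (illustrative sketch only;
 * QDEPTH and quarantine_push() are hypothetical stand-ins, not driver code).
 * Each push evicts the entry stored QDEPTH completions earlier; the caller
 * then drops the reference that was held on the evicted IO.
 *
 * @code
 * #define QDEPTH 4
 * static void *ring[QDEPTH];
 * static uint32_t ring_index;
 *
 * static void *
 * quarantine_push(void *io)
 * {
 * 	void *evicted = ring[ring_index];
 *
 * 	ring[ring_index] = io;
 * 	ring_index = (ring_index + 1) % QDEPTH;
 * 	return evicted;	// caller: ocs_ref_put() on this IO if non-NULL
 * }
 * @endcode
 */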
8429 
8430 /**
8431  * @brief Process entries on the given completion queue.
8432  *
8433  * @param hw Hardware context.
8434  * @param cq Pointer to the HW completion queue object.
8435  *
8436  * @return None.
8437  */
8438 void
8439 ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
8440 {
8441 	uint8_t		cqe[sizeof(sli4_mcqe_t)];
8442 	uint16_t	rid = UINT16_MAX;
8443 	sli4_qentry_e	ctype;		/* completion type */
8444 	int32_t		status;
8445 	uint32_t	n_processed = 0;
8446 	time_t		tstart;
8447 	time_t		telapsed;
8448 
8449 	tstart = ocs_msectime();
8450 
8451 	while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
8452 		status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
8453 		/*
8454 		 * The sign of status is significant. If status is:
8455 		 * == 0 : call completed correctly and the CQE indicated success
8456 		 *  > 0 : call completed correctly and the CQE indicated an error
8457 		 *  < 0 : call failed and no information is available about the CQE
8458 		 */
8459 		if (status < 0) {
8460 			if (status == -2) {
8461 				/* Notification that an entry was consumed, but not completed */
8462 				continue;
8463 			}
8464 
8465 			break;
8466 		}
8467 
8468 		switch (ctype) {
8469 		case SLI_QENTRY_ASYNC:
8470 			CPUTRACE("async");
8471 			sli_cqe_async(&hw->sli, cqe);
8472 			break;
8473 		case SLI_QENTRY_MQ:
8474 			/*
8475 			 * Process MQ entry. Note there is no way to determine
8476 			 * the MQ_ID from the completion entry.
8477 			 */
8478 			CPUTRACE("mq");
8479 			ocs_hw_mq_process(hw, status, hw->mq);
8480 			break;
8481 		case SLI_QENTRY_OPT_WRITE_CMD:
8482 			ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe);
8483 			break;
8484 		case SLI_QENTRY_OPT_WRITE_DATA:
8485 			ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe);
8486 			break;
8487 		case SLI_QENTRY_WQ:
8488 			CPUTRACE("wq");
8489 			ocs_hw_wq_process(hw, cq, cqe, status, rid);
8490 			break;
8491 		case SLI_QENTRY_WQ_RELEASE: {
8492 			uint32_t wq_id = rid;
8493 			int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
8494 
8495 			if (unlikely(index < 0)) {
8496 				ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n",
8497 					    index, rid);
8498 				break;
8499 			}
8500 
8501 			hw_wq_t *wq = hw->hw_wq[index];
8502 
8503 			/* Submit any HW IOs that are on the WQ pending list */
8504 			hw_wq_submit_pending(wq, wq->wqec_set_count);
8505 
8506 			break;
8507 		}
8508 
8509 		case SLI_QENTRY_RQ:
8510 			CPUTRACE("rq");
8511 			ocs_hw_rqpair_process_rq(hw, cq, cqe);
8512 			break;
8513 		case SLI_QENTRY_XABT: {
8514 			CPUTRACE("xabt");
8515 			ocs_hw_xabt_process(hw, cq, cqe, rid);
8516 		break;
8518 	}
8519 		default:
8520 			ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid);
8521 			break;
8522 		}
8523 
8524 		n_processed++;
8525 		if (n_processed == cq->queue->proc_limit) {
8526 			break;
8527 		}
8528 
8529 		if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
8530 			sli_queue_arm(&hw->sli, cq->queue, FALSE);
8531 		}
8532 	}
8533 
8534 	sli_queue_arm(&hw->sli, cq->queue, TRUE);
8535 
8536 	if (n_processed > cq->queue->max_num_processed) {
8537 		cq->queue->max_num_processed = n_processed;
8538 	}
8539 	telapsed = ocs_msectime() - tstart;
8540 	if (telapsed > cq->queue->max_process_time) {
8541 		cq->queue->max_process_time = telapsed;
8542 	}
8543 }
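
/**
 * Sketch of the sli_cq_parse() status convention handled in the loop above
 * (illustrative only; mirrors ocs_hw_cq_process()):
 *
 * @code
 * int32_t status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
 *
 * if (status == 0) {
 * 	// CQE parsed and indicates success: dispatch on ctype
 * } else if (status > 0) {
 * 	// CQE parsed but indicates an error: still dispatch on ctype
 * } else if (status == -2) {
 * 	// entry consumed but not completed: keep reading the queue
 * } else {
 * 	// parse failed: stop processing this queue
 * }
 * @endcode
 */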
8544 
8545 /**
8546  * @brief Process WQ completion queue entries.
8547  *
8548  * @param hw Hardware context.
8549  * @param cq Pointer to the HW completion queue object.
8550  * @param cqe Pointer to the WQ completion queue entry.
8551  * @param status Completion status.
8552  * @param rid Resource ID (IO tag).
8553  *
8554  * @return None.
8555  */
8556 void
8557 ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid)
8558 {
8559 	hw_wq_callback_t *wqcb;
8560 
8561 	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
8562 			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8563 
8564 	if(rid == OCS_HW_REQUE_XRI_REGTAG) {
8565 		if(status) {
8566 			ocs_log_err(hw->os, "reque xri failed, status = %d \n", status);
8567 		}
8568 		return;
8569 	}
8570 
8571 	wqcb = ocs_hw_reqtag_get_instance(hw, rid);
8572 	if (wqcb == NULL) {
8573 		ocs_log_err(hw->os, "invalid request tag: x%x\n", rid);
8574 		return;
8575 	}
8576 
8577 	if (wqcb->callback == NULL) {
8578 		ocs_log_err(hw->os, "wqcb callback is NULL\n");
8579 		return;
8580 	}
8581 
8582 	(*wqcb->callback)(wqcb->arg, cqe, status);
8583 }
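
/**
 * Sketch of how a WQE completion finds its owner through the request-tag
 * pool used above (illustrative; my_done_fn, my_arg, and the request_tag
 * field name are hypothetical placeholders):
 *
 * @code
 * hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, my_done_fn, my_arg);
 *
 * wqe->request_tag = wqcb->instance_index;   // carried back in the WCQE as "rid"
 * // ... on completion, ocs_hw_wq_process() maps rid back to wqcb and
 * // invokes my_done_fn(my_arg, cqe, status)
 * @endcode
 */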
8584 
8585 /**
8586  * @brief Process WQ completions for IO requests
8587  *
8588  * @param arg Generic callback argument
8589  * @param cqe Pointer to completion queue entry
8590  * @param status Completion status
8591  *
8592  * @par Description
8593  * @n @b Note: Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized
8594  * in ocs_hw_setup_io(), and doesn't need to be returned to the hw->wq_reqtag_pool.
8595  *
8596  * @return None.
8597  */
8598 static void
8599 ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status)
8600 {
8601 	ocs_hw_io_t *io = arg;
8602 	ocs_hw_t *hw = io->hw;
8603 	sli4_fc_wcqe_t *wcqe = (void *)cqe;
8604 	uint32_t	len = 0;
8605 	uint32_t ext = 0;
8606 	uint8_t out_of_order_axr_cmd = 0;
8607 	uint8_t out_of_order_axr_data = 0;
8608 	uint8_t lock_taken = 0;
8609 #if defined(OCS_DISC_SPIN_DELAY)
8610 	uint32_t delay = 0;
8611 	char prop_buf[32];
8612 #endif
8613 
8614 	/*
8615 	 * For the primary IO, this will also be used for the
8616 	 * response. So it is important to only set/clear this
8617 	 * flag on the first data phase of the IO because
8618 	 * subsequent phases will be done on the secondary XRI.
8619 	 */
8620 	if (io->quarantine && io->quarantine_first_phase) {
8621 		io->quarantine = (wcqe->qx == 1);
8622 		ocs_hw_io_quarantine(hw, io->wq, io);
8623 	}
8624 	io->quarantine_first_phase = FALSE;
8625 
8626 	/* BZ 161832 - free secondary HW IO */
8627 	if (io->sec_hio != NULL &&
8628 	    io->sec_hio->quarantine) {
8629 		/*
8630 		 * If the quarantine flag is set on the
8631 		 * IO, then set it on the secondary IO
8632 		 * based on the quarantine XRI (QX) bit
8633 		 * sent by the FW.
8634 		 */
8635 		io->sec_hio->quarantine = (wcqe->qx == 1);
8636 		/* use the primary io->wq because it is not set on the secondary IO. */
8637 		ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
8638 	}
8639 
8640 	ocs_hw_remove_io_timed_wqe(hw, io);
8641 
8642 	/* clear xbusy flag if WCQE[XB] is clear */
8643 	if (io->xbusy && wcqe->xb == 0) {
8644 		io->xbusy = FALSE;
8645 	}
8646 
8647 	/* get extended CQE status */
8648 	switch (io->type) {
8649 	case OCS_HW_BLS_ACC:
8650 	case OCS_HW_BLS_ACC_SID:
8651 		break;
8652 	case OCS_HW_ELS_REQ:
8653 		sli_fc_els_did(&hw->sli, cqe, &ext);
8654 		len = sli_fc_response_length(&hw->sli, cqe);
8655 		break;
8656 	case OCS_HW_ELS_RSP:
8657 	case OCS_HW_ELS_RSP_SID:
8658 	case OCS_HW_FC_CT_RSP:
8659 		break;
8660 	case OCS_HW_FC_CT:
8661 		len = sli_fc_response_length(&hw->sli, cqe);
8662 		break;
8663 	case OCS_HW_IO_TARGET_WRITE:
8664 		len = sli_fc_io_length(&hw->sli, cqe);
8665 #if defined(OCS_DISC_SPIN_DELAY)
8666 		if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
8667 			delay = ocs_strtoul(prop_buf, 0, 0);
8668 			ocs_udelay(delay);
8669 		}
8670 #endif
8671 		break;
8672 	case OCS_HW_IO_TARGET_READ:
8673 		len = sli_fc_io_length(&hw->sli, cqe);
8674 		/*
8675 		 * if_type == 2 seems to return 0 "total length placed" on
8676 		 * FCP_TSEND64_WQE completions. If this appears to happen,
8677 		 * use the CTIO data transfer length instead.
8678 		 */
8679 		if (hw->workaround.retain_tsend_io_length && !len && !status) {
8680 			len = io->length;
8681 		}
8682 
8683 		break;
8684 	case OCS_HW_IO_TARGET_RSP:
8685 		if(io->is_port_owned) {
8686 			ocs_lock(&io->axr_lock);
8687 			lock_taken = 1;
8688 			if(io->axr_buf->call_axr_cmd) {
8689 				out_of_order_axr_cmd = 1;
8690 			}
8691 			if(io->axr_buf->call_axr_data) {
8692 				out_of_order_axr_data = 1;
8693 			}
8694 		}
8695 		break;
8696 	case OCS_HW_IO_INITIATOR_READ:
8697 		len = sli_fc_io_length(&hw->sli, cqe);
8698 		break;
8699 	case OCS_HW_IO_INITIATOR_WRITE:
8700 		len = sli_fc_io_length(&hw->sli, cqe);
8701 		break;
8702 	case OCS_HW_IO_INITIATOR_NODATA:
8703 		break;
8704 	case OCS_HW_IO_DNRX_REQUEUE:
8705 		/* release the count for re-posting the buffer */
8706 		//ocs_hw_io_free(hw, io);
8707 		break;
8708 	default:
8709 		ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
8710 			     io->type, io->indicator);
8711 		break;
8712 	}
8713 	if (status) {
8714 		ext = sli_fc_ext_status(&hw->sli, cqe);
8715 		/* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
8716 		 * abort exchange if an error occurred and exchange is still busy.
8717 		 */
8718 		if (hw->config.i_only_aab &&
8719 		    (ocs_hw_iotype_is_originator(io->type)) &&
8720 		    (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
8721 			ocs_hw_rtn_e rc;
8722 
8723 			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
8724 				      io->indicator, io->reqtag);
8725 			/*
8726 			 * Because the initiator will not issue another IO phase, it would be OK to issue the
8727 			 * callback on the abort completion, but for consistency with the target, wait for the
8728 			 * XRI_ABORTED CQE to issue the IO callback.
8729 			 */
8730 			rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
8731 
8732 			if (rc == OCS_HW_RTN_SUCCESS) {
8733 				/* latch status to return after abort is complete */
8734 				io->status_saved = 1;
8735 				io->saved_status = status;
8736 				io->saved_ext = ext;
8737 				io->saved_len = len;
8738 				goto exit_ocs_hw_wq_process_io;
8739 			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8740 				/*
8741 				 * Already being aborted by someone else (ABTS
8742 				 * perhaps). Just fall through and return original
8743 				 * error.
8744 				 */
8745 				ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8746 					      io->indicator, io->reqtag);
8747 
8748 			} else {
8749 				/* Failed to abort for some other reason, log error */
8750 				ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8751 					     io->indicator, io->reqtag, rc);
8752 			}
8753 		}
8754 
8755 		/*
8756 	 * If this is not an originator IO and XB is set, issue an abort for the IO from within the HW
8757 	 */
8758 	if (!ocs_hw_iotype_is_originator(io->type) && wcqe->xb) {
8759 			ocs_hw_rtn_e rc;
8760 
8761 			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag);
8762 
8763 			/*
8764 			 * Because targets may send a response when the IO completes using the same XRI, we must
8765 			 * wait for the XRI_ABORTED CQE to issue the IO callback
8766 			 */
8767 			rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
8768 			if (rc == OCS_HW_RTN_SUCCESS) {
8769 				/* latch status to return after abort is complete */
8770 				io->status_saved = 1;
8771 				io->saved_status = status;
8772 				io->saved_ext = ext;
8773 				io->saved_len = len;
8774 				goto exit_ocs_hw_wq_process_io;
8775 			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8776 				/*
8777 				 * Already being aborted by someone else (ABTS
8778 				 * perhaps). Just fall through and return original
8779 				 * error.
8780 				 */
8781 				ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8782 					      io->indicator, io->reqtag);
8783 
8784 			} else {
8785 				/* Failed to abort for some other reason, log error */
8786 				ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8787 					     io->indicator, io->reqtag, rc);
8788 			}
8789 		}
8790 	}
8791 	/* BZ 161832 - free secondary HW IO */
8792 	if (io->sec_hio != NULL) {
8793 		ocs_hw_io_free(hw, io->sec_hio);
8794 		io->sec_hio = NULL;
8795 	}
8796 
8797 	if (io->done != NULL) {
8798 		ocs_hw_done_t  done = io->done;
8799 		void		*arg = io->arg;
8800 
8801 		io->done = NULL;
8802 
8803 		if (io->status_saved) {
8804 			/* use latched status if exists */
8805 			status = io->saved_status;
8806 			len = io->saved_len;
8807 			ext = io->saved_ext;
8808 			io->status_saved = 0;
8809 		}
8810 
8811 		/* Restore default SGL */
8812 		ocs_hw_io_restore_sgl(hw, io);
8813 		done(io, io->rnode, len, status, ext, arg);
8814 	}
8815 
8816 	if(out_of_order_axr_cmd) {
8817 		/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8818 		if (hw->config.bounce) {
8819 			fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt;
8820 			uint32_t s_id = fc_be24toh(hdr->s_id);
8821 			uint32_t d_id = fc_be24toh(hdr->d_id);
8822 			uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
8823 			if (hw->callback.bounce != NULL) {
8824 				(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id);
8825 			}
8826 		} else {
8827 			hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq);
8828 		}
8829 
8830 		if(out_of_order_axr_data) {
8831 			/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8832 			if (hw->config.bounce) {
8833 				fc_header_t *hdr = io->axr_buf->seq.header->dma.virt;
8834 				uint32_t s_id = fc_be24toh(hdr->s_id);
8835 				uint32_t d_id = fc_be24toh(hdr->d_id);
8836 				uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
8837 				if (hw->callback.bounce != NULL) {
8838 					(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id);
8839 				}
8840 			} else {
8841 				hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq);
8842 			}
8843 		}
8844 	}
8845 
8846 exit_ocs_hw_wq_process_io:
8847 	if(lock_taken) {
8848 		ocs_unlock(&io->axr_lock);
8849 	}
8850 }
8851 
8852 /**
8853  * @brief Process WQ completions for abort requests.
8854  *
8855  * @param arg Generic callback argument.
8856  * @param cqe Pointer to completion queue entry.
8857  * @param status Completion status.
8858  *
8859  * @return None.
8860  */
8861 static void
8862 ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
8863 {
8864 	ocs_hw_io_t *io = arg;
8865 	ocs_hw_t *hw = io->hw;
8866 	uint32_t ext = 0;
8867 	uint32_t len = 0;
8868 	hw_wq_callback_t *wqcb;
8869 
8870 	/*
8871 	 * For IOs that were aborted internally, we may need to issue the callback here depending
8872 	 * on whether an XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI,
8873 	 * issue the callback now.
8874 	 */
8875 	ext = sli_fc_ext_status(&hw->sli, cqe);
8876 	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
8877 	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
8878 		io->done != NULL) {
8879 		ocs_hw_done_t  done = io->done;
8880 		void		*arg = io->arg;
8881 
8882 		io->done = NULL;
8883 
8884 		/*
8885 		 * Use latched status as this is always saved for an internal abort
8886 		 *
8887 		 * Note: We won't have both a done and abort_done function, so don't worry about
8888 		 *       clobbering the len, status and ext fields.
8889 		 */
8890 		status = io->saved_status;
8891 		len = io->saved_len;
8892 		ext = io->saved_ext;
8893 		io->status_saved = 0;
8894 		done(io, io->rnode, len, status, ext, arg);
8895 	}
8896 
8897 	if (io->abort_done != NULL) {
8898 		ocs_hw_done_t  done = io->abort_done;
8899 		void		*arg = io->abort_arg;
8900 
8901 		io->abort_done = NULL;
8902 
8903 		done(io, io->rnode, len, status, ext, arg);
8904 	}
8905 	ocs_lock(&hw->io_abort_lock);
8906 		/* clear abort bit to indicate abort is complete */
8907 		io->abort_in_progress = 0;
8908 	ocs_unlock(&hw->io_abort_lock);
8909 
8910 	/* Free the WQ callback */
8911 	ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
8912 	wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
8913 	ocs_hw_reqtag_free(hw, wqcb);
8914 
8915 	/*
8916 	 * Call ocs_hw_io_free() because this releases the WQ reservation as
8917 	 * well as doing the refcount put. Don't duplicate the code here.
8918 	 */
8919 	(void)ocs_hw_io_free(hw, io);
8920 }
8921 
8922 /**
8923  * @brief Process XABT completions
8924  *
8925  * @param hw Hardware context.
8926  * @param cq Pointer to the HW completion queue object.
8927  * @param cqe Pointer to the completion queue entry.
8928  * @param rid Resource ID (IO tag).
8929  *
8931  * @return None.
8932  */
8933 void
8934 ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
8935 {
8936 	/* look up the IO given the XRI (resource ID) */
8937 	ocs_hw_io_t *io = NULL;
8938 
8939 	io = ocs_hw_io_lookup(hw, rid);
8940 
8941 	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
8942 			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8943 	if (io == NULL) {
8944 		/* IO lookup failure should never happen */
8945 		ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
8946 		return;
8947 	}
8948 
8949 	if (!io->xbusy) {
8950 		ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
8951 	} else {
8952 		/* mark IO as no longer busy */
8953 		io->xbusy = FALSE;
8954 	}
8955 
8956 	if (io->is_port_owned) {
8957 		ocs_lock(&hw->io_lock);
8958 		/* Take a reference so that the callback below does not free the io before the requeue */
8959 		ocs_ref_get(&io->ref);
8960 		ocs_unlock(&hw->io_lock);
8961 	}
8962 
8965 	/* For IOs that were aborted internally, we need to issue any pending callback here. */
8966 	if (io->done != NULL) {
8967 		ocs_hw_done_t  done = io->done;
8968 		void		*arg = io->arg;
8969 
8970 		/* Use latched status as this is always saved for an internal abort */
8971 		int32_t status = io->saved_status;
8972 		uint32_t len = io->saved_len;
8973 		uint32_t ext = io->saved_ext;
8974 
8975 		io->done = NULL;
8976 		io->status_saved = 0;
8977 
8978 		done(io, io->rnode, len, status, ext, arg);
8979 	}
8980 
8981 	/* Check to see if this is a port owned XRI */
8982 	if (io->is_port_owned) {
8983 		ocs_lock(&hw->io_lock);
8984 		ocs_hw_reque_xri(hw, io);
8985 		ocs_unlock(&hw->io_lock);
8986 		/* Not handling the reque xri completion here, free the io */
8987 		ocs_hw_io_free(hw, io);
8988 		return;
8989 	}
8990 
8991 	ocs_lock(&hw->io_lock);
8992 		if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
8993 			/* if on wait_free list, caller has already freed IO;
8994 			 * remove from wait_free list and add to free list.
8995 			 * if on in-use list, already marked as no longer busy;
8996 			 * just leave there and wait for caller to free.
8997 			 */
8998 			if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
8999 				io->state = OCS_HW_IO_STATE_FREE;
9000 				ocs_list_remove(&hw->io_wait_free, io);
9001 				ocs_hw_io_free_move_correct_list(hw, io);
9002 			}
9003 		}
9004 	ocs_unlock(&hw->io_lock);
9005 }
9006 
9007 /**
9008  * @brief Adjust the number of WQs and CQs within the HW.
9009  *
9010  * @par Description
9011  * Calculates the number of WQs and associated CQs needed in the HW based on
9012  * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
9013  * MQ.
9014  *
9015  * @param hw Hardware context allocated by the caller.
9016  */
9017 static void
9018 ocs_hw_adjust_wqs(ocs_hw_t *hw)
9019 {
9020 	uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
9021 	uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
9022 	uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];
9023 
9024 	/*
9025 	 * possibly adjust the size of the WQs so that the CQ is twice as
9026 	 * big as the WQ to allow for 2 completions per IO. This allows us to
9027 	 * handle multi-phase as well as aborts.
9028 	 */
9029 	if (max_cq_entries < max_wq_entries * 2) {
9030 		max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
9031 	}
9032 
9033 	/*
9034 	 * Calculate the number of WQs to use based on the number of IOs.
9035 	 *
9036 	 * Note: We need to reserve room for aborts which must be sent down
9037 	 *       the same WQ as the IO. So we allocate enough WQ space to
9038 	 *       handle 2 times the number of IOs. Half of the space will be
9039 	 *       used for normal IOs and the other half is reserved for aborts.
9040 	 */
9041 	hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;
9042 
9043 	/*
9044 	 * For performance reasons, it is best to use a minimum of 4 WQs
9045 	 * for BE3 and Skyhawk.
9046 	 */
9047 	if (hw->config.n_wq < 4 &&
9048 	    SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
9049 		hw->config.n_wq = 4;
9050 	}
9051 
9052 	/*
9053 	 * For dual-chute support, we need to have at least one WQ per chute.
9054 	 */
9055 	if (hw->config.n_wq < 2 &&
9056 	    ocs_hw_get_num_chutes(hw) > 1) {
9057 		hw->config.n_wq = 2;
9058 	}
9059 
9060 	/* make sure we haven't exceeded the max supported in the HW */
9061 	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
9062 		hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
9063 	}
9064 
9065 	/* make sure we haven't exceeded the chip maximum */
9066 	if (hw->config.n_wq > max_wq_num) {
9067 		hw->config.n_wq = max_wq_num;
9068 	}
9069 
9070 	/*
9071 	 * Using the Queue Topology string, we divide by the number of chutes
9072 	 */
9073 	hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
9074 }
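
/*
 * Worked example for the sizing above (hypothetical numbers): with
 * n_io = 1024 and max_wq_entries = 512, reserving two WQ entries per IO
 * (one for the IO, one for a possible abort) gives
 *
 *	n_wq = ((1024 * 2) + (512 - 1)) / 512 = 4
 *
 * i.e. a ceiling division of 2 * n_io by the per-WQ entry count, before
 * the BE3/Skyhawk, dual-chute, and maximum clamps are applied.
 */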
9075 
9076 static int32_t
9077 ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
9078 {
9079 	ocs_command_ctx_t *ctx = NULL;
9080 
9081 	ocs_lock(&hw->cmd_lock);
9082 		if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
9083 			ocs_log_err(hw->os, "XXX no command context?!?\n");
9084 			ocs_unlock(&hw->cmd_lock);
9085 			return -1;
9086 		}
9087 
9088 		hw->cmd_head_count--;
9089 
9090 		/* Post any pending requests */
9091 		ocs_hw_cmd_submit_pending(hw);
9092 
9093 	ocs_unlock(&hw->cmd_lock);
9094 
9095 	if (ctx->cb) {
9096 		if (ctx->buf) {
9097 			ocs_memcpy(ctx->buf, mqe, size);
9098 		}
9099 		ctx->cb(hw, status, ctx->buf, ctx->arg);
9100 	}
9101 
9102 	ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
9103 	ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));
9104 
9105 	return 0;
9106 }
9107 
9111 /**
9112  * @brief Process entries on the given mailbox queue.
9113  *
9114  * @param hw Hardware context.
9115  * @param status CQE status.
9116  * @param mq Pointer to the mailbox queue object.
9117  *
9118  * @return Returns 0 on success, or a non-zero value on failure.
9119  */
9120 static int32_t
9121 ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
9122 {
9123 	uint8_t		mqe[SLI4_BMBX_SIZE];
9124 
9125 	if (!sli_queue_read(&hw->sli, mq, mqe)) {
9126 		ocs_hw_command_process(hw, status, mqe, mq->size);
9127 	}
9128 
9129 	return 0;
9130 }
9131 
9132 /**
9133  * @brief Read a FCF table entry.
9134  *
9135  * @param hw Hardware context.
9136  * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
9137  * read and the next_index field from the FCOE_READ_FCF_TABLE command
9138  * for subsequent reads.
9139  *
9140  * @return Returns 0 on success, or a non-zero value on failure.
9141  */
9142 static ocs_hw_rtn_e
9143 ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
9144 {
9145 	uint8_t		*buf = NULL;
9146 	int32_t		rc = OCS_HW_RTN_ERROR;
9147 
9148 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9149 	if (!buf) {
9150 		ocs_log_err(hw->os, "no buffer for command\n");
9151 		return OCS_HW_RTN_NO_MEMORY;
9152 	}
9153 
9154 	if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
9155 			index)) {
9156 		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
9157 	}
9158 
9159 	if (rc != OCS_HW_RTN_SUCCESS) {
9160 		ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
9161 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9162 	}
9163 
9164 	return rc;
9165 }
9166 
9167 /**
9168  * @brief Callback function for the FCOE_READ_FCF_TABLE command.
9169  *
9170  * @par Description
9171  * Note that the caller has allocated:
9172  *  - DMA memory to hold the table contents
9173  *  - DMA memory structure
9174  *  - Command/results buffer
9175  *  .
9176  * Each of these must be freed here.
9177  *
9178  * @param hw Hardware context.
9179  * @param status Hardware status.
9180  * @param mqe Pointer to the mailbox command/results buffer.
9181  * @param arg Pointer to the DMA memory structure.
9182  *
9183  * @return Returns 0 on success, or a non-zero value on failure.
9184  */
9185 static int32_t
9186 ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9187 {
9188 	ocs_dma_t	*dma = arg;
9189 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9190 
9191 	if (status || hdr->status) {
9192 		ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n",
9193 				status, hdr->status);
9194 	} else if (dma->virt) {
9195 		sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt;
9196 
9197 		/* if FC or FCOE and FCF entry valid, process it */
9198 		if (read_fcf->fcf_entry.fc ||
9199 				(read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) {
9200 			if (hw->callback.domain != NULL) {
9201 				ocs_domain_record_t drec = {0};
9202 
9203 				if (read_fcf->fcf_entry.fc) {
9204 					/*
9205 					 * This is a pseudo FCF entry. Create a domain
9206 					 * record based on the read topology information
9207 					 */
9208 					drec.speed = hw->link.speed;
9209 					drec.fc_id = hw->link.fc_id;
9210 					drec.is_fc = TRUE;
9211 					if (SLI_LINK_TOPO_LOOP == hw->link.topology) {
9212 						drec.is_loop = TRUE;
9213 						ocs_memcpy(drec.map.loop, hw->link.loop_map,
9214 							   sizeof(drec.map.loop));
9215 					} else if (SLI_LINK_TOPO_NPORT == hw->link.topology) {
9216 						drec.is_nport = TRUE;
9217 					}
9218 				} else {
9219 					drec.index = read_fcf->fcf_entry.fcf_index;
9220 					drec.priority = read_fcf->fcf_entry.fip_priority;
9221 
9222 					/* copy address, wwn and vlan_bitmap */
9223 					ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address,
9224 						   sizeof(drec.address));
9225 					ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id,
9226 						   sizeof(drec.wwn));
9227 					ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap,
9228 						   sizeof(drec.map.vlan));
9229 
9230 					drec.is_ethernet = TRUE;
9231 					drec.is_nport = TRUE;
9232 				}
9233 
9234 				hw->callback.domain(hw->args.domain,
9235 						OCS_HW_DOMAIN_FOUND,
9236 						&drec);
9237 			}
9238 		} else {
9239 			/* if FCOE and FCF is not valid, ignore it */
9240 			ocs_log_test(hw->os, "ignore invalid FCF entry\n");
9241 		}
9242 
9243 		if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
9244 			ocs_hw_read_fcf(hw, read_fcf->next_index);
9245 		}
9246 	}
9247 
9248 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9249 	//ocs_dma_free(hw->os, dma);
9250 	//ocs_free(hw->os, dma, sizeof(ocs_dma_t));
9251 
9252 	return 0;
9253 }
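
/*
 * Sketch of the FCF table walk driven by the two functions above
 * (entry numbers hypothetical):
 *
 *	ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
 *	  -> ocs_hw_cb_read_fcf(): reports the entry, next_index = 1
 *	ocs_hw_read_fcf(hw, 1);
 *	  -> ocs_hw_cb_read_fcf(): reports the entry, next_index = SLI4_FCOE_FCF_TABLE_LAST
 *	(walk terminates)
 */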
9254 
9255 /**
9256  * @brief Callback function for the SLI link events.
9257  *
9258  * @par Description
9259  * This function allocates memory which must be freed in its callback.
9260  *
9261  * @param ctx Hardware context pointer (that is, ocs_hw_t *).
9262  * @param e Event structure pointer (that is, sli4_link_event_t *).
9263  *
9264  * @return Returns 0 on success, or a non-zero value on failure.
9265  */
9266 static int32_t
9267 ocs_hw_cb_link(void *ctx, void *e)
9268 {
9269 	ocs_hw_t	*hw = ctx;
9270 	sli4_link_event_t *event = e;
9271 	ocs_domain_t	*d = NULL;
9272 	uint32_t	i = 0;
9273 	int32_t		rc = OCS_HW_RTN_ERROR;
9274 	ocs_t 		*ocs = hw->os;
9275 
9276 	ocs_hw_link_event_init(hw);
9277 
9278 	switch (event->status) {
9279 	case SLI_LINK_STATUS_UP:
9280 
9281 		hw->link = *event;
9282 
9283 		if (SLI_LINK_TOPO_NPORT == event->topology) {
9284 			device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed);
9285 			ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9286 		} else if (SLI_LINK_TOPO_LOOP == event->topology) {
9287 			uint8_t	*buf = NULL;
9288 			device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed);
9289 
9290 			buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9291 			if (!buf) {
9292 				ocs_log_err(hw->os, "no buffer for command\n");
9293 				break;
9294 			}
9295 
9296 			if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) {
9297 				rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL);
9298 			}
9299 
9300 			if (rc != OCS_HW_RTN_SUCCESS) {
9301 				ocs_log_test(hw->os, "READ_TOPOLOGY failed\n");
9302 				ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9303 			}
9304 		} else {
9305 			device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n",
9306 					event->topology, event->speed);
9307 		}
9308 		break;
9309 	case SLI_LINK_STATUS_DOWN:
9310 		device_printf(ocs->dev, "Link Down\n");
9311 
9312 		hw->link.status = event->status;
9313 
9314 		for (i = 0; i < SLI4_MAX_FCFI; i++) {
9315 			d = hw->domains[i];
9316 			if (d != NULL &&
9317 			    hw->callback.domain != NULL) {
9318 				hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
9319 			}
9320 		}
9321 		break;
9322 	default:
9323 		ocs_log_test(hw->os, "unhandled link status %#x\n", event->status);
9324 		break;
9325 	}
9326 
9327 	return 0;
9328 }
9329 
9330 static int32_t
9331 ocs_hw_cb_fip(void *ctx, void *e)
9332 {
9333 	ocs_hw_t	*hw = ctx;
9334 	ocs_domain_t	*domain = NULL;
9335 	sli4_fip_event_t *event = e;
9336 
9337 	ocs_hw_assert(event);
9338 	ocs_hw_assert(hw);
9339 
9340 	/* Find the associated domain object */
9341 	if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
9342 		ocs_domain_t *d = NULL;
9343 		uint32_t	i = 0;
9344 
9345 		/* Clear VLINK is different from the other FIP events as it passes back
9346 		 * a VPI instead of a FCF index. Check all attached SLI ports for a
9347 		 * matching VPI */
9348 		for (i = 0; i < SLI4_MAX_FCFI; i++) {
9349 			d = hw->domains[i];
9350 			if (d != NULL) {
9351 				ocs_sport_t	*sport = NULL;
9352 
9353 				ocs_list_foreach(&d->sport_list, sport) {
9354 					if (sport->indicator == event->index) {
9355 						domain = d;
9356 						break;
9357 					}
9358 				}
9359 
9360 				if (domain != NULL) {
9361 					break;
9362 				}
9363 			}
9364 		}
9365 	} else {
9366 		domain = ocs_hw_domain_get_indexed(hw, event->index);
9367 	}
9368 
9369 	switch (event->type) {
9370 	case SLI4_FCOE_FIP_FCF_DISCOVERED:
9371 		ocs_hw_read_fcf(hw, event->index);
9372 		break;
9373 	case SLI4_FCOE_FIP_FCF_DEAD:
9374 		if (domain != NULL &&
9375 		    hw->callback.domain != NULL) {
9376 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9377 		}
9378 		break;
9379 	case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
9380 		if (domain != NULL &&
9381 		    hw->callback.domain != NULL) {
9382 			/*
9383 			 * We will want to issue a rediscover FCF when this domain is freed in order
9384 			 * to invalidate the FCF table
9385 			 */
9386 			domain->req_rediscover_fcf = TRUE;
9387 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9388 		}
9389 		break;
9390 	case SLI4_FCOE_FIP_FCF_MODIFIED:
9391 		if (domain != NULL &&
9392 		    hw->callback.domain != NULL) {
9393 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9394 		}
9395 
9396 		ocs_hw_read_fcf(hw, event->index);
9397 		break;
9398 	default:
9399 		ocs_log_test(hw->os, "unsupported event %#x\n", event->type);
9400 	}
9401 
9402 	return 0;
9403 }
9404 
9405 static int32_t
9406 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9407 {
9408 	ocs_remote_node_t *rnode = arg;
9409 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9410 	ocs_hw_remote_node_event_e	evt = 0;
9411 
9412 	if (status || hdr->status) {
9413 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9414 				hdr->status);
9415 		ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
9416 		rnode->attached = FALSE;
9417 		ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9418 		evt = OCS_HW_NODE_ATTACH_FAIL;
9419 	} else {
9420 		rnode->attached = TRUE;
9421 		ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1);
9422 		evt = OCS_HW_NODE_ATTACH_OK;
9423 	}
9424 
9425 	if (hw->callback.rnode != NULL) {
9426 		hw->callback.rnode(hw->args.rnode, evt, rnode);
9427 	}
9428 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9429 
9430 	return 0;
9431 }
9432 
9433 static int32_t
9434 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9435 {
9436 	ocs_remote_node_t *rnode = arg;
9437 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9438 	ocs_hw_remote_node_event_e	evt = OCS_HW_NODE_FREE_FAIL;
9439 	int32_t		rc = 0;
9440 
9441 	if (status || hdr->status) {
9442 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9443 				hdr->status);
9444 
9445 		/*
9446 		 * In certain cases, a non-zero MQE status is OK (all must be true):
9447 		 *   - node is attached
9448 		 *   - if High Login Mode is enabled, node is part of a node group
9449 		 *   - status is 0x1400
9450 		 */
9451 		if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) ||
9452 				(hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) {
9453 			rc = -1;
9454 		}
9455 	}
9456 
9457 	if (rc == 0) {
9458 		rnode->node_group = FALSE;
9459 		rnode->attached = FALSE;
9460 
9461 		if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) {
9462 			ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9463 		}
9464 
9465 		evt = OCS_HW_NODE_FREE_OK;
9466 	}
9467 
9468 	if (hw->callback.rnode != NULL) {
9469 		hw->callback.rnode(hw->args.rnode, evt, rnode);
9470 	}
9471 
9472 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9473 
9474 	return rc;
9475 }
9476 
9477 static int32_t
9478 ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9479 {
9480 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9481 	ocs_hw_remote_node_event_e	evt = OCS_HW_NODE_FREE_FAIL;
9482 	int32_t		rc = 0;
9483 	uint32_t	i;
9484 
9485 	if (status || hdr->status) {
9486 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9487 				hdr->status);
9488 	} else {
9489 		evt = OCS_HW_NODE_FREE_ALL_OK;
9490 	}
9491 
9492 	if (evt == OCS_HW_NODE_FREE_ALL_OK) {
9493 		for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
9494 			ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
9495 		}
9496 
9497 		if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
9498 			ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
9499 			rc = -1;
9500 		}
9501 	}
9502 
9503 	if (hw->callback.rnode != NULL) {
9504 		hw->callback.rnode(hw->args.rnode, evt, NULL);
9505 	}
9506 
9507 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9508 
9509 	return rc;
9510 }
9511 
9512 /**
9513  * @brief Initialize the pool of HW IO objects.
9514  *
9515  * @param hw Hardware context.
9516  *
9517  * @return Returns 0 on success, or a non-zero value on failure.
9518  */
9519 static ocs_hw_rtn_e
9520 ocs_hw_setup_io(ocs_hw_t *hw)
9521 {
9522 	uint32_t	i = 0;
9523 	ocs_hw_io_t	*io = NULL;
9524 	uintptr_t	xfer_virt = 0;
9525 	uintptr_t	xfer_phys = 0;
9526 	uint32_t	index;
9527 	uint8_t		new_alloc = TRUE;
9528 
9529 	if (NULL == hw->io) {
9530 		hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);
9531 
9532 		if (NULL == hw->io) {
9533 			ocs_log_err(hw->os, "IO pointer memory allocation failed, %d Ios at size %zu\n",
9534 				    hw->config.n_io,
9535 				    sizeof(ocs_hw_io_t *));
9536 			return OCS_HW_RTN_NO_MEMORY;
9537 		}
9538 		for (i = 0; i < hw->config.n_io; i++) {
9539 			hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
9540 						OCS_M_ZERO | OCS_M_NOWAIT);
9541 			if (hw->io[i] == NULL) {
9542 				ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
9543 				goto error;
9544 			}
9545 		}
9546 
9547 		/* Create WQE buffs for IO */
9548 		hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
9549 				OCS_M_ZERO | OCS_M_NOWAIT);
9550 		if (NULL == hw->wqe_buffs) {
9551 			ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
9552 			ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d Ios at size %zu\n",
9553 					__func__, hw->config.n_io, hw->sli.config.wqe_size);
9554 			return OCS_HW_RTN_NO_MEMORY;
9555 		}
9556 
9557 	} else {
9558 		/* re-use existing IOs, including SGLs */
9559 		new_alloc = FALSE;
9560 	}
9561 
9562 	if (new_alloc) {
9563 		if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
9564 					sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
9565 					4/*XXX what does this need to be? */)) {
9566 			ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
9567 			return OCS_HW_RTN_NO_MEMORY;
9568 		}
9569 	}
9570 	xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
9571 	xfer_phys = hw->xfer_rdy.phys;
9572 
9573 	for (i = 0; i < hw->config.n_io; i++) {
9574 		hw_wq_callback_t *wqcb;
9575 
9576 		io = hw->io[i];
9577 
9578 		/* initialize IO fields */
9579 		io->hw = hw;
9580 
9581 		/* Assign a WQE buff */
9582 		io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];
9583 
9584 		/* Allocate the request tag for this IO */
9585 		wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
9586 		if (wqcb == NULL) {
9587 			ocs_log_err(hw->os, "can't allocate request tag\n");
9588 			return OCS_HW_RTN_NO_RESOURCES;
9589 		}
9590 		io->reqtag = wqcb->instance_index;
9591 
9592 		/* Now for the fields that are initialized on each free */
9593 		ocs_hw_init_free_io(io);
9594 
9595 		/* The XB flag isn't cleared on IO free, so initialize it to zero here */
9596 		io->xbusy = 0;
9597 
9598 		if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
9599 			ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
9600 			return OCS_HW_RTN_NO_MEMORY;
9601 		}
9602 
9603 		if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
9604 			ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
9605 			ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
9606 			return OCS_HW_RTN_NO_MEMORY;
9607 		}
9608 		io->def_sgl_count = hw->config.n_sgl;
9609 		io->sgl = &io->def_sgl;
9610 		io->sgl_count = io->def_sgl_count;
9611 
9612 		if (hw->xfer_rdy.size) {
9613 			io->xfer_rdy.virt = (void *)xfer_virt;
9614 			io->xfer_rdy.phys = xfer_phys;
9615 			io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
9616 
9617 			xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
9618 			xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
9619 		}
9620 	}
9621 
9622 	return OCS_HW_RTN_SUCCESS;
9623 error:
9624 	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
9625 		ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
9626 		hw->io[i] = NULL;
9627 	}
9628 
9629 	return OCS_HW_RTN_NO_MEMORY;
9630 }
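
/**
 * Illustrative model of the XFER_RDY carving done in ocs_hw_setup_io()
 * (standalone sketch, not driver code): one DMA block serves all IOs, and
 * each IO receives a fixed-size slice at offset i * sizeof(fcp_xfer_rdy_iu_t).
 *
 * @code
 * uintptr_t virt = (uintptr_t)hw->xfer_rdy.virt;
 * uintptr_t phys = hw->xfer_rdy.phys;
 *
 * for (i = 0; i < hw->config.n_io; i++) {
 * 	hw->io[i]->xfer_rdy.virt = (void *)(virt + i * sizeof(fcp_xfer_rdy_iu_t));
 * 	hw->io[i]->xfer_rdy.phys = phys + i * sizeof(fcp_xfer_rdy_iu_t);
 * 	hw->io[i]->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
 * }
 * @endcode
 */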
9631 
9632 static ocs_hw_rtn_e
9633 ocs_hw_init_io(ocs_hw_t *hw)
9634 {
9635 	uint32_t        i = 0, io_index = 0;
9636 	uint32_t        prereg = 0;
9637 	ocs_hw_io_t	*io = NULL;
9638 	uint8_t		cmd[SLI4_BMBX_SIZE];
9639 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
9640 	uint32_t	nremaining;
9641 	uint32_t	n = 0;
9642 	uint32_t	sgls_per_request = 256;
9643 	ocs_dma_t	**sgls = NULL;
9644 	ocs_dma_t	reqbuf = { 0 };
9645 
9646 	prereg = sli_get_sgl_preregister(&hw->sli);
9647 
9648 	if (prereg) {
9649 		sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
9650 		if (sgls == NULL) {
9651 			ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
9652 			return OCS_HW_RTN_NO_MEMORY;
9653 		}
9654 
9655 		rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
9656 		if (rc) {
9657 			ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
9658 			ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9659 			return OCS_HW_RTN_NO_MEMORY;
9660 		}
9661 	}
9662 
9663 	io = hw->io[io_index];
9664 	for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
9665 		if (prereg) {
9666 			/* Copy the addresses of the SGLs into the local sgls[] array; break out if the XRI
9667 			 * values are not contiguous.
9668 			 */
9669 			for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
9670 				/* Check that we have contiguous xri values */
9671 				if (n > 0) {
9672 					if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
9673 						break;
9674 					}
9675 				}
9676 				sgls[n] = hw->io[io_index + n]->sgl;
9677 			}
9678 
9679 			if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
9680 						io->indicator, n, sgls, NULL, &reqbuf)) {
9681 				if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
9682 					rc = OCS_HW_RTN_ERROR;
9683 					ocs_log_err(hw->os, "SGL post failed\n");
9684 					break;
9685 				}
9686 			}
9687 		} else {
9688 			n = nremaining;
9689 		}
9690 
9691 		/* Add to tail if successful */
9692 		for (i = 0; i < n; i ++) {
9693 			io->is_port_owned = 0;
9694 			io->state = OCS_HW_IO_STATE_FREE;
9695 			ocs_list_add_tail(&hw->io_free, io);
9696 			io_index++;
9697 			io = (io_index < hw->config.n_io) ? hw->io[io_index] : NULL; /* don't read past the end of hw->io[] */
9698 		}
9699 	}
9700 
9701 	if (prereg) {
9702 		ocs_dma_free(hw->os, &reqbuf);
9703 		ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9704 	}
9705 
9706 	return rc;
9707 }
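
/*
 * Note on the pre-registration batching above: FCOE_POST_SGL_PAGES registers
 * SGLs for a run of consecutive XRIs starting at io->indicator, so a batch
 * must stop at the first non-contiguous XRI value. For example (hypothetical
 * XRIs):
 *
 *	XRIs 0x100..0x1ff contiguous  -> one command posts 256 SGLs
 *	XRIs 0x100..0x17f, then 0x200 -> first command posts 128, the next
 *	                                 batch restarts at 0x200
 */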
9708 
9709 static int32_t
9710 ocs_hw_flush(ocs_hw_t *hw)
9711 {
9712 	uint32_t	i = 0;
9713 
9714 	/* Process any remaining completions */
9715 	for (i = 0; i < hw->eq_count; i++) {
9716 		ocs_hw_process(hw, i, ~0);
9717 	}
9718 
9719 	return 0;
9720 }
9721 
9722 static int32_t
9723 ocs_hw_command_cancel(ocs_hw_t *hw)
9724 {
9725 
9726 	ocs_lock(&hw->cmd_lock);
9727 
9728 	/*
9729 	 * Manually clean up remaining commands. Note: since this calls
9730 	 * ocs_hw_command_process(), we'll also process the cmd_pending
9731 	 * list, so no need to manually clean that out.
9732 	 */
9733 	while (!ocs_list_empty(&hw->cmd_head)) {
9734 		uint8_t		mqe[SLI4_BMBX_SIZE] = { 0 };
9735 		ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);
9736 
9737 		ocs_log_test(hw->os, "hung command %08x\n",
9738 				NULL == ctx ? UINT32_MAX :
9739 				(NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
9740 		ocs_unlock(&hw->cmd_lock);
9741 		ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
9742 		ocs_lock(&hw->cmd_lock);
9743 	}
9744 
9745 	ocs_unlock(&hw->cmd_lock);
9746 
9747 	return 0;
9748 }
9749 
9750 /**
9751  * @brief Find IO given indicator (xri).
9752  *
9753  * @param hw HW context.
9754  * @param xri Indicator (XRI) to look for.
9755  *
9756  * @return Returns io if found, NULL otherwise.
9757  */
9758 ocs_hw_io_t *
9759 ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
9760 {
9761 	uint32_t ioindex;
9762 	ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
9763 	return hw->io[ioindex];
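
/*
 * Example of the mapping above (hypothetical base): if the FCOE_XRI extent
 * base is 0x80, then xri 0x85 resolves to hw->io[0x85 - 0x80] == hw->io[5].
 */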
9764 }
9765 
9766 /**
9767  * @brief Issue any pending callbacks for an IO and remove off the timer and pending lists.
9768  *
9769  * @param hw HW context.
9770  * @param io Pointer to the IO to cleanup.
9771  */
9772 static void
9773 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
9774 {
9775 	ocs_hw_done_t  done = io->done;
9776 	ocs_hw_done_t  abort_done = io->abort_done;
9777 
9778 	/* first check the timed WQE list and remove the IO if present */
9779 	if (ocs_list_on_list(&io->wqe_link)) {
9780 		ocs_list_remove(&hw->io_timed_wqe, io);
9781 	}
9782 
9783 	/* Remove from WQ pending list */
9784 	if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
9785 		ocs_list_remove(&io->wq->pending_list, io);
9786 	}
9787 
9788 	if (io->done) {
9789 		void		*arg = io->arg;
9790 
9791 		io->done = NULL;
9792 		ocs_unlock(&hw->io_lock);
9793 		done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
9794 		ocs_lock(&hw->io_lock);
9795 	}
9796 
9797 	if (io->abort_done != NULL) {
9798 		void		*abort_arg = io->abort_arg;
9799 
9800 		io->abort_done = NULL;
9801 		ocs_unlock(&hw->io_lock);
9802 		abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
9803 		ocs_lock(&hw->io_lock);
9804 	}
9805 }
9806 
9807 static int32_t
9808 ocs_hw_io_cancel(ocs_hw_t *hw)
9809 {
9810 	ocs_hw_io_t	*io = NULL;
9811 	ocs_hw_io_t	*tmp_io = NULL;
9812 	uint32_t	iters = 100; /* One second limit */
9813 
9814 	/*
9815 	 * Manually clean up outstanding IO.
9816 	 * Only walk through list once: the backend will cleanup any IOs when done/abort_done is called.
9817 	 */
9818 	ocs_lock(&hw->io_lock);
9819 	ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
9820 		ocs_hw_done_t  done = io->done;
9821 		ocs_hw_done_t  abort_done = io->abort_done;
9822 
9823 		ocs_hw_io_cancel_cleanup(hw, io);
9824 
9825 		/*
9826 		 * Since this is called in a reset/shutdown
9827 		 * case, if there is no callback, then just
9828 		 * free the IO.
9829 		 *
9830 		 * Note: A port owned XRI cannot be on
9831 		 *       the in use list. We cannot call
9832 		 *       ocs_hw_io_free() because we already
9833 		 *       hold the io_lock.
9834 		 */
9835 		if (done == NULL &&
9836 		    abort_done == NULL) {
9842 			ocs_hw_io_free_common(hw, io);
9843 			ocs_list_remove(&hw->io_inuse, io);
9844 			ocs_hw_io_free_move_correct_list(hw, io);
9845 		}
9846 	}
9847 
9848 	/*
9849 	 * For port owned XRIs, they are not on the in use list, so
9850 	 * walk though XRIs and issue any callbacks.
9851 	 */
9852 	ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
9853 		/* check the dnrx list and remove the IO if present */
9854 		if (ocs_list_on_list(&io->dnrx_link)) {
9855 			ocs_list_remove(&hw->io_port_dnrx, io);
9856 			ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
9857 		}
9858 		ocs_hw_io_cancel_cleanup(hw, io);
9859 		ocs_list_remove(&hw->io_port_owned, io);
9860 		ocs_hw_io_free_common(hw, io);
9861 	}
9862 	ocs_unlock(&hw->io_lock);
9863 
9864 	/* Give time for the callbacks to complete */
9865 	do {
9866 		ocs_udelay(10000);
9867 		iters--;
9868 	} while (!ocs_list_empty(&hw->io_inuse) && iters);
9869 
9870 	/* Leave a breadcrumb that cleanup is not yet complete. */
9871 	if (!ocs_list_empty(&hw->io_inuse)) {
9872 		ocs_log_test(hw->os, "io_inuse list is not empty\n");
9873 	}
9874 
9875 	return 0;
9876 }
9877 
9878 static int32_t
9879 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
9880 		ocs_dma_t *rsp)
9881 {
9882 	sli4_sge_t	*data = NULL;
9883 
9884 	if (!hw || !io) {
9885 		ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
9886 		return OCS_HW_RTN_ERROR;
9887 	}
9888 
9889 	data = io->def_sgl.virt;
9890 
9891 	/* setup command pointer */
9892 	data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
9893 	data->buffer_address_low  = ocs_addr32_lo(cmnd->phys);
9894 	data->buffer_length = cmnd_size;
9895 	data++;
9896 
9897 	/* setup response pointer */
9898 	data->buffer_address_high = ocs_addr32_hi(rsp->phys);
9899 	data->buffer_address_low  = ocs_addr32_lo(rsp->phys);
9900 	data->buffer_length = rsp->size;
9901 
9902 	return 0;
9903 }
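
/*
 * Layout produced by ocs_hw_io_ini_sge() above: the first two entries of the
 * default SGL point at the command and response buffers:
 *
 *	sge[0]: addr = cmnd->phys, length = cmnd_size  (request payload)
 *	sge[1]: addr = rsp->phys,  length = rsp->size  (response buffer)
 */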
9904 
9905 static int32_t
9906 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9907 {
9908 	sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;
9909 
9910 	if (status || read_topo->hdr.status) {
9911 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
9912 				status, read_topo->hdr.status);
9913 		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9914 		return -1;
9915 	}
9916 
9917 	switch (read_topo->attention_type) {
9918 	case SLI4_READ_TOPOLOGY_LINK_UP:
9919 		hw->link.status = SLI_LINK_STATUS_UP;
9920 		break;
9921 	case SLI4_READ_TOPOLOGY_LINK_DOWN:
9922 		hw->link.status = SLI_LINK_STATUS_DOWN;
9923 		break;
9924 	case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
9925 		hw->link.status = SLI_LINK_STATUS_NO_ALPA;
9926 		break;
9927 	default:
9928 		hw->link.status = SLI_LINK_STATUS_MAX;
9929 		break;
9930 	}
9931 
9932 	switch (read_topo->topology) {
9933 	case SLI4_READ_TOPOLOGY_NPORT:
9934 		hw->link.topology = SLI_LINK_TOPO_NPORT;
9935 		break;
9936 	case SLI4_READ_TOPOLOGY_FC_AL:
9937 		hw->link.topology = SLI_LINK_TOPO_LOOP;
9938 		if (SLI_LINK_STATUS_UP == hw->link.status) {
9939 			hw->link.loop_map = hw->loop_map.virt;
9940 		}
9941 		hw->link.fc_id = read_topo->acquired_al_pa;
9942 		break;
9943 	default:
9944 		hw->link.topology = SLI_LINK_TOPO_MAX;
9945 		break;
9946 	}
9947 
9948 	hw->link.medium = SLI_LINK_MEDIUM_FC;
9949 
9950 	switch (read_topo->link_current.link_speed) {
9951 	case SLI4_READ_TOPOLOGY_SPEED_1G:
9952 		hw->link.speed =  1 * 1000;
9953 		break;
9954 	case SLI4_READ_TOPOLOGY_SPEED_2G:
9955 		hw->link.speed =  2 * 1000;
9956 		break;
9957 	case SLI4_READ_TOPOLOGY_SPEED_4G:
9958 		hw->link.speed =  4 * 1000;
9959 		break;
9960 	case SLI4_READ_TOPOLOGY_SPEED_8G:
9961 		hw->link.speed =  8 * 1000;
9962 		break;
9963 	case SLI4_READ_TOPOLOGY_SPEED_16G:
9964 		hw->link.speed = 16 * 1000;
9965 		hw->link.loop_map = NULL;
9966 		break;
9967 	case SLI4_READ_TOPOLOGY_SPEED_32G:
9968 		hw->link.speed = 32 * 1000;
9969 		hw->link.loop_map = NULL;
9970 		break;
9971 	}
9972 
9973 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9974 
9975 	ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9976 
9977 	return 0;
9978 }
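
/*
 * Note on the mapping above: hw->link.speed is stored in megabits per second,
 * so e.g. SLI4_READ_TOPOLOGY_SPEED_8G becomes 8 * 1000 = 8000. The loop map
 * is cleared at 16G/32G, where arbitrated loop topology is not supported.
 */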
9979 
9980 static int32_t
9981 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9982 {
9983 	ocs_sli_port_t	*sport = ctx->app;
9984 	ocs_hw_t	*hw = sport->hw;
9985 
9986 	smtrace("port");
9987 
9988 	switch (evt) {
9989 	case OCS_EVT_EXIT:
9990 		/* ignore */
9991 		break;
9992 
9993 	case OCS_EVT_HW_PORT_REQ_FREE:
9994 	case OCS_EVT_HW_PORT_REQ_ATTACH:
9995 		if (data != NULL) {
9996 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9997 		}
9998 		/* fall through */
9999 	default:
10000 		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
10001 		break;
10002 	}
10003 
10004 	return 0;
10005 }
10006 
10007 static void *
10008 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10009 {
10010 	ocs_sli_port_t	*sport = ctx->app;
10011 	ocs_hw_t	*hw = sport->hw;
10012 
10013 	smtrace("port");
10014 
10015 	switch (evt) {
10016 	case OCS_EVT_ENTER:
10017 		if (data != NULL) {
10018 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10019 		}
10020 		if (hw->callback.port != NULL) {
10021 			hw->callback.port(hw->args.port,
10022 					OCS_HW_PORT_FREE_FAIL, sport);
10023 		}
10024 		break;
10025 	default:
10026 		break;
10027 	}
10028 
10029 	return NULL;
10030 }
10031 
10032 static void *
10033 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10034 {
10035 	ocs_sli_port_t	*sport = ctx->app;
10036 	ocs_hw_t	*hw = sport->hw;
10037 
10038 	smtrace("port");
10039 
10040 	switch (evt) {
10041 	case OCS_EVT_ENTER:
10042 		/* free SLI resource */
10043 		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
10044 			ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
10045 		}
10046 
10047 		/* free mailbox buffer */
10048 		if (data != NULL) {
10049 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10050 		}
10051 		if (hw->callback.port != NULL) {
10052 			hw->callback.port(hw->args.port,
10053 					OCS_HW_PORT_FREE_OK, sport);
10054 		}
10055 		break;
10056 	default:
10057 		break;
10058 	}
10059 
10060 	return NULL;
10061 }
10062 
10063 static void *
10064 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10065 {
10066 	ocs_sli_port_t	*sport = ctx->app;
10067 	ocs_hw_t	*hw = sport->hw;
10068 
10069 	smtrace("port");
10070 
10071 	switch (evt) {
10072 	case OCS_EVT_ENTER:
10073 		/* free SLI resource */
10074 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10075 
10076 		/* free mailbox buffer */
10077 		if (data != NULL) {
10078 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10079 		}
10080 
10081 		if (hw->callback.port != NULL) {
10082 			hw->callback.port(hw->args.port,
10083 					OCS_HW_PORT_ATTACH_FAIL, sport);
10084 		}
10085 		if (sport->sm_free_req_pending) {
10086 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10087 		}
10088 		break;
10089 	default:
10090 		__ocs_hw_port_common(__func__, ctx, evt, data);
10091 		break;
10092 	}
10093 
10094 	return NULL;
10095 }
10096 
10097 static void *
10098 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10099 {
10100 	ocs_sli_port_t	*sport = ctx->app;
10101 	ocs_hw_t	*hw = sport->hw;
10102 	uint8_t		*cmd = NULL;
10103 
10104 	smtrace("port");
10105 
10106 	switch (evt) {
10107 	case OCS_EVT_ENTER:
10108 		/* allocate memory and send unreg_vpi */
10109 		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10110 		if (!cmd) {
10111 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10112 			break;
10113 		}
10114 
10115 		if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
10116 					   SLI4_UNREG_TYPE_PORT)) {
10117 			ocs_log_err(hw->os, "UNREG_VPI format failure\n");
10118 			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10119 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10120 			break;
10121 		}
10122 
10123 		if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10124 			ocs_log_err(hw->os, "UNREG_VPI command failure\n");
10125 			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10126 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10127 			break;
10128 		}
10129 		break;
10130 	case OCS_EVT_RESPONSE:
10131 		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10132 		break;
10133 	case OCS_EVT_ERROR:
10134 		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10135 		break;
10136 	default:
10137 		__ocs_hw_port_common(__func__, ctx, evt, data);
10138 		break;
10139 	}
10140 
10141 	return NULL;
10142 }
10143 
10144 static void *
10145 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10146 {
10147 	ocs_sli_port_t	*sport = ctx->app;
10148 	ocs_hw_t	*hw = sport->hw;
10149 
10150 	smtrace("port");
10151 
10152 	switch (evt) {
10153 	case OCS_EVT_ENTER:
10154 		/* Forward to execute in mailbox completion processing context */
10155 		if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
10156 			ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
10157 		}
10158 		break;
10159 	case OCS_EVT_RESPONSE:
10160 		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10161 		break;
10162 	case OCS_EVT_ERROR:
10163 		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10164 		break;
10165 	default:
10166 		break;
10167 	}
10168 
10169 	return NULL;
10170 }
10171 
10172 static void *
10173 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10174 {
10175 	ocs_sli_port_t	*sport = ctx->app;
10176 	ocs_hw_t	*hw = sport->hw;
10177 
10178 	smtrace("port");
10179 
10180 	switch (evt) {
10181 	case OCS_EVT_ENTER:
10182 		if (data != NULL) {
10183 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10184 		}
10185 		if (hw->callback.port != NULL) {
10186 			hw->callback.port(hw->args.port,
10187 					OCS_HW_PORT_ATTACH_OK, sport);
10188 		}
10189 		if (sport->sm_free_req_pending) {
10190 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10191 		}
10192 		break;
10193 	case OCS_EVT_HW_PORT_REQ_FREE:
10194 		/* virtual/physical port request free */
10195 		ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10196 		break;
10197 	default:
10198 		__ocs_hw_port_common(__func__, ctx, evt, data);
10199 		break;
10200 	}
10201 
10202 	return NULL;
10203 }
10204 
10205 static void *
10206 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10207 {
10208 	ocs_sli_port_t	*sport = ctx->app;
10209 	ocs_hw_t	*hw = sport->hw;
10210 
10211 	smtrace("port");
10212 
10213 	switch (evt) {
10214 	case OCS_EVT_ENTER:
10215 		if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
10216 			ocs_log_err(hw->os, "REG_VPI format failure\n");
10217 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10218 			break;
10219 		}
10220 
10221 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10222 			ocs_log_err(hw->os, "REG_VPI command failure\n");
10223 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10224 			break;
10225 		}
10226 		break;
10227 	case OCS_EVT_RESPONSE:
10228 		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10229 		break;
10230 	case OCS_EVT_ERROR:
10231 		ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
10232 		break;
10233 	case OCS_EVT_HW_PORT_REQ_FREE:
10234 		/* Wait for attach response and then free */
10235 		sport->sm_free_req_pending = 1;
10236 		break;
10237 	default:
10238 		__ocs_hw_port_common(__func__, ctx, evt, data);
10239 		break;
10240 	}
10241 
10242 	return NULL;
10243 }
10244 
10245 static void *
10246 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10247 {
10248 	ocs_sli_port_t	*sport = ctx->app;
10249 	ocs_hw_t	*hw = sport->hw;
10250 
10251 	smtrace("port");
10252 
10253 	switch (evt) {
10254 	case OCS_EVT_ENTER:
10255 		/* free SLI resource */
10256 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10257 
10258 		/* free mailbox buffer */
10259 		if (data != NULL) {
10260 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10261 		}
10262 		break;
10263 	default:
10264 		__ocs_hw_port_common(__func__, ctx, evt, data);
10265 		break;
10266 	}
10267 
10268 	return NULL;
10269 }
10270 
10271 static void *
10272 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10273 {
10274 	ocs_sli_port_t	*sport = ctx->app;
10275 	ocs_hw_t	*hw = sport->hw;
10276 
10277 	smtrace("port");
10278 
10279 	switch (evt) {
10280 	case OCS_EVT_ENTER:
10281 		if (data != NULL) {
10282 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10283 		}
10284 		if (hw->callback.port != NULL) {
10285 			hw->callback.port(hw->args.port,
10286 					OCS_HW_PORT_ALLOC_OK, sport);
10287 		}
10288 		/* If there is a pending free request, then handle it now */
10289 		if (sport->sm_free_req_pending) {
10290 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10291 		}
10292 		break;
10293 	case OCS_EVT_HW_PORT_REQ_ATTACH:
10294 		/* virtual port requests attach */
10295 		ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
10296 		break;
10297 	case OCS_EVT_HW_PORT_ATTACH_OK:
10298 		/* physical port attached (as part of attaching domain) */
10299 		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10300 		break;
10301 	case OCS_EVT_HW_PORT_REQ_FREE:
10302 		/* virtual port request free */
10303 		if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
10304 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10305 		} else {
10306 			/*
10307 			 * Note: BE3/Skyhawk will respond with a status of 0x20
10308 			 *       unless the reg_vpi has been issued, so we can
10309 			 *       skip the unreg_vpi for these adapters.
10310 			 *
10311 			 * Send a nop to make sure that free doesn't occur in
10312 			 * the same context
10313 			 */
10314 			ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
10315 		}
10316 		break;
10317 	default:
10318 		__ocs_hw_port_common(__func__, ctx, evt, data);
10319 		break;
10320 	}
10321 
10322 	return NULL;
10323 }
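
/*
 * Port state flow summary (as implemented by the handlers above): a newly
 * allocated sport either attaches via REG_VPI (virtual ports, or a physical
 * port once its domain attaches) or is freed via UNREG_VPI. On BE3/Skyhawk
 * the UNREG_VPI is skipped when no REG_VPI was issued, and a NOP is used
 * instead so the free completes outside the caller's context.
 */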
10324 
10325 static void *
10326 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10327 {
10328 	ocs_sli_port_t	*sport = ctx->app;
10329 	ocs_hw_t	*hw = sport->hw;
10330 
10331 	smtrace("port");
10332 
10333 	switch (evt) {
10334 	case OCS_EVT_ENTER:
10335 		/* free SLI resource */
10336 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10337 
10338 		/* free mailbox buffer */
10339 		if (data != NULL) {
10340 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10341 		}
10342 
10343 		if (hw->callback.port != NULL) {
10344 			hw->callback.port(hw->args.port,
10345 					OCS_HW_PORT_ALLOC_FAIL, sport);
10346 		}
10347 
10348 		/* If there is a pending free request, then handle it now */
10349 		if (sport->sm_free_req_pending) {
10350 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10351 		}
10352 		break;
10353 	default:
10354 		__ocs_hw_port_common(__func__, ctx, evt, data);
10355 		break;
10356 	}
10357 
10358 	return NULL;
10359 }
10360 
10361 static void *
10362 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10363 {
10364 	ocs_sli_port_t	*sport = ctx->app;
10365 	ocs_hw_t	*hw = sport->hw;
10366 	uint8_t		*payload = NULL;
10367 
10368 	smtrace("port");
10369 
10370 	switch (evt) {
10371 	case OCS_EVT_ENTER:
10372 		/* allocate memory for the service parameters */
10373 		if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
10374 			ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
10375 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10376 			break;
10377 		}
10378 
10379 		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10380 					&sport->dma, sport->indicator)) {
10381 			ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10382 			ocs_dma_free(hw->os, &sport->dma);
10383 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10384 			break;
10385 		}
10386 
10387 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10388 			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10389 			ocs_dma_free(hw->os, &sport->dma);
10390 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10391 			break;
10392 		}
10393 		break;
10394 	case OCS_EVT_RESPONSE:
10395 		payload = sport->dma.virt;
10396 
10397 		ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);
10398 
10399 		ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
10400 				sizeof(sport->sli_wwpn));
10401 		ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
10402 				sizeof(sport->sli_wwnn));
10403 
10404 		ocs_dma_free(hw->os, &sport->dma);
10405 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
10406 		break;
10407 	case OCS_EVT_ERROR:
10408 		ocs_dma_free(hw->os, &sport->dma);
10409 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10410 		break;
10411 	case OCS_EVT_HW_PORT_REQ_FREE:
10412 		/* Wait for attach response and then free */
10413 		sport->sm_free_req_pending = 1;
10414 		break;
10415 	case OCS_EVT_EXIT:
10416 		break;
10417 	default:
10418 		__ocs_hw_port_common(__func__, ctx, evt, data);
10419 		break;
10420 	}
10421 
10422 	return NULL;
10423 }
10424 
10425 static void *
10426 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10427 {
10428 	ocs_sli_port_t	*sport = ctx->app;
10429 
10430 	smtrace("port");
10431 
10432 	switch (evt) {
10433 	case OCS_EVT_ENTER:
10434 		/* no-op */
10435 		break;
10436 	case OCS_EVT_HW_PORT_ALLOC_OK:
10437 		ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
10438 		break;
10439 	case OCS_EVT_HW_PORT_ALLOC_FAIL:
10440 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
10441 		break;
10442 	case OCS_EVT_HW_PORT_REQ_FREE:
10443 		/* Wait for attach response and then free */
10444 		sport->sm_free_req_pending = 1;
10445 		break;
10446 	default:
10447 		__ocs_hw_port_common(__func__, ctx, evt, data);
10448 		break;
10449 	}
10450 
10451 	return NULL;
10452 }
10453 
10454 static void *
10455 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10456 {
10457 	ocs_sli_port_t	*sport = ctx->app;
10458 	ocs_hw_t	*hw = sport->hw;
10459 
10460 	smtrace("port");
10461 
10462 	switch (evt) {
10463 	case OCS_EVT_ENTER:
10464 		/* If there is a pending free request, then handle it now */
10465 		if (sport->sm_free_req_pending) {
10466 			ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
10467 			return NULL;
10468 		}
10469 
10470 		/* TODO XXX transitioning to done only works if this is called
10471 		 * directly from ocs_hw_port_alloc BUT not if called from
10472 		 * read_sparm64. In the latter case, we actually want to go
10473 		 * through report_ok/fail
10474 		 */
10475 		if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
10476 					sport->indicator, sport->domain->indicator)) {
10477 			ocs_log_err(hw->os, "INIT_VPI format failure\n");
10478 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10479 			break;
10480 		}
10481 
10482 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10483 			ocs_log_err(hw->os, "INIT_VPI command failure\n");
10484 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10485 			break;
10486 		}
10487 		break;
10488 	case OCS_EVT_RESPONSE:
10489 		ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
10490 		break;
10491 	case OCS_EVT_ERROR:
10492 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10493 		break;
10494 	case OCS_EVT_HW_PORT_REQ_FREE:
10495 		/* Wait for attach response and then free */
10496 		sport->sm_free_req_pending = 1;
10497 		break;
10498 	case OCS_EVT_EXIT:
10499 		break;
10500 	default:
10501 		__ocs_hw_port_common(__func__, ctx, evt, data);
10502 		break;
10503 	}
10504 
10505 	return NULL;
10506 }
10507 
10508 static int32_t
10509 __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10510 {
10511 	ocs_sli_port_t *sport = arg;
10512 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
10513 	ocs_sm_event_t	evt;
10514 
10515 	if (status || hdr->status) {
10516 		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10517 			      sport->indicator, status, hdr->status);
10518 		evt = OCS_EVT_ERROR;
10519 	} else {
10520 		evt = OCS_EVT_RESPONSE;
10521 	}
10522 
10523 	ocs_sm_post_event(&sport->ctx, evt, mqe);
10524 
10525 	return 0;
10526 }
10527 
10528 static int32_t
10529 __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10530 {
10531 	ocs_sli_port_t *sport = arg;
10532 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
10533 	ocs_sm_event_t	evt;
10534 	uint8_t *mqecpy;
10535 
10536 	if (status || hdr->status) {
10537 		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10538 			      sport->indicator, status, hdr->status);
10539 		evt = OCS_EVT_ERROR;
10540 	} else {
10541 		evt = OCS_EVT_RESPONSE;
10542 	}
10543 
10544 	/*
10545 	 * In this case we have to malloc a mailbox command buffer, since the buffer
10546 	 * is carried through the state machine post-event call and eventually freed there
10547 	 */
10548 	mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10549 	if (mqecpy == NULL) {
10550 		ocs_log_err(hw->os, "malloc mqecpy failed\n");
10551 		return -1;
10552 	}
10553 	ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);
10554 
10555 	ocs_sm_post_event(&sport->ctx, evt, mqecpy);
10556 
10557 	return 0;
10558 }
10559 
10560 /***************************************************************************
10561  * Domain state machine
10562  */
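
/*
 * Illustrative sketch (not compiled into the driver): every handler in the
 * port and domain state machines follows the same shape -- act on
 * OCS_EVT_ENTER, transition on OCS_EVT_RESPONSE/OCS_EVT_ERROR, and defer
 * anything unhandled to the common handler. The state and event names here
 * are real; the handler itself is a made-up example.
 */
#if 0
static void *
__example_domain_state(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	ocs_domain_t	*domain = ctx->app;

	switch (evt) {
	case OCS_EVT_ENTER:
		/* typically: format and submit a mailbox command whose
		 * completion posts OCS_EVT_RESPONSE or OCS_EVT_ERROR */
		break;
	case OCS_EVT_RESPONSE:
		ocs_sm_transition(ctx, __example_next_state, data);
		break;
	case OCS_EVT_ERROR:
		ocs_sm_transition(ctx, __example_fail_state, data);
		break;
	default:
		__ocs_hw_domain_common(__func__, ctx, evt, data);
		break;
	}

	return NULL;
}
#endif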
10563 
10564 static int32_t
10565 __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10566 {
10567 	ocs_domain_t	*domain = ctx->app;
10568 	ocs_hw_t	*hw = domain->hw;
10569 
10570 	smtrace("domain");
10571 
10572 	switch (evt) {
10573 	case OCS_EVT_EXIT:
10574 		/* ignore */
10575 		break;
10576 
10577 	default:
10578 		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
10579 		break;
10580 	}
10581 
10582 	return 0;
10583 }
10584 
10585 static void *
10586 __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10587 {
10588 	ocs_domain_t	*domain = ctx->app;
10589 	ocs_hw_t	*hw = domain->hw;
10590 
10591 	smtrace("domain");
10592 
10593 	switch (evt) {
10594 	case OCS_EVT_ENTER:
10595 		/* free command buffer */
10596 		if (data != NULL) {
10597 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10598 		}
10599 		/* free SLI resources */
10600 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10601 		/* TODO how to free FCFI (or do we at all)? */
10602 
10603 		if (hw->callback.domain != NULL) {
10604 			hw->callback.domain(hw->args.domain,
10605 					OCS_HW_DOMAIN_ALLOC_FAIL,
10606 					domain);
10607 		}
10608 		break;
10609 	default:
10610 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10611 		break;
10612 	}
10613 
10614 	return NULL;
10615 }
10616 
10617 static void *
10618 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10619 {
10620 	ocs_domain_t	*domain = ctx->app;
10621 	ocs_hw_t	*hw = domain->hw;
10622 
10623 	smtrace("domain");
10624 
10625 	switch (evt) {
10626 	case OCS_EVT_ENTER:
10627 		/* free mailbox buffer and send alloc ok to physical sport */
10628 		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10629 		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);
10630 
10631 		/* now inform registered callbacks */
10632 		if (hw->callback.domain != NULL) {
10633 			hw->callback.domain(hw->args.domain,
10634 					OCS_HW_DOMAIN_ATTACH_OK,
10635 					domain);
10636 		}
10637 		break;
10638 	case OCS_EVT_HW_DOMAIN_REQ_FREE:
10639 		ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10640 		break;
10641 	default:
10642 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10643 		break;
10644 	}
10645 
10646 	return NULL;
10647 }
10648 
10649 static void *
10650 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10651 {
10652 	ocs_domain_t	*domain = ctx->app;
10653 	ocs_hw_t	*hw = domain->hw;
10654 
10655 	smtrace("domain");
10656 
10657 	switch (evt) {
10658 	case OCS_EVT_ENTER:
10659 		if (data != NULL) {
10660 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10661 		}
10662 		/* free SLI resources */
10663 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10664 		/* TODO how to free FCFI (or do we at all)? */
10665 
10666 		if (hw->callback.domain != NULL) {
10667 			hw->callback.domain(hw->args.domain,
10668 					OCS_HW_DOMAIN_ATTACH_FAIL,
10669 					domain);
10670 		}
10671 		break;
10672 	case OCS_EVT_EXIT:
10673 		break;
10674 	default:
10675 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10676 		break;
10677 	}
10678 
10679 	return NULL;
10680 }
10681 
10682 static void *
10683 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10684 {
10685 	ocs_domain_t	*domain = ctx->app;
10686 	ocs_hw_t	*hw = domain->hw;
10687 
10688 	smtrace("domain");
10689 
10690 	switch (evt) {
10691 	case OCS_EVT_ENTER:
10692 
10693 		ocs_display_sparams("", "reg vfi", 0, NULL, domain->dma.virt);
10694 
10695 		if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
10696 			ocs_log_err(hw->os, "REG_VFI format failure\n");
10697 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10698 			break;
10699 		}
10700 
10701 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10702 			ocs_log_err(hw->os, "REG_VFI command failure\n");
10703 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10704 			break;
10705 		}
10706 		break;
10707 	case OCS_EVT_RESPONSE:
10708 		ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
10709 		break;
10710 	case OCS_EVT_ERROR:
10711 		ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
10712 		break;
10713 	default:
10714 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10715 		break;
10716 	}
10717 
10718 	return NULL;
10719 }
10720 
10721 static void *
10722 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10723 {
10724 	ocs_domain_t	*domain = ctx->app;
10725 	ocs_hw_t	*hw = domain->hw;
10726 
10727 	smtrace("domain");
10728 
10729 	switch (evt) {
10730 	case OCS_EVT_ENTER:
10731 		/* free mailbox buffer and send alloc ok to physical sport */
10732 		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10733 		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);
10734 
10735 		ocs_hw_domain_add(hw, domain);
10736 
10737 		/* now inform registered callbacks */
10738 		if (hw->callback.domain != NULL) {
10739 			hw->callback.domain(hw->args.domain,
10740 					OCS_HW_DOMAIN_ALLOC_OK,
10741 					domain);
10742 		}
10743 		break;
10744 	case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
10745 		ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
10746 		break;
10747 	case OCS_EVT_HW_DOMAIN_REQ_FREE:
10748 		/* unreg_fcfi/vfi */
10749 		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10750 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
10751 		} else {
10752 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10753 		}
10754 		break;
10755 	default:
10756 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10757 		break;
10758 	}
10759 
10760 	return NULL;
10761 }
10762 
10763 static void *
10764 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10765 {
10766 	ocs_domain_t	*domain = ctx->app;
10767 	ocs_hw_t	*hw = domain->hw;
10768 
10769 	smtrace("domain");
10770 
10771 	switch (evt) {
10772 	case OCS_EVT_ENTER:
10773 		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10774 					&domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
10775 			ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10776 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10777 			break;
10778 		}
10779 
10780 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10781 			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10782 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10783 			break;
10784 		}
10785 		break;
10786 	case OCS_EVT_EXIT:
10787 		break;
10788 	case OCS_EVT_RESPONSE:
10789 		ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);
10790 
10791 		ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
10792 		break;
10793 	case OCS_EVT_ERROR:
10794 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10795 		break;
10796 	default:
10797 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10798 		break;
10799 	}
10800 
10801 	return NULL;
10802 }
10803 
10804 static void *
10805 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10806 {
10807 	ocs_domain_t	*domain = ctx->app;
10808 	ocs_sli_port_t	*sport = domain->sport;
10809 	ocs_hw_t	*hw = domain->hw;
10810 
10811 	smtrace("domain");
10812 
10813 	switch (evt) {
10814 	case OCS_EVT_ENTER:
10815 		if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
10816 					domain->fcf_indicator, sport->indicator)) {
10817 			ocs_log_err(hw->os, "INIT_VFI format failure\n");
10818 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10819 			break;
10820 		}
10821 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10822 			ocs_log_err(hw->os, "INIT_VFI command failure\n");
10823 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10824 			break;
10825 		}
10826 		break;
10827 	case OCS_EVT_EXIT:
10828 		break;
10829 	case OCS_EVT_RESPONSE:
10830 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10831 		break;
10832 	case OCS_EVT_ERROR:
10833 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10834 		break;
10835 	default:
10836 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10837 		break;
10838 	}
10839 
10840 	return NULL;
10841 }
10842 
10843 static void *
10844 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10845 {
10846 	ocs_domain_t	*domain = ctx->app;
10847 	ocs_hw_t	*hw = domain->hw;
10848 
10849 	smtrace("domain");
10850 
10851 	switch (evt) {
10852 	case OCS_EVT_ENTER: {
10853 		sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
10854 		uint32_t i;
10855 
10856 		/* Set the filter match/mask values from hw's filter_def values */
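		/*
		 * Each 32-bit filter_def word packs four 8-bit fields,
		 * least-significant byte first (see the shifts below):
		 *   bits  7:0   r_ctl_mask    bits 15:8   r_ctl_match
		 *   bits 23:16  type_mask     bits 31:24  type_match
		 */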
10857 		for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
10858 			rq_cfg[i].rq_id = 0xffff;
10859 			rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
10860 			rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
10861 			rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
10862 			rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
10863 		}
10864 
10865 		/* Set the rq_id for each, in order of RQ definition */
10866 		for (i = 0; i < hw->hw_rq_count; i++) {
10867 			if (i >= ARRAY_SIZE(rq_cfg)) {
10868 				ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
10869 				break;
10870 			}
10871 			rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
10872 		}
10873 
10874 		if (!data) {
10875 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10876 			break;
10877 		}
10878 
10879 		if (hw->hw_mrq_count) {
10880 			if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
10881 				 domain->vlan_id, domain->fcf)) {
10882 				ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
10883 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10884 				break;
10885 			}
10886 
10887 		} else {
10888 			if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
10889 						rq_cfg, domain->vlan_id)) {
10890 				ocs_log_err(hw->os, "REG_FCFI format failure\n");
10891 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10892 				break;
10893 			}
10894 		}
10895 
10896 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10897 			ocs_log_err(hw->os, "REG_FCFI command failure\n");
10898 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10899 			break;
10900 		}
10901 		break;
10902 	}
10903 	case OCS_EVT_EXIT:
10904 		break;
10905 	case OCS_EVT_RESPONSE:
10906 		if (!data) {
10907 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10908 			break;
10909 		}
10910 
10911 		domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;
10912 
10913 		/*
10914 		 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
10915 		 * and instead rely on implicit initialization during VFI registration.
10916 		 * Short circuit normal processing here for those devices.
10917 		 */
10918 		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10919 			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10920 		} else {
10921 			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
10922 		}
10923 		break;
10924 	case OCS_EVT_ERROR:
10925 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10926 		break;
10927 	default:
10928 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10929 		break;
10930 	}
10931 
10932 	return NULL;
10933 }
10934 
10935 static void *
10936 __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10937 {
10938 	ocs_domain_t	*domain = ctx->app;
10939 	ocs_hw_t	*hw = domain->hw;
10940 
10941 	smtrace("domain");
10942 
10943 	switch (evt) {
10944 	case OCS_EVT_ENTER:
10945 		if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
10946 			/*
10947 			 * For FC, the HW already registered an FCFI
10948 			 * Copy FCF information into the domain and jump to INIT_VFI
10949 			 */
10950 			domain->fcf_indicator = hw->fcf_indicator;
10951 			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
10952 		} else {
10953 			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
10954 		}
10955 		break;
10956 	default:
10957 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10958 		break;
10959 	}
10960 
10961 	return NULL;
10962 }
10963 
10964 static void *
10965 __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10966 {
10967 	ocs_domain_t	*domain = ctx->app;
10968 
10969 	smtrace("domain");
10970 
10971 	switch (evt) {
10972 	case OCS_EVT_ENTER:
10973 		if (domain != NULL) {
10974 			ocs_hw_t	*hw = domain->hw;
10975 
10976 			ocs_hw_domain_del(hw, domain);
10977 
10978 			if (hw->callback.domain != NULL) {
10979 				hw->callback.domain(hw->args.domain,
10980 						     OCS_HW_DOMAIN_FREE_FAIL,
10981 						     domain);
10982 			}
10983 		}
10984 
10985 		/* free command buffer */
10986 		if (data != NULL) {
10987 			ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
10988 		}
10989 		break;
10990 	case OCS_EVT_EXIT:
10991 		break;
10992 	default:
10993 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10994 		break;
10995 	}
10996 
10997 	return NULL;
10998 }
10999 
11000 static void *
11001 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11002 {
11003 	ocs_domain_t	*domain = ctx->app;
11004 
11005 	smtrace("domain");
11006 
11007 	switch (evt) {
11008 	case OCS_EVT_ENTER:
11009 		/* Free DMA and mailbox buffer */
11010 		if (domain != NULL) {
11011 			ocs_hw_t *hw = domain->hw;
11012 
11013 			/* free VFI resource */
11014 			sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
11015 					  domain->indicator);
11016 
11017 			ocs_hw_domain_del(hw, domain);
11018 
11019 			/* inform registered callbacks */
11020 			if (hw->callback.domain != NULL) {
11021 				hw->callback.domain(hw->args.domain,
11022 						     OCS_HW_DOMAIN_FREE_OK,
11023 						     domain);
11024 			}
11025 		}
11026 		if (data != NULL) {
11027 			ocs_free(NULL, data, SLI4_BMBX_SIZE);
11028 		}
11029 		break;
11030 	case OCS_EVT_EXIT:
11031 		break;
11032 	default:
11033 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11034 		break;
11035 	}
11036 
11037 	return NULL;
11038 }
11039 
11040 
11041 static void *
11042 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11043 {
11044 	ocs_domain_t	*domain = ctx->app;
11045 	ocs_hw_t	*hw = domain->hw;
11046 
11047 	smtrace("domain");
11048 
11049 	switch (evt) {
11050 	case OCS_EVT_ENTER:
11051 		/* if we're in the middle of a teardown, skip sending rediscover */
11052 		if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
11053 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11054 			break;
11055 		}
11056 		if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
11057 			ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
11058 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11059 			break;
11060 		}
11061 
11062 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11063 			ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
11064 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11065 		}
11066 		break;
11067 	case OCS_EVT_RESPONSE:
11068 	case OCS_EVT_ERROR:
11069 		/* REDISCOVER_FCF can fail if none exist */
11070 		ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11071 		break;
11072 	case OCS_EVT_EXIT:
11073 		break;
11074 	default:
11075 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11076 		break;
11077 	}
11078 
11079 	return NULL;
11080 }
11081 
11082 static void *
11083 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11084 {
11085 	ocs_domain_t	*domain = ctx->app;
11086 	ocs_hw_t	*hw = domain->hw;
11087 
11088 	smtrace("domain");
11089 
11090 	switch (evt) {
11091 	case OCS_EVT_ENTER:
11092 		if (data == NULL) {
11093 			data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11094 			if (!data) {
11095 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11096 				break;
11097 			}
11098 		}
11099 
11100 		if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) {
11101 			ocs_log_err(hw->os, "UNREG_FCFI format failure\n");
11102 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11103 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11104 			break;
11105 		}
11106 
11107 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11108 			ocs_log_err(hw->os, "UNREG_FCFI command failure\n");
11109 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11110 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11111 			break;
11112 		}
11113 		break;
11114 	case OCS_EVT_RESPONSE:
11115 		if (domain->req_rediscover_fcf) {
11116 			domain->req_rediscover_fcf = FALSE;
11117 			ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data);
11118 		} else {
11119 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11120 		}
11121 		break;
11122 	case OCS_EVT_ERROR:
11123 		ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11124 		break;
11125 	case OCS_EVT_EXIT:
11126 		break;
11127 	default:
11128 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11129 		break;
11130 	}
11131 
11132 	return NULL;
11133 }
11134 
11135 static void *
11136 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11137 {
11138 	ocs_domain_t	*domain = ctx->app;
11139 	ocs_hw_t	*hw = domain->hw;
11140 	uint8_t		is_fc = FALSE;
11141 
11142 	smtrace("domain");
11143 
11144 	is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC);
11145 
11146 	switch (evt) {
11147 	case OCS_EVT_ENTER:
11148 		if (data == NULL) {
11149 			data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11150 			if (!data) {
11151 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11152 				break;
11153 			}
11154 		}
11155 
11156 		if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain,
11157 					SLI4_UNREG_TYPE_DOMAIN)) {
11158 			ocs_log_err(hw->os, "UNREG_VFI format failure\n");
11159 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11160 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11161 			break;
11162 		}
11163 
11164 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11165 			ocs_log_err(hw->os, "UNREG_VFI command failure\n");
11166 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11167 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11168 			break;
11169 		}
11170 		break;
11171 	case OCS_EVT_ERROR:
11172 		if (is_fc) {
11173 			ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11174 		} else {
11175 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11176 		}
11177 		break;
11178 	case OCS_EVT_RESPONSE:
11179 		if (is_fc) {
11180 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11181 		} else {
11182 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11183 		}
11184 		break;
11185 	default:
11186 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11187 		break;
11188 	}
11189 
11190 	return NULL;
11191 }
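
/*
 * Domain free flow summary (as implemented by the handlers above): on FC
 * media, UNREG_VFI completes the free directly; on FCoE, UNREG_VFI is
 * followed by UNREG_FCFI and, if a rediscover was requested, by
 * REDISCOVER_FCF, before the domain is finally reported freed.
 */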
11192 
11193 /* callback for domain alloc/attach/free */
11194 static int32_t
11195 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11196 {
11197 	ocs_domain_t	*domain = arg;
11198 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
11199 	ocs_sm_event_t	evt;
11200 
11201 	if (status || hdr->status) {
11202 		ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n",
11203 			      domain->indicator, status, hdr->status);
11204 		evt = OCS_EVT_ERROR;
11205 	} else {
11206 		evt = OCS_EVT_RESPONSE;
11207 	}
11208 
11209 	ocs_sm_post_event(&domain->sm, evt, mqe);
11210 
11211 	return 0;
11212 }
11213 
11214 static int32_t
11215 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11216 {
11217 	ocs_hw_io_t *io = NULL;
11218 	ocs_hw_io_t *io_next = NULL;
11219 	uint64_t ticks_current = ocs_get_os_ticks();
11220 	uint32_t sec_elapsed;
11221 	ocs_hw_rtn_e rc;
11222 
11223 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
11224 
11225 	if (status || hdr->status) {
11226 		ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n",
11227 			      status, hdr->status);
11228 		/* go ahead and proceed with wqe timer checks... */
11229 	}
11230 
11231 	/* loop through active WQE list and check for timeouts */
11232 	ocs_lock(&hw->io_lock);
11233 	ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
11234 		sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
11235 
11236 		/*
11237 		 * If elapsed time > timeout, abort it. No need to check type since
11238 		 * it wouldn't be on this list unless it was a target WQE
11239 		 */
11240 		if (sec_elapsed > io->tgt_wqe_timeout) {
11241 			ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
11242 				     io->indicator, io->reqtag, io->type);
11243 
11244 			/* remove from the io_timed_wqe list so we won't try to abort it again */
11245 			ocs_list_remove(&hw->io_timed_wqe, io);
11246 
11247 			/* save status of "timed out" for when abort completes */
11248 			io->status_saved = 1;
11249 			io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
11250 			io->saved_ext = 0;
11251 			io->saved_len = 0;
11252 
11253 			/* now abort outstanding IO */
11254 			rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
11255 			if (rc) {
11256 				ocs_log_test(hw->os,
11257 					"abort failed xri=%#x tag=%#x rc=%d\n",
11258 					io->indicator, io->reqtag, rc);
11259 			}
11260 		}
11261 		/*
11262 		 * need to go through entire list since each IO could have a
11263 		 * different timeout value
11264 		 */
11265 	}
11266 	ocs_unlock(&hw->io_lock);
11267 
11268 	/* if we're not in the middle of shutting down, schedule next timer */
11269 	if (!hw->active_wqe_timer_shutdown) {
11270 		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS);
11271 	}
11272 	hw->in_active_wqe_timer = FALSE;
11273 	return 0;
11274 }
11275 
11276 static void
11277 target_wqe_timer_cb(void *arg)
11278 {
11279 	ocs_hw_t *hw = (ocs_hw_t *)arg;
11280 
11281 	/* delete existing timer; will kick off new timer after checking wqe timeouts */
11282 	hw->in_active_wqe_timer = TRUE;
11283 	ocs_del_timer(&hw->wqe_timer);
11284 
11285 	/* Forward timer callback to execute in the mailbox completion processing context */
11286 	if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) {
11287 		ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
11288 	}
11289 }
11290 
11291 static void
11292 shutdown_target_wqe_timer(ocs_hw_t *hw)
11293 {
11294 	uint32_t	iters = 100;
11295 
11296 	if (hw->config.emulate_tgt_wqe_timeout) {
11297 		/* request active wqe timer shutdown, then wait for it to complete */
11298 		hw->active_wqe_timer_shutdown = TRUE;
11299 
11300 		/* delete the WQE timer */
11301 		ocs_del_timer(&hw->wqe_timer);
11302 
11303 		/* now wait for timer handler to complete (if necessary) */
11304 		while (hw->in_active_wqe_timer && iters) {
11305 			/*
11306 			 * if we happen to have just sent a NOP mailbox command, make sure
11307 			 * completions are being processed
11308 			 */
11309 			ocs_hw_flush(hw);
11310 			iters--;
11311 		}
11312 
11313 		if (iters == 0) {
11314 			ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n");
11315 		}
11316 	}
11317 }
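
/*
 * Illustrative lifecycle of the target WQE timer (function and field names
 * are real; the sequencing below is a sketch, not driver code). The timer
 * is only used when emulate_tgt_wqe_timeout is configured.
 */
#if 0
/* arm the periodic timeout scan */
ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
		OCS_HW_WQ_TIMER_PERIOD_MS);

/* each firing defers the scan to the mailbox completion context via a NOP
 * command; target_wqe_timer_nop_cb() then re-arms the timer */

/* on teardown, quiesce the timer before freeing HW resources */
shutdown_target_wqe_timer(hw);
#endif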
11318 
11319 /**
11320  * @brief Determine if HW IO is owned by the port.
11321  *
11322  * @par Description
11323  * Determines if the given HW IO has been posted to the chip.
11324  *
11325  * @param hw Hardware context allocated by the caller.
11326  * @param io HW IO.
11327  *
11328  * @return Returns TRUE if given HW IO is port-owned.
11329  */
11330 uint8_t
11331 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
11332 {
11333 	/* Check to see if this is a port owned XRI */
11334 	return io->is_port_owned;
11335 }
11336 
11337 /**
11338  * @brief Return TRUE if exchange is port-owned.
11339  *
11340  * @par Description
11341  * Test to see if the xri is a port-owned xri.
11342  *
11343  * @param hw Hardware context.
11344  * @param xri Exchange indicator.
11345  *
11346  * @return Returns TRUE if XRI is a port owned XRI.
11347  */
11348 
11349 uint8_t
11350 ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
11351 {
11352 	ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
11353 	return (io == NULL ? FALSE : io->is_port_owned);
11354 }
11355 
11356 /**
11357  * @brief Returns an XRI from the port owned list to the host.
11358  *
11359  * @par Description
11360  * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
11361  *
11362  * @param hw Hardware context.
11363  * @param xri_base The starting XRI number.
11364  * @param xri_count The number of XRIs to free from the base.
11365  */
11366 static void
11367 ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
11368 {
11369 	ocs_hw_io_t	*io;
11370 	uint32_t i;
11371 
11372 	for (i = 0; i < xri_count; i++) {
11373 		io = ocs_hw_io_lookup(hw, xri_base + i);
11374 
11375 		/*
11376 		 * if this is an auto xfer rdy XRI, then we need to release any
11377 		 * buffer attached to the XRI before moving the XRI back to the free pool.
11378 		 */
11379 		if (hw->auto_xfer_rdy_enabled) {
11380 			ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
11381 		}
11382 
11383 		ocs_lock(&hw->io_lock);
11384 			ocs_list_remove(&hw->io_port_owned, io);
11385 			io->is_port_owned = 0;
11386 			ocs_list_add_tail(&hw->io_free, io);
11387 		ocs_unlock(&hw->io_lock);
11388 	}
11389 }
11390 
11391 /**
11392  * @brief Called when the POST_XRI command completes.
11393  *
11394  * @par Description
11395  * Free the mailbox command buffer and reclaim the XRIs on failure.
11396  *
11397  * @param hw Hardware context.
11398  * @param status Status field from the mbox completion.
11399  * @param mqe Mailbox response structure.
11400  * @param arg Pointer to a callback function that signals the caller that the command is done.
11401  *
11402  * @return Returns 0.
11403  */
11404 static int32_t
11405 ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
11406 {
11407 	sli4_cmd_post_xri_t	*post_xri = (sli4_cmd_post_xri_t*)mqe;
11408 
11409 	/* Reclaim the XRIs as host owned if the command fails */
11410 	if (status != 0) {
11411 		ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt=0x%x\n",
11412 			      status, post_xri->xri_base, post_xri->xri_count);
11413 		ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
11414 	}
11415 
11416 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11417 	return 0;
11418 }
11419 
11420 /**
11421  * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
11422  *
11423  * @param hw Hardware context.
11424  * @param xri_start The starting XRI to post.
11425  * @param num_to_post The number of XRIs to post.
11426  *
11427  * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
11428  */
11429 
11430 static ocs_hw_rtn_e
11431 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post)
11432 {
11433 	uint8_t	*post_xri;
11434 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11435 
11436 	/* a non-embedded buffer is required for the mailbox queue, so always allocate one */
11437 	post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11438 	if (post_xri == NULL) {
11439 		ocs_log_err(hw->os, "no buffer for command\n");
11440 		return OCS_HW_RTN_NO_MEMORY;
11441 	}
11442 
11443 	/* Register the XRIs */
11444 	if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE,
11445 			     xri_start, num_to_post)) {
11446 		rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL);
11447 		if (rc != OCS_HW_RTN_SUCCESS) {
11448 			ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
11449 			ocs_log_err(hw->os, "post_xri failed\n");
11450 		}
11451 	} else {
		/* the command could not be formatted; free the buffer to avoid leaking it */
		ocs_log_err(hw->os, "POST_XRI format failure\n");
		ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
	}
11452 	return rc;
11453 }
11454 
11455 /**
11456  * @brief Move XRIs from the host-controlled pool to the port.
11457  *
11458  * @par Description
11459  * Removes IOs from the free list and moves them to the port.
11460  *
11461  * @param hw Hardware context.
11462  * @param num_xri The number of XRIs being requested to move to the chip.
11463  *
11464  * @return Returns the number of XRIs that were moved.
11465  */
11466 
11467 uint32_t
11468 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri)
11469 {
11470 	ocs_hw_io_t	*io;
11471 	uint32_t i;
11472 	uint32_t num_posted = 0;
11473 
11474 	/*
11475 	 * Note: We cannot use ocs_hw_io_alloc() because that would place the
11476 	 *       IO on the io_inuse list. We need to move from the io_free to
11477 	 *       the io_port_owned list.
11478 	 */
11479 	ocs_lock(&hw->io_lock);
11480 
11481 	for (i = 0; i < num_xri; i++) {
11482 
11483 		if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
11484 			ocs_hw_rtn_e rc;
11485 
11486 			/*
11487 			 * if this is an auto xfer rdy XRI, then we need to attach a
11488 			 * buffer to the XRI before submitting it to the chip. If a
11489 			 * buffer is unavailable, then we cannot post it, so return it
11490 			 * to the free pool.
11491 			 */
11492 			if (hw->auto_xfer_rdy_enabled) {
11493 				/* Note: uses the IO lock to get the auto xfer rdy buffer */
11494 				ocs_unlock(&hw->io_lock);
11495 				rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
11496 				ocs_lock(&hw->io_lock);
11497 				if (rc != OCS_HW_RTN_SUCCESS) {
11498 					ocs_list_add_head(&hw->io_free, io);
11499 					break;
11500 				}
11501 			}
11502 			ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator);
11503 			io->is_port_owned = 1;
11504 			ocs_list_add_tail(&hw->io_port_owned, io);
11505 
11506 			/* Post XRI */
11507 			if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) {
11508 				ocs_hw_reclaim_xri(hw, io->indicator, i);
11509 				break;
11510 			}
11511 			num_posted++;
11512 		} else {
11513 			/* no more free XRIs */
11514 			break;
11515 		}
11516 	}
11517 	ocs_unlock(&hw->io_lock);
11518 
11519 	return num_posted;
11520 }
11521 
11522 /**
11523  * @brief Called when the RELEASE_XRI command completes.
11524  *
11525  * @par Description
11526  * Move the IOs back to the free pool on success.
11527  *
11528  * @param hw Hardware context.
11529  * @param status Status field from the mbox completion.
11530  * @param mqe Mailbox response structure.
11531  * @param arg Pointer to a callback function that signals the caller that the command is done.
11532  *
11533  * @return Returns 0.
11534  */
11535 static int32_t
11536 ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
11537 {
11538 	sli4_cmd_release_xri_t	*release_xri = (sli4_cmd_release_xri_t*)mqe;
11539 	uint8_t i;
11540 
11541 	/* On success, reclaim the released XRIs back to the host-owned pool */
11542 	if (status != 0) {
11543 		ocs_log_err(hw->os, "Status 0x%x\n", status);
11544 	} else {
11545 		for (i = 0; i < release_xri->released_xri_count; i++) {
11546 			uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
11547 					release_xri->xri_tbl[i/2].xri_tag1);
11548 			ocs_hw_reclaim_xri(hw, xri, 1);
11549 		}
11550 	}
11551 
11552 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11553 	return 0;
11554 }
11555 
11556 /**
11557  * @brief Move XRIs from the port-controlled pool to the host.
11558  *
11559  * Requests that the FW release XRIs back to the host-owned pool.
11560  *
11561  * @param hw Hardware context.
11562  * @param num_xri The number of XRIs requested to be moved from the chip.
11563  *
11564  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
11565  */
11566 
11567 ocs_hw_rtn_e
11568 ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
11569 {
11570 	uint8_t	*release_xri;
11571 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11572 
11573 	/* non-local buffer required for mailbox queue */
11574 	release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11575 	if (release_xri == NULL) {
11576 		ocs_log_err(hw->os, "no buffer for command\n");
11577 		return OCS_HW_RTN_NO_MEMORY;
11578 	}
11579 
11580 	/* release the XRIs */
11581 	if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
11582 		rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
11583 		if (rc != OCS_HW_RTN_SUCCESS) {
11584 			ocs_log_err(hw->os, "release_xri failed\n");
11585 		}
11586 	}
11587 	/* If the command was not submitted successfully, free the mailbox buffer */
11588 	if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) {
11589 		ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
11590 	}
11591 	return rc;
11592 }
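
/*
 * Illustrative usage of the XRI ownership calls above (the count of 16 is
 * an arbitrary example value): hand a batch of XRIs to the port, and later
 * ask the port to release them back to the host.
 */
#if 0
uint32_t posted = ocs_hw_xri_move_to_port_owned(hw, 16);

/* ... */

if (posted != 0 &&
    ocs_hw_xri_move_to_host_owned(hw, (uint8_t)posted) != OCS_HW_RTN_SUCCESS) {
	ocs_log_err(hw->os, "failed to request XRI release\n");
}
#endif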
11593 
11594 
11595 /**
11596  * @brief Allocate an ocs_hw_rq_buffer_t array.
11597  *
11598  * @par Description
11599  * An ocs_hw_rq_buffer_t array is allocated, along with the required DMA memory.
11600  *
11601  * @param hw Pointer to HW object.
11602  * @param rqindex RQ index for this buffer.
11603  * @param count Count of buffers in array.
11604  * @param size Size of buffer.
11605  *
11606  * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array, or NULL on failure.
11607  */
11608 static ocs_hw_rq_buffer_t *
11609 ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
11610 {
11611 	ocs_t *ocs = hw->os;
11612 	ocs_hw_rq_buffer_t *rq_buf = NULL;
11613 	ocs_hw_rq_buffer_t *prq;
11614 	uint32_t i;
11615 
11616 	if (count != 0) {
11617 		rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
11618 		if (rq_buf == NULL) {
11619 			ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
11620 			return NULL;
11621 		}
11622 
11623 		for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
11624 			prq->rqindex = rqindex;
11625 			if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
11626 				ocs_log_err(hw->os, "DMA allocation failed\n");
				/* free any DMA buffers already allocated before releasing the array */
				while (prq != rq_buf) {
					prq--;
					ocs_dma_free(ocs, &prq->dma);
				}
11627 				ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11628 				rq_buf = NULL;
11629 				break;
11630 			}
11631 		}
11632 	}
11633 	return rq_buf;
11634 }
11635 
11636 /**
11637  * @brief Free an ocs_hw_rq_buffer_t array.
11638  *
11639  * @par Description
11640  * The ocs_hw_rq_buffer_t array is freed, along with the allocated DMA memory.
11641  *
11642  * @param hw Pointer to HW object.
11643  * @param rq_buf Pointer to ocs_hw_rq_buffer_t array.
11644  * @param count Count of buffers in array.
11645  *
11646  * @return None.
11647  */
11648 static void
11649 ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count)
11650 {
11651 	ocs_t *ocs = hw->os;
11652 	uint32_t i;
11653 	ocs_hw_rq_buffer_t *prq;
11654 
11655 	if (rq_buf != NULL) {
11656 		for (i = 0, prq = rq_buf; i < count; i++, prq++) {
11657 			ocs_dma_free(ocs, &prq->dma);
11658 		}
11659 		ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11660 	}
11661 }
11662 
11663 /**
11664  * @brief Allocate the RQ data buffers.
11665  *
11666  * @param hw Pointer to HW object.
11667  *
11668  * @return Returns 0 on success, or a non-zero value on failure.
11669  */
11670 ocs_hw_rtn_e
11671 ocs_hw_rx_allocate(ocs_hw_t *hw)
11672 {
11673 	ocs_t *ocs = hw->os;
11674 	uint32_t i;
11675 	int32_t rc = OCS_HW_RTN_SUCCESS;
11676 	uint32_t rqindex = 0;
11677 	hw_rq_t *rq;
11678 	uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR;
11679 	uint32_t payload_size = hw->config.rq_default_buffer_size;
11680 
11681 	rqindex = 0;
11682 
11683 	for (i = 0; i < hw->hw_rq_count; i++) {
11684 		rq = hw->hw_rq[i];
11685 
11686 		/* Allocate header buffers */
11687 		rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size);
11688 		if (rq->hdr_buf == NULL) {
11689 			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n");
11690 			rc = OCS_HW_RTN_ERROR;
11691 			break;
11692 		}
11693 
11694 		ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header  %4d by %4d bytes\n", i, rq->hdr->id,
11695 			      rq->entry_count, hdr_size);
11696 
11697 		rqindex++;
11698 
11699 		/* Allocate payload buffers */
11700 		rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size);
11701 		if (rq->payload_buf == NULL) {
11702 			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc payload_buf failed\n");
11703 			rc = OCS_HW_RTN_ERROR;
11704 			break;
11705 		}
11706 		ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id,
11707 			      rq->entry_count, payload_size);
11708 		rqindex++;
11709 	}
11710 
11711 	return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
11712 }
11713 
11714 /**
11715  * @brief Post the RQ data buffers to the chip.
11716  *
11717  * @param hw Pointer to HW object.
11718  *
11719  * @return Returns 0 on success, or a non-zero value on failure.
11720  */
11721 ocs_hw_rtn_e
11722 ocs_hw_rx_post(ocs_hw_t *hw)
11723 {
11724 	uint32_t i;
11725 	uint32_t idx;
11726 	uint32_t rq_idx;
11727 	int32_t rc = 0;
11728 
11729 	/*
11730 	 * In RQ pair mode, we MUST post the header and payload buffer at the
11731 	 * same time.
11732 	 */
11733 	for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
11734 		hw_rq_t *rq = hw->hw_rq[rq_idx];
11735 
11736 		for (i = 0; i < rq->entry_count-1; i++) {
11737 			ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++);
11738 			ocs_hw_assert(seq != NULL);
11739 
11740 			seq->header = &rq->hdr_buf[i];
11741 
11742 			seq->payload = &rq->payload_buf[i];
11743 
11744 			rc = ocs_hw_sequence_free(hw, seq);
11745 			if (rc) {
11746 				break;
11747 			}
11748 		}
11749 		if (rc) {
11750 			break;
11751 		}
11752 	}
11753 
11754 	return rc;
11755 }
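
/*
 * Note: only entry_count - 1 sequences are primed per RQ above; presumably
 * one slot is held back so that a completely full ring can be distinguished
 * from an empty one (a common producer/consumer ring convention).
 */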
11756 
11757 /**
11758  * @brief Free the RQ data buffers.
11759  *
11760  * @param hw Pointer to HW object.
11761  *
11762  */
11763 void
11764 ocs_hw_rx_free(ocs_hw_t *hw)
11765 {
11766 	hw_rq_t *rq;
11767 	uint32_t i;
11768 
11769 	/* Free hw_rq buffers */
11770 	for (i = 0; i < hw->hw_rq_count; i++) {
11771 		rq = hw->hw_rq[i];
11772 		if (rq != NULL) {
11773 			ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
11774 			rq->hdr_buf = NULL;
11775 			ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
11776 			rq->payload_buf = NULL;
11777 		}
11778 	}
11779 }
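
/*
 * Illustrative setup/teardown ordering for the three RX calls above
 * (a sketch; error handling elided):
 */
#if 0
if (ocs_hw_rx_allocate(hw) == OCS_HW_RTN_SUCCESS) {
	ocs_hw_rx_post(hw);	/* prime each RQ pair with hdr+payload buffers */
}

/* ... normal operation ... */

ocs_hw_rx_free(hw);		/* release the DMA trackers on shutdown */
#endif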
11780 
11781 /**
11782  * @brief HW async call context structure.
11783  */
11784 typedef struct {
11785 	ocs_hw_async_cb_t callback;
11786 	void *arg;
11787 	uint8_t cmd[SLI4_BMBX_SIZE];
11788 } ocs_hw_async_call_ctx_t;
11789 
11790 /**
11791  * @brief HW async callback handler
11792  *
11793  * @par Description
11794  * This function is called when the NOP mailbox command completes.  The callback stored
11795  * in the requesting context is invoked.
11796  *
11797  * @param hw Pointer to HW object.
11798  * @param status Completion status.
11799  * @param mqe Pointer to mailbox completion queue entry.
11800  * @param arg Caller-provided argument.
11801  *
11802  * @return None.
11803  */
11804 static void
11805 ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11806 {
11807 	ocs_hw_async_call_ctx_t *ctx = arg;
11808 
11809 	if (ctx != NULL) {
11810 		if (ctx->callback != NULL) {
11811 			(*ctx->callback)(hw, status, mqe, ctx->arg);
11812 		}
11813 		ocs_free(hw->os, ctx, sizeof(*ctx));
11814 	}
11815 }
11816 
11817 /**
11818  * @brief Make an async callback using NOP mailbox command
11819  *
11820  * @par Description
11821  * Post a NOP mailbox command; the callback with argument is invoked upon completion
11822  * while in the event processing context.
11823  *
11824  * @param hw Pointer to HW object.
11825  * @param callback Pointer to callback function.
11826  * @param arg Caller-provided callback argument.
11827  *
11828  * @return Returns 0 on success, or a negative error code value on failure.
11829  */
11830 int32_t
11831 ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
11832 {
11833 	int32_t rc = 0;
11834 	ocs_hw_async_call_ctx_t *ctx;
11835 
11836 	/*
11837 	 * Allocate a callback context (which includes the mailbox command buffer). It
11838 	 * must be persistent, as the mailbox command submission may be queued and
11839 	 * executed later.
11840 	 */
11841 	ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
11842 	if (ctx == NULL) {
11843 		ocs_log_err(hw->os, "failed to malloc async call context\n");
11844 		return OCS_HW_RTN_NO_MEMORY;
11845 	}
11846 	ctx->callback = callback;
11847 	ctx->arg = arg;
11848 
11849 	/* Build and send a NOP mailbox command */
11850 	if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
11851 		ocs_log_err(hw->os, "COMMON_NOP format failure\n");
11852 		ocs_free(hw->os, ctx, sizeof(*ctx));
		/* ctx was freed above; return now so it is not passed to ocs_hw_command() */
11853 		return -1;
11854 	}
11855 
11856 	if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
11857 		ocs_log_err(hw->os, "COMMON_NOP command failure\n");
11858 		ocs_free(hw->os, ctx, sizeof(*ctx));
11859 		rc = -1;
11860 	}
11861 	return rc;
11862 }
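
/*
 * Illustrative caller of ocs_hw_async_call() (my_deferred_fn and my_arg are
 * hypothetical): defer work into the mailbox completion processing context,
 * as __ocs_hw_port_free_nop() and target_wqe_timer_cb() do elsewhere in
 * this file.
 */
#if 0
static int32_t my_deferred_fn(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);

if (ocs_hw_async_call(hw, my_deferred_fn, my_arg)) {
	ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
}
#endif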
11863 
11864 /**
11865  * @brief Initialize the reqtag pool.
11866  *
11867  * @par Description
11868  * The WQ request tag pool is initialized.
11869  *
11870  * @param hw Pointer to HW object.
11871  *
11872  * @return Returns 0 on success, or a negative error code value on failure.
11873  */
11874 ocs_hw_rtn_e
11875 ocs_hw_reqtag_init(ocs_hw_t *hw)
11876 {
11877 	if (hw->wq_reqtag_pool == NULL) {
11878 		hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE);
11879 		if (hw->wq_reqtag_pool == NULL) {
11880 			ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n");
11881 			return OCS_HW_RTN_NO_MEMORY;
11882 		}
11883 	}
11884 	ocs_hw_reqtag_reset(hw);
11885 	return OCS_HW_RTN_SUCCESS;
11886 }
11887 
11888 /**
11889  * @brief Allocate a WQ request tag.
11890  *
11891  * Allocate and populate a WQ request tag from the WQ request tag pool.
11892  *
11893  * @param hw Pointer to HW object.
11894  * @param callback Callback function.
11895  * @param arg Pointer to callback argument.
11896  *
11897  * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
11898  */
11899 hw_wq_callback_t *
11900 ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
11901 {
11902 	hw_wq_callback_t *wqcb;
11903 
11904 	ocs_hw_assert(callback != NULL);
11905 
11906 	wqcb = ocs_pool_get(hw->wq_reqtag_pool);
11907 	if (wqcb != NULL) {
11908 		ocs_hw_assert(wqcb->callback == NULL);
11909 		wqcb->callback = callback;
11910 		wqcb->arg = arg;
11911 	}
11912 	return wqcb;
11913 }
11914 
11915 /**
11916  * @brief Free a WQ request tag.
11917  *
11918  * Free the passed in WQ request tag.
11919  *
11920  * @param hw Pointer to HW object.
11921  * @param wqcb Pointer to WQ request tag object to free.
11922  *
11923  * @return None.
11924  */
11925 void
11926 ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb)
11927 {
11928 	ocs_hw_assert(wqcb->callback != NULL);
11929 	wqcb->callback = NULL;
11930 	wqcb->arg = NULL;
11931 	ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11932 }
11933 
11934 /**
11935  * @brief Return WQ request tag by index.
11936  *
11937  * @par Description
11938  * Return pointer to WQ request tag object given an index.
11939  *
11940  * @param hw Pointer to HW object.
11941  * @param instance_index Index of WQ request tag to return.
11942  *
11943  * @return Pointer to WQ request tag, or NULL.
11944  */
11945 hw_wq_callback_t *
11946 ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index)
11947 {
11948 	hw_wq_callback_t *wqcb;
11949 
11950 	wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index);
11951 	if (wqcb == NULL) {
11952 		ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index);
11953 	}
11954 	return wqcb;
11955 }
11956 
11957 /**
11958  * @brief Reset the WQ request tag pool.
11959  *
11960  * @par Description
11961  * Reset the WQ request tag pool, returning all to the free list.
11962  *
11963  * @param hw pointer to HW object.
11964  *
11965  * @return None.
11966  */
11967 void
11968 ocs_hw_reqtag_reset(ocs_hw_t *hw)
11969 {
11970 	hw_wq_callback_t *wqcb;
11971 	uint32_t i;
11972 
	/* Remove all from freelist */
	while (ocs_pool_get(hw->wq_reqtag_pool) != NULL) {
		;
	}
11977 
11978 	/* Put them all back */
11979 	for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) {
11980 		wqcb->instance_index = i;
11981 		wqcb->callback = NULL;
11982 		wqcb->arg = NULL;
11983 		ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11984 	}
11985 }
11986 
11987 /**
11988  * @brief Handle HW assertion
11989  *
11990  * HW assert, display diagnostic message, and abort.
11991  *
11992  * @param cond string describing failing assertion condition
11993  * @param filename file name
11994  * @param linenum line number
11995  *
11996  * @return none
11997  */
11998 void
11999 _ocs_hw_assert(const char *cond, const char *filename, int linenum)
12000 {
	ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
	ocs_abort();
	/* no return */
12004 }
12005 
12006 /**
12007  * @brief Handle HW verify
12008  *
12009  * HW verify, display diagnostic message, dump stack and return.
12010  *
12011  * @param cond string describing failing verify condition
12012  * @param filename file name
12013  * @param linenum line number
12014  *
12015  * @return none
12016  */
12017 void
12018 _ocs_hw_verify(const char *cond, const char *filename, int linenum)
12019 {
12020 	ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
12021 	ocs_print_stack();
12022 }
12023 
/**
 * @brief Requeue an XRI.
 *
 * @par Description
 * Requeue the XRI by reposting its auto xfer rdy buffer and submitting a
 * REQUEUE_XRI WQE.
 *
 * @param hw Pointer to HW object.
 * @param io Pointer to HW IO.
 *
 * @return Returns 0 on success, or -1 on failure.
 */
int32_t
ocs_hw_reque_xri(ocs_hw_t *hw, ocs_hw_io_t *io)
12037 {
12038 	int32_t rc = 0;
12039 
12040 	rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
12041 	if (rc) {
12042 		ocs_list_add_tail(&hw->io_port_dnrx, io);
12043 		rc = -1;
12044 		goto exit_ocs_hw_reque_xri;
12045 	}
12046 
12047 	io->auto_xfer_rdy_dnrx = 0;
12048 	io->type = OCS_HW_IO_DNRX_REQUEUE;
12049 	if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
12050 		/* Clear buffer from XRI */
12051 		ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
12052 		io->axr_buf = NULL;
12053 
12054 		ocs_log_err(hw->os, "requeue_xri WQE error\n");
12055 		ocs_list_add_tail(&hw->io_port_dnrx, io);
12056 
12057 		rc = -1;
12058 		goto exit_ocs_hw_reque_xri;
12059 	}
12060 
12061 	if (io->wq == NULL) {
12062 		io->wq = ocs_hw_queue_next_wq(hw, io);
12063 		ocs_hw_assert(io->wq != NULL);
12064 	}
12065 
12066 	/*
12067 	 * Add IO to active io wqe list before submitting, in case the
12068 	 * wcqe processing preempts this thread.
12069 	 */
12070 	OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
12071 	OCS_STAT(io->wq->use_count++);
12072 
	rc = hw_wq_write(io->wq, &io->wqe);
	if (rc < 0) {
		ocs_log_err(hw->os, "hw_wq_write requeue xri failed: %d\n", rc);
		rc = -1;
	}
12078 
exit_ocs_hw_reque_xri:
	return rc;
12081 }
12082 
12083 uint32_t
12084 ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
12085 {
12086 	sli4_t *sli4 = &ocs->hw.sli;
12087 	ocs_dma_t       dma;
12088 	uint8_t		*payload = NULL;
12089 
12090 	int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
12091 
12092 	/* allocate memory for the service parameters */
12093 	if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
12094 		ocs_log_err(ocs, "Failed to allocate DMA memory\n");
12095 		return 1;
12096 	}
12097 
12098 	if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
12099 				&dma, indicator)) {
		ocs_log_err(ocs, "READ_SPARM64 format failure\n");
12101 		ocs_dma_free(ocs, &dma);
12102 		return 1;
12103 	}
12104 
12105 	if (sli_bmbx_command(sli4)) {
12106 		ocs_log_err(ocs, "READ_SPARM64 command failure\n");
12107 		ocs_dma_free(ocs, &dma);
12108 		return 1;
12109 	}
12110 
12111 	payload = dma.virt;
12112 	ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
12113 	ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
12114 	ocs_dma_free(ocs, &dma);
12115 	return 0;
12116 }
12117 
12118 /**
12119  * @page fc_hw_api_overview HW APIs
12120  * - @ref devInitShutdown
12121  * - @ref domain
12122  * - @ref port
12123  * - @ref node
12124  * - @ref io
12125  * - @ref interrupt
12126  *
12127  * <div class="overview">
12128  * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
 * message details, but the higher-level code must still manage domains, ports,
12130  * IT nexuses, and IOs. The HW API is designed to help the higher level manage
12131  * these objects.<br><br>
12132  *
12133  * The HW uses function callbacks to notify the higher-level code of events
12134  * that are received from the chip. There are currently three types of
12135  * functions that may be registered:
12136  *
 * <ul><li>domain – This function is called whenever a domain event is generated
 * within the HW. Examples include discovery of a new FCF, disruption of the
 * connection to a domain, and allocation callbacks.</li>
 * <li>unsolicited – This function is called whenever new data is received in
 * the SLI-4 receive queue.</li>
 * <li>rnode – This function is called for remote node events, such as attach status
 * and allocation callbacks.</li></ul>
12144  *
 * Upper layer functions may be registered by using the ocs_hw_callback() function,
 * as in the sketch below.
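 *
 * A minimal, hypothetical sketch (the OCS_HW_CB_DOMAIN selector and the domain
 * callback signature are assumed from ocs_hw.h; error handling is elided):
 *
 * @code
 * // Called by the HW whenever a domain event is generated.
 * static int32_t
 * my_domain_cb(void *arg, ocs_hw_domain_event_e event, void *data)
 * {
 * 	// React to domain discovery or loss here.
 * 	return 0;
 * }
 *
 * // Registered during driver bring-up, before ocs_hw_init():
 * ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, my_domain_cb, driver_ctx);
 * @endcode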
12146  *
12147  * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
12148  * <h2>FC/FCoE HW API</h2>
12149  * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
12150  * interface for creating the necessary common objects and sending I/Os. It may be used
12151  * “as is” in customer implementations or it can serve as an example of typical interactions
12152  * between a driver and the SLI-4 hardware. The broad categories of functionality include:
12153  *
12154  * <ul><li>Setting-up and tearing-down of the HW.</li>
12155  * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
12156  * <li>Sending and receiving I/Os.</li></ul>
12157  *
12158  * <h3>HW Setup</h3>
 * To set up the HW (a sketch of this sequence follows the list):
12160  *
12161  * <ol>
12162  * <li>Set up the HW object using ocs_hw_setup().<br>
12163  * This step performs a basic configuration of the SLI-4 component and the HW to
12164  * enable querying the hardware for its capabilities. At this stage, the HW is not
 * capable of general operations (such as receiving events or sending I/Os).</li><br><br>
 * <li>Configure the HW according to the driver requirements.<br>
 * The HW provides functions to discover hardware capabilities (ocs_hw_get()) and
 * to configure the resources required (ocs_hw_set()). The driver
 * must also register callback functions (ocs_hw_callback()) to receive notification of
 * various asynchronous events.<br><br>
 * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
 * step creates the underlying queues, commits resources to the hardware, and
 * prepares the hardware for operation. Although the hardware is operational at this
 * point, the port is not online and cannot send or receive data.</li><br><br>
12176  * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
12177  * When the link comes up, the HW determines if a domain is present and notifies the
12178  * driver using the domain callback function. This is the starting point of the driver's
12179  * interaction with the common objects.<br><br>
12180  * @b Note: For FCoE, there may be more than one domain available and, therefore,
12181  * more than one callback.</li>
12182  * </ol>
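 *
 * A minimal sketch of that sequence, with error handling elided (the specific
 * ocs_hw_get()/ocs_hw_set() keys shown are illustrative; a driver chooses its
 * own set):
 *
 * @code
 * ocs_hw_t *hw = &ocs->hw;
 * uint32_t max_io = 0;
 *
 * // Step 1: basic setup, enough to query capabilities.
 * ocs_hw_setup(hw, ocs, SLI4_PORT_TYPE_FC);
 *
 * // Step 2: discover and configure resources, register callbacks,
 * // then commit everything with ocs_hw_init().
 * ocs_hw_get(hw, OCS_HW_MAX_IO, &max_io);
 * ocs_hw_set(hw, OCS_HW_N_IO, max_io);
 * ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, my_domain_cb, ocs);
 * ocs_hw_init(hw);
 *
 * // Step 3: bring the port online; domain events arrive via the callback.
 * ocs_hw_port_control(hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
 * @endcode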
12183  *
12184  * <h3>Allocating and Using Common Objects</h3>
12185  * Common objects provide a mechanism through which the various OneCore Storage
12186  * driver components share and track information. These data structures are primarily
12187  * used to track SLI component information but can be extended by other components, if
12188  * needed. The main objects are:
12189  *
12190  * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
12191  * memory access (DMA) transactions.</li>
12192  * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
12193  * any infrastructure devices such as FC switches and FC forwarders. The domain
12194  * object contains both an FCFI and a VFI.</li>
12195  * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
12196  * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
12197  * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
12198  * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
12199  *
12200  * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
12201  * node common objects and establish the connections between them. The goal is to
12202  * connect the driver to the SCSI domain to exchange I/Os with other devices. These
12203  * common object connections are shown in the following figure, FC Driver Common Objects:
12204  * <img src="elx_fc_common_objects.jpg"
12205  * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
12206  *
12207  * The first step is to create a connection to the domain by allocating an SLI Port object.
12208  * The SLI Port object represents a particular FC ID and must be initialized with one. With
12209  * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
12210  * a domain, the driver allocates a domain object and attaches to it using the previous SLI
12211  * port object.<br><br>
12212  *
12213  * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
12214  * FLOGI) with the domain before attaching.<br><br>
12215  *
12216  * Once attached to the domain, the driver can discover and attach to other devices
12217  * (remote nodes). The exact discovery method depends on the driver, but it typically
12218  * includes using a position map, querying the fabric name server, or an out-of-band
12219  * method. In most cases, it is necessary to log in with devices before performing I/Os.
12220  * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
12221  * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
12222  * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
12223  * before exchanging FCP I/O.<br><br>
12224  *
12225  * @b Note: The HW manages both the well known fabric address and the name server as
12226  * nodes in the domain. Therefore, the driver must allocate node objects prior to
12227  * communicating with either of these entities.
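 *
 * A compressed, hypothetical sketch of the bring-up order (each call completes
 * asynchronously, so a real driver issues the next step from the previous
 * step's callback; status checks are elided):
 *
 * @code
 * // Connect to the domain through an SLI Port (VPI), then attach an FC ID.
 * ocs_hw_port_alloc(hw, sport, NULL, wwpn);
 * ocs_hw_port_attach(hw, sport, fc_id);
 *
 * // When a domain is reported, allocate and attach to it (FCFI/VFI).
 * ocs_hw_domain_alloc(hw, domain, fcf_index, vlan_id);
 * ocs_hw_domain_attach(hw, domain, fc_id);
 *
 * // Allocate a remote node (RPI), log in (for example, PLOGI), then attach.
 * ocs_hw_node_alloc(hw, rnode, remote_fc_id, sport);
 * ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, plogi_dma, plogi_len,
 *                  rsp_dma, rnode, els_cb, cb_arg);
 * ocs_hw_node_attach(hw, rnode, sparams_dma);
 * @endcode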
12228  *
12229  * <h3>Sending and Receiving I/Os</h3>
 * The HW provides separate interfaces for sending BLS/ELS/FC-CT and FCP commands,
 * but they are conceptually similar. Since the commands complete asynchronously,
 * the caller must provide a HW I/O object that maintains the I/O state, as well as
 * a callback function. The driver may use the same callback function for all I/O
 * operations, but each operation must use a unique HW I/O object. In the SLI-4
 * architecture, there is a direct association between the HW I/O object and the SGL used
 * to describe the data. Therefore, a driver typically performs the following operations
 * (sketched after the list):
12237  *
12238  * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
12239  * <li>Formats the SGL, specifying both the HW I/O object and the SGL.
12240  * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
12241  * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
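 *
 * A short sketch of those operations for a target read, assuming buf_phys/buf_len
 * describe a DMA-able buffer and io_cb()/cb_arg are driver-supplied (iparam setup
 * is elided):
 *
 * @code
 * ocs_hw_io_t *io = ocs_hw_io_alloc(hw);
 *
 * // Bind an SGL to this HW I/O object and describe the data.
 * ocs_hw_io_init_sges(hw, io, OCS_HW_IO_TARGET_READ);
 * ocs_hw_io_add_sge(hw, io, buf_phys, buf_len);
 *
 * // Submit; io_cb() runs when the WQE completes.
 * ocs_hw_io_send(hw, OCS_HW_IO_TARGET_READ, io, xfer_len, &iparam, rnode,
 *                io_cb, cb_arg);
 * @endcode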
12242  *
 * <h3>HW Tear Down</h3>
 * To tear down the HW (a sketch follows the list):
 *
 * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
 * data and events.</li>
 * <li>Destroy the HW object (ocs_hw_teardown()).</li>
 * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
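 *
 * A minimal sketch of that order (the port-control completion should be awaited
 * before the teardown; free_unsolicited_buffers() is a hypothetical driver helper):
 *
 * @code
 * // Quiesce first so no new data or events arrive during teardown.
 * ocs_hw_port_control(hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL);
 *
 * // Release queues and HW state, then driver-owned buffers.
 * ocs_hw_teardown(hw);
 * free_unsolicited_buffers(ocs);
 * @endcode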
12250  * <br>
12251  * </div><!-- overview -->
12252  *
12253  */
12254 
12255 
12256 
12257 
/**
 * This contains all HW runtime workaround code. Based on the ASIC type,
 * ASIC revision, and range of FW revisions, a particular workaround may be enabled.
 *
 * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
 * during ocs_hw_setup() (for example, the MAX_QUEUE overrides for mis-reported queue
 * sizes). Or, if required, elements of the ocs_hw_workaround_t structure may be set to
 * control specific runtime behavior.
 *
 * It is intended that the controls in ocs_hw_workaround_t be defined functionally, so the
 * driver reads "if (hw->workaround.enable_xxx) then ...", rather than
 * "if this is a BE3, then do xxx".
 */
12272 
12273 
12274 #define HW_FWREV_ZERO		(0ull)
12275 #define HW_FWREV_MAX		(~0ull)
12276 
12277 #define SLI4_ASIC_TYPE_ANY	0
12278 #define SLI4_ASIC_REV_ANY	0
12279 
12280 /**
12281  * @brief Internal definition of workarounds
12282  */
12283 
12284 typedef enum {
12285 	HW_WORKAROUND_TEST = 1,
12286 	HW_WORKAROUND_MAX_QUEUE,	/**< Limits all queues */
12287 	HW_WORKAROUND_MAX_RQ,		/**< Limits only the RQ */
12288 	HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
12289 	HW_WORKAROUND_WQE_COUNT_METHOD,
12290 	HW_WORKAROUND_RQE_COUNT_METHOD,
	HW_WORKAROUND_USE_UNREGISTERED_RPI,
	HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable auto-response on target DIF */
12293 	HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
12294 	HW_WORKAROUND_USE_DIF_QUARANTINE,
12295 	HW_WORKAROUND_USE_DIF_SEC_XRI,		/**< Use secondary xri for multiple data phases */
12296 	HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB,	/**< FCFI reported in SRB not correct, use "first" registered domain */
12297 	HW_WORKAROUND_FW_VERSION_TOO_LOW,	/**< The FW version is not the min version supported by this driver */
12298 	HW_WORKAROUND_SGLC_MISREPORTED,	/**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
12299 	HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE,	/**< Don't use SEND_FRAME capable if FW version is too old */
12300 } hw_workaround_e;
12301 
12302 /**
12303  * @brief Internal workaround structure instance
12304  */
12305 
12306 typedef struct {
12307 	sli4_asic_type_e asic_type;
12308 	sli4_asic_rev_e asic_rev;
12309 	uint64_t fwrev_low;
12310 	uint64_t fwrev_high;
12311 
12312 	hw_workaround_e workaround;
12313 	uint32_t value;
12314 } hw_workaround_t;
12315 
12316 static hw_workaround_t hw_workarounds[] = {
12317 	{SLI4_ASIC_TYPE_ANY,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12318 		HW_WORKAROUND_TEST, 999},
12319 
	/* Bug: 127585: if_type == 2 returns 0 for total length placed on
	 * FCP_TSEND64_WQE completions. Note: the original driver code enables this
	 * workaround for all ASIC types.
	 */
12324 	{SLI4_ASIC_TYPE_ANY,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12325 		HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},
12326 
12327 	/* Bug: unknown, Lancer A0 has mis-reported max queue depth */
12328 	{SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
12329 		HW_WORKAROUND_MAX_QUEUE, 2048},
12330 
12331 	/* Bug: 143399, BE3 has mis-reported max RQ queue depth */
12332 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
12333 		HW_WORKAROUND_MAX_RQ, 2048},
12334 
12335 	/* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
12336 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
12337 		HW_WORKAROUND_MAX_RQ, 2048},
12338 
12339 	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
12340 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12341 		HW_WORKAROUND_WQE_COUNT_METHOD, 1},
12342 
12343 	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
12344 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12345 		HW_WORKAROUND_RQE_COUNT_METHOD, 1},
12346 
12347 	/* Bug: 142968, BE3 UE with RPI == 0xffff */
12348 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12349 		HW_WORKAROUND_USE_UNREGISTERD_RPI, 0},
12350 
12351 	/* Bug: unknown, Skyhawk won't support auto-response on target T10-PI  */
12352 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12353 		HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},
12354 
12355 	{SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
12356 		HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},
12357 
12358 	/* Bug: 160124, Skyhawk quarantine DIF XRIs  */
12359 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12360 		HW_WORKAROUND_USE_DIF_QUARANTINE, 0},
12361 
12362 	/* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
12363 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12364 		HW_WORKAROUND_USE_DIF_SEC_XRI, 0},
12365 
	/* Bug: xxxxxx, FCFI reported in SRB not correct */
12367 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12368 		HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
12369 #if 0
12370 	/* Bug: 165642, FW version check for driver */
12371 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
12372 		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12373 #endif
12374 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
12375 		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12376 
12377 	/* Bug 177061, Lancer FW does not set the SGLC bit */
12378 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12379 		HW_WORKAROUND_SGLC_MISREPORTED, 0},
12380 
12381 	/* BZ 181208/183914, enable this workaround for ALL revisions */
12382 	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12383 		HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
12384 };
12385 
12386 /**
12387  * @brief Function prototypes
12388  */
12389 
12390 static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);
12391 
12392 /**
12393  * @brief Parse the firmware version (name)
12394  *
12395  * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
12396  * by the HW_FWREV() macro
12397  *
12398  * @param fwrev_string pointer to the firmware string
12399  *
12400  * @return packed firmware revision value
12401  */
12402 
12403 static uint64_t
12404 parse_fw_version(const char *fwrev_string)
12405 {
12406 	int v[4] = {0};
12407 	const char *p;
12408 	int i;
12409 
	for (p = fwrev_string, i = 0; *p && (i < 4); i++) {
		v[i] = ocs_strtoul(p, 0, 0);
		while (*p && *p != '.') {
			p++;
		}
		if (*p) {
			p++;
		}
	}
12419 
12420 	/* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
12421 	if (v[2] == 9999) {
12422 		return HW_FWREV_MAX;
12423 	} else {
12424 		return HW_FWREV(v[0], v[1], v[2], v[3]);
12425 	}
12426 }
12427 
12428 /**
12429  * @brief Test for a workaround match
12430  *
 * Looks at the ASIC type, ASIC revision, and FW revision, and returns TRUE on a match.
12432  *
12433  * @param hw Pointer to the HW structure
12434  * @param w Pointer to a workaround structure entry
12435  *
12436  * @return Return TRUE for a match
12437  */
12438 
12439 static int32_t
12440 ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
12441 {
12442 	return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
12443 		    ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
12444 		    (w->fwrev_low <= hw->workaround.fwrev) &&
12445 		    ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
12446 }
12447 
12448 /**
12449  * @brief Setup HW runtime workarounds
12450  *
 * The function is called at the end of ocs_hw_setup() to set up any runtime workarounds
12452  * based on the HW/SLI setup.
12453  *
12454  * @param hw Pointer to HW structure
12455  *
12456  * @return none
12457  */
12458 
12459 void
12460 ocs_hw_workaround_setup(struct ocs_hw_s *hw)
12461 {
12462 	hw_workaround_t *w;
12463 	sli4_t *sli4 = &hw->sli;
12464 	uint32_t i;
12465 
12466 	/* Initialize the workaround settings */
12467 	ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));
12468 
	/* If hw_war_version is non-null, then it's a value that was set by a module parameter
12470 	 * (sorry for the break in abstraction, but workarounds are ... well, workarounds)
12471 	 */
12472 
12473 	if (hw->hw_war_version) {
12474 		hw->workaround.fwrev = parse_fw_version(hw->hw_war_version);
12475 	} else {
12476 		hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]);
12477 	}
12478 
12479 	/* Walk the workaround list, if a match is found, then handle it */
12480 	for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) {
12481 		if (ocs_hw_workaround_match(hw, w)) {
12482 			switch(w->workaround) {
12483 
12484 			case HW_WORKAROUND_TEST: {
12485 				ocs_log_debug(hw->os, "Override: test: %d\n", w->value);
12486 				break;
12487 			}
12488 
12489 			case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: {
12490 				ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n");
12491 				hw->workaround.retain_tsend_io_length = 1;
12492 				break;
12493 			}
12494 			case HW_WORKAROUND_MAX_QUEUE: {
12495 				sli4_qtype_e q;
12496 
12497 				ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value);
12498 				for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
12499 					if (hw->num_qentries[q] > w->value) {
12500 						hw->num_qentries[q] = w->value;
12501 					}
12502 				}
12503 				break;
12504 			}
12505 			case HW_WORKAROUND_MAX_RQ: {
12506 				ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value);
12507 				if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) {
12508 					hw->num_qentries[SLI_QTYPE_RQ] = w->value;
12509 				}
12510 				break;
12511 			}
12512 			case HW_WORKAROUND_WQE_COUNT_METHOD: {
12513 				ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value);
12514 				sli4->config.count_method[SLI_QTYPE_WQ] = w->value;
12515 				sli_calc_max_qentries(sli4);
12516 				break;
12517 			}
12518 			case HW_WORKAROUND_RQE_COUNT_METHOD: {
12519 				ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value);
12520 				sli4->config.count_method[SLI_QTYPE_RQ] = w->value;
12521 				sli_calc_max_qentries(sli4);
12522 				break;
12523 			}
			case HW_WORKAROUND_USE_UNREGISTERED_RPI:
12525 				ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n");
12526 				hw->workaround.use_unregistered_rpi = TRUE;
12527 				/*
12528 				 * Allocate an RPI that is never registered, to be used in the case where
12529 				 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF
12530 				 */
12531 				if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid,
12532 					&hw->workaround.unregistered_index)) {
12533 					ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n");
12534 					hw->workaround.use_unregistered_rpi = FALSE;
12535 				}
12536 				break;
12537 			case HW_WORKAROUND_DISABLE_AR_TGT_DIF:
12538 				ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n");
12539 				hw->workaround.disable_ar_tgt_dif = TRUE;
12540 				break;
12541 			case HW_WORKAROUND_DISABLE_SET_DUMP_LOC:
12542 				ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n");
12543 				hw->workaround.disable_dump_loc = TRUE;
12544 				break;
12545 			case HW_WORKAROUND_USE_DIF_QUARANTINE:
12546 				ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n");
12547 				hw->workaround.use_dif_quarantine = TRUE;
12548 				break;
12549 			case HW_WORKAROUND_USE_DIF_SEC_XRI:
12550 				ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n");
12551 				hw->workaround.use_dif_sec_xri = TRUE;
12552 				break;
12553 			case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB:
12554 				ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n");
12555 				hw->workaround.override_fcfi = TRUE;
12556 				break;
12557 
12558 			case HW_WORKAROUND_FW_VERSION_TOO_LOW:
12559 				ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n");
12560 				hw->workaround.fw_version_too_low = TRUE;
12561 				break;
12562 			case HW_WORKAROUND_SGLC_MISREPORTED:
12563 				ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n");
12564 				hw->workaround.sglc_misreported = TRUE;
12565 				break;
12566 			case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE:
12567 				ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n");
12568 				hw->workaround.ignore_send_frame = TRUE;
12569 				break;
12570 			} /* switch(w->workaround) */
12571 		}
12572 	}
12573 }
12574