/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File : ecore_mcp.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "nvm_map.h"
#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES	(500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES		(50 * 1000) /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))
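
/* For reference, a DRV_MB_WR() invocation such as
 * DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param) expands to a plain register
 * write into this PF's mailbox section, i.e. roughly:
 *
 *	ecore_wr(p_hwfn, p_ptt,
 *		 p_hwfn->mcp_info->drv_mb_addr +
 *		 OFFSETOF(struct public_drv_mb, drv_mb_param),
 *		 param);
 */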

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
	DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17
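
/* An interpretation of the shift value (not spelled out by the HSI): a rate
 * in Mbit is converted to bytes by `rate << MCP_BYTES_PER_MBIT_SHIFT`, which
 * treats 1 Mbit as 2^20 bits, since 1 << 17 bytes == 2^20 bits. For example,
 * 100 Mbit -> 100 << 17 = 13107200 bytes.
 */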

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
						OSAL_BE32_TO_CPU(tmp);
	}
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
	p_hwfn->mcp_info = OSAL_NULL;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
				  PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					 MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait until a previous access is done.
 * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
 * access is achieved by setting a blocking flag, which causes the mailbox
 * sends of other competing contexts to fail.
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
					      u32 cmd)
{
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	/* There's at least a single command that is sent by ecore during the
	 * load sequence [expectation of MFW].
	 */
	if ((p_hwfn->mcp_info->block_mb_sending) &&
	    (cmd != DRV_MSG_CODE_FEATURE_SUPPORT)) {
		DP_NOTICE(p_hwfn, false,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
		return ECORE_BUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
	}

	return ECORE_SUCCESS;
}

static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
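
/* Illustrative pairing (a sketch, not a driver entry point): every mailbox
 * send in this file brackets the hardware access with the two helpers above:
 *
 *	rc = ecore_mcp_mb_lock(p_hwfn, cmd);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	...write drv_mb_param/drv_mb_header, poll fw_mb_header...
 *	ecore_mcp_mb_unlock(p_hwfn, cmd);
 *
 * For [UN]LOAD_REQ the spinlock is already dropped inside ecore_mcp_mb_lock()
 * and the block_mb_sending flag stands in for it, so ecore_mcp_mb_unlock()
 * must not release the spinlock again for those commands - hence its check
 * of cmd.
 */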

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	/* Ensure that only a single thread is accessing the mailbox at a
	 * given time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}

static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     u32 *o_mcp_resp, u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 seq, cnt = 1, actual_mb_seq;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 seconds (500*1000*10usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < max_retries));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}
	return rc;
}

static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    struct ecore_mcp_mb_params *p_mb_params)
{
	union drv_union_data union_data;
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > sizeof(union_data) ||
	    p_mb_params->data_dst_size > sizeof(union_data)) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       sizeof(union_data));
		return ECORE_INVAL;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * given time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			      p_mb_params->param, &p_mb_params->mcp_resp,
			      &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size)
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);

	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
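
/* Illustrative usage (a sketch, not taken from a specific caller):
 *
 *	u32 mcp_resp = 0, mcp_param = 0;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
 *			   &mcp_resp, &mcp_param);
 *
 * On success, mcp_resp carries the FW_MSG_CODE_* response and mcp_param the
 * command-specific output; which values to expect depends on the command.
 */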

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size,
					  u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size,
					  u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximum size, since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);

	return ECORE_SUCCESS;
}
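
/* Note for callers (an assumption based on the copy above, not an enforced
 * contract): o_buf must be able to hold up to MCP_DRV_NVM_BUF_LEN bytes,
 * since the transaction size is only known from the response, after the
 * OSAL_MEMCPY() target has already been chosen. A sketch:
 *
 *	u8 buf[MCP_DRV_NVM_BUF_LEN];
 *	u32 resp, param, txn_size;
 *
 *	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, cmd, offset, &resp, &param,
 *				  &txn_size, (u32 *)buf);
 */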

#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded) {
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	} else if (!loaded_port[p_hwfn->port_id]) {
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	} else {
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* On CMT, always report an engine load */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif

static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
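
/* With the default (non-overridden) policy above, a force load is permitted
 * only when the loading driver outranks the existing one:
 *   - an OS driver may replace a stale preboot driver, and
 *   - a kdump driver may replace an OS driver.
 * All other role combinations are refused unless explicitly overridden.
 */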

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}
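
/* For example, an ecore built with only CONFIG_ECORE_L2, CONFIG_ECORE_SRIOV
 * and CONFIG_ECORE_LL2 would report 0x43 (bits 0, 1 and 6). The result is
 * carried as drv_ver_1 in the load request below.
 */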

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
			    p_in_params->drv_role);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			    p_in_params->timeout_val);
	ECORE_MFW_SET_FIELD(load_req.misc0, (u64)LOAD_REQ_FORCE,
			    p_in_params->force_cmd);
	ECORE_MFW_SET_FIELD(load_req.misc0, (u64)LOAD_REQ_FLAGS0,
			    p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   ECORE_MFW_GET_FIELD(load_req.misc0,
					       LOAD_REQ_LOCK_TO),
			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   ECORE_MFW_GET_FIELD(load_req.misc0,
					       LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   ECORE_MFW_GET_FIELD(load_rsp.misc0,
					       LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
						   enum ecore_drv_role drv_role,
						   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static enum _ecore_status_t
ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
			enum ecore_load_req_force force_cmd,
			u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
				     &mfw_force_cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);
			DP_INFO(p_hwfn, "Sending a force load request\n");

			rc = ecore_get_mfw_force_cmd(p_hwfn,
						     ECORE_LOAD_REQ_FORCE_ALL,
						     &mfw_force_cmd);
			if (rc != ECORE_SUCCESS)
				return rc;

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn, false,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
		DP_NOTICE(p_hwfn, false,
			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}
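
/* Typical call flow (a sketch of how a hw-init path could drive this; the
 * timeout constant is an assumption, field values are illustrative):
 *
 *	struct ecore_load_req_params params;
 *
 *	OSAL_MEM_ZERO(&params, sizeof(params));
 *	params.drv_role = ECORE_DRV_ROLE_OS;
 *	params.timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &params);
 *	if (rc == ECORE_SUCCESS)
 *		switch (params.load_code) {
 *		case FW_MSG_CODE_DRV_LOAD_ENGINE:   ...engine-wide init...
 *		case FW_MSG_CODE_DRV_LOAD_PORT:     ...port init...
 *		case FW_MSG_CODE_DRV_LOAD_FUNCTION: ...function init...
 *		}
 */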

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	switch (p_hwfn->p_dev->wol_config) {
	case ECORE_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case ECORE_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->p_dev->wol_config);
		/* Fallthrough */
	case ECORE_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->p_dev->wol_mac;

		OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];
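		/* e.g. a WoL MAC of aa:bb:cc:dd:ee:ff is packed as
		 * mac_upper = 0x0000aabb and mac_lower = 0xccddeeff -
		 * matching the [%08x,%08x] pair printed below.
		 */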

		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
			   "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
			   p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
			   p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_SHIFT;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_SHIFT;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
			   OFFSETOF(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed, since p_link->speed changes
	 * again according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}

enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		if (params->eee.enable)
			phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_SHIFT) &
					EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* If the MCP fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 *  - On reset, there's no guarantee MFW would trigger
	 *    an attention.
	 *  - On initialization, older MFWs might not indicate link change
	 *    during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return rc;
}

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and until the subsequent load phase, during which they will be
	 * re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = ECORE_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = ECORE_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = ECORE_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume a limit is
	 * always present, and clamp out-of-range values to min 1 and
	 * max 100.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
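
/* For example (illustrative values): a shmem config whose MIN_BW field
 * decodes to 0 and whose MAX_BW field decodes to 250 would be clamped by the
 * checks above to bandwidth_min = 1 and bandwidth_max = 100.
 */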

static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}
#if 0
/* This was introduced with FW 8.10.5.0; Hopefully this is only temp. */
enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u8 *p_pf)
{
	struct public_func shmem_info;
	int i;

	/* Find the first Ethernet interface in the port */
	for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->p_dev);
	     i += p_hwfn->p_dev->num_ports_in_engines) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));

		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
		    FUNC_MF_CFG_PROTOCOL_ETHERNET) {
			*p_pf = (u8)i;
			return ECORE_SUCCESS;
		}
	}

	/* This might actually be valid somewhere in the future but for now
	 * it's highly unlikely.
	 */
	DP_NOTICE(p_hwfn, false,
		  "Failed to find an ethernet interface on the port in MF_SI mode\n");

	return ECORE_INVAL;
}
#endif
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
	    (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET))
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_TAG_VALUE,
			 p_hwfn->hw_info.ovlan);

	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		      &resp, &param);
}

static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	/* A single notification should be sent to the upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
1603 
1604 struct ecore_mdump_cmd_params {
1605 	u32 cmd;
1606 	void *p_data_src;
1607 	u8 data_src_size;
1608 	void *p_data_dst;
1609 	u8 data_dst_size;
1610 	u32 mcp_resp;
1611 };
1612 
1613 static enum _ecore_status_t
1614 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1615 		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1616 {
1617 	struct ecore_mcp_mb_params mb_params;
1618 	enum _ecore_status_t rc;
1619 
1620 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1621 	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1622 	mb_params.param = p_mdump_cmd_params->cmd;
1623 	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1624 	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1625 	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1626 	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1627 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1628 	if (rc != ECORE_SUCCESS)
1629 		return rc;
1630 
1631 	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1632 
1633 	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1634 		DP_INFO(p_hwfn,
1635 			"The mdump sub-command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1636 			p_mdump_cmd_params->cmd);
1637 		rc = ECORE_NOTIMPL;
1638 	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1639 		DP_INFO(p_hwfn,
1640 			"The mdump command is not supported by the MFW\n");
1641 		rc = ECORE_NOTIMPL;
1642 	}
1643 
1644 	return rc;
1645 }
1646 
1647 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1648 						struct ecore_ptt *p_ptt)
1649 {
1650 	struct ecore_mdump_cmd_params mdump_cmd_params;
1651 
1652 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1653 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1654 
1655 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1656 }
1657 
1658 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1659 						struct ecore_ptt *p_ptt,
1660 						u32 epoch)
1661 {
1662 	struct ecore_mdump_cmd_params mdump_cmd_params;
1663 
1664 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1665 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1666 	mdump_cmd_params.p_data_src = &epoch;
1667 	mdump_cmd_params.data_src_size = sizeof(epoch);
1668 
1669 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1670 }
1671 
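/* Trigger a crash dump on the MFW side. A typical flow (a sketch, not
 * mandated by this API) stamps the epoch first via
 * ecore_mcp_mdump_set_values() and then calls ecore_mcp_mdump_trigger().
 */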
1672 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1673 					     struct ecore_ptt *p_ptt)
1674 {
1675 	struct ecore_mdump_cmd_params mdump_cmd_params;
1676 
1677 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1678 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1679 
1680 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1681 }
1682 
1683 static enum _ecore_status_t
1684 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1685 			   struct mdump_config_stc *p_mdump_config)
1686 {
1687 	struct ecore_mdump_cmd_params mdump_cmd_params;
1688 	enum _ecore_status_t rc;
1689 
1690 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1691 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1692 	mdump_cmd_params.p_data_dst = p_mdump_config;
1693 	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1694 
1695 	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1696 	if (rc != ECORE_SUCCESS)
1697 		return rc;
1698 
1699 	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1700 		DP_INFO(p_hwfn,
1701 			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1702 			mdump_cmd_params.mcp_resp);
1703 		rc = ECORE_UNKNOWN_ERROR;
1704 	}
1705 
1706 	return rc;
1707 }
1708 
1709 enum _ecore_status_t
1710 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1711 			 struct ecore_mdump_info *p_mdump_info)
1712 {
1713 	u32 addr, global_offsize, global_addr;
1714 	struct mdump_config_stc mdump_config;
1715 	enum _ecore_status_t rc;
1716 
1717 	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1718 
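	/* The MFW publishes the reason for the last mdump in the global
	 * section of the public shmem; a non-zero reason means a dump was
	 * taken, and its configuration is then worth fetching.
	 */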
1719 	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1720 				    PUBLIC_GLOBAL);
1721 	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1722 	global_addr = SECTION_ADDR(global_offsize, 0);
1723 	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1724 					global_addr +
1725 					OFFSETOF(struct public_global,
1726 						 mdump_reason));
1727 
1728 	if (p_mdump_info->reason) {
1729 		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1730 		if (rc != ECORE_SUCCESS)
1731 			return rc;
1732 
1733 		p_mdump_info->version = mdump_config.version;
1734 		p_mdump_info->config = mdump_config.config;
1735 		p_mdump_info->epoch = mdump_config.epoc;
1736 		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1737 		p_mdump_info->valid_logs = mdump_config.valid_logs;
1738 
1739 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1740 			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1741 			   p_mdump_info->reason, p_mdump_info->version,
1742 			   p_mdump_info->config, p_mdump_info->epoch,
1743 			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1744 	} else {
1745 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1746 			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
1747 	}
1748 
1749 	return ECORE_SUCCESS;
1750 }
1751 
1752 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1753 						struct ecore_ptt *p_ptt)
1754 {
1755 	struct ecore_mdump_cmd_params mdump_cmd_params;
1756 
1757 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1758 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1759 
1760 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1761 }
1762 
1763 enum _ecore_status_t
1764 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1765 			   struct ecore_mdump_retain_data *p_mdump_retain)
1766 {
1767 	struct ecore_mdump_cmd_params mdump_cmd_params;
1768 	struct mdump_retain_data_stc mfw_mdump_retain;
1769 	enum _ecore_status_t rc;
1770 
1771 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1772 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1773 	mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1774 	mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1775 
1776 	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1777 	if (rc != ECORE_SUCCESS)
1778 		return rc;
1779 
1780 	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1781 		DP_INFO(p_hwfn,
1782 			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1783 			mdump_cmd_params.mcp_resp);
1784 		return ECORE_UNKNOWN_ERROR;
1785 	}
1786 
1787 	p_mdump_retain->valid = mfw_mdump_retain.valid;
1788 	p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1789 	p_mdump_retain->pf = mfw_mdump_retain.pf;
1790 	p_mdump_retain->status = mfw_mdump_retain.status;
1791 
1792 	return ECORE_SUCCESS;
1793 }
1794 
1795 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1796 						struct ecore_ptt *p_ptt)
1797 {
1798 	struct ecore_mdump_cmd_params mdump_cmd_params;
1799 
1800 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1801 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1802 
1803 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1804 }
1805 
1806 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1807 					    struct ecore_ptt *p_ptt)
1808 {
1809 	struct ecore_mdump_retain_data mdump_retain;
1810 	enum _ecore_status_t rc;
1811 
1812 	/* In CMT mode, a single acknowledgement to the MFW and a single
1813 	 * notification to the upper driver are sufficient.
1814 	 */
1815 	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1816 		return;
1817 
1818 	rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1819 	if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1820 		DP_NOTICE(p_hwfn, false,
1821 			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1822 			  mdump_retain.epoch, mdump_retain.pf,
1823 			  mdump_retain.status);
1824 	} else {
1825 		DP_NOTICE(p_hwfn, false,
1826 			  "The MFW notified that a critical error occurred in the device\n");
1827 	}
1828 
1829 	if (p_hwfn->p_dev->allow_mdump) {
1830 		DP_NOTICE(p_hwfn, false,
1831 			  "Not acknowledging the notification to allow the MFW crash dump\n");
1832 		return;
1833 	}
1834 
1835 	DP_NOTICE(p_hwfn, false,
1836 		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1837 	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1838 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1839 }
1840 
1841 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1842 					     struct ecore_ptt *p_ptt)
1843 {
1844 	struct ecore_mcp_info *info = p_hwfn->mcp_info;
1845 	enum _ecore_status_t rc = ECORE_SUCCESS;
1846 	bool found = false;
1847 	u16 i;
1848 
1849 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1850 
1851 	/* Read Messages from MFW */
1852 	ecore_mcp_read_mb(p_hwfn, p_ptt);
1853 
1854 	/* Compare current messages to old ones */
1855 	for (i = 0; i < info->mfw_mb_length; i++) {
1856 		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1857 			continue;
1858 
1859 		found = true;
1860 
1861 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1862 			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1863 			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1864 
1865 		switch (i) {
1866 		case MFW_DRV_MSG_LINK_CHANGE:
1867 			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1868 			break;
1869 		case MFW_DRV_MSG_VF_DISABLED:
1870 			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1871 			break;
1872 		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1873 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1874 						    ECORE_DCBX_REMOTE_LLDP_MIB);
1875 			break;
1876 		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1877 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1878 						    ECORE_DCBX_REMOTE_MIB);
1879 			break;
1880 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1881 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1882 						    ECORE_DCBX_OPERATIONAL_MIB);
1883 			break;
1884 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1885 			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1886 			break;
1887 		case MFW_DRV_MSG_ERROR_RECOVERY:
1888 			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1889 			break;
1890 		case MFW_DRV_MSG_GET_LAN_STATS:
1891 		case MFW_DRV_MSG_GET_FCOE_STATS:
1892 		case MFW_DRV_MSG_GET_ISCSI_STATS:
1893 		case MFW_DRV_MSG_GET_RDMA_STATS:
1894 			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1895 			break;
1896 		case MFW_DRV_MSG_BW_UPDATE:
1897 			ecore_mcp_update_bw(p_hwfn, p_ptt);
1898 			break;
1899 		case MFW_DRV_MSG_S_TAG_UPDATE:
1900 			ecore_mcp_update_stag(p_hwfn, p_ptt);
1901 			break;
1902 		case MFW_DRV_MSG_FAILURE_DETECTED:
1903 			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1904 			break;
1905 		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1906 			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1907 			break;
1908 		case MFW_DRV_MSG_GET_TLV_REQ:
1909 			OSAL_MFW_TLV_REQ(p_hwfn);
1910 			break;
1911 		default:
1912 			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1913 			rc = ECORE_INVAL;
1914 		}
1915 	}
1916 
1917 	/* ACK everything */
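	/* The ack area in shmem follows the length dword and the array of
	 * current messages, i.e. it starts at
	 * mfw_mb_addr + sizeof(u32) * (1 + MFW_DRV_MSG_MAX_DWORDS(mfw_mb_length)).
	 */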
1918 	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1919 		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1920 
1921 		/* The MFW expects the answer in BE, so force the write in that format */
1922 		ecore_wr(p_hwfn, p_ptt,
1923 			 info->mfw_mb_addr + sizeof(u32) +
1924 			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1925 			 sizeof(u32) + i * sizeof(u32), val);
1926 	}
1927 
1928 	if (!found) {
1929 		DP_NOTICE(p_hwfn, false,
1930 			  "Received an MFW message indication but no new message!\n");
1931 		rc = ECORE_INVAL;
1932 	}
1933 
1934 	/* Copy the new mfw messages into the shadow */
1935 	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1936 
1937 	return rc;
1938 }
1939 
1940 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1941 					   struct ecore_ptt *p_ptt,
1942 					   u32 *p_mfw_ver,
1943 					   u32 *p_running_bundle_id)
1944 {
1945 	u32 global_offsize;
1946 
1947 #ifndef ASIC_ONLY
1948 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1949 		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1950 		return ECORE_SUCCESS;
1951 	}
1952 #endif
1953 
1954 	if (IS_VF(p_hwfn->p_dev)) {
1955 		if (p_hwfn->vf_iov_info) {
1956 			struct pfvf_acquire_resp_tlv *p_resp;
1957 
1958 			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1959 			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1960 			return ECORE_SUCCESS;
1961 		} else {
1962 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1963 				   "VF requested MFW version prior to ACQUIRE\n");
1964 			return ECORE_INVAL;
1965 		}
1966 	}
1967 
1968 	global_offsize = ecore_rd(p_hwfn, p_ptt,
1969 			  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1970 					       PUBLIC_GLOBAL));
1971 	*p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
1972 			SECTION_ADDR(global_offsize, 0) +
1973 			OFFSETOF(struct public_global, mfw_ver));
1974 
1975 	if (p_running_bundle_id != OSAL_NULL) {
1976 		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1977 				SECTION_ADDR(global_offsize, 0) +
1978 				OFFSETOF(struct public_global,
1979 					 running_bundle_id));
1980 	}
1981 
1982 	return ECORE_SUCCESS;
1983 }
1984 
1985 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
1986 					   struct ecore_ptt *p_ptt,
1987 					   u32 *p_mbi_ver)
1988 {
1989 	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
1990 
1991 #ifndef ASIC_ONLY
1992 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1993 		DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
1994 		return ECORE_SUCCESS;
1995 	}
1996 #endif
1997 
1998 	if (IS_VF(p_hwfn->p_dev))
1999 		return ECORE_INVAL;
2000 
2001 	/* Read the address of the nvm_cfg */
2002 	nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2003 	if (!nvm_cfg_addr) {
2004 		DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2005 		return ECORE_INVAL;
2006 	}
2007 
2008 	/* Read the offset of nvm_cfg1 */
2009 	nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2010 
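	/* mbi_version lives in the glob section of nvm_cfg1 inside the MCP
	 * scratchpad; only the bits covered by the three
	 * NVM_CFG1_GLOB_MBI_VERSION_* masks are kept.
	 */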
2011 	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2012 		       OFFSETOF(struct nvm_cfg1, glob) +
2013 		       OFFSETOF(struct nvm_cfg1_glob, mbi_version);
2014 	*p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2015 		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2016 		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2017 		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2018 
2019 	return ECORE_SUCCESS;
2020 }
2021 
2022 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
2023 					      u32 *p_media_type)
2024 {
2025 	struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
2026 	struct ecore_ptt *p_ptt;
2027 
2028 	/* TODO - Add support for VFs */
2029 	if (IS_VF(p_dev))
2030 		return ECORE_INVAL;
2031 
2032 	if (!ecore_mcp_is_init(p_hwfn)) {
2033 		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
2034 		return ECORE_BUSY;
2035 	}
2036 
2037 	*p_media_type = MEDIA_UNSPECIFIED;
2038 
2039 	p_ptt = ecore_ptt_acquire(p_hwfn);
2040 	if (!p_ptt)
2041 		return ECORE_BUSY;
2042 
2043 	*p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2044 				 OFFSETOF(struct public_port, media_type));
2045 
2046 	ecore_ptt_release(p_hwfn, p_ptt);
2047 
2048 	return ECORE_SUCCESS;
2049 }
2050 
2051 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2052 static void
2053 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2054 				 enum ecore_pci_personality *p_proto)
2055 {
2056 	/* No legacy MFW ever published iWARP support, so at this point
2057 	 * this is either plain L2 or RoCE.
2058 	 */
2059 	if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2060 			  &p_hwfn->hw_info.device_capabilities))
2061 		*p_proto = ECORE_PCI_ETH_ROCE;
2062 	else
2063 		*p_proto = ECORE_PCI_ETH;
2064 
2065 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2066 		   "According to Legacy capabilities, L2 personality is %08x\n",
2067 		   (u32) *p_proto);
2068 }
2069 
2070 static enum _ecore_status_t
2071 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2072 			      struct ecore_ptt *p_ptt,
2073 			      enum ecore_pci_personality *p_proto)
2074 {
2075 	u32 resp = 0, param = 0;
2076 	enum _ecore_status_t rc;
2077 
2078 	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2079 			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2080 	if (rc != ECORE_SUCCESS)
2081 		return rc;
2082 	if (resp != FW_MSG_CODE_OK) {
2083 		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2084 			   "MFW lacks support for the command; resp is 0x%08x\n",
2085 			   resp);
2086 		return ECORE_INVAL;
2087 	}
2088 
2089 	switch (param) {
2090 	case FW_MB_PARAM_GET_PF_RDMA_NONE:
2091 		*p_proto = ECORE_PCI_ETH;
2092 		break;
2093 	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2094 		*p_proto = ECORE_PCI_ETH_ROCE;
2095 		break;
2096 	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2097 		*p_proto = ECORE_PCI_ETH_IWARP;
2098 		break;
2099 	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2100 		*p_proto = ECORE_PCI_ETH_RDMA;
2101 		break;
2102 	default:
2103 		DP_NOTICE(p_hwfn, true,
2104 			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2105 			  param);
2106 		return ECORE_INVAL;
2107 	}
2108 
2109 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2110 		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2111 		   (u32) *p_proto, resp, param);
2112 	return ECORE_SUCCESS;
2113 }
2114 
2115 static enum _ecore_status_t
2116 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2117 			  struct public_func *p_info,
2118 			  struct ecore_ptt *p_ptt,
2119 			  enum ecore_pci_personality *p_proto)
2120 {
2121 	enum _ecore_status_t rc = ECORE_SUCCESS;
2122 
2123 	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2124 	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2125 		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2126 		    ECORE_SUCCESS)
2127 			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2128 		break;
2129 	case FUNC_MF_CFG_PROTOCOL_ISCSI:
2130 		*p_proto = ECORE_PCI_ISCSI;
2131 		break;
2132 	case FUNC_MF_CFG_PROTOCOL_FCOE:
2133 		*p_proto = ECORE_PCI_FCOE;
2134 		break;
2135 	case FUNC_MF_CFG_PROTOCOL_ROCE:
2136 		DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2137 		rc = ECORE_INVAL;
2138 		break;
2139 	default:
2140 		rc = ECORE_INVAL;
2141 	}
2142 
2143 	return rc;
2144 }
2145 
2146 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2147 						    struct ecore_ptt *p_ptt)
2148 {
2149 	struct ecore_mcp_function_info *info;
2150 	struct public_func shmem_info;
2151 
2152 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2153 				 MCP_PF_ID(p_hwfn));
2154 	info = &p_hwfn->mcp_info->func_info;
2155 
2156 	info->pause_on_host = (shmem_info.config &
2157 			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2158 
2159 	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2160 				      &info->protocol)) {
2161 		DP_ERR(p_hwfn, "Unknown personality %08x\n",
2162 		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2163 		return ECORE_INVAL;
2164 	}
2165 
2166 	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2167 
2168 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
2169 		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2170 		info->mac[1] = (u8)(shmem_info.mac_upper);
2171 		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2172 		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2173 		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2174 		info->mac[5] = (u8)(shmem_info.mac_lower);
2175 
2176 		/* Store primary MAC for later possible WoL */
2177 		OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2178 
2179 	} else {
2180 		/* TODO - are there protocols for which there's no MAC? */
2181 		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2182 	}
2183 
2184 	/* TODO - are these calculations true for a BE machine? */
2185 	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
2186 			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
2187 	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
2188 			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
2189 
2190 	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2191 
2192 	info->mtu = (u16)shmem_info.mtu_size;
2193 
2194 	p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2195 	if (ecore_mcp_is_init(p_hwfn)) {
2196 		u32 resp = 0, param = 0;
2197 		enum _ecore_status_t rc;
2198 
2199 		rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2200 				   DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2201 		if (rc != ECORE_SUCCESS)
2202 			return rc;
2203 		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2204 			p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2205 	}
2206 	p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2207 
2208 	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2209 		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2210 		   info->pause_on_host, info->protocol,
2211 		   info->bandwidth_min, info->bandwidth_max,
2212 		   info->mac[0], info->mac[1], info->mac[2],
2213 		   info->mac[3], info->mac[4], info->mac[5],
2214 		   (unsigned long long)info->wwn_port, (unsigned long long)info->wwn_node, info->ovlan,
2215 		   (u8)p_hwfn->hw_info.b_wol_support);
2216 
2217 	return ECORE_SUCCESS;
2218 }
2219 
2220 struct ecore_mcp_link_params
2221 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2222 {
2223 	if (!p_hwfn || !p_hwfn->mcp_info)
2224 		return OSAL_NULL;
2225 	return &p_hwfn->mcp_info->link_input;
2226 }
2227 
2228 struct ecore_mcp_link_state
2229 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2230 {
2231 	if (!p_hwfn || !p_hwfn->mcp_info)
2232 		return OSAL_NULL;
2233 
2234 #ifndef ASIC_ONLY
2235 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2236 		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2237 		p_hwfn->mcp_info->link_output.link_up = true;
2238 	}
2239 #endif
2240 
2241 	return &p_hwfn->mcp_info->link_output;
2242 }
2243 
2244 struct ecore_mcp_link_capabilities
2245 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2246 {
2247 	if (!p_hwfn || !p_hwfn->mcp_info)
2248 		return OSAL_NULL;
2249 	return &p_hwfn->mcp_info->link_capabilities;
2250 }
2251 
2252 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2253 				     struct ecore_ptt *p_ptt)
2254 {
2255 	u32 resp = 0, param = 0;
2256 	enum _ecore_status_t rc;
2257 
2258 	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2259 			   DRV_MSG_CODE_NIG_DRAIN, 1000,
2260 			   &resp, &param);
2261 
2262 	/* Wait for the drain to complete before returning; the mailbox param
 	 * (1000) is assumed to be the drain duration in msec.
 	 */
2263 	OSAL_MSLEEP(1020);
2264 
2265 	return rc;
2266 }
2267 
2268 #ifndef LINUX_REMOVE
2269 const struct ecore_mcp_function_info
2270 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2271 {
2272 	if (!p_hwfn || !p_hwfn->mcp_info)
2273 		return OSAL_NULL;
2274 	return &p_hwfn->mcp_info->func_info;
2275 }
2276 #endif
2277 
2278 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
2279 					   struct ecore_ptt *p_ptt,
2280 					   struct ecore_mcp_nvm_params *params)
2281 {
2282 	enum _ecore_status_t rc;
2283 
2284 	switch (params->type) {
2285 	case ECORE_MCP_NVM_RD:
2286 		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2287 					  params->nvm_common.offset,
2288 					  &params->nvm_common.resp,
2289 					  &params->nvm_common.param,
2290 					  params->nvm_rd.buf_size,
2291 					  params->nvm_rd.buf);
2292 		break;
2293 	case ECORE_MCP_CMD:
2294 		rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2295 				   params->nvm_common.offset,
2296 				   &params->nvm_common.resp,
2297 				   &params->nvm_common.param);
2298 		break;
2299 	case ECORE_MCP_NVM_WR:
2300 		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2301 					  params->nvm_common.offset,
2302 					  &params->nvm_common.resp,
2303 					  &params->nvm_common.param,
2304 					  params->nvm_wr.buf_size,
2305 					  params->nvm_wr.buf);
2306 		break;
2307 	default:
2308 		rc = ECORE_NOTIMPL;
2309 		break;
2310 	}
2311 	return rc;
2312 }
2313 
2314 #ifndef LINUX_REMOVE
2315 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2316 				  struct ecore_ptt *p_ptt,
2317 				  u32 personalities)
2318 {
2319 	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2320 	struct public_func shmem_info;
2321 	int i, count = 0, num_pfs;
2322 
2323 	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2324 
2325 	for (i = 0; i < num_pfs; i++) {
2326 		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2327 					 MCP_PF_ID_BY_REL(p_hwfn, i));
2328 		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2329 			continue;
2330 
2331 		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2332 					      &protocol) !=
2333 		    ECORE_SUCCESS)
2334 			continue;
2335 
2336 		if ((1 << ((u32)protocol)) & personalities)
2337 			count++;
2338 	}
2339 
2340 	return count;
2341 }
2342 #endif
2343 
2344 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2345 					      struct ecore_ptt *p_ptt,
2346 					      u32 *p_flash_size)
2347 {
2348 	u32 flash_size;
2349 
2350 #ifndef ASIC_ONLY
2351 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2352 		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2353 		return ECORE_INVAL;
2354 	}
2355 #endif
2356 
2357 	if (IS_VF(p_hwfn->p_dev))
2358 		return ECORE_INVAL;
2359 
2360 	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2361 	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2362 				MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2363 	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
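	/* The register encodes the flash size as a power of two in Mbit,
	 * e.g. an encoded value of 3 yields 1 << (3 + 17) bytes = 1 MiB
	 * (8 Mbit).
	 */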
2364 
2365 	*p_flash_size = flash_size;
2366 
2367 	return ECORE_SUCCESS;
2368 }
2369 
2370 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2371 						  struct ecore_ptt *p_ptt)
2372 {
2373 	struct ecore_dev *p_dev = p_hwfn->p_dev;
2374 
2375 	if (p_dev->recov_in_prog) {
2376 		DP_NOTICE(p_hwfn, false,
2377 			  "Avoid triggering a recovery since such a process is already in progress\n");
2378 		return ECORE_AGAIN;
2379 	}
2380 
2381 	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2382 	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2383 
2384 	return ECORE_SUCCESS;
2385 }
2386 
2387 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2388 					      struct ecore_ptt *p_ptt,
2389 					      u8 vf_id, u8 num)
2390 {
2391 	u32 resp = 0, param = 0, rc_param = 0;
2392 	enum _ecore_status_t rc;
2393 
2394 	/* Only the leader hwfn can configure MSI-X; CMT must be taken into account */
2395 	if (!IS_LEAD_HWFN(p_hwfn))
2396 		return ECORE_SUCCESS;
2397 	num *= p_hwfn->p_dev->num_hwfns;
2398 
2399 	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2400 		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2401 	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2402 		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2403 
2404 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2405 			   &resp, &rc_param);
2406 
2407 	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2408 		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2409 			  vf_id);
2410 		rc = ECORE_INVAL;
2411 	} else {
2412 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2413 			   "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
2414 			    num, vf_id);
2415 	}
2416 
2417 	return rc;
2418 }
2419 
2420 enum _ecore_status_t
2421 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2422 			   struct ecore_mcp_drv_version *p_ver)
2423 {
2424 	struct ecore_mcp_mb_params mb_params;
2425 	struct drv_version_stc drv_version;
2426 	u32 num_words, i;
2427 	void *p_name;
2428 	OSAL_BE32 val;
2429 	enum _ecore_status_t rc;
2430 
2431 #ifndef ASIC_ONLY
2432 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2433 		return ECORE_SUCCESS;
2434 #endif
2435 
2436 	OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2437 	drv_version.version = p_ver->version;
2438 	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2439 	for (i = 0; i < num_words; i++) {
2440 		/* The driver name is expected to be in a big-endian format */
2441 		p_name = &p_ver->name[i * sizeof(u32)];
2442 		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2443 		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2444 	}
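	/* e.g. the first four name bytes "fbsd" would end up held as the
	 * u32 value 0x66627364 ('f' 'b' 's' 'd') on a host of either
	 * endianness.
	 */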
2445 
2446 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2447 	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2448 	mb_params.p_data_src = &drv_version;
2449 	mb_params.data_src_size = sizeof(drv_version);
2450 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2451 	if (rc != ECORE_SUCCESS)
2452 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2453 
2454 	return rc;
2455 }
2456 
2457 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2458 				    struct ecore_ptt *p_ptt)
2459 {
2460 	enum _ecore_status_t rc;
2461 	u32 resp = 0, param = 0;
2462 
2463 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2464 			   &param);
2465 	if (rc != ECORE_SUCCESS)
2466 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2467 
2468 	return rc;
2469 }
2470 
2471 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2472 				      struct ecore_ptt *p_ptt)
2473 {
2474 	u32 value, cpu_mode;
2475 
2476 	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2477 
2478 	value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2479 	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2480 	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2481 	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2482 
2483 	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
2484 }
2485 
2486 enum _ecore_status_t
2487 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2488 				   struct ecore_ptt *p_ptt,
2489 				   enum ecore_ov_client client)
2490 {
2491 	enum _ecore_status_t rc;
2492 	u32 resp = 0, param = 0;
2493 	u32 drv_mb_param;
2494 
2495 	switch (client) {
2496 	case ECORE_OV_CLIENT_DRV:
2497 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2498 		break;
2499 	case ECORE_OV_CLIENT_USER:
2500 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2501 		break;
2502 	case ECORE_OV_CLIENT_VENDOR_SPEC:
2503 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2504 		break;
2505 	default:
2506 		DP_NOTICE(p_hwfn, true,
2507 			  "Invalid client type %d\n", client);
2508 		return ECORE_INVAL;
2509 	}
2510 
2511 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2512 			   drv_mb_param, &resp, &param);
2513 	if (rc != ECORE_SUCCESS)
2514 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2515 
2516 	return rc;
2517 }
2518 
2519 enum _ecore_status_t
2520 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2521 				 struct ecore_ptt *p_ptt,
2522 				 enum ecore_ov_driver_state drv_state)
2523 {
2524 	enum _ecore_status_t rc;
2525 	u32 resp = 0, param = 0;
2526 	u32 drv_mb_param;
2527 
2528 	switch (drv_state) {
2529 	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2530 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2531 		break;
2532 	case ECORE_OV_DRIVER_STATE_DISABLED:
2533 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2534 		break;
2535 	case ECORE_OV_DRIVER_STATE_ACTIVE:
2536 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2537 		break;
2538 	default:
2539 		DP_NOTICE(p_hwfn, true,
2540 			  "Invalid driver state %d\n", drv_state);
2541 		return ECORE_INVAL;
2542 	}
2543 
2544 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2545 			   drv_mb_param, &resp, &param);
2546 	if (rc != ECORE_SUCCESS)
2547 		DP_ERR(p_hwfn, "Failed to send driver state\n");
2548 
2549 	return rc;
2550 }
2551 
2552 enum _ecore_status_t
2553 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2554 			 struct ecore_fc_npiv_tbl *p_table)
2555 {
2556 	enum _ecore_status_t rc = ECORE_SUCCESS;
2557 	struct dci_fc_npiv_tbl *p_npiv_table;
2558 	u8 *p_buf = OSAL_NULL;
2559 	u32 addr, size, i;
2560 
2561 	p_table->num_wwpn = 0;
2562 	p_table->num_wwnn = 0;
2563 	addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2564 			OFFSETOF(struct public_port, fc_npiv_nvram_tbl_addr));
2565 	if (addr == NPIV_TBL_INVALID_ADDR) {
2566 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
2567 		return rc;
2568 	}
2569 
2570 	size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2571 			OFFSETOF(struct public_port, fc_npiv_nvram_tbl_size));
2572 	if (!size) {
2573 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
2574 		return rc;
2575 	}
2576 
2577 	p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
2578 	if (!p_buf) {
2579 		DP_ERR(p_hwfn, "Buffer allocation failed\n");
2580 		return ECORE_NOMEM;
2581 	}
2582 
2583 	rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
2584 	if (rc != ECORE_SUCCESS) {
2585 		OSAL_VFREE(p_hwfn->p_dev, p_buf);
2586 		return rc;
2587 	}
2588 
2589 	p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
2590 	p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2591 	p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2592 	for (i = 0; i < p_table->num_wwpn; i++) {
2593 		OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
2594 			    ECORE_WWN_SIZE);
2595 		OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
2596 			    ECORE_WWN_SIZE);
2597 	}
2598 
2599 	OSAL_VFREE(p_hwfn->p_dev, p_buf);
2600 
2601 	return ECORE_SUCCESS;
2602 }
2603 
2604 enum _ecore_status_t
2605 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2606 			u16 mtu)
2607 {
2608 	enum _ecore_status_t rc;
2609 	u32 resp = 0, param = 0;
2610 	u32 drv_mb_param;
2611 
2612 	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2613 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2614 			   drv_mb_param, &resp, &param);
2615 	if (rc != ECORE_SUCCESS)
2616 		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2617 
2618 	return rc;
2619 }
2620 
2621 enum _ecore_status_t
2622 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2623 			u8 *mac)
2624 {
2625 	struct ecore_mcp_mb_params mb_params;
2626 	enum _ecore_status_t rc;
2627 	u32 mfw_mac[2];
2628 
2629 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2630 	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2631 	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2632 				DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2633 	mb_params.param |= MCP_PF_ID(p_hwfn);
2634 
2635 	/* The MCP is BE, and on LE platforms PCI swaps SHMEM accesses at
2636 	 * 32-bit granularity. The MAC therefore has to be written in native
2637 	 * order [and not byte order], otherwise the MFW would read it
2638 	 * incorrectly after the swap.
2639 	 */
2640 	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2641 	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
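	/* Example: MAC 00:11:22:33:44:55 yields mfw_mac[0] = 0x00112233
	 * and mfw_mac[1] = 0x44550000, regardless of host endianness.
	 */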
2642 
2643 	mb_params.p_data_src = (u8 *)mfw_mac;
2644 	mb_params.data_src_size = 8;
2645 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2646 	if (rc != ECORE_SUCCESS)
2647 		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2648 
2649 	/* Store primary MAC for later possible WoL */
2650 	OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
2651 
2652 	return rc;
2653 }
2654 
2655 enum _ecore_status_t
2656 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2657 			enum ecore_ov_wol wol)
2658 {
2659 	enum _ecore_status_t rc;
2660 	u32 resp = 0, param = 0;
2661 	u32 drv_mb_param;
2662 
2663 	if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
2664 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2665 			   "Can't change WoL configuration when WoL isn't supported\n");
2666 		return ECORE_INVAL;
2667 	}
2668 
2669 	switch (wol) {
2670 	case ECORE_OV_WOL_DEFAULT:
2671 		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2672 		break;
2673 	case ECORE_OV_WOL_DISABLED:
2674 		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2675 		break;
2676 	case ECORE_OV_WOL_ENABLED:
2677 		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2678 		break;
2679 	default:
2680 		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2681 		return ECORE_INVAL;
2682 	}
2683 
2684 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2685 			   drv_mb_param, &resp, &param);
2686 	if (rc != ECORE_SUCCESS)
2687 		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2688 
2689 	/* Store the WoL update for a future unload */
2690 	p_hwfn->p_dev->wol_config = (u8)wol;
2691 
2692 	return rc;
2693 }
2694 
2695 enum _ecore_status_t
2696 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2697 			    enum ecore_ov_eswitch eswitch)
2698 {
2699 	enum _ecore_status_t rc;
2700 	u32 resp = 0, param = 0;
2701 	u32 drv_mb_param;
2702 
2703 	switch (eswitch) {
2704 	case ECORE_OV_ESWITCH_NONE:
2705 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2706 		break;
2707 	case ECORE_OV_ESWITCH_VEB:
2708 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2709 		break;
2710 	case ECORE_OV_ESWITCH_VEPA:
2711 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2712 		break;
2713 	default:
2714 		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2715 		return ECORE_INVAL;
2716 	}
2717 
2718 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2719 			   drv_mb_param, &resp, &param);
2720 	if (rc != ECORE_SUCCESS)
2721 		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2722 
2723 	return rc;
2724 }
2725 
2726 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2727 				       struct ecore_ptt *p_ptt,
2728 				       enum ecore_led_mode mode)
2729 {
2730 	u32 resp = 0, param = 0, drv_mb_param;
2731 	enum _ecore_status_t rc;
2732 
2733 	switch (mode) {
2734 	case ECORE_LED_MODE_ON:
2735 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2736 		break;
2737 	case ECORE_LED_MODE_OFF:
2738 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2739 		break;
2740 	case ECORE_LED_MODE_RESTORE:
2741 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2742 		break;
2743 	default:
2744 		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2745 		return ECORE_INVAL;
2746 	}
2747 
2748 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2749 			   drv_mb_param, &resp, &param);
2750 	if (rc != ECORE_SUCCESS)
2751 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2752 
2753 	return rc;
2754 }
2755 
2756 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2757 					     struct ecore_ptt *p_ptt,
2758 					     u32 mask_parities)
2759 {
2760 	enum _ecore_status_t rc;
2761 	u32 resp = 0, param = 0;
2762 
2763 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2764 			   mask_parities, &resp, &param);
2765 
2766 	if (rc != ECORE_SUCCESS) {
2767 		DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
2768 	} else if (resp != FW_MSG_CODE_OK) {
2769 		DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
2770 		rc = ECORE_INVAL;
2771 	}
2772 
2773 	return rc;
2774 }
2775 
2776 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2777 			   u8 *p_buf, u32 len)
2778 {
2779 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2780 	u32 bytes_left, offset, bytes_to_copy, buf_size;
2781 	struct ecore_mcp_nvm_params params;
2782 	struct ecore_ptt  *p_ptt;
2783 	enum _ecore_status_t rc = ECORE_SUCCESS;
2784 
2785 	p_ptt = ecore_ptt_acquire(p_hwfn);
2786 	if (!p_ptt)
2787 		return ECORE_BUSY;
2788 
2789 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2790 	bytes_left = len;
2791 	offset = 0;
2792 	params.type = ECORE_MCP_NVM_RD;
2793 	params.nvm_rd.buf_size = &buf_size;
2794 	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
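	/* Read in chunks of up to MCP_DRV_NVM_BUF_LEN bytes; each chunk's
	 * length is packed into the upper bits of the mailbox offset
	 * parameter (DRV_MB_PARAM_NVM_LEN_SHIFT).
	 */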
2795 	while (bytes_left > 0) {
2796 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2797 					   MCP_DRV_NVM_BUF_LEN);
2798 		params.nvm_common.offset = (addr + offset) |
2799 					   (bytes_to_copy <<
2800 					    DRV_MB_PARAM_NVM_LEN_SHIFT);
2801 		params.nvm_rd.buf = (u32 *)(p_buf + offset);
2802 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2803 		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
2804 					    FW_MSG_CODE_NVM_OK)) {
2805 			DP_NOTICE(p_dev, false, "MCP command rc = %d\n",
2806 				  rc);
2807 			break;
2808 		}
2809 
2810 		/* This can be a lengthy process, and the scheduler might not be
2811 		 * preemptible. Sleep once per ~4KB read to avoid hogging the CPU.
2812 		 */
2813 		if (bytes_left % 0x1000 <
2814 		    (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
2815 			OSAL_MSLEEP(1);
2816 
2817 		offset += *params.nvm_rd.buf_size;
2818 		bytes_left -= *params.nvm_rd.buf_size;
2819 	}
2820 
2821 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2822 	ecore_ptt_release(p_hwfn, p_ptt);
2823 
2824 	return rc;
2825 }
2826 
2827 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2828 					u32 addr, u8 *p_buf, u32 len)
2829 {
2830 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2831 	struct ecore_mcp_nvm_params params;
2832 	struct ecore_ptt  *p_ptt;
2833 	enum _ecore_status_t rc;
2834 
2835 	p_ptt = ecore_ptt_acquire(p_hwfn);
2836 	if (!p_ptt)
2837 		return ECORE_BUSY;
2838 
2839 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2840 	params.type = ECORE_MCP_NVM_RD;
2841 	params.nvm_rd.buf_size = &len;
2842 	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2843 					DRV_MSG_CODE_PHY_CORE_READ :
2844 					DRV_MSG_CODE_PHY_RAW_READ;
2845 	params.nvm_common.offset = addr;
2846 	params.nvm_rd.buf = (u32 *)p_buf;
2847 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2848 	if (rc != ECORE_SUCCESS)
2849 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2850 
2851 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2852 	ecore_ptt_release(p_hwfn, p_ptt);
2853 
2854 	return rc;
2855 }
2856 
2857 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
2858 {
2859 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2860 	struct ecore_mcp_nvm_params params;
2861 	struct ecore_ptt  *p_ptt;
2862 
2863 	p_ptt = ecore_ptt_acquire(p_hwfn);
2864 	if (!p_ptt)
2865 		return ECORE_BUSY;
2866 
2867 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2868 	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
2869 	ecore_ptt_release(p_hwfn, p_ptt);
2870 
2871 	return ECORE_SUCCESS;
2872 }
2873 
2874 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
2875 					    u32 addr)
2876 {
2877 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2878 	struct ecore_mcp_nvm_params params;
2879 	struct ecore_ptt  *p_ptt;
2880 	enum _ecore_status_t rc;
2881 
2882 	p_ptt = ecore_ptt_acquire(p_hwfn);
2883 	if (!p_ptt)
2884 		return ECORE_BUSY;
2885 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2886 	params.type = ECORE_MCP_CMD;
2887 	params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
2888 	params.nvm_common.offset = addr;
2889 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2890 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2891 	ecore_ptt_release(p_hwfn, p_ptt);
2892 
2893 	return rc;
2894 }
2895 
2896 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
2897 						  u32 addr)
2898 {
2899 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2900 	struct ecore_mcp_nvm_params params;
2901 	struct ecore_ptt  *p_ptt;
2902 	enum _ecore_status_t rc;
2903 
2904 	p_ptt = ecore_ptt_acquire(p_hwfn);
2905 	if (!p_ptt)
2906 		return ECORE_BUSY;
2907 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2908 	params.type = ECORE_MCP_CMD;
2909 	params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2910 	params.nvm_common.offset = addr;
2911 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2912 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2913 	ecore_ptt_release(p_hwfn, p_ptt);
2914 
2915 	return rc;
2916 }
2917 
2918 /* rc is initialized to ECORE_INVAL because the while loop
2919  * is never entered when len is 0
2920  */
2921 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
2922 					 u32 addr, u8 *p_buf, u32 len)
2923 {
2924 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2925 	enum _ecore_status_t rc = ECORE_INVAL;
2926 	struct ecore_mcp_nvm_params params;
2927 	struct ecore_ptt  *p_ptt;
2928 	u32 buf_idx, buf_size;
2929 
2930 	p_ptt = ecore_ptt_acquire(p_hwfn);
2931 	if (!p_ptt)
2932 		return ECORE_BUSY;
2933 
2934 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2935 	params.type = ECORE_MCP_NVM_WR;
2936 	switch (cmd) {
2937 	case ECORE_PUT_FILE_DATA:
2938 		params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2939 		break;
2940 	case ECORE_NVM_WRITE_NVRAM:
2941 		params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2942 		break;
2943 	case ECORE_EXT_PHY_FW_UPGRADE:
2944 		params.nvm_common.cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
2945 		break;
2946 	default:
2947 		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
2948 			  cmd);
 		/* Release the PTT acquired above before bailing out */
 		ecore_ptt_release(p_hwfn, p_ptt);
2949 		return ECORE_INVAL;
2950 	}
2951 
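	/* Mirror of the read path: write in chunks of up to
	 * MCP_DRV_NVM_BUF_LEN bytes, with each chunk's length packed into
	 * the upper bits of the mailbox offset parameter.
	 */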
2952 	buf_idx = 0;
2953 	while (buf_idx < len) {
2954 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
2955 				      MCP_DRV_NVM_BUF_LEN);
2956 		params.nvm_common.offset = ((buf_size <<
2957 					     DRV_MB_PARAM_NVM_LEN_SHIFT)
2958 					    | addr) + buf_idx;
2959 		params.nvm_wr.buf_size = buf_size;
2960 		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
2961 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2962 		if (rc != ECORE_SUCCESS ||
2963 		    ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
2964 		     (params.nvm_common.resp !=
2965 		      FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
2966 			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2967 
2968 		/* This can be a lengthy process, and the scheduler might not be
2969 		 * preemptible. Sleep once per ~4KB written to avoid hogging the CPU.
2970 		 */
2971 		if (buf_idx % 0x1000 >
2972 		    (buf_idx + buf_size) % 0x1000)
2973 			OSAL_MSLEEP(1);
2974 
2975 		buf_idx += buf_size;
2976 	}
2977 
2978 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
2979 	ecore_ptt_release(p_hwfn, p_ptt);
2980 
2981 	return rc;
2982 }
2983 
2984 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
2985 					 u32 addr, u8 *p_buf, u32 len)
2986 {
2987 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2988 	struct ecore_mcp_nvm_params params;
2989 	struct ecore_ptt  *p_ptt;
2990 	enum _ecore_status_t rc;
2991 
2992 	p_ptt = ecore_ptt_acquire(p_hwfn);
2993 	if (!p_ptt)
2994 		return ECORE_BUSY;
2995 
2996 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2997 	params.type = ECORE_MCP_NVM_WR;
2998 	params.nvm_wr.buf_size = len;
2999 	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
3000 					DRV_MSG_CODE_PHY_CORE_WRITE :
3001 					DRV_MSG_CODE_PHY_RAW_WRITE;
3002 	params.nvm_common.offset = addr;
3003 	params.nvm_wr.buf = (u32 *)p_buf;
3004 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3005 	if (rc != ECORE_SUCCESS)
3006 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3007 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
3008 	ecore_ptt_release(p_hwfn, p_ptt);
3009 
3010 	return rc;
3011 }
3012 
3013 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3014 						   u32 addr)
3015 {
3016 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3017 	struct ecore_mcp_nvm_params params;
3018 	struct ecore_ptt  *p_ptt;
3019 	enum _ecore_status_t rc;
3020 
3021 	p_ptt = ecore_ptt_acquire(p_hwfn);
3022 	if (!p_ptt)
3023 		return ECORE_BUSY;
3024 
3025 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3026 	params.type = ECORE_MCP_CMD;
3027 	params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
3028 	params.nvm_common.offset = addr;
3029 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3030 	p_dev->mcp_nvm_resp = params.nvm_common.resp;
3031 	ecore_ptt_release(p_hwfn, p_ptt);
3032 
3033 	return rc;
3034 }
3035 
3036 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3037 					    struct ecore_ptt *p_ptt,
3038 					    u32 port, u32 addr, u32 offset,
3039 					    u32 len, u8 *p_buf)
3040 {
3041 	struct ecore_mcp_nvm_params params;
3042 	enum _ecore_status_t rc;
3043 	u32 bytes_left, bytes_to_copy, buf_size;
3044 
3045 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3046 	params.nvm_common.offset =
3047 		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3048 		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
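	/* From here on, "addr" is reused as the running byte offset into
	 * the transceiver EEPROM; the port and I2C address stay packed in
	 * the low bits of the mailbox parameter.
	 */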
3049 	addr = offset;
3050 	offset = 0;
3051 	bytes_left = len;
3052 	params.type = ECORE_MCP_NVM_RD;
3053 	params.nvm_rd.buf_size = &buf_size;
3054 	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
3055 	while (bytes_left > 0) {
3056 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3057 					   MAX_I2C_TRANSACTION_SIZE);
3058 		params.nvm_rd.buf = (u32 *)(p_buf + offset);
3059 		params.nvm_common.offset &=
3060 			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3061 			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3062 		params.nvm_common.offset |=
3063 			((addr + offset) <<
3064 			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3065 		params.nvm_common.offset |=
3066 			(bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3067 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3068 		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3069 		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3070 			return ECORE_NODEV;
3071 		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3072 			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3073 			return ECORE_UNKNOWN_ERROR;
3074 
3075 		offset += *params.nvm_rd.buf_size;
3076 		bytes_left -= *params.nvm_rd.buf_size;
3077 	}
3078 
3079 	return ECORE_SUCCESS;
3080 }
3081 
3082 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3083 					     struct ecore_ptt *p_ptt,
3084 					     u32 port, u32 addr, u32 offset,
3085 					     u32 len, u8 *p_buf)
3086 {
3087 	struct ecore_mcp_nvm_params params;
3088 	enum _ecore_status_t rc;
3089 	u32 buf_idx, buf_size;
3090 
3091 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3092 	params.nvm_common.offset =
3093 		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
3094 		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
3095 	params.type = ECORE_MCP_NVM_WR;
3096 	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
3097 	buf_idx = 0;
3098 	while (buf_idx < len) {
3099 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3100 				      MAX_I2C_TRANSACTION_SIZE);
3101 		params.nvm_common.offset &=
3102 			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3103 			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3104 		params.nvm_common.offset |=
3105 			((offset + buf_idx) <<
3106 			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
3107 		params.nvm_common.offset |=
3108 			(buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
3109 		params.nvm_wr.buf_size = buf_size;
3110 		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
3111 		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3112 		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
3113 		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
3114 			return ECORE_NODEV;
3115 		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3116 			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3117 			return ECORE_UNKNOWN_ERROR;
3118 
3119 		buf_idx += buf_size;
3120 	}
3121 
3122 	return ECORE_SUCCESS;
3123 }
3124 
3125 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3126 					 struct ecore_ptt *p_ptt,
3127 					 u16 gpio, u32 *gpio_val)
3128 {
3129 	enum _ecore_status_t rc = ECORE_SUCCESS;
3130 	u32 drv_mb_param = 0, rsp;
3131 
3132 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
3133 
3134 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3135 			   drv_mb_param, &rsp, gpio_val);
3136 
3137 	if (rc != ECORE_SUCCESS)
3138 		return rc;
3139 
3140 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3141 		return ECORE_UNKNOWN_ERROR;
3142 
3143 	return ECORE_SUCCESS;
3144 }
3145 
3146 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3147 					  struct ecore_ptt *p_ptt,
3148 					  u16 gpio, u16 gpio_val)
3149 {
3150 	enum _ecore_status_t rc = ECORE_SUCCESS;
3151 	u32 drv_mb_param = 0, param, rsp;
3152 
3153 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
3154 		(gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
3155 
3156 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3157 			   drv_mb_param, &rsp, &param);
3158 
3159 	if (rc != ECORE_SUCCESS)
3160 		return rc;
3161 
3162 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3163 		return ECORE_UNKNOWN_ERROR;
3164 
3165 	return ECORE_SUCCESS;
3166 }
3167 
3168 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3169 					 struct ecore_ptt *p_ptt,
3170 					 u16 gpio, u32 *gpio_direction,
3171 					 u32 *gpio_ctrl)
3172 {
3173 	u32 drv_mb_param = 0, rsp, val = 0;
3174 	enum _ecore_status_t rc = ECORE_SUCCESS;
3175 
3176 	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
3177 
3178 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3179 			   drv_mb_param, &rsp, &val);
3180 	if (rc != ECORE_SUCCESS)
3181 		return rc;
3182 
3183 	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3184 			   DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
3185 	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3186 		      DRV_MB_PARAM_GPIO_CTRL_SHIFT;
3187 
3188 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3189 		return ECORE_UNKNOWN_ERROR;
3190 
3191 	return ECORE_SUCCESS;
3192 }
3193 
3194 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3195 						  struct ecore_ptt *p_ptt)
3196 {
3197 	u32 drv_mb_param = 0, rsp, param;
3198 	enum _ecore_status_t rc = ECORE_SUCCESS;
3199 
3200 	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3201 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3202 
3203 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3204 			   drv_mb_param, &rsp, &param);
3205 
3206 	if (rc != ECORE_SUCCESS)
3207 		return rc;
3208 
3209 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3210 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3211 		rc = ECORE_UNKNOWN_ERROR;
3212 
3213 	return rc;
3214 }
3215 
3216 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3217 					       struct ecore_ptt *p_ptt)
3218 {
3219 	u32 drv_mb_param, rsp, param;
3220 	enum _ecore_status_t rc = ECORE_SUCCESS;
3221 
3222 	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3223 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3224 
3225 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3226 			   drv_mb_param, &rsp, &param);
3227 
3228 	if (rc != ECORE_SUCCESS)
3229 		return rc;
3230 
3231 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3232 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3233 		rc = ECORE_UNKNOWN_ERROR;
3234 
3235 	return rc;
3236 }
3237 
3238 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3239 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3240 {
3241 	u32 drv_mb_param = 0, rsp;
3242 	enum _ecore_status_t rc = ECORE_SUCCESS;
3243 
3244 	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3245 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3246 
3247 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3248 			   drv_mb_param, &rsp, num_images);
3249 
3250 	if (rc != ECORE_SUCCESS)
3251 		return rc;
3252 
3253 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3254 		rc = ECORE_UNKNOWN_ERROR;
3255 
3256 	return rc;
3257 }
3258 
3259 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3260 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3261 	struct bist_nvm_image_att *p_image_att, u32 image_index)
3262 {
3263 	struct ecore_mcp_nvm_params params;
3264 	enum _ecore_status_t rc;
3265 	u32 buf_size;
3266 
3267 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
3268 	params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3269 				    DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3270 	params.nvm_common.offset |= (image_index <<
3271 				    DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
3272 
3273 	params.type = ECORE_MCP_NVM_RD;
3274 	params.nvm_rd.buf_size = &buf_size;
3275 	params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
3276 	params.nvm_rd.buf = (u32 *)p_image_att;
3277 
3278 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3279 	if (rc != ECORE_SUCCESS)
3280 		return rc;
3281 
3282 	if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3283 	    (p_image_att->return_code != 1))
3284 		rc = ECORE_UNKNOWN_ERROR;
3285 
3286 	return rc;
3287 }
3288 
3289 enum _ecore_status_t
3290 ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3291 			    enum ecore_nvm_images image_id,
3292 			    struct ecore_nvm_image_att *p_image_att)
3293 {
3294 	struct bist_nvm_image_att mfw_image_att;
3295 	enum nvm_image_type type;
3296 	u32 num_images, i;
3297 	enum _ecore_status_t rc;
3298 
3299 	/* Translate image_id into MFW definitions */
3300 	switch (image_id) {
3301 	case ECORE_NVM_IMAGE_ISCSI_CFG:
3302 		type = NVM_TYPE_ISCSI_CFG;
3303 		break;
3304 	case ECORE_NVM_IMAGE_FCOE_CFG:
3305 		type = NVM_TYPE_FCOE_CFG;
3306 		break;
3307 	case ECORE_NVM_IMAGE_MDUMP:
3308 		type = NVM_TYPE_MDUMP;
3309 		break;
3310 	default:
3311 		DP_NOTICE(p_hwfn, false, "Unknown request of image_id %08x\n",
3312 			  image_id);
3313 		return ECORE_INVAL;
3314 	}
3315 
3316 	/* Learn number of images, then traverse and see if one fits */
3317 	rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
3318 	if (rc != ECORE_SUCCESS || !num_images)
3319 		return ECORE_INVAL;
3320 
3321 	for (i = 0; i < num_images; i++) {
3322 		rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
3323 							   &mfw_image_att, i);
3324 		if (rc != ECORE_SUCCESS)
3325 			return rc;
3326 
3327 		if (type == mfw_image_att.image_type)
3328 			break;
3329 	}
3330 	if (i == num_images) {
3331 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3332 			   "Failed to find nvram image for image_id %08x\n",
3333 			   image_id);
3334 		return ECORE_INVAL;
3335 	}
3336 
3337 	p_image_att->start_addr = mfw_image_att.nvm_start_addr;
3338 	p_image_att->length = mfw_image_att.len;
3339 
3340 	return ECORE_SUCCESS;
3341 }
3342 
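/*
 * Copies an NVM image into a caller-supplied buffer. The stored image is
 * suffixed by a 4-byte CRC which is of no use to upper layers, so the
 * reported length is trimmed before the buffer-size check and the read.
 * A minimal usage sketch (buffer sizing is hypothetical, for illustration):
 *
 *	u8 cfg_buf[4096];
 *
 *	rc = ecore_mcp_get_nvm_image(p_hwfn, p_ptt, ECORE_NVM_IMAGE_ISCSI_CFG,
 *				     cfg_buf, sizeof(cfg_buf));
 */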
3343 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
3344 					     struct ecore_ptt *p_ptt,
3345 					     enum ecore_nvm_images image_id,
3346 					     u8 *p_buffer, u32 buffer_len)
3347 {
3348 	struct ecore_nvm_image_att image_att;
3349 	enum _ecore_status_t rc;
3350 
3351 	OSAL_MEM_ZERO(p_buffer, buffer_len);
3352 
3353 	rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
3354 	if (rc != ECORE_SUCCESS)
3355 		return rc;
3356 
3357 	/* Validate sizes - both the image's and the supplied buffer's */
3358 	if (image_att.length <= 4) {
3359 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3360 			   "Image [%d] is too small - only %d bytes\n",
3361 			   image_id, image_att.length);
3362 		return ECORE_INVAL;
3363 	}
3364 
3365 	/* Each NVM image is suffixed by a CRC; the upper layer has no need for it */
3366 	image_att.length -= 4;
3367 
3368 	if (image_att.length > buffer_len) {
3369 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3370 			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
3371 			   image_id, image_att.length, buffer_len);
3372 		return ECORE_NOMEM;
3373 	}
3374 
3375 	return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
3376 				  p_buffer, image_att.length);
3377 }
3378 
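/*
 * Reads the MFW temperature report and unpacks it. Each sensor entry is a
 * single 32-bit word holding four bit-fields (sensor location, high
 * threshold, critical threshold and current temperature), which are
 * extracted with the corresponding mask/shift pairs below.
 */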
3379 enum _ecore_status_t
3380 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3381 			       struct ecore_ptt *p_ptt,
3382 			       struct ecore_temperature_info *p_temp_info)
3383 {
3384 	struct ecore_temperature_sensor *p_temp_sensor;
3385 	struct temperature_status_stc mfw_temp_info;
3386 	struct ecore_mcp_mb_params mb_params;
3387 	u32 val;
3388 	enum _ecore_status_t rc;
3389 	u8 i;
3390 
3391 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3392 	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3393 	mb_params.p_data_dst = &mfw_temp_info;
3394 	mb_params.data_dst_size = sizeof(mfw_temp_info);
3395 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3396 	if (rc != ECORE_SUCCESS)
3397 		return rc;
3398 
3399 	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3400 	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3401 					      ECORE_MAX_NUM_OF_SENSORS);
3402 	for (i = 0; i < p_temp_info->num_sensors; i++) {
3403 		val = mfw_temp_info.sensor[i];
3404 		p_temp_sensor = &p_temp_info->sensors[i];
3405 		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3406 						 SENSOR_LOCATION_SHIFT;
3407 		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3408 						THRESHOLD_HIGH_SHIFT;
3409 		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3410 					  CRITICAL_TEMPERATURE_SHIFT;
3411 		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3412 					      CURRENT_TEMP_SHIFT;
3413 	}
3414 
3415 	return ECORE_SUCCESS;
3416 }
3417 
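/*
 * Retrieves the MBA (option ROM) version words over the NVM read path. The
 * MFW is expected to fill exactly MCP_DRV_NVM_BUF_LEN bytes; any other size,
 * as well as a non-NVM_OK response, is treated as an error.
 */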
3418 enum _ecore_status_t ecore_mcp_get_mba_versions(
3419 	struct ecore_hwfn *p_hwfn,
3420 	struct ecore_ptt *p_ptt,
3421 	struct ecore_mba_vers *p_mba_vers)
3422 {
3423 	struct ecore_mcp_nvm_params params;
3424 	enum _ecore_status_t rc;
3425 	u32 buf_size;
3426 
3427 	OSAL_MEM_ZERO(&params, sizeof(params));
3428 	params.type = ECORE_MCP_NVM_RD;
3429 	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
3430 	params.nvm_common.offset = 0;
3431 	params.nvm_rd.buf = &(p_mba_vers->mba_vers[0]);
3432 	params.nvm_rd.buf_size = &buf_size;
3433 	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
3434 
3435 	if (rc != ECORE_SUCCESS)
3436 		return rc;
3437 
3438 	if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
3439 	    FW_MSG_CODE_NVM_OK)
3440 		rc = ECORE_UNKNOWN_ERROR;
3441 
3442 	if (buf_size != MCP_DRV_NVM_BUF_LEN)
3443 		rc = ECORE_UNKNOWN_ERROR;
3444 
3445 	return rc;
3446 }
3447 
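/*
 * Queries the number of memory ECC events recorded by the MFW. Note that the
 * mailbox only returns a 32-bit parameter, so only the low half of the
 * caller's u64 counter is written; callers should zero *num_events first.
 */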
3448 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3449 					      struct ecore_ptt *p_ptt,
3450 					      u64 *num_events)
3451 {
3452 	u32 rsp;
3453 
3454 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3455 			     0, &rsp, (u32 *)num_events);
3456 }
3457 
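/*
 * Maps the driver's ecore_resources enumeration onto the MFW's
 * resource_id_enum. A few entries deliberately share an MFW resource (MAC
 * and VLAN both map to the VFC filter pool, CNQ and CMDQS to CQS); anything
 * without a known counterpart falls through to RESOURCE_NUM_INVALID.
 */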
3458 static enum resource_id_enum
3459 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3460 {
3461 	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3462 
3463 	switch (res_id) {
3464 	case ECORE_SB:
3465 		mfw_res_id = RESOURCE_NUM_SB_E;
3466 		break;
3467 	case ECORE_L2_QUEUE:
3468 		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3469 		break;
3470 	case ECORE_VPORT:
3471 		mfw_res_id = RESOURCE_NUM_VPORT_E;
3472 		break;
3473 	case ECORE_RSS_ENG:
3474 		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3475 		break;
3476 	case ECORE_PQ:
3477 		mfw_res_id = RESOURCE_NUM_PQ_E;
3478 		break;
3479 	case ECORE_RL:
3480 		mfw_res_id = RESOURCE_NUM_RL_E;
3481 		break;
3482 	case ECORE_MAC:
3483 	case ECORE_VLAN:
3484 		/* Each VFC resource can accommodate both a MAC and a VLAN */
3485 		mfw_res_id = RESOURCE_VFC_FILTER_E;
3486 		break;
3487 	case ECORE_ILT:
3488 		mfw_res_id = RESOURCE_ILT_E;
3489 		break;
3490 	case ECORE_LL2_QUEUE:
3491 		mfw_res_id = RESOURCE_LL2_QUEUE_E;
3492 		break;
3493 	case ECORE_RDMA_CNQ_RAM:
3494 	case ECORE_CMDQS_CQS:
3495 		/* CNQ/CMDQS are the same resource */
3496 		mfw_res_id = RESOURCE_CQS_E;
3497 		break;
3498 	case ECORE_RDMA_STATS_QUEUE:
3499 		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3500 		break;
3501 	case ECORE_BDQ:
3502 		mfw_res_id = RESOURCE_BDQ_E;
3503 		break;
3504 	default:
3505 		break;
3506 	}
3507 
3508 	return mfw_res_id;
3509 }
3510 
3511 #define ECORE_RESC_ALLOC_VERSION_MAJOR	2
3512 #define ECORE_RESC_ALLOC_VERSION_MINOR	0
3513 #define ECORE_RESC_ALLOC_VERSION				\
3514 	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
3515 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	\
3516 	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
3517 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3518 
3519 struct ecore_resc_alloc_in_params {
3520 	u32 cmd;
3521 	enum ecore_resources res_id;
3522 	u32 resc_max_val;
3523 };
3524 
3525 struct ecore_resc_alloc_out_params {
3526 	u32 mcp_resp;
3527 	u32 mcp_param;
3528 	u32 resc_num;
3529 	u32 resc_start;
3530 	u32 vf_resc_num;
3531 	u32 vf_resc_start;
3532 	u32 flags;
3533 };
3534 
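/*
 * Common worker for the GET/SET resource-allocation mailbox commands. The
 * same resource_info structure is used as both the request and the response
 * buffer (note p_data_dst aliasing p_data_src below), since the MFW updates
 * the structure in place.
 */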
3535 static enum _ecore_status_t
3536 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3537 			      struct ecore_ptt *p_ptt,
3538 			      struct ecore_resc_alloc_in_params *p_in_params,
3539 			      struct ecore_resc_alloc_out_params *p_out_params)
3540 {
3541 	struct ecore_mcp_mb_params mb_params;
3542 	struct resource_info mfw_resc_info;
3543 	enum _ecore_status_t rc;
3544 
3545 	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3546 
3547 	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3548 	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3549 		DP_ERR(p_hwfn,
3550 		       "Failed to match resource %d [%s] with the MFW resources\n",
3551 		       p_in_params->res_id,
3552 		       ecore_hw_get_resc_name(p_in_params->res_id));
3553 		return ECORE_INVAL;
3554 	}
3555 
3556 	switch (p_in_params->cmd) {
3557 	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3558 		mfw_resc_info.size = p_in_params->resc_max_val;
3559 		/* Fallthrough */
3560 	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3561 		break;
3562 	default:
3563 		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3564 		       p_in_params->cmd);
3565 		return ECORE_INVAL;
3566 	}
3567 
3568 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3569 	mb_params.cmd = p_in_params->cmd;
3570 	mb_params.param = ECORE_RESC_ALLOC_VERSION;
3571 	mb_params.p_data_src = &mfw_resc_info;
3572 	mb_params.data_src_size = sizeof(mfw_resc_info);
3573 	mb_params.p_data_dst = mb_params.p_data_src;
3574 	mb_params.data_dst_size = mb_params.data_src_size;
3575 
3576 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3577 		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3578 		   p_in_params->cmd, p_in_params->res_id,
3579 		   ecore_hw_get_resc_name(p_in_params->res_id),
3580 		   ECORE_MFW_GET_FIELD(mb_params.param,
3581 			   DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3582 		   ECORE_MFW_GET_FIELD(mb_params.param,
3583 			   DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3584 		   p_in_params->resc_max_val);
3585 
3586 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3587 	if (rc != ECORE_SUCCESS)
3588 		return rc;
3589 
3590 	p_out_params->mcp_resp = mb_params.mcp_resp;
3591 	p_out_params->mcp_param = mb_params.mcp_param;
3592 	p_out_params->resc_num = mfw_resc_info.size;
3593 	p_out_params->resc_start = mfw_resc_info.offset;
3594 	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3595 	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3596 	p_out_params->flags = mfw_resc_info.flags;
3597 
3598 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3599 		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3600 		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
3601 			   FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3602 		   ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
3603 			   FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3604 		   p_out_params->resc_num, p_out_params->resc_start,
3605 		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3606 		   p_out_params->flags);
3607 
3608 	return ECORE_SUCCESS;
3609 }
3610 
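/*
 * Asks the MFW to cap a resource at resc_max_val using the SET variant of
 * the resource-allocation message; only the mailbox response is propagated
 * back to the caller.
 */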
3611 enum _ecore_status_t
3612 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3613 			   enum ecore_resources res_id, u32 resc_max_val,
3614 			   u32 *p_mcp_resp)
3615 {
3616 	struct ecore_resc_alloc_out_params out_params;
3617 	struct ecore_resc_alloc_in_params in_params;
3618 	enum _ecore_status_t rc;
3619 
3620 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3621 	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3622 	in_params.res_id = res_id;
3623 	in_params.resc_max_val = resc_max_val;
3624 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3625 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3626 					   &out_params);
3627 	if (rc != ECORE_SUCCESS)
3628 		return rc;
3629 
3630 	*p_mcp_resp = out_params.mcp_resp;
3631 
3632 	return ECORE_SUCCESS;
3633 }
3634 
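/*
 * Reads back the amount and base offset of a resource allocated to this
 * function. resc_num/resc_start are only valid when the MFW answers with
 * FW_MSG_CODE_RESOURCE_ALLOC_OK, so they are copied out conditionally.
 */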
3635 enum _ecore_status_t
3636 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3637 			enum ecore_resources res_id, u32 *p_mcp_resp,
3638 			u32 *p_resc_num, u32 *p_resc_start)
3639 {
3640 	struct ecore_resc_alloc_out_params out_params;
3641 	struct ecore_resc_alloc_in_params in_params;
3642 	enum _ecore_status_t rc;
3643 
3644 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3645 	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3646 	in_params.res_id = res_id;
3647 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3648 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3649 					   &out_params);
3650 	if (rc != ECORE_SUCCESS)
3651 		return rc;
3652 
3653 	*p_mcp_resp = out_params.mcp_resp;
3654 
3655 	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3656 		*p_resc_num = out_params.resc_num;
3657 		*p_resc_start = out_params.resc_start;
3658 	}
3659 
3660 	return ECORE_SUCCESS;
3661 }
3662 
3663 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3664 					       struct ecore_ptt *p_ptt)
3665 {
3666 	u32 mcp_resp, mcp_param;
3667 
3668 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3669 			     &mcp_resp, &mcp_param);
3670 }
3671 
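/*
 * Reads the LLDP MAC address from the MFW. The address arrives split into an
 * upper 16-bit part and a lower 32-bit part, which are stitched back into
 * the 6-byte array expected by callers.
 */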
3672 enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn,
3673 					    struct ecore_ptt *p_ptt,
3674 					    u8 lldp_mac_addr[ETH_ALEN])
3675 {
3676 	struct ecore_mcp_mb_params mb_params;
3677 	struct mcp_mac lldp_mac;
3678 	enum _ecore_status_t rc;
3679 
3680 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3681 	mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC;
3682 	mb_params.p_data_dst = &lldp_mac;
3683 	mb_params.data_dst_size = sizeof(lldp_mac);
3684 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3685 	if (rc != ECORE_SUCCESS)
3686 		return rc;
3687 
3688 	if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3689 		DP_NOTICE(p_hwfn, false,
3690 			  "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n",
3691 			  mb_params.mcp_resp);
3692 		return ECORE_INVAL;
3693 	}
3694 
3695 	*(u16 *)lldp_mac_addr = *(u16 *)&lldp_mac.mac_upper;
3696 	*(u32 *)(lldp_mac_addr + 2) = lldp_mac.mac_lower;
3697 
3698 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3699 		   "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3700 		   lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3701 		   lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3702 
3703 	return ECORE_SUCCESS;
3704 }
3705 
3706 enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn,
3707 					    struct ecore_ptt *p_ptt,
3708 					    u8 lldp_mac_addr[ETH_ALEN])
3709 {
3710 	struct ecore_mcp_mb_params mb_params;
3711 	struct mcp_mac lldp_mac;
3712 	enum _ecore_status_t rc;
3713 
3714 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3715 		   "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3716 		   lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3717 		   lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3718 
3719 	OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac));
3720 	lldp_mac.mac_upper = *(u16 *)lldp_mac_addr;
3721 	lldp_mac.mac_lower = *(u32 *)(lldp_mac_addr + 2);
3722 
3723 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3724 	mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC;
3725 	mb_params.p_data_src = &lldp_mac;
3726 	mb_params.data_src_size = sizeof(lldp_mac);
3727 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3728 	if (rc != ECORE_SUCCESS)
3729 		return rc;
3730 
3731 	if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3732 		DP_NOTICE(p_hwfn, false,
3733 			  "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n",
3734 			  mb_params.mcp_resp);
3735 		return ECORE_INVAL;
3736 	}
3737 
3738 	return ECORE_SUCCESS;
3739 }
3740 
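/*
 * Thin wrapper around DRV_MSG_CODE_RESOURCE_CMD that folds the two generic
 * failure modes - an MFW without resource-command support and an opcode the
 * MFW does not recognize - into ECORE_NOTIMPL and ECORE_INVAL respectively.
 */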
3741 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3742 						   struct ecore_ptt *p_ptt,
3743 						   u32 param, u32 *p_mcp_resp,
3744 						   u32 *p_mcp_param)
3745 {
3746 	enum _ecore_status_t rc;
3747 
3748 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3749 			   p_mcp_resp, p_mcp_param);
3750 	if (rc != ECORE_SUCCESS)
3751 		return rc;
3752 
3753 	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3754 		DP_INFO(p_hwfn,
3755 			"The resource command is unsupported by the MFW\n");
3756 		return ECORE_NOTIMPL;
3757 	}
3758 
3759 	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3760 		u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3761 
3762 		DP_NOTICE(p_hwfn, false,
3763 			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3764 			  param, opcode);
3765 		return ECORE_INVAL;
3766 	}
3767 
3768 	return rc;
3769 }
3770 
3771 static enum _ecore_status_t
3772 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3773 		      struct ecore_resc_lock_params *p_params)
3774 {
3775 	u32 param = 0, mcp_resp, mcp_param;
3776 	u8 opcode;
3777 	enum _ecore_status_t rc;
3778 
3779 	switch (p_params->timeout) {
3780 	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3781 		opcode = RESOURCE_OPCODE_REQ;
3782 		p_params->timeout = 0;
3783 		break;
3784 	case ECORE_MCP_RESC_LOCK_TO_NONE:
3785 		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3786 		p_params->timeout = 0;
3787 		break;
3788 	default:
3789 		opcode = RESOURCE_OPCODE_REQ_W_AGING;
3790 		break;
3791 	}
3792 
3793 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3794 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3795 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3796 
3797 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3798 		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3799 		   param, p_params->timeout, opcode, p_params->resource);
3800 
3801 	/* Attempt to acquire the resource */
3802 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3803 				    &mcp_param);
3804 	if (rc != ECORE_SUCCESS)
3805 		return rc;
3806 
3807 	/* Analyze the response */
3808 	p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
3809 					     RESOURCE_CMD_RSP_OWNER);
3810 	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3811 
3812 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3813 		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3814 		   mcp_param, opcode, p_params->owner);
3815 
3816 	switch (opcode) {
3817 	case RESOURCE_OPCODE_GNT:
3818 		p_params->b_granted = true;
3819 		break;
3820 	case RESOURCE_OPCODE_BUSY:
3821 		p_params->b_granted = false;
3822 		break;
3823 	default:
3824 		DP_NOTICE(p_hwfn, false,
3825 			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3826 			  mcp_param, opcode);
3827 		return ECORE_INVAL;
3828 	}
3829 
3830 	return ECORE_SUCCESS;
3831 }
3832 
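/*
 * Retrying wrapper around __ecore_mcp_resc_lock(). Between attempts it waits
 * for retry_interval microseconds (busy-wait or sleep, per sleep_b4_retry).
 * Exhausting retry_num is not reported as an error: the function still
 * returns ECORE_SUCCESS and the caller must inspect b_granted. A sketch of a
 * hypothetical caller, with illustrative parameter values (the resource id
 * shown is assumed to be defined in ecore_mcp.h):
 *
 *	struct ecore_resc_lock_params lock_params;
 *
 *	OSAL_MEM_ZERO(&lock_params, sizeof(lock_params));
 *	lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
 *	lock_params.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
 *	lock_params.retry_num = 10;
 *	lock_params.retry_interval = 10000;
 *	lock_params.sleep_b4_retry = true;
 *	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
 *	if (rc == ECORE_SUCCESS && !lock_params.b_granted)
 *		rc = ECORE_BUSY;
 */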
3833 enum _ecore_status_t
3834 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3835 		    struct ecore_resc_lock_params *p_params)
3836 {
3837 	u32 retry_cnt = 0;
3838 	enum _ecore_status_t rc;
3839 
3840 	do {
3841 		/* No need for an interval before the first iteration */
3842 		if (retry_cnt) {
3843 			if (p_params->sleep_b4_retry) {
3844 				u16 retry_interval_in_ms =
3845 					DIV_ROUND_UP(p_params->retry_interval,
3846 						     1000);
3847 
3848 				OSAL_MSLEEP(retry_interval_in_ms);
3849 			} else {
3850 				OSAL_UDELAY(p_params->retry_interval);
3851 			}
3852 		}
3853 
3854 		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3855 		if (rc != ECORE_SUCCESS)
3856 			return rc;
3857 
3858 		if (p_params->b_granted)
3859 			break;
3860 	} while (retry_cnt++ < p_params->retry_num);
3861 
3862 	return ECORE_SUCCESS;
3863 }
3864 
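/*
 * Releases a previously acquired MFW resource lock. When b_force is set the
 * FORCE_RELEASE opcode is used, which is presumably meant to break a lock
 * held by another owner; the outcome is reported through b_released.
 */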
3865 enum _ecore_status_t
3866 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3867 		      struct ecore_resc_unlock_params *p_params)
3868 {
3869 	u32 param = 0, mcp_resp, mcp_param;
3870 	u8 opcode;
3871 	enum _ecore_status_t rc;
3872 
3873 	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3874 				   : RESOURCE_OPCODE_RELEASE;
3875 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3876 	ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3877 
3878 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3879 		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3880 		   param, opcode, p_params->resource);
3881 
3882 	/* Attempt to release the resource */
3883 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3884 				    &mcp_param);
3885 	if (rc != ECORE_SUCCESS)
3886 		return rc;
3887 
3888 	/* Analyze the response */
3889 	opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3890 
3891 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3892 		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3893 		   mcp_param, opcode);
3894 
3895 	switch (opcode) {
3896 	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3897 		DP_INFO(p_hwfn,
3898 			"Resource unlock request for an already released resource [%d]\n",
3899 			p_params->resource);
3900 		/* Fallthrough */
3901 	case RESOURCE_OPCODE_RELEASED:
3902 		p_params->b_released = true;
3903 		break;
3904 	case RESOURCE_OPCODE_WRONG_OWNER:
3905 		p_params->b_released = false;
3906 		break;
3907 	default:
3908 		DP_NOTICE(p_hwfn, false,
3909 			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3910 			  mcp_param, opcode);
3911 		return ECORE_INVAL;
3912 	}
3913 
3914 	return ECORE_SUCCESS;
3915 }
3916 
3917 enum _ecore_status_t
3918 ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3919 			   u16 vlan)
3920 {
3921 	u32 resp = 0, param = 0;
3922 	enum _ecore_status_t rc;
3923 
3924 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID,
3925 			   (u32)vlan << DRV_MB_PARAM_FCOE_CVID_SHIFT,
3926 			   &resp, &param);
3927 	if (rc != ECORE_SUCCESS)
3928 		DP_ERR(p_hwfn, "Failed to update FCoE VLAN, rc = %d\n", rc);
3929 
3930 	return rc;
3931 }
3932 
3933 enum _ecore_status_t
3934 ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
3935 				  struct ecore_ptt *p_ptt, u8 *wwn)
3936 {
3937 	struct ecore_mcp_mb_params mb_params;
3938 	struct mcp_wwn fabric_name;
3939 	enum _ecore_status_t rc;
3940 
3941 	OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name));
3942 	fabric_name.wwn_upper = *(u32 *)wwn;
3943 	fabric_name.wwn_lower = *(u32 *)(wwn + 4);
3944 
3945 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3946 	mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME;
3947 	mb_params.p_data_src = &fabric_name;
3948 	mb_params.data_src_size = sizeof(fabric_name);
3949 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3950 	if (rc != ECORE_SUCCESS)
3951 		DP_ERR(p_hwfn, "Failed to update FCoE WWN, rc = %d\n", rc);
3952 
3953 	return rc;
3954 }
3955 
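/*
 * Writes a single dword to a WoL register through the MFW mailbox. Failures
 * are only logged, since the function has no way to report them to callers.
 */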
3956 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3957 		      u32 offset, u32 val)
3958 {
3959 	struct ecore_mcp_mb_params mb_params = {0};
3960 	enum _ecore_status_t	   rc = ECORE_SUCCESS;
3961 	u32			   dword = val;
3962 
3963 	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
3964 	mb_params.param = offset;
3965 	mb_params.p_data_src = &dword;
3966 	mb_params.data_src_size = sizeof(dword);
3967 
3968 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3969 	if (rc != ECORE_SUCCESS) {
3970 		DP_NOTICE(p_hwfn, false,
3971 			  "Failed to send WoL write request, rc = %d\n", rc);
3972 		return;
3973 	}
3974 
3975 	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
3976 		DP_NOTICE(p_hwfn, false,
3977 			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
3978 			  val, offset, mb_params.mcp_resp);
3979 	}
3980 }
3981 
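/*
 * Queries the MFW for its supported-features bitmap and caches it in
 * p_hwfn->mcp_info->capabilities for later feature checks.
 */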
3982 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
3983 						struct ecore_ptt *p_ptt)
3984 {
3985 	u32 mcp_resp;
3986 	enum _ecore_status_t rc;
3987 
3988 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3989 			   0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3990 	if (rc == ECORE_SUCCESS)
3991 		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
3992 			   "MFW supported features: %08x\n",
3993 			   p_hwfn->mcp_info->capabilities);
3994 
3995 	return rc;
3996 }
3997 
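/*
 * Advertises the driver-side feature set to the MFW. At present only the
 * SmartLinQ and EEE port features are reported; the mailbox response and
 * parameter are collected but not interpreted here.
 */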
3998 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
3999 						struct ecore_ptt *p_ptt)
4000 {
4001 	u32 mcp_resp, mcp_param, features;
4002 
4003 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4004 		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
4005 
4006 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4007 			     features, &mcp_resp, &mcp_param);
4008 }
4009