/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File : ecore_mcp.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "nvm_map.h"
#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES	(500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES		(50 * 1000) /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
	DRV_ID_PDA_COMP_VER_OFFSET)

#define MCP_BYTES_PER_MBIT_OFFSET 17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

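	/* The first dword at mfw_mb_addr holds the mailbox length, so the
	 * message data itself starts one u32 past the base address.
	 */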
	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
						OSAL_BE32_TO_CPU(tmp);
	}
}

struct ecore_mcp_cmd_elem {
	osal_list_entry_t list;
	struct ecore_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
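
/* Each in-flight mailbox command is tracked by one of these elements,
 * keyed by the sequence number the MFW echoes back in fw_mb_header.
 * In practice the list holds at most one uncompleted command at a time
 * (see ecore_mcp_has_pending_cmd()).
 */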

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
		       struct ecore_mcp_mb_params *p_mb_params,
		       u16 expected_seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
				 sizeof(*p_cmd_elem));
	if (!p_cmd_elem) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
		goto out;
	}

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
				   struct ecore_mcp_cmd_elem *p_cmd_elem)
{
	OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
	OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct ecore_mcp_cmd_elem *
ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
				 struct ecore_mcp_cmd_elem) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return OSAL_NULL;
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
					      &p_hwfn->mcp_info->cmd_list, list,
					      struct ecore_mcp_cmd_elem) {
			ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
	p_hwfn->mcp_info = OSAL_NULL;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;
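
	/* Each public section is described by an "offsize" dword in shared
	 * memory; SECTION_ADDR() combines that value with a PF/port index
	 * to yield the absolute address of this function's copy.
	 */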

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
				  PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
				       DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	OSAL_LIST_INIT(&p_info->cmd_list);

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates
		 * that the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return ECORE_ABORTED;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
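
	/* MISCS_REG_GENERIC_POR_0 changes whenever the MCP goes through a
	 * reset, so sampling it here lets the polling loop below detect
	 * when the reset has actually taken place.
	 */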

	/* Set drv command along with the updated sequence */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the MFW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
						   struct ecore_mcp_cmd_elem,
						   list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static enum _ecore_status_t
ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params *p_mb_params;
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return ECORE_AGAIN;

	p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return ECORE_UNKNOWN_ERROR;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      OFFSETOF(struct public_drv_mb,
					       union_data);
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return ECORE_SUCCESS;
}

/* Must be called while cmd_lock is acquired */
static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_mb_params *p_mb_params,
				      u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);
	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
				       bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

static void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

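/* Issue a mailbox command and wait for the MFW response.  The flow is:
 * 1) spin until any previously pending command completes (the mailbox
 *    supports a single outstanding command at a time),
 * 2) write the command with a fresh sequence number and queue a tracking
 *    element, and
 * 3) spin until a response carrying that sequence number arrives.
 * The total wait is bounded by max_retries * delay microseconds.
 */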
static enum _ecore_status_t
_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_mcp_mb_params *p_mb_params,
			 u32 max_retries, u32 delay)
{
	struct ecore_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (!ecore_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
		OSAL_UDELAY(delay);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_AGAIN;
	}

	/* Send the mailbox command */
	ecore_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem)
		goto err;

	__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		OSAL_UDELAY(delay);
		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (rc == ECORE_SUCCESS)
			break;
		else if (rc != ECORE_AGAIN)
			goto err;

		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);

		OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
		ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

		ecore_mcp_cmd_set_blocking(p_hwfn, true);
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
		return ECORE_AGAIN;
	}

	ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp, p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return ECORE_SUCCESS;

err:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    struct ecore_mcp_mb_params *p_mb_params)
{
	osal_size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       union_data_size);
		return ECORE_INVAL;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn, false,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return ECORE_ABORTED;
	}

	return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
					delay);
}

enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size,
					  u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size,
					  u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);

	return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded) {
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	} else if (!loaded_port[p_hwfn->port_id]) {
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	} else {
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
	/* On CMT, always report the engine load phase */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif

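/* Decide whether the current driver may force-load over an existing one.
 * Unless overridden, a force load is permitted only when the loading
 * driver outranks the existing one: an OS driver may replace a preboot
 * driver, and a kdump driver may replace an OS driver.
 */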
static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

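/* The bitmap built here is reported to the MFW as drv_ver_1 in the load
 * request (see ecore_mcp_load_req()), recording which ecore features the
 * driver was compiled with.
 */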
static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

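/* Send a single LOAD_REQ mailbox command.  The request and response
 * structures are exchanged through the union_data area of the driver
 * mailbox; unless the old HSI version 1 is used, the MFW also fills
 * load_rsp with the details of any already-loaded driver.
 */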
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
		      p_in_params->timeout_val);
	SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FORCE, p_in_params->force_cmd);
	SET_MFW_FIELD(load_req.misc0, (u64)LOAD_REQ_FLAGS0,
		      p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
				   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	}
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
				    u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role = 0, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_1, in_params.drv_ver_0,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_1,
				out_params.exist_drv_ver_0);

			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
						&mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_1, in_params.drv_ver_0,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_1,
				  out_params.exist_drv_ver_0);

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	switch (p_hwfn->p_dev->wol_config) {
	case ECORE_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case ECORE_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->p_dev->wol_config);
		/* Fallthrough */
	case ECORE_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->p_dev->wol_mac;

		OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
			   "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
			   p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
			   p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

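/* Acknowledge handled VF FLRs to the MFW.  The ACK is delivered both via
 * the VF_DISABLED_DONE mailbox command and by clearing the per-PF
 * drv_ack_vf_disabled bits directly in shared memory; the direct write is
 * a temporary workaround that the MFW is expected to take over.
 */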
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_MFW_FIELD(transceiver_state,
					  ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}

static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
			   OFFSETOF(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed here, since p_link->speed is
	 * later changed according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}

enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		if (params->eee.enable)
			phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
					EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 *  - On reset, there's no guarantee MFW would trigger
	 *    an attention.
	 *  - On initialization, older MFWs might not indicate link change
	 *    during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return rc;
}

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and until its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = ECORE_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = ECORE_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = ECORE_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is
	 * always a limit, and clamp the value to min 1 / max 100 if it
	 * isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_OFFSET;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_OFFSET;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

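/* Read this PF's copy of the public_func section, dword by dword, bounded
 * by the section size reported in the offsize dword.
 */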
1622 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
1623 				    struct ecore_ptt *p_ptt,
1624 				    struct public_func *p_data,
1625 				    int pfid)
1626 {
1627 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1628 					PUBLIC_FUNC);
1629 	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1630 	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1631 	u32 i, size;
1632 
1633 	OSAL_MEM_ZERO(p_data, sizeof(*p_data));
1634 
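	/* Read at most the smaller of our local struct and the section
	 * size published by the MFW, so a different shmem layout on the
	 * MFW side is handled safely.
	 */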
1635 	size = OSAL_MIN_T(u32, sizeof(*p_data),
1636 			  SECTION_SIZE(mfw_path_offsize));
1637 	for (i = 0; i < size / sizeof(u32); i++)
1638 		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
1639 					      func_addr + (i << 2));
1640 
1641 	return size;
1642 }
1643 #if 0
1644 /* This was introduced with FW 8.10.5.0; hopefully it is only temporary. */
1645 enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn,
1646 					     struct ecore_ptt *p_ptt,
1647 					     u8 *p_pf)
1648 {
1649 	struct public_func shmem_info;
1650 	int i;
1651 
1652 	/* Find first Ethernet interface in port */
1653 	for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->p_dev);
1654 	     i += p_hwfn->p_dev->num_ports_in_engine) {
1655 		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1656 					 MCP_PF_ID_BY_REL(p_hwfn, i));
1657 
1658 		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1659 			continue;
1660 
1661 		if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
1662 		    FUNC_MF_CFG_PROTOCOL_ETHERNET) {
1663 			*p_pf = (u8)i;
1664 			return ECORE_SUCCESS;
1665 		}
1666 	}
1667 
1668 	/* This might actually be valid somewhere in the future but for now
1669 	 * it's highly unlikely.
1670 	 */
1671 	DP_NOTICE(p_hwfn, false,
1672 		  "Failed to find an ethernet interface on the port in MF_SI mode\n");
1673 
1674 	return ECORE_INVAL;
1675 }
1676 #endif
1677 static void
1678 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1679 {
1680 	struct ecore_mcp_function_info *p_info;
1681 	struct public_func shmem_info;
1682 	u32 resp = 0, param = 0;
1683 
1684 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1685 				 MCP_PF_ID(p_hwfn));
1686 
1687 	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
1688 
1689 	p_info = &p_hwfn->mcp_info->func_info;
1690 
1691 	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
1692 
1693 	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
1694 
1695 	/* Acknowledge the MFW */
1696 	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1697 		      &param);
1698 }
1699 
1700 static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
1701 				  struct ecore_ptt *p_ptt)
1702 {
1703 	struct public_func shmem_info;
1704 	u32 resp = 0, param = 0;
1705 
1706 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1707 				 MCP_PF_ID(p_hwfn));
1708 
1709 	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1710 						 FUNC_MF_CFG_OV_STAG_MASK;
1711 	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
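	/* In MF_SD mode, program the updated S-tag into the NIG LLH
	 * register and notify the FW through a PF-update ramrod.
	 */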
1712 	if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
1713 	    (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) {
1714 		ecore_wr(p_hwfn, p_ptt,
1715 			 NIG_REG_LLH_FUNC_TAG_VALUE,
1716 			 p_hwfn->hw_info.ovlan);
1717 		ecore_sp_pf_update_stag(p_hwfn);
1718 	}
1719 
1720 	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
1721 
1722 	/* Acknowledge the MFW */
1723 	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1724 		      &resp, &param);
1725 }
1726 
1727 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
1728 {
1729 	/* A single notification should be sent to the upper driver in CMT mode */
1730 	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1731 		return;
1732 
1733 	DP_NOTICE(p_hwfn, false,
1734 		  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1735 
1736 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1737 }
1738 
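/* Parameters of an mdump sub-command sent to the MFW via
 * DRV_MSG_CODE_MDUMP_CMD; 'cmd' selects the sub-command, and the src/dst
 * buffers carry its payload and response, if any.
 */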
1739 struct ecore_mdump_cmd_params {
1740 	u32 cmd;
1741 	void *p_data_src;
1742 	u8 data_src_size;
1743 	void *p_data_dst;
1744 	u8 data_dst_size;
1745 	u32 mcp_resp;
1746 };
1747 
1748 static enum _ecore_status_t
1749 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1750 		    struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1751 {
1752 	struct ecore_mcp_mb_params mb_params;
1753 	enum _ecore_status_t rc;
1754 
1755 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1756 	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1757 	mb_params.param = p_mdump_cmd_params->cmd;
1758 	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1759 	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1760 	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1761 	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1762 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1763 	if (rc != ECORE_SUCCESS)
1764 		return rc;
1765 
1766 	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1767 
1768 	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1769 		DP_INFO(p_hwfn,
1770 			"The mdump sub-command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1771 			p_mdump_cmd_params->cmd);
1772 		rc = ECORE_NOTIMPL;
1773 	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1774 		DP_INFO(p_hwfn,
1775 			"The mdump command is not supported by the MFW\n");
1776 		rc = ECORE_NOTIMPL;
1777 	}
1778 
1779 	return rc;
1780 }
1781 
1782 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1783 						struct ecore_ptt *p_ptt)
1784 {
1785 	struct ecore_mdump_cmd_params mdump_cmd_params;
1786 
1787 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1788 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1789 
1790 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1791 }
1792 
1793 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1794 						struct ecore_ptt *p_ptt,
1795 						u32 epoch)
1796 {
1797 	struct ecore_mdump_cmd_params mdump_cmd_params;
1798 
1799 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1800 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1801 	mdump_cmd_params.p_data_src = &epoch;
1802 	mdump_cmd_params.data_src_size = sizeof(epoch);
1803 
1804 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1805 }
1806 
1807 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1808 					     struct ecore_ptt *p_ptt)
1809 {
1810 	struct ecore_mdump_cmd_params mdump_cmd_params;
1811 
1812 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1813 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1814 
1815 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1816 }
1817 
1818 static enum _ecore_status_t
1819 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1820 			   struct mdump_config_stc *p_mdump_config)
1821 {
1822 	struct ecore_mdump_cmd_params mdump_cmd_params;
1823 	enum _ecore_status_t rc;
1824 
1825 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1826 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1827 	mdump_cmd_params.p_data_dst = p_mdump_config;
1828 	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1829 
1830 	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1831 	if (rc != ECORE_SUCCESS)
1832 		return rc;
1833 
1834 	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1835 		DP_INFO(p_hwfn,
1836 			"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1837 			mdump_cmd_params.mcp_resp);
1838 		rc = ECORE_UNKNOWN_ERROR;
1839 	}
1840 
1841 	return rc;
1842 }
1843 
1844 enum _ecore_status_t
1845 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1846 			 struct ecore_mdump_info *p_mdump_info)
1847 {
1848 	u32 addr, global_offsize, global_addr;
1849 	struct mdump_config_stc mdump_config;
1850 	enum _ecore_status_t rc;
1851 
1852 	OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1853 
1854 	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1855 				    PUBLIC_GLOBAL);
1856 	global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1857 	global_addr = SECTION_ADDR(global_offsize, 0);
1858 	p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1859 					global_addr +
1860 					OFFSETOF(struct public_global,
1861 						 mdump_reason));
1862 
1863 	if (p_mdump_info->reason) {
1864 		rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1865 		if (rc != ECORE_SUCCESS)
1866 			return rc;
1867 
1868 		p_mdump_info->version = mdump_config.version;
1869 		p_mdump_info->config = mdump_config.config;
1870 		p_mdump_info->epoch = mdump_config.epoc;
1871 		p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1872 		p_mdump_info->valid_logs = mdump_config.valid_logs;
1873 
1874 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1875 			   "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1876 			   p_mdump_info->reason, p_mdump_info->version,
1877 			   p_mdump_info->config, p_mdump_info->epoch,
1878 			   p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1879 	} else {
1880 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1881 			   "MFW mdump info: reason %d\n", p_mdump_info->reason);
1882 	}
1883 
1884 	return ECORE_SUCCESS;
1885 }
1886 
1887 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1888 						struct ecore_ptt *p_ptt)
1889 {
1890 	struct ecore_mdump_cmd_params mdump_cmd_params;
1891 
1892 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1893 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1894 
1895 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1896 }
1897 
1898 enum _ecore_status_t
1899 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1900 			   struct ecore_mdump_retain_data *p_mdump_retain)
1901 {
1902 	struct ecore_mdump_cmd_params mdump_cmd_params;
1903 	struct mdump_retain_data_stc mfw_mdump_retain;
1904 	enum _ecore_status_t rc;
1905 
1906 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1907 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1908 	mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1909 	mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1910 
1911 	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1912 	if (rc != ECORE_SUCCESS)
1913 		return rc;
1914 
1915 	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1916 		DP_INFO(p_hwfn,
1917 			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1918 			mdump_cmd_params.mcp_resp);
1919 		return ECORE_UNKNOWN_ERROR;
1920 	}
1921 
1922 	p_mdump_retain->valid = mfw_mdump_retain.valid;
1923 	p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1924 	p_mdump_retain->pf = mfw_mdump_retain.pf;
1925 	p_mdump_retain->status = mfw_mdump_retain.status;
1926 
1927 	return ECORE_SUCCESS;
1928 }
1929 
1930 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1931 						struct ecore_ptt *p_ptt)
1932 {
1933 	struct ecore_mdump_cmd_params mdump_cmd_params;
1934 
1935 	OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1936 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1937 
1938 	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1939 }
1940 
1941 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1942 					    struct ecore_ptt *p_ptt)
1943 {
1944 	struct ecore_mdump_retain_data mdump_retain;
1945 	enum _ecore_status_t rc;
1946 
1947 	/* In CMT mode - no need for more than a single acknowledgement to the
1948 	 * MFW, and no more than a single notification to the upper driver.
1949 	 */
1950 	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1951 		return;
1952 
1953 	rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1954 	if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1955 		DP_NOTICE(p_hwfn, false,
1956 			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1957 			  mdump_retain.epoch, mdump_retain.pf,
1958 			  mdump_retain.status);
1959 	} else {
1960 		DP_NOTICE(p_hwfn, false,
1961 			  "The MFW notified that a critical error occurred in the device\n");
1962 	}
1963 
1964 	if (p_hwfn->p_dev->allow_mdump) {
1965 		DP_NOTICE(p_hwfn, false,
1966 			  "Not acknowledging the notification to allow the MFW crash dump\n");
1967 		return;
1968 	}
1969 
1970 	DP_NOTICE(p_hwfn, false,
1971 		  "Acknowledging the notification to prevent an MFW crash dump [driver debug data collection is preferable]\n");
1972 	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1973 	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1974 }
1975 
1976 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1977 					     struct ecore_ptt *p_ptt)
1978 {
1979 	struct ecore_mcp_info *info = p_hwfn->mcp_info;
1980 	enum _ecore_status_t rc = ECORE_SUCCESS;
1981 	bool found = false;
1982 	u16 i;
1983 
1984 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1985 
1986 	/* Read Messages from MFW */
1987 	ecore_mcp_read_mb(p_hwfn, p_ptt);
1988 
1989 	/* Compare current messages to old ones */
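	/* Each entry in the mailbox corresponds to one MFW_DRV_MSG type;
	 * a difference from the shadow copy marks a newly posted event.
	 */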
1990 	for (i = 0; i < info->mfw_mb_length; i++) {
1991 		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1992 			continue;
1993 
1994 		found = true;
1995 
1996 		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1997 			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1998 			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1999 
2000 		switch (i) {
2001 		case MFW_DRV_MSG_LINK_CHANGE:
2002 			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
2003 			break;
2004 		case MFW_DRV_MSG_VF_DISABLED:
2005 			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
2006 			break;
2007 		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2008 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2009 						    ECORE_DCBX_REMOTE_LLDP_MIB);
2010 			break;
2011 		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2012 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2013 						    ECORE_DCBX_REMOTE_MIB);
2014 			break;
2015 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2016 			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
2017 						    ECORE_DCBX_OPERATIONAL_MIB);
2018 			break;
2019 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2020 			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2021 			break;
2022 		case MFW_DRV_MSG_ERROR_RECOVERY:
2023 			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
2024 			break;
2025 		case MFW_DRV_MSG_GET_LAN_STATS:
2026 		case MFW_DRV_MSG_GET_FCOE_STATS:
2027 		case MFW_DRV_MSG_GET_ISCSI_STATS:
2028 		case MFW_DRV_MSG_GET_RDMA_STATS:
2029 			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2030 			break;
2031 		case MFW_DRV_MSG_BW_UPDATE:
2032 			ecore_mcp_update_bw(p_hwfn, p_ptt);
2033 			break;
2034 		case MFW_DRV_MSG_S_TAG_UPDATE:
2035 			ecore_mcp_update_stag(p_hwfn, p_ptt);
2036 			break;
2037 		case MFW_DRV_MSG_FAILURE_DETECTED:
2038 			ecore_mcp_handle_fan_failure(p_hwfn);
2039 			break;
2040 		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2041 			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
2042 			break;
2043 		case MFW_DRV_MSG_GET_TLV_REQ:
2044 			OSAL_MFW_TLV_REQ(p_hwfn);
2045 			break;
2046 		default:
2047 			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2048 			rc = ECORE_INVAL;
2049 		}
2050 	}
2051 
2052 	/* ACK everything */
2053 	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2054 		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
2055 
2056 		/* The MFW expects the answer in BE, so force the write in that format */
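		/* The ack words follow the leading dword and the array of
		 * current messages in the mailbox, hence the address
		 * arithmetic below.
		 */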
2057 		ecore_wr(p_hwfn, p_ptt,
2058 			 info->mfw_mb_addr + sizeof(u32) +
2059 			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2060 			 sizeof(u32) + i * sizeof(u32), val);
2061 	}
2062 
2063 	if (!found) {
2064 		DP_NOTICE(p_hwfn, false,
2065 			  "Received an MFW message indication but no new message!\n");
2066 		rc = ECORE_INVAL;
2067 	}
2068 
2069 	/* Copy the new mfw messages into the shadow */
2070 	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2071 
2072 	return rc;
2073 }
2074 
2075 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
2076 					   struct ecore_ptt *p_ptt,
2077 					   u32 *p_mfw_ver,
2078 					   u32 *p_running_bundle_id)
2079 {
2080 	u32 global_offsize;
2081 
2082 #ifndef ASIC_ONLY
2083 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2084 		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
2085 		return ECORE_SUCCESS;
2086 	}
2087 #endif
2088 
2089 	if (IS_VF(p_hwfn->p_dev)) {
2090 		if (p_hwfn->vf_iov_info) {
2091 			struct pfvf_acquire_resp_tlv *p_resp;
2092 
2093 			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2094 			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2095 			return ECORE_SUCCESS;
2096 		} else {
2097 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2098 				   "VF requested MFW version prior to ACQUIRE\n");
2099 			return ECORE_INVAL;
2100 		}
2101 	}
2102 
2103 	global_offsize = ecore_rd(p_hwfn, p_ptt,
2104 			  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
2105 					       PUBLIC_GLOBAL));
2106 	*p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
2107 			SECTION_ADDR(global_offsize, 0) +
2108 			OFFSETOF(struct public_global, mfw_ver));
2109 
2110 	if (p_running_bundle_id != OSAL_NULL) {
2111 		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2112 				SECTION_ADDR(global_offsize, 0) +
2113 				OFFSETOF(struct public_global,
2114 					 running_bundle_id));
2115 	}
2116 
2117 	return ECORE_SUCCESS;
2118 }
2119 
2120 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
2121 					   struct ecore_ptt *p_ptt,
2122 					   u32 *p_mbi_ver)
2123 {
2124 	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2125 
2126 #ifndef ASIC_ONLY
2127 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2128 		DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
2129 		return ECORE_SUCCESS;
2130 	}
2131 #endif
2132 
2133 	if (IS_VF(p_hwfn->p_dev))
2134 		return ECORE_INVAL;
2135 
2136 	/* Read the address of the nvm_cfg */
2137 	nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2138 	if (!nvm_cfg_addr) {
2139 		DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2140 		return ECORE_INVAL;
2141 	}
2142 
2143 	/* Read the offset of nvm_cfg1 */
2144 	nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2145 
2146 	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2147 		       OFFSETOF(struct nvm_cfg1, glob) +
2148 		       OFFSETOF(struct nvm_cfg1_glob, mbi_version);
2149 	*p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2150 		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2151 		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2152 		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2153 
2154 	return ECORE_SUCCESS;
2155 }
2156 
2157 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
2158 					   u32 *p_media_type)
2159 {
2160 	struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
2161 	struct ecore_ptt *p_ptt;
2162 
2163 	/* TODO - Add support for VFs */
2164 	if (IS_VF(p_dev))
2165 		return ECORE_INVAL;
2166 
2167 	if (!ecore_mcp_is_init(p_hwfn)) {
2168 		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
2169 		return ECORE_BUSY;
2170 	}
2171 
2172 	*p_media_type = MEDIA_UNSPECIFIED;
2173 
2174 	p_ptt = ecore_ptt_acquire(p_hwfn);
2175 	if (!p_ptt)
2176 		return ECORE_BUSY;
2177 
2178 	*p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2179 				 OFFSETOF(struct public_port, media_type));
2180 
2181 	ecore_ptt_release(p_hwfn, p_ptt);
2182 
2183 	return ECORE_SUCCESS;
2184 }
2185 
2186 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2187 static void
2188 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2189 				 enum ecore_pci_personality *p_proto)
2190 {
2191 	/* No legacy MFW ever published iWARP support, so at this point
2192 	 * this is either plain L2 or RoCE.
2193 	 */
2194 	if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2195 			  &p_hwfn->hw_info.device_capabilities))
2196 		*p_proto = ECORE_PCI_ETH_ROCE;
2197 	else
2198 		*p_proto = ECORE_PCI_ETH;
2199 
2200 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2201 		   "According to Legacy capabilities, L2 personality is %08x\n",
2202 		   (u32) *p_proto);
2203 }
2204 
2205 static enum _ecore_status_t
2206 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2207 			      struct ecore_ptt *p_ptt,
2208 			      enum ecore_pci_personality *p_proto)
2209 {
2210 	u32 resp = 0, param = 0;
2211 	enum _ecore_status_t rc;
2212 
2213 	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2214 			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2215 	if (rc != ECORE_SUCCESS)
2216 		return rc;
2217 	if (resp != FW_MSG_CODE_OK) {
2218 		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2219 			   "MFW lacks support for the command; response is %08x\n",
2220 			   resp);
2221 		return ECORE_INVAL;
2222 	}
2223 
2224 	switch (param) {
2225 	case FW_MB_PARAM_GET_PF_RDMA_NONE:
2226 		*p_proto = ECORE_PCI_ETH;
2227 		break;
2228 	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2229 		*p_proto = ECORE_PCI_ETH_ROCE;
2230 		break;
2231 	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2232 		*p_proto = ECORE_PCI_ETH_IWARP;
2233 		break;
2234 	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2235 		*p_proto = ECORE_PCI_ETH_RDMA;
2236 		break;
2237 	default:
2238 		DP_NOTICE(p_hwfn, true,
2239 			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2240 			  param);
2241 		return ECORE_INVAL;
2242 	}
2243 
2244 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2245 		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2246 		   (u32) *p_proto, resp, param);
2247 	return ECORE_SUCCESS;
2248 }
2249 
2250 static enum _ecore_status_t
2251 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2252 			  struct public_func *p_info,
2253 			  struct ecore_ptt *p_ptt,
2254 			  enum ecore_pci_personality *p_proto)
2255 {
2256 	enum _ecore_status_t rc = ECORE_SUCCESS;
2257 
2258 	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2259 	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2260 		if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2261 		    ECORE_SUCCESS)
2262 			ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2263 		break;
2264 	case FUNC_MF_CFG_PROTOCOL_ISCSI:
2265 		*p_proto = ECORE_PCI_ISCSI;
2266 		break;
2267 	case FUNC_MF_CFG_PROTOCOL_FCOE:
2268 		*p_proto = ECORE_PCI_FCOE;
2269 		break;
2270 	case FUNC_MF_CFG_PROTOCOL_ROCE:
2271 		DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2272 		/* Fallthrough */
2273 	default:
2274 		rc = ECORE_INVAL;
2275 	}
2276 
2277 	return rc;
2278 }
2279 
2280 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2281 						    struct ecore_ptt *p_ptt)
2282 {
2283 	struct ecore_mcp_function_info *info;
2284 	struct public_func shmem_info;
2285 
2286 	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2287 				 MCP_PF_ID(p_hwfn));
2288 	info = &p_hwfn->mcp_info->func_info;
2289 
2290 	info->pause_on_host = (shmem_info.config &
2291 			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2292 
2293 	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2294 				      &info->protocol)) {
2295 		DP_ERR(p_hwfn, "Unknown personality %08x\n",
2296 		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2297 		return ECORE_INVAL;
2298 	}
2299 
2300 	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2301 
2302 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
2303 		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2304 		info->mac[1] = (u8)(shmem_info.mac_upper);
2305 		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2306 		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2307 		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2308 		info->mac[5] = (u8)(shmem_info.mac_lower);
2309 
2310 		/* Store primary MAC for later possible WoL */
2311 		OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2312 
2313 	} else {
2314 		/* TODO - are there protocols for which there's no MAC? */
2315 		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2316 	}
2317 
2318 	/* TODO - are these calculations true for a BE machine? */
2319 	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2320 			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2321 	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2322 			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2323 
2324 	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2325 
2326 	info->mtu = (u16)shmem_info.mtu_size;
2327 
2328 	p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2329 	p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2330 	if (ecore_mcp_is_init(p_hwfn)) {
2331 		u32 resp = 0, param = 0;
2332 		enum _ecore_status_t rc;
2333 
2334 		rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2335 				   DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2336 		if (rc != ECORE_SUCCESS)
2337 			return rc;
2338 		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2339 			p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2340 	}
2341 
2342 	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2343 		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2344 		   info->pause_on_host, info->protocol,
2345 		   info->bandwidth_min, info->bandwidth_max,
2346 		   info->mac[0], info->mac[1], info->mac[2],
2347 		   info->mac[3], info->mac[4], info->mac[5],
2348 		   (unsigned long long)info->wwn_port, (unsigned long long)info->wwn_node, info->ovlan,
2349 		   (u8)p_hwfn->hw_info.b_wol_support);
2350 
2351 	return ECORE_SUCCESS;
2352 }
2353 
2354 struct ecore_mcp_link_params
2355 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2356 {
2357 	if (!p_hwfn || !p_hwfn->mcp_info)
2358 		return OSAL_NULL;
2359 	return &p_hwfn->mcp_info->link_input;
2360 }
2361 
2362 struct ecore_mcp_link_state
2363 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2364 {
2365 	if (!p_hwfn || !p_hwfn->mcp_info)
2366 		return OSAL_NULL;
2367 
2368 #ifndef ASIC_ONLY
2369 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2370 		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2371 		p_hwfn->mcp_info->link_output.link_up = true;
2372 	}
2373 #endif
2374 
2375 	return &p_hwfn->mcp_info->link_output;
2376 }
2377 
2378 struct ecore_mcp_link_capabilities
2379 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2380 {
2381 	if (!p_hwfn || !p_hwfn->mcp_info)
2382 		return OSAL_NULL;
2383 	return &p_hwfn->mcp_info->link_capabilities;
2384 }
2385 
2386 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2387 				     struct ecore_ptt *p_ptt)
2388 {
2389 	u32 resp = 0, param = 0;
2390 	enum _ecore_status_t rc;
2391 
2392 	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2393 			   DRV_MSG_CODE_NIG_DRAIN, 1000,
2394 			   &resp, &param);
2395 
2396 	/* Wait for the drain to complete before returning */
2397 	OSAL_MSLEEP(1020);
2398 
2399 	return rc;
2400 }
2401 
2402 #ifndef LINUX_REMOVE
2403 const struct ecore_mcp_function_info
2404 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2405 {
2406 	if (!p_hwfn || !p_hwfn->mcp_info)
2407 		return OSAL_NULL;
2408 	return &p_hwfn->mcp_info->func_info;
2409 }
2410 
2411 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2412 				  struct ecore_ptt *p_ptt,
2413 				  u32 personalities)
2414 {
2415 	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2416 	struct public_func shmem_info;
2417 	int i, count = 0, num_pfs;
2418 
2419 	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2420 
2421 	for (i = 0; i < num_pfs; i++) {
2422 		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2423 					 MCP_PF_ID_BY_REL(p_hwfn, i));
2424 		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2425 			continue;
2426 
2427 		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2428 					      &protocol) !=
2429 		    ECORE_SUCCESS)
2430 			continue;
2431 
2432 		if ((1 << ((u32)protocol)) & personalities)
2433 			count++;
2434 	}
2435 
2436 	return count;
2437 }
2438 #endif
2439 
2440 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2441 					      struct ecore_ptt *p_ptt,
2442 					      u32 *p_flash_size)
2443 {
2444 	u32 flash_size;
2445 
2446 #ifndef ASIC_ONLY
2447 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2448 		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2449 		return ECORE_INVAL;
2450 	}
2451 #endif
2452 
2453 	if (IS_VF(p_hwfn->p_dev))
2454 		return ECORE_INVAL;
2455 
2456 	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2457 	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2458 		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
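	/* The field encodes the flash size as a power-of-two number of
	 * Mbits; adding MCP_BYTES_PER_MBIT_OFFSET to the exponent
	 * converts it to a size in bytes (1 Mbit == 128 KiB).
	 */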
2459 	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
2460 
2461 	*p_flash_size = flash_size;
2462 
2463 	return ECORE_SUCCESS;
2464 }
2465 
2466 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2467 						  struct ecore_ptt *p_ptt)
2468 {
2469 	struct ecore_dev *p_dev = p_hwfn->p_dev;
2470 
2471 	if (p_dev->recov_in_prog) {
2472 		DP_NOTICE(p_hwfn, false,
2473 			  "Avoid triggering a recovery since such a process is already in progress\n");
2474 		return ECORE_AGAIN;
2475 	}
2476 
2477 	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2478 	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2479 
2480 	return ECORE_SUCCESS;
2481 }
2482 
2483 static enum _ecore_status_t
2484 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2485 			    struct ecore_ptt *p_ptt,
2486 			    u8 vf_id, u8 num)
2487 {
2488 	u32 resp = 0, param = 0, rc_param = 0;
2489 	enum _ecore_status_t rc;
2490 
2491 	/* Only Leader can configure MSIX, and need to take CMT into account */
2492 	if (!IS_LEAD_HWFN(p_hwfn))
2493 		return ECORE_SUCCESS;
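	/* In CMT the VF's status blocks are spread across both hwfns, so
	 * scale the requested SB count by the number of hwfns.
	 */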
2494 	num *= p_hwfn->p_dev->num_hwfns;
2495 
2496 	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
2497 		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2498 	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
2499 		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2500 
2501 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2502 			   &resp, &rc_param);
2503 
2504 	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2505 		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2506 			  vf_id);
2507 		rc = ECORE_INVAL;
2508 	} else {
2509 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2510 			   "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
2511 			    num, vf_id);
2512 	}
2513 
2514 	return rc;
2515 }
2516 
2517 static enum _ecore_status_t
2518 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2519 			    struct ecore_ptt *p_ptt,
2520 			    u8 num)
2521 {
2522 	u32 resp = 0, param = num, rc_param = 0;
2523 	enum _ecore_status_t rc;
2524 
2525 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2526 			   param, &resp, &rc_param);
2527 
2528 	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2529 		DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2530 		rc = ECORE_INVAL;
2531 	} else {
2532 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2533 			   "Requested 0x%02x MSI-X interrupts for VFs\n",
2534 			   num);
2535 	}
2536 
2537 	return rc;
2538 }
2539 
2540 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2541 					      struct ecore_ptt *p_ptt,
2542 					      u8 vf_id, u8 num)
2543 {
2544 	if (ECORE_IS_BB(p_hwfn->p_dev))
2545 		return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2546 	else
2547 		return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2548 }
2549 
2550 enum _ecore_status_t
2551 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2552 			   struct ecore_mcp_drv_version *p_ver)
2553 {
2554 	struct ecore_mcp_mb_params mb_params;
2555 	struct drv_version_stc drv_version;
2556 	u32 num_words, i;
2557 	void *p_name;
2558 	OSAL_BE32 val;
2559 	enum _ecore_status_t rc;
2560 
2561 #ifndef ASIC_ONLY
2562 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2563 		return ECORE_SUCCESS;
2564 #endif
2565 
2566 	OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2567 	drv_version.version = p_ver->version;
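	/* drv_version was zeroed above and one dword less than the full
	 * name buffer is copied below, so the name stays NUL-terminated.
	 */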
2568 	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2569 	for (i = 0; i < num_words; i++) {
2570 		/* The driver name is expected to be in a big-endian format */
2571 		p_name = &p_ver->name[i * sizeof(u32)];
2572 		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2573 		*(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2574 	}
2575 
2576 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2577 	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2578 	mb_params.p_data_src = &drv_version;
2579 	mb_params.data_src_size = sizeof(drv_version);
2580 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2581 	if (rc != ECORE_SUCCESS)
2582 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2583 
2584 	return rc;
2585 }
2586 
2587 /* Wait up to 100 msec for the MCP to halt */
2588 #define ECORE_MCP_HALT_SLEEP_MS		10
2589 #define ECORE_MCP_HALT_MAX_RETRIES	10
2590 
2591 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2592 				    struct ecore_ptt *p_ptt)
2593 {
2594 	u32 resp = 0, param = 0, cpu_mode, cnt = 0;
2595 	enum _ecore_status_t rc;
2596 
2597 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2598 			   &param);
2599 	if (rc != ECORE_SUCCESS) {
2600 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2601 		return rc;
2602 	}
2603 
2604 	do {
2605 		OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2606 		cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2607 		if (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT)
2608 			break;
2609 	} while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2610 
2611 	if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2612 		DP_NOTICE(p_hwfn, false,
2613 			  "Failed to halt the MCP [CPU_MODE = 0x%08x after %d msec]\n",
2614 			  cpu_mode, cnt * ECORE_MCP_HALT_SLEEP_MS);
2615 		return ECORE_BUSY;
2616 	}
2617 
2618 	ecore_mcp_cmd_set_blocking(p_hwfn, true);
2619 
2620 	return ECORE_SUCCESS;
2621 }
2622 
2623 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2624 				      struct ecore_ptt *p_ptt)
2625 {
2626 	u32 value, cpu_mode;
2627 
2628 	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2629 
2630 	value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2631 	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2632 	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2633 	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2634 
2635 	if (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) {
2636 		DP_NOTICE(p_hwfn, false,
2637 			  "Failed to resume the MCP [CPU_MODE = 0x%08x]\n",
2638 			  cpu_mode);
2639 		return ECORE_BUSY;
2640 	}
2641 
2642 	ecore_mcp_cmd_set_blocking(p_hwfn, false);
2643 
2644 	return ECORE_SUCCESS;
2645 }
2646 
2647 enum _ecore_status_t
2648 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2649 				   struct ecore_ptt *p_ptt,
2650 				   enum ecore_ov_client client)
2651 {
2652 	u32 resp = 0, param = 0;
2653 	u32 drv_mb_param;
2654 	enum _ecore_status_t rc;
2655 
2656 	switch (client) {
2657 	case ECORE_OV_CLIENT_DRV:
2658 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2659 		break;
2660 	case ECORE_OV_CLIENT_USER:
2661 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2662 		break;
2663 	case ECORE_OV_CLIENT_VENDOR_SPEC:
2664 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2665 		break;
2666 	default:
2667 		DP_NOTICE(p_hwfn, true,
2668 			  "Invalid client type %d\n", client);
2669 		return ECORE_INVAL;
2670 	}
2671 
2672 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2673 			   drv_mb_param, &resp, &param);
2674 	if (rc != ECORE_SUCCESS)
2675 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2676 
2677 	return rc;
2678 }
2679 
2680 enum _ecore_status_t
2681 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2682 				 struct ecore_ptt *p_ptt,
2683 				 enum ecore_ov_driver_state drv_state)
2684 {
2685 	u32 resp = 0, param = 0;
2686 	u32 drv_mb_param;
2687 	enum _ecore_status_t rc;
2688 
2689 	switch (drv_state) {
2690 	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2691 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2692 		break;
2693 	case ECORE_OV_DRIVER_STATE_DISABLED:
2694 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2695 		break;
2696 	case ECORE_OV_DRIVER_STATE_ACTIVE:
2697 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2698 		break;
2699 	default:
2700 		DP_NOTICE(p_hwfn, true,
2701 			  "Invalid driver state %d\n", drv_state);
2702 		return ECORE_INVAL;
2703 	}
2704 
2705 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2706 			   drv_mb_param, &resp, &param);
2707 	if (rc != ECORE_SUCCESS)
2708 		DP_ERR(p_hwfn, "Failed to send driver state\n");
2709 
2710 	return rc;
2711 }
2712 
2713 enum _ecore_status_t
2714 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2715 			 struct ecore_fc_npiv_tbl *p_table)
2716 {
2717 	struct dci_fc_npiv_tbl *p_npiv_table;
2718 	u8 *p_buf = OSAL_NULL;
2719 	u32 addr, size, i;
2720 	enum _ecore_status_t rc = ECORE_SUCCESS;
2721 
2722 	p_table->num_wwpn = 0;
2723 	p_table->num_wwnn = 0;
2724 	addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2725 			OFFSETOF(struct public_port, fc_npiv_nvram_tbl_addr));
2726 	if (addr == NPIV_TBL_INVALID_ADDR) {
2727 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
2728 		return rc;
2729 	}
2730 
2731 	size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2732 			OFFSETOF(struct public_port, fc_npiv_nvram_tbl_size));
2733 	if (!size) {
2734 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
2735 		return rc;
2736 	}
2737 
2738 	p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
2739 	if (!p_buf) {
2740 		DP_ERR(p_hwfn, "Buffer allocation failed\n");
2741 		return ECORE_NOMEM;
2742 	}
2743 
2744 	rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
2745 	if (rc != ECORE_SUCCESS) {
2746 		OSAL_VFREE(p_hwfn->p_dev, p_buf);
2747 		return rc;
2748 	}
2749 
2750 	p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
2751 	p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2752 	p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2753 	for (i = 0; i < p_table->num_wwpn; i++) {
2754 		OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
2755 			    ECORE_WWN_SIZE);
2756 		OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
2757 			    ECORE_WWN_SIZE);
2758 	}
2759 
2760 	OSAL_VFREE(p_hwfn->p_dev, p_buf);
2761 
2762 	return ECORE_SUCCESS;
2763 }
2764 
2765 enum _ecore_status_t
2766 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2767 			u16 mtu)
2768 {
2769 	u32 resp = 0, param = 0;
2770 	u32 drv_mb_param;
2771 	enum _ecore_status_t rc;
2772 
2773 	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_OFFSET;
2774 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2775 			   drv_mb_param, &resp, &param);
2776 	if (rc != ECORE_SUCCESS)
2777 		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2778 
2779 	return rc;
2780 }
2781 
2782 enum _ecore_status_t
2783 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2784 			u8 *mac)
2785 {
2786 	struct ecore_mcp_mb_params mb_params;
2787 	u32 mfw_mac[2];
2788 	enum _ecore_status_t rc;
2789 
2790 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2791 	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2792 	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2793 				DRV_MSG_CODE_VMAC_TYPE_OFFSET;
2794 	mb_params.param |= MCP_PF_ID(p_hwfn);
2795 
2796 	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
2797 	 * in 32-bit granularity.
2798 	 * So the MAC has to be set in native order [and not byte order],
2799 	 * otherwise it would be read incorrectly by MFW after swap.
2800 	 */
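	/* E.g., for MAC aa:bb:cc:dd:ee:ff this yields
	 * mfw_mac[0] = 0xaabbccdd and mfw_mac[1] = 0xeeff0000.
	 */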
2801 	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2802 	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2803 
2804 	mb_params.p_data_src = (u8 *)mfw_mac;
2805 	mb_params.data_src_size = 8;
2806 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2807 	if (rc != ECORE_SUCCESS)
2808 		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2809 
2810 	/* Store primary MAC for later possible WoL */
2811 	OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
2812 
2813 	return rc;
2814 }
2815 
2816 enum _ecore_status_t
2817 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2818 			enum ecore_ov_wol wol)
2819 {
2820 	u32 resp = 0, param = 0;
2821 	u32 drv_mb_param;
2822 	enum _ecore_status_t rc;
2823 
2824 	if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
2825 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2826 			   "Can't change WoL configuration when WoL isn't supported\n");
2827 		return ECORE_INVAL;
2828 	}
2829 
2830 	switch (wol) {
2831 	case ECORE_OV_WOL_DEFAULT:
2832 		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2833 		break;
2834 	case ECORE_OV_WOL_DISABLED:
2835 		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2836 		break;
2837 	case ECORE_OV_WOL_ENABLED:
2838 		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2839 		break;
2840 	default:
2841 		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2842 		return ECORE_INVAL;
2843 	}
2844 
2845 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2846 			   drv_mb_param, &resp, &param);
2847 	if (rc != ECORE_SUCCESS)
2848 		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2849 
2850 	/* Store the WoL update for a future unload */
2851 	p_hwfn->p_dev->wol_config = (u8)wol;
2852 
2853 	return rc;
2854 }
2855 
2856 enum _ecore_status_t
2857 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2858 			    enum ecore_ov_eswitch eswitch)
2859 {
2860 	u32 resp = 0, param = 0;
2861 	u32 drv_mb_param;
2862 	enum _ecore_status_t rc;
2863 
2864 	switch (eswitch) {
2865 	case ECORE_OV_ESWITCH_NONE:
2866 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2867 		break;
2868 	case ECORE_OV_ESWITCH_VEB:
2869 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2870 		break;
2871 	case ECORE_OV_ESWITCH_VEPA:
2872 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2873 		break;
2874 	default:
2875 		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2876 		return ECORE_INVAL;
2877 	}
2878 
2879 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2880 			   drv_mb_param, &resp, &param);
2881 	if (rc != ECORE_SUCCESS)
2882 		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2883 
2884 	return rc;
2885 }
2886 
2887 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2888 				       struct ecore_ptt *p_ptt,
2889 				       enum ecore_led_mode mode)
2890 {
2891 	u32 resp = 0, param = 0, drv_mb_param;
2892 	enum _ecore_status_t rc;
2893 
2894 	switch (mode) {
2895 	case ECORE_LED_MODE_ON:
2896 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2897 		break;
2898 	case ECORE_LED_MODE_OFF:
2899 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2900 		break;
2901 	case ECORE_LED_MODE_RESTORE:
2902 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2903 		break;
2904 	default:
2905 		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2906 		return ECORE_INVAL;
2907 	}
2908 
2909 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2910 			   drv_mb_param, &resp, &param);
2911 	if (rc != ECORE_SUCCESS)
2912 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2913 
2914 	return rc;
2915 }
2916 
2917 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2918 					     struct ecore_ptt *p_ptt,
2919 					     u32 mask_parities)
2920 {
2921 	u32 resp = 0, param = 0;
2922 	enum _ecore_status_t rc;
2923 
2924 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2925 			   mask_parities, &resp, &param);
2926 
2927 	if (rc != ECORE_SUCCESS) {
2928 		DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
2929 	} else if (resp != FW_MSG_CODE_OK) {
2930 		DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
2931 		rc = ECORE_INVAL;
2932 	}
2933 
2934 	return rc;
2935 }
2936 
2937 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2938 			   u8 *p_buf, u32 len)
2939 {
2940 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2941 	u32 bytes_left, offset, bytes_to_copy, buf_size;
2942 	u32 nvm_offset, resp, param;
2943 	struct ecore_ptt  *p_ptt;
2944 	enum _ecore_status_t rc = ECORE_SUCCESS;
2945 
2946 	p_ptt = ecore_ptt_acquire(p_hwfn);
2947 	if (!p_ptt)
2948 		return ECORE_BUSY;
2949 
2950 	bytes_left = len;
2951 	offset = 0;
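	/* Read in chunks of up to MCP_DRV_NVM_BUF_LEN bytes; each mailbox
	 * param packs the chunk length into the upper bits and the NVM
	 * address into the lower bits.
	 */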
2952 	while (bytes_left > 0) {
2953 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2954 					   MCP_DRV_NVM_BUF_LEN);
2955 		nvm_offset = (addr + offset) | (bytes_to_copy <<
2956 						DRV_MB_PARAM_NVM_LEN_OFFSET);
2957 		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2958 					  DRV_MSG_CODE_NVM_READ_NVRAM,
2959 					  nvm_offset, &resp, &param, &buf_size,
2960 					  (u32 *)(p_buf + offset));
2961 		if (rc != ECORE_SUCCESS || (resp != FW_MSG_CODE_NVM_OK)) {
2962 			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2963 			break;
2964 		}
2965 
2966 		/* This can be a lengthy process, and the scheduler might not
2967 		 * be preemptible. Sleep a bit to prevent CPU hogging; the check
2968 		 * below fires whenever the read crosses a 4 KiB boundary. */
2969 		if (bytes_left % 0x1000 <
2970 		    (bytes_left - buf_size) % 0x1000)
2971 			OSAL_MSLEEP(1);
2972 
2973 		offset += buf_size;
2974 		bytes_left -= buf_size;
2975 	}
2976 
2977 	p_dev->mcp_nvm_resp = resp;
2978 	ecore_ptt_release(p_hwfn, p_ptt);
2979 
2980 	return rc;
2981 }
2982 
2983 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2984 					u32 addr, u8 *p_buf, u32 len)
2985 {
2986 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2987 	struct ecore_ptt  *p_ptt;
2988 	u32 resp, param;
2989 	enum _ecore_status_t rc;
2990 
2991 	p_ptt = ecore_ptt_acquire(p_hwfn);
2992 	if (!p_ptt)
2993 		return ECORE_BUSY;
2994 
2995 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2996 				  (cmd == ECORE_PHY_CORE_READ) ?
2997 				  DRV_MSG_CODE_PHY_CORE_READ :
2998 				  DRV_MSG_CODE_PHY_RAW_READ,
2999 				  addr, &resp, &param, &len, (u32 *)p_buf);
3000 	if (rc != ECORE_SUCCESS)
3001 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3002 
3003 	p_dev->mcp_nvm_resp = resp;
3004 	ecore_ptt_release(p_hwfn, p_ptt);
3005 
3006 	return rc;
3007 }
3008 
3009 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
3010 {
3011 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3012 	struct ecore_ptt  *p_ptt;
3013 
3014 	p_ptt = ecore_ptt_acquire(p_hwfn);
3015 	if (!p_ptt)
3016 		return ECORE_BUSY;
3017 
3018 	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
3019 	ecore_ptt_release(p_hwfn, p_ptt);
3020 
3021 	return ECORE_SUCCESS;
3022 }
3023 
3024 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
3025 					    u32 addr)
3026 {
3027 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3028 	struct ecore_ptt  *p_ptt;
3029 	u32 resp, param;
3030 	enum _ecore_status_t rc;
3031 
3032 	p_ptt = ecore_ptt_acquire(p_hwfn);
3033 	if (!p_ptt)
3034 		return ECORE_BUSY;
3035 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
3036 			   &resp, &param);
3037 	p_dev->mcp_nvm_resp = resp;
3038 	ecore_ptt_release(p_hwfn, p_ptt);
3039 
3040 	return rc;
3041 }
3042 
3043 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
3044 						  u32 addr)
3045 {
3046 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3047 	struct ecore_ptt  *p_ptt;
3048 	u32 resp, param;
3049 	enum _ecore_status_t rc;
3050 
3051 	p_ptt = ecore_ptt_acquire(p_hwfn);
3052 	if (!p_ptt)
3053 		return ECORE_BUSY;
3054 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
3055 			   &resp, &param);
3056 	p_dev->mcp_nvm_resp = resp;
3057 	ecore_ptt_release(p_hwfn, p_ptt);
3058 
3059 	return rc;
3060 }
3061 
3062 /* rc is initialized to ECORE_INVAL because the while loop below
3063  * might not be entered if len is 0
3064  */
3065 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
3066 					 u32 addr, u8 *p_buf, u32 len)
3067 {
3068 	u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
3069 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3070 	enum _ecore_status_t rc = ECORE_INVAL;
3071 	struct ecore_ptt  *p_ptt;
3072 
3073 	p_ptt = ecore_ptt_acquire(p_hwfn);
3074 	if (!p_ptt)
3075 		return ECORE_BUSY;
3076 
3077 	switch (cmd) {
3078 	case ECORE_PUT_FILE_DATA:
3079 		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3080 		break;
3081 	case ECORE_NVM_WRITE_NVRAM:
3082 		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3083 		break;
3084 	case ECORE_EXT_PHY_FW_UPGRADE:
3085 		nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
3086 		break;
3087 	default:
3088 		DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
3089 			  cmd);
		ecore_ptt_release(p_hwfn, p_ptt);
3090 		return ECORE_INVAL;
3091 	}
3092 
3093 	buf_idx = 0;
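	/* Write in chunks of up to MCP_DRV_NVM_BUF_LEN bytes; as in the
	 * read path, the chunk length is packed into the upper bits of
	 * the mailbox param and the NVM address into the lower bits.
	 */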
3094 	while (buf_idx < len) {
3095 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3096 				      MCP_DRV_NVM_BUF_LEN);
3097 		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
3098 			      addr) +
3099 			     buf_idx;
3100 		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3101 					  &resp, &param, buf_size,
3102 					  (u32 *)&p_buf[buf_idx]);
3103 		if (rc != ECORE_SUCCESS ||
3104 		    ((resp != FW_MSG_CODE_NVM_OK) &&
3105 		     (resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
3106 			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3107 
3108 		/* This can be a lengthy process, and the scheduler might not
3109 		 * be preemptible. Sleep a bit to prevent CPU hogging; the check
3110 		 * below fires whenever the write crosses a 4 KiB boundary. */
3111 		if (buf_idx % 0x1000 >
3112 		    (buf_idx + buf_size) % 0x1000)
3113 			OSAL_MSLEEP(1);
3114 
3115 		buf_idx += buf_size;
3116 	}
3117 
3118 	p_dev->mcp_nvm_resp = resp;
3119 	ecore_ptt_release(p_hwfn, p_ptt);
3120 
3121 	return rc;
3122 }
3123 
3124 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
3125 					 u32 addr, u8 *p_buf, u32 len)
3126 {
3127 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3128 	struct ecore_ptt  *p_ptt;
3129 	u32 resp, param, nvm_cmd;
3130 	enum _ecore_status_t rc;
3131 
3132 	p_ptt = ecore_ptt_acquire(p_hwfn);
3133 	if (!p_ptt)
3134 		return ECORE_BUSY;
3135 
3136 	nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
3137 			DRV_MSG_CODE_PHY_RAW_WRITE;
3138 	rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
3139 				  &resp, &param, len, (u32 *)p_buf);
3140 	if (rc != ECORE_SUCCESS)
3141 		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
3142 	p_dev->mcp_nvm_resp = resp;
3143 	ecore_ptt_release(p_hwfn, p_ptt);
3144 
3145 	return rc;
3146 }
3147 
3148 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
3149 						   u32 addr)
3150 {
3151 	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3152 	struct ecore_ptt  *p_ptt;
3153 	u32 resp, param;
3154 	enum _ecore_status_t rc;
3155 
3156 	p_ptt = ecore_ptt_acquire(p_hwfn);
3157 	if (!p_ptt)
3158 		return ECORE_BUSY;
3159 
3160 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
3161 			   &resp, &param);
3162 	p_dev->mcp_nvm_resp = resp;
3163 	ecore_ptt_release(p_hwfn, p_ptt);
3164 
3165 	return rc;
3166 }
3167 
3168 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
3169 					    struct ecore_ptt *p_ptt,
3170 					    u32 port, u32 addr, u32 offset,
3171 					    u32 len, u8 *p_buf)
3172 {
3173 	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
3174 	u32 resp, param;
3175 	enum _ecore_status_t rc;
3176 
3177 	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3178 			(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3179 	addr = offset;
3180 	offset = 0;
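	/* 'addr' now holds the byte offset within the transceiver and
	 * 'offset' tracks progress into p_buf; transfers are split into
	 * chunks of at most MAX_I2C_TRANSACTION_SIZE bytes.
	 */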
3181 	bytes_left = len;
3182 	while (bytes_left > 0) {
3183 		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
3184 					   MAX_I2C_TRANSACTION_SIZE);
3185 		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3186 			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3187 		nvm_offset |= ((addr + offset) <<
3188 				DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3189 		nvm_offset |= (bytes_to_copy <<
3190 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3191 		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3192 					  DRV_MSG_CODE_TRANSCEIVER_READ,
3193 					  nvm_offset, &resp, &param, &buf_size,
3194 					  (u32 *)(p_buf + offset));
3195 		if (rc != ECORE_SUCCESS) {
3196 			DP_NOTICE(p_hwfn, false,
3197 				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3198 				  rc);
3199 			return rc;
3200 		}
3201 
3202 		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3203 			return ECORE_NODEV;
3204 		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3205 			return ECORE_UNKNOWN_ERROR;
3206 
3207 		offset += buf_size;
3208 		bytes_left -= buf_size;
3209 	}
3210 
3211 	return ECORE_SUCCESS;
3212 }
3213 
3214 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
3215 					     struct ecore_ptt *p_ptt,
3216 					     u32 port, u32 addr, u32 offset,
3217 					     u32 len, u8 *p_buf)
3218 {
3219 	u32 buf_idx, buf_size, nvm_offset, resp, param;
3220 	enum _ecore_status_t rc;
3221 
3222 	nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
3223 			(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
3224 	buf_idx = 0;
3225 	while (buf_idx < len) {
3226 		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
3227 				      MAX_I2C_TRANSACTION_SIZE);
3228 		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3229 				 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3230 		nvm_offset |= ((offset + buf_idx) <<
3231 				 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
3232 		nvm_offset |= (buf_size <<
3233 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
3234 		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3235 					  DRV_MSG_CODE_TRANSCEIVER_WRITE,
3236 					  nvm_offset, &resp, &param, buf_size,
3237 					  (u32 *)&p_buf[buf_idx]);
3238 		if (rc != ECORE_SUCCESS) {
3239 			DP_NOTICE(p_hwfn, false,
3240 				  "Failed to send a transceiver write command to the MFW. rc = %d.\n",
3241 				  rc);
3242 			return rc;
3243 		}
3244 
3245 		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3246 			return ECORE_NODEV;
3247 		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3248 			return ECORE_UNKNOWN_ERROR;
3249 
3250 		buf_idx += buf_size;
3251 	}
3252 
3253 	return ECORE_SUCCESS;
3254 }
3255 
3256 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
3257 					 struct ecore_ptt *p_ptt,
3258 					 u16 gpio, u32 *gpio_val)
3259 {
3260 	enum _ecore_status_t rc = ECORE_SUCCESS;
3261 	u32 drv_mb_param = 0, rsp;
3262 
3263 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
3264 
3265 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
3266 			   drv_mb_param, &rsp, gpio_val);
3267 
3268 	if (rc != ECORE_SUCCESS)
3269 		return rc;
3270 
3271 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3272 		return ECORE_UNKNOWN_ERROR;
3273 
3274 	return ECORE_SUCCESS;
3275 }
3276 
3277 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
3278 					  struct ecore_ptt *p_ptt,
3279 					  u16 gpio, u16 gpio_val)
3280 {
3281 	enum _ecore_status_t rc = ECORE_SUCCESS;
3282 	u32 drv_mb_param = 0, param, rsp;
3283 
3284 	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
3285 		(gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
3286 
3287 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
3288 			   drv_mb_param, &rsp, &param);
3289 
3290 	if (rc != ECORE_SUCCESS)
3291 		return rc;
3292 
3293 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3294 		return ECORE_UNKNOWN_ERROR;
3295 
3296 	return ECORE_SUCCESS;
3297 }
3298 
3299 enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
3300 					 struct ecore_ptt *p_ptt,
3301 					 u16 gpio, u32 *gpio_direction,
3302 					 u32 *gpio_ctrl)
3303 {
3304 	u32 drv_mb_param = 0, rsp, val = 0;
3305 	enum _ecore_status_t rc = ECORE_SUCCESS;
3306 
3307 	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
3308 
3309 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
3310 			   drv_mb_param, &rsp, &val);
3311 	if (rc != ECORE_SUCCESS)
3312 		return rc;
3313 
3314 	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
3315 			   DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
3316 	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
3317 		      DRV_MB_PARAM_GPIO_CTRL_OFFSET;
3318 
3319 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
3320 		return ECORE_UNKNOWN_ERROR;
3321 
3322 	return ECORE_SUCCESS;
3323 }
3324 
3325 enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
3326 						  struct ecore_ptt *p_ptt)
3327 {
3328 	u32 drv_mb_param = 0, rsp, param;
3329 	enum _ecore_status_t rc = ECORE_SUCCESS;
3330 
3331 	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3332 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3333 
3334 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3335 			   drv_mb_param, &rsp, &param);
3336 
3337 	if (rc != ECORE_SUCCESS)
3338 		return rc;
3339 
3340 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3341 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3342 		rc = ECORE_UNKNOWN_ERROR;
3343 
3344 	return rc;
3345 }
3346 
3347 enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
3348 					       struct ecore_ptt *p_ptt)
3349 {
3350 	u32 drv_mb_param, rsp, param;
3351 	enum _ecore_status_t rc = ECORE_SUCCESS;
3352 
3353 	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3354 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3355 
3356 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3357 			   drv_mb_param, &rsp, &param);
3358 
3359 	if (rc != ECORE_SUCCESS)
3360 		return rc;
3361 
3362 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3363 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3364 		rc = ECORE_UNKNOWN_ERROR;
3365 
3366 	return rc;
3367 }
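
/*
 * Editorial sketch, not part of the original driver: chaining the two BIST
 * helpers above.  Each returns ECORE_UNKNOWN_ERROR when the MFW responds
 * but the test fails, so simple error propagation is enough.  The guard
 * and function name are invented for illustration.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t
example_run_bist(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_bist_register_test(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_mcp_bist_clock_test(p_hwfn, p_ptt);
}
#endif /* ECORE_MCP_EXAMPLES */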
3368 
3369 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
3370 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
3371 {
3372 	u32 drv_mb_param = 0, rsp;
3373 	enum _ecore_status_t rc = ECORE_SUCCESS;
3374 
3375 	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3376 			DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3377 
3378 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3379 			   drv_mb_param, &rsp, num_images);
3380 
3381 	if (rc != ECORE_SUCCESS)
3382 		return rc;
3383 
3384 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3385 		rc = ECORE_UNKNOWN_ERROR;
3386 
3387 	return rc;
3388 }
3389 
3390 enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
3391 	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3392 	struct bist_nvm_image_att *p_image_att, u32 image_index)
3393 {
3394 	u32 buf_size, nvm_offset, resp, param;
3395 	enum _ecore_status_t rc;
3396 
3397 	nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3398 		      DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
3399 	nvm_offset |= (image_index <<
3400 		       DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
3401 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3402 				  nvm_offset, &resp, &param, &buf_size,
3403 				  (u32 *)p_image_att);
3404 	if (rc != ECORE_SUCCESS)
3405 		return rc;
3406 
3407 	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3408 	    (p_image_att->return_code != 1))
3409 		rc = ECORE_UNKNOWN_ERROR;
3410 
3411 	return rc;
3412 }
3413 
3414 enum _ecore_status_t
3415 ecore_mcp_get_nvm_image_att(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3416 			    enum ecore_nvm_images image_id,
3417 			    struct ecore_nvm_image_att *p_image_att)
3418 {
3419 	struct bist_nvm_image_att mfw_image_att;
3420 	enum nvm_image_type type;
3421 	u32 num_images, i;
3422 	enum _ecore_status_t rc;
3423 
3424 	/* Translate image_id into MFW definitions */
3425 	switch (image_id) {
3426 	case ECORE_NVM_IMAGE_ISCSI_CFG:
3427 		type = NVM_TYPE_ISCSI_CFG;
3428 		break;
3429 	case ECORE_NVM_IMAGE_FCOE_CFG:
3430 		type = NVM_TYPE_FCOE_CFG;
3431 		break;
3432 	case ECORE_NVM_IMAGE_MDUMP:
3433 		type = NVM_TYPE_MDUMP;
3434 		break;
3435 	default:
3436 		DP_NOTICE(p_hwfn, false, "Unknown image_id %08x in request\n",
3437 			  image_id);
3438 		return ECORE_INVAL;
3439 	}
3440 
3441 	/* Learn the number of images, then traverse them looking for a match */
3442 	rc = ecore_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
3443 	if (rc != ECORE_SUCCESS || !num_images)
3444 		return ECORE_INVAL;
3445 
3446 	for (i = 0; i < num_images; i++) {
3447 		rc = ecore_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
3448 							   &mfw_image_att, i);
3449 		if (rc != ECORE_SUCCESS)
3450 			return rc;
3451 
3452 		if (type == mfw_image_att.image_type)
3453 			break;
3454 	}
3455 	if (i == num_images) {
3456 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3457 			   "Failed to find nvram image of type %08x\n",
3458 			   image_id);
3459 		return ECORE_INVAL;
3460 	}
3461 
3462 	p_image_att->start_addr = mfw_image_att.nvm_start_addr;
3463 	p_image_att->length = mfw_image_att.len;
3464 
3465 	return ECORE_SUCCESS;
3466 }
3467 
3468 enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
3469 					     struct ecore_ptt *p_ptt,
3470 					     enum ecore_nvm_images image_id,
3471 					     u8 *p_buffer, u32 buffer_len)
3472 {
3473 	struct ecore_nvm_image_att image_att;
3474 	enum _ecore_status_t rc;
3475 
3476 	OSAL_MEM_ZERO(p_buffer, buffer_len);
3477 
3478 	rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
3479 	if (rc != ECORE_SUCCESS)
3480 		return rc;
3481 
3482 	/* Validate sizes - both the image's and the supplied buffer's */
3483 	if (image_att.length <= 4) {
3484 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3485 			   "Image [%d] is too small - only %d bytes\n",
3486 			   image_id, image_att.length);
3487 		return ECORE_INVAL;
3488 	}
3489 
3490 	/* Each NVM image is suffixed by a CRC, which the upper layer doesn't need */
3491 	image_att.length -= 4;
3492 
3493 	if (image_att.length > buffer_len) {
3494 		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3495 			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
3496 			   image_id, image_att.length, buffer_len);
3497 		return ECORE_NOMEM;
3498 	}
3499 
3500 	return ecore_mcp_nvm_read(p_hwfn->p_dev, image_att.start_addr,
3501 				  p_buffer, image_att.length);
3502 }
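
/*
 * Editorial sketch, not part of the original driver: reading the mdump NVM
 * image into a dynamically sized buffer.  ecore_mcp_get_nvm_image() strips
 * the trailing CRC itself, so a buffer sized from the attribute query is
 * sufficient.  The guard and function name are invented for illustration;
 * the OSAL_ALLOC/OSAL_FREE pair follows the pattern used elsewhere in this
 * driver.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t
example_read_mdump(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_nvm_image_att image_att;
	enum _ecore_status_t rc;
	u8 *p_buffer;

	rc = ecore_mcp_get_nvm_image_att(p_hwfn, p_ptt, ECORE_NVM_IMAGE_MDUMP,
					 &image_att);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_buffer = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, image_att.length);
	if (p_buffer == OSAL_NULL)
		return ECORE_NOMEM;

	rc = ecore_mcp_get_nvm_image(p_hwfn, p_ptt, ECORE_NVM_IMAGE_MDUMP,
				     p_buffer, image_att.length);
	OSAL_FREE(p_hwfn->p_dev, p_buffer);

	return rc;
}
#endif /* ECORE_MCP_EXAMPLES */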
3503 
3504 enum _ecore_status_t
3505 ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
3506 			       struct ecore_ptt *p_ptt,
3507 			       struct ecore_temperature_info *p_temp_info)
3508 {
3509 	struct ecore_temperature_sensor *p_temp_sensor;
3510 	struct temperature_status_stc mfw_temp_info;
3511 	struct ecore_mcp_mb_params mb_params;
3512 	u32 val;
3513 	enum _ecore_status_t rc;
3514 	u8 i;
3515 
3516 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3517 	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
3518 	mb_params.p_data_dst = &mfw_temp_info;
3519 	mb_params.data_dst_size = sizeof(mfw_temp_info);
3520 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3521 	if (rc != ECORE_SUCCESS)
3522 		return rc;
3523 
3524 	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
3525 	p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
3526 					      ECORE_MAX_NUM_OF_SENSORS);
3527 	for (i = 0; i < p_temp_info->num_sensors; i++) {
3528 		val = mfw_temp_info.sensor[i];
3529 		p_temp_sensor = &p_temp_info->sensors[i];
3530 		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
3531 						 SENSOR_LOCATION_OFFSET;
3532 		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
3533 						THRESHOLD_HIGH_OFFSET;
3534 		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
3535 					  CRITICAL_TEMPERATURE_OFFSET;
3536 		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
3537 					      CURRENT_TEMP_OFFSET;
3538 	}
3539 
3540 	return ECORE_SUCCESS;
3541 }
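
/*
 * Editorial sketch, not part of the original driver: logging the readings
 * returned by ecore_mcp_get_temperature_info().  The guard and function
 * name are invented for illustration.
 */
#ifdef ECORE_MCP_EXAMPLES
static void
example_log_temperatures(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_temperature_info temp_info;
	u8 i;

	if (ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &temp_info) !=
	    ECORE_SUCCESS)
		return;

	for (i = 0; i < temp_info.num_sensors; i++)
		DP_INFO(p_hwfn, "sensor %d: location %d, current temp %d\n",
			i, temp_info.sensors[i].sensor_location,
			temp_info.sensors[i].current_temp);
}
#endif /* ECORE_MCP_EXAMPLES */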
3542 
3543 enum _ecore_status_t ecore_mcp_get_mba_versions(
3544 	struct ecore_hwfn *p_hwfn,
3545 	struct ecore_ptt *p_ptt,
3546 	struct ecore_mba_vers *p_mba_vers)
3547 {
3548 	u32 buf_size, resp, param;
3549 	enum _ecore_status_t rc;
3550 
3551 	rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
3552 				  0, &resp, &param, &buf_size,
3553 				  &(p_mba_vers->mba_vers[0]));
3554 
3555 	if (rc != ECORE_SUCCESS)
3556 		return rc;
3557 
3558 	if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3559 		rc = ECORE_UNKNOWN_ERROR;
3560 
3561 	if (buf_size != MCP_DRV_NVM_BUF_LEN)
3562 		rc = ECORE_UNKNOWN_ERROR;
3563 
3564 	return rc;
3565 }
3566 
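/*
 * Editorial note: the MFW returns the event count in a 32-bit mcp_param,
 * so the cast below only writes the low half of *num_events (on a
 * little-endian host); callers should zero the u64 beforehand.
 */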
3567 enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
3568 					      struct ecore_ptt *p_ptt,
3569 					      u64 *num_events)
3570 {
3571 	u32 rsp;
3572 
3573 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
3574 			     0, &rsp, (u32 *)num_events);
3575 }
3576 
3577 static enum resource_id_enum
3578 ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
3579 {
3580 	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3581 
3582 	switch (res_id) {
3583 	case ECORE_SB:
3584 		mfw_res_id = RESOURCE_NUM_SB_E;
3585 		break;
3586 	case ECORE_L2_QUEUE:
3587 		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3588 		break;
3589 	case ECORE_VPORT:
3590 		mfw_res_id = RESOURCE_NUM_VPORT_E;
3591 		break;
3592 	case ECORE_RSS_ENG:
3593 		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3594 		break;
3595 	case ECORE_PQ:
3596 		mfw_res_id = RESOURCE_NUM_PQ_E;
3597 		break;
3598 	case ECORE_RL:
3599 		mfw_res_id = RESOURCE_NUM_RL_E;
3600 		break;
3601 	case ECORE_MAC:
3602 	case ECORE_VLAN:
3603 		/* Each VFC resource can accommodate both a MAC and a VLAN */
3604 		mfw_res_id = RESOURCE_VFC_FILTER_E;
3605 		break;
3606 	case ECORE_ILT:
3607 		mfw_res_id = RESOURCE_ILT_E;
3608 		break;
3609 	case ECORE_LL2_QUEUE:
3610 		mfw_res_id = RESOURCE_LL2_QUEUE_E;
3611 		break;
3612 	case ECORE_RDMA_CNQ_RAM:
3613 	case ECORE_CMDQS_CQS:
3614 		/* CNQ/CMDQS are the same resource */
3615 		mfw_res_id = RESOURCE_CQS_E;
3616 		break;
3617 	case ECORE_RDMA_STATS_QUEUE:
3618 		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3619 		break;
3620 	case ECORE_BDQ:
3621 		mfw_res_id = RESOURCE_BDQ_E;
3622 		break;
3623 	default:
3624 		break;
3625 	}
3626 
3627 	return mfw_res_id;
3628 }
3629 
3630 #define ECORE_RESC_ALLOC_VERSION_MAJOR	2
3631 #define ECORE_RESC_ALLOC_VERSION_MINOR	0
3632 #define ECORE_RESC_ALLOC_VERSION				\
3633 	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
3634 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) |	\
3635 	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
3636 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
3637 
3638 struct ecore_resc_alloc_in_params {
3639 	u32 cmd;
3640 	enum ecore_resources res_id;
3641 	u32 resc_max_val;
3642 };
3643 
3644 struct ecore_resc_alloc_out_params {
3645 	u32 mcp_resp;
3646 	u32 mcp_param;
3647 	u32 resc_num;
3648 	u32 resc_start;
3649 	u32 vf_resc_num;
3650 	u32 vf_resc_start;
3651 	u32 flags;
3652 };
3653 
3654 static enum _ecore_status_t
3655 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
3656 			      struct ecore_ptt *p_ptt,
3657 			      struct ecore_resc_alloc_in_params *p_in_params,
3658 			      struct ecore_resc_alloc_out_params *p_out_params)
3659 {
3660 	struct ecore_mcp_mb_params mb_params;
3661 	struct resource_info mfw_resc_info;
3662 	enum _ecore_status_t rc;
3663 
3664 	OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
3665 
3666 	mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
3667 	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3668 		DP_ERR(p_hwfn,
3669 		       "Failed to match resource %d [%s] with the MFW resources\n",
3670 		       p_in_params->res_id,
3671 		       ecore_hw_get_resc_name(p_in_params->res_id));
3672 		return ECORE_INVAL;
3673 	}
3674 
3675 	switch (p_in_params->cmd) {
3676 	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3677 		mfw_resc_info.size = p_in_params->resc_max_val;
3678 		/* Fallthrough */
3679 	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3680 		break;
3681 	default:
3682 		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3683 		       p_in_params->cmd);
3684 		return ECORE_INVAL;
3685 	}
3686 
3687 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3688 	mb_params.cmd = p_in_params->cmd;
3689 	mb_params.param = ECORE_RESC_ALLOC_VERSION;
3690 	mb_params.p_data_src = &mfw_resc_info;
3691 	mb_params.data_src_size = sizeof(mfw_resc_info);
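	/* The response is received into the very buffer that carries the
	 * request, so the destination fields below simply alias the source.
	 */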
3692 	mb_params.p_data_dst = mb_params.p_data_src;
3693 	mb_params.data_dst_size = mb_params.data_src_size;
3694 
3695 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3696 		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3697 		   p_in_params->cmd, p_in_params->res_id,
3698 		   ecore_hw_get_resc_name(p_in_params->res_id),
3699 		   GET_MFW_FIELD(mb_params.param,
3700 				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3701 		   GET_MFW_FIELD(mb_params.param,
3702 				 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3703 		   p_in_params->resc_max_val);
3704 
3705 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3706 	if (rc != ECORE_SUCCESS)
3707 		return rc;
3708 
3709 	p_out_params->mcp_resp = mb_params.mcp_resp;
3710 	p_out_params->mcp_param = mb_params.mcp_param;
3711 	p_out_params->resc_num = mfw_resc_info.size;
3712 	p_out_params->resc_start = mfw_resc_info.offset;
3713 	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3714 	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3715 	p_out_params->flags = mfw_resc_info.flags;
3716 
3717 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3718 		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3719 		   GET_MFW_FIELD(p_out_params->mcp_param,
3720 				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3721 		   GET_MFW_FIELD(p_out_params->mcp_param,
3722 				 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3723 		   p_out_params->resc_num, p_out_params->resc_start,
3724 		   p_out_params->vf_resc_num, p_out_params->vf_resc_start,
3725 		   p_out_params->flags);
3726 
3727 	return ECORE_SUCCESS;
3728 }
3729 
3730 enum _ecore_status_t
3731 ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3732 			   enum ecore_resources res_id, u32 resc_max_val,
3733 			   u32 *p_mcp_resp)
3734 {
3735 	struct ecore_resc_alloc_out_params out_params;
3736 	struct ecore_resc_alloc_in_params in_params;
3737 	enum _ecore_status_t rc;
3738 
3739 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3740 	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3741 	in_params.res_id = res_id;
3742 	in_params.resc_max_val = resc_max_val;
3743 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3744 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3745 					   &out_params);
3746 	if (rc != ECORE_SUCCESS)
3747 		return rc;
3748 
3749 	*p_mcp_resp = out_params.mcp_resp;
3750 
3751 	return ECORE_SUCCESS;
3752 }
3753 
3754 enum _ecore_status_t
3755 ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3756 			enum ecore_resources res_id, u32 *p_mcp_resp,
3757 			u32 *p_resc_num, u32 *p_resc_start)
3758 {
3759 	struct ecore_resc_alloc_out_params out_params;
3760 	struct ecore_resc_alloc_in_params in_params;
3761 	enum _ecore_status_t rc;
3762 
3763 	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
3764 	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3765 	in_params.res_id = res_id;
3766 	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
3767 	rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3768 					   &out_params);
3769 	if (rc != ECORE_SUCCESS)
3770 		return rc;
3771 
3772 	*p_mcp_resp = out_params.mcp_resp;
3773 
3774 	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3775 		*p_resc_num = out_params.resc_num;
3776 		*p_resc_start = out_params.resc_start;
3777 	}
3778 
3779 	return ECORE_SUCCESS;
3780 }
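
/*
 * Editorial sketch, not part of the original driver: querying a single
 * resource's allocation.  Note that ECORE_SUCCESS alone is not enough --
 * the outputs are valid only when the MFW answered with
 * FW_MSG_CODE_RESOURCE_ALLOC_OK.  The guard and function name are invented
 * for illustration.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t
example_query_vports(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, resc_num = 0, resc_start = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_VPORT, &mcp_resp,
				     &resc_num, &resc_start);
	if (rc != ECORE_SUCCESS)
		return rc;
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		return ECORE_UNKNOWN_ERROR;

	DP_INFO(p_hwfn, "VPORT: num 0x%x, start 0x%x\n", resc_num,
		resc_start);

	return ECORE_SUCCESS;
}
#endif /* ECORE_MCP_EXAMPLES */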
3781 
3782 enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
3783 					       struct ecore_ptt *p_ptt)
3784 {
3785 	u32 mcp_resp, mcp_param;
3786 
3787 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3788 			     &mcp_resp, &mcp_param);
3789 }
3790 
3791 enum _ecore_status_t ecore_mcp_get_lldp_mac(struct ecore_hwfn *p_hwfn,
3792 					    struct ecore_ptt *p_ptt,
3793 					    u8 lldp_mac_addr[ETH_ALEN])
3794 {
3795 	struct ecore_mcp_mb_params mb_params;
3796 	struct mcp_mac lldp_mac;
3797 	enum _ecore_status_t rc;
3798 
3799 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3800 	mb_params.cmd = DRV_MSG_CODE_GET_LLDP_MAC;
3801 	mb_params.p_data_dst = &lldp_mac;
3802 	mb_params.data_dst_size = sizeof(lldp_mac);
3803 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3804 	if (rc != ECORE_SUCCESS)
3805 		return rc;
3806 
3807 	if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3808 		DP_NOTICE(p_hwfn, false,
3809 			  "MFW lacks support for the GET_LLDP_MAC command [resp 0x%08x]\n",
3810 			  mb_params.mcp_resp);
3811 		return ECORE_INVAL;
3812 	}
3813 
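	/* The MFW keeps the first two bytes of the MAC in mac_upper and the
	 * remaining four in mac_lower; the raw copies below assume a
	 * little-endian host and mirror the packing in
	 * ecore_mcp_set_lldp_mac() further down.
	 */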
3814 	*(u16 *)lldp_mac_addr = *(u16 *)&lldp_mac.mac_upper;
3815 	*(u32 *)(lldp_mac_addr + 2) = lldp_mac.mac_lower;
3816 
3817 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3818 		   "LLDP MAC address is %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3819 		   lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3820 		   lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3821 
3822 	return ECORE_SUCCESS;
3823 }
3824 
3825 enum _ecore_status_t ecore_mcp_set_lldp_mac(struct ecore_hwfn *p_hwfn,
3826 					    struct ecore_ptt *p_ptt,
3827 					    u8 lldp_mac_addr[ETH_ALEN])
3828 {
3829 	struct ecore_mcp_mb_params mb_params;
3830 	struct mcp_mac lldp_mac;
3831 	enum _ecore_status_t rc;
3832 
3833 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3834 		   "Configuring LLDP MAC address to %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
3835 		   lldp_mac_addr[0], lldp_mac_addr[1], lldp_mac_addr[2],
3836 		   lldp_mac_addr[3], lldp_mac_addr[4], lldp_mac_addr[5]);
3837 
3838 	OSAL_MEM_ZERO(&lldp_mac, sizeof(lldp_mac));
3839 	lldp_mac.mac_upper = *(u16 *)lldp_mac_addr;
3840 	lldp_mac.mac_lower = *(u32 *)(lldp_mac_addr + 2);
3841 
3842 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
3843 	mb_params.cmd = DRV_MSG_CODE_SET_LLDP_MAC;
3844 	mb_params.p_data_src = &lldp_mac;
3845 	mb_params.data_src_size = sizeof(lldp_mac);
3846 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3847 	if (rc != ECORE_SUCCESS)
3848 		return rc;
3849 
3850 	if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
3851 		DP_NOTICE(p_hwfn, false,
3852 			  "MFW lacks support for the SET_LLDP_MAC command [resp 0x%08x]\n",
3853 			  mb_params.mcp_resp);
3854 		return ECORE_INVAL;
3855 	}
3856 
3857 	return ECORE_SUCCESS;
3858 }
3859 
3860 static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
3861 						   struct ecore_ptt *p_ptt,
3862 						   u32 param, u32 *p_mcp_resp,
3863 						   u32 *p_mcp_param)
3864 {
3865 	enum _ecore_status_t rc;
3866 
3867 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3868 			   p_mcp_resp, p_mcp_param);
3869 	if (rc != ECORE_SUCCESS)
3870 		return rc;
3871 
3872 	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3873 		DP_INFO(p_hwfn,
3874 			"The resource command is unsupported by the MFW\n");
3875 		return ECORE_NOTIMPL;
3876 	}
3877 
3878 	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3879 		u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3880 
3881 		DP_NOTICE(p_hwfn, false,
3882 			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3883 			  param, opcode);
3884 		return ECORE_INVAL;
3885 	}
3886 
3887 	return rc;
3888 }
3889 
3890 static enum _ecore_status_t
3891 __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3892 		      struct ecore_resc_lock_params *p_params)
3893 {
3894 	u32 param = 0, mcp_resp, mcp_param;
3895 	u8 opcode;
3896 	enum _ecore_status_t rc;
3897 
3898 	switch (p_params->timeout) {
3899 	case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
3900 		opcode = RESOURCE_OPCODE_REQ;
3901 		p_params->timeout = 0;
3902 		break;
3903 	case ECORE_MCP_RESC_LOCK_TO_NONE:
3904 		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3905 		p_params->timeout = 0;
3906 		break;
3907 	default:
3908 		opcode = RESOURCE_OPCODE_REQ_W_AGING;
3909 		break;
3910 	}
3911 
3912 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3913 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3914 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3915 
3916 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3917 		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3918 		   param, p_params->timeout, opcode, p_params->resource);
3919 
3920 	/* Attempt to acquire the resource */
3921 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
3922 				    &mcp_param);
3923 	if (rc != ECORE_SUCCESS)
3924 		return rc;
3925 
3926 	/* Analyze the response */
3927 	p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3928 	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3929 
3930 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3931 		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3932 		   mcp_param, opcode, p_params->owner);
3933 
3934 	switch (opcode) {
3935 	case RESOURCE_OPCODE_GNT:
3936 		p_params->b_granted = true;
3937 		break;
3938 	case RESOURCE_OPCODE_BUSY:
3939 		p_params->b_granted = false;
3940 		break;
3941 	default:
3942 		DP_NOTICE(p_hwfn, false,
3943 			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3944 			  mcp_param, opcode);
3945 		return ECORE_INVAL;
3946 	}
3947 
3948 	return ECORE_SUCCESS;
3949 }
3950 
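/*
 * Editorial note: returning ECORE_SUCCESS only means the retry loop below
 * completed without a mailbox failure; callers must check
 * p_params->b_granted to learn whether the lock was actually acquired.
 */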
3951 enum _ecore_status_t
3952 ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3953 		    struct ecore_resc_lock_params *p_params)
3954 {
3955 	u32 retry_cnt = 0;
3956 	enum _ecore_status_t rc;
3957 
3958 	do {
3959 		/* No need for an interval before the first iteration */
3960 		if (retry_cnt) {
3961 			if (p_params->sleep_b4_retry) {
3962 				u16 retry_interval_in_ms =
3963 					DIV_ROUND_UP(p_params->retry_interval,
3964 						     1000);
3965 
3966 				OSAL_MSLEEP(retry_interval_in_ms);
3967 			} else {
3968 				OSAL_UDELAY(p_params->retry_interval);
3969 			}
3970 		}
3971 
3972 		rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3973 		if (rc != ECORE_SUCCESS)
3974 			return rc;
3975 
3976 		if (p_params->b_granted)
3977 			break;
3978 	} while (retry_cnt++ < p_params->retry_num);
3979 
3980 	return ECORE_SUCCESS;
3981 }
3982 
3983 enum _ecore_status_t
3984 ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3985 		      struct ecore_resc_unlock_params *p_params)
3986 {
3987 	u32 param = 0, mcp_resp, mcp_param;
3988 	u8 opcode;
3989 	enum _ecore_status_t rc;
3990 
3991 	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3992 				   : RESOURCE_OPCODE_RELEASE;
3993 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3994 	SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3995 
3996 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3997 		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3998 		   param, opcode, p_params->resource);
3999 
4000 	/* Attempt to release the resource */
4001 	rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
4002 				    &mcp_param);
4003 	if (rc != ECORE_SUCCESS)
4004 		return rc;
4005 
4006 	/* Analyze the response */
4007 	opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
4008 
4009 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
4010 		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
4011 		   mcp_param, opcode);
4012 
4013 	switch (opcode) {
4014 	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
4015 		DP_INFO(p_hwfn,
4016 			"Resource unlock request for an already released resource [%d]\n",
4017 			p_params->resource);
4018 		/* Fallthrough */
4019 	case RESOURCE_OPCODE_RELEASED:
4020 		p_params->b_released = true;
4021 		break;
4022 	case RESOURCE_OPCODE_WRONG_OWNER:
4023 		p_params->b_released = false;
4024 		break;
4025 	default:
4026 		DP_NOTICE(p_hwfn, false,
4027 			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
4028 			  mcp_param, opcode);
4029 		return ECORE_INVAL;
4030 	}
4031 
4032 	return ECORE_SUCCESS;
4033 }
4034 
4035 void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
4036 				      struct ecore_resc_unlock_params *p_unlock,
4037 				      enum ecore_resc_lock resource,
4038 				      bool b_is_permanent)
4039 {
4040 	if (p_lock != OSAL_NULL) {
4041 		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
4042 
4043 		/* Permanent resources don't require aging, and there's no
4044 		 * point in trying to acquire them more than once, since no
4045 		 * other entity is expected to release them.
4046 		 */
4047 		if (b_is_permanent) {
4048 			p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
4049 		} else {
4050 			p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
4051 			p_lock->retry_interval =
4052 					ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
4053 			p_lock->sleep_b4_retry = true;
4054 		}
4055 
4056 		p_lock->resource = resource;
4057 	}
4058 
4059 	if (p_unlock != OSAL_NULL) {
4060 		OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
4061 		p_unlock->resource = resource;
4062 	}
4063 }
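
/*
 * Editorial sketch, not part of the original driver: the typical
 * lock/use/unlock flow built on the default initializer above.  The guard
 * and function name are invented for illustration, and the resource id is
 * just an example -- any enum ecore_resc_lock member works the same way.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t
example_locked_section(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_resc_unlock_params unlock_params;
	struct ecore_resc_lock_params lock_params;
	enum _ecore_status_t rc;

	ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
					 ECORE_RESC_LOCK_DBG_DUMP, false);

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc != ECORE_SUCCESS)
		return rc;
	if (!lock_params.b_granted)
		return ECORE_BUSY;

	/* ... access the MFW-arbitrated resource here ... */

	return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}
#endif /* ECORE_MCP_EXAMPLES */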
4064 
4065 enum _ecore_status_t
4066 ecore_mcp_update_fcoe_cvid(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4067 			   u16 vlan)
4068 {
4069 	u32 resp = 0, param = 0;
4070 	enum _ecore_status_t rc;
4071 
4072 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID,
4073 			   (u32)vlan << DRV_MB_PARAM_FCOE_CVID_OFFSET,
4074 			   &resp, &param);
4075 	if (rc != ECORE_SUCCESS)
4076 		DP_ERR(p_hwfn, "Failed to update fcoe vlan, rc = %d\n", rc);
4077 
4078 	return rc;
4079 }
4080 
4081 enum _ecore_status_t
4082 ecore_mcp_update_fcoe_fabric_name(struct ecore_hwfn *p_hwfn,
4083 				  struct ecore_ptt *p_ptt, u8 *wwn)
4084 {
4085 	struct ecore_mcp_mb_params mb_params;
4086 	struct mcp_wwn fabric_name;
4087 	enum _ecore_status_t rc;
4088 
4089 	OSAL_MEM_ZERO(&fabric_name, sizeof(fabric_name));
4090 	fabric_name.wwn_upper = *(u32 *)wwn;
4091 	fabric_name.wwn_lower = *(u32 *)(wwn + 4);
4092 
4093 	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
4094 	mb_params.cmd = DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME;
4095 	mb_params.p_data_src = &fabric_name;
4096 	mb_params.data_src_size = sizeof(fabric_name);
4097 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4098 	if (rc != ECORE_SUCCESS)
4099 		DP_ERR(p_hwfn, "Failed to update fcoe wwn, rc = %d\n", rc);
4100 
4101 	return rc;
4102 }
4103 
4104 void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
4105 		      u32 offset, u32 val)
4106 {
4107 	struct ecore_mcp_mb_params mb_params = {0};
4108 	enum _ecore_status_t rc;
4109 	u32 dword = val;
4110 
4111 	mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
4112 	mb_params.param = offset;
4113 	mb_params.p_data_src = &dword;
4114 	mb_params.data_src_size = sizeof(dword);
4115 
4116 	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4117 	if (rc != ECORE_SUCCESS) {
4118 		DP_NOTICE(p_hwfn, false,
4119 			  "Failed to send the WoL write request, rc = %d\n",
4120 			  rc);
4121 		return;
4122 	}
4123 
4124 	if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK)
4125 		DP_NOTICE(p_hwfn, false,
4126 			  "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
4127 			  val, offset, mb_params.mcp_resp);
4128 }
4129 
4130 enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
4131 						struct ecore_ptt *p_ptt)
4132 {
4133 	u32 mcp_resp;
4134 	enum _ecore_status_t rc;
4135 
4136 	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
4137 			   0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
4138 	if (rc == ECORE_SUCCESS)
4139 		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
4140 			   "MFW supported features: %08x\n",
4141 			   p_hwfn->mcp_info->capabilities);
4142 
4143 	return rc;
4144 }
4145 
4146 enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
4147 						struct ecore_ptt *p_ptt)
4148 {
4149 	u32 mcp_resp, mcp_param, features;
4150 
4151 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
4152 		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
4153 
4154 	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
4155 			     features, &mcp_resp, &mcp_param);
4156 }
4157
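/*
 * Editorial sketch, not part of the original driver: the capabilities
 * handshake -- advertise the driver-side features first, then read back
 * what the MFW supports into p_hwfn->mcp_info->capabilities.  The guard
 * and function name are invented for illustration.
 */
#ifdef ECORE_MCP_EXAMPLES
static enum _ecore_status_t
example_capabilities_handshake(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_set_capabilities(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_mcp_get_capabilities(p_hwfn, p_ptt);
}
#endif /* ECORE_MCP_EXAMPLES */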