1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <linux/delay.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/string.h>
41 #include <linux/etherdevice.h>
42 #include "qed.h"
43 #include "qed_cxt.h"
44 #include "qed_dcbx.h"
45 #include "qed_hsi.h"
46 #include "qed_hw.h"
47 #include "qed_mcp.h"
48 #include "qed_reg_addr.h"
49 #include "qed_sriov.h"
50 
51 #define GRCBASE_MCP     0xe00000
52 
53 #define QED_MCP_RESP_ITER_US	10
54 
55 #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
56 #define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
57 
58 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
59 	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
60 	       _val)
61 
62 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
63 	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
64 
65 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
66 	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
67 		     offsetof(struct public_drv_mb, _field), _val)
68 
69 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
70 	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
71 		     offsetof(struct public_drv_mb, _field))
72 
73 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
74 		  DRV_ID_PDA_COMP_VER_SHIFT)
75 
76 #define MCP_BYTES_PER_MBIT_SHIFT 17
77 
78 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
79 {
80 	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
81 		return false;
82 	return true;
83 }
84 
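/* Resolve the address of this PF's port section within the MCP public SHMEM,
 * based on the PUBLIC_PORT offsize entry.
 */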
85 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
86 {
87 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
88 					PUBLIC_PORT);
89 	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
90 
91 	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
92 						   MFW_PORT(p_hwfn));
93 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
94 		   "port_addr = 0x%x, port_id 0x%02x\n",
95 		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
96 }
97 
98 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
99 {
100 	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
101 	u32 tmp, i;
102 
103 	if (!p_hwfn->mcp_info->public_base)
104 		return;
105 
106 	for (i = 0; i < length; i++) {
107 		tmp = qed_rd(p_hwfn, p_ptt,
108 			     p_hwfn->mcp_info->mfw_mb_addr +
109 			     (i << 2) + sizeof(u32));
110 
111 		/* The MB data is actually BE; Need to force it to cpu */
112 		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
113 			be32_to_cpu((__force __be32)tmp);
114 	}
115 }
116 
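/* Element tracking a single in-flight mailbox command on the cmd_list.
 * The MFW response is matched to it by the expected sequence number.
 */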
117 struct qed_mcp_cmd_elem {
118 	struct list_head list;
119 	struct qed_mcp_mb_params *p_mb_params;
120 	u16 expected_seq_num;
121 	bool b_is_completed;
122 };
123 
124 /* Must be called while cmd_lock is acquired */
125 static struct qed_mcp_cmd_elem *
126 qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
127 		     struct qed_mcp_mb_params *p_mb_params,
128 		     u16 expected_seq_num)
129 {
130 	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
131 
132 	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
133 	if (!p_cmd_elem)
134 		goto out;
135 
136 	p_cmd_elem->p_mb_params = p_mb_params;
137 	p_cmd_elem->expected_seq_num = expected_seq_num;
138 	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
139 out:
140 	return p_cmd_elem;
141 }
142 
143 /* Must be called while cmd_lock is acquired */
144 static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
145 				 struct qed_mcp_cmd_elem *p_cmd_elem)
146 {
147 	list_del(&p_cmd_elem->list);
148 	kfree(p_cmd_elem);
149 }
150 
151 /* Must be called while cmd_lock is acquired */
152 static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
153 						     u16 seq_num)
154 {
155 	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
156 
157 	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
158 		if (p_cmd_elem->expected_seq_num == seq_num)
159 			return p_cmd_elem;
160 	}
161 
162 	return NULL;
163 }
164 
165 int qed_mcp_free(struct qed_hwfn *p_hwfn)
166 {
167 	if (p_hwfn->mcp_info) {
168 		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
169 
170 		kfree(p_hwfn->mcp_info->mfw_mb_cur);
171 		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
172 
173 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
174 		list_for_each_entry_safe(p_cmd_elem,
175 					 p_tmp,
176 					 &p_hwfn->mcp_info->cmd_list, list) {
177 			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
178 		}
179 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
180 	}
181 
182 	kfree(p_hwfn->mcp_info);
183 	p_hwfn->mcp_info = NULL;
184 
185 	return 0;
186 }
187 
188 /* Maximum of 1 sec to wait for the SHMEM ready indication */
189 #define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
190 #define QED_MCP_SHMEM_RDY_ITER_MS	50
191 
192 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
193 {
194 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
195 	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
196 	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
197 	u32 drv_mb_offsize, mfw_mb_offsize;
198 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
199 
200 	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
201 	if (!p_info->public_base) {
202 		DP_NOTICE(p_hwfn,
203 			  "The address of the MCP scratch-pad is not configured\n");
204 		return -EINVAL;
205 	}
206 
207 	p_info->public_base |= GRCBASE_MCP;
208 
209 	/* Get the MFW MB address and number of supported messages */
210 	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
211 				SECTION_OFFSIZE_ADDR(p_info->public_base,
212 						     PUBLIC_MFW_MB));
213 	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
214 	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
215 					    p_info->mfw_mb_addr +
216 					    offsetof(struct public_mfw_mb,
217 						     sup_msgs));
218 
219 	/* The driver can notify that there was an MCP reset, and might read the
220 	 * SHMEM values before the MFW has completed initializing them.
221 	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
222 	 * data ready indication.
223 	 */
224 	while (!p_info->mfw_mb_length && --cnt) {
225 		msleep(msec);
226 		p_info->mfw_mb_length =
227 			(u16)qed_rd(p_hwfn, p_ptt,
228 				    p_info->mfw_mb_addr +
229 				    offsetof(struct public_mfw_mb, sup_msgs));
230 	}
231 
232 	if (!cnt) {
233 		DP_NOTICE(p_hwfn,
234 			  "Failed to get the SHMEM ready notification after %d msec\n",
235 			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
236 		return -EBUSY;
237 	}
238 
239 	/* Calculate the driver and MFW mailbox address */
240 	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
241 				SECTION_OFFSIZE_ADDR(p_info->public_base,
242 						     PUBLIC_DRV_MB));
243 	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
244 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
245 		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
246 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
247 
248 	/* Get the current driver mailbox sequence before sending
249 	 * the first command
250 	 */
251 	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
252 			     DRV_MSG_SEQ_NUMBER_MASK;
253 
254 	/* Get current FW pulse sequence */
255 	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
256 				DRV_PULSE_SEQ_MASK;
257 
258 	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
259 
260 	return 0;
261 }
262 
263 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
264 {
265 	struct qed_mcp_info *p_info;
266 	u32 size;
267 
268 	/* Allocate mcp_info structure */
269 	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
270 	if (!p_hwfn->mcp_info)
271 		goto err;
272 	p_info = p_hwfn->mcp_info;
273 
274 	/* Initialize the MFW spinlock */
275 	spin_lock_init(&p_info->cmd_lock);
276 	spin_lock_init(&p_info->link_lock);
277 
278 	INIT_LIST_HEAD(&p_info->cmd_list);
279 
280 	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
281 		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
282 		/* Do not free mcp_info here, since public_base indicates that
283 		 * the MCP is not initialized
284 		 */
285 		return 0;
286 	}
287 
288 	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
289 	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
290 	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
291 	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
292 		goto err;
293 
294 	return 0;
295 
296 err:
297 	qed_mcp_free(p_hwfn);
298 	return -ENOMEM;
299 }
300 
301 static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
302 				   struct qed_ptt *p_ptt)
303 {
304 	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
305 
306 	/* Use MCP history register to check if MCP reset occurred between init
307 	 * time and now.
308 	 */
309 	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
310 		DP_VERBOSE(p_hwfn,
311 			   QED_MSG_SP,
312 			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
313 			   p_hwfn->mcp_info->mcp_hist, generic_por_0);
314 
315 		qed_load_mcp_offsets(p_hwfn, p_ptt);
316 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
317 	}
318 }
319 
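/* Request an MCP reset via the DRV_MSG_CODE_MCP_RESET mailbox command and
 * poll MISCS_REG_GENERIC_POR_0 until its value changes, indicating that the
 * MCP has gone through reset.
 */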
320 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
321 {
322 	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
323 	int rc = 0;
324 
325 	if (p_hwfn->mcp_info->b_block_cmd) {
326 		DP_NOTICE(p_hwfn,
327 			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
328 		return -EBUSY;
329 	}
330 
331 	/* Ensure that only a single thread is accessing the mailbox */
332 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
333 
334 	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
335 
336 	/* Set drv command along with the updated sequence */
337 	qed_mcp_reread_offsets(p_hwfn, p_ptt);
338 	seq = ++p_hwfn->mcp_info->drv_mb_seq;
339 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
340 
341 	do {
342 		/* Wait for MFW response */
343 		udelay(delay);
344 		/* Give the FW up to 500 msec (50*1000*10usec) */
345 	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
346 					      MISCS_REG_GENERIC_POR_0)) &&
347 		 (cnt++ < QED_MCP_RESET_RETRIES));
348 
349 	if (org_mcp_reset_seq !=
350 	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
351 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
352 			   "MCP was reset after %d usec\n", cnt * delay);
353 	} else {
354 		DP_ERR(p_hwfn, "Failed to reset MCP\n");
355 		rc = -EAGAIN;
356 	}
357 
358 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
359 
360 	return rc;
361 }
362 
363 /* Must be called while cmd_lock is acquired */
364 static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
365 {
366 	struct qed_mcp_cmd_elem *p_cmd_elem;
367 
368 	/* There is at most one pending command at any given time, and if it
369 	 * exists - it is placed at the HEAD of the list.
370 	 */
371 	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
372 		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
373 					      struct qed_mcp_cmd_elem, list);
374 		return !p_cmd_elem->b_is_completed;
375 	}
376 
377 	return false;
378 }
379 
380 /* Must be called while cmd_lock is acquired */
381 static int
382 qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
383 {
384 	struct qed_mcp_mb_params *p_mb_params;
385 	struct qed_mcp_cmd_elem *p_cmd_elem;
386 	u32 mcp_resp;
387 	u16 seq_num;
388 
389 	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
390 	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
391 
392 	/* Return if no new non-handled response has been received */
393 	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
394 		return -EAGAIN;
395 
396 	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
397 	if (!p_cmd_elem) {
398 		DP_ERR(p_hwfn,
399 		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
400 		       seq_num);
401 		return -EINVAL;
402 	}
403 
404 	p_mb_params = p_cmd_elem->p_mb_params;
405 
406 	/* Get the MFW response along with the sequence number */
407 	p_mb_params->mcp_resp = mcp_resp;
408 
409 	/* Get the MFW param */
410 	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
411 
412 	/* Get the union data */
413 	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
414 		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
415 				      offsetof(struct public_drv_mb,
416 					       union_data);
417 		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
418 				union_data_addr, p_mb_params->data_dst_size);
419 	}
420 
421 	p_cmd_elem->b_is_completed = true;
422 
423 	return 0;
424 }
425 
426 /* Must be called while cmd_lock is acquired */
427 static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
428 				    struct qed_ptt *p_ptt,
429 				    struct qed_mcp_mb_params *p_mb_params,
430 				    u16 seq_num)
431 {
432 	union drv_union_data union_data;
433 	u32 union_data_addr;
434 
435 	/* Set the union data */
436 	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
437 			  offsetof(struct public_drv_mb, union_data);
438 	memset(&union_data, 0, sizeof(union_data));
439 	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
440 		memcpy(&union_data, p_mb_params->p_data_src,
441 		       p_mb_params->data_src_size);
442 	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
443 		      sizeof(union_data));
444 
445 	/* Set the drv param */
446 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
447 
448 	/* Set the drv command along with the sequence number */
449 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
450 
451 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
452 		   "MFW mailbox: command 0x%08x param 0x%08x\n",
453 		   (p_mb_params->cmd | seq_num), p_mb_params->param);
454 }
455 
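/* Block or unblock the sending of further mailbox commands to the MFW,
 * e.g. after the MFW has failed to respond to a command.
 */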
456 static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
457 {
458 	p_hwfn->mcp_info->b_block_cmd = block_cmd;
459 
460 	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
461 		block_cmd ? "Block" : "Unblock");
462 }
463 
464 static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
465 				   struct qed_ptt *p_ptt)
466 {
467 	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
468 	u32 delay = QED_MCP_RESP_ITER_US;
469 
470 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
471 	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
472 	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
473 	udelay(delay);
474 	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
475 	udelay(delay);
476 	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
477 
478 	DP_NOTICE(p_hwfn,
479 		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
480 		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
481 }
482 
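/* Send a mailbox command and wait for the MFW response:
 * 1. Wait until no other command is pending in the mailbox.
 * 2. Write the union data, the param and the command (with its sequence
 *    number) to the driver mailbox in SHMEM.
 * 3. Poll (or sleep, if allowed) until the response for this sequence number
 *    arrives, then read back the response, param and union data.
 */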
483 static int
484 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
485 		       struct qed_ptt *p_ptt,
486 		       struct qed_mcp_mb_params *p_mb_params,
487 		       u32 max_retries, u32 usecs)
488 {
489 	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
490 	struct qed_mcp_cmd_elem *p_cmd_elem;
491 	u16 seq_num;
492 	int rc = 0;
493 
494 	/* Wait until the mailbox is non-occupied */
495 	do {
496 		/* Exit the loop if there is no pending command, or if the
497 		 * pending command is completed during this iteration.
498 		 * The spinlock stays locked until the command is sent.
499 		 */
500 
501 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
502 
503 		if (!qed_mcp_has_pending_cmd(p_hwfn))
504 			break;
505 
506 		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
507 		if (!rc)
508 			break;
509 		else if (rc != -EAGAIN)
510 			goto err;
511 
512 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
513 
514 		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
515 			msleep(msecs);
516 		else
517 			udelay(usecs);
518 	} while (++cnt < max_retries);
519 
520 	if (cnt >= max_retries) {
521 		DP_NOTICE(p_hwfn,
522 			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
523 			  p_mb_params->cmd, p_mb_params->param);
524 		return -EAGAIN;
525 	}
526 
527 	/* Send the mailbox command */
528 	qed_mcp_reread_offsets(p_hwfn, p_ptt);
529 	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
530 	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
531 	if (!p_cmd_elem) {
532 		rc = -ENOMEM;
533 		goto err;
534 	}
535 
536 	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
537 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
538 
539 	/* Wait for the MFW response */
540 	do {
541 		/* Exit the loop if the command is already completed, or if the
542 		 * command is completed during this iteration.
543 		 * The spinlock stays locked until the list element is removed.
544 		 */
545 
546 		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
547 			msleep(msecs);
548 		else
549 			udelay(usecs);
550 
551 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
552 
553 		if (p_cmd_elem->b_is_completed)
554 			break;
555 
556 		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
557 		if (!rc)
558 			break;
559 		else if (rc != -EAGAIN)
560 			goto err;
561 
562 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
563 	} while (++cnt < max_retries);
564 
565 	if (cnt >= max_retries) {
566 		DP_NOTICE(p_hwfn,
567 			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
568 			  p_mb_params->cmd, p_mb_params->param);
569 		qed_mcp_print_cpu_info(p_hwfn, p_ptt);
570 
571 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
572 		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
573 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
574 
575 		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
576 			qed_mcp_cmd_set_blocking(p_hwfn, true);
577 
578 		return -EAGAIN;
579 	}
580 
581 	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
582 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
583 
584 	DP_VERBOSE(p_hwfn,
585 		   QED_MSG_SP,
586 		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
587 		   p_mb_params->mcp_resp,
588 		   p_mb_params->mcp_param,
589 		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);
590 
591 	/* Clear the sequence number from the MFW response */
592 	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
593 
594 	return 0;
595 
596 err:
597 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
598 	return rc;
599 }
600 
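/* Validate the request against the union data size and the current mailbox
 * state, then issue it. When sleeping is allowed, the polling interval is
 * scaled from microseconds to milliseconds with a matching retry count.
 */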
601 static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
602 				 struct qed_ptt *p_ptt,
603 				 struct qed_mcp_mb_params *p_mb_params)
604 {
605 	size_t union_data_size = sizeof(union drv_union_data);
606 	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
607 	u32 usecs = QED_MCP_RESP_ITER_US;
608 
609 	/* MCP not initialized */
610 	if (!qed_mcp_is_init(p_hwfn)) {
611 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
612 		return -EBUSY;
613 	}
614 
615 	if (p_hwfn->mcp_info->b_block_cmd) {
616 		DP_NOTICE(p_hwfn,
617 			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
618 			  p_mb_params->cmd, p_mb_params->param);
619 		return -EBUSY;
620 	}
621 
622 	if (p_mb_params->data_src_size > union_data_size ||
623 	    p_mb_params->data_dst_size > union_data_size) {
624 		DP_ERR(p_hwfn,
625 		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
626 		       p_mb_params->data_src_size,
627 		       p_mb_params->data_dst_size, union_data_size);
628 		return -EINVAL;
629 	}
630 
631 	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
632 		max_retries = DIV_ROUND_UP(max_retries, 1000);
633 		usecs *= 1000;
634 	}
635 
636 	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
637 				      usecs);
638 }
639 
640 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
641 		struct qed_ptt *p_ptt,
642 		u32 cmd,
643 		u32 param,
644 		u32 *o_mcp_resp,
645 		u32 *o_mcp_param)
646 {
647 	struct qed_mcp_mb_params mb_params;
648 	int rc;
649 
650 	memset(&mb_params, 0, sizeof(mb_params));
651 	mb_params.cmd = cmd;
652 	mb_params.param = param;
653 
654 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
655 	if (rc)
656 		return rc;
657 
658 	*o_mcp_resp = mb_params.mcp_resp;
659 	*o_mcp_param = mb_params.mcp_param;
660 
661 	return 0;
662 }
663 
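/* Issue an NVM write command, passing the data buffer to the MFW through the
 * mailbox union data area. The cached nvm_info is invalidated afterwards.
 */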
664 static int
665 qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
666 		   struct qed_ptt *p_ptt,
667 		   u32 cmd,
668 		   u32 param,
669 		   u32 *o_mcp_resp,
670 		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
671 {
672 	struct qed_mcp_mb_params mb_params;
673 	int rc;
674 
675 	memset(&mb_params, 0, sizeof(mb_params));
676 	mb_params.cmd = cmd;
677 	mb_params.param = param;
678 	mb_params.p_data_src = i_buf;
679 	mb_params.data_src_size = (u8)i_txn_size;
680 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
681 	if (rc)
682 		return rc;
683 
684 	*o_mcp_resp = mb_params.mcp_resp;
685 	*o_mcp_param = mb_params.mcp_param;
686 
687 	/* nvm_info needs to be updated */
688 	p_hwfn->nvm_info.valid = false;
689 
690 	return 0;
691 }
692 
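/* Issue an NVM read command. The MFW returns the data through the mailbox
 * union data area, and the actual transaction size through the param field.
 */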
693 int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
694 		       struct qed_ptt *p_ptt,
695 		       u32 cmd,
696 		       u32 param,
697 		       u32 *o_mcp_resp,
698 		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
699 {
700 	struct qed_mcp_mb_params mb_params;
701 	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
702 	int rc;
703 
704 	memset(&mb_params, 0, sizeof(mb_params));
705 	mb_params.cmd = cmd;
706 	mb_params.param = param;
707 	mb_params.p_data_dst = raw_data;
708 
709 	/* Use the maximal value since the actual one is part of the response */
710 	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
711 
712 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
713 	if (rc)
714 		return rc;
715 
716 	*o_mcp_resp = mb_params.mcp_resp;
717 	*o_mcp_param = mb_params.mcp_param;
718 
719 	*o_txn_size = *o_mcp_param;
720 	memcpy(o_buf, raw_data, *o_txn_size);
721 
722 	return 0;
723 }
724 
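/* Decide whether a force load request may be sent, based on the override
 * policy and on the roles of the loading and the existing drivers.
 */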
725 static bool
726 qed_mcp_can_force_load(u8 drv_role,
727 		       u8 exist_drv_role,
728 		       enum qed_override_force_load override_force_load)
729 {
730 	bool can_force_load = false;
731 
732 	switch (override_force_load) {
733 	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
734 		can_force_load = true;
735 		break;
736 	case QED_OVERRIDE_FORCE_LOAD_NEVER:
737 		can_force_load = false;
738 		break;
739 	default:
740 		can_force_load = (drv_role == DRV_ROLE_OS &&
741 				  exist_drv_role == DRV_ROLE_PREBOOT) ||
742 				 (drv_role == DRV_ROLE_KDUMP &&
743 				  exist_drv_role == DRV_ROLE_OS);
744 		break;
745 	}
746 
747 	return can_force_load;
748 }
749 
750 static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
751 				   struct qed_ptt *p_ptt)
752 {
753 	u32 resp = 0, param = 0;
754 	int rc;
755 
756 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
757 			 &resp, &param);
758 	if (rc)
759 		DP_NOTICE(p_hwfn,
760 			  "Failed to send cancel load request, rc = %d\n", rc);
761 
762 	return rc;
763 }
764 
765 #define CONFIG_QEDE_BITMAP_IDX		BIT(0)
766 #define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
767 #define CONFIG_QEDR_BITMAP_IDX		BIT(2)
768 #define CONFIG_QEDF_BITMAP_IDX		BIT(4)
769 #define CONFIG_QEDI_BITMAP_IDX		BIT(5)
770 #define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
771 
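/* Build a bitmap of the qed-related kernel config options that are enabled.
 * It is reported to the MFW as drv_ver_1 in the load request.
 */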
772 static u32 qed_get_config_bitmap(void)
773 {
774 	u32 config_bitmap = 0x0;
775 
776 	if (IS_ENABLED(CONFIG_QEDE))
777 		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
778 
779 	if (IS_ENABLED(CONFIG_QED_SRIOV))
780 		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
781 
782 	if (IS_ENABLED(CONFIG_QED_RDMA))
783 		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
784 
785 	if (IS_ENABLED(CONFIG_QED_FCOE))
786 		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
787 
788 	if (IS_ENABLED(CONFIG_QED_ISCSI))
789 		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
790 
791 	if (IS_ENABLED(CONFIG_QED_LL2))
792 		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
793 
794 	return config_bitmap;
795 }
796 
797 struct qed_load_req_in_params {
798 	u8 hsi_ver;
799 #define QED_LOAD_REQ_HSI_VER_DEFAULT	0
800 #define QED_LOAD_REQ_HSI_VER_1		1
801 	u32 drv_ver_0;
802 	u32 drv_ver_1;
803 	u32 fw_ver;
804 	u8 drv_role;
805 	u8 timeout_val;
806 	u8 force_cmd;
807 	bool avoid_eng_reset;
808 };
809 
810 struct qed_load_req_out_params {
811 	u32 load_code;
812 	u32 exist_drv_ver_0;
813 	u32 exist_drv_ver_1;
814 	u32 exist_fw_ver;
815 	u8 exist_drv_role;
816 	u8 mfw_hsi_ver;
817 	bool drv_exists;
818 };
819 
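/* Send a single LOAD_REQ mailbox command built from the input parameters and
 * parse the MFW response into the output parameters.
 */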
820 static int
821 __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
822 		   struct qed_ptt *p_ptt,
823 		   struct qed_load_req_in_params *p_in_params,
824 		   struct qed_load_req_out_params *p_out_params)
825 {
826 	struct qed_mcp_mb_params mb_params;
827 	struct load_req_stc load_req;
828 	struct load_rsp_stc load_rsp;
829 	u32 hsi_ver;
830 	int rc;
831 
832 	memset(&load_req, 0, sizeof(load_req));
833 	load_req.drv_ver_0 = p_in_params->drv_ver_0;
834 	load_req.drv_ver_1 = p_in_params->drv_ver_1;
835 	load_req.fw_ver = p_in_params->fw_ver;
836 	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
837 	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
838 			  p_in_params->timeout_val);
839 	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
840 			  p_in_params->force_cmd);
841 	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
842 			  p_in_params->avoid_eng_reset);
843 
844 	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
845 		  DRV_ID_MCP_HSI_VER_CURRENT :
846 		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
847 
848 	memset(&mb_params, 0, sizeof(mb_params));
849 	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
850 	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
851 	mb_params.p_data_src = &load_req;
852 	mb_params.data_src_size = sizeof(load_req);
853 	mb_params.p_data_dst = &load_rsp;
854 	mb_params.data_dst_size = sizeof(load_rsp);
855 	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
856 
857 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
858 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
859 		   mb_params.param,
860 		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
861 		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
862 		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
863 		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
864 
865 	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
866 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
867 			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
868 			   load_req.drv_ver_0,
869 			   load_req.drv_ver_1,
870 			   load_req.fw_ver,
871 			   load_req.misc0,
872 			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
873 			   QED_MFW_GET_FIELD(load_req.misc0,
874 					     LOAD_REQ_LOCK_TO),
875 			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
876 			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
877 	}
878 
879 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
880 	if (rc) {
881 		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
882 		return rc;
883 	}
884 
885 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
886 		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
887 	p_out_params->load_code = mb_params.mcp_resp;
888 
889 	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
890 	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
891 		DP_VERBOSE(p_hwfn,
892 			   QED_MSG_SP,
893 			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
894 			   load_rsp.drv_ver_0,
895 			   load_rsp.drv_ver_1,
896 			   load_rsp.fw_ver,
897 			   load_rsp.misc0,
898 			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
899 			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
900 			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
901 
902 		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
903 		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
904 		p_out_params->exist_fw_ver = load_rsp.fw_ver;
905 		p_out_params->exist_drv_role =
906 		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
907 		p_out_params->mfw_hsi_ver =
908 		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
909 		p_out_params->drv_exists =
910 		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
911 		    LOAD_RSP_FLAGS0_DRV_EXISTS;
912 	}
913 
914 	return 0;
915 }
916 
917 static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
918 				enum qed_drv_role drv_role,
919 				u8 *p_mfw_drv_role)
920 {
921 	switch (drv_role) {
922 	case QED_DRV_ROLE_OS:
923 		*p_mfw_drv_role = DRV_ROLE_OS;
924 		break;
925 	case QED_DRV_ROLE_KDUMP:
926 		*p_mfw_drv_role = DRV_ROLE_KDUMP;
927 		break;
928 	default:
929 		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
930 		return -EINVAL;
931 	}
932 
933 	return 0;
934 }
935 
936 enum qed_load_req_force {
937 	QED_LOAD_REQ_FORCE_NONE,
938 	QED_LOAD_REQ_FORCE_PF,
939 	QED_LOAD_REQ_FORCE_ALL,
940 };
941 
942 static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
944 				  enum qed_load_req_force force_cmd,
945 				  u8 *p_mfw_force_cmd)
946 {
947 	switch (force_cmd) {
948 	case QED_LOAD_REQ_FORCE_NONE:
949 		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
950 		break;
951 	case QED_LOAD_REQ_FORCE_PF:
952 		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
953 		break;
954 	case QED_LOAD_REQ_FORCE_ALL:
955 		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
956 		break;
957 	}
958 }
959 
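/* Negotiate the load request with the MFW. If the MFW only supports the old
 * interface (HSI version 1) or requires a force load, a revised request is
 * sent before the final load response is handled.
 */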
960 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
961 		     struct qed_ptt *p_ptt,
962 		     struct qed_load_req_params *p_params)
963 {
964 	struct qed_load_req_out_params out_params;
965 	struct qed_load_req_in_params in_params;
966 	u8 mfw_drv_role, mfw_force_cmd;
967 	int rc;
968 
969 	memset(&in_params, 0, sizeof(in_params));
970 	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
971 	in_params.drv_ver_0 = QED_VERSION;
972 	in_params.drv_ver_1 = qed_get_config_bitmap();
973 	in_params.fw_ver = STORM_FW_VERSION;
974 	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
975 	if (rc)
976 		return rc;
977 
978 	in_params.drv_role = mfw_drv_role;
979 	in_params.timeout_val = p_params->timeout_val;
980 	qed_get_mfw_force_cmd(p_hwfn,
981 			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
982 
983 	in_params.force_cmd = mfw_force_cmd;
984 	in_params.avoid_eng_reset = p_params->avoid_eng_reset;
985 
986 	memset(&out_params, 0, sizeof(out_params));
987 	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
988 	if (rc)
989 		return rc;
990 
991 	/* First handle cases where another load request should/might be sent:
992 	 * - MFW expects the old interface [HSI version = 1]
993 	 * - MFW responds that a force load request is required
994 	 */
995 	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
996 		DP_INFO(p_hwfn,
997 			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
998 
999 		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
1000 		memset(&out_params, 0, sizeof(out_params));
1001 		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
1002 		if (rc)
1003 			return rc;
1004 	} else if (out_params.load_code ==
1005 		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1006 		if (qed_mcp_can_force_load(in_params.drv_role,
1007 					   out_params.exist_drv_role,
1008 					   p_params->override_force_load)) {
1009 			DP_INFO(p_hwfn,
1010 				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
1011 				in_params.drv_role, in_params.fw_ver,
1012 				in_params.drv_ver_0, in_params.drv_ver_1,
1013 				out_params.exist_drv_role,
1014 				out_params.exist_fw_ver,
1015 				out_params.exist_drv_ver_0,
1016 				out_params.exist_drv_ver_1);
1017 
1018 			qed_get_mfw_force_cmd(p_hwfn,
1019 					      QED_LOAD_REQ_FORCE_ALL,
1020 					      &mfw_force_cmd);
1021 
1022 			in_params.force_cmd = mfw_force_cmd;
1023 			memset(&out_params, 0, sizeof(out_params));
1024 			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
1025 						&out_params);
1026 			if (rc)
1027 				return rc;
1028 		} else {
1029 			DP_NOTICE(p_hwfn,
1030 				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1031 				  in_params.drv_role, in_params.fw_ver,
1032 				  in_params.drv_ver_0, in_params.drv_ver_1,
1033 				  out_params.exist_drv_role,
1034 				  out_params.exist_fw_ver,
1035 				  out_params.exist_drv_ver_0,
1036 				  out_params.exist_drv_ver_1);
1037 			DP_NOTICE(p_hwfn,
1038 				  "Avoid sending a force load request to prevent disruption of active PFs\n");
1039 
1040 			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
1041 			return -EBUSY;
1042 		}
1043 	}
1044 
1045 	/* Now handle the other types of responses.
1046 	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1047 	 * expected here after the additional revised load requests were sent.
1048 	 */
1049 	switch (out_params.load_code) {
1050 	case FW_MSG_CODE_DRV_LOAD_ENGINE:
1051 	case FW_MSG_CODE_DRV_LOAD_PORT:
1052 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1053 		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
1054 		    out_params.drv_exists) {
1055 			/* The role and fw/driver version match, but the PF is
1056 			 * already loaded and has not been unloaded gracefully.
1057 			 */
1058 			DP_NOTICE(p_hwfn,
1059 				  "PF is already loaded\n");
1060 			return -EINVAL;
1061 		}
1062 		break;
1063 	default:
1064 		DP_NOTICE(p_hwfn,
1065 			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1066 			  out_params.load_code);
1067 		return -EBUSY;
1068 	}
1069 
1070 	p_params->load_code = out_params.load_code;
1071 
1072 	return 0;
1073 }
1074 
1075 int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1076 {
1077 	u32 resp = 0, param = 0;
1078 	int rc;
1079 
1080 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1081 			 &param);
1082 	if (rc) {
1083 		DP_NOTICE(p_hwfn,
1084 			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1085 		return rc;
1086 	}
1087 
1088 	/* Check if there is a DID mismatch between nvm-cfg/efuse */
1089 	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1090 		DP_NOTICE(p_hwfn,
1091 			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1092 
1093 	return 0;
1094 }
1095 
1096 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1097 {
1098 	struct qed_mcp_mb_params mb_params;
1099 	u32 wol_param;
1100 
1101 	switch (p_hwfn->cdev->wol_config) {
1102 	case QED_OV_WOL_DISABLED:
1103 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1104 		break;
1105 	case QED_OV_WOL_ENABLED:
1106 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1107 		break;
1108 	default:
1109 		DP_NOTICE(p_hwfn,
1110 			  "Unknown WoL configuration %02x\n",
1111 			  p_hwfn->cdev->wol_config);
1112 		/* Fallthrough */
1113 	case QED_OV_WOL_DEFAULT:
1114 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1115 	}
1116 
1117 	memset(&mb_params, 0, sizeof(mb_params));
1118 	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1119 	mb_params.param = wol_param;
1120 	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
1121 
1122 	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1123 }
1124 
1125 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1126 {
1127 	struct qed_mcp_mb_params mb_params;
1128 	struct mcp_mac wol_mac;
1129 
1130 	memset(&mb_params, 0, sizeof(mb_params));
1131 	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1132 
1133 	/* Set the primary MAC if WoL is enabled */
1134 	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
1135 		u8 *p_mac = p_hwfn->cdev->wol_mac;
1136 
1137 		memset(&wol_mac, 0, sizeof(wol_mac));
1138 		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1139 		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1140 				    p_mac[4] << 8 | p_mac[5];
1141 
1142 		DP_VERBOSE(p_hwfn,
1143 			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
1144 			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
1145 			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
1146 
1147 		mb_params.p_data_src = &wol_mac;
1148 		mb_params.data_src_size = sizeof(wol_mac);
1149 	}
1150 
1151 	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1152 }
1153 
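/* Read the per-path bitmap of FLR-ed VFs from SHMEM and, if any are marked,
 * schedule the IOV workqueue to handle them.
 */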
1154 static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
1155 				  struct qed_ptt *p_ptt)
1156 {
1157 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1158 					PUBLIC_PATH);
1159 	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1160 	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1161 				     QED_PATH_ID(p_hwfn));
1162 	u32 disabled_vfs[VF_MAX_STATIC / 32];
1163 	int i;
1164 
1165 	DP_VERBOSE(p_hwfn,
1166 		   QED_MSG_SP,
1167 		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1168 		   mfw_path_offsize, path_addr);
1169 
1170 	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1171 		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
1172 					 path_addr +
1173 					 offsetof(struct public_path,
1174 						  mcp_vf_disabled) +
1175 					 sizeof(u32) * i);
1176 		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1177 			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1178 			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1179 	}
1180 
1181 	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1182 		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
1183 }
1184 
1185 int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
1186 		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
1187 {
1188 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1189 					PUBLIC_FUNC);
1190 	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
1191 	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1192 				     MCP_PF_ID(p_hwfn));
1193 	struct qed_mcp_mb_params mb_params;
1194 	int rc;
1195 	int i;
1196 
1197 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1198 		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1199 			   "Acking VFs [%08x,...,%08x] - %08x\n",
1200 			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1201 
1202 	memset(&mb_params, 0, sizeof(mb_params));
1203 	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1204 	mb_params.p_data_src = vfs_to_ack;
1205 	mb_params.data_src_size = VF_MAX_STATIC / 8;
1206 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1207 	if (rc) {
1208 		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
1209 		return -EBUSY;
1210 	}
1211 
1212 	/* Clear the ACK bits */
1213 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1214 		qed_wr(p_hwfn, p_ptt,
1215 		       func_addr +
1216 		       offsetof(struct public_func, drv_ack_vf_disabled) +
1217 		       i * sizeof(u32), 0);
1218 
1219 	return rc;
1220 }
1221 
1222 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
1223 					      struct qed_ptt *p_ptt)
1224 {
1225 	u32 transceiver_state;
1226 
1227 	transceiver_state = qed_rd(p_hwfn, p_ptt,
1228 				   p_hwfn->mcp_info->port_addr +
1229 				   offsetof(struct public_port,
1230 					    transceiver_data));
1231 
1232 	DP_VERBOSE(p_hwfn,
1233 		   (NETIF_MSG_HW | QED_MSG_SP),
1234 		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1235 		   transceiver_state,
1236 		   (u32)(p_hwfn->mcp_info->port_addr +
1237 			  offsetof(struct public_port, transceiver_data)));
1238 
1239 	transceiver_state = GET_FIELD(transceiver_state,
1240 				      ETH_TRANSCEIVER_STATE);
1241 
1242 	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1243 		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
1244 	else
1245 		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
1246 }
1247 
1248 static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
1249 				    struct qed_ptt *p_ptt,
1250 				    struct qed_mcp_link_state *p_link)
1251 {
1252 	u32 eee_status, val;
1253 
1254 	p_link->eee_adv_caps = 0;
1255 	p_link->eee_lp_adv_caps = 0;
1256 	eee_status = qed_rd(p_hwfn,
1257 			    p_ptt,
1258 			    p_hwfn->mcp_info->port_addr +
1259 			    offsetof(struct public_port, eee_status));
1260 	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1261 	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1262 	if (val & EEE_1G_ADV)
1263 		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
1264 	if (val & EEE_10G_ADV)
1265 		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
1266 	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1267 	if (val & EEE_1G_ADV)
1268 		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
1269 	if (val & EEE_10G_ADV)
1270 		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
1271 }
1272 
1273 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
1274 				  struct qed_ptt *p_ptt,
1275 				  struct public_func *p_data, int pfid)
1276 {
1277 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1278 					PUBLIC_FUNC);
1279 	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1280 	u32 func_addr;
1281 	u32 i, size;
1282 
1283 	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1284 	memset(p_data, 0, sizeof(*p_data));
1285 
1286 	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
1287 	for (i = 0; i < size / sizeof(u32); i++)
1288 		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
1289 					    func_addr + (i << 2));
1290 	return size;
1291 }
1292 
1293 static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
1294 				  struct public_func *p_shmem_info)
1295 {
1296 	struct qed_mcp_function_info *p_info;
1297 
1298 	p_info = &p_hwfn->mcp_info->func_info;
1299 
1300 	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
1301 						  FUNC_MF_CFG_MIN_BW);
1302 	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1303 		DP_INFO(p_hwfn,
1304 			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
1305 			p_info->bandwidth_min);
1306 		p_info->bandwidth_min = 1;
1307 	}
1308 
1309 	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
1310 						  FUNC_MF_CFG_MAX_BW);
1311 	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1312 		DP_INFO(p_hwfn,
1313 			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
1314 			p_info->bandwidth_max);
1315 		p_info->bandwidth_max = 100;
1316 	}
1317 }
1318 
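/* Process a link change notification (or a link reset): read the link status
 * from SHMEM, derive speed, duplex, partner abilities and flow control, apply
 * the bandwidth configuration and propagate the update to the rest of the
 * driver.
 */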
1319 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1320 				       struct qed_ptt *p_ptt, bool b_reset)
1321 {
1322 	struct qed_mcp_link_state *p_link;
1323 	u8 max_bw, min_bw;
1324 	u32 status = 0;
1325 
1326 	/* Prevent SW/attentions from doing this at the same time */
1327 	spin_lock_bh(&p_hwfn->mcp_info->link_lock);
1328 
1329 	p_link = &p_hwfn->mcp_info->link_output;
1330 	memset(p_link, 0, sizeof(*p_link));
1331 	if (!b_reset) {
1332 		status = qed_rd(p_hwfn, p_ptt,
1333 				p_hwfn->mcp_info->port_addr +
1334 				offsetof(struct public_port, link_status));
1335 		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
1336 			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1337 			   status,
1338 			   (u32)(p_hwfn->mcp_info->port_addr +
1339 				 offsetof(struct public_port, link_status)));
1340 	} else {
1341 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1342 			   "Resetting link indications\n");
1343 		goto out;
1344 	}
1345 
1346 	if (p_hwfn->b_drv_link_init) {
1347 		/* Link indication with modern MFW arrives as per-PF
1348 		 * indication.
1349 		 */
1350 		if (p_hwfn->mcp_info->capabilities &
1351 		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1352 			struct public_func shmem_info;
1353 
1354 			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1355 					       MCP_PF_ID(p_hwfn));
1356 			p_link->link_up = !!(shmem_info.status &
1357 					     FUNC_STATUS_VIRTUAL_LINK_UP);
1358 			qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1359 			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1360 				   "Virtual link_up = %d\n", p_link->link_up);
1361 		} else {
1362 			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1363 			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1364 				   "Physical link_up = %d\n", p_link->link_up);
1365 		}
1366 	} else {
1367 		p_link->link_up = false;
1368 	}
1369 
1370 	p_link->full_duplex = true;
1371 	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1372 	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1373 		p_link->speed = 100000;
1374 		break;
1375 	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1376 		p_link->speed = 50000;
1377 		break;
1378 	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1379 		p_link->speed = 40000;
1380 		break;
1381 	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1382 		p_link->speed = 25000;
1383 		break;
1384 	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1385 		p_link->speed = 20000;
1386 		break;
1387 	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1388 		p_link->speed = 10000;
1389 		break;
1390 	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1391 		p_link->full_duplex = false;
1392 	/* Fall-through */
1393 	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1394 		p_link->speed = 1000;
1395 		break;
1396 	default:
1397 		p_link->speed = 0;
1398 		p_link->link_up = 0;
1399 	}
1400 
1401 	if (p_link->link_up && p_link->speed)
1402 		p_link->line_speed = p_link->speed;
1403 	else
1404 		p_link->line_speed = 0;
1405 
1406 	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1407 	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1408 
1409 	/* Max bandwidth configuration */
1410 	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1411 
1412 	/* Min bandwidth configuration */
1413 	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1414 	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
1415 					    p_link->min_pf_rate);
1416 
1417 	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1418 	p_link->an_complete = !!(status &
1419 				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1420 	p_link->parallel_detection = !!(status &
1421 					LINK_STATUS_PARALLEL_DETECTION_USED);
1422 	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1423 
1424 	p_link->partner_adv_speed |=
1425 		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1426 		QED_LINK_PARTNER_SPEED_1G_FD : 0;
1427 	p_link->partner_adv_speed |=
1428 		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1429 		QED_LINK_PARTNER_SPEED_1G_HD : 0;
1430 	p_link->partner_adv_speed |=
1431 		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1432 		QED_LINK_PARTNER_SPEED_10G : 0;
1433 	p_link->partner_adv_speed |=
1434 		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1435 		QED_LINK_PARTNER_SPEED_20G : 0;
1436 	p_link->partner_adv_speed |=
1437 		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1438 		QED_LINK_PARTNER_SPEED_25G : 0;
1439 	p_link->partner_adv_speed |=
1440 		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1441 		QED_LINK_PARTNER_SPEED_40G : 0;
1442 	p_link->partner_adv_speed |=
1443 		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1444 		QED_LINK_PARTNER_SPEED_50G : 0;
1445 	p_link->partner_adv_speed |=
1446 		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1447 		QED_LINK_PARTNER_SPEED_100G : 0;
1448 
1449 	p_link->partner_tx_flow_ctrl_en =
1450 		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1451 	p_link->partner_rx_flow_ctrl_en =
1452 		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1453 
1454 	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1455 	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1456 		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
1457 		break;
1458 	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1459 		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
1460 		break;
1461 	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1462 		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
1463 		break;
1464 	default:
1465 		p_link->partner_adv_pause = 0;
1466 	}
1467 
1468 	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1469 
1470 	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1471 		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1472 
1473 	qed_link_update(p_hwfn, p_ptt);
1474 out:
1475 	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
1476 }
1477 
1478 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
1479 {
1480 	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1481 	struct qed_mcp_mb_params mb_params;
1482 	struct eth_phy_cfg phy_cfg;
1483 	int rc = 0;
1484 	u32 cmd;
1485 
1486 	/* Set the shmem configuration according to params */
1487 	memset(&phy_cfg, 0, sizeof(phy_cfg));
1488 	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1489 	if (!params->speed.autoneg)
1490 		phy_cfg.speed = params->speed.forced_speed;
1491 	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1492 	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1493 	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1494 	phy_cfg.adv_speed = params->speed.advertised_speeds;
1495 	phy_cfg.loopback_mode = params->loopback_mode;
1496 
1497 	/* There are MFWs that share this capability regardless of whether
1498 	 * this is feasible or not. And given that at the very least adv_caps
1499 	 * would be set internally by qed, we want to make sure LFA would
1500 	 * still work.
1501 	 */
1502 	if ((p_hwfn->mcp_info->capabilities &
1503 	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
1504 		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1505 		if (params->eee.tx_lpi_enable)
1506 			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1507 		if (params->eee.adv_caps & QED_EEE_1G_ADV)
1508 			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1509 		if (params->eee.adv_caps & QED_EEE_10G_ADV)
1510 			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1511 		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1512 				    EEE_TX_TIMER_USEC_OFFSET) &
1513 				   EEE_TX_TIMER_USEC_MASK;
1514 	}
1515 
1516 	p_hwfn->b_drv_link_init = b_up;
1517 
1518 	if (b_up) {
1519 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1520 			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
1521 			   phy_cfg.speed,
1522 			   phy_cfg.pause,
1523 			   phy_cfg.adv_speed,
1524 			   phy_cfg.loopback_mode,
1525 			   phy_cfg.feature_config_flags);
1526 	} else {
1527 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1528 			   "Resetting link\n");
1529 	}
1530 
1531 	memset(&mb_params, 0, sizeof(mb_params));
1532 	mb_params.cmd = cmd;
1533 	mb_params.p_data_src = &phy_cfg;
1534 	mb_params.data_src_size = sizeof(phy_cfg);
1535 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1536 
1537 	/* if mcp fails to respond we must abort */
1538 	if (rc) {
1539 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1540 		return rc;
1541 	}
1542 
1543 	/* Mimic link-change attention, done for several reasons:
1544 	 *  - On reset, there's no guarantee MFW would trigger
1545 	 *    an attention.
1546 	 *  - On initialization, older MFWs might not indicate link change
1547 	 *    during LFA, so we'll never get an UP indication.
1548 	 */
1549 	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1550 
1551 	return 0;
1552 }
1553 
1554 u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
1555 				 struct qed_ptt *p_ptt)
1556 {
1557 	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1558 
1559 	if (IS_VF(p_hwfn->cdev))
1560 		return -EINVAL;
1561 
1562 	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1563 						 PUBLIC_PATH);
1564 	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
1565 	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
1566 
1567 	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
1568 			       path_addr +
1569 			       offsetof(struct public_path, process_kill)) &
1570 			PROCESS_KILL_COUNTER_MASK;
1571 
1572 	return proc_kill_cnt;
1573 }
1574 
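/* Handle a process-kill notification from the MFW: disable interrupts and,
 * on the leading hwfn only, mark a recovery as being in progress and schedule
 * the recovery handler.
 */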
1575 static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
1576 					struct qed_ptt *p_ptt)
1577 {
1578 	struct qed_dev *cdev = p_hwfn->cdev;
1579 	u32 proc_kill_cnt;
1580 
1581 	/* Prevent possible attentions/interrupts during the recovery handling
1582 	 * and till its load phase, during which they will be re-enabled.
1583 	 */
1584 	qed_int_igu_disable_int(p_hwfn, p_ptt);
1585 
1586 	DP_NOTICE(p_hwfn, "Received a process kill indication\n");
1587 
1588 	/* The following operations should be done once, and thus in CMT mode
1589 	 * are carried out by only the first HW function.
1590 	 */
1591 	if (p_hwfn != QED_LEADING_HWFN(cdev))
1592 		return;
1593 
1594 	if (cdev->recov_in_prog) {
1595 		DP_NOTICE(p_hwfn,
1596 			  "Ignoring the indication since a recovery process is already in progress\n");
1597 		return;
1598 	}
1599 
1600 	cdev->recov_in_prog = true;
1601 
1602 	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
1603 	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
1604 
1605 	qed_schedule_recovery_handler(p_hwfn);
1606 }
1607 
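/* Collect the protocol statistics requested by the MFW and send them back
 * via the GET_STATS mailbox command.
 */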
1608 static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
1609 					struct qed_ptt *p_ptt,
1610 					enum MFW_DRV_MSG_TYPE type)
1611 {
1612 	enum qed_mcp_protocol_type stats_type;
1613 	union qed_mcp_protocol_stats stats;
1614 	struct qed_mcp_mb_params mb_params;
1615 	u32 hsi_param;
1616 
1617 	switch (type) {
1618 	case MFW_DRV_MSG_GET_LAN_STATS:
1619 		stats_type = QED_MCP_LAN_STATS;
1620 		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1621 		break;
1622 	case MFW_DRV_MSG_GET_FCOE_STATS:
1623 		stats_type = QED_MCP_FCOE_STATS;
1624 		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1625 		break;
1626 	case MFW_DRV_MSG_GET_ISCSI_STATS:
1627 		stats_type = QED_MCP_ISCSI_STATS;
1628 		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1629 		break;
1630 	case MFW_DRV_MSG_GET_RDMA_STATS:
1631 		stats_type = QED_MCP_RDMA_STATS;
1632 		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1633 		break;
1634 	default:
1635 		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1636 		return;
1637 	}
1638 
1639 	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
1640 
1641 	memset(&mb_params, 0, sizeof(mb_params));
1642 	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1643 	mb_params.param = hsi_param;
1644 	mb_params.p_data_src = &stats;
1645 	mb_params.data_src_size = sizeof(stats);
1646 	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1647 }
1648 
1649 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1650 {
1651 	struct qed_mcp_function_info *p_info;
1652 	struct public_func shmem_info;
1653 	u32 resp = 0, param = 0;
1654 
1655 	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1656 
1657 	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1658 
1659 	p_info = &p_hwfn->mcp_info->func_info;
1660 
1661 	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
1662 	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
1663 
1664 	/* Acknowledge the MFW */
1665 	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1666 		    &param);
1667 }
1668 
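/* Handle an S-tag update notification: refresh the function's outer VLAN
 * from SHMEM, program the NIG/DORQ registers accordingly and acknowledge
 * the MFW.
 */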
1669 static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1670 {
1671 	struct public_func shmem_info;
1672 	u32 resp = 0, param = 0;
1673 
1674 	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1675 
1676 	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1677 						 FUNC_MF_CFG_OV_STAG_MASK;
1678 	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1679 	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1680 		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1681 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1682 			       p_hwfn->hw_info.ovlan);
1683 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1684 
1685 			/* Configure DB to add external vlan to EDPM packets */
1686 			/* Configure the doorbell block to add the external VLAN to EDPM packets */
1687 			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1688 			       p_hwfn->hw_info.ovlan);
1689 		} else {
1690 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1691 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1692 			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1693 			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1694 		}
1695 
1696 		qed_sp_pf_update_stag(p_hwfn);
1697 	}
1698 
1699 	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1700 		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1701 
1702 	/* Acknowledge the MFW */
1703 	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1704 		    &resp, &param);
1705 }
1706 
1707 void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1708 {
1709 	struct public_func shmem_info;
1710 	u32 port_cfg, val;
1711 
1712 	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1713 		return;
1714 
1715 	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1716 	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1717 			  offsetof(struct public_port, oem_cfg_port));
1718 	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
1719 		OEM_CFG_CHANNEL_TYPE_OFFSET;
1720 	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1721 		DP_NOTICE(p_hwfn,
1722 			  "Incorrect UFP Channel type %d port_id 0x%02x\n",
1723 			  val, MFW_PORT(p_hwfn));
1724 
1725 	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
1726 	if (val == OEM_CFG_SCHED_TYPE_ETS) {
1727 		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
1728 	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
1729 		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
1730 	} else {
1731 		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
1732 		DP_NOTICE(p_hwfn,
1733 			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
1734 			  val, MFW_PORT(p_hwfn));
1735 	}
1736 
1737 	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1738 	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
1739 		OEM_CFG_FUNC_TC_OFFSET;
1740 	p_hwfn->ufp_info.tc = (u8)val;
1741 	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
1742 		OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
1743 	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
1744 		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
1745 	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
1746 		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
1747 	} else {
1748 		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
1749 		DP_NOTICE(p_hwfn,
1750 			  "Unknown Host priority control %d port_id 0x%02x\n",
1751 			  val, MFW_PORT(p_hwfn));
1752 	}
1753 
1754 	DP_NOTICE(p_hwfn,
1755 		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
1756 		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1757 		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
1758 }
1759 
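/* React to an OEM_CFG_UPDATE notification: refresh the UFP configuration and
 * propagate it either through a QM reconfiguration (vNIC-BW mode) or through
 * the DCBX operational MIB (ETS mode).
 */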
1760 static int
1761 qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1762 {
1763 	qed_mcp_read_ufp_config(p_hwfn, p_ptt);
1764 
1765 	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
1766 		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1767 		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
1768 					   p_hwfn->ufp_info.tc);
1769 
1770 		qed_qm_reconf(p_hwfn, p_ptt);
1771 	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
1772 		/* Merge the UFP TC with the DCBX TC data */
1773 		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1774 					  QED_DCBX_OPERATIONAL_MIB);
1775 	} else {
1776 		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
1777 		return -EINVAL;
1778 	}
1779 
1780 	/* Update the storm FW with the negotiation results */
1781 	qed_sp_pf_update_ufp(p_hwfn);
1782 
1783 	/* Update the S-tag PCP value */
1784 	qed_sp_pf_update_stag(p_hwfn);
1785 
1786 	return 0;
1787 }
1788 
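/* Main MFW-event dispatcher: read the mailbox, compare it against the shadow
 * copy, handle every message whose value changed, acknowledge all messages
 * back to the MFW and finally update the shadow.
 */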
1789 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
1790 			  struct qed_ptt *p_ptt)
1791 {
1792 	struct qed_mcp_info *info = p_hwfn->mcp_info;
1793 	int rc = 0;
1794 	bool found = false;
1795 	u16 i;
1796 
1797 	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
1798 
1799 	/* Read Messages from MFW */
1800 	qed_mcp_read_mb(p_hwfn, p_ptt);
1801 
1802 	/* Compare current messages to old ones */
1803 	for (i = 0; i < info->mfw_mb_length; i++) {
1804 		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1805 			continue;
1806 
1807 		found = true;
1808 
1809 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1810 			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1811 			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1812 
1813 		switch (i) {
1814 		case MFW_DRV_MSG_LINK_CHANGE:
1815 			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
1816 			break;
1817 		case MFW_DRV_MSG_VF_DISABLED:
1818 			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
1819 			break;
1820 		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1821 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1822 						  QED_DCBX_REMOTE_LLDP_MIB);
1823 			break;
1824 		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1825 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1826 						  QED_DCBX_REMOTE_MIB);
1827 			break;
1828 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1829 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1830 						  QED_DCBX_OPERATIONAL_MIB);
1831 			break;
1832 		case MFW_DRV_MSG_OEM_CFG_UPDATE:
1833 			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
1834 			break;
1835 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1836 			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1837 			break;
1838 		case MFW_DRV_MSG_ERROR_RECOVERY:
1839 			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
1840 			break;
1841 		case MFW_DRV_MSG_GET_LAN_STATS:
1842 		case MFW_DRV_MSG_GET_FCOE_STATS:
1843 		case MFW_DRV_MSG_GET_ISCSI_STATS:
1844 		case MFW_DRV_MSG_GET_RDMA_STATS:
1845 			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1846 			break;
1847 		case MFW_DRV_MSG_BW_UPDATE:
1848 			qed_mcp_update_bw(p_hwfn, p_ptt);
1849 			break;
1850 		case MFW_DRV_MSG_S_TAG_UPDATE:
1851 			qed_mcp_update_stag(p_hwfn, p_ptt);
1852 			break;
1853 		case MFW_DRV_MSG_GET_TLV_REQ:
1854 			qed_mfw_tlv_req(p_hwfn);
1855 			break;
1856 		default:
1857 			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1858 			rc = -EINVAL;
1859 		}
1860 	}
1861 
1862 	/* ACK everything */
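	/* The ack dwords sit past the leading dword and the current-message
	 * array in the MFW mailbox, hence the offset arithmetic below.
	 */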
1863 	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1864 		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
1865 
1866 		/* The MFW expects the answer in BE, so force the write in that format */
1867 		qed_wr(p_hwfn, p_ptt,
1868 		       info->mfw_mb_addr + sizeof(u32) +
1869 		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1870 		       sizeof(u32) + i * sizeof(u32),
1871 		       (__force u32)val);
1872 	}
1873 
1874 	if (!found) {
1875 		DP_NOTICE(p_hwfn,
1876 			  "Received an MFW message indication but no new message!\n");
1877 		rc = -EINVAL;
1878 	}
1879 
1880 	/* Copy the new mfw messages into the shadow */
1881 	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1882 
1883 	return rc;
1884 }
1885 
1886 int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
1887 			struct qed_ptt *p_ptt,
1888 			u32 *p_mfw_ver, u32 *p_running_bundle_id)
1889 {
1890 	u32 global_offsize;
1891 
1892 	if (IS_VF(p_hwfn->cdev)) {
1893 		if (p_hwfn->vf_iov_info) {
1894 			struct pfvf_acquire_resp_tlv *p_resp;
1895 
1896 			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1897 			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1898 			return 0;
1899 		} else {
1900 			DP_VERBOSE(p_hwfn,
1901 				   QED_MSG_IOV,
1902 				   "VF requested MFW version prior to ACQUIRE\n");
1903 			return -EINVAL;
1904 		}
1905 	}
1906 
1907 	global_offsize = qed_rd(p_hwfn, p_ptt,
1908 				SECTION_OFFSIZE_ADDR(p_hwfn->
1909 						     mcp_info->public_base,
1910 						     PUBLIC_GLOBAL));
1911 	*p_mfw_ver =
1912 	    qed_rd(p_hwfn, p_ptt,
1913 		   SECTION_ADDR(global_offsize,
1914 				0) + offsetof(struct public_global, mfw_ver));
1915 
1916 	if (p_running_bundle_id) {
1917 		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
1918 					      SECTION_ADDR(global_offsize, 0) +
1919 					      offsetof(struct public_global,
1920 						       running_bundle_id));
1921 	}
1922 
1923 	return 0;
1924 }
1925 
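/* The MBI version is read straight from the nvm_cfg1 image in the MCP
 * scratchpad: MISC_REG_GEN_PURP_CR0 points at the nvm_cfg header, and the
 * dword following it holds the offset of nvm_cfg1.
 */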
1926 int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
1927 			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
1928 {
1929 	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
1930 
1931 	if (IS_VF(p_hwfn->cdev))
1932 		return -EINVAL;
1933 
1934 	/* Read the address of the nvm_cfg */
1935 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1936 	if (!nvm_cfg_addr) {
1937 		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1938 		return -EINVAL;
1939 	}
1940 
1941 	/* Read the offset of nvm_cfg1 */
1942 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1943 
1944 	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1945 		       offsetof(struct nvm_cfg1, glob) +
1946 		       offsetof(struct nvm_cfg1_glob, mbi_version);
1947 	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
1948 			    mbi_ver_addr) &
1949 		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
1950 		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
1951 		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
1952 
1953 	return 0;
1954 }
1955 
1956 int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
1957 			   struct qed_ptt *p_ptt, u32 *p_media_type)
1958 {
1959 	*p_media_type = MEDIA_UNSPECIFIED;
1960 
1961 	if (IS_VF(p_hwfn->cdev))
1962 		return -EINVAL;
1963 
1964 	if (!qed_mcp_is_init(p_hwfn)) {
1965 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
1966 		return -EBUSY;
1967 	}
1968 
1969 	if (!p_ptt) {
1970 		*p_media_type = MEDIA_UNSPECIFIED;
1971 		return -EINVAL;
1972 	}
1973 
1974 	*p_media_type = qed_rd(p_hwfn, p_ptt,
1975 			       p_hwfn->mcp_info->port_addr +
1976 			       offsetof(struct public_port,
1977 					media_type));
1978 
1979 	return 0;
1980 }
1981 
1982 int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
1983 				 struct qed_ptt *p_ptt,
1984 				 u32 *p_transceiver_state,
1985 				 u32 *p_transceiver_type)
1986 {
1987 	u32 transceiver_info;
1988 
1989 	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
1990 	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
1991 
1992 	if (IS_VF(p_hwfn->cdev))
1993 		return -EINVAL;
1994 
1995 	if (!qed_mcp_is_init(p_hwfn)) {
1996 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
1997 		return -EBUSY;
1998 	}
1999 
2000 	transceiver_info = qed_rd(p_hwfn, p_ptt,
2001 				  p_hwfn->mcp_info->port_addr +
2002 				  offsetof(struct public_port,
2003 					   transceiver_data));
2004 
2005 	*p_transceiver_state = (transceiver_info &
2006 				ETH_TRANSCEIVER_STATE_MASK) >>
2007 				ETH_TRANSCEIVER_STATE_OFFSET;
2008 
2009 	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
2010 		*p_transceiver_type = (transceiver_info &
2011 				       ETH_TRANSCEIVER_TYPE_MASK) >>
2012 				       ETH_TRANSCEIVER_TYPE_OFFSET;
2013 	else
2014 		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2015 
2016 	return 0;
2017 }

2018 static bool qed_is_transceiver_ready(u32 transceiver_state,
2019 				     u32 transceiver_type)
2020 {
2021 	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2022 	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2023 	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2024 		return true;
2025 
2026 	return false;
2027 }
2028 
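/* Derive the NVM speed-capability mask that a given transceiver can support.
 * Unknown modules fall back to 0xff, i.e. all speeds are left enabled.
 */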
2029 int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2030 			     struct qed_ptt *p_ptt, u32 *p_speed_mask)
2031 {
2032 	u32 transceiver_type, transceiver_state;
2033 	int ret;
2034 
2035 	ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2036 					   &transceiver_type);
2037 	if (ret)
2038 		return ret;
2039 
2040 	if (!qed_is_transceiver_ready(transceiver_state, transceiver_type))
2042 		return -EINVAL;
2043 
2044 	switch (transceiver_type) {
2045 	case ETH_TRANSCEIVER_TYPE_1G_LX:
2046 	case ETH_TRANSCEIVER_TYPE_1G_SX:
2047 	case ETH_TRANSCEIVER_TYPE_1G_PCC:
2048 	case ETH_TRANSCEIVER_TYPE_1G_ACC:
2049 	case ETH_TRANSCEIVER_TYPE_1000BASET:
2050 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2051 		break;
2052 	case ETH_TRANSCEIVER_TYPE_10G_SR:
2053 	case ETH_TRANSCEIVER_TYPE_10G_LR:
2054 	case ETH_TRANSCEIVER_TYPE_10G_LRM:
2055 	case ETH_TRANSCEIVER_TYPE_10G_ER:
2056 	case ETH_TRANSCEIVER_TYPE_10G_PCC:
2057 	case ETH_TRANSCEIVER_TYPE_10G_ACC:
2058 	case ETH_TRANSCEIVER_TYPE_4x10G:
2059 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2060 		break;
2061 	case ETH_TRANSCEIVER_TYPE_40G_LR4:
2062 	case ETH_TRANSCEIVER_TYPE_40G_SR4:
2063 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2064 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2065 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2066 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2067 		break;
2068 	case ETH_TRANSCEIVER_TYPE_100G_AOC:
2069 	case ETH_TRANSCEIVER_TYPE_100G_SR4:
2070 	case ETH_TRANSCEIVER_TYPE_100G_LR4:
2071 	case ETH_TRANSCEIVER_TYPE_100G_ER4:
2072 	case ETH_TRANSCEIVER_TYPE_100G_ACC:
2073 		*p_speed_mask =
2074 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2075 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2076 		break;
2077 	case ETH_TRANSCEIVER_TYPE_25G_SR:
2078 	case ETH_TRANSCEIVER_TYPE_25G_LR:
2079 	case ETH_TRANSCEIVER_TYPE_25G_AOC:
2080 	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2081 	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2082 	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2083 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2084 		break;
2085 	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2086 	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2087 	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2088 	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2089 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2090 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2091 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2092 		break;
2093 	case ETH_TRANSCEIVER_TYPE_40G_CR4:
2094 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2095 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2096 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2097 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2098 		break;
2099 	case ETH_TRANSCEIVER_TYPE_100G_CR4:
2100 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2101 		*p_speed_mask =
2102 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2103 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2104 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2105 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2106 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2107 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2108 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2109 		break;
2110 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2111 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2112 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2113 		*p_speed_mask =
2114 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2115 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2116 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2117 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2118 		break;
2119 	case ETH_TRANSCEIVER_TYPE_XLPPI:
2120 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2121 		break;
2122 	case ETH_TRANSCEIVER_TYPE_10G_BASET:
2123 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2124 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2125 		break;
2126 	default:
2127 		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2128 			transceiver_type);
2129 		*p_speed_mask = 0xff;
2130 		break;
2131 	}
2132 
2133 	return 0;
2134 }
2135 
2136 int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
2137 			     struct qed_ptt *p_ptt, u32 *p_board_config)
2138 {
2139 	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2140 
2141 	if (IS_VF(p_hwfn->cdev))
2142 		return -EINVAL;
2143 
2144 	if (!qed_mcp_is_init(p_hwfn)) {
2145 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2146 		return -EBUSY;
2147 	}
2148 	if (!p_ptt) {
2149 		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2150 		return -EINVAL;
2151 	}
2152 
2153 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2154 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2155 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2156 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2157 	*p_board_config = qed_rd(p_hwfn, p_ptt,
2158 				 port_cfg_addr +
2159 				 offsetof(struct nvm_cfg1_port,
2160 					  board_cfg));
2161 
2162 	return 0;
2163 }
2164 
2165 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2166 static void
2167 qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
2168 			       enum qed_pci_personality *p_proto)
2169 {
2170 	/* There was never a legacy MFW that published iWARP.
2171 	 * So at this point, this is either plain L2 or RoCE.
2172 	 */
2173 	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
2174 		*p_proto = QED_PCI_ETH_ROCE;
2175 	else
2176 		*p_proto = QED_PCI_ETH;
2177 
2178 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2179 		   "According to Legacy capabilities, L2 personality is %08x\n",
2180 		   (u32) *p_proto);
2181 }
2182 
2183 static int
2184 qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
2185 			    struct qed_ptt *p_ptt,
2186 			    enum qed_pci_personality *p_proto)
2187 {
2188 	u32 resp = 0, param = 0;
2189 	int rc;
2190 
2191 	rc = qed_mcp_cmd(p_hwfn, p_ptt,
2192 			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2193 	if (rc)
2194 		return rc;
2195 	if (resp != FW_MSG_CODE_OK) {
2196 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2197 			   "MFW lacks support for command; Returns %08x\n",
2198 			   resp);
2199 		return -EINVAL;
2200 	}
2201 
2202 	switch (param) {
2203 	case FW_MB_PARAM_GET_PF_RDMA_NONE:
2204 		*p_proto = QED_PCI_ETH;
2205 		break;
2206 	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2207 		*p_proto = QED_PCI_ETH_ROCE;
2208 		break;
2209 	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2210 		*p_proto = QED_PCI_ETH_IWARP;
2211 		break;
2212 	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2213 		*p_proto = QED_PCI_ETH_RDMA;
2214 		break;
2215 	default:
2216 		DP_NOTICE(p_hwfn,
2217 			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2218 			  param);
2219 		return -EINVAL;
2220 	}
2221 
2222 	DP_VERBOSE(p_hwfn,
2223 		   NETIF_MSG_IFUP,
2224 		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2225 		   (u32) *p_proto, resp, param);
2226 	return 0;
2227 }
2228 
2229 static int
2230 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
2231 			struct public_func *p_info,
2232 			struct qed_ptt *p_ptt,
2233 			enum qed_pci_personality *p_proto)
2234 {
2235 	int rc = 0;
2236 
2237 	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2238 	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2239 		if (!IS_ENABLED(CONFIG_QED_RDMA))
2240 			*p_proto = QED_PCI_ETH;
2241 		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
2242 			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2243 		break;
2244 	case FUNC_MF_CFG_PROTOCOL_ISCSI:
2245 		*p_proto = QED_PCI_ISCSI;
2246 		break;
2247 	case FUNC_MF_CFG_PROTOCOL_FCOE:
2248 		*p_proto = QED_PCI_FCOE;
2249 		break;
2250 	case FUNC_MF_CFG_PROTOCOL_ROCE:
2251 		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
2252 	/* Fallthrough */
2253 	default:
2254 		rc = -EINVAL;
2255 	}
2256 
2257 	return rc;
2258 }
2259 
2260 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
2261 				 struct qed_ptt *p_ptt)
2262 {
2263 	struct qed_mcp_function_info *info;
2264 	struct public_func shmem_info;
2265 
2266 	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2267 	info = &p_hwfn->mcp_info->func_info;
2268 
2269 	info->pause_on_host = (shmem_info.config &
2270 			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2271 
2272 	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2273 				    &info->protocol)) {
2274 		DP_ERR(p_hwfn, "Unknown personality %08x\n",
2275 		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2276 		return -EINVAL;
2277 	}
2278 
2279 	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
2280 
2281 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
2282 		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2283 		info->mac[1] = (u8)(shmem_info.mac_upper);
2284 		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2285 		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2286 		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2287 		info->mac[5] = (u8)(shmem_info.mac_lower);
2288 
2289 		/* Store primary MAC for later possible WoL */
2290 		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
2291 	} else {
2292 		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
2293 	}
2294 
2295 	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2296 			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2297 	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2298 			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2299 
2300 	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2301 
2302 	info->mtu = (u16)shmem_info.mtu_size;
2303 
2304 	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
2305 	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
2306 	if (qed_mcp_is_init(p_hwfn)) {
2307 		u32 resp = 0, param = 0;
2308 		int rc;
2309 
2310 		rc = qed_mcp_cmd(p_hwfn, p_ptt,
2311 				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2312 		if (rc)
2313 			return rc;
2314 		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2315 			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
2316 	}
2317 
2318 	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
2319 		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2320 		info->pause_on_host, info->protocol,
2321 		info->bandwidth_min, info->bandwidth_max,
2322 		info->mac[0], info->mac[1], info->mac[2],
2323 		info->mac[3], info->mac[4], info->mac[5],
2324 		info->wwn_port, info->wwn_node,
2325 		info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
2326 
2327 	return 0;
2328 }
2329 
2330 struct qed_mcp_link_params
2331 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
2332 {
2333 	if (!p_hwfn || !p_hwfn->mcp_info)
2334 		return NULL;
2335 	return &p_hwfn->mcp_info->link_input;
2336 }
2337 
2338 struct qed_mcp_link_state
2339 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
2340 {
2341 	if (!p_hwfn || !p_hwfn->mcp_info)
2342 		return NULL;
2343 	return &p_hwfn->mcp_info->link_output;
2344 }
2345 
2346 struct qed_mcp_link_capabilities
2347 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
2348 {
2349 	if (!p_hwfn || !p_hwfn->mcp_info)
2350 		return NULL;
2351 	return &p_hwfn->mcp_info->link_capabilities;
2352 }
2353 
2354 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2355 {
2356 	u32 resp = 0, param = 0;
2357 	int rc;
2358 
2359 	rc = qed_mcp_cmd(p_hwfn, p_ptt,
2360 			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2361 
2362 	/* Wait for the drain to complete before returning */
2363 	msleep(1020);
2364 
2365 	return rc;
2366 }
2367 
2368 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
2369 			   struct qed_ptt *p_ptt, u32 *p_flash_size)
2370 {
2371 	u32 flash_size;
2372 
2373 	if (IS_VF(p_hwfn->cdev))
2374 		return -EINVAL;
2375 
2376 	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2377 	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2378 		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
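	/* The field encodes the size as a power of two in Mbit units
	 * (MCP_BYTES_PER_MBIT_SHIFT = 17, i.e. 2^17 bytes per Mbit); e.g. an
	 * encoded value of 3 yields 2^(3 + 17) bytes = 8 Mbit = 1 MiB.
	 */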
2379 	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2380 
2381 	*p_flash_size = flash_size;
2382 
2383 	return 0;
2384 }
2385 
2386 int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2387 {
2388 	struct qed_dev *cdev = p_hwfn->cdev;
2389 
2390 	if (cdev->recov_in_prog) {
2391 		DP_NOTICE(p_hwfn,
2392 			  "Avoid triggering a recovery since such a process is already in progress\n");
2393 		return -EAGAIN;
2394 	}
2395 
2396 	DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
2397 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2398 
2399 	return 0;
2400 }
2401 
2402 #define QED_RECOVERY_PROLOG_SLEEP_MS    100
2403 
2404 int qed_recovery_prolog(struct qed_dev *cdev)
2405 {
2406 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2407 	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
2408 	int rc;
2409 
2410 	/* Allow ongoing PCIe transactions to complete */
2411 	msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
2412 
2413 	/* Clear the PF's internal FID_enable in the PXP */
2414 	rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2415 	if (rc)
2416 		DP_NOTICE(p_hwfn,
2417 			  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2418 			  rc);
2419 
2420 	return rc;
2421 }
2422 
2423 static int
2424 qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
2425 			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2426 {
2427 	u32 resp = 0, param = 0, rc_param = 0;
2428 	int rc;
2429 
2430 	/* Only the leading hwfn can configure MSI-X, and CMT must be taken into account */
2431 	if (!IS_LEAD_HWFN(p_hwfn))
2432 		return 0;
2433 	num *= p_hwfn->cdev->num_hwfns;
2434 
2435 	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2436 		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2437 	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2438 		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2439 
2440 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2441 			 &resp, &rc_param);
2442 
2443 	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2444 		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2445 		rc = -EINVAL;
2446 	} else {
2447 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2448 			   "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
2449 			   num, vf_id);
2450 	}
2451 
2452 	return rc;
2453 }
2454 
2455 static int
2456 qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2457 			  struct qed_ptt *p_ptt, u8 num)
2458 {
2459 	u32 resp = 0, param = num, rc_param = 0;
2460 	int rc;
2461 
2462 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2463 			 param, &resp, &rc_param);
2464 
2465 	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2466 		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2467 		rc = -EINVAL;
2468 	} else {
2469 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2470 			   "Requested 0x%02x MSI-X interrupts for VFs\n", num);
2471 	}
2472 
2473 	return rc;
2474 }
2475 
2476 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2477 			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2478 {
2479 	if (QED_IS_BB(p_hwfn->cdev))
2480 		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2481 	else
2482 		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2483 }
2484 
2485 int
2486 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2487 			 struct qed_ptt *p_ptt,
2488 			 struct qed_mcp_drv_version *p_ver)
2489 {
2490 	struct qed_mcp_mb_params mb_params;
2491 	struct drv_version_stc drv_version;
2492 	__be32 val;
2493 	u32 i;
2494 	int rc;
2495 
2496 	memset(&drv_version, 0, sizeof(drv_version));
2497 	drv_version.version = p_ver->version;
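	/* Copy the driver name string into the request in 32-bit big-endian
	 * chunks so that the MFW, which accesses shmem as big-endian dwords,
	 * reads the characters in the right order.
	 */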
2498 	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2499 		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
2500 		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
2501 	}
2502 
2503 	memset(&mb_params, 0, sizeof(mb_params));
2504 	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2505 	mb_params.p_data_src = &drv_version;
2506 	mb_params.data_src_size = sizeof(drv_version);
2507 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2508 	if (rc)
2509 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2510 
2511 	return rc;
2512 }
2513 
2514 /* A maximal 100 msec waiting time for the MCP to halt */
2515 #define QED_MCP_HALT_SLEEP_MS		10
2516 #define QED_MCP_HALT_MAX_RETRIES	10
2517 
2518 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2519 {
2520 	u32 resp = 0, param = 0, cpu_state, cnt = 0;
2521 	int rc;
2522 
2523 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2524 			 &param);
2525 	if (rc) {
2526 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2527 		return rc;
2528 	}
2529 
2530 	do {
2531 		msleep(QED_MCP_HALT_SLEEP_MS);
2532 		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2533 		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2534 			break;
2535 	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2536 
2537 	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2538 		DP_NOTICE(p_hwfn,
2539 			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2540 			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2541 		return -EBUSY;
2542 	}
2543 
2544 	qed_mcp_cmd_set_blocking(p_hwfn, true);
2545 
2546 	return 0;
2547 }
2548 
2549 #define QED_MCP_RESUME_SLEEP_MS	10
2550 
2551 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2552 {
2553 	u32 cpu_mode, cpu_state;
2554 
2555 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2556 
2557 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2558 	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2559 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2560 	msleep(QED_MCP_RESUME_SLEEP_MS);
2561 	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2562 
2563 	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2564 		DP_NOTICE(p_hwfn,
2565 			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2566 			  cpu_mode, cpu_state);
2567 		return -EBUSY;
2568 	}
2569 
2570 	qed_mcp_cmd_set_blocking(p_hwfn, false);
2571 
2572 	return 0;
2573 }
2574 
2575 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2576 				     struct qed_ptt *p_ptt,
2577 				     enum qed_ov_client client)
2578 {
2579 	u32 resp = 0, param = 0;
2580 	u32 drv_mb_param;
2581 	int rc;
2582 
2583 	switch (client) {
2584 	case QED_OV_CLIENT_DRV:
2585 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2586 		break;
2587 	case QED_OV_CLIENT_USER:
2588 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2589 		break;
2590 	case QED_OV_CLIENT_VENDOR_SPEC:
2591 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2592 		break;
2593 	default:
2594 		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2595 		return -EINVAL;
2596 	}
2597 
2598 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2599 			 drv_mb_param, &resp, &param);
2600 	if (rc)
2601 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2602 
2603 	return rc;
2604 }
2605 
2606 int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2607 				   struct qed_ptt *p_ptt,
2608 				   enum qed_ov_driver_state drv_state)
2609 {
2610 	u32 resp = 0, param = 0;
2611 	u32 drv_mb_param;
2612 	int rc;
2613 
2614 	switch (drv_state) {
2615 	case QED_OV_DRIVER_STATE_NOT_LOADED:
2616 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2617 		break;
2618 	case QED_OV_DRIVER_STATE_DISABLED:
2619 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2620 		break;
2621 	case QED_OV_DRIVER_STATE_ACTIVE:
2622 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2623 		break;
2624 	default:
2625 		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2626 		return -EINVAL;
2627 	}
2628 
2629 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2630 			 drv_mb_param, &resp, &param);
2631 	if (rc)
2632 		DP_ERR(p_hwfn, "Failed to send driver state\n");
2633 
2634 	return rc;
2635 }
2636 
2637 int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2638 			  struct qed_ptt *p_ptt, u16 mtu)
2639 {
2640 	u32 resp = 0, param = 0;
2641 	u32 drv_mb_param;
2642 	int rc;
2643 
2644 	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2645 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2646 			 drv_mb_param, &resp, &param);
2647 	if (rc)
2648 		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2649 
2650 	return rc;
2651 }
2652 
2653 int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2654 			  struct qed_ptt *p_ptt, u8 *mac)
2655 {
2656 	struct qed_mcp_mb_params mb_params;
2657 	u32 mfw_mac[2];
2658 	int rc;
2659 
2660 	memset(&mb_params, 0, sizeof(mb_params));
2661 	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2662 	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2663 			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2664 	mb_params.param |= MCP_PF_ID(p_hwfn);
2665 
2666 	/* The MCP is big-endian, and on LE platforms PCI swaps accesses to
2667 	 * SHMEM at 32-bit granularity. The MAC therefore has to be written
2668 	 * in native order [and not byte order], otherwise the MFW would read
2669 	 * it incorrectly after the swap.
2670 	 */
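	/* For example, aa:bb:cc:dd:ee:ff is packed as mfw_mac[0] = 0xaabbccdd
	 * and mfw_mac[1] = 0xeeff0000.
	 */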
2671 	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2672 	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2673 
2674 	mb_params.p_data_src = (u8 *)mfw_mac;
2675 	mb_params.data_src_size = 8;
2676 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2677 	if (rc)
2678 		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2679 
2680 	/* Store primary MAC for later possible WoL */
2681 	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2682 
2683 	return rc;
2684 }
2685 
2686 int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2687 			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2688 {
2689 	u32 resp = 0, param = 0;
2690 	u32 drv_mb_param;
2691 	int rc;
2692 
2693 	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2694 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
2695 			   "Can't change WoL configuration when WoL isn't supported\n");
2696 		return -EINVAL;
2697 	}
2698 
2699 	switch (wol) {
2700 	case QED_OV_WOL_DEFAULT:
2701 		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2702 		break;
2703 	case QED_OV_WOL_DISABLED:
2704 		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2705 		break;
2706 	case QED_OV_WOL_ENABLED:
2707 		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2708 		break;
2709 	default:
2710 		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2711 		return -EINVAL;
2712 	}
2713 
2714 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2715 			 drv_mb_param, &resp, &param);
2716 	if (rc)
2717 		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2718 
2719 	/* Store the WoL update for a future unload */
2720 	p_hwfn->cdev->wol_config = (u8)wol;
2721 
2722 	return rc;
2723 }
2724 
2725 int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2726 			      struct qed_ptt *p_ptt,
2727 			      enum qed_ov_eswitch eswitch)
2728 {
2729 	u32 resp = 0, param = 0;
2730 	u32 drv_mb_param;
2731 	int rc;
2732 
2733 	switch (eswitch) {
2734 	case QED_OV_ESWITCH_NONE:
2735 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2736 		break;
2737 	case QED_OV_ESWITCH_VEB:
2738 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2739 		break;
2740 	case QED_OV_ESWITCH_VEPA:
2741 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2742 		break;
2743 	default:
2744 		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2745 		return -EINVAL;
2746 	}
2747 
2748 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2749 			 drv_mb_param, &resp, &param);
2750 	if (rc)
2751 		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2752 
2753 	return rc;
2754 }
2755 
2756 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2757 		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
2758 {
2759 	u32 resp = 0, param = 0, drv_mb_param;
2760 	int rc;
2761 
2762 	switch (mode) {
2763 	case QED_LED_MODE_ON:
2764 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2765 		break;
2766 	case QED_LED_MODE_OFF:
2767 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2768 		break;
2769 	case QED_LED_MODE_RESTORE:
2770 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2771 		break;
2772 	default:
2773 		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2774 		return -EINVAL;
2775 	}
2776 
2777 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2778 			 drv_mb_param, &resp, &param);
2779 
2780 	return rc;
2781 }
2782 
2783 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
2784 			  struct qed_ptt *p_ptt, u32 mask_parities)
2785 {
2786 	u32 resp = 0, param = 0;
2787 	int rc;
2788 
2789 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2790 			 mask_parities, &resp, &param);
2791 
2792 	if (rc) {
2793 		DP_ERR(p_hwfn,
2794 		       "MCP response failure for mask parities, aborting\n");
2795 	} else if (resp != FW_MSG_CODE_OK) {
2796 		DP_ERR(p_hwfn,
2797 		       "MCP did not acknowledge mask parity request. Old MFW?\n");
2798 		rc = -EINVAL;
2799 	}
2800 
2801 	return rc;
2802 }
2803 
2804 int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
2805 {
2806 	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
2807 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2808 	u32 resp = 0, resp_param = 0;
2809 	struct qed_ptt *p_ptt;
2810 	int rc = 0;
2811 
2812 	p_ptt = qed_ptt_acquire(p_hwfn);
2813 	if (!p_ptt)
2814 		return -EBUSY;
2815 
2816 	while (bytes_left > 0) {
2817 		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
2818 
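		/* The mailbox param carries the NVM address in its low bits
		 * and the chunk length in the DRV_MB_PARAM_NVM_LEN field.
		 */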
2819 		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2820 					DRV_MSG_CODE_NVM_READ_NVRAM,
2821 					addr + offset +
2822 					(bytes_to_copy <<
2823 					 DRV_MB_PARAM_NVM_LEN_OFFSET),
2824 					&resp, &resp_param,
2825 					&read_len,
2826 					(u32 *)(p_buf + offset));
2827 
2828 		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
2829 			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
2830 			break;
2831 		}
2832 
2833 		/* This can be a lengthy process, and the scheduler might not be
2834 		 * preemptible. Sleep a bit to prevent CPU hogging.
2835 		 */
2836 		if (bytes_left % 0x1000 <
2837 		    (bytes_left - read_len) % 0x1000)
2838 			usleep_range(1000, 2000);
2839 
2840 		offset += read_len;
2841 		bytes_left -= read_len;
2842 	}
2843 
2844 	cdev->mcp_nvm_resp = resp;
2845 	qed_ptt_release(p_hwfn, p_ptt);
2846 
2847 	return rc;
2848 }
2849 
2850 int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
2851 {
2852 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2853 	struct qed_ptt *p_ptt;
2854 
2855 	p_ptt = qed_ptt_acquire(p_hwfn);
2856 	if (!p_ptt)
2857 		return -EBUSY;
2858 
2859 	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
2860 	qed_ptt_release(p_hwfn, p_ptt);
2861 
2862 	return 0;
2863 }
2864 
2865 int qed_mcp_nvm_write(struct qed_dev *cdev,
2866 		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
2867 {
2868 	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
2869 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2870 	struct qed_ptt *p_ptt;
2871 	int rc = -EINVAL;
2872 
2873 	p_ptt = qed_ptt_acquire(p_hwfn);
2874 	if (!p_ptt)
2875 		return -EBUSY;
2876 
2877 	switch (cmd) {
2878 	case QED_PUT_FILE_BEGIN:
2879 		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2880 		break;
2881 	case QED_PUT_FILE_DATA:
2882 		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2883 		break;
2884 	case QED_NVM_WRITE_NVRAM:
2885 		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2886 		break;
2887 	default:
2888 		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
2889 		rc = -EINVAL;
2890 		goto out;
2891 	}
2892 
2893 	buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
2894 	while (buf_idx < len) {
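		/* Except for PUT_FILE_BEGIN, the param packs the chunk length
		 * in DRV_MB_PARAM_NVM_LEN and the absolute NVM offset
		 * (addr + buf_idx) in the low bits.
		 */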
2895 		if (cmd == QED_PUT_FILE_BEGIN)
2896 			nvm_offset = addr;
2897 		else
2898 			nvm_offset = ((buf_size <<
2899 				       DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
2900 				       buf_idx;
2901 		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
2902 					&resp, &param, buf_size,
2903 					(u32 *)&p_buf[buf_idx]);
2904 		if (rc) {
2905 			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
2906 			resp = FW_MSG_CODE_ERROR;
2907 			break;
2908 		}
2909 
2910 		if (resp != FW_MSG_CODE_OK &&
2911 		    resp != FW_MSG_CODE_NVM_OK &&
2912 		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
2913 			DP_NOTICE(cdev,
2914 				  "nvm write failed, resp = 0x%08x\n", resp);
2915 			rc = -EINVAL;
2916 			break;
2917 		}
2918 
2919 		/* This can be a lengthy process, and the scheduler might not be
2920 		 * preemptible. Sleep a bit to prevent CPU hogging.
2921 		 */
2922 		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
2923 			usleep_range(1000, 2000);
2924 
2925 		/* For MBI upgrade, MFW response includes the next buffer offset
2926 		 * to be delivered to MFW.
2927 		 */
2928 		if (param && cmd == QED_PUT_FILE_DATA) {
2929 			buf_idx = QED_MFW_GET_FIELD(param,
2930 					FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
2931 			buf_size = QED_MFW_GET_FIELD(param,
2932 					 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
2933 		} else {
2934 			buf_idx += buf_size;
2935 			buf_size = min_t(u32, (len - buf_idx),
2936 					 MCP_DRV_NVM_BUF_LEN);
2937 		}
2938 	}
2939 
2940 	cdev->mcp_nvm_resp = resp;
2941 out:
2942 	qed_ptt_release(p_hwfn, p_ptt);
2943 
2944 	return rc;
2945 }
2946 
2947 int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2948 			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
2949 {
2950 	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
2951 	u32 resp, param;
2952 	int rc;
2953 
2954 	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
2955 		       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
2956 	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
2957 		       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
2958 
2959 	addr = offset;
2960 	offset = 0;
2961 	bytes_left = len;
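	/* Read in chunks of at most MAX_I2C_TRANSACTION_SIZE bytes; each
	 * iteration re-packs the current offset and chunk size into the
	 * request param while keeping the port and I2C address fields.
	 */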
2962 	while (bytes_left > 0) {
2963 		bytes_to_copy = min_t(u32, bytes_left,
2964 				      MAX_I2C_TRANSACTION_SIZE);
2965 		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2966 			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2967 		nvm_offset |= ((addr + offset) <<
2968 			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
2969 			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
2970 		nvm_offset |= (bytes_to_copy <<
2971 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
2972 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
2973 		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2974 					DRV_MSG_CODE_TRANSCEIVER_READ,
2975 					nvm_offset, &resp, &param, &buf_size,
2976 					(u32 *)(p_buf + offset));
2977 		if (rc) {
2978 			DP_NOTICE(p_hwfn,
2979 				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
2980 				  rc);
2981 			return rc;
2982 		}
2983 
2984 		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
2985 			return -ENODEV;
2986 		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2987 			return -EINVAL;
2988 
2989 		offset += buf_size;
2990 		bytes_left -= buf_size;
2991 	}
2992 
2993 	return 0;
2994 }
2995 
2996 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2997 {
2998 	u32 drv_mb_param = 0, rsp, param;
2999 	int rc = 0;
3000 
3001 	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3002 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3003 
3004 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3005 			 drv_mb_param, &rsp, &param);
3006 
3007 	if (rc)
3008 		return rc;
3009 
3010 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3011 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3012 		rc = -EAGAIN;
3013 
3014 	return rc;
3015 }
3016 
3017 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3018 {
3019 	u32 drv_mb_param, rsp, param;
3020 	int rc = 0;
3021 
3022 	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3023 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3024 
3025 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3026 			 drv_mb_param, &rsp, &param);
3027 
3028 	if (rc)
3029 		return rc;
3030 
3031 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3032 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3033 		rc = -EAGAIN;
3034 
3035 	return rc;
3036 }
3037 
3038 int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
3039 				    struct qed_ptt *p_ptt,
3040 				    u32 *num_images)
3041 {
3042 	u32 drv_mb_param = 0, rsp;
3043 	int rc = 0;
3044 
3045 	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3046 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3047 
3048 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3049 			 drv_mb_param, &rsp, num_images);
3050 	if (rc)
3051 		return rc;
3052 
3053 	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3054 		rc = -EINVAL;
3055 
3056 	return rc;
3057 }
3058 
3059 int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
3060 				   struct qed_ptt *p_ptt,
3061 				   struct bist_nvm_image_att *p_image_att,
3062 				   u32 image_index)
3063 {
3064 	u32 buf_size = 0, param, resp = 0, resp_param = 0;
3065 	int rc;
3066 
3067 	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3068 		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
3069 	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
3070 
3071 	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3072 				DRV_MSG_CODE_BIST_TEST, param,
3073 				&resp, &resp_param,
3074 				&buf_size,
3075 				(u32 *)p_image_att);
3076 	if (rc)
3077 		return rc;
3078 
3079 	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3080 	    (p_image_att->return_code != 1))
3081 		rc = -EINVAL;
3082 
3083 	return rc;
3084 }
3085 
3086 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
3087 {
3088 	struct qed_nvm_image_info nvm_info;
3089 	struct qed_ptt *p_ptt;
3090 	int rc;
3091 	u32 i;
3092 
3093 	if (p_hwfn->nvm_info.valid)
3094 		return 0;
3095 
3096 	p_ptt = qed_ptt_acquire(p_hwfn);
3097 	if (!p_ptt) {
3098 		DP_ERR(p_hwfn, "failed to acquire ptt\n");
3099 		return -EBUSY;
3100 	}
3101 
3102 	/* Get the number of available images from the MFW */
3103 	nvm_info.num_images = 0;
3104 	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
3105 					     p_ptt, &nvm_info.num_images);
3106 	if (rc == -EOPNOTSUPP) {
3107 		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3108 		goto out;
3109 	} else if (rc || !nvm_info.num_images) {
3110 		DP_ERR(p_hwfn, "Failed getting number of images\n");
3111 		goto err0;
3112 	}
3113 
3114 	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
3115 					   sizeof(struct bist_nvm_image_att),
3116 					   GFP_KERNEL);
3117 	if (!nvm_info.image_att) {
3118 		rc = -ENOMEM;
3119 		goto err0;
3120 	}
3121 
3122 	/* Iterate over images and get their attributes */
3123 	for (i = 0; i < nvm_info.num_images; i++) {
3124 		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3125 						    &nvm_info.image_att[i], i);
3126 		if (rc) {
3127 			DP_ERR(p_hwfn,
3128 			       "Failed getting image index %d attributes\n", i);
3129 			goto err1;
3130 		}
3131 
3132 		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
3133 			   nvm_info.image_att[i].len);
3134 	}
3135 out:
3136 	/* Update hwfn's nvm_info */
3137 	if (nvm_info.num_images) {
3138 		p_hwfn->nvm_info.num_images = nvm_info.num_images;
3139 		kfree(p_hwfn->nvm_info.image_att);
3140 		p_hwfn->nvm_info.image_att = nvm_info.image_att;
3141 		p_hwfn->nvm_info.valid = true;
3142 	}
3143 
3144 	qed_ptt_release(p_hwfn, p_ptt);
3145 	return 0;
3146 
3147 err1:
3148 	kfree(nvm_info.image_att);
3149 err0:
3150 	qed_ptt_release(p_hwfn, p_ptt);
3151 	return rc;
3152 }
3153 
3154 int
3155 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3156 			  enum qed_nvm_images image_id,
3157 			  struct qed_nvm_image_att *p_image_att)
3158 {
3159 	enum nvm_image_type type;
3160 	u32 i;
3161 
3162 	/* Translate image_id into MFW definitions */
3163 	switch (image_id) {
3164 	case QED_NVM_IMAGE_ISCSI_CFG:
3165 		type = NVM_TYPE_ISCSI_CFG;
3166 		break;
3167 	case QED_NVM_IMAGE_FCOE_CFG:
3168 		type = NVM_TYPE_FCOE_CFG;
3169 		break;
3170 	case QED_NVM_IMAGE_MDUMP:
3171 		type = NVM_TYPE_MDUMP;
3172 		break;
3173 	case QED_NVM_IMAGE_NVM_CFG1:
3174 		type = NVM_TYPE_NVM_CFG1;
3175 		break;
3176 	case QED_NVM_IMAGE_DEFAULT_CFG:
3177 		type = NVM_TYPE_DEFAULT_CFG;
3178 		break;
3179 	case QED_NVM_IMAGE_NVM_META:
3180 		type = NVM_TYPE_META;
3181 		break;
3182 	default:
3183 		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
3184 			  image_id);
3185 		return -EINVAL;
3186 	}
3187 
3188 	qed_mcp_nvm_info_populate(p_hwfn);
3189 	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3190 		if (type == p_hwfn->nvm_info.image_att[i].image_type)
3191 			break;
3192 	if (i == p_hwfn->nvm_info.num_images) {
3193 		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3194 			   "Failed to find nvram image of type %08x\n",
3195 			   image_id);
3196 		return -ENOENT;
3197 	}
3198 
3199 	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3200 	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3201 
3202 	return 0;
3203 }
3204 
3205 int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
3206 			  enum qed_nvm_images image_id,
3207 			  u8 *p_buffer, u32 buffer_len)
3208 {
3209 	struct qed_nvm_image_att image_att;
3210 	int rc;
3211 
3212 	memset(p_buffer, 0, buffer_len);
3213 
3214 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3215 	if (rc)
3216 		return rc;
3217 
3218 	/* Validate sizes - both the image's and the supplied buffer's */
3219 	if (image_att.length <= 4) {
3220 		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3221 			   "Image [%d] is too small - only %d bytes\n",
3222 			   image_id, image_att.length);
3223 		return -EINVAL;
3224 	}
3225 
3226 	if (image_att.length > buffer_len) {
3227 		DP_VERBOSE(p_hwfn,
3228 			   QED_MSG_STORAGE,
3229 			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
3230 			   image_id, image_att.length, buffer_len);
3231 		return -ENOMEM;
3232 	}
3233 
3234 	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
3235 				p_buffer, image_att.length);
3236 }
3237 
3238 static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
3239 {
3240 	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3241 
3242 	switch (res_id) {
3243 	case QED_SB:
3244 		mfw_res_id = RESOURCE_NUM_SB_E;
3245 		break;
3246 	case QED_L2_QUEUE:
3247 		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3248 		break;
3249 	case QED_VPORT:
3250 		mfw_res_id = RESOURCE_NUM_VPORT_E;
3251 		break;
3252 	case QED_RSS_ENG:
3253 		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3254 		break;
3255 	case QED_PQ:
3256 		mfw_res_id = RESOURCE_NUM_PQ_E;
3257 		break;
3258 	case QED_RL:
3259 		mfw_res_id = RESOURCE_NUM_RL_E;
3260 		break;
3261 	case QED_MAC:
3262 	case QED_VLAN:
3263 		/* Each VFC resource can accommodate both a MAC and a VLAN */
3264 		mfw_res_id = RESOURCE_VFC_FILTER_E;
3265 		break;
3266 	case QED_ILT:
3267 		mfw_res_id = RESOURCE_ILT_E;
3268 		break;
3269 	case QED_LL2_RAM_QUEUE:
3270 		mfw_res_id = RESOURCE_LL2_QUEUE_E;
3271 		break;
3272 	case QED_LL2_CTX_QUEUE:
3273 		mfw_res_id = RESOURCE_LL2_CQS_E;
3274 		break;
3275 	case QED_RDMA_CNQ_RAM:
3276 	case QED_CMDQS_CQS:
3277 		/* CNQ/CMDQS are the same resource */
3278 		mfw_res_id = RESOURCE_CQS_E;
3279 		break;
3280 	case QED_RDMA_STATS_QUEUE:
3281 		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3282 		break;
3283 	case QED_BDQ:
3284 		mfw_res_id = RESOURCE_BDQ_E;
3285 		break;
3286 	default:
3287 		break;
3288 	}
3289 
3290 	return mfw_res_id;
3291 }
3292 
3293 #define QED_RESC_ALLOC_VERSION_MAJOR    2
3294 #define QED_RESC_ALLOC_VERSION_MINOR    0
3295 #define QED_RESC_ALLOC_VERSION				     \
3296 	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
3297 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3298 	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
3299 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3300 
3301 struct qed_resc_alloc_in_params {
3302 	u32 cmd;
3303 	enum qed_resources res_id;
3304 	u32 resc_max_val;
3305 };
3306 
3307 struct qed_resc_alloc_out_params {
3308 	u32 mcp_resp;
3309 	u32 mcp_param;
3310 	u32 resc_num;
3311 	u32 resc_start;
3312 	u32 vf_resc_num;
3313 	u32 vf_resc_start;
3314 	u32 flags;
3315 };
3316 
3317 static int
3318 qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
3319 			    struct qed_ptt *p_ptt,
3320 			    struct qed_resc_alloc_in_params *p_in_params,
3321 			    struct qed_resc_alloc_out_params *p_out_params)
3322 {
3323 	struct qed_mcp_mb_params mb_params;
3324 	struct resource_info mfw_resc_info;
3325 	int rc;
3326 
3327 	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
3328 
3329 	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
3330 	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3331 		DP_ERR(p_hwfn,
3332 		       "Failed to match resource %d [%s] with the MFW resources\n",
3333 		       p_in_params->res_id,
3334 		       qed_hw_get_resc_name(p_in_params->res_id));
3335 		return -EINVAL;
3336 	}
3337 
3338 	switch (p_in_params->cmd) {
3339 	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3340 		mfw_resc_info.size = p_in_params->resc_max_val;
3341 		/* Fallthrough */
3342 	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3343 		break;
3344 	default:
3345 		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3346 		       p_in_params->cmd);
3347 		return -EINVAL;
3348 	}
3349 
3350 	memset(&mb_params, 0, sizeof(mb_params));
3351 	mb_params.cmd = p_in_params->cmd;
3352 	mb_params.param = QED_RESC_ALLOC_VERSION;
3353 	mb_params.p_data_src = &mfw_resc_info;
3354 	mb_params.data_src_size = sizeof(mfw_resc_info);
3355 	mb_params.p_data_dst = mb_params.p_data_src;
3356 	mb_params.data_dst_size = mb_params.data_src_size;
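	/* The same resource_info buffer is used for both directions; the MFW
	 * fills in the allocation results in place, and they are unpacked
	 * into p_out_params below.
	 */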
3357 
3358 	DP_VERBOSE(p_hwfn,
3359 		   QED_MSG_SP,
3360 		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3361 		   p_in_params->cmd,
3362 		   p_in_params->res_id,
3363 		   qed_hw_get_resc_name(p_in_params->res_id),
3364 		   QED_MFW_GET_FIELD(mb_params.param,
3365 				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3366 		   QED_MFW_GET_FIELD(mb_params.param,
3367 				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3368 		   p_in_params->resc_max_val);
3369 
3370 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3371 	if (rc)
3372 		return rc;
3373 
3374 	p_out_params->mcp_resp = mb_params.mcp_resp;
3375 	p_out_params->mcp_param = mb_params.mcp_param;
3376 	p_out_params->resc_num = mfw_resc_info.size;
3377 	p_out_params->resc_start = mfw_resc_info.offset;
3378 	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3379 	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3380 	p_out_params->flags = mfw_resc_info.flags;
3381 
3382 	DP_VERBOSE(p_hwfn,
3383 		   QED_MSG_SP,
3384 		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3385 		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3386 				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3387 		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3388 				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3389 		   p_out_params->resc_num,
3390 		   p_out_params->resc_start,
3391 		   p_out_params->vf_resc_num,
3392 		   p_out_params->vf_resc_start, p_out_params->flags);
3393 
3394 	return 0;
3395 }
3396 
3397 int
3398 qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
3399 			 struct qed_ptt *p_ptt,
3400 			 enum qed_resources res_id,
3401 			 u32 resc_max_val, u32 *p_mcp_resp)
3402 {
3403 	struct qed_resc_alloc_out_params out_params;
3404 	struct qed_resc_alloc_in_params in_params;
3405 	int rc;
3406 
3407 	memset(&in_params, 0, sizeof(in_params));
3408 	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3409 	in_params.res_id = res_id;
3410 	in_params.resc_max_val = resc_max_val;
3411 	memset(&out_params, 0, sizeof(out_params));
3412 	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3413 					 &out_params);
3414 	if (rc)
3415 		return rc;
3416 
3417 	*p_mcp_resp = out_params.mcp_resp;
3418 
3419 	return 0;
3420 }
3421 
3422 int
3423 qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
3424 		      struct qed_ptt *p_ptt,
3425 		      enum qed_resources res_id,
3426 		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
3427 {
3428 	struct qed_resc_alloc_out_params out_params;
3429 	struct qed_resc_alloc_in_params in_params;
3430 	int rc;
3431 
3432 	memset(&in_params, 0, sizeof(in_params));
3433 	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3434 	in_params.res_id = res_id;
3435 	memset(&out_params, 0, sizeof(out_params));
3436 	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3437 					 &out_params);
3438 	if (rc)
3439 		return rc;
3440 
3441 	*p_mcp_resp = out_params.mcp_resp;
3442 
3443 	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3444 		*p_resc_num = out_params.resc_num;
3445 		*p_resc_start = out_params.resc_start;
3446 	}
3447 
3448 	return 0;
3449 }
3450 
3451 int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3452 {
3453 	u32 mcp_resp, mcp_param;
3454 
3455 	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3456 			   &mcp_resp, &mcp_param);
3457 }
3458 
3459 static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
3460 				struct qed_ptt *p_ptt,
3461 				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
3462 {
3463 	int rc;
3464 
3465 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3466 			 p_mcp_resp, p_mcp_param);
3467 	if (rc)
3468 		return rc;
3469 
3470 	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3471 		DP_INFO(p_hwfn,
3472 			"The resource command is unsupported by the MFW\n");
3473 		return -EINVAL;
3474 	}
3475 
3476 	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3477 		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3478 
3479 		DP_NOTICE(p_hwfn,
3480 			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3481 			  param, opcode);
3482 		return -EINVAL;
3483 	}
3484 
3485 	return rc;
3486 }
3487 
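/* Single lock attempt: encode the resource, opcode and aging timeout into the
 * request param, issue the resource command, and translate the response into
 * p_params->owner and p_params->b_granted (granted vs. busy).
 */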
3488 static int
3489 __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3490 		    struct qed_ptt *p_ptt,
3491 		    struct qed_resc_lock_params *p_params)
3492 {
3493 	u32 param = 0, mcp_resp, mcp_param;
3494 	u8 opcode;
3495 	int rc;
3496 
3497 	switch (p_params->timeout) {
3498 	case QED_MCP_RESC_LOCK_TO_DEFAULT:
3499 		opcode = RESOURCE_OPCODE_REQ;
3500 		p_params->timeout = 0;
3501 		break;
3502 	case QED_MCP_RESC_LOCK_TO_NONE:
3503 		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3504 		p_params->timeout = 0;
3505 		break;
3506 	default:
3507 		opcode = RESOURCE_OPCODE_REQ_W_AGING;
3508 		break;
3509 	}
3510 
3511 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3512 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3513 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3514 
3515 	DP_VERBOSE(p_hwfn,
3516 		   QED_MSG_SP,
3517 		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3518 		   param, p_params->timeout, opcode, p_params->resource);
3519 
3520 	/* Attempt to acquire the resource */
3521 	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3522 	if (rc)
3523 		return rc;
3524 
3525 	/* Analyze the response */
3526 	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3527 	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3528 
3529 	DP_VERBOSE(p_hwfn,
3530 		   QED_MSG_SP,
3531 		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3532 		   mcp_param, opcode, p_params->owner);
3533 
3534 	switch (opcode) {
3535 	case RESOURCE_OPCODE_GNT:
3536 		p_params->b_granted = true;
3537 		break;
3538 	case RESOURCE_OPCODE_BUSY:
3539 		p_params->b_granted = false;
3540 		break;
3541 	default:
3542 		DP_NOTICE(p_hwfn,
3543 			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3544 			  mcp_param, opcode);
3545 		return -EINVAL;
3546 	}
3547 
3548 	return 0;
3549 }
3550 
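/* Retry wrapper around __qed_mcp_resc_lock(): the lock is re-attempted up to
 * p_params->retry_num times, sleeping or busy-waiting for
 * p_params->retry_interval microseconds between attempts depending on
 * p_params->sleep_b4_retry. Note that 0 is returned even if the lock was
 * never granted; callers must check p_params->b_granted.
 *
 * A minimal caller-side sketch (the resource id is only illustrative):
 *
 *	struct qed_resc_unlock_params unlock_params;
 *	struct qed_resc_lock_params lock_params;
 *	int rc;
 *
 *	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *				       QED_RESC_LOCK_DBG_DUMP, false);
 *	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
 *	if (rc || !lock_params.b_granted)
 *		return rc ? rc : -EBUSY;
 *	... critical section ...
 *	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 */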
3551 int
3552 qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3553 		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3554 {
3555 	u32 retry_cnt = 0;
3556 	int rc;
3557 
3558 	do {
3559 		/* No need for an interval before the first iteration */
3560 		if (retry_cnt) {
3561 			if (p_params->sleep_b4_retry) {
3562 				u16 retry_interval_in_ms =
3563 				    DIV_ROUND_UP(p_params->retry_interval,
3564 						 1000);
3565 
3566 				msleep(retry_interval_in_ms);
3567 			} else {
3568 				udelay(p_params->retry_interval);
3569 			}
3570 		}
3571 
3572 		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3573 		if (rc)
3574 			return rc;
3575 
3576 		if (p_params->b_granted)
3577 			break;
3578 	} while (retry_cnt++ < p_params->retry_num);
3579 
3580 	return 0;
3581 }
3582 
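/* Release a previously acquired MFW resource lock, or force-release it when
 * p_params->b_force is set. p_params->b_released reports whether the MFW
 * actually considers the resource released.
 */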
3583 int
3584 qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3585 		    struct qed_ptt *p_ptt,
3586 		    struct qed_resc_unlock_params *p_params)
3587 {
3588 	u32 param = 0, mcp_resp, mcp_param;
3589 	u8 opcode;
3590 	int rc;
3591 
3592 	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3593 				   : RESOURCE_OPCODE_RELEASE;
3594 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3595 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3596 
3597 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
3598 		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3599 		   param, opcode, p_params->resource);
3600 
3601 	/* Attempt to release the resource */
3602 	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3603 	if (rc)
3604 		return rc;
3605 
3606 	/* Analyze the response */
3607 	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3608 
3609 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
3610 		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3611 		   mcp_param, opcode);
3612 
3613 	switch (opcode) {
3614 	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3615 		DP_INFO(p_hwfn,
3616 			"Resource unlock request for an already released resource [%d]\n",
3617 			p_params->resource);
3618 		fallthrough;
3619 	case RESOURCE_OPCODE_RELEASED:
3620 		p_params->b_released = true;
3621 		break;
3622 	case RESOURCE_OPCODE_WRONG_OWNER:
3623 		p_params->b_released = false;
3624 		break;
3625 	default:
3626 		DP_NOTICE(p_hwfn,
3627 			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3628 			  mcp_param, opcode);
3629 		return -EINVAL;
3630 	}
3631 
3632 	return 0;
3633 }
3634 
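/* Fill the lock/unlock parameter structs with the default policy for
 * @resource: permanent resources are requested once without aging, while
 * others get the default retry count and interval with sleeping between
 * retries.
 */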
3635 void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3636 				    struct qed_resc_unlock_params *p_unlock,
3637 				    enum qed_resc_lock resource,
3638 				    bool b_is_permanent)
3639 {
3640 	if (p_lock) {
3641 		memset(p_lock, 0, sizeof(*p_lock));
3642 
3643 		/* Permanent resources don't require aging, and there's no
3644 		 * point in trying to acquire them more than once, since no
3645 		 * other entity is expected to ever release them.
3646 		 */
3647 		if (b_is_permanent) {
3648 			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3649 		} else {
3650 			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3651 			p_lock->retry_interval =
3652 			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3653 			p_lock->sleep_b4_retry = true;
3654 		}
3655 
3656 		p_lock->resource = resource;
3657 	}
3658 
3659 	if (p_unlock) {
3660 		memset(p_unlock, 0, sizeof(*p_unlock));
3661 		p_unlock->resource = resource;
3662 	}
3663 }
3664 
3665 bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
3666 {
3667 	return !!(p_hwfn->mcp_info->capabilities &
3668 		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3669 }
3670 
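/* Read the MFW feature-support bitmap and cache it in mcp_info->capabilities
 * for later checks such as qed_mcp_is_smart_an_supported().
 */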
3671 int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3672 {
3673 	u32 mcp_resp;
3674 	int rc;
3675 
3676 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3677 			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3678 	if (!rc)
3679 		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3680 			   "MFW supported features: %08x\n",
3681 			   p_hwfn->mcp_info->capabilities);
3682 
3683 	return rc;
3684 }
3685 
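/* Advertise the features this driver supports (port EEE and function
 * virtual link) to the MFW.
 */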
3686 int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3687 {
3688 	u32 mcp_resp, mcp_param, features;
3689 
3690 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3691 		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
3692 
3693 	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3694 			   features, &mcp_resp, &mcp_param);
3695 }
3696 
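/* Query the MFW for the engine affinity configuration and, when the
 * corresponding fields are reported as valid, cache the FIR affinity and L2
 * affinity hint in the qed_dev. Returns -EOPNOTSUPP if the MFW doesn't
 * support the command.
 */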
3697 int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3698 {
3699 	struct qed_mcp_mb_params mb_params = {0};
3700 	struct qed_dev *cdev = p_hwfn->cdev;
3701 	u8 fir_valid, l2_valid;
3702 	int rc;
3703 
3704 	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
3705 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3706 	if (rc)
3707 		return rc;
3708 
3709 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3710 		DP_INFO(p_hwfn,
3711 			"The get_engine_config command is unsupported by the MFW\n");
3712 		return -EOPNOTSUPP;
3713 	}
3714 
3715 	fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3716 				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
3717 	if (fir_valid)
3718 		cdev->fir_affin =
3719 		    QED_MFW_GET_FIELD(mb_params.mcp_param,
3720 				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
3721 
3722 	l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3723 				     FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
3724 	if (l2_valid)
3725 		cdev->l2_affin_hint =
3726 		    QED_MFW_GET_FIELD(mb_params.mcp_param,
3727 				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
3728 
3729 	DP_INFO(p_hwfn,
3730 		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
3731 		fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
3732 
3733 	return 0;
3734 }
3735 
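/* Retrieve the PPFID bitmap from the MFW and cache it in the qed_dev.
 * Returns -EOPNOTSUPP if the MFW doesn't support the command.
 */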
3736 int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3737 {
3738 	struct qed_mcp_mb_params mb_params = {0};
3739 	struct qed_dev *cdev = p_hwfn->cdev;
3740 	int rc;
3741 
3742 	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
3743 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3744 	if (rc)
3745 		return rc;
3746 
3747 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3748 		DP_INFO(p_hwfn,
3749 			"The get_ppfid_bitmap command is unsupported by the MFW\n");
3750 		return -EOPNOTSUPP;
3751 	}
3752 
3753 	cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
3754 					       FW_MB_PARAM_PPFID_BITMAP);
3755 
3756 	DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
3757 		   cdev->ppfid_bitmap);
3758 
3759 	return 0;
3760 }
3761 
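/* Read an NVM config option via GET_NVM_CFG_OPTION. The QED_NVM_CFG_OPTION_*
 * bits in @flags are translated into the corresponding mailbox param fields
 * (INIT, FREE and optional per-entity selection), and the option data is
 * returned in @p_buf with its length in @p_len.
 */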
3762 int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3763 			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3764 			u32 *p_len)
3765 {
3766 	u32 mb_param = 0, resp, param;
3767 	int rc;
3768 
3769 	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3770 	if (flags & QED_NVM_CFG_OPTION_INIT)
3771 		QED_MFW_SET_FIELD(mb_param,
3772 				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3773 	if (flags & QED_NVM_CFG_OPTION_FREE)
3774 		QED_MFW_SET_FIELD(mb_param,
3775 				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3776 	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3777 		QED_MFW_SET_FIELD(mb_param,
3778 				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3779 		QED_MFW_SET_FIELD(mb_param,
3780 				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3781 				  entity_id);
3782 	}
3783 
3784 	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3785 				DRV_MSG_CODE_GET_NVM_CFG_OPTION,
3786 				mb_param, &resp, &param, p_len, (u32 *)p_buf);
3787 
3788 	return rc;
3789 }
3790 
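/* Write an NVM config option via SET_NVM_CFG_OPTION. As in the read path, the
 * QED_NVM_CFG_OPTION_* bits in @flags (ALL, INIT, COMMIT, FREE and optional
 * per-entity selection) are translated into the mailbox param fields, and the
 * payload in @p_buf of length @len is sent as the data buffer.
 */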
3791 int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3792 			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3793 			u32 len)
3794 {
3795 	u32 mb_param = 0, resp, param;
3796 
3797 	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3798 	if (flags & QED_NVM_CFG_OPTION_ALL)
3799 		QED_MFW_SET_FIELD(mb_param,
3800 				  DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
3801 	if (flags & QED_NVM_CFG_OPTION_INIT)
3802 		QED_MFW_SET_FIELD(mb_param,
3803 				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3804 	if (flags & QED_NVM_CFG_OPTION_COMMIT)
3805 		QED_MFW_SET_FIELD(mb_param,
3806 				  DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
3807 	if (flags & QED_NVM_CFG_OPTION_FREE)
3808 		QED_MFW_SET_FIELD(mb_param,
3809 				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3810 	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3811 		QED_MFW_SET_FIELD(mb_param,
3812 				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3813 		QED_MFW_SET_FIELD(mb_param,
3814 				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3815 				  entity_id);
3816 	}
3817 
3818 	return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3819 				  DRV_MSG_CODE_SET_NVM_CFG_OPTION,
3820 				  mb_param, &resp, &param, len, (u32 *)p_buf);
3821 }
3822