1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "i40e_type.h"
5 #include "i40e_adminq.h"
6 #include "i40e_prototype.h"
7 #include <linux/avf/virtchnl.h>
8 
9 /**
10  * i40e_set_mac_type - Sets MAC type
11  * @hw: pointer to the HW structure
12  *
13  * This function sets the mac type of the adapter based on the
14  * vendor ID and device ID stored in the hw structure.
15  **/
16 static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
17 {
18 	i40e_status status = 0;
19 
20 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
21 		switch (hw->device_id) {
22 		case I40E_DEV_ID_SFP_XL710:
23 		case I40E_DEV_ID_QEMU:
24 		case I40E_DEV_ID_KX_B:
25 		case I40E_DEV_ID_KX_C:
26 		case I40E_DEV_ID_QSFP_A:
27 		case I40E_DEV_ID_QSFP_B:
28 		case I40E_DEV_ID_QSFP_C:
29 		case I40E_DEV_ID_10G_BASE_T:
30 		case I40E_DEV_ID_10G_BASE_T4:
31 		case I40E_DEV_ID_10G_B:
32 		case I40E_DEV_ID_10G_SFP:
33 		case I40E_DEV_ID_20G_KR2:
34 		case I40E_DEV_ID_20G_KR2_A:
35 		case I40E_DEV_ID_25G_B:
36 		case I40E_DEV_ID_25G_SFP28:
37 		case I40E_DEV_ID_X710_N3000:
38 		case I40E_DEV_ID_XXV710_N3000:
39 			hw->mac.type = I40E_MAC_XL710;
40 			break;
41 		case I40E_DEV_ID_KX_X722:
42 		case I40E_DEV_ID_QSFP_X722:
43 		case I40E_DEV_ID_SFP_X722:
44 		case I40E_DEV_ID_1G_BASE_T_X722:
45 		case I40E_DEV_ID_10G_BASE_T_X722:
46 		case I40E_DEV_ID_SFP_I_X722:
47 			hw->mac.type = I40E_MAC_X722;
48 			break;
49 		default:
50 			hw->mac.type = I40E_MAC_GENERIC;
51 			break;
52 		}
53 	} else {
54 		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
55 	}
56 
57 	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
58 		  hw->mac.type, status);
59 	return status;
60 }
61 
62 /**
63  * i40e_aq_str - convert AQ err code to a string
64  * @hw: pointer to the HW structure
65  * @aq_err: the AQ error code to convert
66  **/
67 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
68 {
69 	switch (aq_err) {
70 	case I40E_AQ_RC_OK:
71 		return "OK";
72 	case I40E_AQ_RC_EPERM:
73 		return "I40E_AQ_RC_EPERM";
74 	case I40E_AQ_RC_ENOENT:
75 		return "I40E_AQ_RC_ENOENT";
76 	case I40E_AQ_RC_ESRCH:
77 		return "I40E_AQ_RC_ESRCH";
78 	case I40E_AQ_RC_EINTR:
79 		return "I40E_AQ_RC_EINTR";
80 	case I40E_AQ_RC_EIO:
81 		return "I40E_AQ_RC_EIO";
82 	case I40E_AQ_RC_ENXIO:
83 		return "I40E_AQ_RC_ENXIO";
84 	case I40E_AQ_RC_E2BIG:
85 		return "I40E_AQ_RC_E2BIG";
86 	case I40E_AQ_RC_EAGAIN:
87 		return "I40E_AQ_RC_EAGAIN";
88 	case I40E_AQ_RC_ENOMEM:
89 		return "I40E_AQ_RC_ENOMEM";
90 	case I40E_AQ_RC_EACCES:
91 		return "I40E_AQ_RC_EACCES";
92 	case I40E_AQ_RC_EFAULT:
93 		return "I40E_AQ_RC_EFAULT";
94 	case I40E_AQ_RC_EBUSY:
95 		return "I40E_AQ_RC_EBUSY";
96 	case I40E_AQ_RC_EEXIST:
97 		return "I40E_AQ_RC_EEXIST";
98 	case I40E_AQ_RC_EINVAL:
99 		return "I40E_AQ_RC_EINVAL";
100 	case I40E_AQ_RC_ENOTTY:
101 		return "I40E_AQ_RC_ENOTTY";
102 	case I40E_AQ_RC_ENOSPC:
103 		return "I40E_AQ_RC_ENOSPC";
104 	case I40E_AQ_RC_ENOSYS:
105 		return "I40E_AQ_RC_ENOSYS";
106 	case I40E_AQ_RC_ERANGE:
107 		return "I40E_AQ_RC_ERANGE";
108 	case I40E_AQ_RC_EFLUSHED:
109 		return "I40E_AQ_RC_EFLUSHED";
110 	case I40E_AQ_RC_BAD_ADDR:
111 		return "I40E_AQ_RC_BAD_ADDR";
112 	case I40E_AQ_RC_EMODE:
113 		return "I40E_AQ_RC_EMODE";
114 	case I40E_AQ_RC_EFBIG:
115 		return "I40E_AQ_RC_EFBIG";
116 	}
117 
118 	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
119 	return hw->err_str;
120 }
121 
122 /**
123  * i40e_stat_str - convert status err code to a string
124  * @hw: pointer to the HW structure
125  * @stat_err: the status error code to convert
126  **/
127 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
128 {
129 	switch (stat_err) {
130 	case 0:
131 		return "OK";
132 	case I40E_ERR_NVM:
133 		return "I40E_ERR_NVM";
134 	case I40E_ERR_NVM_CHECKSUM:
135 		return "I40E_ERR_NVM_CHECKSUM";
136 	case I40E_ERR_PHY:
137 		return "I40E_ERR_PHY";
138 	case I40E_ERR_CONFIG:
139 		return "I40E_ERR_CONFIG";
140 	case I40E_ERR_PARAM:
141 		return "I40E_ERR_PARAM";
142 	case I40E_ERR_MAC_TYPE:
143 		return "I40E_ERR_MAC_TYPE";
144 	case I40E_ERR_UNKNOWN_PHY:
145 		return "I40E_ERR_UNKNOWN_PHY";
146 	case I40E_ERR_LINK_SETUP:
147 		return "I40E_ERR_LINK_SETUP";
148 	case I40E_ERR_ADAPTER_STOPPED:
149 		return "I40E_ERR_ADAPTER_STOPPED";
150 	case I40E_ERR_INVALID_MAC_ADDR:
151 		return "I40E_ERR_INVALID_MAC_ADDR";
152 	case I40E_ERR_DEVICE_NOT_SUPPORTED:
153 		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
154 	case I40E_ERR_MASTER_REQUESTS_PENDING:
155 		return "I40E_ERR_MASTER_REQUESTS_PENDING";
156 	case I40E_ERR_INVALID_LINK_SETTINGS:
157 		return "I40E_ERR_INVALID_LINK_SETTINGS";
158 	case I40E_ERR_AUTONEG_NOT_COMPLETE:
159 		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
160 	case I40E_ERR_RESET_FAILED:
161 		return "I40E_ERR_RESET_FAILED";
162 	case I40E_ERR_SWFW_SYNC:
163 		return "I40E_ERR_SWFW_SYNC";
164 	case I40E_ERR_NO_AVAILABLE_VSI:
165 		return "I40E_ERR_NO_AVAILABLE_VSI";
166 	case I40E_ERR_NO_MEMORY:
167 		return "I40E_ERR_NO_MEMORY";
168 	case I40E_ERR_BAD_PTR:
169 		return "I40E_ERR_BAD_PTR";
170 	case I40E_ERR_RING_FULL:
171 		return "I40E_ERR_RING_FULL";
172 	case I40E_ERR_INVALID_PD_ID:
173 		return "I40E_ERR_INVALID_PD_ID";
174 	case I40E_ERR_INVALID_QP_ID:
175 		return "I40E_ERR_INVALID_QP_ID";
176 	case I40E_ERR_INVALID_CQ_ID:
177 		return "I40E_ERR_INVALID_CQ_ID";
178 	case I40E_ERR_INVALID_CEQ_ID:
179 		return "I40E_ERR_INVALID_CEQ_ID";
180 	case I40E_ERR_INVALID_AEQ_ID:
181 		return "I40E_ERR_INVALID_AEQ_ID";
182 	case I40E_ERR_INVALID_SIZE:
183 		return "I40E_ERR_INVALID_SIZE";
184 	case I40E_ERR_INVALID_ARP_INDEX:
185 		return "I40E_ERR_INVALID_ARP_INDEX";
186 	case I40E_ERR_INVALID_FPM_FUNC_ID:
187 		return "I40E_ERR_INVALID_FPM_FUNC_ID";
188 	case I40E_ERR_QP_INVALID_MSG_SIZE:
189 		return "I40E_ERR_QP_INVALID_MSG_SIZE";
190 	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
191 		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
192 	case I40E_ERR_INVALID_FRAG_COUNT:
193 		return "I40E_ERR_INVALID_FRAG_COUNT";
194 	case I40E_ERR_QUEUE_EMPTY:
195 		return "I40E_ERR_QUEUE_EMPTY";
196 	case I40E_ERR_INVALID_ALIGNMENT:
197 		return "I40E_ERR_INVALID_ALIGNMENT";
198 	case I40E_ERR_FLUSHED_QUEUE:
199 		return "I40E_ERR_FLUSHED_QUEUE";
200 	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
201 		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
202 	case I40E_ERR_INVALID_IMM_DATA_SIZE:
203 		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
204 	case I40E_ERR_TIMEOUT:
205 		return "I40E_ERR_TIMEOUT";
206 	case I40E_ERR_OPCODE_MISMATCH:
207 		return "I40E_ERR_OPCODE_MISMATCH";
208 	case I40E_ERR_CQP_COMPL_ERROR:
209 		return "I40E_ERR_CQP_COMPL_ERROR";
210 	case I40E_ERR_INVALID_VF_ID:
211 		return "I40E_ERR_INVALID_VF_ID";
212 	case I40E_ERR_INVALID_HMCFN_ID:
213 		return "I40E_ERR_INVALID_HMCFN_ID";
214 	case I40E_ERR_BACKING_PAGE_ERROR:
215 		return "I40E_ERR_BACKING_PAGE_ERROR";
216 	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
217 		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
218 	case I40E_ERR_INVALID_PBLE_INDEX:
219 		return "I40E_ERR_INVALID_PBLE_INDEX";
220 	case I40E_ERR_INVALID_SD_INDEX:
221 		return "I40E_ERR_INVALID_SD_INDEX";
222 	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
223 		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
224 	case I40E_ERR_INVALID_SD_TYPE:
225 		return "I40E_ERR_INVALID_SD_TYPE";
226 	case I40E_ERR_MEMCPY_FAILED:
227 		return "I40E_ERR_MEMCPY_FAILED";
228 	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
229 		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
230 	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
231 		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
232 	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
233 		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
234 	case I40E_ERR_SRQ_ENABLED:
235 		return "I40E_ERR_SRQ_ENABLED";
236 	case I40E_ERR_ADMIN_QUEUE_ERROR:
237 		return "I40E_ERR_ADMIN_QUEUE_ERROR";
238 	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
239 		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
240 	case I40E_ERR_BUF_TOO_SHORT:
241 		return "I40E_ERR_BUF_TOO_SHORT";
242 	case I40E_ERR_ADMIN_QUEUE_FULL:
243 		return "I40E_ERR_ADMIN_QUEUE_FULL";
244 	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
245 		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
246 	case I40E_ERR_BAD_IWARP_CQE:
247 		return "I40E_ERR_BAD_IWARP_CQE";
248 	case I40E_ERR_NVM_BLANK_MODE:
249 		return "I40E_ERR_NVM_BLANK_MODE";
250 	case I40E_ERR_NOT_IMPLEMENTED:
251 		return "I40E_ERR_NOT_IMPLEMENTED";
252 	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
253 		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
254 	case I40E_ERR_DIAG_TEST_FAILED:
255 		return "I40E_ERR_DIAG_TEST_FAILED";
256 	case I40E_ERR_NOT_READY:
257 		return "I40E_ERR_NOT_READY";
258 	case I40E_NOT_SUPPORTED:
259 		return "I40E_NOT_SUPPORTED";
260 	case I40E_ERR_FIRMWARE_API_VERSION:
261 		return "I40E_ERR_FIRMWARE_API_VERSION";
262 	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
263 		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
264 	}
265 
266 	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
267 	return hw->err_str;
268 }
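
/* Example (illustrative, not part of the driver): callers typically pair
 * i40e_stat_str() with i40e_aq_str() when logging an admin queue failure.
 * The 'pf', 'config' and 'ret' names below are assumed from a typical
 * calling context:
 *
 *	ret = i40e_aq_set_phy_config(&pf->hw, &config, NULL);
 *	if (ret)
 *		dev_info(&pf->pdev->dev,
 *			 "set phy config failed, err %s aq_err %s\n",
 *			 i40e_stat_str(&pf->hw, ret),
 *			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 */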
269 
270 /**
271  * i40e_debug_aq
272  * @hw: pointer to the hw struct
273  * @mask: debug mask
274  * @desc: pointer to admin queue descriptor
275  * @buffer: pointer to command buffer
276  * @buf_len: max length of buffer
277  *
278  * Dumps debug log about adminq command with descriptor contents.
279  **/
280 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
281 		   void *buffer, u16 buf_len)
282 {
283 	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
284 	u16 len;
285 	u8 *buf = (u8 *)buffer;
286 
287 	if ((!(mask & hw->debug_mask)) || (desc == NULL))
288 		return;
289 
290 	len = le16_to_cpu(aq_desc->datalen);
291 
292 	i40e_debug(hw, mask,
293 		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
294 		   le16_to_cpu(aq_desc->opcode),
295 		   le16_to_cpu(aq_desc->flags),
296 		   le16_to_cpu(aq_desc->datalen),
297 		   le16_to_cpu(aq_desc->retval));
298 	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
299 		   le32_to_cpu(aq_desc->cookie_high),
300 		   le32_to_cpu(aq_desc->cookie_low));
301 	i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
302 		   le32_to_cpu(aq_desc->params.internal.param0),
303 		   le32_to_cpu(aq_desc->params.internal.param1));
304 	i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
305 		   le32_to_cpu(aq_desc->params.external.addr_high),
306 		   le32_to_cpu(aq_desc->params.external.addr_low));
307 
308 	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
309 		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
310 		if (buf_len < len)
311 			len = buf_len;
312 		/* write the full 16-byte chunks */
313 		if (hw->debug_mask & mask) {
314 			char prefix[27];
315 
316 			snprintf(prefix, sizeof(prefix),
317 				 "i40e %02x:%02x.%x: \t0x",
318 				 hw->bus.bus_id,
319 				 hw->bus.device,
320 				 hw->bus.func);
321 
322 			print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
323 				       16, 1, buf, len, false);
324 		}
325 	}
326 }
327 
328 /**
329  * i40e_check_asq_alive
330  * @hw: pointer to the hw struct
331  *
332  * Returns true if the admin send queue is enabled, else false.
333  **/
334 bool i40e_check_asq_alive(struct i40e_hw *hw)
335 {
336 	if (hw->aq.asq.len)
337 		return !!(rd32(hw, hw->aq.asq.len) &
338 			  I40E_PF_ATQLEN_ATQENABLE_MASK);
339 	else
340 		return false;
341 }
342 
343 /**
344  * i40e_aq_queue_shutdown
345  * @hw: pointer to the hw struct
346  * @unloading: is the driver unloading itself
347  *
348  * Tell the Firmware that we're shutting down the AdminQ and whether
349  * or not the driver is unloading as well.
350  **/
351 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
352 					     bool unloading)
353 {
354 	struct i40e_aq_desc desc;
355 	struct i40e_aqc_queue_shutdown *cmd =
356 		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
357 	i40e_status status;
358 
359 	i40e_fill_default_direct_cmd_desc(&desc,
360 					  i40e_aqc_opc_queue_shutdown);
361 
362 	if (unloading)
363 		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
364 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
365 
366 	return status;
367 }
368 
369 /**
370  * i40e_aq_get_set_rss_lut
371  * @hw: pointer to the hardware structure
372  * @vsi_id: vsi fw index
373  * @pf_lut: for PF table set true, for VSI table set false
374  * @lut: pointer to the lut buffer provided by the caller
375  * @lut_size: size of the lut buffer
376  * @set: set true to set the table, false to get the table
377  *
378  * Internal function to get or set the RSS lookup table
379  **/
380 static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
381 					   u16 vsi_id, bool pf_lut,
382 					   u8 *lut, u16 lut_size,
383 					   bool set)
384 {
385 	i40e_status status;
386 	struct i40e_aq_desc desc;
387 	struct i40e_aqc_get_set_rss_lut *cmd_resp =
388 		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
389 
390 	if (set)
391 		i40e_fill_default_direct_cmd_desc(&desc,
392 						  i40e_aqc_opc_set_rss_lut);
393 	else
394 		i40e_fill_default_direct_cmd_desc(&desc,
395 						  i40e_aqc_opc_get_rss_lut);
396 
397 	/* Indirect command */
398 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
399 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
400 
401 	cmd_resp->vsi_id =
402 			cpu_to_le16((u16)((vsi_id <<
403 					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
404 					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
405 	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
406 
407 	if (pf_lut)
408 		cmd_resp->flags |= cpu_to_le16((u16)
409 					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
410 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
411 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
412 	else
413 		cmd_resp->flags |= cpu_to_le16((u16)
414 					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
415 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
416 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
417 
418 	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
419 
420 	return status;
421 }
422 
423 /**
424  * i40e_aq_get_rss_lut
425  * @hw: pointer to the hardware structure
426  * @vsi_id: vsi fw index
427  * @pf_lut: for PF table set true, for VSI table set false
428  * @lut: pointer to the lut buffer provided by the caller
429  * @lut_size: size of the lut buffer
430  *
431  * get the RSS lookup table, PF or VSI type
432  **/
433 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
434 				bool pf_lut, u8 *lut, u16 lut_size)
435 {
436 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
437 				       false);
438 }
439 
440 /**
441  * i40e_aq_set_rss_lut
442  * @hw: pointer to the hardware structure
443  * @vsi_id: vsi fw index
444  * @pf_lut: for PF table set true, for VSI table set false
445  * @lut: pointer to the lut buffer provided by the caller
446  * @lut_size: size of the lut buffer
447  *
448  * set the RSS lookup table, PF or VSI type
449  **/
450 i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
451 				bool pf_lut, u8 *lut, u16 lut_size)
452 {
453 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
454 }
455 
456 /**
457  * i40e_aq_get_set_rss_key
458  * @hw: pointer to the hw struct
459  * @vsi_id: vsi fw index
460  * @key: pointer to key info struct
461  * @set: set true to set the key, false to get the key
462  *
463  * Internal function to get or set the RSS key per VSI
464  **/
465 static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
466 				      u16 vsi_id,
467 				      struct i40e_aqc_get_set_rss_key_data *key,
468 				      bool set)
469 {
470 	i40e_status status;
471 	struct i40e_aq_desc desc;
472 	struct i40e_aqc_get_set_rss_key *cmd_resp =
473 			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
474 	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
475 
476 	if (set)
477 		i40e_fill_default_direct_cmd_desc(&desc,
478 						  i40e_aqc_opc_set_rss_key);
479 	else
480 		i40e_fill_default_direct_cmd_desc(&desc,
481 						  i40e_aqc_opc_get_rss_key);
482 
483 	/* Indirect command */
484 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
485 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
486 
487 	cmd_resp->vsi_id =
488 			cpu_to_le16((u16)((vsi_id <<
489 					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
490 					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
491 	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
492 
493 	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
494 
495 	return status;
496 }
497 
498 /**
499  * i40e_aq_get_rss_key
500  * @hw: pointer to the hw struct
501  * @vsi_id: vsi fw index
502  * @key: pointer to key info struct
503  *
504  **/
505 i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
506 				u16 vsi_id,
507 				struct i40e_aqc_get_set_rss_key_data *key)
508 {
509 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
510 }
511 
512 /**
513  * i40e_aq_set_rss_key
514  * @hw: pointer to the hw struct
515  * @vsi_id: vsi fw index
516  * @key: pointer to key info struct
517  *
518  * set the RSS key per VSI
519  **/
520 i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
521 				u16 vsi_id,
522 				struct i40e_aqc_get_set_rss_key_data *key)
523 {
524 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
525 }
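
/* Example (illustrative sketch): a caller that has already filled a hash
 * key and a LUT buffer can program both through the wrappers above. The
 * 'seed', 'lut', 'lut_size' and 'vsi_id' names are assumed from the
 * calling context:
 *
 *	ret = i40e_aq_set_rss_key(hw, vsi_id,
 *			(struct i40e_aqc_get_set_rss_key_data *)seed);
 *	if (!ret)
 *		ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 */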
526 
527 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
528  * hardware to a bit-field that can be used by SW to more easily determine the
529  * packet type.
530  *
531  * Macros are used to shorten the table lines and make this table human
532  * readable.
533  *
534  * We store the PTYPE in the top byte of the bit field - this is just so that
535  * we can check that the table doesn't have a row missing, as the index into
536  * the table should be the PTYPE.
537  *
538  * Typical work flow:
539  *
540  * IF NOT i40e_ptype_lookup[ptype].known
541  * THEN
542  *      Packet is unknown
543  * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
544  *      Use the rest of the fields to look at the tunnels, inner protocols, etc
545  * ELSE
546  *      Use the enum i40e_rx_l2_ptype to decode the packet type
547  * ENDIF
548  */
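
/* Example (illustrative sketch of the work flow above; 'ptype' is the
 * 8-bit value from the Rx descriptor and the field names are assumed to
 * match struct i40e_rx_ptype_decoded in i40e_type.h):
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
 *		look at decoded.tunnel_type, decoded.inner_prot, etc.
 *	else
 *		decode the L2 type via enum i40e_rx_l2_ptype
 */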
549 
550 /* macro to make the table lines short */
551 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
552 	{	PTYPE, \
553 		1, \
554 		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
555 		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
556 		I40E_RX_PTYPE_##OUTER_FRAG, \
557 		I40E_RX_PTYPE_TUNNEL_##T, \
558 		I40E_RX_PTYPE_TUNNEL_END_##TE, \
559 		I40E_RX_PTYPE_##TEF, \
560 		I40E_RX_PTYPE_INNER_PROT_##I, \
561 		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
562 
563 #define I40E_PTT_UNUSED_ENTRY(PTYPE) \
564 		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
565 
566 /* shorter macros make the table fit but are terse */
567 #define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
568 #define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
569 #define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC
570 
571 /* Lookup table mapping the HW PTYPE to the bit field for decoding */
572 struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
573 	/* L2 Packet types */
574 	I40E_PTT_UNUSED_ENTRY(0),
575 	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
576 	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
577 	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
578 	I40E_PTT_UNUSED_ENTRY(4),
579 	I40E_PTT_UNUSED_ENTRY(5),
580 	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
581 	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
582 	I40E_PTT_UNUSED_ENTRY(8),
583 	I40E_PTT_UNUSED_ENTRY(9),
584 	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
585 	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
586 	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
587 	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
588 	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
589 	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
590 	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
591 	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
592 	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
593 	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
594 	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
595 	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
596 
597 	/* Non Tunneled IPv4 */
598 	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
599 	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
600 	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
601 	I40E_PTT_UNUSED_ENTRY(25),
602 	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
603 	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
604 	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
605 
606 	/* IPv4 --> IPv4 */
607 	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
608 	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
609 	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
610 	I40E_PTT_UNUSED_ENTRY(32),
611 	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
612 	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
613 	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
614 
615 	/* IPv4 --> IPv6 */
616 	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
617 	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
618 	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
619 	I40E_PTT_UNUSED_ENTRY(39),
620 	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
621 	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
622 	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
623 
624 	/* IPv4 --> GRE/NAT */
625 	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
626 
627 	/* IPv4 --> GRE/NAT --> IPv4 */
628 	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
629 	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
630 	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
631 	I40E_PTT_UNUSED_ENTRY(47),
632 	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
633 	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
634 	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
635 
636 	/* IPv4 --> GRE/NAT --> IPv6 */
637 	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
638 	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
639 	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
640 	I40E_PTT_UNUSED_ENTRY(54),
641 	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
642 	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
643 	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
644 
645 	/* IPv4 --> GRE/NAT --> MAC */
646 	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
647 
648 	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
649 	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
650 	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
651 	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
652 	I40E_PTT_UNUSED_ENTRY(62),
653 	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
654 	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
655 	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
656 
657 	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
658 	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
659 	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
660 	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
661 	I40E_PTT_UNUSED_ENTRY(69),
662 	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
663 	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
664 	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
665 
666 	/* IPv4 --> GRE/NAT --> MAC/VLAN */
667 	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
668 
669 	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
670 	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
671 	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
672 	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
673 	I40E_PTT_UNUSED_ENTRY(77),
674 	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
675 	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
676 	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
677 
678 	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
679 	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
680 	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
681 	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
682 	I40E_PTT_UNUSED_ENTRY(84),
683 	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
684 	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
685 	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
686 
687 	/* Non Tunneled IPv6 */
688 	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
689 	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
690 	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
691 	I40E_PTT_UNUSED_ENTRY(91),
692 	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
693 	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
694 	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
695 
696 	/* IPv6 --> IPv4 */
697 	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
698 	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
699 	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
700 	I40E_PTT_UNUSED_ENTRY(98),
701 	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
702 	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
703 	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
704 
705 	/* IPv6 --> IPv6 */
706 	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
707 	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
708 	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
709 	I40E_PTT_UNUSED_ENTRY(105),
710 	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
711 	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
712 	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
713 
714 	/* IPv6 --> GRE/NAT */
715 	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
716 
717 	/* IPv6 --> GRE/NAT -> IPv4 */
718 	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
719 	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
720 	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
721 	I40E_PTT_UNUSED_ENTRY(113),
722 	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
723 	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
724 	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
725 
726 	/* IPv6 --> GRE/NAT -> IPv6 */
727 	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
728 	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
729 	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
730 	I40E_PTT_UNUSED_ENTRY(120),
731 	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
732 	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
733 	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
734 
735 	/* IPv6 --> GRE/NAT -> MAC */
736 	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
737 
738 	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
739 	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
740 	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
741 	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
742 	I40E_PTT_UNUSED_ENTRY(128),
743 	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
744 	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
745 	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
746 
747 	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
748 	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
749 	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
750 	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
751 	I40E_PTT_UNUSED_ENTRY(135),
752 	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
753 	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
754 	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
755 
756 	/* IPv6 --> GRE/NAT -> MAC/VLAN */
757 	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
758 
759 	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
760 	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
761 	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
762 	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
763 	I40E_PTT_UNUSED_ENTRY(143),
764 	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
765 	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
766 	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
767 
768 	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
769 	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
770 	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
771 	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
772 	I40E_PTT_UNUSED_ENTRY(150),
773 	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
774 	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
775 	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
776 
777 	/* unused entries */
778 	I40E_PTT_UNUSED_ENTRY(154),
779 	I40E_PTT_UNUSED_ENTRY(155),
780 	I40E_PTT_UNUSED_ENTRY(156),
781 	I40E_PTT_UNUSED_ENTRY(157),
782 	I40E_PTT_UNUSED_ENTRY(158),
783 	I40E_PTT_UNUSED_ENTRY(159),
784 
785 	I40E_PTT_UNUSED_ENTRY(160),
786 	I40E_PTT_UNUSED_ENTRY(161),
787 	I40E_PTT_UNUSED_ENTRY(162),
788 	I40E_PTT_UNUSED_ENTRY(163),
789 	I40E_PTT_UNUSED_ENTRY(164),
790 	I40E_PTT_UNUSED_ENTRY(165),
791 	I40E_PTT_UNUSED_ENTRY(166),
792 	I40E_PTT_UNUSED_ENTRY(167),
793 	I40E_PTT_UNUSED_ENTRY(168),
794 	I40E_PTT_UNUSED_ENTRY(169),
795 
796 	I40E_PTT_UNUSED_ENTRY(170),
797 	I40E_PTT_UNUSED_ENTRY(171),
798 	I40E_PTT_UNUSED_ENTRY(172),
799 	I40E_PTT_UNUSED_ENTRY(173),
800 	I40E_PTT_UNUSED_ENTRY(174),
801 	I40E_PTT_UNUSED_ENTRY(175),
802 	I40E_PTT_UNUSED_ENTRY(176),
803 	I40E_PTT_UNUSED_ENTRY(177),
804 	I40E_PTT_UNUSED_ENTRY(178),
805 	I40E_PTT_UNUSED_ENTRY(179),
806 
807 	I40E_PTT_UNUSED_ENTRY(180),
808 	I40E_PTT_UNUSED_ENTRY(181),
809 	I40E_PTT_UNUSED_ENTRY(182),
810 	I40E_PTT_UNUSED_ENTRY(183),
811 	I40E_PTT_UNUSED_ENTRY(184),
812 	I40E_PTT_UNUSED_ENTRY(185),
813 	I40E_PTT_UNUSED_ENTRY(186),
814 	I40E_PTT_UNUSED_ENTRY(187),
815 	I40E_PTT_UNUSED_ENTRY(188),
816 	I40E_PTT_UNUSED_ENTRY(189),
817 
818 	I40E_PTT_UNUSED_ENTRY(190),
819 	I40E_PTT_UNUSED_ENTRY(191),
820 	I40E_PTT_UNUSED_ENTRY(192),
821 	I40E_PTT_UNUSED_ENTRY(193),
822 	I40E_PTT_UNUSED_ENTRY(194),
823 	I40E_PTT_UNUSED_ENTRY(195),
824 	I40E_PTT_UNUSED_ENTRY(196),
825 	I40E_PTT_UNUSED_ENTRY(197),
826 	I40E_PTT_UNUSED_ENTRY(198),
827 	I40E_PTT_UNUSED_ENTRY(199),
828 
829 	I40E_PTT_UNUSED_ENTRY(200),
830 	I40E_PTT_UNUSED_ENTRY(201),
831 	I40E_PTT_UNUSED_ENTRY(202),
832 	I40E_PTT_UNUSED_ENTRY(203),
833 	I40E_PTT_UNUSED_ENTRY(204),
834 	I40E_PTT_UNUSED_ENTRY(205),
835 	I40E_PTT_UNUSED_ENTRY(206),
836 	I40E_PTT_UNUSED_ENTRY(207),
837 	I40E_PTT_UNUSED_ENTRY(208),
838 	I40E_PTT_UNUSED_ENTRY(209),
839 
840 	I40E_PTT_UNUSED_ENTRY(210),
841 	I40E_PTT_UNUSED_ENTRY(211),
842 	I40E_PTT_UNUSED_ENTRY(212),
843 	I40E_PTT_UNUSED_ENTRY(213),
844 	I40E_PTT_UNUSED_ENTRY(214),
845 	I40E_PTT_UNUSED_ENTRY(215),
846 	I40E_PTT_UNUSED_ENTRY(216),
847 	I40E_PTT_UNUSED_ENTRY(217),
848 	I40E_PTT_UNUSED_ENTRY(218),
849 	I40E_PTT_UNUSED_ENTRY(219),
850 
851 	I40E_PTT_UNUSED_ENTRY(220),
852 	I40E_PTT_UNUSED_ENTRY(221),
853 	I40E_PTT_UNUSED_ENTRY(222),
854 	I40E_PTT_UNUSED_ENTRY(223),
855 	I40E_PTT_UNUSED_ENTRY(224),
856 	I40E_PTT_UNUSED_ENTRY(225),
857 	I40E_PTT_UNUSED_ENTRY(226),
858 	I40E_PTT_UNUSED_ENTRY(227),
859 	I40E_PTT_UNUSED_ENTRY(228),
860 	I40E_PTT_UNUSED_ENTRY(229),
861 
862 	I40E_PTT_UNUSED_ENTRY(230),
863 	I40E_PTT_UNUSED_ENTRY(231),
864 	I40E_PTT_UNUSED_ENTRY(232),
865 	I40E_PTT_UNUSED_ENTRY(233),
866 	I40E_PTT_UNUSED_ENTRY(234),
867 	I40E_PTT_UNUSED_ENTRY(235),
868 	I40E_PTT_UNUSED_ENTRY(236),
869 	I40E_PTT_UNUSED_ENTRY(237),
870 	I40E_PTT_UNUSED_ENTRY(238),
871 	I40E_PTT_UNUSED_ENTRY(239),
872 
873 	I40E_PTT_UNUSED_ENTRY(240),
874 	I40E_PTT_UNUSED_ENTRY(241),
875 	I40E_PTT_UNUSED_ENTRY(242),
876 	I40E_PTT_UNUSED_ENTRY(243),
877 	I40E_PTT_UNUSED_ENTRY(244),
878 	I40E_PTT_UNUSED_ENTRY(245),
879 	I40E_PTT_UNUSED_ENTRY(246),
880 	I40E_PTT_UNUSED_ENTRY(247),
881 	I40E_PTT_UNUSED_ENTRY(248),
882 	I40E_PTT_UNUSED_ENTRY(249),
883 
884 	I40E_PTT_UNUSED_ENTRY(250),
885 	I40E_PTT_UNUSED_ENTRY(251),
886 	I40E_PTT_UNUSED_ENTRY(252),
887 	I40E_PTT_UNUSED_ENTRY(253),
888 	I40E_PTT_UNUSED_ENTRY(254),
889 	I40E_PTT_UNUSED_ENTRY(255)
890 };
891 
892 /**
893  * i40e_init_shared_code - Initialize the shared code
894  * @hw: pointer to hardware structure
895  *
896  * This assigns the MAC type and PHY code and inits the NVM.
897  * Does not touch the hardware. This function must be called prior to any
898  * other function in the shared code. The i40e_hw structure should be
899  * memset to 0 prior to calling this function.  The following fields in
900  * hw structure should be filled in prior to calling this function:
901  * hw_addr, back, device_id, vendor_id, subsystem_device_id,
902  * subsystem_vendor_id, and revision_id
903  **/
904 i40e_status i40e_init_shared_code(struct i40e_hw *hw)
905 {
906 	i40e_status status = 0;
907 	u32 port, ari, func_rid;
908 
909 	i40e_set_mac_type(hw);
910 
911 	switch (hw->mac.type) {
912 	case I40E_MAC_XL710:
913 	case I40E_MAC_X722:
914 		break;
915 	default:
916 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
917 	}
918 
919 	hw->phy.get_link_info = true;
920 
921 	/* Determine port number and PF number */
922 	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
923 					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
924 	hw->port = (u8)port;
925 	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
926 						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
927 	func_rid = rd32(hw, I40E_PF_FUNC_RID);
928 	if (ari)
929 		hw->pf_id = (u8)(func_rid & 0xff);
930 	else
931 		hw->pf_id = (u8)(func_rid & 0x7);
932 
933 	if (hw->mac.type == I40E_MAC_X722)
934 		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
935 			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
936 
937 	status = i40e_init_nvm(hw);
938 	return status;
939 }
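
/* Example (illustrative sketch of the prerequisites described above; the
 * 'pdev' pointer and BAR mapping come from a hypothetical probe routine):
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
 *			      pci_resource_len(pdev, 0));
 *
 *	err = i40e_init_shared_code(hw);
 */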
940 
941 /**
942  * i40e_aq_mac_address_read - Retrieve the MAC addresses
943  * @hw: pointer to the hw struct
944  * @flags: a return indicator of what addresses were added to the addr store
945  * @addrs: the requestor's mac addr store
946  * @cmd_details: pointer to command details structure or NULL
947  **/
948 static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
949 				   u16 *flags,
950 				   struct i40e_aqc_mac_address_read_data *addrs,
951 				   struct i40e_asq_cmd_details *cmd_details)
952 {
953 	struct i40e_aq_desc desc;
954 	struct i40e_aqc_mac_address_read *cmd_data =
955 		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
956 	i40e_status status;
957 
958 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
959 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
960 
961 	status = i40e_asq_send_command(hw, &desc, addrs,
962 				       sizeof(*addrs), cmd_details);
963 	*flags = le16_to_cpu(cmd_data->command_flags);
964 
965 	return status;
966 }
967 
968 /**
969  * i40e_aq_mac_address_write - Change the MAC addresses
970  * @hw: pointer to the hw struct
971  * @flags: indicates which MAC to be written
972  * @mac_addr: address to write
973  * @cmd_details: pointer to command details structure or NULL
974  **/
975 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
976 				    u16 flags, u8 *mac_addr,
977 				    struct i40e_asq_cmd_details *cmd_details)
978 {
979 	struct i40e_aq_desc desc;
980 	struct i40e_aqc_mac_address_write *cmd_data =
981 		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
982 	i40e_status status;
983 
984 	i40e_fill_default_direct_cmd_desc(&desc,
985 					  i40e_aqc_opc_mac_address_write);
986 	cmd_data->command_flags = cpu_to_le16(flags);
987 	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
988 	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
989 					((u32)mac_addr[3] << 16) |
990 					((u32)mac_addr[4] << 8) |
991 					mac_addr[5]);
992 
993 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
994 
995 	return status;
996 }
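
/* Example (illustrative): writing a locally administered address that
 * should also be used for wake-on-LAN. The I40E_AQC_WRITE_TYPE_LAA_WOL
 * flag and the 'new_mac' buffer are assumed from the calling context:
 *
 *	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
 *					new_mac, NULL);
 */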
997 
998 /**
999  * i40e_get_mac_addr - get MAC address
1000  * @hw: pointer to the HW structure
1001  * @mac_addr: pointer to MAC address
1002  *
1003  * Reads the adapter's LAN MAC address via the admin queue
1004  **/
1005 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1006 {
1007 	struct i40e_aqc_mac_address_read_data addrs;
1008 	i40e_status status;
1009 	u16 flags = 0;
1010 
1011 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1012 
1013 	if (flags & I40E_AQC_LAN_ADDR_VALID)
1014 		ether_addr_copy(mac_addr, addrs.pf_lan_mac);
1015 
1016 	return status;
1017 }
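
/* Example (illustrative): callers typically validate the returned address
 * before using it, since the AQ response may not carry a valid LAN address:
 *
 *	u8 mac[ETH_ALEN];
 *
 *	if (!i40e_get_mac_addr(hw, mac) && is_valid_ether_addr(mac))
 *		ether_addr_copy(hw->mac.addr, mac);
 */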
1018 
1019 /**
1020  * i40e_get_port_mac_addr - get Port MAC address
1021  * @hw: pointer to the HW structure
1022  * @mac_addr: pointer to Port MAC address
1023  *
1024  * Reads the adapter's Port MAC address
1025  **/
1026 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1027 {
1028 	struct i40e_aqc_mac_address_read_data addrs;
1029 	i40e_status status;
1030 	u16 flags = 0;
1031 
1032 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1033 	if (status)
1034 		return status;
1035 
1036 	if (flags & I40E_AQC_PORT_ADDR_VALID)
1037 		ether_addr_copy(mac_addr, addrs.port_mac);
1038 	else
1039 		status = I40E_ERR_INVALID_MAC_ADDR;
1040 
1041 	return status;
1042 }
1043 
1044 /**
1045  * i40e_pre_tx_queue_cfg - pre tx queue configure
1046  * @hw: pointer to the HW structure
1047  * @queue: target PF queue index
1048  * @enable: state change request
1049  *
1050  * Handles hw requirement to indicate intention to enable
1051  * or disable target queue.
1052  **/
1053 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
1054 {
1055 	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
1056 	u32 reg_block = 0;
1057 	u32 reg_val;
1058 
1059 	if (abs_queue_idx >= 128) {
1060 		reg_block = abs_queue_idx / 128;
1061 		abs_queue_idx %= 128;
1062 	}
1063 
1064 	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1065 	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1066 	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1067 
1068 	if (enable)
1069 		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
1070 	else
1071 		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1072 
1073 	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
1074 }
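
/* Worked example (illustrative numbers): with func_caps.base_queue = 64 and
 * queue = 200, abs_queue_idx starts at 264; since 264 >= 128, reg_block
 * becomes 264 / 128 = 2 and abs_queue_idx becomes 264 % 128 = 8, so the
 * request is written to I40E_GLLAN_TXPRE_QDIS(2) with QINDX = 8.
 */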
1075 
1076 /**
1077  *  i40e_read_pba_string - Reads part number string from EEPROM
1078  *  @hw: pointer to hardware structure
1079  *  @pba_num: stores the part number string from the EEPROM
1080  *  @pba_num_size: part number string buffer length
1081  *
1082  *  Reads the part number string from the EEPROM.
1083  **/
1084 i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
1085 				 u32 pba_num_size)
1086 {
1087 	i40e_status status = 0;
1088 	u16 pba_word = 0;
1089 	u16 pba_size = 0;
1090 	u16 pba_ptr = 0;
1091 	u16 i = 0;
1092 
1093 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
1094 	if (status || (pba_word != 0xFAFA)) {
1095 		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
1096 		return status;
1097 	}
1098 
1099 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
1100 	if (status) {
1101 		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
1102 		return status;
1103 	}
1104 
1105 	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
1106 	if (status) {
1107 		hw_dbg(hw, "Failed to read PBA Block size.\n");
1108 		return status;
1109 	}
1110 
1111 	/* Subtract one to get PBA word count (PBA Size word is included in
1112 	 * total size)
1113 	 */
1114 	pba_size--;
1115 	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1116 		hw_dbg(hw, "Buffer to small for PBA data.\n");
1117 		return I40E_ERR_PARAM;
1118 	}
1119 
1120 	for (i = 0; i < pba_size; i++) {
1121 		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1122 		if (status) {
1123 			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1124 			return status;
1125 		}
1126 
1127 		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1128 		pba_num[(i * 2) + 1] = pba_word & 0xFF;
1129 	}
1130 	pba_num[(pba_size * 2)] = '\0';
1131 
1132 	return status;
1133 }
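
/* Example (illustrative sketch): the caller's buffer must hold two
 * characters per PBA data word plus the terminating NUL, so a fixed-size
 * array is typically passed in (the length below is an arbitrary
 * assumption, not a driver constant):
 *
 *	u8 pba_num[32];
 *
 *	if (!i40e_read_pba_string(hw, pba_num, sizeof(pba_num)))
 *		hw_dbg(hw, "PBA: %s\n", pba_num);
 */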
1134 
1135 /**
1136  * i40e_get_media_type - Gets media type
1137  * @hw: pointer to the hardware structure
1138  **/
1139 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1140 {
1141 	enum i40e_media_type media;
1142 
1143 	switch (hw->phy.link_info.phy_type) {
1144 	case I40E_PHY_TYPE_10GBASE_SR:
1145 	case I40E_PHY_TYPE_10GBASE_LR:
1146 	case I40E_PHY_TYPE_1000BASE_SX:
1147 	case I40E_PHY_TYPE_1000BASE_LX:
1148 	case I40E_PHY_TYPE_40GBASE_SR4:
1149 	case I40E_PHY_TYPE_40GBASE_LR4:
1150 	case I40E_PHY_TYPE_25GBASE_LR:
1151 	case I40E_PHY_TYPE_25GBASE_SR:
1152 		media = I40E_MEDIA_TYPE_FIBER;
1153 		break;
1154 	case I40E_PHY_TYPE_100BASE_TX:
1155 	case I40E_PHY_TYPE_1000BASE_T:
1156 	case I40E_PHY_TYPE_2_5GBASE_T:
1157 	case I40E_PHY_TYPE_5GBASE_T:
1158 	case I40E_PHY_TYPE_10GBASE_T:
1159 		media = I40E_MEDIA_TYPE_BASET;
1160 		break;
1161 	case I40E_PHY_TYPE_10GBASE_CR1_CU:
1162 	case I40E_PHY_TYPE_40GBASE_CR4_CU:
1163 	case I40E_PHY_TYPE_10GBASE_CR1:
1164 	case I40E_PHY_TYPE_40GBASE_CR4:
1165 	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1166 	case I40E_PHY_TYPE_40GBASE_AOC:
1167 	case I40E_PHY_TYPE_10GBASE_AOC:
1168 	case I40E_PHY_TYPE_25GBASE_CR:
1169 	case I40E_PHY_TYPE_25GBASE_AOC:
1170 	case I40E_PHY_TYPE_25GBASE_ACC:
1171 		media = I40E_MEDIA_TYPE_DA;
1172 		break;
1173 	case I40E_PHY_TYPE_1000BASE_KX:
1174 	case I40E_PHY_TYPE_10GBASE_KX4:
1175 	case I40E_PHY_TYPE_10GBASE_KR:
1176 	case I40E_PHY_TYPE_40GBASE_KR4:
1177 	case I40E_PHY_TYPE_20GBASE_KR2:
1178 	case I40E_PHY_TYPE_25GBASE_KR:
1179 		media = I40E_MEDIA_TYPE_BACKPLANE;
1180 		break;
1181 	case I40E_PHY_TYPE_SGMII:
1182 	case I40E_PHY_TYPE_XAUI:
1183 	case I40E_PHY_TYPE_XFI:
1184 	case I40E_PHY_TYPE_XLAUI:
1185 	case I40E_PHY_TYPE_XLPPI:
1186 	default:
1187 		media = I40E_MEDIA_TYPE_UNKNOWN;
1188 		break;
1189 	}
1190 
1191 	return media;
1192 }
1193 
1194 /**
1195  * i40e_poll_globr - Poll for Global Reset completion
1196  * @hw: pointer to the hardware structure
1197  * @retry_limit: how many times to retry before failure
1198  **/
1199 static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1200 				   u32 retry_limit)
1201 {
1202 	u32 cnt, reg = 0;
1203 
1204 	for (cnt = 0; cnt < retry_limit; cnt++) {
1205 		reg = rd32(hw, I40E_GLGEN_RSTAT);
1206 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1207 			return 0;
1208 		msleep(100);
1209 	}
1210 
1211 	hw_dbg(hw, "Global reset failed.\n");
1212 	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1213 
1214 	return I40E_ERR_RESET_FAILED;
1215 }
1216 
1217 #define I40E_PF_RESET_WAIT_COUNT_A0	200
1218 #define I40E_PF_RESET_WAIT_COUNT	200
1219 /**
1220  * i40e_pf_reset - Reset the PF
1221  * @hw: pointer to the hardware structure
1222  *
1223  * Assuming someone else has triggered a global reset,
1224  * ensure the global reset is complete and then reset the PF
1225  **/
1226 i40e_status i40e_pf_reset(struct i40e_hw *hw)
1227 {
1228 	u32 cnt = 0;
1229 	u32 cnt1 = 0;
1230 	u32 reg = 0;
1231 	u32 grst_del;
1232 
1233 	/* Poll for Global Reset steady state in case of recent GRST.
1234 	 * The grst delay value is in 100ms units, and we'll wait a
1235 	 * couple counts longer to be sure we don't just miss the end.
1236 	 */
1237 	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1238 		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1239 		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1240 
1241 	/* It can take up to 15 secs for GRST steady state.
1242 	 * Bump it to 16 secs max to be safe.
1243 	 */
1244 	grst_del = grst_del * 20;
1245 
1246 	for (cnt = 0; cnt < grst_del; cnt++) {
1247 		reg = rd32(hw, I40E_GLGEN_RSTAT);
1248 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1249 			break;
1250 		msleep(100);
1251 	}
1252 	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1253 		hw_dbg(hw, "Global reset polling failed to complete.\n");
1254 		return I40E_ERR_RESET_FAILED;
1255 	}
1256 
1257 	/* Now Wait for the FW to be ready */
1258 	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1259 		reg = rd32(hw, I40E_GLNVM_ULD);
1260 		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1261 			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1262 		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1263 			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1264 			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1265 			break;
1266 		}
1267 		usleep_range(10000, 20000);
1268 	}
1269 	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1270 		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1271 		hw_dbg(hw, "wait for FW Reset complete timedout\n");
1272 		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1273 		return I40E_ERR_RESET_FAILED;
1274 	}
1275 
1276 	/* If there was a Global Reset in progress when we got here,
1277 	 * we don't need to do the PF Reset
1278 	 */
1279 	if (!cnt) {
1280 		u32 reg2 = 0;
1281 		if (hw->revision_id == 0)
1282 			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1283 		else
1284 			cnt = I40E_PF_RESET_WAIT_COUNT;
1285 		reg = rd32(hw, I40E_PFGEN_CTRL);
1286 		wr32(hw, I40E_PFGEN_CTRL,
1287 		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1288 		for (; cnt; cnt--) {
1289 			reg = rd32(hw, I40E_PFGEN_CTRL);
1290 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1291 				break;
1292 			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1293 			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1294 				break;
1295 			usleep_range(1000, 2000);
1296 		}
1297 		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1298 			if (i40e_poll_globr(hw, grst_del))
1299 				return I40E_ERR_RESET_FAILED;
1300 		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1301 			hw_dbg(hw, "PF reset polling failed to complete.\n");
1302 			return I40E_ERR_RESET_FAILED;
1303 		}
1304 	}
1305 
1306 	i40e_clear_pxe_mode(hw);
1307 
1308 	return 0;
1309 }
1310 
1311 /**
1312  * i40e_clear_hw - clear out any left over hw state
1313  * @hw: pointer to the hw struct
1314  *
1315  * Clear queues and interrupts, typically called at init time,
1316  * but after the capabilities have been found so we know how many
1317  * queues and msix vectors have been allocated.
1318  **/
1319 void i40e_clear_hw(struct i40e_hw *hw)
1320 {
1321 	u32 num_queues, base_queue;
1322 	u32 num_pf_int;
1323 	u32 num_vf_int;
1324 	u32 num_vfs;
1325 	u32 i, j;
1326 	u32 val;
1327 	u32 eol = 0x7ff;
1328 
1329 	/* get number of interrupts, queues, and VFs */
1330 	val = rd32(hw, I40E_GLPCI_CNF2);
1331 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1332 		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1333 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1334 		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1335 
1336 	val = rd32(hw, I40E_PFLAN_QALLOC);
1337 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1338 		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1339 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1340 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1341 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1342 		num_queues = (j - base_queue) + 1;
1343 	else
1344 		num_queues = 0;
1345 
1346 	val = rd32(hw, I40E_PF_VT_PFALLOC);
1347 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1348 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1349 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1350 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1351 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1352 		num_vfs = (j - i) + 1;
1353 	else
1354 		num_vfs = 0;
1355 
1356 	/* stop all the interrupts */
1357 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1358 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1359 	for (i = 0; i < num_pf_int - 2; i++)
1360 		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1361 
1362 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1363 	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1364 	wr32(hw, I40E_PFINT_LNKLST0, val);
1365 	for (i = 0; i < num_pf_int - 2; i++)
1366 		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1367 	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1368 	for (i = 0; i < num_vfs; i++)
1369 		wr32(hw, I40E_VPINT_LNKLST0(i), val);
1370 	for (i = 0; i < num_vf_int - 2; i++)
1371 		wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1372 
1373 	/* warn the HW of the coming Tx disables */
1374 	for (i = 0; i < num_queues; i++) {
1375 		u32 abs_queue_idx = base_queue + i;
1376 		u32 reg_block = 0;
1377 
1378 		if (abs_queue_idx >= 128) {
1379 			reg_block = abs_queue_idx / 128;
1380 			abs_queue_idx %= 128;
1381 		}
1382 
1383 		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1384 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1385 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1386 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1387 
1388 		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1389 	}
1390 	udelay(400);
1391 
1392 	/* stop all the queues */
1393 	for (i = 0; i < num_queues; i++) {
1394 		wr32(hw, I40E_QINT_TQCTL(i), 0);
1395 		wr32(hw, I40E_QTX_ENA(i), 0);
1396 		wr32(hw, I40E_QINT_RQCTL(i), 0);
1397 		wr32(hw, I40E_QRX_ENA(i), 0);
1398 	}
1399 
1400 	/* short wait for all queue disables to settle */
1401 	udelay(50);
1402 }
1403 
1404 /**
1405  * i40e_clear_pxe_mode - clear pxe operations mode
1406  * @hw: pointer to the hw struct
1407  *
1408  * Make sure all PXE mode settings are cleared, including things
1409  * like descriptor fetch/write-back mode.
1410  **/
1411 void i40e_clear_pxe_mode(struct i40e_hw *hw)
1412 {
1413 	u32 reg;
1414 
1415 	if (i40e_check_asq_alive(hw))
1416 		i40e_aq_clear_pxe_mode(hw, NULL);
1417 
1418 	/* Clear single descriptor fetch/write-back mode */
1419 	reg = rd32(hw, I40E_GLLAN_RCTL_0);
1420 
1421 	if (hw->revision_id == 0) {
1422 		/* As a workaround, clear PXE_MODE instead of setting it */
1423 		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1424 	} else {
1425 		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1426 	}
1427 }
1428 
1429 /**
1430  * i40e_led_is_mine - helper to find matching led
1431  * @hw: pointer to the hw struct
1432  * @idx: index into GPIO registers
1433  *
1434  * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1435  */
1436 static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1437 {
1438 	u32 gpio_val = 0;
1439 	u32 port;
1440 
1441 	if (!hw->func_caps.led[idx])
1442 		return 0;
1443 
1444 	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1445 	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1446 		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1447 
1448 	/* if PRT_NUM_NA is 1 then this LED is not port specific; otherwise,
1449 	 * if it is not our port then ignore it
1450 	 */
1451 	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1452 	    (port != hw->port))
1453 		return 0;
1454 
1455 	return gpio_val;
1456 }
1457 
1458 #define I40E_COMBINED_ACTIVITY 0xA
1459 #define I40E_FILTER_ACTIVITY 0xE
1460 #define I40E_LINK_ACTIVITY 0xC
1461 #define I40E_MAC_ACTIVITY 0xD
1462 #define I40E_LED0 22
1463 
1464 /**
1465  * i40e_led_get - return current on/off mode
1466  * @hw: pointer to the hw struct
1467  *
1468  * The value returned is the 'mode' field as defined in the
1469  * GPIO register definitions: 0x0 = off, 0xf = on, and other
1470  * values are variations of possible behaviors relating to
1471  * blink, link, and wire.
1472  **/
1473 u32 i40e_led_get(struct i40e_hw *hw)
1474 {
1475 	u32 mode = 0;
1476 	int i;
1477 
1478 	/* as per the documentation GPIO 22-29 are the LED
1479 	 * GPIO pins named LED0..LED7
1480 	 */
1481 	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1482 		u32 gpio_val = i40e_led_is_mine(hw, i);
1483 
1484 		if (!gpio_val)
1485 			continue;
1486 
1487 		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1488 			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1489 		break;
1490 	}
1491 
1492 	return mode;
1493 }
1494 
1495 /**
1496  * i40e_led_set - set new on/off mode
1497  * @hw: pointer to the hw struct
1498  * @mode: 0=off, 0xf=on (else see manual for mode details)
1499  * @blink: true if the LED should blink when on, false if steady
1500  *
1501  * If this function is used to turn on the blink, it should also be
1502  * used to disable the blink when restoring the original state.
1503  **/
1504 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1505 {
1506 	int i;
1507 
1508 	if (mode & 0xfffffff0)
1509 		hw_dbg(hw, "invalid mode passed in %X\n", mode);
1510 
1511 	/* as per the documentation GPIO 22-29 are the LED
1512 	 * GPIO pins named LED0..LED7
1513 	 */
1514 	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1515 		u32 gpio_val = i40e_led_is_mine(hw, i);
1516 
1517 		if (!gpio_val)
1518 			continue;
1519 		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1520 		/* this & is a bit of paranoia, but serves as a range check */
1521 		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1522 			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1523 
1524 		if (blink)
1525 			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1526 		else
1527 			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1528 
1529 		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1530 		break;
1531 	}
1532 }
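
/* Example (illustrative sketch of the save/blink/restore sequence noted in
 * the kernel-doc above, as used for port identification):
 *
 *	u32 orig_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);
 *	... identification period elapses ...
 *	i40e_led_set(hw, orig_mode, false);
 */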
1533 
1534 /* Admin command wrappers */
1535 
1536 /**
1537  * i40e_aq_get_phy_capabilities
1538  * @hw: pointer to the hw struct
1539  * @abilities: structure for PHY capabilities to be filled
1540  * @qualified_modules: report Qualified Modules
1541  * @report_init: report init capabilities (active are default)
1542  * @cmd_details: pointer to command details structure or NULL
1543  *
1544  * Returns the various PHY abilities supported on the Port.
1545  **/
1546 i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1547 			bool qualified_modules, bool report_init,
1548 			struct i40e_aq_get_phy_abilities_resp *abilities,
1549 			struct i40e_asq_cmd_details *cmd_details)
1550 {
1551 	struct i40e_aq_desc desc;
1552 	i40e_status status;
1553 	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1554 	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1555 
1556 	if (!abilities)
1557 		return I40E_ERR_PARAM;
1558 
1559 	do {
1560 		i40e_fill_default_direct_cmd_desc(&desc,
1561 					       i40e_aqc_opc_get_phy_abilities);
1562 
1563 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1564 		if (abilities_size > I40E_AQ_LARGE_BUF)
1565 			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1566 
1567 		if (qualified_modules)
1568 			desc.params.external.param0 |=
1569 			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1570 
1571 		if (report_init)
1572 			desc.params.external.param0 |=
1573 			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1574 
1575 		status = i40e_asq_send_command(hw, &desc, abilities,
1576 					       abilities_size, cmd_details);
1577 
1578 		if (status)
1579 			break;
1580 
1581 		if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
1582 			status = I40E_ERR_UNKNOWN_PHY;
1583 			break;
1584 		} else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
1585 			usleep_range(1000, 2000);
1586 			total_delay++;
1587 			status = I40E_ERR_TIMEOUT;
1588 		}
1589 	} while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
1590 		 (total_delay < max_delay));
1591 
1592 	if (status)
1593 		return status;
1594 
1595 	if (report_init) {
1596 		if (hw->mac.type ==  I40E_MAC_XL710 &&
1597 		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1598 		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1599 			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1600 		} else {
1601 			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1602 			hw->phy.phy_types |=
1603 					((u64)abilities->phy_type_ext << 32);
1604 		}
1605 	}
1606 
1607 	return status;
1608 }
1609 
1610 /**
1611  * i40e_aq_set_phy_config
1612  * @hw: pointer to the hw struct
1613  * @config: structure with PHY configuration to be set
1614  * @cmd_details: pointer to command details structure or NULL
1615  *
1616  * Set the various PHY configuration parameters
1617  * supported on the Port. One or more of the Set PHY config parameters may be
1618  * ignored in an MFP mode as the PF may not have the privilege to set some
1619  * of the PHY Config parameters. This status will be indicated by the
1620  * command response.
1621  **/
1622 enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1623 				struct i40e_aq_set_phy_config *config,
1624 				struct i40e_asq_cmd_details *cmd_details)
1625 {
1626 	struct i40e_aq_desc desc;
1627 	struct i40e_aq_set_phy_config *cmd =
1628 			(struct i40e_aq_set_phy_config *)&desc.params.raw;
1629 	enum i40e_status_code status;
1630 
1631 	if (!config)
1632 		return I40E_ERR_PARAM;
1633 
1634 	i40e_fill_default_direct_cmd_desc(&desc,
1635 					  i40e_aqc_opc_set_phy_config);
1636 
1637 	*cmd = *config;
1638 
1639 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1640 
1641 	return status;
1642 }
1643 
1644 /**
1645  * i40e_set_fc
1646  * @hw: pointer to the hw struct
1647  * @aq_failures: buffer to return AdminQ failure information
1648  * @atomic_restart: whether to enable atomic link restart
1649  *
1650  * Set the requested flow control mode using set_phy_config.
1651  **/
1652 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1653 				  bool atomic_restart)
1654 {
1655 	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1656 	struct i40e_aq_get_phy_abilities_resp abilities;
1657 	struct i40e_aq_set_phy_config config;
1658 	enum i40e_status_code status;
1659 	u8 pause_mask = 0x0;
1660 
1661 	*aq_failures = 0x0;
1662 
1663 	switch (fc_mode) {
1664 	case I40E_FC_FULL:
1665 		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1666 		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1667 		break;
1668 	case I40E_FC_RX_PAUSE:
1669 		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1670 		break;
1671 	case I40E_FC_TX_PAUSE:
1672 		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1673 		break;
1674 	default:
1675 		break;
1676 	}
1677 
1678 	/* Get the current phy config */
1679 	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1680 					      NULL);
1681 	if (status) {
1682 		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1683 		return status;
1684 	}
1685 
1686 	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1687 	/* clear the old pause settings */
1688 	config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1689 			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1690 	/* set the new abilities */
1691 	config.abilities |= pause_mask;
1692 	/* If the abilities have changed, then set the new config */
1693 	if (config.abilities != abilities.abilities) {
1694 		/* Auto restart link so settings take effect */
1695 		if (atomic_restart)
1696 			config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1697 		/* Copy over all the old settings */
1698 		config.phy_type = abilities.phy_type;
1699 		config.phy_type_ext = abilities.phy_type_ext;
1700 		config.link_speed = abilities.link_speed;
1701 		config.eee_capability = abilities.eee_capability;
1702 		config.eeer = abilities.eeer_val;
1703 		config.low_power_ctrl = abilities.d3_lpan;
1704 		config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
1705 				    I40E_AQ_PHY_FEC_CONFIG_MASK;
1706 		status = i40e_aq_set_phy_config(hw, &config, NULL);
1707 
1708 		if (status)
1709 			*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1710 	}
1711 	/* Update the link info */
1712 	status = i40e_update_link_info(hw);
1713 	if (status) {
1714 		/* Wait a little bit (on 40G cards it sometimes takes a really
1715 		 * long time for link to come back from the atomic reset)
1716 		 * and try once more
1717 		 */
1718 		msleep(1000);
1719 		status = i40e_update_link_info(hw);
1720 	}
1721 	if (status)
1722 		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1723 
1724 	return status;
1725 }
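
/*
 * Illustrative sketch, not part of the driver: request full flow control
 * with an atomic link restart.  How the aq_failures bits are consumed here
 * is only an example.
 */
static inline void i40e_example_request_full_fc(struct i40e_hw *hw)
{
	enum i40e_status_code status;
	u8 aq_failures = 0;

	hw->fc.requested_mode = I40E_FC_FULL;
	status = i40e_set_fc(hw, &aq_failures, true);
	if (status)
		i40e_debug(hw, I40E_DEBUG_LINK,
			   "set_fc status %d, aq_failures 0x%x\n",
			   status, aq_failures);
}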
1726 
1727 /**
1728  * i40e_aq_clear_pxe_mode
1729  * @hw: pointer to the hw struct
1730  * @cmd_details: pointer to command details structure or NULL
1731  *
1732  * Tell the firmware that the driver is taking over from PXE
1733  **/
1734 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1735 				struct i40e_asq_cmd_details *cmd_details)
1736 {
1737 	i40e_status status;
1738 	struct i40e_aq_desc desc;
1739 	struct i40e_aqc_clear_pxe *cmd =
1740 		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
1741 
1742 	i40e_fill_default_direct_cmd_desc(&desc,
1743 					  i40e_aqc_opc_clear_pxe_mode);
1744 
1745 	cmd->rx_cnt = 0x2;
1746 
1747 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1748 
1749 	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1750 
1751 	return status;
1752 }
1753 
1754 /**
1755  * i40e_aq_set_link_restart_an
1756  * @hw: pointer to the hw struct
1757  * @enable_link: if true: enable link, if false: disable link
1758  * @cmd_details: pointer to command details structure or NULL
1759  *
1760  * Sets up the link and restarts the Auto-Negotiation over the link.
1761  **/
1762 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1763 					bool enable_link,
1764 					struct i40e_asq_cmd_details *cmd_details)
1765 {
1766 	struct i40e_aq_desc desc;
1767 	struct i40e_aqc_set_link_restart_an *cmd =
1768 		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1769 	i40e_status status;
1770 
1771 	i40e_fill_default_direct_cmd_desc(&desc,
1772 					  i40e_aqc_opc_set_link_restart_an);
1773 
1774 	cmd->command = I40E_AQ_PHY_RESTART_AN;
1775 	if (enable_link)
1776 		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1777 	else
1778 		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1779 
1780 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1781 
1782 	return status;
1783 }
1784 
1785 /**
1786  * i40e_aq_get_link_info
1787  * @hw: pointer to the hw struct
1788  * @enable_lse: enable/disable LinkStatusEvent reporting
1789  * @link: pointer to link status structure - optional
1790  * @cmd_details: pointer to command details structure or NULL
1791  *
1792  * Returns the link status of the adapter.
1793  **/
1794 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1795 				bool enable_lse, struct i40e_link_status *link,
1796 				struct i40e_asq_cmd_details *cmd_details)
1797 {
1798 	struct i40e_aq_desc desc;
1799 	struct i40e_aqc_get_link_status *resp =
1800 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
1801 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1802 	i40e_status status;
1803 	bool tx_pause, rx_pause;
1804 	u16 command_flags;
1805 
1806 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1807 
1808 	if (enable_lse)
1809 		command_flags = I40E_AQ_LSE_ENABLE;
1810 	else
1811 		command_flags = I40E_AQ_LSE_DISABLE;
1812 	resp->command_flags = cpu_to_le16(command_flags);
1813 
1814 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1815 
1816 	if (status)
1817 		goto aq_get_link_info_exit;
1818 
1819 	/* save off old link status information */
1820 	hw->phy.link_info_old = *hw_link_info;
1821 
1822 	/* update link status */
1823 	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1824 	hw->phy.media_type = i40e_get_media_type(hw);
1825 	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1826 	hw_link_info->link_info = resp->link_info;
1827 	hw_link_info->an_info = resp->an_info;
1828 	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1829 						 I40E_AQ_CONFIG_FEC_RS_ENA);
1830 	hw_link_info->ext_info = resp->ext_info;
1831 	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1832 	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1833 	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1834 
1835 	/* update fc info */
1836 	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1837 	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1838 	if (tx_pause && rx_pause)
1839 		hw->fc.current_mode = I40E_FC_FULL;
1840 	else if (tx_pause)
1841 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
1842 	else if (rx_pause)
1843 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
1844 	else
1845 		hw->fc.current_mode = I40E_FC_NONE;
1846 
1847 	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1848 		hw_link_info->crc_enable = true;
1849 	else
1850 		hw_link_info->crc_enable = false;
1851 
1852 	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1853 		hw_link_info->lse_enable = true;
1854 	else
1855 		hw_link_info->lse_enable = false;
1856 
1857 	if ((hw->mac.type == I40E_MAC_XL710) &&
1858 	    (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1859 	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1860 		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1861 
1862 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1863 	    hw->aq.api_min_ver >= 7) {
1864 		__le32 tmp;
1865 
1866 		memcpy(&tmp, resp->link_type, sizeof(tmp));
1867 		hw->phy.phy_types = le32_to_cpu(tmp);
1868 		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1869 	}
1870 
1871 	/* save link status information */
1872 	if (link)
1873 		*link = *hw_link_info;
1874 
1875 	/* flag cleared so helper functions don't call AQ again */
1876 	hw->phy.get_link_info = false;
1877 
1878 aq_get_link_info_exit:
1879 	return status;
1880 }
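
/*
 * Illustrative sketch, not part of the driver: fetch the link status once
 * with LSE reporting enabled and log the fields filled in above.
 */
static inline void i40e_example_dump_link(struct i40e_hw *hw)
{
	struct i40e_link_status link;

	if (i40e_aq_get_link_info(hw, true, &link, NULL))
		return;

	i40e_debug(hw, I40E_DEBUG_LINK, "link %s, speed 0x%x, an_info 0x%x\n",
		   (link.link_info & I40E_AQ_LINK_UP) ? "up" : "down",
		   link.link_speed, link.an_info);
}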
1881 
1882 /**
1883  * i40e_aq_set_phy_int_mask
1884  * @hw: pointer to the hw struct
1885  * @mask: interrupt mask to be set
1886  * @cmd_details: pointer to command details structure or NULL
1887  *
1888  * Set link interrupt mask.
1889  **/
1890 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1891 				     u16 mask,
1892 				     struct i40e_asq_cmd_details *cmd_details)
1893 {
1894 	struct i40e_aq_desc desc;
1895 	struct i40e_aqc_set_phy_int_mask *cmd =
1896 		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1897 	i40e_status status;
1898 
1899 	i40e_fill_default_direct_cmd_desc(&desc,
1900 					  i40e_aqc_opc_set_phy_int_mask);
1901 
1902 	cmd->event_mask = cpu_to_le16(mask);
1903 
1904 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1905 
1906 	return status;
1907 }
1908 
1909 /**
1910  * i40e_aq_set_phy_debug
1911  * @hw: pointer to the hw struct
1912  * @cmd_flags: debug command flags
1913  * @cmd_details: pointer to command details structure or NULL
1914  *
1915  * Set PHY debug command flags, for example to request an external PHY reset.
1916  **/
1917 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1918 				  struct i40e_asq_cmd_details *cmd_details)
1919 {
1920 	struct i40e_aq_desc desc;
1921 	struct i40e_aqc_set_phy_debug *cmd =
1922 		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1923 	i40e_status status;
1924 
1925 	i40e_fill_default_direct_cmd_desc(&desc,
1926 					  i40e_aqc_opc_set_phy_debug);
1927 
1928 	cmd->command_flags = cmd_flags;
1929 
1930 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1931 
1932 	return status;
1933 }
1934 
1935 /**
1936  * i40e_aq_add_vsi
1937  * @hw: pointer to the hw struct
1938  * @vsi_ctx: pointer to a vsi context struct
1939  * @cmd_details: pointer to command details structure or NULL
1940  *
1941  * Add a VSI context to the hardware.
1942  **/
1943 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1944 				struct i40e_vsi_context *vsi_ctx,
1945 				struct i40e_asq_cmd_details *cmd_details)
1946 {
1947 	struct i40e_aq_desc desc;
1948 	struct i40e_aqc_add_get_update_vsi *cmd =
1949 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1950 	struct i40e_aqc_add_get_update_vsi_completion *resp =
1951 		(struct i40e_aqc_add_get_update_vsi_completion *)
1952 		&desc.params.raw;
1953 	i40e_status status;
1954 
1955 	i40e_fill_default_direct_cmd_desc(&desc,
1956 					  i40e_aqc_opc_add_vsi);
1957 
1958 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1959 	cmd->connection_type = vsi_ctx->connection_type;
1960 	cmd->vf_id = vsi_ctx->vf_num;
1961 	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1962 
1963 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1964 
1965 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1966 				    sizeof(vsi_ctx->info), cmd_details);
1967 
1968 	if (status)
1969 		goto aq_add_vsi_exit;
1970 
1971 	vsi_ctx->seid = le16_to_cpu(resp->seid);
1972 	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1973 	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1974 	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1975 
1976 aq_add_vsi_exit:
1977 	return status;
1978 }
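
/*
 * Illustrative sketch, not part of the driver: a minimal PF VSI add.  The
 * connection type and flags used here are assumptions; a real caller also
 * fills vsi_ctx->info before sending the command.
 */
static inline i40e_status i40e_example_add_pf_vsi(struct i40e_hw *hw,
						  u16 uplink_seid)
{
	struct i40e_vsi_context ctx = {};
	i40e_status status;

	ctx.uplink_seid = uplink_seid;
	ctx.connection_type = 0x1;	/* regular data port */
	ctx.flags = I40E_AQ_VSI_TYPE_PF;

	status = i40e_aq_add_vsi(hw, &ctx, NULL);
	if (!status)
		i40e_debug(hw, I40E_DEBUG_INIT,
			   "added VSI, seid %d\n", ctx.seid);
	return status;
}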
1979 
1980 /**
1981  * i40e_aq_set_default_vsi
1982  * @hw: pointer to the hw struct
1983  * @seid: vsi number
1984  * @cmd_details: pointer to command details structure or NULL
1985  **/
1986 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
1987 				    u16 seid,
1988 				    struct i40e_asq_cmd_details *cmd_details)
1989 {
1990 	struct i40e_aq_desc desc;
1991 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1992 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
1993 		&desc.params.raw;
1994 	i40e_status status;
1995 
1996 	i40e_fill_default_direct_cmd_desc(&desc,
1997 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
1998 
1999 	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2000 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2001 	cmd->seid = cpu_to_le16(seid);
2002 
2003 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2004 
2005 	return status;
2006 }
2007 
2008 /**
2009  * i40e_aq_clear_default_vsi
2010  * @hw: pointer to the hw struct
2011  * @seid: vsi number
2012  * @cmd_details: pointer to command details structure or NULL
2013  **/
2014 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
2015 				      u16 seid,
2016 				      struct i40e_asq_cmd_details *cmd_details)
2017 {
2018 	struct i40e_aq_desc desc;
2019 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2020 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
2021 		&desc.params.raw;
2022 	i40e_status status;
2023 
2024 	i40e_fill_default_direct_cmd_desc(&desc,
2025 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2026 
2027 	cmd->promiscuous_flags = cpu_to_le16(0);
2028 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2029 	cmd->seid = cpu_to_le16(seid);
2030 
2031 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2032 
2033 	return status;
2034 }
2035 
2036 /**
2037  * i40e_aq_set_vsi_unicast_promiscuous
2038  * @hw: pointer to the hw struct
2039  * @seid: vsi number
2040  * @set: set unicast promiscuous enable/disable
2041  * @cmd_details: pointer to command details structure or NULL
2042  * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
2043  **/
2044 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
2045 				u16 seid, bool set,
2046 				struct i40e_asq_cmd_details *cmd_details,
2047 				bool rx_only_promisc)
2048 {
2049 	struct i40e_aq_desc desc;
2050 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2051 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2052 	i40e_status status;
2053 	u16 flags = 0;
2054 
2055 	i40e_fill_default_direct_cmd_desc(&desc,
2056 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2057 
2058 	if (set) {
2059 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2060 		if (rx_only_promisc &&
2061 		    (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
2062 		     (hw->aq.api_maj_ver > 1)))
2063 			flags |= I40E_AQC_SET_VSI_PROMISC_TX;
2064 	}
2065 
2066 	cmd->promiscuous_flags = cpu_to_le16(flags);
2067 
2068 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2069 	if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
2070 	    (hw->aq.api_maj_ver > 1))
2071 		cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
2072 
2073 	cmd->seid = cpu_to_le16(seid);
2074 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2075 
2076 	return status;
2077 }
2078 
2079 /**
2080  * i40e_aq_set_vsi_multicast_promiscuous
2081  * @hw: pointer to the hw struct
2082  * @seid: vsi number
2083  * @set: set multicast promiscuous enable/disable
2084  * @cmd_details: pointer to command details structure or NULL
2085  **/
2086 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2087 				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2088 {
2089 	struct i40e_aq_desc desc;
2090 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2091 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2092 	i40e_status status;
2093 	u16 flags = 0;
2094 
2095 	i40e_fill_default_direct_cmd_desc(&desc,
2096 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2097 
2098 	if (set)
2099 		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2100 
2101 	cmd->promiscuous_flags = cpu_to_le16(flags);
2102 
2103 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2104 
2105 	cmd->seid = cpu_to_le16(seid);
2106 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2107 
2108 	return status;
2109 }
2110 
2111 /**
2112  * i40e_aq_set_vsi_mc_promisc_on_vlan
2113  * @hw: pointer to the hw struct
2114  * @seid: vsi number
2115  * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
2116  * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2117  * @cmd_details: pointer to command details structure or NULL
2118  **/
2119 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2120 							 u16 seid, bool enable,
2121 							 u16 vid,
2122 				struct i40e_asq_cmd_details *cmd_details)
2123 {
2124 	struct i40e_aq_desc desc;
2125 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2126 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2127 	enum i40e_status_code status;
2128 	u16 flags = 0;
2129 
2130 	i40e_fill_default_direct_cmd_desc(&desc,
2131 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2132 
2133 	if (enable)
2134 		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2135 
2136 	cmd->promiscuous_flags = cpu_to_le16(flags);
2137 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2138 	cmd->seid = cpu_to_le16(seid);
2139 	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2140 
2141 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2142 
2143 	return status;
2144 }
2145 
2146 /**
2147  * i40e_aq_set_vsi_uc_promisc_on_vlan
2148  * @hw: pointer to the hw struct
2149  * @seid: vsi number
2150  * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2151  * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2152  * @cmd_details: pointer to command details structure or NULL
2153  **/
2154 enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2155 							 u16 seid, bool enable,
2156 							 u16 vid,
2157 				struct i40e_asq_cmd_details *cmd_details)
2158 {
2159 	struct i40e_aq_desc desc;
2160 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2161 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2162 	enum i40e_status_code status;
2163 	u16 flags = 0;
2164 
2165 	i40e_fill_default_direct_cmd_desc(&desc,
2166 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2167 
2168 	if (enable)
2169 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2170 
2171 	cmd->promiscuous_flags = cpu_to_le16(flags);
2172 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2173 	cmd->seid = cpu_to_le16(seid);
2174 	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2175 
2176 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2177 
2178 	return status;
2179 }
2180 
2181 /**
2182  * i40e_aq_set_vsi_bc_promisc_on_vlan
2183  * @hw: pointer to the hw struct
2184  * @seid: vsi number
2185  * @enable: set broadcast promiscuous enable/disable for a given VLAN
2186  * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2187  * @cmd_details: pointer to command details structure or NULL
2188  **/
2189 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2190 				u16 seid, bool enable, u16 vid,
2191 				struct i40e_asq_cmd_details *cmd_details)
2192 {
2193 	struct i40e_aq_desc desc;
2194 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2195 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2196 	i40e_status status;
2197 	u16 flags = 0;
2198 
2199 	i40e_fill_default_direct_cmd_desc(&desc,
2200 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2201 
2202 	if (enable)
2203 		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2204 
2205 	cmd->promiscuous_flags = cpu_to_le16(flags);
2206 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2207 	cmd->seid = cpu_to_le16(seid);
2208 	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2209 
2210 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2211 
2212 	return status;
2213 }
2214 
2215 /**
2216  * i40e_aq_set_vsi_broadcast
2217  * @hw: pointer to the hw struct
2218  * @seid: vsi number
2219  * @set_filter: true to set filter, false to clear filter
2220  * @cmd_details: pointer to command details structure or NULL
2221  *
2222  * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2223  **/
2224 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2225 				u16 seid, bool set_filter,
2226 				struct i40e_asq_cmd_details *cmd_details)
2227 {
2228 	struct i40e_aq_desc desc;
2229 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2230 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2231 	i40e_status status;
2232 
2233 	i40e_fill_default_direct_cmd_desc(&desc,
2234 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2235 
2236 	if (set_filter)
2237 		cmd->promiscuous_flags
2238 			    |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2239 	else
2240 		cmd->promiscuous_flags
2241 			    &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2242 
2243 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2244 	cmd->seid = cpu_to_le16(seid);
2245 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2246 
2247 	return status;
2248 }
2249 
2250 /**
2251  * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2252  * @hw: pointer to the hw struct
2253  * @seid: vsi number
2254  * @enable: set VLAN promiscuous enable/disable
2255  * @cmd_details: pointer to command details structure or NULL
2256  **/
2257 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2258 				       u16 seid, bool enable,
2259 				       struct i40e_asq_cmd_details *cmd_details)
2260 {
2261 	struct i40e_aq_desc desc;
2262 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2263 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2264 	i40e_status status;
2265 	u16 flags = 0;
2266 
2267 	i40e_fill_default_direct_cmd_desc(&desc,
2268 					i40e_aqc_opc_set_vsi_promiscuous_modes);
2269 	if (enable)
2270 		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2271 
2272 	cmd->promiscuous_flags = cpu_to_le16(flags);
2273 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2274 	cmd->seid = cpu_to_le16(seid);
2275 
2276 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2277 
2278 	return status;
2279 }
2280 
2281 /**
2282  * i40e_aq_get_vsi_params - get VSI configuration info
2283  * @hw: pointer to the hw struct
2284  * @vsi_ctx: pointer to a vsi context struct
2285  * @cmd_details: pointer to command details structure or NULL
2286  **/
2287 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2288 				struct i40e_vsi_context *vsi_ctx,
2289 				struct i40e_asq_cmd_details *cmd_details)
2290 {
2291 	struct i40e_aq_desc desc;
2292 	struct i40e_aqc_add_get_update_vsi *cmd =
2293 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2294 	struct i40e_aqc_add_get_update_vsi_completion *resp =
2295 		(struct i40e_aqc_add_get_update_vsi_completion *)
2296 		&desc.params.raw;
2297 	i40e_status status;
2298 
2299 	i40e_fill_default_direct_cmd_desc(&desc,
2300 					  i40e_aqc_opc_get_vsi_parameters);
2301 
2302 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2303 
2304 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2305 
2306 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2307 				    sizeof(vsi_ctx->info), NULL);
2308 
2309 	if (status)
2310 		goto aq_get_vsi_params_exit;
2311 
2312 	vsi_ctx->seid = le16_to_cpu(resp->seid);
2313 	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2314 	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2315 	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2316 
2317 aq_get_vsi_params_exit:
2318 	return status;
2319 }
2320 
2321 /**
2322  * i40e_aq_update_vsi_params
2323  * @hw: pointer to the hw struct
2324  * @vsi_ctx: pointer to a vsi context struct
2325  * @cmd_details: pointer to command details structure or NULL
2326  *
2327  * Update a VSI context.
2328  **/
2329 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2330 				struct i40e_vsi_context *vsi_ctx,
2331 				struct i40e_asq_cmd_details *cmd_details)
2332 {
2333 	struct i40e_aq_desc desc;
2334 	struct i40e_aqc_add_get_update_vsi *cmd =
2335 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2336 	struct i40e_aqc_add_get_update_vsi_completion *resp =
2337 		(struct i40e_aqc_add_get_update_vsi_completion *)
2338 		&desc.params.raw;
2339 	i40e_status status;
2340 
2341 	i40e_fill_default_direct_cmd_desc(&desc,
2342 					  i40e_aqc_opc_update_vsi_parameters);
2343 	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2344 
2345 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2346 
2347 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2348 				    sizeof(vsi_ctx->info), cmd_details);
2349 
2350 	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2351 	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2352 
2353 	return status;
2354 }
2355 
2356 /**
2357  * i40e_aq_get_switch_config
2358  * @hw: pointer to the hardware structure
2359  * @buf: pointer to the result buffer
2360  * @buf_size: length of input buffer
2361  * @start_seid: seid to start the report from, 0 == beginning
2362  * @cmd_details: pointer to command details structure or NULL
2363  *
2364  * Fill the buf with switch configuration returned from AdminQ command
2365  **/
2366 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2367 				struct i40e_aqc_get_switch_config_resp *buf,
2368 				u16 buf_size, u16 *start_seid,
2369 				struct i40e_asq_cmd_details *cmd_details)
2370 {
2371 	struct i40e_aq_desc desc;
2372 	struct i40e_aqc_switch_seid *scfg =
2373 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
2374 	i40e_status status;
2375 
2376 	i40e_fill_default_direct_cmd_desc(&desc,
2377 					  i40e_aqc_opc_get_switch_config);
2378 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2379 	if (buf_size > I40E_AQ_LARGE_BUF)
2380 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2381 	scfg->seid = cpu_to_le16(*start_seid);
2382 
2383 	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2384 	*start_seid = le16_to_cpu(scfg->seid);
2385 
2386 	return status;
2387 }
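
/*
 * Illustrative sketch, not part of the driver: the switch configuration can
 * span several AQ responses, so callers loop until the returned start SEID
 * comes back as 0.  The caller-provided buffer handling is an assumption.
 */
static inline i40e_status i40e_example_walk_switch_config(struct i40e_hw *hw,
			struct i40e_aqc_get_switch_config_resp *buf,
			u16 buf_size)
{
	i40e_status status;
	u16 next_seid = 0;

	do {
		status = i40e_aq_get_switch_config(hw, buf, buf_size,
						   &next_seid, NULL);
		if (status)
			break;
		/* parse the elements in buf here before the next pass */
	} while (next_seid != 0);

	return status;
}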
2388 
2389 /**
2390  * i40e_aq_set_switch_config
2391  * @hw: pointer to the hardware structure
2392  * @flags: bit flag values to set
2393  * @valid_flags: which bit flags to set
2394  * @mode: cloud filter mode
2396  * @cmd_details: pointer to command details structure or NULL
2397  *
2398  * Set switch configuration bits
2399  **/
2400 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2401 						u16 flags,
2402 						u16 valid_flags, u8 mode,
2403 				struct i40e_asq_cmd_details *cmd_details)
2404 {
2405 	struct i40e_aq_desc desc;
2406 	struct i40e_aqc_set_switch_config *scfg =
2407 		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
2408 	enum i40e_status_code status;
2409 
2410 	i40e_fill_default_direct_cmd_desc(&desc,
2411 					  i40e_aqc_opc_set_switch_config);
2412 	scfg->flags = cpu_to_le16(flags);
2413 	scfg->valid_flags = cpu_to_le16(valid_flags);
2414 	scfg->mode = mode;
2415 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2416 		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2417 		scfg->first_tag = cpu_to_le16(hw->first_tag);
2418 		scfg->second_tag = cpu_to_le16(hw->second_tag);
2419 	}
2420 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2421 
2422 	return status;
2423 }
2424 
2425 /**
2426  * i40e_aq_get_firmware_version
2427  * @hw: pointer to the hw struct
2428  * @fw_major_version: firmware major version
2429  * @fw_minor_version: firmware minor version
2430  * @fw_build: firmware build number
2431  * @api_major_version: admin queue API major version
2432  * @api_minor_version: admin queue API minor version
2433  * @cmd_details: pointer to command details structure or NULL
2434  *
2435  * Get the firmware version from the admin queue commands
2436  **/
2437 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2438 				u16 *fw_major_version, u16 *fw_minor_version,
2439 				u32 *fw_build,
2440 				u16 *api_major_version, u16 *api_minor_version,
2441 				struct i40e_asq_cmd_details *cmd_details)
2442 {
2443 	struct i40e_aq_desc desc;
2444 	struct i40e_aqc_get_version *resp =
2445 		(struct i40e_aqc_get_version *)&desc.params.raw;
2446 	i40e_status status;
2447 
2448 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2449 
2450 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2451 
2452 	if (!status) {
2453 		if (fw_major_version)
2454 			*fw_major_version = le16_to_cpu(resp->fw_major);
2455 		if (fw_minor_version)
2456 			*fw_minor_version = le16_to_cpu(resp->fw_minor);
2457 		if (fw_build)
2458 			*fw_build = le32_to_cpu(resp->fw_build);
2459 		if (api_major_version)
2460 			*api_major_version = le16_to_cpu(resp->api_major);
2461 		if (api_minor_version)
2462 			*api_minor_version = le16_to_cpu(resp->api_minor);
2463 	}
2464 
2465 	return status;
2466 }
2467 
2468 /**
2469  * i40e_aq_send_driver_version
2470  * @hw: pointer to the hw struct
2471  * @dv: driver's major, minor version
2472  * @cmd_details: pointer to command details structure or NULL
2473  *
2474  * Send the driver version to the firmware
2475  **/
2476 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2477 				struct i40e_driver_version *dv,
2478 				struct i40e_asq_cmd_details *cmd_details)
2479 {
2480 	struct i40e_aq_desc desc;
2481 	struct i40e_aqc_driver_version *cmd =
2482 		(struct i40e_aqc_driver_version *)&desc.params.raw;
2483 	i40e_status status;
2484 	u16 len;
2485 
2486 	if (dv == NULL)
2487 		return I40E_ERR_PARAM;
2488 
2489 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2490 
2491 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2492 	cmd->driver_major_ver = dv->major_version;
2493 	cmd->driver_minor_ver = dv->minor_version;
2494 	cmd->driver_build_ver = dv->build_version;
2495 	cmd->driver_subbuild_ver = dv->subbuild_version;
2496 
2497 	len = 0;
2498 	while (len < sizeof(dv->driver_string) &&
2499 	       (dv->driver_string[len] < 0x80) &&
2500 	       dv->driver_string[len])
2501 		len++;
2502 	status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2503 				       len, cmd_details);
2504 
2505 	return status;
2506 }
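
/*
 * Illustrative sketch, not part of the driver: reporting a placeholder
 * version to the firmware.  The numbers and the string are made up.
 */
static inline void i40e_example_report_driver_version(struct i40e_hw *hw)
{
	struct i40e_driver_version dv = {};

	dv.major_version = 1;
	dv.minor_version = 0;
	dv.build_version = 0;
	dv.subbuild_version = 0;
	snprintf(dv.driver_string, sizeof(dv.driver_string), "example");

	i40e_aq_send_driver_version(hw, &dv, NULL);
}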
2507 
2508 /**
2509  * i40e_get_link_status - get status of the HW network link
2510  * @hw: pointer to the hw struct
2511  * @link_up: pointer to bool (true/false = linkup/linkdown)
2512  *
2513  * Sets *link_up to true if the link is up and false if it is down.
2514  * The value in *link_up is not valid if the returned status is non-zero.
2515  *
2516  * Side effect: LinkStatusEvent reporting becomes enabled
2517  **/
2518 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2519 {
2520 	i40e_status status = 0;
2521 
2522 	if (hw->phy.get_link_info) {
2523 		status = i40e_update_link_info(hw);
2524 
2525 		if (status)
2526 			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2527 				   status);
2528 	}
2529 
2530 	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2531 
2532 	return status;
2533 }
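
/*
 * Illustrative sketch, not part of the driver: the common "is the wire up"
 * check a caller might perform, treating an unknown status as link down.
 */
static inline bool i40e_example_link_is_up(struct i40e_hw *hw)
{
	bool link_up = false;

	if (i40e_get_link_status(hw, &link_up))
		return false;

	return link_up;
}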
2534 
2535 /**
2536  * i40e_update_link_info - update status of the HW network link
2537  * @hw: pointer to the hw struct
2538  **/
2539 i40e_status i40e_update_link_info(struct i40e_hw *hw)
2540 {
2541 	struct i40e_aq_get_phy_abilities_resp abilities;
2542 	i40e_status status = 0;
2543 
2544 	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2545 	if (status)
2546 		return status;
2547 
2548 	/* extra checking needed to ensure link info to user is timely */
2549 	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2550 	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2551 	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2552 		status = i40e_aq_get_phy_capabilities(hw, false, false,
2553 						      &abilities, NULL);
2554 		if (status)
2555 			return status;
2556 
2557 		hw->phy.link_info.req_fec_info =
2558 			abilities.fec_cfg_curr_mod_ext_info &
2559 			(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
2560 
2561 		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2562 		       sizeof(hw->phy.link_info.module_type));
2563 	}
2564 
2565 	return status;
2566 }
2567 
2568 /**
2569  * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2570  * @hw: pointer to the hw struct
2571  * @uplink_seid: the MAC or other gizmo SEID
2572  * @downlink_seid: the VSI SEID
2573  * @enabled_tc: bitmap of TCs to be enabled
2574  * @default_port: true for default port VSI, false for control port
2575  * @veb_seid: pointer to where to put the resulting VEB SEID
2576  * @enable_stats: true to turn on VEB stats
2577  * @cmd_details: pointer to command details structure or NULL
2578  *
2579  * This asks the FW to add a VEB between the uplink and downlink
2580  * elements.  If the uplink SEID is 0, this will be a floating VEB.
2581  **/
2582 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2583 				u16 downlink_seid, u8 enabled_tc,
2584 				bool default_port, u16 *veb_seid,
2585 				bool enable_stats,
2586 				struct i40e_asq_cmd_details *cmd_details)
2587 {
2588 	struct i40e_aq_desc desc;
2589 	struct i40e_aqc_add_veb *cmd =
2590 		(struct i40e_aqc_add_veb *)&desc.params.raw;
2591 	struct i40e_aqc_add_veb_completion *resp =
2592 		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2593 	i40e_status status;
2594 	u16 veb_flags = 0;
2595 
2596 	/* SEIDs need to either both be set or both be 0 for floating VEB */
2597 	if (!!uplink_seid != !!downlink_seid)
2598 		return I40E_ERR_PARAM;
2599 
2600 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2601 
2602 	cmd->uplink_seid = cpu_to_le16(uplink_seid);
2603 	cmd->downlink_seid = cpu_to_le16(downlink_seid);
2604 	cmd->enable_tcs = enabled_tc;
2605 	if (!uplink_seid)
2606 		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2607 	if (default_port)
2608 		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2609 	else
2610 		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2611 
2612 	/* reverse logic here: set the bitflag to disable the stats */
2613 	if (!enable_stats)
2614 		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2615 
2616 	cmd->veb_flags = cpu_to_le16(veb_flags);
2617 
2618 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2619 
2620 	if (!status && veb_seid)
2621 		*veb_seid = le16_to_cpu(resp->veb_seid);
2622 
2623 	return status;
2624 }
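
/*
 * Illustrative sketch, not part of the driver: create a statistics-enabled
 * default-port VEB between the MAC uplink and a PF VSI.  The SEIDs and TC
 * bitmap are supplied by the caller and are assumptions here.
 */
static inline i40e_status i40e_example_add_default_veb(struct i40e_hw *hw,
							u16 mac_seid,
							u16 vsi_seid,
							u8 enabled_tc,
							u16 *veb_seid)
{
	return i40e_aq_add_veb(hw, mac_seid, vsi_seid, enabled_tc,
			       true, veb_seid, true, NULL);
}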
2625 
2626 /**
2627  * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2628  * @hw: pointer to the hw struct
2629  * @veb_seid: the SEID of the VEB to query
2630  * @switch_id: the uplink switch id
2631  * @floating: set to true if the VEB is floating
2632  * @statistic_index: index of the stats counter block for this VEB
2633  * @vebs_used: number of VEBs used by function
2634  * @vebs_free: total VEBs not reserved by any function
2635  * @cmd_details: pointer to command details structure or NULL
2636  *
2637  * This retrieves the parameters for a particular VEB, specified by
2638  * uplink_seid, and returns them to the caller.
2639  **/
2640 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2641 				u16 veb_seid, u16 *switch_id,
2642 				bool *floating, u16 *statistic_index,
2643 				u16 *vebs_used, u16 *vebs_free,
2644 				struct i40e_asq_cmd_details *cmd_details)
2645 {
2646 	struct i40e_aq_desc desc;
2647 	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2648 		(struct i40e_aqc_get_veb_parameters_completion *)
2649 		&desc.params.raw;
2650 	i40e_status status;
2651 
2652 	if (veb_seid == 0)
2653 		return I40E_ERR_PARAM;
2654 
2655 	i40e_fill_default_direct_cmd_desc(&desc,
2656 					  i40e_aqc_opc_get_veb_parameters);
2657 	cmd_resp->seid = cpu_to_le16(veb_seid);
2658 
2659 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2660 	if (status)
2661 		goto get_veb_exit;
2662 
2663 	if (switch_id)
2664 		*switch_id = le16_to_cpu(cmd_resp->switch_id);
2665 	if (statistic_index)
2666 		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2667 	if (vebs_used)
2668 		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2669 	if (vebs_free)
2670 		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2671 	if (floating) {
2672 		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2673 
2674 		if (flags & I40E_AQC_ADD_VEB_FLOATING)
2675 			*floating = true;
2676 		else
2677 			*floating = false;
2678 	}
2679 
2680 get_veb_exit:
2681 	return status;
2682 }
2683 
2684 /**
2685  * i40e_aq_add_macvlan
2686  * @hw: pointer to the hw struct
2687  * @seid: VSI for the mac address
2688  * @mv_list: list of macvlans to be added
2689  * @count: length of the list
2690  * @cmd_details: pointer to command details structure or NULL
2691  *
2692  * Add MAC/VLAN addresses to the HW filtering
2693  **/
2694 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2695 			struct i40e_aqc_add_macvlan_element_data *mv_list,
2696 			u16 count, struct i40e_asq_cmd_details *cmd_details)
2697 {
2698 	struct i40e_aq_desc desc;
2699 	struct i40e_aqc_macvlan *cmd =
2700 		(struct i40e_aqc_macvlan *)&desc.params.raw;
2701 	i40e_status status;
2702 	u16 buf_size;
2703 	int i;
2704 
2705 	if (count == 0 || !mv_list || !hw)
2706 		return I40E_ERR_PARAM;
2707 
2708 	buf_size = count * sizeof(*mv_list);
2709 
2710 	/* prep the rest of the request */
2711 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
2712 	cmd->num_addresses = cpu_to_le16(count);
2713 	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2714 	cmd->seid[1] = 0;
2715 	cmd->seid[2] = 0;
2716 
2717 	for (i = 0; i < count; i++)
2718 		if (is_multicast_ether_addr(mv_list[i].mac_addr))
2719 			mv_list[i].flags |=
2720 			       cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2721 
2722 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2723 	if (buf_size > I40E_AQ_LARGE_BUF)
2724 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2725 
2726 	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2727 				       cmd_details);
2728 
2729 	return status;
2730 }
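
/*
 * Illustrative sketch, not part of the driver: add a single perfect-match
 * MAC filter with no VLAN.  The flag choice is one common option; multicast
 * entries get the shared-MAC flag added by the function above.
 */
static inline i40e_status i40e_example_add_mac_filter(struct i40e_hw *hw,
						      u16 vsi_seid,
						      const u8 *mac)
{
	struct i40e_aqc_add_macvlan_element_data elem = {};

	ether_addr_copy(elem.mac_addr, mac);
	elem.vlan_tag = cpu_to_le16(0);
	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);

	return i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
}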
2731 
2732 /**
2733  * i40e_aq_remove_macvlan
2734  * @hw: pointer to the hw struct
2735  * @seid: VSI for the mac address
2736  * @mv_list: list of macvlans to be removed
2737  * @count: length of the list
2738  * @cmd_details: pointer to command details structure or NULL
2739  *
2740  * Remove MAC/VLAN addresses from the HW filtering
2741  **/
2742 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2743 			struct i40e_aqc_remove_macvlan_element_data *mv_list,
2744 			u16 count, struct i40e_asq_cmd_details *cmd_details)
2745 {
2746 	struct i40e_aq_desc desc;
2747 	struct i40e_aqc_macvlan *cmd =
2748 		(struct i40e_aqc_macvlan *)&desc.params.raw;
2749 	i40e_status status;
2750 	u16 buf_size;
2751 
2752 	if (count == 0 || !mv_list || !hw)
2753 		return I40E_ERR_PARAM;
2754 
2755 	buf_size = count * sizeof(*mv_list);
2756 
2757 	/* prep the rest of the request */
2758 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2759 	cmd->num_addresses = cpu_to_le16(count);
2760 	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2761 	cmd->seid[1] = 0;
2762 	cmd->seid[2] = 0;
2763 
2764 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2765 	if (buf_size > I40E_AQ_LARGE_BUF)
2766 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2767 
2768 	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2769 				       cmd_details);
2770 
2771 	return status;
2772 }
2773 
2774 /**
2775  * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2776  * @hw: pointer to the hw struct
2777  * @opcode: AQ opcode for add or delete mirror rule
2778  * @sw_seid: Switch SEID (to which rule refers)
2779  * @rule_type: Rule Type (ingress/egress/VLAN)
2780  * @id: Destination VSI SEID or Rule ID
2781  * @count: length of the list
2782  * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2783  * @cmd_details: pointer to command details structure or NULL
2784  * @rule_id: Rule ID returned from FW
2785  * @rules_used: Number of rules used in internal switch
2786  * @rules_free: Number of rules free in internal switch
2787  *
2788  * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2789  * VEBs/VEPA elements only
2790  **/
2791 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2792 				u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2793 				u16 count, __le16 *mr_list,
2794 				struct i40e_asq_cmd_details *cmd_details,
2795 				u16 *rule_id, u16 *rules_used, u16 *rules_free)
2796 {
2797 	struct i40e_aq_desc desc;
2798 	struct i40e_aqc_add_delete_mirror_rule *cmd =
2799 		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2800 	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2801 	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2802 	i40e_status status;
2803 	u16 buf_size;
2804 
2805 	buf_size = count * sizeof(*mr_list);
2806 
2807 	/* prep the rest of the request */
2808 	i40e_fill_default_direct_cmd_desc(&desc, opcode);
2809 	cmd->seid = cpu_to_le16(sw_seid);
2810 	cmd->rule_type = cpu_to_le16(rule_type &
2811 				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
2812 	cmd->num_entries = cpu_to_le16(count);
2813 	/* Dest VSI for add, rule_id for delete */
2814 	cmd->destination = cpu_to_le16(id);
2815 	if (mr_list) {
2816 		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2817 						I40E_AQ_FLAG_RD));
2818 		if (buf_size > I40E_AQ_LARGE_BUF)
2819 			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2820 	}
2821 
2822 	status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2823 				       cmd_details);
2824 	if (!status ||
2825 	    hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2826 		if (rule_id)
2827 			*rule_id = le16_to_cpu(resp->rule_id);
2828 		if (rules_used)
2829 			*rules_used = le16_to_cpu(resp->mirror_rules_used);
2830 		if (rules_free)
2831 			*rules_free = le16_to_cpu(resp->mirror_rules_free);
2832 	}
2833 	return status;
2834 }
2835 
2836 /**
2837  * i40e_aq_add_mirrorrule - add a mirror rule
2838  * @hw: pointer to the hw struct
2839  * @sw_seid: Switch SEID (to which rule refers)
2840  * @rule_type: Rule Type (ingress/egress/VLAN)
2841  * @dest_vsi: SEID of VSI to which packets will be mirrored
2842  * @count: length of the list
2843  * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2844  * @cmd_details: pointer to command details structure or NULL
2845  * @rule_id: Rule ID returned from FW
2846  * @rules_used: Number of rules used in internal switch
2847  * @rules_free: Number of rules free in internal switch
2848  *
2849  * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2850  **/
2851 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2852 			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2853 			struct i40e_asq_cmd_details *cmd_details,
2854 			u16 *rule_id, u16 *rules_used, u16 *rules_free)
2855 {
2856 	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2857 	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2858 		if (count == 0 || !mr_list)
2859 			return I40E_ERR_PARAM;
2860 	}
2861 
2862 	return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2863 				  rule_type, dest_vsi, count, mr_list,
2864 				  cmd_details, rule_id, rules_used, rules_free);
2865 }
2866 
2867 /**
2868  * i40e_aq_delete_mirrorrule - delete a mirror rule
2869  * @hw: pointer to the hw struct
2870  * @sw_seid: Switch SEID (to which rule refers)
2871  * @rule_type: Rule Type (ingress/egress/VLAN)
2872  * @count: length of the list
2873  * @rule_id: Rule ID that is returned in the receive desc as part of
2874  *		add_mirrorrule.
2875  * @mr_list: list of mirrored VLAN IDs to be removed
2876  * @cmd_details: pointer to command details structure or NULL
2877  * @rules_used: Number of rules used in internal switch
2878  * @rules_free: Number of rules free in internal switch
2879  *
2880  * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2881  **/
2882 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2883 			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2884 			struct i40e_asq_cmd_details *cmd_details,
2885 			u16 *rules_used, u16 *rules_free)
2886 {
2887 	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2888 	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2889 		/* count and mr_list shall be valid for rule_type INGRESS VLAN
2890 		 * mirroring. For other rule_type values, count and mr_list should
2891 		 * not matter.
2892 		 */
2893 		if (count == 0 || !mr_list)
2894 			return I40E_ERR_PARAM;
2895 	}
2896 
2897 	return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2898 				  rule_type, rule_id, count, mr_list,
2899 				  cmd_details, NULL, rules_used, rules_free);
2900 }
2901 
2902 /**
2903  * i40e_aq_send_msg_to_vf
2904  * @hw: pointer to the hardware structure
2905  * @vfid: VF id to send msg
2906  * @v_opcode: opcodes for VF-PF communication
2907  * @v_retval: return error code
2908  * @msg: pointer to the msg buffer
2909  * @msglen: msg length
2910  * @cmd_details: pointer to command details
2911  *
2912  * send msg to vf
2913  **/
2914 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2915 				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2916 				struct i40e_asq_cmd_details *cmd_details)
2917 {
2918 	struct i40e_aq_desc desc;
2919 	struct i40e_aqc_pf_vf_message *cmd =
2920 		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2921 	i40e_status status;
2922 
2923 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2924 	cmd->id = cpu_to_le32(vfid);
2925 	desc.cookie_high = cpu_to_le32(v_opcode);
2926 	desc.cookie_low = cpu_to_le32(v_retval);
2927 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2928 	if (msglen) {
2929 		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2930 						I40E_AQ_FLAG_RD));
2931 		if (msglen > I40E_AQ_LARGE_BUF)
2932 			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2933 		desc.datalen = cpu_to_le16(msglen);
2934 	}
2935 	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2936 
2937 	return status;
2938 }
2939 
2940 /**
2941  * i40e_aq_debug_read_register
2942  * @hw: pointer to the hw struct
2943  * @reg_addr: register address
2944  * @reg_val: pointer to store the register value read
2945  * @cmd_details: pointer to command details structure or NULL
2946  *
2947  * Read the register using the admin queue commands
2948  **/
2949 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
2950 				u32 reg_addr, u64 *reg_val,
2951 				struct i40e_asq_cmd_details *cmd_details)
2952 {
2953 	struct i40e_aq_desc desc;
2954 	struct i40e_aqc_debug_reg_read_write *cmd_resp =
2955 		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2956 	i40e_status status;
2957 
2958 	if (reg_val == NULL)
2959 		return I40E_ERR_PARAM;
2960 
2961 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2962 
2963 	cmd_resp->address = cpu_to_le32(reg_addr);
2964 
2965 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2966 
2967 	if (!status) {
2968 		*reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2969 			   (u64)le32_to_cpu(cmd_resp->value_low);
2970 	}
2971 
2972 	return status;
2973 }
2974 
2975 /**
2976  * i40e_aq_debug_write_register
2977  * @hw: pointer to the hw struct
2978  * @reg_addr: register address
2979  * @reg_val: register value
2980  * @cmd_details: pointer to command details structure or NULL
2981  *
2982  * Write to a register using the admin queue commands
2983  **/
2984 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
2985 					u32 reg_addr, u64 reg_val,
2986 					struct i40e_asq_cmd_details *cmd_details)
2987 {
2988 	struct i40e_aq_desc desc;
2989 	struct i40e_aqc_debug_reg_read_write *cmd =
2990 		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2991 	i40e_status status;
2992 
2993 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2994 
2995 	cmd->address = cpu_to_le32(reg_addr);
2996 	cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2997 	cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2998 
2999 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3000 
3001 	return status;
3002 }
3003 
3004 /**
3005  * i40e_aq_request_resource
3006  * @hw: pointer to the hw struct
3007  * @resource: resource id
3008  * @access: access type
3009  * @sdp_number: resource number
3010  * @timeout: the maximum time in ms that the driver may hold the resource
3011  * @cmd_details: pointer to command details structure or NULL
3012  *
3013  * requests common resource using the admin queue commands
3014  **/
3015 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3016 				enum i40e_aq_resources_ids resource,
3017 				enum i40e_aq_resource_access_type access,
3018 				u8 sdp_number, u64 *timeout,
3019 				struct i40e_asq_cmd_details *cmd_details)
3020 {
3021 	struct i40e_aq_desc desc;
3022 	struct i40e_aqc_request_resource *cmd_resp =
3023 		(struct i40e_aqc_request_resource *)&desc.params.raw;
3024 	i40e_status status;
3025 
3026 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3027 
3028 	cmd_resp->resource_id = cpu_to_le16(resource);
3029 	cmd_resp->access_type = cpu_to_le16(access);
3030 	cmd_resp->resource_number = cpu_to_le32(sdp_number);
3031 
3032 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3033 	/* The completion specifies the maximum time in ms that the driver
3034 	 * may hold the resource in the Timeout field.
3035 	 * If the resource is held by someone else, the command completes with
3036 	 * busy return value and the timeout field indicates the maximum time
3037 	 * the current owner of the resource has to free it.
3038 	 */
3039 	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3040 		*timeout = le32_to_cpu(cmd_resp->timeout);
3041 
3042 	return status;
3043 }
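
/*
 * Illustrative sketch, not part of the driver: request the NVM resource and
 * report the owner timeout if it is busy, per the semantics described above.
 * A caller would pair this with i40e_aq_release_resource() when done; the
 * choice of the NVM resource and WRITE access is only an example.
 */
static inline i40e_status i40e_example_grab_nvm(struct i40e_hw *hw)
{
	i40e_status status;
	u64 timeout = 0;

	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
					  I40E_RESOURCE_WRITE, 0,
					  &timeout, NULL);
	if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM busy, owner timeout %llu ms\n", timeout);

	return status;
}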
3044 
3045 /**
3046  * i40e_aq_release_resource
3047  * @hw: pointer to the hw struct
3048  * @resource: resource id
3049  * @sdp_number: resource number
3050  * @cmd_details: pointer to command details structure or NULL
3051  *
3052  * release common resource using the admin queue commands
3053  **/
3054 i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3055 				enum i40e_aq_resources_ids resource,
3056 				u8 sdp_number,
3057 				struct i40e_asq_cmd_details *cmd_details)
3058 {
3059 	struct i40e_aq_desc desc;
3060 	struct i40e_aqc_request_resource *cmd =
3061 		(struct i40e_aqc_request_resource *)&desc.params.raw;
3062 	i40e_status status;
3063 
3064 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3065 
3066 	cmd->resource_id = cpu_to_le16(resource);
3067 	cmd->resource_number = cpu_to_le32(sdp_number);
3068 
3069 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3070 
3071 	return status;
3072 }
3073 
3074 /**
3075  * i40e_aq_read_nvm
3076  * @hw: pointer to the hw struct
3077  * @module_pointer: module pointer location in words from the NVM beginning
3078  * @offset: byte offset from the module beginning
3079  * @length: length of the section to be read (in bytes from the offset)
3080  * @data: command buffer (size [bytes] = length)
3081  * @last_command: tells if this is the last command in a series
3082  * @cmd_details: pointer to command details structure or NULL
3083  *
3084  * Read the NVM using the admin queue commands
3085  **/
3086 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3087 				u32 offset, u16 length, void *data,
3088 				bool last_command,
3089 				struct i40e_asq_cmd_details *cmd_details)
3090 {
3091 	struct i40e_aq_desc desc;
3092 	struct i40e_aqc_nvm_update *cmd =
3093 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3094 	i40e_status status;
3095 
3096 	/* In offset the highest byte must be zeroed. */
3097 	if (offset & 0xFF000000) {
3098 		status = I40E_ERR_PARAM;
3099 		goto i40e_aq_read_nvm_exit;
3100 	}
3101 
3102 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3103 
3104 	/* If this is the last command in a series, set the proper flag. */
3105 	if (last_command)
3106 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3107 	cmd->module_pointer = module_pointer;
3108 	cmd->offset = cpu_to_le32(offset);
3109 	cmd->length = cpu_to_le16(length);
3110 
3111 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3112 	if (length > I40E_AQ_LARGE_BUF)
3113 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3114 
3115 	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3116 
3117 i40e_aq_read_nvm_exit:
3118 	return status;
3119 }
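
/*
 * Illustrative sketch, not part of the driver: split a larger NVM read into
 * AQ-sized pieces and flag the final piece as the last command.  The 4 KB
 * chunk size is an assumption, not a hardware requirement.
 */
static inline i40e_status i40e_example_read_nvm_chunked(struct i40e_hw *hw,
							u32 offset, u32 len,
							u8 *data)
{
	i40e_status status = 0;
	u16 chunk;

	while (len) {
		chunk = (u16)min_t(u32, len, 4096);
		status = i40e_aq_read_nvm(hw, 0, offset, chunk, data,
					  (len - chunk) == 0, NULL);
		if (status)
			break;
		offset += chunk;
		data += chunk;
		len -= chunk;
	}

	return status;
}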
3120 
3121 /**
3122  * i40e_aq_erase_nvm
3123  * @hw: pointer to the hw struct
3124  * @module_pointer: module pointer location in words from the NVM beginning
3125  * @offset: offset in the module (expressed in 4 KB from module's beginning)
3126  * @length: length of the section to be erased (expressed in 4 KB)
3127  * @last_command: tells if this is the last command in a series
3128  * @cmd_details: pointer to command details structure or NULL
3129  *
3130  * Erase the NVM sector using the admin queue commands
3131  **/
3132 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3133 			      u32 offset, u16 length, bool last_command,
3134 			      struct i40e_asq_cmd_details *cmd_details)
3135 {
3136 	struct i40e_aq_desc desc;
3137 	struct i40e_aqc_nvm_update *cmd =
3138 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3139 	i40e_status status;
3140 
3141 	/* In offset the highest byte must be zeroed. */
3142 	if (offset & 0xFF000000) {
3143 		status = I40E_ERR_PARAM;
3144 		goto i40e_aq_erase_nvm_exit;
3145 	}
3146 
3147 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3148 
3149 	/* If this is the last command in a series, set the proper flag. */
3150 	if (last_command)
3151 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3152 	cmd->module_pointer = module_pointer;
3153 	cmd->offset = cpu_to_le32(offset);
3154 	cmd->length = cpu_to_le16(length);
3155 
3156 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3157 
3158 i40e_aq_erase_nvm_exit:
3159 	return status;
3160 }
3161 
3162 /**
3163  * i40e_parse_discover_capabilities
3164  * @hw: pointer to the hw struct
3165  * @buff: pointer to a buffer containing device/function capability records
3166  * @cap_count: number of capability records in the list
3167  * @list_type_opc: type of capabilities list to parse
3168  *
3169  * Parse the device/function capabilities list.
3170  **/
3171 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3172 				     u32 cap_count,
3173 				     enum i40e_admin_queue_opc list_type_opc)
3174 {
3175 	struct i40e_aqc_list_capabilities_element_resp *cap;
3176 	u32 valid_functions, num_functions;
3177 	u32 number, logical_id, phys_id;
3178 	struct i40e_hw_capabilities *p;
3179 	u16 id, ocp_cfg_word0;
3180 	i40e_status status;
3181 	u8 major_rev;
3182 	u32 i = 0;
3183 
3184 	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3185 
3186 	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3187 		p = &hw->dev_caps;
3188 	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3189 		p = &hw->func_caps;
3190 	else
3191 		return;
3192 
3193 	for (i = 0; i < cap_count; i++, cap++) {
3194 		id = le16_to_cpu(cap->id);
3195 		number = le32_to_cpu(cap->number);
3196 		logical_id = le32_to_cpu(cap->logical_id);
3197 		phys_id = le32_to_cpu(cap->phys_id);
3198 		major_rev = cap->major_rev;
3199 
3200 		switch (id) {
3201 		case I40E_AQ_CAP_ID_SWITCH_MODE:
3202 			p->switch_mode = number;
3203 			break;
3204 		case I40E_AQ_CAP_ID_MNG_MODE:
3205 			p->management_mode = number;
3206 			if (major_rev > 1) {
3207 				p->mng_protocols_over_mctp = logical_id;
3208 				i40e_debug(hw, I40E_DEBUG_INIT,
3209 					   "HW Capability: Protocols over MCTP = %d\n",
3210 					   p->mng_protocols_over_mctp);
3211 			} else {
3212 				p->mng_protocols_over_mctp = 0;
3213 			}
3214 			break;
3215 		case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3216 			p->npar_enable = number;
3217 			break;
3218 		case I40E_AQ_CAP_ID_OS2BMC_CAP:
3219 			p->os2bmc = number;
3220 			break;
3221 		case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3222 			p->valid_functions = number;
3223 			break;
3224 		case I40E_AQ_CAP_ID_SRIOV:
3225 			if (number == 1)
3226 				p->sr_iov_1_1 = true;
3227 			break;
3228 		case I40E_AQ_CAP_ID_VF:
3229 			p->num_vfs = number;
3230 			p->vf_base_id = logical_id;
3231 			break;
3232 		case I40E_AQ_CAP_ID_VMDQ:
3233 			if (number == 1)
3234 				p->vmdq = true;
3235 			break;
3236 		case I40E_AQ_CAP_ID_8021QBG:
3237 			if (number == 1)
3238 				p->evb_802_1_qbg = true;
3239 			break;
3240 		case I40E_AQ_CAP_ID_8021QBR:
3241 			if (number == 1)
3242 				p->evb_802_1_qbh = true;
3243 			break;
3244 		case I40E_AQ_CAP_ID_VSI:
3245 			p->num_vsis = number;
3246 			break;
3247 		case I40E_AQ_CAP_ID_DCB:
3248 			if (number == 1) {
3249 				p->dcb = true;
3250 				p->enabled_tcmap = logical_id;
3251 				p->maxtc = phys_id;
3252 			}
3253 			break;
3254 		case I40E_AQ_CAP_ID_FCOE:
3255 			if (number == 1)
3256 				p->fcoe = true;
3257 			break;
3258 		case I40E_AQ_CAP_ID_ISCSI:
3259 			if (number == 1)
3260 				p->iscsi = true;
3261 			break;
3262 		case I40E_AQ_CAP_ID_RSS:
3263 			p->rss = true;
3264 			p->rss_table_size = number;
3265 			p->rss_table_entry_width = logical_id;
3266 			break;
3267 		case I40E_AQ_CAP_ID_RXQ:
3268 			p->num_rx_qp = number;
3269 			p->base_queue = phys_id;
3270 			break;
3271 		case I40E_AQ_CAP_ID_TXQ:
3272 			p->num_tx_qp = number;
3273 			p->base_queue = phys_id;
3274 			break;
3275 		case I40E_AQ_CAP_ID_MSIX:
3276 			p->num_msix_vectors = number;
3277 			i40e_debug(hw, I40E_DEBUG_INIT,
3278 				   "HW Capability: MSIX vector count = %d\n",
3279 				   p->num_msix_vectors);
3280 			break;
3281 		case I40E_AQ_CAP_ID_VF_MSIX:
3282 			p->num_msix_vectors_vf = number;
3283 			break;
3284 		case I40E_AQ_CAP_ID_FLEX10:
3285 			if (major_rev == 1) {
3286 				if (number == 1) {
3287 					p->flex10_enable = true;
3288 					p->flex10_capable = true;
3289 				}
3290 			} else {
3291 				/* Capability revision >= 2 */
3292 				if (number & 1)
3293 					p->flex10_enable = true;
3294 				if (number & 2)
3295 					p->flex10_capable = true;
3296 			}
3297 			p->flex10_mode = logical_id;
3298 			p->flex10_status = phys_id;
3299 			break;
3300 		case I40E_AQ_CAP_ID_CEM:
3301 			if (number == 1)
3302 				p->mgmt_cem = true;
3303 			break;
3304 		case I40E_AQ_CAP_ID_IWARP:
3305 			if (number == 1)
3306 				p->iwarp = true;
3307 			break;
3308 		case I40E_AQ_CAP_ID_LED:
3309 			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3310 				p->led[phys_id] = true;
3311 			break;
3312 		case I40E_AQ_CAP_ID_SDP:
3313 			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3314 				p->sdp[phys_id] = true;
3315 			break;
3316 		case I40E_AQ_CAP_ID_MDIO:
3317 			if (number == 1) {
3318 				p->mdio_port_num = phys_id;
3319 				p->mdio_port_mode = logical_id;
3320 			}
3321 			break;
3322 		case I40E_AQ_CAP_ID_1588:
3323 			if (number == 1)
3324 				p->ieee_1588 = true;
3325 			break;
3326 		case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3327 			p->fd = true;
3328 			p->fd_filters_guaranteed = number;
3329 			p->fd_filters_best_effort = logical_id;
3330 			break;
3331 		case I40E_AQ_CAP_ID_WSR_PROT:
3332 			p->wr_csr_prot = (u64)number;
3333 			p->wr_csr_prot |= (u64)logical_id << 32;
3334 			break;
3335 		case I40E_AQ_CAP_ID_NVM_MGMT:
3336 			if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3337 				p->sec_rev_disabled = true;
3338 			if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3339 				p->update_disabled = true;
3340 			break;
3341 		default:
3342 			break;
3343 		}
3344 	}
3345 
3346 	if (p->fcoe)
3347 		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3348 
	/* Software override to ensure FCoE is disabled in NPAR or MFP
	 * mode, because FCoE is not supported in those modes.
	 */
3352 	if (p->npar_enable || p->flex10_enable)
3353 		p->fcoe = false;
3354 
3355 	/* count the enabled ports (aka the "not disabled" ports) */
3356 	hw->num_ports = 0;
3357 	for (i = 0; i < 4; i++) {
3358 		u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3359 		u64 port_cfg = 0;
3360 
3361 		/* use AQ read to get the physical register offset instead
3362 		 * of the port relative offset
3363 		 */
3364 		i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3365 		if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3366 			hw->num_ports++;
3367 	}
3368 
	/* OCP cards case: if a mezz is removed, the Ethernet port is left in
	 * the disabled state in the PRTGEN_CNF register. An additional NVM
	 * read is needed to check whether we are dealing with an OCP card.
	 * Those cards have at least 4 PFs, so counting physical ports via
	 * PRTGEN_CNF would yield a wrong partition id and thus break WoL
	 * support.
	 */
3376 	if (hw->mac.type == I40E_MAC_X722) {
3377 		if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3378 			status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3379 						  2 * I40E_SR_OCP_CFG_WORD0,
3380 						  sizeof(ocp_cfg_word0),
3381 						  &ocp_cfg_word0, true, NULL);
3382 			if (!status &&
3383 			    (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3384 				hw->num_ports = 4;
3385 			i40e_release_nvm(hw);
3386 		}
3387 	}
3388 
3389 	valid_functions = p->valid_functions;
3390 	num_functions = 0;
3391 	while (valid_functions) {
3392 		if (valid_functions & 1)
3393 			num_functions++;
3394 		valid_functions >>= 1;
3395 	}
3396 
3397 	/* partition id is 1-based, and functions are evenly spread
3398 	 * across the ports as partitions
3399 	 */
3400 	if (hw->num_ports != 0) {
3401 		hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3402 		hw->num_partitions = num_functions / hw->num_ports;
3403 	}
3404 
3405 	/* additional HW specific goodies that might
3406 	 * someday be HW version specific
3407 	 */
3408 	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3409 }
3410 
3411 /**
3412  * i40e_aq_discover_capabilities
3413  * @hw: pointer to the hw struct
3414  * @buff: a virtual buffer to hold the capabilities
3415  * @buff_size: Size of the virtual buffer
3416  * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3417  * @list_type_opc: capabilities type to discover - pass in the command opcode
3418  * @cmd_details: pointer to command details structure or NULL
3419  *
3420  * Get the device capabilities descriptions from the firmware
3421  **/
3422 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3423 				void *buff, u16 buff_size, u16 *data_size,
3424 				enum i40e_admin_queue_opc list_type_opc,
3425 				struct i40e_asq_cmd_details *cmd_details)
3426 {
3427 	struct i40e_aqc_list_capabilites *cmd;
3428 	struct i40e_aq_desc desc;
3429 	i40e_status status = 0;
3430 
3431 	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3432 
3433 	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3434 		list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3435 		status = I40E_ERR_PARAM;
3436 		goto exit;
3437 	}
3438 
3439 	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3440 
3441 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3442 	if (buff_size > I40E_AQ_LARGE_BUF)
3443 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3444 
3445 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3446 	*data_size = le16_to_cpu(desc.datalen);
3447 
3448 	if (status)
3449 		goto exit;
3450 
3451 	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3452 					 list_type_opc);
3453 
3454 exit:
3455 	return status;
3456 }
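
/* Illustrative usage sketch, not part of the driver: fetch the device
 * capability list. If firmware reports ENOMEM, data_size carries the buffer
 * size actually needed, so the caller can reallocate and retry. The initial
 * buffer length and the kzalloc-based allocation are assumptions made for
 * this example.
 *
 *	u16 data_size = 0, buf_len = 1024;
 *	void *buf = kzalloc(buf_len, GFP_KERNEL);
 *	i40e_status err;
 *
 *	if (!buf)
 *		return I40E_ERR_NO_MEMORY;
 *	err = i40e_aq_discover_capabilities(hw, buf, buf_len, &data_size,
 *					    i40e_aqc_opc_list_dev_capabilities,
 *					    NULL);
 *	if (err && hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
 *		buf_len = data_size;
 *		(reallocate buf with buf_len bytes and call again)
 *	}
 *	kfree(buf);
 */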
3457 
3458 /**
3459  * i40e_aq_update_nvm
3460  * @hw: pointer to the hw struct
3461  * @module_pointer: module pointer location in words from the NVM beginning
3462  * @offset: byte offset from the module beginning
3463  * @length: length of the section to be written (in bytes from the offset)
3464  * @data: command buffer (size [bytes] = length)
3465  * @last_command: tells if this is the last command in a series
3466  * @preservation_flags: Preservation mode flags
3467  * @cmd_details: pointer to command details structure or NULL
3468  *
3469  * Update the NVM using the admin queue commands
3470  **/
3471 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3472 			       u32 offset, u16 length, void *data,
3473 				bool last_command, u8 preservation_flags,
3474 			       struct i40e_asq_cmd_details *cmd_details)
3475 {
3476 	struct i40e_aq_desc desc;
3477 	struct i40e_aqc_nvm_update *cmd =
3478 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3479 	i40e_status status;
3480 
	/* The highest byte of the offset must be zero. */
3482 	if (offset & 0xFF000000) {
3483 		status = I40E_ERR_PARAM;
3484 		goto i40e_aq_update_nvm_exit;
3485 	}
3486 
3487 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3488 
3489 	/* If this is the last command in a series, set the proper flag. */
3490 	if (last_command)
3491 		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3492 	if (hw->mac.type == I40E_MAC_X722) {
3493 		if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3494 			cmd->command_flags |=
3495 				(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3496 				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3497 		else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3498 			cmd->command_flags |=
3499 				(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3500 				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3501 	}
3502 	cmd->module_pointer = module_pointer;
3503 	cmd->offset = cpu_to_le32(offset);
3504 	cmd->length = cpu_to_le16(length);
3505 
3506 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3507 	if (length > I40E_AQ_LARGE_BUF)
3508 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3509 
3510 	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3511 
3512 i40e_aq_update_nvm_exit:
3513 	return status;
3514 }
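
/* Illustrative usage sketch, not part of the driver: write a buffer at a
 * byte offset within a module as the last command of a series, asking an
 * X722 to preserve all preserved fields. The module pointer, offset, and
 * data buffer are placeholders for this example.
 *
 *	i40e_status err;
 *
 *	err = i40e_aq_update_nvm(hw, module_ptr, byte_offset, data_len, data,
 *				 true, I40E_NVM_PRESERVATION_FLAGS_ALL, NULL);
 *	if (err)
 *		hw_dbg(hw, "NVM update failed: %d\n", err);
 */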
3515 
3516 /**
3517  * i40e_aq_rearrange_nvm
3518  * @hw: pointer to the hw struct
3519  * @rearrange_nvm: defines direction of rearrangement
3520  * @cmd_details: pointer to command details structure or NULL
3521  *
3522  * Rearrange NVM structure, available only for transition FW
3523  **/
3524 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3525 				  u8 rearrange_nvm,
3526 				  struct i40e_asq_cmd_details *cmd_details)
3527 {
3528 	struct i40e_aqc_nvm_update *cmd;
3529 	i40e_status status;
3530 	struct i40e_aq_desc desc;
3531 
3532 	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3533 
3534 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3535 
3536 	rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3537 			 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3538 
3539 	if (!rearrange_nvm) {
3540 		status = I40E_ERR_PARAM;
3541 		goto i40e_aq_rearrange_nvm_exit;
3542 	}
3543 
3544 	cmd->command_flags |= rearrange_nvm;
3545 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3546 
3547 i40e_aq_rearrange_nvm_exit:
3548 	return status;
3549 }
3550 
3551 /**
3552  * i40e_aq_get_lldp_mib
3553  * @hw: pointer to the hw struct
3554  * @bridge_type: type of bridge requested
3555  * @mib_type: Local, Remote or both Local and Remote MIBs
3556  * @buff: pointer to a user supplied buffer to store the MIB block
3557  * @buff_size: size of the buffer (in bytes)
 * @local_len: length of the returned Local LLDP MIB
3559  * @remote_len: length of the returned Remote LLDP MIB
3560  * @cmd_details: pointer to command details structure or NULL
3561  *
3562  * Requests the complete LLDP MIB (entire packet).
3563  **/
3564 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3565 				u8 mib_type, void *buff, u16 buff_size,
3566 				u16 *local_len, u16 *remote_len,
3567 				struct i40e_asq_cmd_details *cmd_details)
3568 {
3569 	struct i40e_aq_desc desc;
3570 	struct i40e_aqc_lldp_get_mib *cmd =
3571 		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3572 	struct i40e_aqc_lldp_get_mib *resp =
3573 		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3574 	i40e_status status;
3575 
3576 	if (buff_size == 0 || !buff)
3577 		return I40E_ERR_PARAM;
3578 
3579 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3580 	/* Indirect Command */
3581 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3582 
3583 	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3584 	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3585 		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3586 
3587 	desc.datalen = cpu_to_le16(buff_size);
3588 
3589 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3590 	if (buff_size > I40E_AQ_LARGE_BUF)
3591 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3592 
3593 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3594 	if (!status) {
3595 		if (local_len != NULL)
3596 			*local_len = le16_to_cpu(resp->local_len);
3597 		if (remote_len != NULL)
3598 			*remote_len = le16_to_cpu(resp->remote_len);
3599 	}
3600 
3601 	return status;
3602 }
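
/* Illustrative usage sketch, not part of the driver: request the local LLDP
 * MIB from the nearest bridge. The buffer size is an arbitrary choice for
 * this example; the MIB and bridge type constants are assumed from the admin
 * queue command definitions.
 *
 *	u8 mib[512];
 *	u16 local_len = 0, remote_len = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *				   I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *				   &local_len, &remote_len, NULL);
 */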
3603 
3604 /**
3605  * i40e_aq_cfg_lldp_mib_change_event
3606  * @hw: pointer to the hw struct
3607  * @enable_update: Enable or Disable event posting
3608  * @cmd_details: pointer to command details structure or NULL
3609  *
3610  * Enable or Disable posting of an event on ARQ when LLDP MIB
3611  * associated with the interface changes
3612  **/
3613 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3614 				bool enable_update,
3615 				struct i40e_asq_cmd_details *cmd_details)
3616 {
3617 	struct i40e_aq_desc desc;
3618 	struct i40e_aqc_lldp_update_mib *cmd =
3619 		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3620 	i40e_status status;
3621 
3622 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3623 
3624 	if (!enable_update)
3625 		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3626 
3627 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3628 
3629 	return status;
3630 }
3631 
3632 /**
3633  * i40e_aq_restore_lldp
3634  * @hw: pointer to the hw struct
3635  * @setting: pointer to factory setting variable or NULL
3636  * @restore: True if factory settings should be restored
3637  * @cmd_details: pointer to command details structure or NULL
3638  *
 * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
 * only return the factory setting in the AQ response.
3641  **/
3642 enum i40e_status_code
3643 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3644 		     struct i40e_asq_cmd_details *cmd_details)
3645 {
3646 	struct i40e_aq_desc desc;
3647 	struct i40e_aqc_lldp_restore *cmd =
3648 		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
3649 	i40e_status status;
3650 
3651 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3652 		i40e_debug(hw, I40E_DEBUG_ALL,
3653 			   "Restore LLDP not supported by current FW version.\n");
3654 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3655 	}
3656 
3657 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3658 
3659 	if (restore)
3660 		cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3661 
3662 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3663 
3664 	if (setting)
3665 		*setting = cmd->command & 1;
3666 
3667 	return status;
3668 }
3669 
3670 /**
3671  * i40e_aq_stop_lldp
3672  * @hw: pointer to the hw struct
3673  * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3674  * @persist: True if stop of LLDP should be persistent across power cycles
3675  * @cmd_details: pointer to command details structure or NULL
3676  *
3677  * Stop or Shutdown the embedded LLDP Agent
3678  **/
3679 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3680 				bool persist,
3681 				struct i40e_asq_cmd_details *cmd_details)
3682 {
3683 	struct i40e_aq_desc desc;
3684 	struct i40e_aqc_lldp_stop *cmd =
3685 		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
3686 	i40e_status status;
3687 
3688 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3689 
3690 	if (shutdown_agent)
3691 		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3692 
3693 	if (persist) {
3694 		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3695 			cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3696 		else
3697 			i40e_debug(hw, I40E_DEBUG_ALL,
3698 				   "Persistent Stop LLDP not supported by current FW version.\n");
3699 	}
3700 
3701 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3702 
3703 	return status;
3704 }
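
/* Illustrative usage sketch, not part of the driver: shut the embedded LLDP
 * agent down and request that the stop persist across power cycles; as
 * handled above, older firmware without persistent LLDP support still gets a
 * plain stop.
 *
 *	i40e_status err;
 *
 *	err = i40e_aq_stop_lldp(hw, true, true, NULL);
 */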
3705 
3706 /**
3707  * i40e_aq_start_lldp
3708  * @hw: pointer to the hw struct
 * @persist: True if start of LLDP should be persistent across power cycles
3712  * @cmd_details: pointer to command details structure or NULL
3713  *
3714  * Start the embedded LLDP Agent on all ports.
3715  **/
3716 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3717 			       struct i40e_asq_cmd_details *cmd_details)
3718 {
3719 	struct i40e_aq_desc desc;
3720 	struct i40e_aqc_lldp_start *cmd =
3721 		(struct i40e_aqc_lldp_start *)&desc.params.raw;
3722 	i40e_status status;
3723 
3724 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3725 
3726 	cmd->command = I40E_AQ_LLDP_AGENT_START;
3727 
3728 	if (persist) {
3729 		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3730 			cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3731 		else
3732 			i40e_debug(hw, I40E_DEBUG_ALL,
3733 				   "Persistent Start LLDP not supported by current FW version.\n");
3734 	}
3735 
3736 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3737 
3738 	return status;
3739 }
3740 
3741 /**
3742  * i40e_aq_set_dcb_parameters
3743  * @hw: pointer to the hw struct
 * @dcb_enable: True if DCB configuration needs to be applied
 * @cmd_details: pointer to command details structure or NULL
3746  *
3747  **/
3748 enum i40e_status_code
3749 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3750 			   struct i40e_asq_cmd_details *cmd_details)
3751 {
3752 	struct i40e_aq_desc desc;
3753 	struct i40e_aqc_set_dcb_parameters *cmd =
3754 		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3755 	i40e_status status;
3756 
3757 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3758 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3759 
3760 	i40e_fill_default_direct_cmd_desc(&desc,
3761 					  i40e_aqc_opc_set_dcb_parameters);
3762 
3763 	if (dcb_enable) {
3764 		cmd->valid_flags = I40E_DCB_VALID;
3765 		cmd->command = I40E_AQ_DCB_SET_AGENT;
3766 	}
3767 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3768 
3769 	return status;
3770 }
3771 
3772 /**
3773  * i40e_aq_get_cee_dcb_config
3774  * @hw: pointer to the hw struct
3775  * @buff: response buffer that stores CEE operational configuration
3776  * @buff_size: size of the buffer passed
3777  * @cmd_details: pointer to command details structure or NULL
3778  *
3779  * Get CEE DCBX mode operational configuration from firmware
3780  **/
3781 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3782 				       void *buff, u16 buff_size,
3783 				       struct i40e_asq_cmd_details *cmd_details)
3784 {
3785 	struct i40e_aq_desc desc;
3786 	i40e_status status;
3787 
3788 	if (buff_size == 0 || !buff)
3789 		return I40E_ERR_PARAM;
3790 
3791 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3792 
3793 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3794 	status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3795 				       cmd_details);
3796 
3797 	return status;
3798 }
3799 
3800 /**
3801  * i40e_aq_add_udp_tunnel
3802  * @hw: pointer to the hw struct
3803  * @udp_port: the UDP port to add in Host byte order
3804  * @protocol_index: protocol index type
3805  * @filter_index: pointer to filter index
3806  * @cmd_details: pointer to command details structure or NULL
3807  *
3808  * Note: Firmware expects the udp_port value to be in Little Endian format,
3809  * and this function will call cpu_to_le16 to convert from Host byte order to
3810  * Little Endian order.
3811  **/
3812 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3813 				u16 udp_port, u8 protocol_index,
3814 				u8 *filter_index,
3815 				struct i40e_asq_cmd_details *cmd_details)
3816 {
3817 	struct i40e_aq_desc desc;
3818 	struct i40e_aqc_add_udp_tunnel *cmd =
3819 		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3820 	struct i40e_aqc_del_udp_tunnel_completion *resp =
3821 		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3822 	i40e_status status;
3823 
3824 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3825 
3826 	cmd->udp_port = cpu_to_le16(udp_port);
3827 	cmd->protocol_type = protocol_index;
3828 
3829 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3830 
3831 	if (!status && filter_index)
3832 		*filter_index = resp->index;
3833 
3834 	return status;
3835 }
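
/* Illustrative usage sketch, not part of the driver: add a filter for the
 * IANA-assigned VXLAN port 4789, passed in host byte order as documented
 * above. The tunnel type constant is assumed from the admin queue command
 * definitions.
 *
 *	u8 filter_index;
 *	i40e_status err;
 *
 *	err = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *				     &filter_index, NULL);
 *
 * On success, filter_index can later be handed to i40e_aq_del_udp_tunnel().
 */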
3836 
3837 /**
3838  * i40e_aq_del_udp_tunnel
3839  * @hw: pointer to the hw struct
3840  * @index: filter index
3841  * @cmd_details: pointer to command details structure or NULL
3842  **/
3843 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3844 				struct i40e_asq_cmd_details *cmd_details)
3845 {
3846 	struct i40e_aq_desc desc;
3847 	struct i40e_aqc_remove_udp_tunnel *cmd =
3848 		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3849 	i40e_status status;
3850 
3851 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3852 
3853 	cmd->index = index;
3854 
3855 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3856 
3857 	return status;
3858 }
3859 
3860 /**
3861  * i40e_aq_delete_element - Delete switch element
3862  * @hw: pointer to the hw struct
3863  * @seid: the SEID to delete from the switch
3864  * @cmd_details: pointer to command details structure or NULL
3865  *
3866  * This deletes a switch element from the switch.
3867  **/
3868 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3869 				struct i40e_asq_cmd_details *cmd_details)
3870 {
3871 	struct i40e_aq_desc desc;
3872 	struct i40e_aqc_switch_seid *cmd =
3873 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
3874 	i40e_status status;
3875 
3876 	if (seid == 0)
3877 		return I40E_ERR_PARAM;
3878 
3879 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3880 
3881 	cmd->seid = cpu_to_le16(seid);
3882 
3883 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3884 
3885 	return status;
3886 }
3887 
3888 /**
3889  * i40e_aq_dcb_updated - DCB Updated Command
3890  * @hw: pointer to the hw struct
3891  * @cmd_details: pointer to command details structure or NULL
3892  *
3893  * EMP will return when the shared RPB settings have been
3894  * recomputed and modified. The retval field in the descriptor
3895  * will be set to 0 when RPB is modified.
3896  **/
3897 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
3898 				struct i40e_asq_cmd_details *cmd_details)
3899 {
3900 	struct i40e_aq_desc desc;
3901 	i40e_status status;
3902 
3903 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3904 
3905 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3906 
3907 	return status;
3908 }
3909 
3910 /**
3911  * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3912  * @hw: pointer to the hw struct
3913  * @seid: seid for the physical port/switching component/vsi
3914  * @buff: Indirect buffer to hold data parameters and response
3915  * @buff_size: Indirect buffer size
3916  * @opcode: Tx scheduler AQ command opcode
3917  * @cmd_details: pointer to command details structure or NULL
3918  *
3919  * Generic command handler for Tx scheduler AQ commands
3920  **/
3921 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3922 				void *buff, u16 buff_size,
3923 				 enum i40e_admin_queue_opc opcode,
3924 				struct i40e_asq_cmd_details *cmd_details)
3925 {
3926 	struct i40e_aq_desc desc;
3927 	struct i40e_aqc_tx_sched_ind *cmd =
3928 		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3929 	i40e_status status;
3930 	bool cmd_param_flag = false;
3931 
3932 	switch (opcode) {
3933 	case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3934 	case i40e_aqc_opc_configure_vsi_tc_bw:
3935 	case i40e_aqc_opc_enable_switching_comp_ets:
3936 	case i40e_aqc_opc_modify_switching_comp_ets:
3937 	case i40e_aqc_opc_disable_switching_comp_ets:
3938 	case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3939 	case i40e_aqc_opc_configure_switching_comp_bw_config:
3940 		cmd_param_flag = true;
3941 		break;
3942 	case i40e_aqc_opc_query_vsi_bw_config:
3943 	case i40e_aqc_opc_query_vsi_ets_sla_config:
3944 	case i40e_aqc_opc_query_switching_comp_ets_config:
3945 	case i40e_aqc_opc_query_port_ets_config:
3946 	case i40e_aqc_opc_query_switching_comp_bw_config:
3947 		cmd_param_flag = false;
3948 		break;
3949 	default:
3950 		return I40E_ERR_PARAM;
3951 	}
3952 
3953 	i40e_fill_default_direct_cmd_desc(&desc, opcode);
3954 
3955 	/* Indirect command */
3956 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3957 	if (cmd_param_flag)
3958 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3959 	if (buff_size > I40E_AQ_LARGE_BUF)
3960 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3961 
3962 	desc.datalen = cpu_to_le16(buff_size);
3963 
3964 	cmd->vsi_seid = cpu_to_le16(seid);
3965 
3966 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3967 
3968 	return status;
3969 }
3970 
3971 /**
3972  * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3973  * @hw: pointer to the hw struct
3974  * @seid: VSI seid
3975  * @credit: BW limit credits (0 = disabled)
3976  * @max_credit: Max BW limit credits
3977  * @cmd_details: pointer to command details structure or NULL
3978  **/
3979 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3980 				u16 seid, u16 credit, u8 max_credit,
3981 				struct i40e_asq_cmd_details *cmd_details)
3982 {
3983 	struct i40e_aq_desc desc;
3984 	struct i40e_aqc_configure_vsi_bw_limit *cmd =
3985 		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
3986 	i40e_status status;
3987 
3988 	i40e_fill_default_direct_cmd_desc(&desc,
3989 					  i40e_aqc_opc_configure_vsi_bw_limit);
3990 
3991 	cmd->vsi_seid = cpu_to_le16(seid);
3992 	cmd->credit = cpu_to_le16(credit);
3993 	cmd->max_credit = max_credit;
3994 
3995 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3996 
3997 	return status;
3998 }
3999 
4000 /**
4001  * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
4002  * @hw: pointer to the hw struct
4003  * @seid: VSI seid
4004  * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
4005  * @cmd_details: pointer to command details structure or NULL
4006  **/
4007 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
4008 			u16 seid,
4009 			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
4010 			struct i40e_asq_cmd_details *cmd_details)
4011 {
4012 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4013 				    i40e_aqc_opc_configure_vsi_tc_bw,
4014 				    cmd_details);
4015 }
4016 
4017 /**
4018  * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4019  * @hw: pointer to the hw struct
4020  * @seid: seid of the switching component connected to Physical Port
4021  * @ets_data: Buffer holding ETS parameters
4022  * @opcode: Tx scheduler AQ command opcode
4023  * @cmd_details: pointer to command details structure or NULL
4024  **/
4025 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4026 		u16 seid,
4027 		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4028 		enum i40e_admin_queue_opc opcode,
4029 		struct i40e_asq_cmd_details *cmd_details)
4030 {
4031 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4032 				    sizeof(*ets_data), opcode, cmd_details);
4033 }
4034 
4035 /**
4036  * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4037  * @hw: pointer to the hw struct
4038  * @seid: seid of the switching component
4039  * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4040  * @cmd_details: pointer to command details structure or NULL
4041  **/
4042 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4043 	u16 seid,
4044 	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4045 	struct i40e_asq_cmd_details *cmd_details)
4046 {
4047 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4048 			    i40e_aqc_opc_configure_switching_comp_bw_config,
4049 			    cmd_details);
4050 }
4051 
4052 /**
4053  * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4054  * @hw: pointer to the hw struct
4055  * @seid: seid of the VSI
4056  * @bw_data: Buffer to hold VSI BW configuration
4057  * @cmd_details: pointer to command details structure or NULL
4058  **/
4059 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4060 			u16 seid,
4061 			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4062 			struct i40e_asq_cmd_details *cmd_details)
4063 {
4064 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4065 				    i40e_aqc_opc_query_vsi_bw_config,
4066 				    cmd_details);
4067 }
4068 
4069 /**
4070  * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4071  * @hw: pointer to the hw struct
4072  * @seid: seid of the VSI
4073  * @bw_data: Buffer to hold VSI BW configuration per TC
4074  * @cmd_details: pointer to command details structure or NULL
4075  **/
4076 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4077 			u16 seid,
4078 			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4079 			struct i40e_asq_cmd_details *cmd_details)
4080 {
4081 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4082 				    i40e_aqc_opc_query_vsi_ets_sla_config,
4083 				    cmd_details);
4084 }
4085 
4086 /**
4087  * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4088  * @hw: pointer to the hw struct
4089  * @seid: seid of the switching component
4090  * @bw_data: Buffer to hold switching component's per TC BW config
4091  * @cmd_details: pointer to command details structure or NULL
4092  **/
4093 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4094 		u16 seid,
4095 		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4096 		struct i40e_asq_cmd_details *cmd_details)
4097 {
4098 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4099 				   i40e_aqc_opc_query_switching_comp_ets_config,
4100 				   cmd_details);
4101 }
4102 
4103 /**
4104  * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4105  * @hw: pointer to the hw struct
4106  * @seid: seid of the VSI or switching component connected to Physical Port
4107  * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4108  * @cmd_details: pointer to command details structure or NULL
4109  **/
4110 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4111 			u16 seid,
4112 			struct i40e_aqc_query_port_ets_config_resp *bw_data,
4113 			struct i40e_asq_cmd_details *cmd_details)
4114 {
4115 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4116 				    i40e_aqc_opc_query_port_ets_config,
4117 				    cmd_details);
4118 }
4119 
4120 /**
4121  * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4122  * @hw: pointer to the hw struct
4123  * @seid: seid of the switching component
4124  * @bw_data: Buffer to hold switching component's BW configuration
4125  * @cmd_details: pointer to command details structure or NULL
4126  **/
4127 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4128 		u16 seid,
4129 		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4130 		struct i40e_asq_cmd_details *cmd_details)
4131 {
4132 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4133 				    i40e_aqc_opc_query_switching_comp_bw_config,
4134 				    cmd_details);
4135 }
4136 
4137 /**
4138  * i40e_validate_filter_settings
4139  * @hw: pointer to the hardware structure
4140  * @settings: Filter control settings
4141  *
4142  * Check and validate the filter control settings passed.
4143  * The function checks for the valid filter/context sizes being
4144  * passed for FCoE and PE.
4145  *
4146  * Returns 0 if the values passed are valid and within
4147  * range else returns an error.
4148  **/
4149 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4150 				struct i40e_filter_control_settings *settings)
4151 {
4152 	u32 fcoe_cntx_size, fcoe_filt_size;
4153 	u32 pe_cntx_size, pe_filt_size;
4154 	u32 fcoe_fmax;
4155 	u32 val;
4156 
4157 	/* Validate FCoE settings passed */
4158 	switch (settings->fcoe_filt_num) {
4159 	case I40E_HASH_FILTER_SIZE_1K:
4160 	case I40E_HASH_FILTER_SIZE_2K:
4161 	case I40E_HASH_FILTER_SIZE_4K:
4162 	case I40E_HASH_FILTER_SIZE_8K:
4163 	case I40E_HASH_FILTER_SIZE_16K:
4164 	case I40E_HASH_FILTER_SIZE_32K:
4165 		fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4166 		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4167 		break;
4168 	default:
4169 		return I40E_ERR_PARAM;
4170 	}
4171 
4172 	switch (settings->fcoe_cntx_num) {
4173 	case I40E_DMA_CNTX_SIZE_512:
4174 	case I40E_DMA_CNTX_SIZE_1K:
4175 	case I40E_DMA_CNTX_SIZE_2K:
4176 	case I40E_DMA_CNTX_SIZE_4K:
4177 		fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4178 		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4179 		break;
4180 	default:
4181 		return I40E_ERR_PARAM;
4182 	}
4183 
4184 	/* Validate PE settings passed */
4185 	switch (settings->pe_filt_num) {
4186 	case I40E_HASH_FILTER_SIZE_1K:
4187 	case I40E_HASH_FILTER_SIZE_2K:
4188 	case I40E_HASH_FILTER_SIZE_4K:
4189 	case I40E_HASH_FILTER_SIZE_8K:
4190 	case I40E_HASH_FILTER_SIZE_16K:
4191 	case I40E_HASH_FILTER_SIZE_32K:
4192 	case I40E_HASH_FILTER_SIZE_64K:
4193 	case I40E_HASH_FILTER_SIZE_128K:
4194 	case I40E_HASH_FILTER_SIZE_256K:
4195 	case I40E_HASH_FILTER_SIZE_512K:
4196 	case I40E_HASH_FILTER_SIZE_1M:
4197 		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4198 		pe_filt_size <<= (u32)settings->pe_filt_num;
4199 		break;
4200 	default:
4201 		return I40E_ERR_PARAM;
4202 	}
4203 
4204 	switch (settings->pe_cntx_num) {
4205 	case I40E_DMA_CNTX_SIZE_512:
4206 	case I40E_DMA_CNTX_SIZE_1K:
4207 	case I40E_DMA_CNTX_SIZE_2K:
4208 	case I40E_DMA_CNTX_SIZE_4K:
4209 	case I40E_DMA_CNTX_SIZE_8K:
4210 	case I40E_DMA_CNTX_SIZE_16K:
4211 	case I40E_DMA_CNTX_SIZE_32K:
4212 	case I40E_DMA_CNTX_SIZE_64K:
4213 	case I40E_DMA_CNTX_SIZE_128K:
4214 	case I40E_DMA_CNTX_SIZE_256K:
4215 		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4216 		pe_cntx_size <<= (u32)settings->pe_cntx_num;
4217 		break;
4218 	default:
4219 		return I40E_ERR_PARAM;
4220 	}
4221 
4222 	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4223 	val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4224 	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4225 		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4226 	if (fcoe_filt_size + fcoe_cntx_size >  fcoe_fmax)
4227 		return I40E_ERR_INVALID_SIZE;
4228 
4229 	return 0;
4230 }
4231 
4232 /**
4233  * i40e_set_filter_control
4234  * @hw: pointer to the hardware structure
4235  * @settings: Filter control settings
4236  *
4237  * Set the Queue Filters for PE/FCoE and enable filters required
4238  * for a single PF. It is expected that these settings are programmed
4239  * at the driver initialization time.
4240  **/
4241 i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4242 				struct i40e_filter_control_settings *settings)
4243 {
4244 	i40e_status ret = 0;
4245 	u32 hash_lut_size = 0;
4246 	u32 val;
4247 
4248 	if (!settings)
4249 		return I40E_ERR_PARAM;
4250 
4251 	/* Validate the input settings */
4252 	ret = i40e_validate_filter_settings(hw, settings);
4253 	if (ret)
4254 		return ret;
4255 
4256 	/* Read the PF Queue Filter control register */
4257 	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4258 
4259 	/* Program required PE hash buckets for the PF */
4260 	val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4261 	val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4262 		I40E_PFQF_CTL_0_PEHSIZE_MASK;
4263 	/* Program required PE contexts for the PF */
4264 	val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4265 	val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4266 		I40E_PFQF_CTL_0_PEDSIZE_MASK;
4267 
4268 	/* Program required FCoE hash buckets for the PF */
4269 	val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4270 	val |= ((u32)settings->fcoe_filt_num <<
4271 			I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4272 		I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4273 	/* Program required FCoE DDP contexts for the PF */
4274 	val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4275 	val |= ((u32)settings->fcoe_cntx_num <<
4276 			I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4277 		I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4278 
4279 	/* Program Hash LUT size for the PF */
4280 	val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4281 	if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4282 		hash_lut_size = 1;
4283 	val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4284 		I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4285 
4286 	/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4287 	if (settings->enable_fdir)
4288 		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4289 	if (settings->enable_ethtype)
4290 		val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4291 	if (settings->enable_macvlan)
4292 		val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4293 
4294 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4295 
4296 	return 0;
4297 }
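
/* Illustrative usage sketch, not part of the driver: program the PF queue
 * filter configuration at driver init time. The sizes below are placeholder
 * choices; they only need to be values accepted by the validation above.
 *
 *	struct i40e_filter_control_settings settings = {};
 *	i40e_status err;
 *
 *	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	err = i40e_set_filter_control(hw, &settings);
 */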
4298 
4299 /**
4300  * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4301  * @hw: pointer to the hw struct
4302  * @mac_addr: MAC address to use in the filter
4303  * @ethtype: Ethertype to use in the filter
 * @flags: Flags that need to be applied to the filter
4305  * @vsi_seid: seid of the control VSI
4306  * @queue: VSI queue number to send the packet to
4307  * @is_add: Add control packet filter if True else remove
4308  * @stats: Structure to hold information on control filter counts
4309  * @cmd_details: pointer to command details structure or NULL
4310  *
4311  * This command will Add or Remove control packet filter for a control VSI.
4312  * In return it will update the total number of perfect filter count in
4313  * the stats member.
4314  **/
4315 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4316 				u8 *mac_addr, u16 ethtype, u16 flags,
4317 				u16 vsi_seid, u16 queue, bool is_add,
4318 				struct i40e_control_filter_stats *stats,
4319 				struct i40e_asq_cmd_details *cmd_details)
4320 {
4321 	struct i40e_aq_desc desc;
4322 	struct i40e_aqc_add_remove_control_packet_filter *cmd =
4323 		(struct i40e_aqc_add_remove_control_packet_filter *)
4324 		&desc.params.raw;
4325 	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4326 		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
4327 		&desc.params.raw;
4328 	i40e_status status;
4329 
4330 	if (vsi_seid == 0)
4331 		return I40E_ERR_PARAM;
4332 
4333 	if (is_add) {
4334 		i40e_fill_default_direct_cmd_desc(&desc,
4335 				i40e_aqc_opc_add_control_packet_filter);
4336 		cmd->queue = cpu_to_le16(queue);
4337 	} else {
4338 		i40e_fill_default_direct_cmd_desc(&desc,
4339 				i40e_aqc_opc_remove_control_packet_filter);
4340 	}
4341 
4342 	if (mac_addr)
4343 		ether_addr_copy(cmd->mac, mac_addr);
4344 
4345 	cmd->etype = cpu_to_le16(ethtype);
4346 	cmd->flags = cpu_to_le16(flags);
4347 	cmd->seid = cpu_to_le16(vsi_seid);
4348 
4349 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4350 
4351 	if (!status && stats) {
4352 		stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4353 		stats->etype_used = le16_to_cpu(resp->etype_used);
4354 		stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4355 		stats->etype_free = le16_to_cpu(resp->etype_free);
4356 	}
4357 
4358 	return status;
4359 }
4360 
4361 /**
 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
 * @hw: pointer to the hw struct
 * @seid: VSI seid on which to add the ethertype filter
4365  **/
4366 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4367 						    u16 seid)
4368 {
4369 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4370 	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4371 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4372 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4373 	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4374 	i40e_status status;
4375 
4376 	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4377 						       seid, 0, true, NULL,
4378 						       NULL);
4379 	if (status)
4380 		hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4381 }
4382 
4383 /**
4384  * i40e_aq_alternate_read
4385  * @hw: pointer to the hardware structure
4386  * @reg_addr0: address of first dword to be read
4387  * @reg_val0: pointer for data read from 'reg_addr0'
4388  * @reg_addr1: address of second dword to be read
4389  * @reg_val1: pointer for data read from 'reg_addr1'
4390  *
4391  * Read one or two dwords from alternate structure. Fields are indicated
4392  * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4393  * is not passed then only register at 'reg_addr0' is read.
4394  *
4395  **/
4396 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4397 					  u32 reg_addr0, u32 *reg_val0,
4398 					  u32 reg_addr1, u32 *reg_val1)
4399 {
4400 	struct i40e_aq_desc desc;
4401 	struct i40e_aqc_alternate_write *cmd_resp =
4402 		(struct i40e_aqc_alternate_write *)&desc.params.raw;
4403 	i40e_status status;
4404 
4405 	if (!reg_val0)
4406 		return I40E_ERR_PARAM;
4407 
4408 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4409 	cmd_resp->address0 = cpu_to_le32(reg_addr0);
4410 	cmd_resp->address1 = cpu_to_le32(reg_addr1);
4411 
4412 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4413 
4414 	if (!status) {
4415 		*reg_val0 = le32_to_cpu(cmd_resp->data0);
4416 
4417 		if (reg_val1)
4418 			*reg_val1 = le32_to_cpu(cmd_resp->data1);
4419 	}
4420 
4421 	return status;
4422 }
4423 
4424 /**
4425  * i40e_aq_resume_port_tx
4426  * @hw: pointer to the hardware structure
4427  * @cmd_details: pointer to command details structure or NULL
4428  *
4429  * Resume port's Tx traffic
4430  **/
4431 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4432 				   struct i40e_asq_cmd_details *cmd_details)
4433 {
4434 	struct i40e_aq_desc desc;
4435 	i40e_status status;
4436 
4437 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4438 
4439 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4440 
4441 	return status;
4442 }
4443 
4444 /**
4445  * i40e_set_pci_config_data - store PCI bus info
4446  * @hw: pointer to hardware structure
4447  * @link_status: the link status word from PCI config space
4448  *
4449  * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4450  **/
4451 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4452 {
4453 	hw->bus.type = i40e_bus_type_pci_express;
4454 
4455 	switch (link_status & PCI_EXP_LNKSTA_NLW) {
4456 	case PCI_EXP_LNKSTA_NLW_X1:
4457 		hw->bus.width = i40e_bus_width_pcie_x1;
4458 		break;
4459 	case PCI_EXP_LNKSTA_NLW_X2:
4460 		hw->bus.width = i40e_bus_width_pcie_x2;
4461 		break;
4462 	case PCI_EXP_LNKSTA_NLW_X4:
4463 		hw->bus.width = i40e_bus_width_pcie_x4;
4464 		break;
4465 	case PCI_EXP_LNKSTA_NLW_X8:
4466 		hw->bus.width = i40e_bus_width_pcie_x8;
4467 		break;
4468 	default:
4469 		hw->bus.width = i40e_bus_width_unknown;
4470 		break;
4471 	}
4472 
4473 	switch (link_status & PCI_EXP_LNKSTA_CLS) {
4474 	case PCI_EXP_LNKSTA_CLS_2_5GB:
4475 		hw->bus.speed = i40e_bus_speed_2500;
4476 		break;
4477 	case PCI_EXP_LNKSTA_CLS_5_0GB:
4478 		hw->bus.speed = i40e_bus_speed_5000;
4479 		break;
4480 	case PCI_EXP_LNKSTA_CLS_8_0GB:
4481 		hw->bus.speed = i40e_bus_speed_8000;
4482 		break;
4483 	default:
4484 		hw->bus.speed = i40e_bus_speed_unknown;
4485 		break;
4486 	}
4487 }
4488 
4489 /**
4490  * i40e_aq_debug_dump
4491  * @hw: pointer to the hardware structure
4492  * @cluster_id: specific cluster to dump
4493  * @table_id: table id within cluster
4494  * @start_index: index of line in the block to read
4495  * @buff_size: dump buffer size
4496  * @buff: dump buffer
4497  * @ret_buff_size: actual buffer size returned
4498  * @ret_next_table: next block to read
4499  * @ret_next_index: next index to read
4500  * @cmd_details: pointer to command details structure or NULL
4501  *
4502  * Dump internal FW/HW data for debug purposes.
4503  *
4504  **/
4505 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4506 			       u8 table_id, u32 start_index, u16 buff_size,
4507 			       void *buff, u16 *ret_buff_size,
4508 			       u8 *ret_next_table, u32 *ret_next_index,
4509 			       struct i40e_asq_cmd_details *cmd_details)
4510 {
4511 	struct i40e_aq_desc desc;
4512 	struct i40e_aqc_debug_dump_internals *cmd =
4513 		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4514 	struct i40e_aqc_debug_dump_internals *resp =
4515 		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4516 	i40e_status status;
4517 
4518 	if (buff_size == 0 || !buff)
4519 		return I40E_ERR_PARAM;
4520 
4521 	i40e_fill_default_direct_cmd_desc(&desc,
4522 					  i40e_aqc_opc_debug_dump_internals);
4523 	/* Indirect Command */
4524 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4525 	if (buff_size > I40E_AQ_LARGE_BUF)
4526 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4527 
4528 	cmd->cluster_id = cluster_id;
4529 	cmd->table_id = table_id;
4530 	cmd->idx = cpu_to_le32(start_index);
4531 
4532 	desc.datalen = cpu_to_le16(buff_size);
4533 
4534 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4535 	if (!status) {
4536 		if (ret_buff_size)
4537 			*ret_buff_size = le16_to_cpu(desc.datalen);
4538 		if (ret_next_table)
4539 			*ret_next_table = resp->table_id;
4540 		if (ret_next_index)
4541 			*ret_next_index = le32_to_cpu(resp->idx);
4542 	}
4543 
4544 	return status;
4545 }
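
/* Illustrative usage sketch, not part of the driver: dump one block of
 * internal FW/HW state. The cluster and table ids are placeholders; valid
 * values depend on the firmware.
 *
 *	u8 buf[4096];
 *	u16 ret_size = 0;
 *	u8 next_table = 0;
 *	u32 next_index = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_debug_dump(hw, cluster_id, table_id, 0, sizeof(buf),
 *				 buf, &ret_size, &next_table, &next_index,
 *				 NULL);
 */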
4546 
4547 /**
4548  * i40e_read_bw_from_alt_ram
4549  * @hw: pointer to the hardware structure
4550  * @max_bw: pointer for max_bw read
4551  * @min_bw: pointer for min_bw read
4552  * @min_valid: pointer for bool that is true if min_bw is a valid value
4553  * @max_valid: pointer for bool that is true if max_bw is a valid value
4554  *
 * Read bandwidth limits from the alternate RAM for the given PF
4556  **/
4557 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4558 				      u32 *max_bw, u32 *min_bw,
4559 				      bool *min_valid, bool *max_valid)
4560 {
4561 	i40e_status status;
4562 	u32 max_bw_addr, min_bw_addr;
4563 
4564 	/* Calculate the address of the min/max bw registers */
4565 	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4566 		      I40E_ALT_STRUCT_MAX_BW_OFFSET +
4567 		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4568 	min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4569 		      I40E_ALT_STRUCT_MIN_BW_OFFSET +
4570 		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4571 
4572 	/* Read the bandwidths from alt ram */
4573 	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4574 					min_bw_addr, min_bw);
4575 
4576 	if (*min_bw & I40E_ALT_BW_VALID_MASK)
4577 		*min_valid = true;
4578 	else
4579 		*min_valid = false;
4580 
4581 	if (*max_bw & I40E_ALT_BW_VALID_MASK)
4582 		*max_valid = true;
4583 	else
4584 		*max_valid = false;
4585 
4586 	return status;
4587 }
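
/* Illustrative usage sketch, not part of the driver: read the per-PF min/max
 * bandwidth from alternate RAM and only trust values whose valid bit is set.
 *
 *	u32 max_bw = 0, min_bw = 0;
 *	bool min_valid = false, max_valid = false;
 *	i40e_status err;
 *
 *	err = i40e_read_bw_from_alt_ram(hw, &max_bw, &min_bw,
 *					&min_valid, &max_valid);
 *	if (!err && max_valid)
 *		hw_dbg(hw, "max_bw raw value: 0x%08x\n", max_bw);
 */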
4588 
4589 /**
4590  * i40e_aq_configure_partition_bw
4591  * @hw: pointer to the hardware structure
 * @bw_data: Buffer holding valid PFs and BW limits
 * @cmd_details: pointer to command details structure or NULL
4594  *
4595  * Configure partitions guaranteed/max bw
4596  **/
4597 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4598 			struct i40e_aqc_configure_partition_bw_data *bw_data,
4599 			struct i40e_asq_cmd_details *cmd_details)
4600 {
4601 	i40e_status status;
4602 	struct i40e_aq_desc desc;
4603 	u16 bwd_size = sizeof(*bw_data);
4604 
4605 	i40e_fill_default_direct_cmd_desc(&desc,
4606 					  i40e_aqc_opc_configure_partition_bw);
4607 
4608 	/* Indirect command */
4609 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4610 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4611 
4612 	if (bwd_size > I40E_AQ_LARGE_BUF)
4613 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4614 
4615 	desc.datalen = cpu_to_le16(bwd_size);
4616 
4617 	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4618 				       cmd_details);
4619 
4620 	return status;
4621 }
4622 
4623 /**
4624  * i40e_read_phy_register_clause22
4625  * @hw: pointer to the HW structure
4626  * @reg: register address in the page
4627  * @phy_addr: PHY address on MDIO interface
4628  * @value: PHY register value
4629  *
4630  * Reads specified PHY register value
4631  **/
4632 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4633 					    u16 reg, u8 phy_addr, u16 *value)
4634 {
4635 	i40e_status status = I40E_ERR_TIMEOUT;
4636 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4637 	u32 command = 0;
4638 	u16 retry = 1000;
4639 
4640 	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4641 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4642 		  (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4643 		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4644 		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4645 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4646 	do {
4647 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4648 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4649 			status = 0;
4650 			break;
4651 		}
4652 		udelay(10);
4653 		retry--;
4654 	} while (retry);
4655 
4656 	if (status) {
4657 		i40e_debug(hw, I40E_DEBUG_PHY,
4658 			   "PHY: Can't write command to external PHY.\n");
4659 	} else {
4660 		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4661 		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4662 			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4663 	}
4664 
4665 	return status;
4666 }
4667 
4668 /**
4669  * i40e_write_phy_register_clause22
4670  * @hw: pointer to the HW structure
4671  * @reg: register address in the page
4672  * @phy_addr: PHY address on MDIO interface
4673  * @value: PHY register value
4674  *
4675  * Writes specified PHY register value
4676  **/
4677 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4678 					     u16 reg, u8 phy_addr, u16 value)
4679 {
4680 	i40e_status status = I40E_ERR_TIMEOUT;
4681 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4682 	u32 command  = 0;
4683 	u16 retry = 1000;
4684 
4685 	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4686 	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4687 
4688 	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4689 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4690 		  (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4691 		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4692 		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4693 
4694 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4695 	do {
4696 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4697 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4698 			status = 0;
4699 			break;
4700 		}
4701 		udelay(10);
4702 		retry--;
4703 	} while (retry);
4704 
4705 	return status;
4706 }
4707 
4708 /**
4709  * i40e_read_phy_register_clause45
4710  * @hw: pointer to the HW structure
4711  * @page: registers page number
4712  * @reg: register address in the page
4713  * @phy_addr: PHY address on MDIO interface
4714  * @value: PHY register value
4715  *
4716  * Reads specified PHY register value
4717  **/
4718 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4719 				u8 page, u16 reg, u8 phy_addr, u16 *value)
4720 {
4721 	i40e_status status = I40E_ERR_TIMEOUT;
4722 	u32 command = 0;
4723 	u16 retry = 1000;
4724 	u8 port_num = hw->func_caps.mdio_port_num;
4725 
4726 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4727 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4728 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4729 		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4730 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4731 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4732 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4733 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4734 	do {
4735 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4736 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4737 			status = 0;
4738 			break;
4739 		}
4740 		usleep_range(10, 20);
4741 		retry--;
4742 	} while (retry);
4743 
4744 	if (status) {
4745 		i40e_debug(hw, I40E_DEBUG_PHY,
4746 			   "PHY: Can't write command to external PHY.\n");
4747 		goto phy_read_end;
4748 	}
4749 
4750 	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4751 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4752 		  (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4753 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4754 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4755 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4756 	status = I40E_ERR_TIMEOUT;
4757 	retry = 1000;
4758 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4759 	do {
4760 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4761 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4762 			status = 0;
4763 			break;
4764 		}
4765 		usleep_range(10, 20);
4766 		retry--;
4767 	} while (retry);
4768 
4769 	if (!status) {
4770 		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4771 		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4772 			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4773 	} else {
4774 		i40e_debug(hw, I40E_DEBUG_PHY,
4775 			   "PHY: Can't read register value from external PHY.\n");
4776 	}
4777 
4778 phy_read_end:
4779 	return status;
4780 }
4781 
4782 /**
4783  * i40e_write_phy_register_clause45
4784  * @hw: pointer to the HW structure
4785  * @page: registers page number
4786  * @reg: register address in the page
4787  * @phy_addr: PHY address on MDIO interface
4788  * @value: PHY register value
4789  *
4790  * Writes value to specified PHY register
4791  **/
4792 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4793 				u8 page, u16 reg, u8 phy_addr, u16 value)
4794 {
4795 	i40e_status status = I40E_ERR_TIMEOUT;
4796 	u32 command = 0;
4797 	u16 retry = 1000;
4798 	u8 port_num = hw->func_caps.mdio_port_num;
4799 
4800 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4801 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4802 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4803 		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4804 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4805 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4806 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4807 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4808 	do {
4809 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4810 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4811 			status = 0;
4812 			break;
4813 		}
4814 		usleep_range(10, 20);
4815 		retry--;
4816 	} while (retry);
4817 	if (status) {
4818 		i40e_debug(hw, I40E_DEBUG_PHY,
4819 			   "PHY: Can't write command to external PHY.\n");
4820 		goto phy_write_end;
4821 	}
4822 
4823 	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4824 	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4825 
4826 	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4827 		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4828 		  (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4829 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4830 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4831 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4832 	status = I40E_ERR_TIMEOUT;
4833 	retry = 1000;
4834 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4835 	do {
4836 		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4837 		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4838 			status = 0;
4839 			break;
4840 		}
4841 		usleep_range(10, 20);
4842 		retry--;
4843 	} while (retry);
4844 
4845 phy_write_end:
4846 	return status;
4847 }
4848 
4849 /**
4850  * i40e_write_phy_register
4851  * @hw: pointer to the HW structure
4852  * @page: registers page number
4853  * @reg: register address in the page
4854  * @phy_addr: PHY address on MDIO interface
4855  * @value: PHY register value
4856  *
4857  * Writes value to specified PHY register
4858  **/
4859 i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4860 				    u8 page, u16 reg, u8 phy_addr, u16 value)
4861 {
4862 	i40e_status status;
4863 
4864 	switch (hw->device_id) {
4865 	case I40E_DEV_ID_1G_BASE_T_X722:
4866 		status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4867 							  value);
4868 		break;
4869 	case I40E_DEV_ID_10G_BASE_T:
4870 	case I40E_DEV_ID_10G_BASE_T4:
4871 	case I40E_DEV_ID_10G_BASE_T_X722:
4872 	case I40E_DEV_ID_25G_B:
4873 	case I40E_DEV_ID_25G_SFP28:
4874 		status = i40e_write_phy_register_clause45(hw, page, reg,
4875 							  phy_addr, value);
4876 		break;
4877 	default:
4878 		status = I40E_ERR_UNKNOWN_PHY;
4879 		break;
4880 	}
4881 
4882 	return status;
4883 }
4884 
4885 /**
4886  * i40e_read_phy_register - read a PHY register
4887  * @hw: pointer to the HW structure
4888  * @page: registers page number
4889  * @reg: register address in the page
4890  * @phy_addr: PHY address on MDIO interface
4891  * @value: pointer to where the read register value is returned
4892  *
4893  * Reads the specified PHY register value
4894  **/
4895 i40e_status i40e_read_phy_register(struct i40e_hw *hw,
4896 				   u8 page, u16 reg, u8 phy_addr, u16 *value)
4897 {
4898 	i40e_status status;
4899 
4900 	switch (hw->device_id) {
4901 	case I40E_DEV_ID_1G_BASE_T_X722:
4902 		status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4903 							 value);
4904 		break;
4905 	case I40E_DEV_ID_10G_BASE_T:
4906 	case I40E_DEV_ID_10G_BASE_T4:
4907 	case I40E_DEV_ID_10G_BASE_T_BC:
4908 	case I40E_DEV_ID_10G_BASE_T_X722:
4909 	case I40E_DEV_ID_25G_B:
4910 	case I40E_DEV_ID_25G_SFP28:
4911 		status = i40e_read_phy_register_clause45(hw, page, reg,
4912 							 phy_addr, value);
4913 		break;
4914 	default:
4915 		status = I40E_ERR_UNKNOWN_PHY;
4916 		break;
4917 	}
4918 
4919 	return status;
4920 }
4921 
4922 /**
4923  * i40e_get_phy_address - get PHY address for the current port
4924  * @hw: pointer to the HW structure
4925  * @dev_num: MDIO device number whose PHY address we want
4926  *
4927  * Gets the PHY address of the given MDIO device for the current port
4928  **/
4929 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4930 {
4931 	u8 port_num = hw->func_caps.mdio_port_num;
4932 	u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4933 
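	/* I40E_GLGEN_MDIO_I2C_SEL packs one 5-bit PHY address per MDIO
	 * device; device N occupies bits [(N + 1) * 5 + 4 : (N + 1) * 5].
	 */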
4934 	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4935 }
4936 
4937 /**
4938  * i40e_blink_phy_link_led - blink the PHY link LED
4939  * @hw: pointer to the HW structure
4940  * @time: how long the LED should blink, in seconds
4941  * @interval: gap between LED on and off, in msecs
4942  *
4943  * Blinks the PHY link LED
4944  **/
4945 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
4946 				    u32 time, u32 interval)
4947 {
4948 	i40e_status status = 0;
4949 	u32 i;
4950 	u16 led_ctl;
4951 	u16 gpio_led_port;
4952 	u16 led_reg;
4953 	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4954 	u8 phy_addr = 0;
4955 	u8 port_num;
4956 
4957 	i = rd32(hw, I40E_PFGEN_PORTNUM);
4958 	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4959 	phy_addr = i40e_get_phy_address(hw, port_num);
4960 
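	/* Find which of the three LED provisioning registers drives the
	 * link LED, save its value and take it out of link-activity mode
	 * so it can be toggled manually.
	 */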
4961 	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4962 	     led_addr++) {
4963 		status = i40e_read_phy_register_clause45(hw,
4964 							 I40E_PHY_COM_REG_PAGE,
4965 							 led_addr, phy_addr,
4966 							 &led_reg);
4967 		if (status)
4968 			goto phy_blinking_end;
4969 		led_ctl = led_reg;
4970 		if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4971 			led_reg = 0;
4972 			status = i40e_write_phy_register_clause45(hw,
4973 							 I40E_PHY_COM_REG_PAGE,
4974 							 led_addr, phy_addr,
4975 							 led_reg);
4976 			if (status)
4977 				goto phy_blinking_end;
4978 			break;
4979 		}
4980 	}
4981 
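	/* Toggle the LED by flipping the manual-on bit every 'interval'
	 * msecs for roughly 'time' seconds.
	 */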
4982 	if (time > 0 && interval > 0) {
4983 		for (i = 0; i < time * 1000; i += interval) {
4984 			status = i40e_read_phy_register_clause45(hw,
4985 						I40E_PHY_COM_REG_PAGE,
4986 						led_addr, phy_addr, &led_reg);
4987 			if (status)
4988 				goto restore_config;
4989 			if (led_reg & I40E_PHY_LED_MANUAL_ON)
4990 				led_reg = 0;
4991 			else
4992 				led_reg = I40E_PHY_LED_MANUAL_ON;
4993 			status = i40e_write_phy_register_clause45(hw,
4994 						I40E_PHY_COM_REG_PAGE,
4995 						led_addr, phy_addr, led_reg);
4996 			if (status)
4997 				goto restore_config;
4998 			msleep(interval);
4999 		}
5000 	}
5001 
5002 restore_config:
5003 	status = i40e_write_phy_register_clause45(hw,
5004 						  I40E_PHY_COM_REG_PAGE,
5005 						  led_addr, phy_addr, led_ctl);
5006 
5007 phy_blinking_end:
5008 	return status;
5009 }
5010 
5011 /**
5012  * i40e_led_get_reg - read LED register
5013  * @hw: pointer to the HW structure
5014  * @led_addr: LED register address
5015  * @reg_val: read register value
5016  **/
5017 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5018 					      u32 *reg_val)
5019 {
5020 	enum i40e_status_code status;
5021 	u8 phy_addr = 0;
5022 	u8 port_num;
5023 	u32 i;
5024 
5025 	*reg_val = 0;
5026 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5027 		status =
5028 		       i40e_aq_get_phy_register(hw,
5029 						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5030 						I40E_PHY_COM_REG_PAGE,
5031 						I40E_PHY_LED_PROV_REG_1,
5032 						reg_val, NULL);
5033 	} else {
5034 		i = rd32(hw, I40E_PFGEN_PORTNUM);
5035 		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5036 		phy_addr = i40e_get_phy_address(hw, port_num);
5037 		status = i40e_read_phy_register_clause45(hw,
5038 							 I40E_PHY_COM_REG_PAGE,
5039 							 led_addr, phy_addr,
5040 							 (u16 *)reg_val);
5041 	}
5042 	return status;
5043 }
5044 
5045 /**
5046  * i40e_led_set_reg - write LED register
5047  * @hw: pointer to the HW structure
5048  * @led_addr: LED register address
5049  * @reg_val: register value to write
5050  **/
5051 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5052 					      u32 reg_val)
5053 {
5054 	enum i40e_status_code status;
5055 	u8 phy_addr = 0;
5056 	u8 port_num;
5057 	u32 i;
5058 
5059 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5060 		status =
5061 		       i40e_aq_set_phy_register(hw,
5062 						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5063 						I40E_PHY_COM_REG_PAGE,
5064 						I40E_PHY_LED_PROV_REG_1,
5065 						reg_val, NULL);
5066 	} else {
5067 		i = rd32(hw, I40E_PFGEN_PORTNUM);
5068 		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5069 		phy_addr = i40e_get_phy_address(hw, port_num);
5070 		status = i40e_write_phy_register_clause45(hw,
5071 							  I40E_PHY_COM_REG_PAGE,
5072 							  led_addr, phy_addr,
5073 							  (u16)reg_val);
5074 	}
5075 
5076 	return status;
5077 }
5078 
5079 /**
5080  * i40e_led_get_phy - return current on/off mode
5081  * @hw: pointer to the hw struct
5082  * @led_addr: pointer to where the LED register address is returned
5083  * @val: pointer to where the original register value is returned
5084  *
5085  **/
5086 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5087 			     u16 *val)
5088 {
5089 	i40e_status status = 0;
5090 	u16 gpio_led_port;
5091 	u8 phy_addr = 0;
5092 	u16 reg_val;
5093 	u16 temp_addr;
5094 	u8 port_num;
5095 	u32 i;
5096 	u32 reg_val_aq;
5097 
5098 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5099 		status =
5100 		      i40e_aq_get_phy_register(hw,
5101 					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5102 					       I40E_PHY_COM_REG_PAGE,
5103 					       I40E_PHY_LED_PROV_REG_1,
5104 					       &reg_val_aq, NULL);
5105 		if (status == I40E_SUCCESS)
5106 			*val = (u16)reg_val_aq;
5107 		return status;
5108 	}
5109 	temp_addr = I40E_PHY_LED_PROV_REG_1;
5110 	i = rd32(hw, I40E_PFGEN_PORTNUM);
5111 	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5112 	phy_addr = i40e_get_phy_address(hw, port_num);
5113 
5114 	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5115 	     temp_addr++) {
5116 		status = i40e_read_phy_register_clause45(hw,
5117 							 I40E_PHY_COM_REG_PAGE,
5118 							 temp_addr, phy_addr,
5119 							 &reg_val);
5120 		if (status)
5121 			return status;
5122 		*val = reg_val;
5123 		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5124 			*led_addr = temp_addr;
5125 			break;
5126 		}
5127 	}
5128 	return status;
5129 }
5130 
5131 /**
5132  * i40e_led_set_phy - set the LED on or off when controlled by the PHY
5133  * @hw: pointer to the HW structure
5134  * @on: true to turn the LED on, false to turn it off
5135  * @led_addr: address of led register to use
5136  * @mode: original register value, plus a flag selecting whether to restore it
5137  *
5138  * Set the LED on or off when it is controlled by the PHY
5139  *
5140  **/
5141 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5142 			     u16 led_addr, u32 mode)
5143 {
5144 	i40e_status status = 0;
5145 	u32 led_ctl = 0;
5146 	u32 led_reg = 0;
5147 
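	/* Save the current LED control value and, if the LED is in
	 * link-activity mode, clear that mode so the LED can be forced
	 * on or off below.
	 */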
5148 	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5149 	if (status)
5150 		return status;
5151 	led_ctl = led_reg;
5152 	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5153 		led_reg = 0;
5154 		status = i40e_led_set_reg(hw, led_addr, led_reg);
5155 		if (status)
5156 			return status;
5157 	}
5158 	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5159 	if (status)
5160 		goto restore_config;
5161 	if (on)
5162 		led_reg = I40E_PHY_LED_MANUAL_ON;
5163 	else
5164 		led_reg = 0;
5165 
5166 	status = i40e_led_set_reg(hw, led_addr, led_reg);
5167 	if (status)
5168 		goto restore_config;
5169 	if (mode & I40E_PHY_LED_MODE_ORIG) {
5170 		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5171 		status = i40e_led_set_reg(hw, led_addr, led_ctl);
5172 	}
5173 	return status;
5174 
5175 restore_config:
5176 	status = i40e_led_set_reg(hw, led_addr, led_ctl);
5177 	return status;
5178 }
5179 
5180 /**
5181  * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5182  * @hw: pointer to the hw struct
5183  * @reg_addr: register address
5184  * @reg_val: ptr to register value
5185  * @cmd_details: pointer to command details structure or NULL
5186  *
5187  * Use the firmware to read the Rx control register,
5188  * especially useful if the Rx unit is under heavy pressure
5189  **/
5190 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5191 				u32 reg_addr, u32 *reg_val,
5192 				struct i40e_asq_cmd_details *cmd_details)
5193 {
5194 	struct i40e_aq_desc desc;
5195 	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5196 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5197 	i40e_status status;
5198 
5199 	if (!reg_val)
5200 		return I40E_ERR_PARAM;
5201 
5202 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5203 
5204 	cmd_resp->address = cpu_to_le32(reg_addr);
5205 
5206 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5207 
5208 	if (status == 0)
5209 		*reg_val = le32_to_cpu(cmd_resp->value);
5210 
5211 	return status;
5212 }
5213 
5214 /**
5215  * i40e_read_rx_ctl - read from an Rx control register
5216  * @hw: pointer to the hw struct
5217  * @reg_addr: register address
5218  **/
5219 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5220 {
5221 	i40e_status status = 0;
5222 	bool use_register;
5223 	int retry = 5;
5224 	u32 val = 0;
5225 
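	/* On firmware with an AQ API older than 1.5 and on X722, access
	 * the register directly; otherwise go through the AdminQ and
	 * retry on EAGAIN.
	 */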
5226 	use_register = (((hw->aq.api_maj_ver == 1) &&
5227 			(hw->aq.api_min_ver < 5)) ||
5228 			(hw->mac.type == I40E_MAC_X722));
5229 	if (!use_register) {
5230 do_retry:
5231 		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5232 		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5233 			usleep_range(1000, 2000);
5234 			retry--;
5235 			goto do_retry;
5236 		}
5237 	}
5238 
5239 	/* if the AQ access failed, try the old-fashioned way */
5240 	if (status || use_register)
5241 		val = rd32(hw, reg_addr);
5242 
5243 	return val;
5244 }
5245 
5246 /**
5247  * i40e_aq_rx_ctl_write_register - use FW to write to an Rx control register
5248  * @hw: pointer to the hw struct
5249  * @reg_addr: register address
5250  * @reg_val: register value
5251  * @cmd_details: pointer to command details structure or NULL
5252  *
5253  * Use the firmware to write to an Rx control register,
5254  * especially useful if the Rx unit is under heavy pressure
5255  **/
5256 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5257 				u32 reg_addr, u32 reg_val,
5258 				struct i40e_asq_cmd_details *cmd_details)
5259 {
5260 	struct i40e_aq_desc desc;
5261 	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5262 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5263 	i40e_status status;
5264 
5265 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5266 
5267 	cmd->address = cpu_to_le32(reg_addr);
5268 	cmd->value = cpu_to_le32(reg_val);
5269 
5270 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5271 
5272 	return status;
5273 }
5274 
5275 /**
5276  * i40e_write_rx_ctl - write to an Rx control register
5277  * @hw: pointer to the hw struct
5278  * @reg_addr: register address
5279  * @reg_val: register value
5280  **/
5281 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5282 {
5283 	i40e_status status = 0;
5284 	bool use_register;
5285 	int retry = 5;
5286 
5287 	use_register = (((hw->aq.api_maj_ver == 1) &&
5288 			(hw->aq.api_min_ver < 5)) ||
5289 			(hw->mac.type == I40E_MAC_X722));
5290 	if (!use_register) {
5291 do_retry:
5292 		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5293 						       reg_val, NULL);
5294 		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5295 			usleep_range(1000, 2000);
5296 			retry--;
5297 			goto do_retry;
5298 		}
5299 	}
5300 
5301 	/* if the AQ access failed, try the old-fashioned way */
5302 	if (status || use_register)
5303 		wr32(hw, reg_addr, reg_val);
5304 }
5305 
5306 /**
5307  * i40e_aq_set_phy_register - write the external PHY register
5308  * @hw: pointer to the hw struct
5309  * @phy_select: select which phy should be accessed
5310  * @dev_addr: PHY device address
5311  * @reg_addr: PHY register address
5312  * @reg_val: new register value
5313  * @cmd_details: pointer to command details structure or NULL
5314  *
5315  * Write the external PHY register.
5316  **/
5317 i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
5318 				     u8 phy_select, u8 dev_addr,
5319 				     u32 reg_addr, u32 reg_val,
5320 				     struct i40e_asq_cmd_details *cmd_details)
5321 {
5322 	struct i40e_aq_desc desc;
5323 	struct i40e_aqc_phy_register_access *cmd =
5324 		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5325 	i40e_status status;
5326 
5327 	i40e_fill_default_direct_cmd_desc(&desc,
5328 					  i40e_aqc_opc_set_phy_register);
5329 
5330 	cmd->phy_interface = phy_select;
5331 	cmd->dev_address = dev_addr;
5332 	cmd->reg_address = cpu_to_le32(reg_addr);
5333 	cmd->reg_value = cpu_to_le32(reg_val);
5334 
5335 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5336 
5337 	return status;
5338 }
5339 
5340 /**
5341  * i40e_aq_get_phy_register - read the external PHY register
5342  * @hw: pointer to the hw struct
5343  * @phy_select: select which phy should be accessed
5344  * @dev_addr: PHY device address
5345  * @reg_addr: PHY register address
5346  * @reg_val: read register value
5347  * @cmd_details: pointer to command details structure or NULL
5348  *
5349  * Read the external PHY register.
5350  **/
5351 i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
5352 				     u8 phy_select, u8 dev_addr,
5353 				     u32 reg_addr, u32 *reg_val,
5354 				     struct i40e_asq_cmd_details *cmd_details)
5355 {
5356 	struct i40e_aq_desc desc;
5357 	struct i40e_aqc_phy_register_access *cmd =
5358 		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5359 	i40e_status status;
5360 
5361 	i40e_fill_default_direct_cmd_desc(&desc,
5362 					  i40e_aqc_opc_get_phy_register);
5363 
5364 	cmd->phy_interface = phy_select;
5365 	cmd->dev_address = dev_addr;
5366 	cmd->reg_address = cpu_to_le32(reg_addr);
5367 
5368 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5369 	if (!status)
5370 		*reg_val = le32_to_cpu(cmd->reg_value);
5371 
5372 	return status;
5373 }
5374 
5375 /**
5376  * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5377  * @hw: pointer to the hw struct
5378  * @buff: command buffer (size in bytes = buff_size)
5379  * @buff_size: buffer size in bytes
5380  * @track_id: package tracking id
5381  * @error_offset: returns error offset
5382  * @error_info: returns error information
5383  * @cmd_details: pointer to command details structure or NULL
5384  **/
5385 enum
5386 i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5387 				   u16 buff_size, u32 track_id,
5388 				   u32 *error_offset, u32 *error_info,
5389 				   struct i40e_asq_cmd_details *cmd_details)
5390 {
5391 	struct i40e_aq_desc desc;
5392 	struct i40e_aqc_write_personalization_profile *cmd =
5393 		(struct i40e_aqc_write_personalization_profile *)
5394 		&desc.params.raw;
5395 	struct i40e_aqc_write_ddp_resp *resp;
5396 	i40e_status status;
5397 
5398 	i40e_fill_default_direct_cmd_desc(&desc,
5399 					  i40e_aqc_opc_write_personalization_profile);
5400 
5401 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5402 	if (buff_size > I40E_AQ_LARGE_BUF)
5403 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5404 
5405 	desc.datalen = cpu_to_le16(buff_size);
5406 
5407 	cmd->profile_track_id = cpu_to_le32(track_id);
5408 
5409 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5410 	if (!status) {
5411 		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5412 		if (error_offset)
5413 			*error_offset = le32_to_cpu(resp->error_offset);
5414 		if (error_info)
5415 			*error_info = le32_to_cpu(resp->error_info);
5416 	}
5417 
5418 	return status;
5419 }
5420 
5421 /**
5422  * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5423  * @hw: pointer to the hw struct
5424  * @buff: command buffer (size in bytes = buff_size)
5425  * @buff_size: buffer size in bytes
5426  * @flags: AdminQ command flags
5427  * @cmd_details: pointer to command details structure or NULL
5428  **/
5429 enum
5430 i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5431 				      u16 buff_size, u8 flags,
5432 				      struct i40e_asq_cmd_details *cmd_details)
5433 {
5434 	struct i40e_aq_desc desc;
5435 	struct i40e_aqc_get_applied_profiles *cmd =
5436 		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5437 	i40e_status status;
5438 
5439 	i40e_fill_default_direct_cmd_desc(&desc,
5440 					  i40e_aqc_opc_get_personalization_profile_list);
5441 
5442 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5443 	if (buff_size > I40E_AQ_LARGE_BUF)
5444 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5445 	desc.datalen = cpu_to_le16(buff_size);
5446 
5447 	cmd->flags = flags;
5448 
5449 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5450 
5451 	return status;
5452 }
5453 
5454 /**
5455  * i40e_find_segment_in_package - find a segment type in a package
5456  * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5457  * @pkg_hdr: pointer to the package header to be searched
5458  *
5459  * This function searches a package file for a particular segment type. On
5460  * success it returns a pointer to the segment header, otherwise it will
5461  * return NULL.
5462  **/
5463 struct i40e_generic_seg_header *
5464 i40e_find_segment_in_package(u32 segment_type,
5465 			     struct i40e_package_header *pkg_hdr)
5466 {
5467 	struct i40e_generic_seg_header *segment;
5468 	u32 i;
5469 
5470 	/* Search all package segments for the requested segment type */
5471 	for (i = 0; i < pkg_hdr->segment_count; i++) {
5472 		segment =
5473 			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5474 			 pkg_hdr->segment_offset[i]);
5475 
5476 		if (segment->type == segment_type)
5477 			return segment;
5478 	}
5479 
5480 	return NULL;
5481 }
5482 
5483 /* Get section table in profile */
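/* The section table follows the device table and a u32 array whose first
 * element gives the number of words after it; the macro skips past both.
 */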
5484 #define I40E_SECTION_TABLE(profile, sec_tbl)				\
5485 	do {								\
5486 		struct i40e_profile_segment *p = (profile);		\
5487 		u32 count;						\
5488 		u32 *nvm;						\
5489 		count = p->device_table_count;				\
5490 		nvm = (u32 *)&p->device_table[count];			\
5491 		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5492 	} while (0)
5493 
5494 /* Get section header in profile */
5495 #define I40E_SECTION_HEADER(profile, offset)				\
5496 	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5497 
5498 /**
5499  * i40e_find_section_in_profile - find a section type in an i40e segment
5500  * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5501  * @profile: pointer to the i40e segment header to be searched
5502  *
5503  * This function searches the i40e segment for a particular section type. On
5504  * success it returns a pointer to the section header, otherwise it will
5505  * return NULL.
5506  **/
5507 struct i40e_profile_section_header *
5508 i40e_find_section_in_profile(u32 section_type,
5509 			     struct i40e_profile_segment *profile)
5510 {
5511 	struct i40e_profile_section_header *sec;
5512 	struct i40e_section_table *sec_tbl;
5513 	u32 sec_off;
5514 	u32 i;
5515 
5516 	if (profile->header.type != SEGMENT_TYPE_I40E)
5517 		return NULL;
5518 
5519 	I40E_SECTION_TABLE(profile, sec_tbl);
5520 
5521 	for (i = 0; i < sec_tbl->section_count; i++) {
5522 		sec_off = sec_tbl->section_offset[i];
5523 		sec = I40E_SECTION_HEADER(profile, sec_off);
5524 		if (sec->section.type == section_type)
5525 			return sec;
5526 	}
5527 
5528 	return NULL;
5529 }
5530 
5531 /**
5532  * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5533  * @hw: pointer to the hw struct
5534  * @aq: command buffer containing all data to execute AQ
5535  **/
5536 static enum
5537 i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5538 					  struct i40e_profile_aq_section *aq)
5539 {
5540 	i40e_status status;
5541 	struct i40e_aq_desc desc;
5542 	u8 *msg = NULL;
5543 	u16 msglen;
5544 
5545 	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5546 	desc.flags |= cpu_to_le16(aq->flags);
5547 	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5548 
5549 	msglen = aq->datalen;
5550 	if (msglen) {
5551 		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5552 						I40E_AQ_FLAG_RD));
5553 		if (msglen > I40E_AQ_LARGE_BUF)
5554 			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5555 		desc.datalen = cpu_to_le16(msglen);
5556 		msg = &aq->data[0];
5557 	}
5558 
5559 	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5560 
5561 	if (status) {
5562 		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5563 			   "unable to exec DDP AQ opcode %u, error %d\n",
5564 			   aq->opcode, status);
5565 		return status;
5566 	}
5567 
5568 	/* copy returned desc to aq_buf */
5569 	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5570 
5571 	return 0;
5572 }
5573 
5574 /**
5575  * i40e_validate_profile
5576  * @hw: pointer to the hardware structure
5577  * @profile: pointer to the profile segment of the package to be validated
5578  * @track_id: package tracking id
5579  * @rollback: true if the profile is being validated for rollback
5580  *
5581  * Validates supported devices and profile's sections.
5582  */
5583 static enum i40e_status_code
5584 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5585 		      u32 track_id, bool rollback)
5586 {
5587 	struct i40e_profile_section_header *sec = NULL;
5588 	i40e_status status = 0;
5589 	struct i40e_section_table *sec_tbl;
5590 	u32 vendor_dev_id;
5591 	u32 dev_cnt;
5592 	u32 sec_off;
5593 	u32 i;
5594 
5595 	if (track_id == I40E_DDP_TRACKID_INVALID) {
5596 		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5597 		return I40E_NOT_SUPPORTED;
5598 	}
5599 
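	/* An empty device table means the profile applies to any device;
	 * otherwise the current device ID must be listed in it.
	 */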
5600 	dev_cnt = profile->device_table_count;
5601 	for (i = 0; i < dev_cnt; i++) {
5602 		vendor_dev_id = profile->device_table[i].vendor_dev_id;
5603 		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5604 		    hw->device_id == (vendor_dev_id & 0xFFFF))
5605 			break;
5606 	}
5607 	if (dev_cnt && i == dev_cnt) {
5608 		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5609 			   "Device doesn't support DDP\n");
5610 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
5611 	}
5612 
5613 	I40E_SECTION_TABLE(profile, sec_tbl);
5614 
5615 	/* Validate sections types */
5616 	for (i = 0; i < sec_tbl->section_count; i++) {
5617 		sec_off = sec_tbl->section_offset[i];
5618 		sec = I40E_SECTION_HEADER(profile, sec_off);
5619 		if (rollback) {
5620 			if (sec->section.type == SECTION_TYPE_MMIO ||
5621 			    sec->section.type == SECTION_TYPE_AQ ||
5622 			    sec->section.type == SECTION_TYPE_RB_AQ) {
5623 				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5624 					   "Not a roll-back package\n");
5625 				return I40E_NOT_SUPPORTED;
5626 			}
5627 		} else {
5628 			if (sec->section.type == SECTION_TYPE_RB_AQ ||
5629 			    sec->section.type == SECTION_TYPE_RB_MMIO) {
5630 				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5631 					   "Not an original package\n");
5632 				return I40E_NOT_SUPPORTED;
5633 			}
5634 		}
5635 	}
5636 
5637 	return status;
5638 }
5639 
5640 /**
5641  * i40e_write_profile - write the DDP profile to the device
5642  * @hw: pointer to the hardware structure
5643  * @profile: pointer to the profile segment of the package to be downloaded
5644  * @track_id: package tracking id
5645  *
5646  * Handles the download of a complete package.
5647  */
5648 enum i40e_status_code
5649 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5650 		   u32 track_id)
5651 {
5652 	i40e_status status = 0;
5653 	struct i40e_section_table *sec_tbl;
5654 	struct i40e_profile_section_header *sec = NULL;
5655 	struct i40e_profile_aq_section *ddp_aq;
5656 	u32 section_size = 0;
5657 	u32 offset = 0, info = 0;
5658 	u32 sec_off;
5659 	u32 i;
5660 
5661 	status = i40e_validate_profile(hw, profile, track_id, false);
5662 	if (status)
5663 		return status;
5664 
5665 	I40E_SECTION_TABLE(profile, sec_tbl);
5666 
5667 	for (i = 0; i < sec_tbl->section_count; i++) {
5668 		sec_off = sec_tbl->section_offset[i];
5669 		sec = I40E_SECTION_HEADER(profile, sec_off);
5670 		/* Process generic admin command */
5671 		if (sec->section.type == SECTION_TYPE_AQ) {
5672 			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5673 			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5674 			if (status) {
5675 				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5676 					   "Failed to execute aq: section %d, opcode %u\n",
5677 					   i, ddp_aq->opcode);
5678 				break;
5679 			}
5680 			sec->section.type = SECTION_TYPE_RB_AQ;
5681 		}
5682 
5683 		/* Skip any non-mmio sections */
5684 		if (sec->section.type != SECTION_TYPE_MMIO)
5685 			continue;
5686 
5687 		section_size = sec->section.size +
5688 			sizeof(struct i40e_profile_section_header);
5689 
5690 		/* Write MMIO section */
5691 		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5692 					   track_id, &offset, &info, NULL);
5693 		if (status) {
5694 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5695 				   "Failed to write profile: section %d, offset %d, info %d\n",
5696 				   i, offset, info);
5697 			break;
5698 		}
5699 	}
5700 	return status;
5701 }
5702 
5703 /**
5704  * i40e_rollback_profile - roll back a previously loaded DDP profile
5705  * @hw: pointer to the hardware structure
5706  * @profile: pointer to the profile segment of the package to be removed
5707  * @track_id: package tracking id
5708  *
5709  * Rolls back previously loaded package.
5710  */
5711 enum i40e_status_code
5712 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5713 		      u32 track_id)
5714 {
5715 	struct i40e_profile_section_header *sec = NULL;
5716 	i40e_status status = 0;
5717 	struct i40e_section_table *sec_tbl;
5718 	u32 offset = 0, info = 0;
5719 	u32 section_size = 0;
5720 	u32 sec_off;
5721 	int i;
5722 
5723 	status = i40e_validate_profile(hw, profile, track_id, true);
5724 	if (status)
5725 		return status;
5726 
5727 	I40E_SECTION_TABLE(profile, sec_tbl);
5728 
5729 	/* For rollback write sections in reverse */
5730 	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5731 		sec_off = sec_tbl->section_offset[i];
5732 		sec = I40E_SECTION_HEADER(profile, sec_off);
5733 
5734 		/* Skip any non-rollback sections */
5735 		if (sec->section.type != SECTION_TYPE_RB_MMIO)
5736 			continue;
5737 
5738 		section_size = sec->section.size +
5739 			sizeof(struct i40e_profile_section_header);
5740 
5741 		/* Write roll-back MMIO section */
5742 		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5743 					   track_id, &offset, &info, NULL);
5744 		if (status) {
5745 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5746 				   "Failed to write profile: section %d, offset %d, info %d\n",
5747 				   i, offset, info);
5748 			break;
5749 		}
5750 	}
5751 	return status;
5752 }
5753 
5754 /**
5755  * i40e_add_pinfo_to_list - register a loaded DDP profile
5756  * @hw: pointer to the hardware structure
5757  * @profile: pointer to the profile segment of the package
5758  * @profile_info_sec: buffer for information section
5759  * @track_id: package tracking id
5760  *
5761  * Register a profile to the list of loaded profiles.
5762  */
5763 enum i40e_status_code
5764 i40e_add_pinfo_to_list(struct i40e_hw *hw,
5765 		       struct i40e_profile_segment *profile,
5766 		       u8 *profile_info_sec, u32 track_id)
5767 {
5768 	i40e_status status = 0;
5769 	struct i40e_profile_section_header *sec = NULL;
5770 	struct i40e_profile_info *pinfo;
5771 	u32 offset = 0, info = 0;
5772 
5773 	sec = (struct i40e_profile_section_header *)profile_info_sec;
5774 	sec->tbl_size = 1;
5775 	sec->data_end = sizeof(struct i40e_profile_section_header) +
5776 			sizeof(struct i40e_profile_info);
5777 	sec->section.type = SECTION_TYPE_INFO;
5778 	sec->section.offset = sizeof(struct i40e_profile_section_header);
5779 	sec->section.size = sizeof(struct i40e_profile_info);
5780 	pinfo = (struct i40e_profile_info *)(profile_info_sec +
5781 					     sec->section.offset);
5782 	pinfo->track_id = track_id;
5783 	pinfo->version = profile->version;
5784 	pinfo->op = I40E_DDP_ADD_TRACKID;
5785 	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5786 
5787 	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5788 				   track_id, &offset, &info, NULL);
5789 
5790 	return status;
5791 }
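
/* Illustrative only: a minimal sketch of how the DDP helpers above are
 * typically combined, assuming the caller has mapped the package file at
 * 'pkg_hdr', picked a 'track_id', and allocated 'sec_buf' of at least
 * sizeof(struct i40e_profile_section_header) +
 * sizeof(struct i40e_profile_info) bytes:
 *
 *	struct i40e_profile_segment *profile;
 *	enum i40e_status_code status;
 *
 *	profile = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (!profile)
 *		return I40E_ERR_PARAM;
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, sec_buf,
 *						track_id);
 */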
5792 
5793 /**
5794  * i40e_aq_add_cloud_filters - add cloud filters for a VSI
5795  * @hw: pointer to the hardware structure
5796  * @seid: VSI seid to which the cloud filters are to be added
5797  * @filters: Buffer which contains the filters to be added
5798  * @filter_count: number of filters contained in the buffer
5799  *
5800  * Set the cloud filters for a given VSI.  The contents of the
5801  * i40e_aqc_cloud_filters_element_data are filled in by the caller
5802  * of the function.
5803  *
5804  **/
5805 enum i40e_status_code
5806 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5807 			  struct i40e_aqc_cloud_filters_element_data *filters,
5808 			  u8 filter_count)
5809 {
5810 	struct i40e_aq_desc desc;
5811 	struct i40e_aqc_add_remove_cloud_filters *cmd =
5812 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5813 	enum i40e_status_code status;
5814 	u16 buff_len;
5815 
5816 	i40e_fill_default_direct_cmd_desc(&desc,
5817 					  i40e_aqc_opc_add_cloud_filters);
5818 
5819 	buff_len = filter_count * sizeof(*filters);
5820 	desc.datalen = cpu_to_le16(buff_len);
5821 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5822 	cmd->num_filters = filter_count;
5823 	cmd->seid = cpu_to_le16(seid);
5824 
5825 	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5826 
5827 	return status;
5828 }
5829 
5830 /**
5831  * i40e_aq_add_cloud_filters_bb - add big buffer cloud filters for a VSI
5832  * @hw: pointer to the hardware structure
5833  * @seid: VSI seid to which the cloud filters are to be added
5834  * @filters: Buffer which contains the filters in big buffer to be added
5835  * @filter_count: number of filters contained in the buffer
5836  *
5837  * Set the big buffer cloud filters for a given VSI.  The contents of the
5838  * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5839  * function.
5840  *
5841  **/
5842 enum i40e_status_code
5843 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5844 			     struct i40e_aqc_cloud_filters_element_bb *filters,
5845 			     u8 filter_count)
5846 {
5847 	struct i40e_aq_desc desc;
5848 	struct i40e_aqc_add_remove_cloud_filters *cmd =
5849 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5850 	i40e_status status;
5851 	u16 buff_len;
5852 	int i;
5853 
5854 	i40e_fill_default_direct_cmd_desc(&desc,
5855 					  i40e_aqc_opc_add_cloud_filters);
5856 
5857 	buff_len = filter_count * sizeof(*filters);
5858 	desc.datalen = cpu_to_le16(buff_len);
5859 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5860 	cmd->num_filters = filter_count;
5861 	cmd->seid = cpu_to_le16(seid);
5862 	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5863 
5864 	for (i = 0; i < filter_count; i++) {
5865 		u16 tnl_type;
5866 		u32 ti;
5867 
5868 		tnl_type = (le16_to_cpu(filters[i].element.flags) &
5869 			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5870 			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5871 
5872 		/* Due to hardware eccentricities, the VNI for Geneve is shifted
5873 		 * one byte further than the Tenant ID field used by other
5874 		 * tunnel types.
5875 		 */
5876 		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5877 			ti = le32_to_cpu(filters[i].element.tenant_id);
5878 			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5879 		}
5880 	}
5881 
5882 	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5883 
5884 	return status;
5885 }
5886 
5887 /**
5888  * i40e_aq_rem_cloud_filters - remove cloud filters for a VSI
5889  * @hw: pointer to the hardware structure
5890  * @seid: VSI seid to remove cloud filters from
5891  * @filters: Buffer which contains the filters to be removed
5892  * @filter_count: number of filters contained in the buffer
5893  *
5894  * Remove the cloud filters for a given VSI.  The contents of the
5895  * i40e_aqc_cloud_filters_element_data are filled in by the caller
5896  * of the function.
5897  *
5898  **/
5899 enum i40e_status_code
5900 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5901 			  struct i40e_aqc_cloud_filters_element_data *filters,
5902 			  u8 filter_count)
5903 {
5904 	struct i40e_aq_desc desc;
5905 	struct i40e_aqc_add_remove_cloud_filters *cmd =
5906 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5907 	enum i40e_status_code status;
5908 	u16 buff_len;
5909 
5910 	i40e_fill_default_direct_cmd_desc(&desc,
5911 					  i40e_aqc_opc_remove_cloud_filters);
5912 
5913 	buff_len = filter_count * sizeof(*filters);
5914 	desc.datalen = cpu_to_le16(buff_len);
5915 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5916 	cmd->num_filters = filter_count;
5917 	cmd->seid = cpu_to_le16(seid);
5918 
5919 	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5920 
5921 	return status;
5922 }
5923 
5924 /**
5925  * i40e_aq_rem_cloud_filters_bb - remove big buffer cloud filters for a VSI
5926  * @hw: pointer to the hardware structure
5927  * @seid: VSI seid to remove cloud filters from
5928  * @filters: Buffer which contains the filters in big buffer to be removed
5929  * @filter_count: number of filters contained in the buffer
5930  *
5931  * Remove the big buffer cloud filters for a given VSI.  The contents of the
5932  * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5933  * function.
5934  *
5935  **/
5936 enum i40e_status_code
5937 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5938 			     struct i40e_aqc_cloud_filters_element_bb *filters,
5939 			     u8 filter_count)
5940 {
5941 	struct i40e_aq_desc desc;
5942 	struct i40e_aqc_add_remove_cloud_filters *cmd =
5943 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5944 	i40e_status status;
5945 	u16 buff_len;
5946 	int i;
5947 
5948 	i40e_fill_default_direct_cmd_desc(&desc,
5949 					  i40e_aqc_opc_remove_cloud_filters);
5950 
5951 	buff_len = filter_count * sizeof(*filters);
5952 	desc.datalen = cpu_to_le16(buff_len);
5953 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5954 	cmd->num_filters = filter_count;
5955 	cmd->seid = cpu_to_le16(seid);
5956 	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5957 
5958 	for (i = 0; i < filter_count; i++) {
5959 		u16 tnl_type;
5960 		u32 ti;
5961 
5962 		tnl_type = (le16_to_cpu(filters[i].element.flags) &
5963 			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5964 			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5965 
5966 		/* Due to hardware eccentricities, the VNI for Geneve is shifted
5967 		 * one byte further than the Tenant ID field used by other
5968 		 * tunnel types.
5969 		 */
5970 		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5971 			ti = le32_to_cpu(filters[i].element.tenant_id);
5972 			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5973 		}
5974 	}
5975 
5976 	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5977 
5978 	return status;
5979 }
5980