1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3 
4 #include "vf.h"
5 #include "ixgbevf.h"
6 
7 /* On Hyper-V, to reset, we need to read from this offset
8  * from the PCI config space. This is the mechanism used on
9  * Hyper-V to support PF/VF communication.
10  */
11 #define IXGBE_HV_RESET_OFFSET           0x201
12 
ixgbevf_write_msg_read_ack(struct ixgbe_hw * hw,u32 * msg,u32 * retmsg,u16 size)13 static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
14 					     u32 *retmsg, u16 size)
15 {
16 	struct ixgbe_mbx_info *mbx = &hw->mbx;
17 	s32 retval = mbx->ops.write_posted(hw, msg, size);
18 
19 	if (retval)
20 		return retval;
21 
22 	return mbx->ops.read_posted(hw, retmsg, size);
23 }
24 
25 /**
26  *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
27  *  @hw: pointer to hardware structure
28  *
29  *  Starts the hardware by filling the bus info structure and media type, clears
30  *  all on chip counters, initializes receive address registers, multicast
31  *  table, VLAN filter table, calls routine to set up link and flow control
32  *  settings, and leaves transmit and receive units disabled and uninitialized
33  **/
ixgbevf_start_hw_vf(struct ixgbe_hw * hw)34 static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
35 {
36 	/* Clear adapter stopped flag */
37 	hw->adapter_stopped = false;
38 
39 	return 0;
40 }
41 
42 /**
43  *  ixgbevf_init_hw_vf - virtual function hardware initialization
44  *  @hw: pointer to hardware structure
45  *
46  *  Initialize the hardware by resetting the hardware and then starting
47  *  the hardware
48  **/
ixgbevf_init_hw_vf(struct ixgbe_hw * hw)49 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
50 {
51 	s32 status = hw->mac.ops.start_hw(hw);
52 
53 	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
54 
55 	return status;
56 }
57 
58 /**
59  *  ixgbevf_reset_hw_vf - Performs hardware reset
60  *  @hw: pointer to hardware structure
61  *
62  *  Resets the hardware by resetting the transmit and receive units, masks and
63  *  clears all interrupts.
64  **/
ixgbevf_reset_hw_vf(struct ixgbe_hw * hw)65 static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
66 {
67 	struct ixgbe_mbx_info *mbx = &hw->mbx;
68 	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
69 	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
70 	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
71 	u8 *addr = (u8 *)(&msgbuf[1]);
72 
73 	/* Call adapter stop to disable tx/rx and clear interrupts */
74 	hw->mac.ops.stop_adapter(hw);
75 
76 	/* reset the api version */
77 	hw->api_version = ixgbe_mbox_api_10;
78 
79 	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
80 	IXGBE_WRITE_FLUSH(hw);
81 
82 	/* we cannot reset while the RSTI / RSTD bits are asserted */
83 	while (!mbx->ops.check_for_rst(hw) && timeout) {
84 		timeout--;
85 		udelay(5);
86 	}
87 
88 	if (!timeout)
89 		return IXGBE_ERR_RESET_FAILED;
90 
91 	/* mailbox timeout can now become active */
92 	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
93 
94 	msgbuf[0] = IXGBE_VF_RESET;
95 	mbx->ops.write_posted(hw, msgbuf, 1);
96 
97 	mdelay(10);
98 
99 	/* set our "perm_addr" based on info provided by PF
100 	 * also set up the mc_filter_type which is piggy backed
101 	 * on the mac address in word 3
102 	 */
103 	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
104 	if (ret_val)
105 		return ret_val;
106 
107 	/* New versions of the PF may NACK the reset return message
108 	 * to indicate that no MAC address has yet been assigned for
109 	 * the VF.
110 	 */
111 	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
112 	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
113 		return IXGBE_ERR_INVALID_MAC_ADDR;
114 
115 	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
116 		ether_addr_copy(hw->mac.perm_addr, addr);
117 
118 	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
119 
120 	return 0;
121 }
122 
/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 *
 * Return: 0 on success, -EOPNOTSUPP when CONFIG_PCI_MMCONFIG is not
 * enabled (config-space access to the MAC is unavailable).
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	/* The permanent MAC address is exposed byte-by-byte starting at
	 * IXGBE_HV_RESET_OFFSET in PCI config space.
	 */
	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}
146 
147 /**
148  *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
149  *  @hw: pointer to hardware structure
150  *
151  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
152  *  disables transmit and receive units. The adapter_stopped flag is used by
153  *  the shared code and drivers to determine if the adapter is in a stopped
154  *  state and should not touch the hardware.
155  **/
ixgbevf_stop_hw_vf(struct ixgbe_hw * hw)156 static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
157 {
158 	u32 number_of_queues;
159 	u32 reg_val;
160 	u16 i;
161 
162 	/* Set the adapter_stopped flag so other driver functions stop touching
163 	 * the hardware
164 	 */
165 	hw->adapter_stopped = true;
166 
167 	/* Disable the receive unit by stopped each queue */
168 	number_of_queues = hw->mac.max_rx_queues;
169 	for (i = 0; i < number_of_queues; i++) {
170 		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
171 		if (reg_val & IXGBE_RXDCTL_ENABLE) {
172 			reg_val &= ~IXGBE_RXDCTL_ENABLE;
173 			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
174 		}
175 	}
176 
177 	IXGBE_WRITE_FLUSH(hw);
178 
179 	/* Clear interrupt mask to stop from interrupts being generated */
180 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
181 
182 	/* Clear any pending interrupts */
183 	IXGBE_READ_REG(hw, IXGBE_VTEICR);
184 
185 	/* Disable the transmit unit.  Each queue must be disabled. */
186 	number_of_queues = hw->mac.max_tx_queues;
187 	for (i = 0; i < number_of_queues; i++) {
188 		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
189 		if (reg_val & IXGBE_TXDCTL_ENABLE) {
190 			reg_val &= ~IXGBE_TXDCTL_ENABLE;
191 			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
192 		}
193 	}
194 
195 	return 0;
196 }
197 
198 /**
199  *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
200  *  @hw: pointer to hardware structure
201  *  @mc_addr: the multicast address
202  *
203  *  Extracts the 12 bits, from a multicast address, to determine which
204  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
205  *  incoming Rx multicast addresses, to determine the bit-vector to check in
206  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
207  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
208  *  to mc_filter_type.
209  **/
ixgbevf_mta_vector(struct ixgbe_hw * hw,u8 * mc_addr)210 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
211 {
212 	u32 vector = 0;
213 
214 	switch (hw->mac.mc_filter_type) {
215 	case 0:   /* use bits [47:36] of the address */
216 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
217 		break;
218 	case 1:   /* use bits [46:35] of the address */
219 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
220 		break;
221 	case 2:   /* use bits [45:34] of the address */
222 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
223 		break;
224 	case 3:   /* use bits [43:32] of the address */
225 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
226 		break;
227 	default:  /* Invalid mc_filter_type */
228 		break;
229 	}
230 
231 	/* vector can only be 12-bits or boundary will be exceeded */
232 	vector &= 0xFFF;
233 	return vector;
234 }
235 
236 /**
237  *  ixgbevf_get_mac_addr_vf - Read device MAC address
238  *  @hw: pointer to the HW structure
239  *  @mac_addr: pointer to storage for retrieved MAC address
240  **/
ixgbevf_get_mac_addr_vf(struct ixgbe_hw * hw,u8 * mac_addr)241 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
242 {
243 	ether_addr_copy(mac_addr, hw->mac.perm_addr);
244 
245 	return 0;
246 }
247 
ixgbevf_set_uc_addr_vf(struct ixgbe_hw * hw,u32 index,u8 * addr)248 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
249 {
250 	u32 msgbuf[3], msgbuf_chk;
251 	u8 *msg_addr = (u8 *)(&msgbuf[1]);
252 	s32 ret_val;
253 
254 	memset(msgbuf, 0, sizeof(msgbuf));
255 	/* If index is one then this is the start of a new list and needs
256 	 * indication to the PF so it can do it's own list management.
257 	 * If it is zero then that tells the PF to just clear all of
258 	 * this VF's macvlans and there is no new list.
259 	 */
260 	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
261 	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
262 	msgbuf_chk = msgbuf[0];
263 
264 	if (addr)
265 		ether_addr_copy(msg_addr, addr);
266 
267 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
268 					     ARRAY_SIZE(msgbuf));
269 	if (!ret_val) {
270 		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
271 
272 		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
273 			return -ENOMEM;
274 	}
275 
276 	return ret_val;
277 }
278 
ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw * hw,u32 index,u8 * addr)279 static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
280 {
281 	return -EOPNOTSUPP;
282 }
283 
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		/* mailbox RETA query only applies to pre-X550 VF MACs */
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is the echoed request word followed by 'dwords' of
	 * packed RETA entries
	 */
	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* unpack the 2-bit entries, 16 per DWORD, into the caller's buffer */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
359 
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if API doesn't support RSS Random Key retrieval
	 * or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		/* mailbox key query only applies to pre-X550 VF MACs */
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is the echoed request word plus 10 key registers */
	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}
420 
421 /**
422  *  ixgbevf_set_rar_vf - set device MAC address
423  *  @hw: pointer to hardware structure
424  *  @index: Receive address register to write
425  *  @addr: Address to put into receive address register
426  *  @vmdq: Unused in this implementation
427  **/
ixgbevf_set_rar_vf(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq)428 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
429 			      u32 vmdq)
430 {
431 	u32 msgbuf[3];
432 	u8 *msg_addr = (u8 *)(&msgbuf[1]);
433 	s32 ret_val;
434 
435 	memset(msgbuf, 0, sizeof(msgbuf));
436 	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
437 	ether_addr_copy(msg_addr, addr);
438 
439 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
440 					     ARRAY_SIZE(msgbuf));
441 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
442 
443 	/* if nacked the address was rejected, use "perm_addr" */
444 	if (!ret_val &&
445 	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
446 		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
447 		return IXGBE_ERR_MBX;
448 	}
449 
450 	return ret_val;
451 }
452 
453 /**
454  *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
455  *  @hw: pointer to hardware structure
456  *  @index: Receive address register to write
457  *  @addr: Address to put into receive address register
458  *  @vmdq: Unused in this implementation
459  *
460  * We don't really allow setting the device MAC address. However,
461  * if the address being set is the permanent MAC address we will
462  * permit that.
463  **/
ixgbevf_hv_set_rar_vf(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq)464 static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
465 				 u32 vmdq)
466 {
467 	if (ether_addr_equal(addr, hw->mac.perm_addr))
468 		return 0;
469 
470 	return -EOPNOTSUPP;
471 }
472 
/**
 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @netdev: pointer to net device structure
 *
 *  Updates the Multicast Table Array by sending the hashed multicast
 *  vectors to the PF over the mailbox.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word.  We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	/* the entry count rides in the MSGINFO field of word 0 */
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		/* NOTE(review): link-local addresses are skipped here but
		 * still counted in cnt above, so the advertised count can
		 * exceed the vectors actually written - confirm the PF
		 * tolerates this.
		 */
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
			IXGBE_VFMAILBOX_SIZE);
}
516 
517 /**
518  * ixgbevf_hv_update_mc_addr_list_vf - stub
519  * @hw: unused
520  * @netdev: unused
521  *
522  * Hyper-V variant - just a stub.
523  */
ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw * hw,struct net_device * netdev)524 static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
525 					     struct net_device *netdev)
526 {
527 	return -EOPNOTSUPP;
528 }
529 
530 /**
531  *  ixgbevf_update_xcast_mode - Update Multicast mode
532  *  @hw: pointer to the HW structure
533  *  @xcast_mode: new multicast mode
534  *
535  *  Updates the Multicast Mode of VF.
536  **/
ixgbevf_update_xcast_mode(struct ixgbe_hw * hw,int xcast_mode)537 static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
538 {
539 	u32 msgbuf[2];
540 	s32 err;
541 
542 	switch (hw->api_version) {
543 	case ixgbe_mbox_api_12:
544 		/* promisc introduced in 1.3 version */
545 		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
546 			return -EOPNOTSUPP;
547 		fallthrough;
548 	case ixgbe_mbox_api_14:
549 	case ixgbe_mbox_api_13:
550 		break;
551 	default:
552 		return -EOPNOTSUPP;
553 	}
554 
555 	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
556 	msgbuf[1] = xcast_mode;
557 
558 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
559 					 ARRAY_SIZE(msgbuf));
560 	if (err)
561 		return err;
562 
563 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
564 	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
565 		return -EPERM;
566 
567 	return 0;
568 }
569 
570 /**
571  * ixgbevf_hv_update_xcast_mode - stub
572  * @hw: unused
573  * @xcast_mode: unused
574  *
575  * Hyper-V variant - just a stub.
576  */
ixgbevf_hv_update_xcast_mode(struct ixgbe_hw * hw,int xcast_mode)577 static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
578 {
579 	return -EOPNOTSUPP;
580 }
581 
582 /**
583  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
584  *  @hw: pointer to the HW structure
585  *  @vlan: 12 bit VLAN ID
586  *  @vind: unused by VF drivers
587  *  @vlan_on: if true then set bit, else clear bit
588  **/
ixgbevf_set_vfta_vf(struct ixgbe_hw * hw,u32 vlan,u32 vind,bool vlan_on)589 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
590 			       bool vlan_on)
591 {
592 	u32 msgbuf[2];
593 	s32 err;
594 
595 	msgbuf[0] = IXGBE_VF_SET_VLAN;
596 	msgbuf[1] = vlan;
597 	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
598 	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
599 
600 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
601 					 ARRAY_SIZE(msgbuf));
602 	if (err)
603 		goto mbx_err;
604 
605 	/* remove extra bits from the message */
606 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
607 	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
608 
609 	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
610 		err = IXGBE_ERR_INVALID_ARGUMENT;
611 
612 mbx_err:
613 	return err;
614 }
615 
616 /**
617  * ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub.
618  * @hw: unused
619  * @vlan: unused
620  * @vind: unused
621  * @vlan_on: unused
622  */
ixgbevf_hv_set_vfta_vf(struct ixgbe_hw * hw,u32 vlan,u32 vind,bool vlan_on)623 static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
624 				  bool vlan_on)
625 {
626 	return -EOPNOTSUPP;
627 }
628 
629 /**
630  *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
631  *  @hw: pointer to hardware structure
632  *  @speed: Unused in this implementation
633  *  @autoneg: Unused in this implementation
634  *  @autoneg_wait_to_complete: Unused in this implementation
635  *
636  *  Do nothing and return success.  VF drivers are not allowed to change
637  *  global settings.  Maintained for driver compatibility.
638  **/
ixgbevf_setup_mac_link_vf(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg,bool autoneg_wait_to_complete)639 static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
640 				     ixgbe_link_speed speed, bool autoneg,
641 				     bool autoneg_wait_to_complete)
642 {
643 	return 0;
644 }
645 
/**
 *  ixgbevf_check_mac_link_vf - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true is link is up, false otherwise
 *  @autoneg_wait_to_complete: unused
 *
 *  Reads the links register to determine if link is up and the current
 *  speed, then confirms over the mailbox that the PF is responsive.
 *  Returns -1 when the mailbox indicates the PF connection was lost.
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* link was already verified up - nothing more to check */
	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			/* link dropped during the settle time - report down */
			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* translate the VFLINKS speed field into the generic speed enum */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
733 
/**
 * ixgbevf_hv_check_mac_link_vf - check link
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication, so only the
 * VFLINKS register is consulted.  Always returns 0.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* link was already verified up - nothing more to check */
	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			/* link dropped during the settle time - report down */
			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* translate the VFLINKS speed field into the generic speed enum */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
800 
801 /**
802  *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
803  *  @hw: pointer to the HW structure
804  *  @max_size: value to assign to max frame size
805  **/
ixgbevf_set_rlpml_vf(struct ixgbe_hw * hw,u16 max_size)806 static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
807 {
808 	u32 msgbuf[2];
809 	s32 ret_val;
810 
811 	msgbuf[0] = IXGBE_VF_SET_LPE;
812 	msgbuf[1] = max_size;
813 
814 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
815 					     ARRAY_SIZE(msgbuf));
816 	if (ret_val)
817 		return ret_val;
818 	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
819 	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
820 		return IXGBE_ERR_MBX;
821 
822 	return 0;
823 }
824 
825 /**
826  * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
827  * @hw: pointer to the HW structure
828  * @max_size: value to assign to max frame size
829  * Hyper-V variant.
830  **/
ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw * hw,u16 max_size)831 static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
832 {
833 	u32 reg;
834 
835 	/* If we are on Hyper-V, we implement this functionality
836 	 * differently.
837 	 */
838 	reg =  IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
839 	/* CRC == 4 */
840 	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
841 	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
842 
843 	return 0;
844 }
845 
846 /**
847  *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
848  *  @hw: pointer to the HW structure
849  *  @api: integer containing requested API version
850  **/
ixgbevf_negotiate_api_version_vf(struct ixgbe_hw * hw,int api)851 static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
852 {
853 	int err;
854 	u32 msg[3];
855 
856 	/* Negotiate the mailbox API version */
857 	msg[0] = IXGBE_VF_API_NEGOTIATE;
858 	msg[1] = api;
859 	msg[2] = 0;
860 
861 	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
862 	if (!err) {
863 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
864 
865 		/* Store value and return 0 on success */
866 		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
867 			hw->api_version = api;
868 			return 0;
869 		}
870 
871 		err = IXGBE_ERR_INVALID_ARGUMENT;
872 	}
873 
874 	return err;
875 }
876 
877 /**
878  *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
879  *  @hw: pointer to the HW structure
880  *  @api: integer containing requested API version
881  *  Hyper-V version - only ixgbe_mbox_api_10 supported.
882  **/
ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw * hw,int api)883 static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
884 {
885 	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
886 	if (api != ixgbe_mbox_api_10)
887 		return IXGBE_ERR_INVALID_ARGUMENT;
888 
889 	return 0;
890 }
891 
/**
 * ixgbevf_get_queues - Fetch the VF queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: filled with the number of traffic classes reported by the PF
 * @default_tc: filled with the default queue index
 *
 * Return: 0 on success, or when the negotiated mailbox API predates the
 * IXGBE_VF_GET_QUEUE message; IXGBE_ERR_MBX when the PF did not ACK;
 * otherwise the mailbox error from the exchange.
 */
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		/* clamp nonsensical PF-supplied counts to the maximum */
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}
948 
/* MAC operations for SR-IOV VFs that reach the PF over the architectural
 * VF/PF mailbox.
 */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};
965 
/* MAC operations for Hyper-V hosted VFs: no PF mailbox exists, so most
 * operations are stubs or use PCI config space / direct register access.
 */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};
982 
/* Board-specific info: one {MAC type, ops table} pair per supported VF
 * device, in both bare-metal (mailbox) and Hyper-V hosted flavours.
 */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
1027