// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/mpls.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <net/xfrm.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2018 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct workqueue_struct *ixgbevf_wq;

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* Skip reading the status register when the register that just
	 * returned IXGBE_FAILED_READ_REG was the status register itself;
	 * this avoids a redundant read and blocks any potential
	 * recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

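	/* hw_addr is cleared when surprise removal is detected (this
	 * READ_ONCE pairs with that write in ixgbevf_remove_adapter()).
	 * A read from a removed PCI device returns all ones, which is
	 * what IXGBE_FAILED_READ_REG encodes, so a failed read below
	 * kicks off the removal check.
	 */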
	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
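		/* Each 32-bit VTIVAR register appears to hold four 8-bit
		 * entries covering two queues (Rx at byte offsets 0 and 2,
		 * Tx at 1 and 3), which is what the byte index
		 * 16 * (queue & 1) + 8 * direction computed below selects.
		 */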
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

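	/* Descriptors between head and tail are still owned by hardware;
	 * when tail has wrapped around behind head, add ring->count to
	 * get the pending count.
	 */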
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. The queue is only
	 * considered suspect if no transmit has completed since the
	 * previous check AND there is at least one packet pending. The
	 * ARMED bit records such a first observation.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: transmit queue hanging (unused)
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring, int napi_budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

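	/* i was biased by -tx_ring->count above so that it hits zero
	 * exactly at the wrap point, letting the loop detect wrap with a
	 * cheap "!i" test instead of comparing against the ring size.
	 */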
	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
			total_ipsec++;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
	adapter->tx_ipsec += total_ipsec;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang%s\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       ring_is_xdp(tx_ring) ? " XDP" : "",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
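	/* DESC_NEEDED (see ixgbevf.h) is roughly the descriptor count of
	 * one worst-case fragmented frame; waking only once twice that
	 * many are free should avoid immediately re-stopping the queue.
	 */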
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
		ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static
struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
						const unsigned int size)
{
	struct ixgbevf_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

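	/* pagecnt_bias tracks how many page references the driver still
	 * owns; dropping one here accounts for the half page handed off
	 * with this received frame.
	 */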
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *rx_buffer,
				  struct sk_buff *skb)
{
	if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		if (IS_ERR(skb))
			/* We are not reusing the buffer so unmap it and free
			 * any references we are holding to it
			 */
			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
					     ixgbevf_rx_pg_size(rx_ring),
					     DMA_FROM_DEVICE,
					     IXGBEVF_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}

static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 ixgbevf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ixgbevf_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;
	rx_ring->rx_stats.alloc_rx_page++;

	return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ixgbevf_rx_bufsz(rx_ring),
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right. These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IXGBEVF_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)

	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
		return false;

#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				struct sk_buff *skb,
				unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
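	/* For pages split in half (PAGE_SIZE < 8192) XOR-ing the offset
	 * flips to the other half so it can serve the next frame; larger
	 * pages are consumed linearly instead.
	 */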
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static
struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *rx_buffer,
				      struct xdp_buff *xdp,
				      union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, we currently get 0 extra XDP headroom as
	 * opposed to having legacy-rx off, where we process XDP
	 * packets going to the stack via ixgbevf_build_skb().
	 *
	 * For ixgbevf_construct_skb() mode it means that the
	 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head. Should this ever
	 * change in the future for legacy-rx mode, then let's also
	 * add xdp->data_meta handling here.
	 */

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IXGBEVF_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data,
					  IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(xdp->data + headlen) -
					page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
					 struct ixgbevf_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points to xdp->data, otherwise, we likely
	 * have a consumer accessing first few bytes of meta data,
	 * and then actual data.
	 */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

#define IXGBEVF_XDP_PASS 0
#define IXGBEVF_XDP_CONSUMED 1
#define IXGBEVF_XDP_TX 2
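
/* ixgbevf_run_xdp() reports these results to the Rx cleanup path by
 * encoding them as error pointers (ERR_PTR(-result)), so a non-error
 * skb pointer from it always means XDP_PASS.
 */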

static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
				 struct xdp_buff *xdp)
{
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	u32 len, cmd_type;
	dma_addr_t dma;
	u16 i;

	len = xdp->data_end - xdp->data;

	if (unlikely(!ixgbevf_desc_unused(ring)))
		return IXGBEVF_XDP_CONSUMED;

	dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma))
		return IXGBEVF_XDP_CONSUMED;

	/* record the location of the first descriptor for this packet */
	i = ring->next_to_use;
	tx_buffer = &ring->tx_buffer_info[i];

	dma_unmap_len_set(tx_buffer, len, len);
	dma_unmap_addr_set(tx_buffer, dma, dma);
	tx_buffer->data = xdp->data;
	tx_buffer->bytecount = len;
	tx_buffer->gso_segs = 1;
	tx_buffer->protocol = 0;

	/* Populate minimal context descriptor that will provide for the
	 * fact that we are expected to process Ethernet frames.
	 */
	if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
		struct ixgbe_adv_tx_context_desc *context_desc;

		set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);

		context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
		context_desc->vlan_macip_lens =
			cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
		context_desc->fceof_saidx = 0;
		context_desc->type_tucmd_mlhl =
			cpu_to_le32(IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);
		context_desc->mss_l4len_idx = 0;

		i = 1;
	}

	/* put descriptor type bits */
	cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		   IXGBE_ADVTXD_DCMD_DEXT |
		   IXGBE_ADVTXD_DCMD_IFCS;
	cmd_type |= len | IXGBE_TXD_CMD;

	tx_desc = IXGBEVF_TX_DESC(ring, i);
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	tx_desc->read.olinfo_status =
			cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
				    IXGBE_ADVTXD_CC);

	/* Avoid any potential race with cleanup */
	smp_wmb();

	/* set next_to_watch value indicating a packet is present */
	i++;
	if (i == ring->count)
		i = 0;

	tx_buffer->next_to_watch = tx_desc;
	ring->next_to_use = i;

	return IXGBEVF_XDP_TX;
}

static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *rx_ring,
				       struct xdp_buff *xdp)
{
	int result = IXGBEVF_XDP_PASS;
	struct ixgbevf_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
		result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBEVF_XDP_CONSUMED;
		break;
	}
xdp_out:
	rcu_read_unlock();
	return ERR_PTR(-result);
}

static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
				   struct ixgbevf_rx_buffer *rx_buffer,
				   unsigned int size)
{
	unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);

#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;
	bool xdp_xmit = false;
	struct xdp_buff xdp;

	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	while (likely(total_rx_packets < budget)) {
		struct ixgbevf_rx_buffer *rx_buffer;
		union ixgbe_adv_rx_desc *rx_desc;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (!skb) {
			unsigned int offset = ixgbevf_rx_offset(rx_ring);
			unsigned char *hard_start;

			hard_start = page_address(rx_buffer->page) +
				     rx_buffer->page_offset - offset;
			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on frame size */
			xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
			skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
				xdp_xmit = true;
				ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
						       size);
			} else {
				rx_buffer->pagecnt_bias++;
			}
			total_rx_packets++;
			total_rx_bytes += size;
		} else if (skb) {
			ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
		} else if (ring_uses_build_skb(rx_ring)) {
			skb = ixgbevf_build_skb(rx_ring, rx_buffer,
						&xdp, rx_desc);
		} else {
			skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
						    &xdp, rx_desc);
		}

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	if (xdp_xmit) {
		struct ixgbevf_ring *xdp_ring =
			adapter->xdp_ring[rx_ring->queue_index];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
						   per_ring_budget);
		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		if (adapter->rx_itr_setting == 1)
			ixgbevf_set_itr(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
		    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
			ixgbevf_irq_enable_queues(adapter,
						  BIT(q_vector->v_idx));
	}

	return min(work_done, budget - 1);
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_12K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= BIT(v_idx);

		ixgbevf_write_eitr(q_vector);
	}

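	/* v_idx is now the last MSI-X vector; map the "other causes"
	 * (mailbox/link) interrupt to it via direction -1.
	 */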
	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = BIT(v_idx);
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	if (timepassed_us == 0)
		return;

	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_12K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
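		/* new = 10*new*old / (9*new + old) is a weighted harmonic
		 * mean favoring the previous value 9:1, so the ITR drifts
		 * toward the target rather than jumping to it.
		 */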
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	ixgbevf_service_event_schedule(adapter);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", netdev->name, ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", netdev->name, ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", netdev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
		       err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them. Set the number of msix vectors to zero
	 * indicating that not enough can be allocated. The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail. The only way to recover is to unload the driver and
	 * reload it again. If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

1715 /* reset ntu and ntc to place SW in sync with hardware */
1716 ring->next_to_clean = 0;
1717 ring->next_to_use = 0;
1718
1719 /* To avoid issues, WTHRESH + PTHRESH should always be less than
1720 * or equal to the number of on-chip descriptors, which is
1721 * currently 40.
1722 */
1723 txdctl |= (8 << 16); /* WTHRESH = 8 */
1724
1725 /* Setting PTHRESH to 32 improves performance */
1726 txdctl |= (1u << 8) | /* HTHRESH = 1 */
1727 32; /* PTHRESH = 32 */
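/* With the values above, the threshold fields of TXDCTL are, per the
 * 82599 datasheet layout (PTHRESH bits 6:0, HTHRESH bits 14:8,
 * WTHRESH bits 22:16):
 *
 *   txdctl = ENABLE | (8 << 16) | (1 << 8) | 32
 *
 * which works out to roughly 0x02080120 once the enable bit (bit 25)
 * is included.
 */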
1728
1729 /* reinitialize tx_buffer_info */
1730 memset(ring->tx_buffer_info, 0,
1731 sizeof(struct ixgbevf_tx_buffer) * ring->count);
1732
1733 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1734 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1735
1736 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1737
1738 /* poll to verify queue is enabled */
1739 do {
1740 usleep_range(1000, 2000);
1741 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1742 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1743 if (!wait_loop)
1744 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1745 }
1746
1747 /**
1748 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1749 * @adapter: board private structure
1750 *
1751 * Configure the Tx unit of the MAC after a reset.
1752 **/
1753 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1754 {
1755 u32 i;
1756
1757 /* Setup the HW Tx Head and Tail descriptor pointers */
1758 for (i = 0; i < adapter->num_tx_queues; i++)
1759 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1760 for (i = 0; i < adapter->num_xdp_queues; i++)
1761 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1762 }
1763
1764 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1765
1766 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1767 struct ixgbevf_ring *ring, int index)
1768 {
1769 struct ixgbe_hw *hw = &adapter->hw;
1770 u32 srrctl;
1771
1772 srrctl = IXGBE_SRRCTL_DROP_EN;
1773
1774 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1775 if (ring_uses_large_buffer(ring))
1776 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1777 else
1778 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
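/* The buffer-size fields are written in hardware units: SRRCTL.BSIZEPKT
 * is in 1 KB granularity (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so the
 * lines above program 3 for 3K buffers or 2 for 2K buffers; the header
 * size above lands in a field expressed in 64-byte units.
 */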
1779 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1780
1781 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1782 }
1783
1784 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1785 {
1786 struct ixgbe_hw *hw = &adapter->hw;
1787
1788 /* PSRTYPE must be initialized in 82599 */
1789 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1790 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1791 IXGBE_PSRTYPE_L2HDR;
1792
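/* Bits 30:29 of PSRTYPE encode the RSS queue count as a power of two;
 * BIT(29) therefore advertises two queues, the VF RSS maximum.
 */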
1793 if (adapter->num_rx_queues > 1)
1794 psrtype |= BIT(29);
1795
1796 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1797 }
1798
1799 #define IXGBEVF_MAX_RX_DESC_POLL 10
1800 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1801 struct ixgbevf_ring *ring)
1802 {
1803 struct ixgbe_hw *hw = &adapter->hw;
1804 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1805 u32 rxdctl;
1806 u8 reg_idx = ring->reg_idx;
1807
1808 if (IXGBE_REMOVED(hw->hw_addr))
1809 return;
1810 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1811 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1812
1813 /* write value back with RXDCTL.ENABLE bit cleared */
1814 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1815
1816 /* the hardware may take up to 100us to really disable the Rx queue */
1817 do {
1818 udelay(10);
1819 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1820 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1821
1822 if (!wait_loop)
1823 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1824 reg_idx);
1825 }
1826
1827 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1828 struct ixgbevf_ring *ring)
1829 {
1830 struct ixgbe_hw *hw = &adapter->hw;
1831 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1832 u32 rxdctl;
1833 u8 reg_idx = ring->reg_idx;
1834
1835 if (IXGBE_REMOVED(hw->hw_addr))
1836 return;
1837 do {
1838 usleep_range(1000, 2000);
1839 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1840 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1841
1842 if (!wait_loop)
1843 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1844 reg_idx);
1845 }
1846
1847 /**
1848 * ixgbevf_init_rss_key - Initialize adapter RSS key
1849 * @adapter: device handle
1850 *
1851 * Allocates and initializes the RSS key if it is not allocated.
1852 **/
1853 static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1854 {
1855 u32 *rss_key;
1856
1857 if (!adapter->rss_key) {
1858 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1859 if (unlikely(!rss_key))
1860 return -ENOMEM;
1861
1862 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1863 adapter->rss_key = rss_key;
1864 }
1865
1866 return 0;
1867 }
1868
1869 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1870 {
1871 struct ixgbe_hw *hw = &adapter->hw;
1872 u32 vfmrqc = 0, vfreta = 0;
1873 u16 rss_i = adapter->num_rx_queues;
1874 u8 i, j;
1875
1876 /* Fill out hash function seeds */
1877 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1878 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
1879
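/* The redirection table packs four 8-bit queue indices into each 32-bit
 * VFRETA register: (i & 0x3) * 8 picks the byte lane, and every fourth
 * entry the accumulated word is flushed to VFRETA(i >> 2).
 */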
1880 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1881 if (j == rss_i)
1882 j = 0;
1883
1884 adapter->rss_indir_tbl[i] = j;
1885
1886 vfreta |= j << (i & 0x3) * 8;
1887 if ((i & 3) == 3) {
1888 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1889 vfreta = 0;
1890 }
1891 }
1892
1893 /* Perform hash on these packet types */
1894 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1895 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1896 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1897 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1898
1899 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1900
1901 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1902 }
1903
1904 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1905 struct ixgbevf_ring *ring)
1906 {
1907 struct ixgbe_hw *hw = &adapter->hw;
1908 union ixgbe_adv_rx_desc *rx_desc;
1909 u64 rdba = ring->dma;
1910 u32 rxdctl;
1911 u8 reg_idx = ring->reg_idx;
1912
1913 /* disable queue to avoid issues while updating state */
1914 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1915 ixgbevf_disable_rx_queue(adapter, ring);
1916
1917 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1918 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1919 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1920 ring->count * sizeof(union ixgbe_adv_rx_desc));
1921
1922 #ifndef CONFIG_SPARC
1923 /* enable relaxed ordering */
1924 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1925 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1926 #else
1927 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1928 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1929 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1930 #endif
1931
1932 /* reset head and tail pointers */
1933 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1934 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1935 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1936
1937 /* initialize rx_buffer_info */
1938 memset(ring->rx_buffer_info, 0,
1939 sizeof(struct ixgbevf_rx_buffer) * ring->count);
1940
1941 /* initialize Rx descriptor 0 */
1942 rx_desc = IXGBEVF_RX_DESC(ring, 0);
1943 rx_desc->wb.upper.length = 0;
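/* a zero length in the first descriptor's writeback area keeps the Rx
 * clean routine from mistaking stale data for a completed descriptor
 * before the ring is refilled
 */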
1944
1945 /* reset ntu and ntc to place SW in sync with hardware */
1946 ring->next_to_clean = 0;
1947 ring->next_to_use = 0;
1948 ring->next_to_alloc = 0;
1949
1950 ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1951
1952 /* RXDCTL.RLPML does not work on 82599 */
1953 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
1954 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
1955 IXGBE_RXDCTL_RLPML_EN);
1956
1957 #if (PAGE_SIZE < 8192)
1958 /* Limit the maximum frame size so we don't overrun the skb */
1959 if (ring_uses_build_skb(ring) &&
1960 !ring_uses_large_buffer(ring))
1961 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
1962 IXGBE_RXDCTL_RLPML_EN;
1963 #endif
1964 }
1965
1966 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1967 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1968
1969 ixgbevf_rx_desc_queue_enable(adapter, ring);
1970 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1971 }
1972
1973 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1974 struct ixgbevf_ring *rx_ring)
1975 {
1976 struct net_device *netdev = adapter->netdev;
1977 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1978
1979 /* set build_skb and buffer size flags */
1980 clear_ring_build_skb_enabled(rx_ring);
1981 clear_ring_uses_large_buffer(rx_ring);
1982
1983 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1984 return;
1985
1986 set_ring_build_skb_enabled(rx_ring);
1987
1988 if (PAGE_SIZE < 8192) {
1989 if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
1990 return;
1991
1992 set_ring_uses_large_buffer(rx_ring);
1993 }
1994 }
1995
1996 /**
1997 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1998 * @adapter: board private structure
1999 *
2000 * Configure the Rx unit of the MAC after a reset.
2001 **/
2002 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
2003 {
2004 struct ixgbe_hw *hw = &adapter->hw;
2005 struct net_device *netdev = adapter->netdev;
2006 int i, ret;
2007
2008 ixgbevf_setup_psrtype(adapter);
2009 if (hw->mac.type >= ixgbe_mac_X550_vf)
2010 ixgbevf_setup_vfmrqc(adapter);
2011
2012 spin_lock_bh(&adapter->mbx_lock);
2013 /* notify the PF of our intent to use this size of frame */
2014 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
2015 spin_unlock_bh(&adapter->mbx_lock);
2016 if (ret)
2017 dev_err(&adapter->pdev->dev,
2018 "Failed to set MTU at %d\n", netdev->mtu);
2019
2020 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2021 * the Base and Length of the Rx Descriptor Ring
2022 */
2023 for (i = 0; i < adapter->num_rx_queues; i++) {
2024 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2025
2026 ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2027 ixgbevf_configure_rx_ring(adapter, rx_ring);
2028 }
2029 }
2030
2031 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
2032 __be16 proto, u16 vid)
2033 {
2034 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2035 struct ixgbe_hw *hw = &adapter->hw;
2036 int err;
2037
2038 spin_lock_bh(&adapter->mbx_lock);
2039
2040 /* add VID to filter table */
2041 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
2042
2043 spin_unlock_bh(&adapter->mbx_lock);
2044
2045 /* translate error return types so the error makes sense */
2046 if (err == IXGBE_ERR_MBX)
2047 return -EIO;
2048
2049 if (err == IXGBE_ERR_INVALID_ARGUMENT)
2050 return -EACCES;
2051
2052 set_bit(vid, adapter->active_vlans);
2053
2054 return err;
2055 }
2056
2057 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
2058 __be16 proto, u16 vid)
2059 {
2060 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2061 struct ixgbe_hw *hw = &adapter->hw;
2062 int err;
2063
2064 spin_lock_bh(&adapter->mbx_lock);
2065
2066 /* remove VID from filter table */
2067 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
2068
2069 spin_unlock_bh(&adapter->mbx_lock);
2070
2071 clear_bit(vid, adapter->active_vlans);
2072
2073 return err;
2074 }
2075
2076 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2077 {
2078 u16 vid;
2079
2080 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2081 ixgbevf_vlan_rx_add_vid(adapter->netdev,
2082 htons(ETH_P_8021Q), vid);
2083 }
2084
2085 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2086 {
2087 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2088 struct ixgbe_hw *hw = &adapter->hw;
2089 int count = 0;
2090
2091 if (!netdev_uc_empty(netdev)) {
2092 struct netdev_hw_addr *ha;
2093
2094 netdev_for_each_uc_addr(ha, netdev) {
2095 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2096 udelay(200);
2097 }
2098 } else {
2099 /* If the list is empty then send message to PF driver to
2100 * clear all MAC VLANs on this VF.
2101 */
2102 hw->mac.ops.set_uc_addr(hw, 0, NULL);
2103 }
2104
2105 return count;
2106 }
2107
2108 /**
2109 * ixgbevf_set_rx_mode - Multicast and unicast set
2110 * @netdev: network interface device structure
2111 *
2112 * The set_rx_mode entry point is called whenever the multicast address
2113 * list, unicast address list or the network interface flags are updated.
2114 * This routine is responsible for configuring the hardware for proper
2115 * multicast mode and configuring requested unicast filters.
2116 **/
2117 static void ixgbevf_set_rx_mode(struct net_device *netdev)
2118 {
2119 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2120 struct ixgbe_hw *hw = &adapter->hw;
2121 unsigned int flags = netdev->flags;
2122 int xcast_mode;
2123
2124 /* request the most inclusive mode we need */
2125 if (flags & IFF_PROMISC)
2126 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
2127 else if (flags & IFF_ALLMULTI)
2128 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
2129 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
2130 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
2131 else
2132 xcast_mode = IXGBEVF_XCAST_MODE_NONE;
2133
2134 spin_lock_bh(&adapter->mbx_lock);
2135
2136 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
2137
2138 /* reprogram multicast list */
2139 hw->mac.ops.update_mc_addr_list(hw, netdev);
2140
2141 ixgbevf_write_uc_addr_list(netdev);
2142
2143 spin_unlock_bh(&adapter->mbx_lock);
2144 }
2145
2146 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2147 {
2148 int q_idx;
2149 struct ixgbevf_q_vector *q_vector;
2150 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2151
2152 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2153 q_vector = adapter->q_vector[q_idx];
2154 napi_enable(&q_vector->napi);
2155 }
2156 }
2157
2158 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2159 {
2160 int q_idx;
2161 struct ixgbevf_q_vector *q_vector;
2162 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2163
2164 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2165 q_vector = adapter->q_vector[q_idx];
2166 napi_disable(&q_vector->napi);
2167 }
2168 }
2169
2170 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2171 {
2172 struct ixgbe_hw *hw = &adapter->hw;
2173 unsigned int def_q = 0;
2174 unsigned int num_tcs = 0;
2175 unsigned int num_rx_queues = adapter->num_rx_queues;
2176 unsigned int num_tx_queues = adapter->num_tx_queues;
2177 int err;
2178
2179 spin_lock_bh(&adapter->mbx_lock);
2180
2181 /* fetch queue configuration from the PF */
2182 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2183
2184 spin_unlock_bh(&adapter->mbx_lock);
2185
2186 if (err)
2187 return err;
2188
2189 if (num_tcs > 1) {
2190 /* we need only one Tx queue */
2191 num_tx_queues = 1;
2192
2193 /* update default Tx ring register index */
2194 adapter->tx_ring[0]->reg_idx = def_q;
2195
2196 /* we need as many queues as traffic classes */
2197 num_rx_queues = num_tcs;
2198 }
2199
2200 /* if we have a bad config, abort and request a queue reset */
2201 if ((adapter->num_rx_queues != num_rx_queues) ||
2202 (adapter->num_tx_queues != num_tx_queues)) {
2203 /* force mailbox timeout to prevent further messages */
2204 hw->mbx.timeout = 0;
2205
2206 /* wait for watchdog to come around and bail us out */
2207 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2208 }
2209
2210 return 0;
2211 }
2212
2213 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2214 {
2215 ixgbevf_configure_dcb(adapter);
2216
2217 ixgbevf_set_rx_mode(adapter->netdev);
2218
2219 ixgbevf_restore_vlan(adapter);
2220 ixgbevf_ipsec_restore(adapter);
2221
2222 ixgbevf_configure_tx(adapter);
2223 ixgbevf_configure_rx(adapter);
2224 }
2225
2226 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2227 {
2228 /* Only save pre-reset stats if there are some */
2229 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2230 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2231 adapter->stats.base_vfgprc;
2232 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2233 adapter->stats.base_vfgptc;
2234 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2235 adapter->stats.base_vfgorc;
2236 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2237 adapter->stats.base_vfgotc;
2238 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2239 adapter->stats.base_vfmprc;
2240 }
2241 }
2242
2243 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2244 {
2245 struct ixgbe_hw *hw = &adapter->hw;
2246
2247 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2248 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2249 adapter->stats.last_vfgorc |=
2250 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2251 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2252 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2253 adapter->stats.last_vfgotc |=
2254 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2255 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2256
2257 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2258 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2259 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2260 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2261 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2262 }
2263
2264 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2265 {
2266 struct ixgbe_hw *hw = &adapter->hw;
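/* mailbox API versions to try, newest first; the loop below falls back
 * one step at a time until the PF accepts one
 */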
2267 static const int api[] = {
2268 ixgbe_mbox_api_14,
2269 ixgbe_mbox_api_13,
2270 ixgbe_mbox_api_12,
2271 ixgbe_mbox_api_11,
2272 ixgbe_mbox_api_10,
2273 ixgbe_mbox_api_unknown
2274 };
2275 int err, idx = 0;
2276
2277 spin_lock_bh(&adapter->mbx_lock);
2278
2279 while (api[idx] != ixgbe_mbox_api_unknown) {
2280 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2281 if (!err)
2282 break;
2283 idx++;
2284 }
2285
2286 spin_unlock_bh(&adapter->mbx_lock);
2287 }
2288
2289 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2290 {
2291 struct net_device *netdev = adapter->netdev;
2292 struct ixgbe_hw *hw = &adapter->hw;
2293
2294 ixgbevf_configure_msix(adapter);
2295
2296 spin_lock_bh(&adapter->mbx_lock);
2297
2298 if (is_valid_ether_addr(hw->mac.addr))
2299 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2300 else
2301 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2302
2303 spin_unlock_bh(&adapter->mbx_lock);
2304
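/* ensure the configuration writes above are visible before the DOWN bit
 * is cleared and NAPI starts polling
 */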
2305 smp_mb__before_atomic();
2306 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2307 ixgbevf_napi_enable_all(adapter);
2308
2309 /* clear any pending interrupts, may auto mask */
2310 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2311 ixgbevf_irq_enable(adapter);
2312
2313 /* enable transmits */
2314 netif_tx_start_all_queues(netdev);
2315
2316 ixgbevf_save_reset_stats(adapter);
2317 ixgbevf_init_last_counter_stats(adapter);
2318
2319 hw->mac.get_link_status = 1;
2320 mod_timer(&adapter->service_timer, jiffies);
2321 }
2322
2323 void ixgbevf_up(struct ixgbevf_adapter *adapter)
2324 {
2325 ixgbevf_configure(adapter);
2326
2327 ixgbevf_up_complete(adapter);
2328 }
2329
2330 /**
2331 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2332 * @rx_ring: ring to free buffers from
2333 **/
2334 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2335 {
2336 u16 i = rx_ring->next_to_clean;
2337
2338 /* Free Rx ring sk_buff */
2339 if (rx_ring->skb) {
2340 dev_kfree_skb(rx_ring->skb);
2341 rx_ring->skb = NULL;
2342 }
2343
2344 /* Free all the Rx ring pages */
2345 while (i != rx_ring->next_to_alloc) {
2346 struct ixgbevf_rx_buffer *rx_buffer;
2347
2348 rx_buffer = &rx_ring->rx_buffer_info[i];
2349
2350 /* Invalidate cache lines that may have been written to by
2351 * device so that we avoid corrupting memory.
2352 */
2353 dma_sync_single_range_for_cpu(rx_ring->dev,
2354 rx_buffer->dma,
2355 rx_buffer->page_offset,
2356 ixgbevf_rx_bufsz(rx_ring),
2357 DMA_FROM_DEVICE);
2358
2359 /* free resources associated with mapping */
2360 dma_unmap_page_attrs(rx_ring->dev,
2361 rx_buffer->dma,
2362 ixgbevf_rx_pg_size(rx_ring),
2363 DMA_FROM_DEVICE,
2364 IXGBEVF_RX_DMA_ATTR);
2365
2366 __page_frag_cache_drain(rx_buffer->page,
2367 rx_buffer->pagecnt_bias);
2368
2369 i++;
2370 if (i == rx_ring->count)
2371 i = 0;
2372 }
2373
2374 rx_ring->next_to_alloc = 0;
2375 rx_ring->next_to_clean = 0;
2376 rx_ring->next_to_use = 0;
2377 }
2378
2379 /**
2380 * ixgbevf_clean_tx_ring - Free Tx Buffers
2381 * @tx_ring: ring to be cleaned
2382 **/
2383 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2384 {
2385 u16 i = tx_ring->next_to_clean;
2386 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2387
2388 while (i != tx_ring->next_to_use) {
2389 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2390
2391 /* Free all the Tx ring sk_buffs */
2392 if (ring_is_xdp(tx_ring))
2393 page_frag_free(tx_buffer->data);
2394 else
2395 dev_kfree_skb_any(tx_buffer->skb);
2396
2397 /* unmap skb header data */
2398 dma_unmap_single(tx_ring->dev,
2399 dma_unmap_addr(tx_buffer, dma),
2400 dma_unmap_len(tx_buffer, len),
2401 DMA_TO_DEVICE);
2402
2403 /* check for eop_desc to determine the end of the packet */
2404 eop_desc = tx_buffer->next_to_watch;
2405 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2406
2407 /* unmap remaining buffers */
2408 while (tx_desc != eop_desc) {
2409 tx_buffer++;
2410 tx_desc++;
2411 i++;
2412 if (unlikely(i == tx_ring->count)) {
2413 i = 0;
2414 tx_buffer = tx_ring->tx_buffer_info;
2415 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2416 }
2417
2418 /* unmap any remaining paged data */
2419 if (dma_unmap_len(tx_buffer, len))
2420 dma_unmap_page(tx_ring->dev,
2421 dma_unmap_addr(tx_buffer, dma),
2422 dma_unmap_len(tx_buffer, len),
2423 DMA_TO_DEVICE);
2424 }
2425
2426 /* move us one more past the eop_desc for start of next pkt */
2427 tx_buffer++;
2428 i++;
2429 if (unlikely(i == tx_ring->count)) {
2430 i = 0;
2431 tx_buffer = tx_ring->tx_buffer_info;
2432 }
2433 }
2434
2435 /* reset next_to_use and next_to_clean */
2436 tx_ring->next_to_use = 0;
2437 tx_ring->next_to_clean = 0;
2439 }
2440
2441 /**
2442 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2443 * @adapter: board private structure
2444 **/
2445 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2446 {
2447 int i;
2448
2449 for (i = 0; i < adapter->num_rx_queues; i++)
2450 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2451 }
2452
2453 /**
2454 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2455 * @adapter: board private structure
2456 **/
2457 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2458 {
2459 int i;
2460
2461 for (i = 0; i < adapter->num_tx_queues; i++)
2462 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2463 for (i = 0; i < adapter->num_xdp_queues; i++)
2464 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2465 }
2466
2467 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2468 {
2469 struct net_device *netdev = adapter->netdev;
2470 struct ixgbe_hw *hw = &adapter->hw;
2471 int i;
2472
2473 /* signal that we are down to the interrupt handler */
2474 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2475 return; /* do nothing if already down */
2476
2477 /* disable all enabled Rx queues */
2478 for (i = 0; i < adapter->num_rx_queues; i++)
2479 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2480
2481 usleep_range(10000, 20000);
2482
2483 netif_tx_stop_all_queues(netdev);
2484
2485 /* call carrier off first to avoid false dev_watchdog timeouts */
2486 netif_carrier_off(netdev);
2487 netif_tx_disable(netdev);
2488
2489 ixgbevf_irq_disable(adapter);
2490
2491 ixgbevf_napi_disable_all(adapter);
2492
2493 del_timer_sync(&adapter->service_timer);
2494
2495 /* disable transmits in the hardware now that interrupts are off */
2496 for (i = 0; i < adapter->num_tx_queues; i++) {
2497 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2498
2499 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2500 IXGBE_TXDCTL_SWFLSH);
2501 }
2502
2503 for (i = 0; i < adapter->num_xdp_queues; i++) {
2504 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
2505
2506 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2507 IXGBE_TXDCTL_SWFLSH);
2508 }
2509
2510 if (!pci_channel_offline(adapter->pdev))
2511 ixgbevf_reset(adapter);
2512
2513 ixgbevf_clean_all_tx_rings(adapter);
2514 ixgbevf_clean_all_rx_rings(adapter);
2515 }
2516
2517 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2518 {
2519 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2520 msleep(1);
2521
2522 ixgbevf_down(adapter);
2523 pci_set_master(adapter->pdev);
2524 ixgbevf_up(adapter);
2525
2526 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2527 }
2528
2529 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2530 {
2531 struct ixgbe_hw *hw = &adapter->hw;
2532 struct net_device *netdev = adapter->netdev;
2533
2534 if (hw->mac.ops.reset_hw(hw)) {
2535 hw_dbg(hw, "PF still resetting\n");
2536 } else {
2537 hw->mac.ops.init_hw(hw);
2538 ixgbevf_negotiate_api(adapter);
2539 }
2540
2541 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2542 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2543 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2544 }
2545
2546 adapter->last_reset = jiffies;
2547 }
2548
2549 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2550 int vectors)
2551 {
2552 int vector_threshold;
2553
2554 /* We'll want at least 2 (vector_threshold):
2555 * 1) TxQ[0] + RxQ[0] handler
2556 * 2) Other (Link Status Change, etc.)
2557 */
2558 vector_threshold = MIN_MSIX_COUNT;
2559
2560 /* The more vectors we get, the more we will assign to Tx/Rx cleanup
2561 * for the separate queues... where Rx cleanup >= Tx cleanup.
2562 * Right now, we simply care about how many we'll get; we'll
2563 * set them up later while requesting IRQs.
2564 */
2565 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2566 vector_threshold, vectors);
2567
2568 if (vectors < 0) {
2569 dev_err(&adapter->pdev->dev,
2570 "Unable to allocate MSI-X interrupts\n");
2571 kfree(adapter->msix_entries);
2572 adapter->msix_entries = NULL;
2573 return vectors;
2574 }
2575
2576 /* Adjust for only the vectors we'll use, which is minimum
2577 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2578 * vectors we were allocated.
2579 */
2580 adapter->num_msix_vectors = vectors;
2581
2582 return 0;
2583 }
2584
2585 /**
2586 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2587 * @adapter: board private structure to initialize
2588 *
2589 * This is the top-level queue allocation routine. The order here is very
2590 * important: start with the largest combination of features turned on at
2591 * once and end with the smallest set of features. This way large
2592 * combinations can be allocated if they're turned on, and smaller
2593 * combinations are the fall-through conditions.
2594 *
2595 **/
2596 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2597 {
2598 struct ixgbe_hw *hw = &adapter->hw;
2599 unsigned int def_q = 0;
2600 unsigned int num_tcs = 0;
2601 int err;
2602
2603 /* Start with base case */
2604 adapter->num_rx_queues = 1;
2605 adapter->num_tx_queues = 1;
2606 adapter->num_xdp_queues = 0;
2607
2608 spin_lock_bh(&adapter->mbx_lock);
2609
2610 /* fetch queue configuration from the PF */
2611 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2612
2613 spin_unlock_bh(&adapter->mbx_lock);
2614
2615 if (err)
2616 return;
2617
2618 /* we need as many queues as traffic classes */
2619 if (num_tcs > 1) {
2620 adapter->num_rx_queues = num_tcs;
2621 } else {
2622 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2623
2624 switch (hw->api_version) {
2625 case ixgbe_mbox_api_11:
2626 case ixgbe_mbox_api_12:
2627 case ixgbe_mbox_api_13:
2628 case ixgbe_mbox_api_14:
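/* XDP needs its own Tx rings next to the RSS rings; if the PF
 * granted exactly rss Tx queues there is no headroom, so shrink
 * the RSS set to make room for the paired XDP rings.
 */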
2629 if (adapter->xdp_prog &&
2630 hw->mac.max_tx_queues == rss)
2631 rss = rss > 3 ? 2 : 1;
2632
2633 adapter->num_rx_queues = rss;
2634 adapter->num_tx_queues = rss;
2635 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
2636 break;
2637 default:
2638 break;
2639 }
2640 }
2641 }
2642
2643 /**
2644 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2645 * @adapter: board private structure to initialize
2646 *
2647 * Attempt to configure the interrupts using the best available
2648 * capabilities of the hardware and the kernel.
2649 **/
2650 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2651 {
2652 int vector, v_budget;
2653
2654 /* It's easy to be greedy for MSI-X vectors, but it really
2655 * doesn't do us much good if we have a lot more vectors
2656 * than CPUs. So let's be conservative and only ask for
2657 * (roughly) the same number of vectors as there are CPUs.
2658 * The default is to use pairs of vectors.
2659 */
2660 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2661 v_budget = min_t(int, v_budget, num_online_cpus());
2662 v_budget += NON_Q_VECTORS;
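/* Example: on a 4-CPU host with two Rx and two Tx queues this yields
 * max(2, 2) = 2 queue vectors plus NON_Q_VECTORS (the single
 * mailbox/link vector), so three MSI-X entries are requested.
 */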
2663
2664 adapter->msix_entries = kcalloc(v_budget,
2665 sizeof(struct msix_entry), GFP_KERNEL);
2666 if (!adapter->msix_entries)
2667 return -ENOMEM;
2668
2669 for (vector = 0; vector < v_budget; vector++)
2670 adapter->msix_entries[vector].entry = vector;
2671
2672 /* A failure in MSI-X entry allocation isn't fatal, but the VF driver
2673 * does not support any other modes, so we will simply fail here. Note
2674 * that we clean up the msix_entries pointer elsewhere.
2675 */
2676 return ixgbevf_acquire_msix_vectors(adapter, v_budget);
2677 }
2678
2679 static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
2680 struct ixgbevf_ring_container *head)
2681 {
2682 ring->next = head->ring;
2683 head->ring = ring;
2684 head->count++;
2685 }
2686
2687 /**
2688 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
2689 * @adapter: board private structure to initialize
2690 * @v_idx: index of vector in adapter struct
2691 * @txr_count: number of Tx rings for q vector
2692 * @txr_idx: index of first Tx ring to assign
2693 * @xdp_count: total number of XDP rings to allocate
2694 * @xdp_idx: index of first XDP ring to allocate
2695 * @rxr_count: number of Rx rings for q vector
2696 * @rxr_idx: index of first Rx ring to assign
2697 *
2698 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2699 **/
2700 static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
2701 int txr_count, int txr_idx,
2702 int xdp_count, int xdp_idx,
2703 int rxr_count, int rxr_idx)
2704 {
2705 struct ixgbevf_q_vector *q_vector;
2706 int reg_idx = txr_idx + xdp_idx;
2707 struct ixgbevf_ring *ring;
2708 int ring_count, size;
2709
2710 ring_count = txr_count + xdp_count + rxr_count;
2711 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
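/* q_vector and its rings come from a single allocation: the ring array
 * sits immediately after the q_vector structure, so the ring pointer
 * below simply walks that trailing array.
 */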
2712
2713 /* allocate q_vector and rings */
2714 q_vector = kzalloc(size, GFP_KERNEL);
2715 if (!q_vector)
2716 return -ENOMEM;
2717
2718 /* initialize NAPI */
2719 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
2720
2721 /* tie q_vector and adapter together */
2722 adapter->q_vector[v_idx] = q_vector;
2723 q_vector->adapter = adapter;
2724 q_vector->v_idx = v_idx;
2725
2726 /* initialize pointer to rings */
2727 ring = q_vector->ring;
2728
2729 while (txr_count) {
2730 /* assign generic ring traits */
2731 ring->dev = &adapter->pdev->dev;
2732 ring->netdev = adapter->netdev;
2733
2734 /* configure backlink on ring */
2735 ring->q_vector = q_vector;
2736
2737 /* update q_vector Tx values */
2738 ixgbevf_add_ring(ring, &q_vector->tx);
2739
2740 /* apply Tx specific ring traits */
2741 ring->count = adapter->tx_ring_count;
2742 ring->queue_index = txr_idx;
2743 ring->reg_idx = reg_idx;
2744
2745 /* assign ring to adapter */
2746 adapter->tx_ring[txr_idx] = ring;
2747
2748 /* update count and index */
2749 txr_count--;
2750 txr_idx++;
2751 reg_idx++;
2752
2753 /* push pointer to next ring */
2754 ring++;
2755 }
2756
2757 while (xdp_count) {
2758 /* assign generic ring traits */
2759 ring->dev = &adapter->pdev->dev;
2760 ring->netdev = adapter->netdev;
2761
2762 /* configure backlink on ring */
2763 ring->q_vector = q_vector;
2764
2765 /* update q_vector Tx values */
2766 ixgbevf_add_ring(ring, &q_vector->tx);
2767
2768 /* apply Tx specific ring traits */
2769 ring->count = adapter->tx_ring_count;
2770 ring->queue_index = xdp_idx;
2771 ring->reg_idx = reg_idx;
2772 set_ring_xdp(ring);
2773
2774 /* assign ring to adapter */
2775 adapter->xdp_ring[xdp_idx] = ring;
2776
2777 /* update count and index */
2778 xdp_count--;
2779 xdp_idx++;
2780 reg_idx++;
2781
2782 /* push pointer to next ring */
2783 ring++;
2784 }
2785
2786 while (rxr_count) {
2787 /* assign generic ring traits */
2788 ring->dev = &adapter->pdev->dev;
2789 ring->netdev = adapter->netdev;
2790
2791 /* configure backlink on ring */
2792 ring->q_vector = q_vector;
2793
2794 /* update q_vector Rx values */
2795 ixgbevf_add_ring(ring, &q_vector->rx);
2796
2797 /* apply Rx specific ring traits */
2798 ring->count = adapter->rx_ring_count;
2799 ring->queue_index = rxr_idx;
2800 ring->reg_idx = rxr_idx;
2801
2802 /* assign ring to adapter */
2803 adapter->rx_ring[rxr_idx] = ring;
2804
2805 /* update count and index */
2806 rxr_count--;
2807 rxr_idx++;
2808
2809 /* push pointer to next ring */
2810 ring++;
2811 }
2812
2813 return 0;
2814 }
2815
2816 /**
2817 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
2818 * @adapter: board private structure to initialize
2819 * @v_idx: index of vector in adapter struct
2820 *
2821 * This function frees the memory allocated to the q_vector. In addition, if
2822 * NAPI is enabled, it will delete any references to the NAPI struct prior
2823 * to freeing the q_vector.
2824 **/
2825 static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
2826 {
2827 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
2828 struct ixgbevf_ring *ring;
2829
2830 ixgbevf_for_each_ring(ring, q_vector->tx) {
2831 if (ring_is_xdp(ring))
2832 adapter->xdp_ring[ring->queue_index] = NULL;
2833 else
2834 adapter->tx_ring[ring->queue_index] = NULL;
2835 }
2836
2837 ixgbevf_for_each_ring(ring, q_vector->rx)
2838 adapter->rx_ring[ring->queue_index] = NULL;
2839
2840 adapter->q_vector[v_idx] = NULL;
2841 netif_napi_del(&q_vector->napi);
2842
2843 /* ixgbevf_get_stats() might access the rings on this vector,
2844 * we must wait a grace period before freeing it.
2845 */
2846 kfree_rcu(q_vector, rcu);
2847 }
2848
2849 /**
2850 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2851 * @adapter: board private structure to initialize
2852 *
2853 * We allocate one q_vector per queue interrupt. If allocation fails we
2854 * return -ENOMEM.
2855 **/
2856 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2857 {
2858 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2859 int rxr_remaining = adapter->num_rx_queues;
2860 int txr_remaining = adapter->num_tx_queues;
2861 int xdp_remaining = adapter->num_xdp_queues;
2862 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
2863 int err;
2864
2865 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
2866 for (; rxr_remaining; v_idx++, q_vectors--) {
2867 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2868
2869 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2870 0, 0, 0, 0, rqpv, rxr_idx);
2871 if (err)
2872 goto err_out;
2873
2874 /* update counts and index */
2875 rxr_remaining -= rqpv;
2876 rxr_idx += rqpv;
2877 }
2878 }
2879
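/* spread the remaining rings evenly: DIV_ROUND_UP is recomputed on
 * every pass, so e.g. 4 Rx rings over 3 vectors come out as 2, 1 and 1
 */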
2880 for (; q_vectors; v_idx++, q_vectors--) {
2881 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2882 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
2883 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
2884
2885 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2886 tqpv, txr_idx,
2887 xqpv, xdp_idx,
2888 rqpv, rxr_idx);
2889
2890 if (err)
2891 goto err_out;
2892
2893 /* update counts and index */
2894 rxr_remaining -= rqpv;
2895 rxr_idx += rqpv;
2896 txr_remaining -= tqpv;
2897 txr_idx += tqpv;
2898 xdp_remaining -= xqpv;
2899 xdp_idx += xqpv;
2900 }
2901
2902 return 0;
2903
2904 err_out:
2905 while (v_idx) {
2906 v_idx--;
2907 ixgbevf_free_q_vector(adapter, v_idx);
2908 }
2909
2910 return -ENOMEM;
2911 }
2912
2913 /**
2914 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2915 * @adapter: board private structure to initialize
2916 *
2917 * This function frees the memory allocated to the q_vectors. In addition, if
2918 * NAPI is enabled, it will delete any references to the NAPI struct prior
2919 * to freeing the q_vector.
2920 **/
2921 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2922 {
2923 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2924
2925 while (q_vectors) {
2926 q_vectors--;
2927 ixgbevf_free_q_vector(adapter, q_vectors);
2928 }
2929 }
2930
2931 /**
2932 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2933 * @adapter: board private structure
2934 *
2935 **/
2936 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2937 {
2938 if (!adapter->msix_entries)
2939 return;
2940
2941 pci_disable_msix(adapter->pdev);
2942 kfree(adapter->msix_entries);
2943 adapter->msix_entries = NULL;
2944 }
2945
2946 /**
2947 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2948 * @adapter: board private structure to initialize
2949 *
2950 **/
2951 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2952 {
2953 int err;
2954
2955 /* Number of supported queues */
2956 ixgbevf_set_num_queues(adapter);
2957
2958 err = ixgbevf_set_interrupt_capability(adapter);
2959 if (err) {
2960 hw_dbg(&adapter->hw,
2961 "Unable to setup interrupt capabilities\n");
2962 goto err_set_interrupt;
2963 }
2964
2965 err = ixgbevf_alloc_q_vectors(adapter);
2966 if (err) {
2967 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2968 goto err_alloc_q_vectors;
2969 }
2970
2971 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
2972 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2973 adapter->num_rx_queues, adapter->num_tx_queues,
2974 adapter->num_xdp_queues);
2975
2976 set_bit(__IXGBEVF_DOWN, &adapter->state);
2977
2978 return 0;
2979 err_alloc_q_vectors:
2980 ixgbevf_reset_interrupt_capability(adapter);
2981 err_set_interrupt:
2982 return err;
2983 }
2984
2985 /**
2986 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2987 * @adapter: board private structure to clear interrupt scheme on
2988 *
2989 * We go through and clear interrupt specific resources and reset the structure
2990 * to pre-load conditions
2991 **/
2992 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2993 {
2994 adapter->num_tx_queues = 0;
2995 adapter->num_xdp_queues = 0;
2996 adapter->num_rx_queues = 0;
2997
2998 ixgbevf_free_q_vectors(adapter);
2999 ixgbevf_reset_interrupt_capability(adapter);
3000 }
3001
3002 /**
3003 * ixgbevf_sw_init - Initialize general software structures
3004 * @adapter: board private structure to initialize
3005 *
3006 * ixgbevf_sw_init initializes the Adapter private data structure.
3007 * Fields are initialized based on PCI device information and
3008 * OS network device settings (MTU size).
3009 **/
3010 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
3011 {
3012 struct ixgbe_hw *hw = &adapter->hw;
3013 struct pci_dev *pdev = adapter->pdev;
3014 struct net_device *netdev = adapter->netdev;
3015 int err;
3016
3017 /* PCI config space info */
3018 hw->vendor_id = pdev->vendor;
3019 hw->device_id = pdev->device;
3020 hw->revision_id = pdev->revision;
3021 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3022 hw->subsystem_device_id = pdev->subsystem_device;
3023
3024 hw->mbx.ops.init_params(hw);
3025
3026 if (hw->mac.type >= ixgbe_mac_X550_vf) {
3027 err = ixgbevf_init_rss_key(adapter);
3028 if (err)
3029 goto out;
3030 }
3031
3032 /* assume legacy case in which PF would only give VF 2 queues */
3033 hw->mac.max_tx_queues = 2;
3034 hw->mac.max_rx_queues = 2;
3035
3036 /* lock to protect mailbox accesses */
3037 spin_lock_init(&adapter->mbx_lock);
3038
3039 err = hw->mac.ops.reset_hw(hw);
3040 if (err) {
3041 dev_info(&pdev->dev,
3042 "PF still in reset state. Is the PF interface up?\n");
3043 } else {
3044 err = hw->mac.ops.init_hw(hw);
3045 if (err) {
3046 pr_err("init_shared_code failed: %d\n", err);
3047 goto out;
3048 }
3049 ixgbevf_negotiate_api(adapter);
3050 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3051 if (err)
3052 dev_info(&pdev->dev, "Error reading MAC address\n");
3053 else if (is_zero_ether_addr(adapter->hw.mac.addr))
3054 dev_info(&pdev->dev,
3055 "MAC address not assigned by administrator.\n");
3056 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
3057 }
3058
3059 if (!is_valid_ether_addr(netdev->dev_addr)) {
3060 dev_info(&pdev->dev, "Assigning random MAC address\n");
3061 eth_hw_addr_random(netdev);
3062 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3063 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3064 }
3065
3066 /* Enable dynamic interrupt throttling rates */
3067 adapter->rx_itr_setting = 1;
3068 adapter->tx_itr_setting = 1;
3069
3070 /* set default ring sizes */
3071 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3072 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3073
3074 set_bit(__IXGBEVF_DOWN, &adapter->state);
3075 return 0;
3076
3077 out:
3078 return err;
3079 }
3080
3081 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3082 { \
3083 u32 current_counter = IXGBE_READ_REG(hw, reg); \
3084 if (current_counter < last_counter) \
3085 counter += 0x100000000LL; \
3086 last_counter = current_counter; \
3087 counter &= 0xFFFFFFFF00000000LL; \
3088 counter |= current_counter; \
3089 }
3090
3091 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3092 { \
3093 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3094 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3095 u64 current_counter = (current_counter_msb << 32) | \
3096 current_counter_lsb; \
3097 if (current_counter < last_counter) \
3098 counter += 0x1000000000LL; \
3099 last_counter = current_counter; \
3100 counter &= 0xFFFFFFF000000000LL; \
3101 counter |= current_counter; \
3102 }
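/* Worked example of the rollover handling above: if a 32-bit counter last
 * read 0xFFFFFFF0 and now reads 0x00000010, current < last, so 2^32 is
 * added to the running 64-bit total before its low 32 bits are replaced;
 * the 36-bit variant does the same with a 2^36 carry.
 */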
3103 /**
3104 * ixgbevf_update_stats - Update the board statistics counters.
3105 * @adapter: board private structure
3106 **/
3107 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3108 {
3109 struct ixgbe_hw *hw = &adapter->hw;
3110 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
3111 u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
3112 int i;
3113
3114 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3115 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3116 return;
3117
3118 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3119 adapter->stats.vfgprc);
3120 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3121 adapter->stats.vfgptc);
3122 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3123 adapter->stats.last_vfgorc,
3124 adapter->stats.vfgorc);
3125 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3126 adapter->stats.last_vfgotc,
3127 adapter->stats.vfgotc);
3128 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3129 adapter->stats.vfmprc);
3130
3131 for (i = 0; i < adapter->num_rx_queues; i++) {
3132 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
3133
3134 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
3135 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3136 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
3137 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3138 }
3139
3140 adapter->hw_csum_rx_error = hw_csum_rx_error;
3141 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
3142 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
3143 adapter->alloc_rx_page = alloc_rx_page;
3144 }
3145
3146 /**
3147 * ixgbevf_service_timer - Timer Call-back
3148 * @t: pointer to timer_list struct
3149 **/
3150 static void ixgbevf_service_timer(struct timer_list *t)
3151 {
3152 struct ixgbevf_adapter *adapter = from_timer(adapter, t,
3153 service_timer);
3154
3155 /* Reset the timer */
3156 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
3157
3158 ixgbevf_service_event_schedule(adapter);
3159 }
3160
3161 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3162 {
3163 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
3164 return;
3165
3166 rtnl_lock();
3167 /* If we're already down or resetting, just bail */
3168 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3169 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
3170 test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
3171 rtnl_unlock();
3172 return;
3173 }
3174
3175 adapter->tx_timeout_count++;
3176
3177 ixgbevf_reinit_locked(adapter);
3178 rtnl_unlock();
3179 }
3180
3181 /**
3182 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3183 * @adapter: pointer to the device adapter structure
3184 *
3185 * This function serves two purposes. First, it strobes the interrupt lines
3186 * to make certain interrupts are occurring. Second, it sets the
3187 * bits needed to check for Tx hangs. As a result, we should immediately
3188 * determine if a hang has occurred.
3189 **/
3190 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3191 {
3192 struct ixgbe_hw *hw = &adapter->hw;
3193 u32 eics = 0;
3194 int i;
3195
3196 /* If we're down or resetting, just bail */
3197 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3198 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3199 return;
3200
3201 /* Force detection of hung controller */
3202 if (netif_carrier_ok(adapter->netdev)) {
3203 for (i = 0; i < adapter->num_tx_queues; i++)
3204 set_check_for_tx_hang(adapter->tx_ring[i]);
3205 for (i = 0; i < adapter->num_xdp_queues; i++)
3206 set_check_for_tx_hang(adapter->xdp_ring[i]);
3207 }
3208
3209 /* get one bit for every active Tx/Rx interrupt vector */
3210 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3211 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
3212
3213 if (qv->rx.ring || qv->tx.ring)
3214 eics |= BIT(i);
3215 }
3216
3217 /* Cause software interrupt to ensure rings are cleaned */
3218 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
3219 }
3220
3221 /**
3222 * ixgbevf_watchdog_update_link - update the link status
3223 * @adapter: pointer to the device adapter structure
3224 **/
3225 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3226 {
3227 struct ixgbe_hw *hw = &adapter->hw;
3228 u32 link_speed = adapter->link_speed;
3229 bool link_up = adapter->link_up;
3230 s32 err;
3231
3232 spin_lock_bh(&adapter->mbx_lock);
3233
3234 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3235
3236 spin_unlock_bh(&adapter->mbx_lock);
3237
3238 /* if check for link returns error we will need to reset */
3239 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3240 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3241 link_up = false;
3242 }
3243
3244 adapter->link_up = link_up;
3245 adapter->link_speed = link_speed;
3246 }
3247
3248 /**
3249 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3250 * print link up message
3251 * @adapter: pointer to the device adapter structure
3252 **/
3253 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3254 {
3255 struct net_device *netdev = adapter->netdev;
3256
3257 /* only continue if link was previously down */
3258 if (netif_carrier_ok(netdev))
3259 return;
3260
3261 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3262 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3263 "10 Gbps" :
3264 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3265 "1 Gbps" :
3266 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3267 "100 Mbps" :
3268 "unknown speed");
3269
3270 netif_carrier_on(netdev);
3271 }
3272
3273 /**
3274 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3275 * print link down message
3276 * @adapter: pointer to the adapter structure
3277 **/
3278 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3279 {
3280 struct net_device *netdev = adapter->netdev;
3281
3282 adapter->link_speed = 0;
3283
3284 /* only continue if link was up previously */
3285 if (!netif_carrier_ok(netdev))
3286 return;
3287
3288 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3289
3290 netif_carrier_off(netdev);
3291 }
3292
3293 /**
3294 * ixgbevf_watchdog_subtask - worker thread to bring link up
3295 * @adapter: board private structure
3296 **/
3297 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
3298 {
3299 /* if interface is down do nothing */
3300 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3301 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3302 return;
3303
3304 ixgbevf_watchdog_update_link(adapter);
3305
3306 if (adapter->link_up)
3307 ixgbevf_watchdog_link_is_up(adapter);
3308 else
3309 ixgbevf_watchdog_link_is_down(adapter);
3310
3311 ixgbevf_update_stats(adapter);
3312 }
3313
3314 /**
3315 * ixgbevf_service_task - manages and runs subtasks
3316 * @work: pointer to work_struct containing our data
3317 **/
3318 static void ixgbevf_service_task(struct work_struct *work)
3319 {
3320 struct ixgbevf_adapter *adapter = container_of(work,
3321 struct ixgbevf_adapter,
3322 service_task);
3323 struct ixgbe_hw *hw = &adapter->hw;
3324
3325 if (IXGBE_REMOVED(hw->hw_addr)) {
3326 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
3327 rtnl_lock();
3328 ixgbevf_down(adapter);
3329 rtnl_unlock();
3330 }
3331 return;
3332 }
3333
3334 ixgbevf_queue_reset_subtask(adapter);
3335 ixgbevf_reset_subtask(adapter);
3336 ixgbevf_watchdog_subtask(adapter);
3337 ixgbevf_check_hang_subtask(adapter);
3338
3339 ixgbevf_service_event_complete(adapter);
3340 }
3341
3342 /**
3343 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3344 * @tx_ring: Tx descriptor ring for a specific queue
3345 *
3346 * Free all transmit software resources
3347 **/
3348 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3349 {
3350 ixgbevf_clean_tx_ring(tx_ring);
3351
3352 vfree(tx_ring->tx_buffer_info);
3353 tx_ring->tx_buffer_info = NULL;
3354
3355 /* if not set, then don't free */
3356 if (!tx_ring->desc)
3357 return;
3358
3359 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3360 tx_ring->dma);
3361
3362 tx_ring->desc = NULL;
3363 }
3364
3365 /**
3366 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3367 * @adapter: board private structure
3368 *
3369 * Free all transmit software resources
3370 **/
3371 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3372 {
3373 int i;
3374
3375 for (i = 0; i < adapter->num_tx_queues; i++)
3376 if (adapter->tx_ring[i]->desc)
3377 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3378 for (i = 0; i < adapter->num_xdp_queues; i++)
3379 if (adapter->xdp_ring[i]->desc)
3380 ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3381 }
3382
3383 /**
3384 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3385 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
3386 *
3387 * Return 0 on success, negative on failure
3388 **/
3389 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3390 {
3391 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3392 int size;
3393
3394 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3395 tx_ring->tx_buffer_info = vmalloc(size);
3396 if (!tx_ring->tx_buffer_info)
3397 goto err;
3398
3399 u64_stats_init(&tx_ring->syncp);
3400
3401 /* round up to nearest 4K */
3402 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3403 tx_ring->size = ALIGN(tx_ring->size, 4096);
3404
3405 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3406 &tx_ring->dma, GFP_KERNEL);
3407 if (!tx_ring->desc)
3408 goto err;
3409
3410 return 0;
3411
3412 err:
3413 vfree(tx_ring->tx_buffer_info);
3414 tx_ring->tx_buffer_info = NULL;
3415 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3416 return -ENOMEM;
3417 }
3418
3419 /**
3420 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3421 * @adapter: board private structure
3422 *
3423 * If this function returns with an error, then it's possible one or
3424 * more of the rings is populated (while the rest are not). It is the
3425 * caller's duty to clean those orphaned rings.
3426 *
3427 * Return 0 on success, negative on failure
3428 **/
3429 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3430 {
3431 int i, j = 0, err = 0;
3432
3433 for (i = 0; i < adapter->num_tx_queues; i++) {
3434 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3435 if (!err)
3436 continue;
3437 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3438 goto err_setup_tx;
3439 }
3440
3441 for (j = 0; j < adapter->num_xdp_queues; j++) {
3442 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
3443 if (!err)
3444 continue;
3445 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3446 goto err_setup_tx;
3447 }
3448
3449 return 0;
3450 err_setup_tx:
3451 /* rewind the index freeing the rings as we go */
3452 while (j--)
3453 ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3454 while (i--)
3455 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3456
3457 return err;
3458 }
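
/*
 * Illustrative sketch, not part of the driver: the "rewind the index"
 * unwind used in ixgbevf_setup_all_tx_resources() above. On the first
 * allocation failure, `while (i--)` walks back over exactly the entries
 * that were already set up and frees them last-to-first.
 */
#include <stdlib.h>

static int setup_all(void **rings, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		rings[i] = malloc(128);
		if (!rings[i])
			goto err;
	}
	return 0;

err:
	while (i--) {		/* frees every ring set up before the failure */
		free(rings[i]);
		rings[i] = NULL;
	}
	return -1;
}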
3459
3460 /**
3461 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3462 * @adapter: board private structure
3463 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3464 *
3465 * Returns 0 on success, negative on failure
3466 **/
3467 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3468 struct ixgbevf_ring *rx_ring)
3469 {
3470 int size;
3471
3472 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3473 rx_ring->rx_buffer_info = vmalloc(size);
3474 if (!rx_ring->rx_buffer_info)
3475 goto err;
3476
3477 u64_stats_init(&rx_ring->syncp);
3478
3479 /* Round up to nearest 4K */
3480 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3481 rx_ring->size = ALIGN(rx_ring->size, 4096);
3482
3483 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3484 &rx_ring->dma, GFP_KERNEL);
3485
3486 if (!rx_ring->desc)
3487 goto err;
3488
3489 /* XDP RX-queue info */
3490 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3491 rx_ring->queue_index, 0) < 0)
3492 goto err;
3493
3494 rx_ring->xdp_prog = adapter->xdp_prog;
3495
3496 return 0;
3497 err:
3498 vfree(rx_ring->rx_buffer_info);
3499 rx_ring->rx_buffer_info = NULL;
3500 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3501 return -ENOMEM;
3502 }
3503
3504 /**
3505 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3506 * @adapter: board private structure
3507 *
3508 * If this function returns with an error, then it's possible one or
3509 * more of the rings is populated (while the rest are not). It is the
3510 * caller's duty to clean those orphaned rings.
3511 *
3512 * Return 0 on success, negative on failure
3513 **/
3514 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3515 {
3516 int i, err = 0;
3517
3518 for (i = 0; i < adapter->num_rx_queues; i++) {
3519 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3520 if (!err)
3521 continue;
3522 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3523 goto err_setup_rx;
3524 }
3525
3526 return 0;
3527 err_setup_rx:
3528 /* rewind the index freeing the rings as we go */
3529 while (i--)
3530 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3531 return err;
3532 }
3533
3534 /**
3535 * ixgbevf_free_rx_resources - Free Rx Resources
3536 * @rx_ring: ring to clean the resources from
3537 *
3538 * Free all receive software resources
3539 **/
3540 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3541 {
3542 ixgbevf_clean_rx_ring(rx_ring);
3543
3544 rx_ring->xdp_prog = NULL;
3545 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3546 vfree(rx_ring->rx_buffer_info);
3547 rx_ring->rx_buffer_info = NULL;
3548
3549 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3550 rx_ring->dma);
3551
3552 rx_ring->desc = NULL;
3553 }
3554
3555 /**
3556 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3557 * @adapter: board private structure
3558 *
3559 * Free all receive software resources
3560 **/
3561 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3562 {
3563 int i;
3564
3565 for (i = 0; i < adapter->num_rx_queues; i++)
3566 if (adapter->rx_ring[i]->desc)
3567 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3568 }
3569
3570 /**
3571 * ixgbevf_open - Called when a network interface is made active
3572 * @netdev: network interface device structure
3573 *
3574 * Returns 0 on success, negative value on failure
3575 *
3576 * The open entry point is called when a network interface is made
3577 * active by the system (IFF_UP). At this point all resources needed
3578 * for transmit and receive operations are allocated, the interrupt
3579 * handler is registered with the OS, the watchdog timer is started,
3580 * and the stack is notified that the interface is ready.
3581 **/
3582 int ixgbevf_open(struct net_device *netdev)
3583 {
3584 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3585 struct ixgbe_hw *hw = &adapter->hw;
3586 int err;
3587
3588 /* A previous failure to open the device because of a lack of
3589	 * available MSI-X vector resources may have reset the number
3590	 * of MSI-X vectors to zero. The only way to recover
3591 * is to unload/reload the driver and hope that the system has
3592 * been able to recover some MSIX vector resources.
3593 */
3594 if (!adapter->num_msix_vectors)
3595 return -ENOMEM;
3596
3597 if (hw->adapter_stopped) {
3598 ixgbevf_reset(adapter);
3599 /* if adapter is still stopped then PF isn't up and
3600 * the VF can't start.
3601 */
3602 if (hw->adapter_stopped) {
3603 err = IXGBE_ERR_MBX;
3604 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3605 goto err_setup_reset;
3606 }
3607 }
3608
3609 /* disallow open during test */
3610 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3611 return -EBUSY;
3612
3613 netif_carrier_off(netdev);
3614
3615 /* allocate transmit descriptors */
3616 err = ixgbevf_setup_all_tx_resources(adapter);
3617 if (err)
3618 goto err_setup_tx;
3619
3620 /* allocate receive descriptors */
3621 err = ixgbevf_setup_all_rx_resources(adapter);
3622 if (err)
3623 goto err_setup_rx;
3624
3625 ixgbevf_configure(adapter);
3626
3627 err = ixgbevf_request_irq(adapter);
3628 if (err)
3629 goto err_req_irq;
3630
3631 /* Notify the stack of the actual queue counts. */
3632 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3633 if (err)
3634 goto err_set_queues;
3635
3636 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3637 if (err)
3638 goto err_set_queues;
3639
3640 ixgbevf_up_complete(adapter);
3641
3642 return 0;
3643
3644 err_set_queues:
3645 ixgbevf_free_irq(adapter);
3646 err_req_irq:
3647 ixgbevf_free_all_rx_resources(adapter);
3648 err_setup_rx:
3649 ixgbevf_free_all_tx_resources(adapter);
3650 err_setup_tx:
3651 ixgbevf_reset(adapter);
3652 err_setup_reset:
3653
3654 return err;
3655 }
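
/*
 * Illustrative sketch, not part of the driver: the staged goto unwind used
 * in ixgbevf_open() above. Each failing step jumps to the label that undoes
 * only the steps that already succeeded, in reverse order. step_a, step_b
 * and undo_a are hypothetical stand-ins.
 */
#include <errno.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -ENOMEM; }	/* pretend this step fails */
static void undo_a(void) { }

static int bring_up(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;

	err = step_b();
	if (err)
		goto err_b;	/* must undo step_a before returning */

	return 0;

err_b:
	undo_a();
err_a:
	return err;
}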
3656
3657 /**
3658 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3659 * @adapter: the private adapter struct
3660 *
3661 * This function should contain the necessary work common to both suspending
3662 * and closing of the device.
3663 */
3664 static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3665 {
3666 ixgbevf_down(adapter);
3667 ixgbevf_free_irq(adapter);
3668 ixgbevf_free_all_tx_resources(adapter);
3669 ixgbevf_free_all_rx_resources(adapter);
3670 }
3671
3672 /**
3673 * ixgbevf_close - Disables a network interface
3674 * @netdev: network interface device structure
3675 *
3676 * Returns 0, this is not allowed to fail
3677 *
3678 * The close entry point is called when an interface is de-activated
3679 * by the OS. The hardware is still under the driver's control, but
3680 * needs to be disabled. A global MAC reset is issued to stop the
3681 * hardware, and all transmit and receive resources are freed.
3682 **/
3683 int ixgbevf_close(struct net_device *netdev)
3684 {
3685 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3686
3687 if (netif_device_present(netdev))
3688 ixgbevf_close_suspend(adapter);
3689
3690 return 0;
3691 }
3692
3693 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3694 {
3695 struct net_device *dev = adapter->netdev;
3696
3697 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3698 &adapter->state))
3699 return;
3700
3701 /* if interface is down do nothing */
3702 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3703 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3704 return;
3705
3706 /* Hardware has to reinitialize queues and interrupts to
3707 * match packet buffer alignment. Unfortunately, the
3708 * hardware is not flexible enough to do this dynamically.
3709 */
3710 rtnl_lock();
3711
3712 if (netif_running(dev))
3713 ixgbevf_close(dev);
3714
3715 ixgbevf_clear_interrupt_scheme(adapter);
3716 ixgbevf_init_interrupt_scheme(adapter);
3717
3718 if (netif_running(dev))
3719 ixgbevf_open(dev);
3720
3721 rtnl_unlock();
3722 }
3723
3724 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3725 u32 vlan_macip_lens, u32 fceof_saidx,
3726 u32 type_tucmd, u32 mss_l4len_idx)
3727 {
3728 struct ixgbe_adv_tx_context_desc *context_desc;
3729 u16 i = tx_ring->next_to_use;
3730
3731 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3732
3733 i++;
3734 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3735
3736 /* set bits to identify this as an advanced context descriptor */
3737 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3738
3739 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3740 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
3741 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3742 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3743 }
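
/*
 * Illustrative sketch, not part of the driver: the next_to_use advance in
 * ixgbevf_tx_ctxtdesc() above, which wraps the ring index without
 * requiring a power-of-two ring size.
 */
static unsigned short ring_advance(unsigned short i, unsigned short count)
{
	i++;
	return (i < count) ? i : 0;	/* wrap back to the first descriptor */
}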
3744
3745 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3746 struct ixgbevf_tx_buffer *first,
3747 u8 *hdr_len,
3748 struct ixgbevf_ipsec_tx_data *itd)
3749 {
3750 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3751 struct sk_buff *skb = first->skb;
3752 union {
3753 struct iphdr *v4;
3754 struct ipv6hdr *v6;
3755 unsigned char *hdr;
3756 } ip;
3757 union {
3758 struct tcphdr *tcp;
3759 unsigned char *hdr;
3760 } l4;
3761 u32 paylen, l4_offset;
3762 u32 fceof_saidx = 0;
3763 int err;
3764
3765 if (skb->ip_summed != CHECKSUM_PARTIAL)
3766 return 0;
3767
3768 if (!skb_is_gso(skb))
3769 return 0;
3770
3771 err = skb_cow_head(skb, 0);
3772 if (err < 0)
3773 return err;
3774
3775 if (eth_p_mpls(first->protocol))
3776 ip.hdr = skb_inner_network_header(skb);
3777 else
3778 ip.hdr = skb_network_header(skb);
3779 l4.hdr = skb_checksum_start(skb);
3780
3781 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3782 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3783
3784 /* initialize outer IP header fields */
3785 if (ip.v4->version == 4) {
3786 unsigned char *csum_start = skb_checksum_start(skb);
3787 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3788 int len = csum_start - trans_start;
3789
3790 /* IP header will have to cancel out any data that
3791 * is not a part of the outer IP header, so set to
3792 * a reverse csum if needed, else init check to 0.
3793 */
3794 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
3795 csum_fold(csum_partial(trans_start,
3796 len, 0)) : 0;
3797 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3798
3799 ip.v4->tot_len = 0;
3800 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3801 IXGBE_TX_FLAGS_CSUM |
3802 IXGBE_TX_FLAGS_IPV4;
3803 } else {
3804 ip.v6->payload_len = 0;
3805 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3806 IXGBE_TX_FLAGS_CSUM;
3807 }
3808
3809 /* determine offset of inner transport header */
3810 l4_offset = l4.hdr - skb->data;
3811
3812 /* compute length of segmentation header */
3813 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3814
3815 /* remove payload length from inner checksum */
3816 paylen = skb->len - l4_offset;
3817 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3818
3819 /* update gso size and bytecount with header size */
3820 first->gso_segs = skb_shinfo(skb)->gso_segs;
3821 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3822
3823	/* mss_l4len_idx: use 1 as index for TSO */
3824 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3825 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3826 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3827
3828 fceof_saidx |= itd->pfsa;
3829 type_tucmd |= itd->flags | itd->trailer_len;
3830
3831 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3832 vlan_macip_lens = l4.hdr - ip.hdr;
3833 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3834 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3835
3836 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
3837 mss_l4len_idx);
3838
3839 return 1;
3840 }
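
/*
 * Illustrative sketch, not part of the driver: how the TSO path above
 * packs mss_l4len_idx. The shift positions (L4 length at bit 8, MSS at
 * bit 16, context index at bit 4) are assumed from the 82599 advanced
 * context descriptor layout.
 */
#include <stdint.h>

static uint32_t pack_mss_l4len_idx(uint32_t l4len, uint32_t mss, uint32_t idx)
{
	return (l4len << 8) | (mss << 16) | (idx << 4);
}
/* e.g. pack_mss_l4len_idx(20, 1448, 1): a 20-byte TCP header, 1448-byte
 * segments, and context index 1 as used for TSO above. */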
3841
3842 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3843 struct ixgbevf_tx_buffer *first,
3844 struct ixgbevf_ipsec_tx_data *itd)
3845 {
3846 struct sk_buff *skb = first->skb;
3847 u32 vlan_macip_lens = 0;
3848 u32 fceof_saidx = 0;
3849 u32 type_tucmd = 0;
3850
3851 if (skb->ip_summed != CHECKSUM_PARTIAL)
3852 goto no_csum;
3853
3854 switch (skb->csum_offset) {
3855 case offsetof(struct tcphdr, check):
3856 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3857 fallthrough;
3858 case offsetof(struct udphdr, check):
3859 break;
3860 case offsetof(struct sctphdr, checksum):
3861 /* validate that this is actually an SCTP request */
3862 if (skb_csum_is_sctp(skb)) {
3863 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3864 break;
3865 }
3866 fallthrough;
3867 default:
3868 skb_checksum_help(skb);
3869 goto no_csum;
3870 }
3871
3872 if (first->protocol == htons(ETH_P_IP))
3873 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3874
3875 /* update TX checksum flag */
3876 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3877 vlan_macip_lens = skb_checksum_start_offset(skb) -
3878 skb_network_offset(skb);
3879 no_csum:
3880 /* vlan_macip_lens: MACLEN, VLAN tag */
3881 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3882 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3883
3884 fceof_saidx |= itd->pfsa;
3885 type_tucmd |= itd->flags | itd->trailer_len;
3886
3887 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3888 fceof_saidx, type_tucmd, 0);
3889 }
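
/*
 * Illustrative sketch, not part of the driver: why skb->csum_offset alone
 * identifies the L4 protocol in ixgbevf_tx_csum() above. The checksum
 * field sits at a different offset in each header: 16 bytes into TCP,
 * 6 into UDP, 8 into SCTP.
 */
#include <stdint.h>

enum l4_proto { L4_TCP, L4_UDP, L4_SCTP, L4_UNKNOWN };

static enum l4_proto l4_from_csum_offset(uint32_t csum_offset)
{
	switch (csum_offset) {
	case 16: return L4_TCP;		/* offsetof(struct tcphdr, check) */
	case 6:  return L4_UDP;		/* offsetof(struct udphdr, check) */
	case 8:  return L4_SCTP;	/* offsetof(struct sctphdr, checksum) */
	default: return L4_UNKNOWN;
	}
}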
3890
3891 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3892 {
3893 /* set type for advanced descriptor with frame checksum insertion */
3894 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3895 IXGBE_ADVTXD_DCMD_IFCS |
3896 IXGBE_ADVTXD_DCMD_DEXT);
3897
3898 /* set HW VLAN bit if VLAN is present */
3899 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3900 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3901
3902 /* set segmentation enable bits for TSO/FSO */
3903 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3904 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3905
3906 return cmd_type;
3907 }
3908
3909 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3910 u32 tx_flags, unsigned int paylen)
3911 {
3912 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3913
3914 /* enable L4 checksum for TSO and TX checksum offload */
3915 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3916 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3917
3918	/* enable IPv4 checksum for TSO */
3919 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3920 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3921
3922 /* enable IPsec */
3923 if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
3924 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
3925
3926 /* use index 1 context for TSO/FSO/FCOE/IPSEC */
3927 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
3928 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3929
3930 /* Check Context must be set if Tx switch is enabled, which it
3931	 * always is when virtual functions are running
3932 */
3933 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3934
3935 tx_desc->read.olinfo_status = olinfo_status;
3936 }
3937
3938 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3939 struct ixgbevf_tx_buffer *first,
3940 const u8 hdr_len)
3941 {
3942 struct sk_buff *skb = first->skb;
3943 struct ixgbevf_tx_buffer *tx_buffer;
3944 union ixgbe_adv_tx_desc *tx_desc;
3945 skb_frag_t *frag;
3946 dma_addr_t dma;
3947 unsigned int data_len, size;
3948 u32 tx_flags = first->tx_flags;
3949 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3950 u16 i = tx_ring->next_to_use;
3951
3952 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3953
3954 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3955
3956 size = skb_headlen(skb);
3957 data_len = skb->data_len;
3958
3959 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3960
3961 tx_buffer = first;
3962
3963 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3964 if (dma_mapping_error(tx_ring->dev, dma))
3965 goto dma_error;
3966
3967 /* record length, and DMA address */
3968 dma_unmap_len_set(tx_buffer, len, size);
3969 dma_unmap_addr_set(tx_buffer, dma, dma);
3970
3971 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3972
3973 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3974 tx_desc->read.cmd_type_len =
3975 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3976
3977 i++;
3978 tx_desc++;
3979 if (i == tx_ring->count) {
3980 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3981 i = 0;
3982 }
3983 tx_desc->read.olinfo_status = 0;
3984
3985 dma += IXGBE_MAX_DATA_PER_TXD;
3986 size -= IXGBE_MAX_DATA_PER_TXD;
3987
3988 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3989 }
3990
3991 if (likely(!data_len))
3992 break;
3993
3994 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3995
3996 i++;
3997 tx_desc++;
3998 if (i == tx_ring->count) {
3999 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4000 i = 0;
4001 }
4002 tx_desc->read.olinfo_status = 0;
4003
4004 size = skb_frag_size(frag);
4005 data_len -= size;
4006
4007 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
4008 DMA_TO_DEVICE);
4009
4010 tx_buffer = &tx_ring->tx_buffer_info[i];
4011 }
4012
4013 /* write last descriptor with RS and EOP bits */
4014 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
4015 tx_desc->read.cmd_type_len = cmd_type;
4016
4017 /* set the timestamp */
4018 first->time_stamp = jiffies;
4019
4020 skb_tx_timestamp(skb);
4021
4022 /* Force memory writes to complete before letting h/w know there
4023 * are new descriptors to fetch. (Only applicable for weak-ordered
4024 * memory model archs, such as IA-64).
4025 *
4026 * We also need this memory barrier (wmb) to make certain all of the
4027 * status bits have been updated before next_to_watch is written.
4028 */
4029 wmb();
4030
4031 /* set next_to_watch value indicating a packet is present */
4032 first->next_to_watch = tx_desc;
4033
4034 i++;
4035 if (i == tx_ring->count)
4036 i = 0;
4037
4038 tx_ring->next_to_use = i;
4039
4040 /* notify HW of packet */
4041 ixgbevf_write_tail(tx_ring, i);
4042
4043 return;
4044 dma_error:
4045 dev_err(tx_ring->dev, "TX DMA map failed\n");
4046 tx_buffer = &tx_ring->tx_buffer_info[i];
4047
4048 /* clear dma mappings for failed tx_buffer_info map */
4049 while (tx_buffer != first) {
4050 if (dma_unmap_len(tx_buffer, len))
4051 dma_unmap_page(tx_ring->dev,
4052 dma_unmap_addr(tx_buffer, dma),
4053 dma_unmap_len(tx_buffer, len),
4054 DMA_TO_DEVICE);
4055 dma_unmap_len_set(tx_buffer, len, 0);
4056
4057 if (i-- == 0)
4058 i += tx_ring->count;
4059 tx_buffer = &tx_ring->tx_buffer_info[i];
4060 }
4061
4062 if (dma_unmap_len(tx_buffer, len))
4063 dma_unmap_single(tx_ring->dev,
4064 dma_unmap_addr(tx_buffer, dma),
4065 dma_unmap_len(tx_buffer, len),
4066 DMA_TO_DEVICE);
4067 dma_unmap_len_set(tx_buffer, len, 0);
4068
4069 dev_kfree_skb_any(tx_buffer->skb);
4070 tx_buffer->skb = NULL;
4071
4072 tx_ring->next_to_use = i;
4073 }
4074
4075 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4076 {
4077 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4078 /* Herbert's original patch had:
4079 * smp_mb__after_netif_stop_queue();
4080 * but since that doesn't exist yet, just open code it.
4081 */
4082 smp_mb();
4083
4084	/* We need to check again in case another CPU has just
4085 * made room available.
4086 */
4087 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4088 return -EBUSY;
4089
4090 /* A reprieve! - use start_queue because it doesn't call schedule */
4091 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4092 ++tx_ring->tx_stats.restart_queue;
4093
4094 return 0;
4095 }
4096
4097 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4098 {
4099 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4100 return 0;
4101 return __ixgbevf_maybe_stop_tx(tx_ring, size);
4102 }
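
/*
 * Illustrative sketch, not the kernel netif API: the stop-then-recheck
 * pattern in __ixgbevf_maybe_stop_tx() above. Re-reading the free count
 * after stopping the queue, with a full barrier in between, closes the
 * race with a cleanup path that frees descriptors between the first check
 * and the stop.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint free_descs;
static atomic_bool queue_stopped;

static bool maybe_stop(unsigned int need)
{
	if (atomic_load(&free_descs) >= need)
		return false;				/* fast path: room left */

	atomic_store(&queue_stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the cleaner */

	if (atomic_load(&free_descs) < need)
		return true;				/* still full: stay stopped */

	atomic_store(&queue_stopped, false);		/* a reprieve */
	return false;
}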
4103
4104 static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4105 struct ixgbevf_ring *tx_ring)
4106 {
4107 struct ixgbevf_tx_buffer *first;
4108 int tso;
4109 u32 tx_flags = 0;
4110 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4111 struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
4112 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4113 unsigned short f;
4114 #endif
4115 u8 hdr_len = 0;
4116 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4117
4118 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4119 dev_kfree_skb_any(skb);
4120 return NETDEV_TX_OK;
4121 }
4122
4123 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
4124 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
4125 * + 2 desc gap to keep tail from touching head,
4126 * + 1 desc for context descriptor,
4127 * otherwise try next time
4128 */
4129 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4130 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
4131 skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
4132
4133 count += TXD_USE_COUNT(skb_frag_size(frag));
4134 }
4135 #else
4136 count += skb_shinfo(skb)->nr_frags;
4137 #endif
4138 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4139 tx_ring->tx_stats.tx_busy++;
4140 return NETDEV_TX_BUSY;
4141 }
4142
4143 /* record the location of the first descriptor for this packet */
4144 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4145 first->skb = skb;
4146 first->bytecount = skb->len;
4147 first->gso_segs = 1;
4148
4149 if (skb_vlan_tag_present(skb)) {
4150 tx_flags |= skb_vlan_tag_get(skb);
4151 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4152 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4153 }
4154
4155 /* record initial flags and protocol */
4156 first->tx_flags = tx_flags;
4157 first->protocol = vlan_get_protocol(skb);
4158
4159 #ifdef CONFIG_IXGBEVF_IPSEC
4160 if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
4161 goto out_drop;
4162 #endif
4163 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
4164 if (tso < 0)
4165 goto out_drop;
4166 else if (!tso)
4167 ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
4168
4169 ixgbevf_tx_map(tx_ring, first, hdr_len);
4170
4171 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4172
4173 return NETDEV_TX_OK;
4174
4175 out_drop:
4176 dev_kfree_skb_any(first->skb);
4177 first->skb = NULL;
4178
4179 return NETDEV_TX_OK;
4180 }
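
/*
 * Illustrative sketch, not part of the driver: the descriptor budget
 * computed at the top of ixgbevf_xmit_frame_ring() above. Assuming
 * IXGBE_MAX_DATA_PER_TXD is 16K as elsewhere in the ixgbe family, each
 * linear chunk or fragment costs DIV_ROUND_UP(len, 16K) descriptors.
 */
static unsigned int txd_use_count(unsigned int bytes)
{
	return (bytes + 16384 - 1) / 16384;
}
/* A 64K fragment therefore costs 4 descriptors; the "count + 3" passed to
 * ixgbevf_maybe_stop_tx() covers the context descriptor plus the
 * two-descriptor gap that keeps tail from touching head. */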
4181
4182 static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4183 {
4184 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4185 struct ixgbevf_ring *tx_ring;
4186
4187 if (skb->len <= 0) {
4188 dev_kfree_skb_any(skb);
4189 return NETDEV_TX_OK;
4190 }
4191
4192 /* The minimum packet size for olinfo paylen is 17 so pad the skb
4193 * in order to meet this minimum size requirement.
4194 */
4195 if (skb->len < 17) {
4196 if (skb_padto(skb, 17))
4197 return NETDEV_TX_OK;
4198 skb->len = 17;
4199 }
4200
4201 tx_ring = adapter->tx_ring[skb->queue_mapping];
4202 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4203 }
4204
4205 /**
4206 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4207 * @netdev: network interface device structure
4208 * @p: pointer to an address structure
4209 *
4210 * Returns 0 on success, negative on failure
4211 **/
4212 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4213 {
4214 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4215 struct ixgbe_hw *hw = &adapter->hw;
4216 struct sockaddr *addr = p;
4217 int err;
4218
4219 if (!is_valid_ether_addr(addr->sa_data))
4220 return -EADDRNOTAVAIL;
4221
4222 spin_lock_bh(&adapter->mbx_lock);
4223
4224 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4225
4226 spin_unlock_bh(&adapter->mbx_lock);
4227
4228 if (err)
4229 return -EPERM;
4230
4231 ether_addr_copy(hw->mac.addr, addr->sa_data);
4232 ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
4233 ether_addr_copy(netdev->dev_addr, addr->sa_data);
4234
4235 return 0;
4236 }
4237
4238 /**
4239 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4240 * @netdev: network interface device structure
4241 * @new_mtu: new value for maximum frame size
4242 *
4243 * Returns 0 on success, negative on failure
4244 **/
4245 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4246 {
4247 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4248 struct ixgbe_hw *hw = &adapter->hw;
4249 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4250 int ret;
4251
4252 /* prevent MTU being changed to a size unsupported by XDP */
4253 if (adapter->xdp_prog) {
4254 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4255 return -EPERM;
4256 }
4257
4258 spin_lock_bh(&adapter->mbx_lock);
4259 /* notify the PF of our intent to use this size of frame */
4260 ret = hw->mac.ops.set_rlpml(hw, max_frame);
4261 spin_unlock_bh(&adapter->mbx_lock);
4262 if (ret)
4263 return -EINVAL;
4264
4265 hw_dbg(hw, "changing MTU from %d to %d\n",
4266 netdev->mtu, new_mtu);
4267
4268 /* must set new MTU before calling down or up */
4269 netdev->mtu = new_mtu;
4270
4271 if (netif_running(netdev))
4272 ixgbevf_reinit_locked(adapter);
4273
4274 return 0;
4275 }
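
/*
 * Illustrative sketch, not part of the driver: the max_frame value sent to
 * the PF above. ETH_HLEN is 14 and ETH_FCS_LEN is 4, so a standard
 * 1500-byte MTU maps to an on-wire limit of 1518 bytes.
 */
static int mtu_to_max_frame(int mtu)
{
	return mtu + 14 + 4;	/* MTU + Ethernet header + FCS */
}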
4276
4277 static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
4278 {
4279 struct net_device *netdev = dev_get_drvdata(dev_d);
4280 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4281
4282 rtnl_lock();
4283 netif_device_detach(netdev);
4284
4285 if (netif_running(netdev))
4286 ixgbevf_close_suspend(adapter);
4287
4288 ixgbevf_clear_interrupt_scheme(adapter);
4289 rtnl_unlock();
4290
4291 return 0;
4292 }
4293
4294 static int __maybe_unused ixgbevf_resume(struct device *dev_d)
4295 {
4296 struct pci_dev *pdev = to_pci_dev(dev_d);
4297 struct net_device *netdev = pci_get_drvdata(pdev);
4298 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4299 u32 err;
4300
4301 adapter->hw.hw_addr = adapter->io_addr;
4302 smp_mb__before_atomic();
4303 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4304 pci_set_master(pdev);
4305
4306 ixgbevf_reset(adapter);
4307
4308 rtnl_lock();
4309 err = ixgbevf_init_interrupt_scheme(adapter);
4310 if (!err && netif_running(netdev))
4311 err = ixgbevf_open(netdev);
4312 rtnl_unlock();
4313 if (err)
4314 return err;
4315
4316 netif_device_attach(netdev);
4317
4318 return err;
4319 }
4320
4321 static void ixgbevf_shutdown(struct pci_dev *pdev)
4322 {
4323 ixgbevf_suspend(&pdev->dev);
4324 }
4325
4326 static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
4327 const struct ixgbevf_ring *ring)
4328 {
4329 u64 bytes, packets;
4330 unsigned int start;
4331
4332 if (ring) {
4333 do {
4334 start = u64_stats_fetch_begin_irq(&ring->syncp);
4335 bytes = ring->stats.bytes;
4336 packets = ring->stats.packets;
4337 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4338 stats->tx_bytes += bytes;
4339 stats->tx_packets += packets;
4340 }
4341 }
4342
4343 static void ixgbevf_get_stats(struct net_device *netdev,
4344 struct rtnl_link_stats64 *stats)
4345 {
4346 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4347 unsigned int start;
4348 u64 bytes, packets;
4349 const struct ixgbevf_ring *ring;
4350 int i;
4351
4352 ixgbevf_update_stats(adapter);
4353
4354 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
4355
4356 rcu_read_lock();
4357 for (i = 0; i < adapter->num_rx_queues; i++) {
4358 ring = adapter->rx_ring[i];
4359 do {
4360 start = u64_stats_fetch_begin_irq(&ring->syncp);
4361 bytes = ring->stats.bytes;
4362 packets = ring->stats.packets;
4363 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4364 stats->rx_bytes += bytes;
4365 stats->rx_packets += packets;
4366 }
4367
4368 for (i = 0; i < adapter->num_tx_queues; i++) {
4369 ring = adapter->tx_ring[i];
4370 ixgbevf_get_tx_ring_stats(stats, ring);
4371 }
4372
4373 for (i = 0; i < adapter->num_xdp_queues; i++) {
4374 ring = adapter->xdp_ring[i];
4375 ixgbevf_get_tx_ring_stats(stats, ring);
4376 }
4377 rcu_read_unlock();
4378 }
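
/*
 * Illustrative sketch, not the kernel u64_stats API: the reader side of
 * the begin/retry snapshot loop used above. A writer makes the sequence
 * count odd before updating the counters and even afterwards; a reader
 * retries until it sees the same even value on both sides of its copy.
 */
#include <stdatomic.h>
#include <stdint.h>

struct ring_stats {
	atomic_uint seq;
	uint64_t bytes, packets;
};

static void stats_snapshot(struct ring_stats *s,
			   uint64_t *bytes, uint64_t *packets)
{
	unsigned int start;

	do {
		while ((start = atomic_load(&s->seq)) & 1)
			;			/* writer in progress */
		*bytes = s->bytes;
		*packets = s->packets;
	} while (atomic_load(&s->seq) != start);	/* retry if it changed */
}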
4379
4380 #define IXGBEVF_MAX_MAC_HDR_LEN 127
4381 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511
4382
4383 static netdev_features_t
4384 ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4385 netdev_features_t features)
4386 {
4387 unsigned int network_hdr_len, mac_hdr_len;
4388
4389 /* Make certain the headers can be described by a context descriptor */
4390 mac_hdr_len = skb_network_header(skb) - skb->data;
4391 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4392 return features & ~(NETIF_F_HW_CSUM |
4393 NETIF_F_SCTP_CRC |
4394 NETIF_F_HW_VLAN_CTAG_TX |
4395 NETIF_F_TSO |
4396 NETIF_F_TSO6);
4397
4398 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4399 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4400 return features & ~(NETIF_F_HW_CSUM |
4401 NETIF_F_SCTP_CRC |
4402 NETIF_F_TSO |
4403 NETIF_F_TSO6);
4404
4405 /* We can only support IPV4 TSO in tunnels if we can mangle the
4406 * inner IP ID field, so strip TSO if MANGLEID is not supported.
4407 */
4408 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4409 features &= ~NETIF_F_TSO;
4410
4411 return features;
4412 }
4413
4414 static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4415 {
4416 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4417 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4418 struct bpf_prog *old_prog;
4419
4420 /* verify ixgbevf ring attributes are sufficient for XDP */
4421 for (i = 0; i < adapter->num_rx_queues; i++) {
4422 struct ixgbevf_ring *ring = adapter->rx_ring[i];
4423
4424 if (frame_size > ixgbevf_rx_bufsz(ring))
4425 return -EINVAL;
4426 }
4427
4428 old_prog = xchg(&adapter->xdp_prog, prog);
4429
4430 /* If transitioning XDP modes reconfigure rings */
4431 if (!!prog != !!old_prog) {
4432 /* Hardware has to reinitialize queues and interrupts to
4433 * match packet buffer alignment. Unfortunately, the
4434 * hardware is not flexible enough to do this dynamically.
4435 */
4436 if (netif_running(dev))
4437 ixgbevf_close(dev);
4438
4439 ixgbevf_clear_interrupt_scheme(adapter);
4440 ixgbevf_init_interrupt_scheme(adapter);
4441
4442 if (netif_running(dev))
4443 ixgbevf_open(dev);
4444 } else {
4445 for (i = 0; i < adapter->num_rx_queues; i++)
4446 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
4447 }
4448
4449 if (old_prog)
4450 bpf_prog_put(old_prog);
4451
4452 return 0;
4453 }
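
/*
 * Illustrative sketch, not part of the driver: the atomic pointer swap
 * used by ixgbevf_xdp_setup() above. atomic_exchange() installs the new
 * program and hands back the old one so its reference can be dropped once
 * nothing uses it anymore.
 */
#include <stdatomic.h>

static _Atomic(void *) cur_prog;

static void *swap_prog(void *new_prog)
{
	return atomic_exchange(&cur_prog, new_prog);	/* returns old prog */
}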
4454
4455 static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4456 {
4457 switch (xdp->command) {
4458 case XDP_SETUP_PROG:
4459 return ixgbevf_xdp_setup(dev, xdp->prog);
4460 default:
4461 return -EINVAL;
4462 }
4463 }
4464
4465 static const struct net_device_ops ixgbevf_netdev_ops = {
4466 .ndo_open = ixgbevf_open,
4467 .ndo_stop = ixgbevf_close,
4468 .ndo_start_xmit = ixgbevf_xmit_frame,
4469 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4470 .ndo_get_stats64 = ixgbevf_get_stats,
4471 .ndo_validate_addr = eth_validate_addr,
4472 .ndo_set_mac_address = ixgbevf_set_mac,
4473 .ndo_change_mtu = ixgbevf_change_mtu,
4474 .ndo_tx_timeout = ixgbevf_tx_timeout,
4475 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4476 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4477 .ndo_features_check = ixgbevf_features_check,
4478 .ndo_bpf = ixgbevf_xdp,
4479 };
4480
4481 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4482 {
4483 dev->netdev_ops = &ixgbevf_netdev_ops;
4484 ixgbevf_set_ethtool_ops(dev);
4485 dev->watchdog_timeo = 5 * HZ;
4486 }
4487
4488 /**
4489 * ixgbevf_probe - Device Initialization Routine
4490 * @pdev: PCI device information struct
4491 * @ent: entry in ixgbevf_pci_tbl
4492 *
4493 * Returns 0 on success, negative on failure
4494 *
4495 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
4496 * The OS initialization, configuring of the adapter private structure,
4497 * and a hardware reset occur.
4498 **/
4499 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4500 {
4501 struct net_device *netdev;
4502 struct ixgbevf_adapter *adapter = NULL;
4503 struct ixgbe_hw *hw = NULL;
4504 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
4505 int err, pci_using_dac;
4506 bool disable_dev = false;
4507
4508 err = pci_enable_device(pdev);
4509 if (err)
4510 return err;
4511
4512 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
4513 pci_using_dac = 1;
4514 } else {
4515 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4516 if (err) {
4517 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4518 goto err_dma;
4519 }
4520 pci_using_dac = 0;
4521 }
4522
4523 err = pci_request_regions(pdev, ixgbevf_driver_name);
4524 if (err) {
4525 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4526 goto err_pci_reg;
4527 }
4528
4529 pci_set_master(pdev);
4530
4531 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4532 MAX_TX_QUEUES);
4533 if (!netdev) {
4534 err = -ENOMEM;
4535 goto err_alloc_etherdev;
4536 }
4537
4538 SET_NETDEV_DEV(netdev, &pdev->dev);
4539
4540 adapter = netdev_priv(netdev);
4541
4542 adapter->netdev = netdev;
4543 adapter->pdev = pdev;
4544 hw = &adapter->hw;
4545 hw->back = adapter;
4546 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4547
4548 /* call save state here in standalone driver because it relies on
4549 * adapter struct to exist, and needs to call netdev_priv
4550 */
4551 pci_save_state(pdev);
4552
4553 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4554 pci_resource_len(pdev, 0));
4555 adapter->io_addr = hw->hw_addr;
4556 if (!hw->hw_addr) {
4557 err = -EIO;
4558 goto err_ioremap;
4559 }
4560
4561 ixgbevf_assign_netdev_ops(netdev);
4562
4563 /* Setup HW API */
4564 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4565 hw->mac.type = ii->mac;
4566
4567 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4568 sizeof(struct ixgbe_mbx_operations));
4569
4570 /* setup the private structure */
4571 err = ixgbevf_sw_init(adapter);
4572 if (err)
4573 goto err_sw_init;
4574
4575 /* The HW MAC address was set and/or determined in sw_init */
4576 if (!is_valid_ether_addr(netdev->dev_addr)) {
4577 pr_err("invalid MAC address\n");
4578 err = -EIO;
4579 goto err_sw_init;
4580 }
4581
4582 netdev->hw_features = NETIF_F_SG |
4583 NETIF_F_TSO |
4584 NETIF_F_TSO6 |
4585 NETIF_F_RXCSUM |
4586 NETIF_F_HW_CSUM |
4587 NETIF_F_SCTP_CRC;
4588
4589 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4590 NETIF_F_GSO_GRE_CSUM | \
4591 NETIF_F_GSO_IPXIP4 | \
4592 NETIF_F_GSO_IPXIP6 | \
4593 NETIF_F_GSO_UDP_TUNNEL | \
4594 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4595
4596 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4597 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4598 IXGBEVF_GSO_PARTIAL_FEATURES;
4599
4600 netdev->features = netdev->hw_features;
4601
4602 if (pci_using_dac)
4603 netdev->features |= NETIF_F_HIGHDMA;
4604
4605 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4606 netdev->mpls_features |= NETIF_F_SG |
4607 NETIF_F_TSO |
4608 NETIF_F_TSO6 |
4609 NETIF_F_HW_CSUM;
4610 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4611 netdev->hw_enc_features |= netdev->vlan_features;
4612
4613 /* set this bit last since it cannot be part of vlan_features */
4614 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4615 NETIF_F_HW_VLAN_CTAG_RX |
4616 NETIF_F_HW_VLAN_CTAG_TX;
4617
4618 netdev->priv_flags |= IFF_UNICAST_FLT;
4619
4620 /* MTU range: 68 - 1504 or 9710 */
4621 netdev->min_mtu = ETH_MIN_MTU;
4622 switch (adapter->hw.api_version) {
4623 case ixgbe_mbox_api_11:
4624 case ixgbe_mbox_api_12:
4625 case ixgbe_mbox_api_13:
4626 case ixgbe_mbox_api_14:
4627 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4628 (ETH_HLEN + ETH_FCS_LEN);
4629 break;
4630 default:
4631 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4632 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4633 (ETH_HLEN + ETH_FCS_LEN);
4634 else
4635 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4636 break;
4637 }
4638
4639 if (IXGBE_REMOVED(hw->hw_addr)) {
4640 err = -EIO;
4641 goto err_sw_init;
4642 }
4643
4644 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
4645
4646 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4647 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4648 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4649
4650 err = ixgbevf_init_interrupt_scheme(adapter);
4651 if (err)
4652 goto err_sw_init;
4653
4654 strcpy(netdev->name, "eth%d");
4655
4656 err = register_netdev(netdev);
4657 if (err)
4658 goto err_register;
4659
4660 pci_set_drvdata(pdev, netdev);
4661 netif_carrier_off(netdev);
4662 ixgbevf_init_ipsec_offload(adapter);
4663
4664 ixgbevf_init_last_counter_stats(adapter);
4665
4666 /* print the VF info */
4667 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4668 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4669
4670 switch (hw->mac.type) {
4671 case ixgbe_mac_X550_vf:
4672 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4673 break;
4674 case ixgbe_mac_X540_vf:
4675 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4676 break;
4677 case ixgbe_mac_82599_vf:
4678 default:
4679 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4680 break;
4681 }
4682
4683 return 0;
4684
4685 err_register:
4686 ixgbevf_clear_interrupt_scheme(adapter);
4687 err_sw_init:
4688 ixgbevf_reset_interrupt_capability(adapter);
4689 iounmap(adapter->io_addr);
4690 kfree(adapter->rss_key);
4691 err_ioremap:
4692 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4693 free_netdev(netdev);
4694 err_alloc_etherdev:
4695 pci_release_regions(pdev);
4696 err_pci_reg:
4697 err_dma:
4698 if (!adapter || disable_dev)
4699 pci_disable_device(pdev);
4700 return err;
4701 }
4702
4703 /**
4704 * ixgbevf_remove - Device Removal Routine
4705 * @pdev: PCI device information struct
4706 *
4707 * ixgbevf_remove is called by the PCI subsystem to alert the driver
4708 * that it should release a PCI device. This could be caused by a
4709 * Hot-Plug event, or because the driver is going to be removed from
4710 * memory.
4711 **/
4712 static void ixgbevf_remove(struct pci_dev *pdev)
4713 {
4714 struct net_device *netdev = pci_get_drvdata(pdev);
4715 struct ixgbevf_adapter *adapter;
4716 bool disable_dev;
4717
4718 if (!netdev)
4719 return;
4720
4721 adapter = netdev_priv(netdev);
4722
4723 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4724 cancel_work_sync(&adapter->service_task);
4725
4726 if (netdev->reg_state == NETREG_REGISTERED)
4727 unregister_netdev(netdev);
4728
4729 ixgbevf_stop_ipsec_offload(adapter);
4730 ixgbevf_clear_interrupt_scheme(adapter);
4731 ixgbevf_reset_interrupt_capability(adapter);
4732
4733 iounmap(adapter->io_addr);
4734 pci_release_regions(pdev);
4735
4736 hw_dbg(&adapter->hw, "Remove complete\n");
4737
4738 kfree(adapter->rss_key);
4739 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4740 free_netdev(netdev);
4741
4742 if (disable_dev)
4743 pci_disable_device(pdev);
4744 }
4745
4746 /**
4747 * ixgbevf_io_error_detected - called when PCI error is detected
4748 * @pdev: Pointer to PCI device
4749 * @state: The current pci connection state
4750 *
4751 * This function is called after a PCI bus error affecting
4752 * this device has been detected.
4753 **/
4754 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4755 pci_channel_state_t state)
4756 {
4757 struct net_device *netdev = pci_get_drvdata(pdev);
4758 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4759
4760 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4761 return PCI_ERS_RESULT_DISCONNECT;
4762
4763 rtnl_lock();
4764 netif_device_detach(netdev);
4765
4766 if (netif_running(netdev))
4767 ixgbevf_close_suspend(adapter);
4768
4769 if (state == pci_channel_io_perm_failure) {
4770 rtnl_unlock();
4771 return PCI_ERS_RESULT_DISCONNECT;
4772 }
4773
4774 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4775 pci_disable_device(pdev);
4776 rtnl_unlock();
4777
4778	/* Request a slot reset. */
4779 return PCI_ERS_RESULT_NEED_RESET;
4780 }
4781
4782 /**
4783 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4784 * @pdev: Pointer to PCI device
4785 *
4786 * Restart the card from scratch, as if from a cold-boot. Implementation
4787 * resembles the first-half of the ixgbevf_resume routine.
4788 **/
4789 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4790 {
4791 struct net_device *netdev = pci_get_drvdata(pdev);
4792 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4793
4794 if (pci_enable_device_mem(pdev)) {
4795 dev_err(&pdev->dev,
4796 "Cannot re-enable PCI device after reset.\n");
4797 return PCI_ERS_RESULT_DISCONNECT;
4798 }
4799
4800 adapter->hw.hw_addr = adapter->io_addr;
4801 smp_mb__before_atomic();
4802 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4803 pci_set_master(pdev);
4804
4805 ixgbevf_reset(adapter);
4806
4807 return PCI_ERS_RESULT_RECOVERED;
4808 }
4809
4810 /**
4811 * ixgbevf_io_resume - called when traffic can start flowing again.
4812 * @pdev: Pointer to PCI device
4813 *
4814 * This callback is called when the error recovery driver tells us that
4815 * it's OK to resume normal operation. Implementation resembles the
4816 * second-half of the ixgbevf_resume routine.
4817 **/
4818 static void ixgbevf_io_resume(struct pci_dev *pdev)
4819 {
4820 struct net_device *netdev = pci_get_drvdata(pdev);
4821
4822 rtnl_lock();
4823 if (netif_running(netdev))
4824 ixgbevf_open(netdev);
4825
4826 netif_device_attach(netdev);
4827 rtnl_unlock();
4828 }
4829
4830 /* PCI Error Recovery (ERS) */
4831 static const struct pci_error_handlers ixgbevf_err_handler = {
4832 .error_detected = ixgbevf_io_error_detected,
4833 .slot_reset = ixgbevf_io_slot_reset,
4834 .resume = ixgbevf_io_resume,
4835 };
4836
4837 static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
4838
4839 static struct pci_driver ixgbevf_driver = {
4840 .name = ixgbevf_driver_name,
4841 .id_table = ixgbevf_pci_tbl,
4842 .probe = ixgbevf_probe,
4843 .remove = ixgbevf_remove,
4844
4845 /* Power Management Hooks */
4846 .driver.pm = &ixgbevf_pm_ops,
4847
4848 .shutdown = ixgbevf_shutdown,
4849 .err_handler = &ixgbevf_err_handler
4850 };
4851
4852 /**
4853 * ixgbevf_init_module - Driver Registration Routine
4854 *
4855 * ixgbevf_init_module is the first routine called when the driver is
4856 * loaded. All it does is register with the PCI subsystem.
4857 **/
4858 static int __init ixgbevf_init_module(void)
4859 {
4860 pr_info("%s\n", ixgbevf_driver_string);
4861 pr_info("%s\n", ixgbevf_copyright);
4862 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4863 if (!ixgbevf_wq) {
4864 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4865 return -ENOMEM;
4866 }
4867
4868 return pci_register_driver(&ixgbevf_driver);
4869 }
4870
4871 module_init(ixgbevf_init_module);
4872
4873 /**
4874 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4875 *
4876 * ixgbevf_exit_module is called just before the driver is removed
4877 * from memory.
4878 **/
4879 static void __exit ixgbevf_exit_module(void)
4880 {
4881 pci_unregister_driver(&ixgbevf_driver);
4882 if (ixgbevf_wq) {
4883 destroy_workqueue(ixgbevf_wq);
4884 ixgbevf_wq = NULL;
4885 }
4886 }
4887
4888 #ifdef DEBUG
4889 /**
4890 * ixgbevf_get_hw_dev_name - return device name string
4891 * @hw: pointer to private hardware struct
4892 * used by hardware layer to print debugging information
4893 **/
4894 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4895 {
4896 struct ixgbevf_adapter *adapter = hw->back;
4897
4898 return adapter->netdev->name;
4899 }
4900
4901 #endif
4902 module_exit(ixgbevf_exit_module);
4903
4904 /* ixgbevf_main.c */
4905