// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>

#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame.  As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
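	/* pba is the packet buffer size in KB, so (pba << 10) is its size
	 * in bytes; IGC_PBA_34K leaves 34 * 1024 = 34816 bytes of Rx FIFO
	 * to fit the high water mark into.
	 */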
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_offload_apply(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
			xdp_return_frame(tx_buffer->xdpf);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	clear_ring_uses_large_buffer(rx_ring);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	igc_xdp_unregister_rxq_info(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	int size, desc_len, res;

	res = igc_xdp_register_rxq_info(rx_ring);
	if (res < 0)
		return res;

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	igc_xdp_unregister_rxq_info(rx_ring);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

static bool igc_xdp_is_enabled(struct igc_adapter *adapter)
{
	return !!adapter->xdp_prog;
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* set descriptor configuration */
	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
			(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
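	/* The fill above spreads entries evenly: with 4 RSS queues, for
	 * example, the first quarter of the table points at queue 0, the
	 * next quarter at queue 1, and so on.
	 */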
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	   matching the filter are enqueued onto 'queue'.  Otherwise, queue
 *	   assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

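	/* The first four bytes of the MAC address fill RAL and the last
	 * two the low half of RAH, matching the registers' little-endian
	 * byte order.
	 */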
	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *	    0 on no addresses written
 *	    X on writing X addresses to MTA
 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
{
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	u32 launchtime;

	/* FIXME: when using ETF together with taprio, we may have a
	 * case where 'delta' is larger than the cycle_time, this may
	 * cause problems if we don't read the current value of
	 * IGC_BASET, as the value written into the launchtime
	 * descriptor field may be misinterpreted.
	 */
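	/* Program only the offset of txtime inside the current Qbv cycle,
	 * i.e. (txtime - base_time) mod cycle_time.
	 */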
	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);

	return cpu_to_le32(launchtime);
}

static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    struct igc_tx_buffer *first,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	/* We assume there is always a valid Tx time available. Invalid times
	 * should have been handled by the upper layers.
	 */
	if (tx_ring->launchtime_enable) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
		ktime_t txtime = first->skb->tstamp;

		skb_txtime_consumed(first->skb);
		context_desc->launch_time = igc_tx_launchtime(adapter,
							      txtime);
	} else {
		context_desc->launch_time = 0;
	}
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: make the stopped queue visible to the completion
	 * path before re-checking for free descriptors below.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}

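/* Map a Tx flag bit onto a descriptor bit by scaling the masked input:
 * when both masks are single bits this compiles down to a shift, e.g.
 * IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, IGC_ADVTXD_DCMD_TSE) moves
 * the TSO flag into the TSE bit position without a conditional branch.
 */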
#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ?				\
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))

static u32 igc_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type = igc_tx_cmd_type(tx_flags);

	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

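		/* cmd_type carries no bits in the low length field, so
		 * XOR-ing the chunk length in below acts like OR and
		 * forms the final cmd_type_len word in one step.
		 */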
		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;
dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
			type_tucmd, mss_l4len_idx);

	return 1;
}

static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	u32 tx_flags = 0;
	unsigned short f;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);

		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
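		/* skb_padto() zeroes the tail room but does not update
		 * skb->len, so bump it to the padded size by hand.
		 */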
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size,
					       int *rx_buffer_pgcnt)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
			       unsigned int truesize)
{
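	/* Order-0 pages are split in half, so toggling the offset with XOR
	 * flips between the two halves; larger pages pack buffers linearly
	 * and simply advance the offset.
	 */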
#if (PAGE_SIZE < 8192)
	buffer->page_offset ^= truesize;
#else
	buffer->page_offset += truesize;
#endif
}

static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(ring) / 2;
#else
	truesize = ring_uses_build_skb(ring) ?
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	igc_rx_buffer_flip(rx_buffer, truesize);
}

static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	/* build an skb around the page buffer */
	skb = build_skb(va - IGC_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGC_SKB_PAD);
	__skb_put(skb, size);

	igc_rx_buffer_flip(rx_buffer, truesize);
	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	void *va = xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
		igc_rx_buffer_flip(rx_buffer, truesize);
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

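/* A page can only be recycled while the driver is its sole owner.
 * pagecnt_bias counts the references the driver still holds, so
 * page_count() minus the bias gives the number of buffers the stack
 * still has in flight.
 */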
igc_can_reuse_rx_page(struct igc_rx_buffer * rx_buffer,int rx_buffer_pgcnt)1688 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
1689 int rx_buffer_pgcnt)
1690 {
1691 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1692 struct page *page = rx_buffer->page;
1693
1694 /* avoid re-using remote and pfmemalloc pages */
1695 if (!dev_page_is_reusable(page))
1696 return false;
1697
1698 #if (PAGE_SIZE < 8192)
1699 /* if we are only owner of page we can reuse it */
1700 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
1701 return false;
1702 #else
1703 #define IGC_LAST_OFFSET \
1704 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
1705
1706 if (rx_buffer->page_offset > IGC_LAST_OFFSET)
1707 return false;
1708 #endif
1709
1710 /* If we have drained the page fragment pool we need to update
1711 * the pagecnt_bias and page count so that we fully restock the
1712 * number of references the driver holds.
1713 */
1714 if (unlikely(pagecnt_bias == 1)) {
1715 page_ref_add(page, USHRT_MAX - 1);
1716 rx_buffer->pagecnt_bias = USHRT_MAX;
1717 }
1718
1719 return true;
1720 }
1721
1722 /**
1723 * igc_is_non_eop - process handling of non-EOP buffers
1724 * @rx_ring: Rx ring being processed
1725 * @rx_desc: Rx descriptor for current buffer
1726 *
1727 * This function updates next to clean. If the buffer is an EOP buffer
1728 * this function exits returning false, otherwise it will place the
1729 * sk_buff in the next buffer to be chained and return true indicating
1730 * that this is in fact a non-EOP buffer.
1731 */
igc_is_non_eop(struct igc_ring * rx_ring,union igc_adv_rx_desc * rx_desc)1732 static bool igc_is_non_eop(struct igc_ring *rx_ring,
1733 union igc_adv_rx_desc *rx_desc)
1734 {
1735 u32 ntc = rx_ring->next_to_clean + 1;
1736
1737 /* fetch, update, and store next to clean */
1738 ntc = (ntc < rx_ring->count) ? ntc : 0;
1739 rx_ring->next_to_clean = ntc;
1740
1741 prefetch(IGC_RX_DESC(rx_ring, ntc));
1742
1743 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
1744 return false;
1745
1746 return true;
1747 }
1748
1749 /**
1750 * igc_cleanup_headers - Correct corrupted or empty headers
1751 * @rx_ring: rx descriptor ring packet is being transacted on
1752 * @rx_desc: pointer to the EOP Rx descriptor
1753 * @skb: pointer to current skb being fixed
1754 *
1755 * Address the case where we are pulling data in on pages only
1756 * and as such no data is present in the skb header.
1757 *
1758 * In addition if skb is not at least 60 bytes we need to pad it so that
1759 * it is large enough to qualify as a valid Ethernet frame.
1760 *
1761 * Returns true if an error was encountered and skb was freed.
1762 */
igc_cleanup_headers(struct igc_ring * rx_ring,union igc_adv_rx_desc * rx_desc,struct sk_buff * skb)1763 static bool igc_cleanup_headers(struct igc_ring *rx_ring,
1764 union igc_adv_rx_desc *rx_desc,
1765 struct sk_buff *skb)
1766 {
1767 /* XDP packets use error pointer so abort at this point */
1768 if (IS_ERR(skb))
1769 return true;
1770
1771 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
1772 struct net_device *netdev = rx_ring->netdev;
1773
1774 if (!(netdev->features & NETIF_F_RXALL)) {
1775 dev_kfree_skb_any(skb);
1776 return true;
1777 }
1778 }
1779
1780 /* if eth_skb_pad returns an error the skb was freed */
1781 if (eth_skb_pad(skb))
1782 return true;
1783
1784 return false;
1785 }
1786
igc_put_rx_buffer(struct igc_ring * rx_ring,struct igc_rx_buffer * rx_buffer,int rx_buffer_pgcnt)1787 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
1788 struct igc_rx_buffer *rx_buffer,
1789 int rx_buffer_pgcnt)
1790 {
1791 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
1792 /* hand second half of page back to the ring */
1793 igc_reuse_rx_page(rx_ring, rx_buffer);
1794 } else {
1795 /* We are not reusing the buffer so unmap it and free
1796 * any references we are holding to it
1797 */
1798 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1799 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1800 IGC_RX_DMA_ATTR);
1801 __page_frag_cache_drain(rx_buffer->page,
1802 rx_buffer->pagecnt_bias);
1803 }
1804
1805 /* clear contents of rx_buffer */
1806 rx_buffer->page = NULL;
1807 }
1808
igc_rx_offset(struct igc_ring * rx_ring)1809 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
1810 {
1811 struct igc_adapter *adapter = rx_ring->q_vector->adapter;
1812
1813 if (ring_uses_build_skb(rx_ring))
1814 return IGC_SKB_PAD;
1815 if (igc_xdp_is_enabled(adapter))
1816 return XDP_PACKET_HEADROOM;
1817
1818 return 0;
1819 }
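
/* Buffer layout sketch (headroom values depend on configuration): the
 * offset returned above is the headroom reserved in front of the packet,
 * e.g. XDP_PACKET_HEADROOM (256 bytes) when an attached XDP program may
 * need to grow headers:
 *
 *   page: [ headroom | packet data ...                | tailroom ]
 *                      ^ bi->page_offset / DMA target
 */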
1820
1821 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
1822 struct igc_rx_buffer *bi)
1823 {
1824 struct page *page = bi->page;
1825 dma_addr_t dma;
1826
1827 /* since we are recycling buffers we should seldom need to alloc */
1828 if (likely(page))
1829 return true;
1830
1831 /* alloc new page for storage */
1832 page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
1833 if (unlikely(!page)) {
1834 rx_ring->rx_stats.alloc_failed++;
1835 return false;
1836 }
1837
1838 /* map page for use */
1839 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1840 igc_rx_pg_size(rx_ring),
1841 DMA_FROM_DEVICE,
1842 IGC_RX_DMA_ATTR);
1843
1844 /* if mapping failed free memory back to system since
1845 * there isn't much point in holding memory we can't use
1846 */
1847 if (dma_mapping_error(rx_ring->dev, dma)) {
1848 __free_page(page);
1849
1850 rx_ring->rx_stats.alloc_failed++;
1851 return false;
1852 }
1853
1854 bi->dma = dma;
1855 bi->page = page;
1856 bi->page_offset = igc_rx_offset(rx_ring);
1857 page_ref_add(page, USHRT_MAX - 1);
1858 bi->pagecnt_bias = USHRT_MAX;
1859
1860 return true;
1861 }
1862
1863 /**
1864 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
1865 * @rx_ring: rx descriptor ring
1866 * @cleaned_count: number of buffers to clean
1867 */
1868 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
1869 {
1870 union igc_adv_rx_desc *rx_desc;
1871 u16 i = rx_ring->next_to_use;
1872 struct igc_rx_buffer *bi;
1873 u16 bufsz;
1874
1875 /* nothing to do */
1876 if (!cleaned_count)
1877 return;
1878
1879 rx_desc = IGC_RX_DESC(rx_ring, i);
1880 bi = &rx_ring->rx_buffer_info[i];
1881 i -= rx_ring->count;
1882
1883 bufsz = igc_rx_bufsz(rx_ring);
1884
1885 do {
1886 if (!igc_alloc_mapped_page(rx_ring, bi))
1887 break;
1888
1889 /* sync the buffer for use by the device */
1890 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1891 bi->page_offset, bufsz,
1892 DMA_FROM_DEVICE);
1893
1894 /* Refresh the desc even if buffer_addrs didn't change
1895 * because each write-back erases this info.
1896 */
1897 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1898
1899 rx_desc++;
1900 bi++;
1901 i++;
1902 if (unlikely(!i)) {
1903 rx_desc = IGC_RX_DESC(rx_ring, 0);
1904 bi = rx_ring->rx_buffer_info;
1905 i -= rx_ring->count;
1906 }
1907
1908 /* clear the length for the next_to_use descriptor */
1909 rx_desc->wb.upper.length = 0;
1910
1911 cleaned_count--;
1912 } while (cleaned_count);
1913
1914 i += rx_ring->count;
1915
1916 if (rx_ring->next_to_use != i) {
1917 /* record the next descriptor to use */
1918 rx_ring->next_to_use = i;
1919
1920 /* update next to alloc since we have filled the ring */
1921 rx_ring->next_to_alloc = i;
1922
1923 /* Force memory writes to complete before letting h/w
1924 * know there are new descriptors to fetch. (Only
1925 * applicable for weak-ordered memory model archs,
1926 * such as IA-64).
1927 */
1928 wmb();
1929 writel(i, rx_ring->tail);
1930 }
1931 }
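
/* Note on the index arithmetic above: "i -= rx_ring->count" biases the
 * u16 index (wrapping as unsigned) so the in-loop wrap test is a cheap
 * "if (!i)" rather than a compare against count. Example with count = 256
 * and next_to_use = 254: i starts at 254 - 256; after two buffers i hits
 * 0, desc/bi rewind to the ring start and i is re-biased; the final
 * "i += rx_ring->count" recovers the real index for next_to_use.
 */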
1932
1933 static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
1934 struct xdp_frame *xdpf,
1935 struct igc_ring *ring)
1936 {
1937 dma_addr_t dma;
1938
1939 dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
1940 if (dma_mapping_error(ring->dev, dma)) {
1941 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
1942 return -ENOMEM;
1943 }
1944
1945 buffer->xdpf = xdpf;
1946 buffer->tx_flags = IGC_TX_FLAGS_XDP;
1947 buffer->protocol = 0;
1948 buffer->bytecount = xdpf->len;
1949 buffer->gso_segs = 1;
1950 buffer->time_stamp = jiffies;
1951 dma_unmap_len_set(buffer, len, xdpf->len);
1952 dma_unmap_addr_set(buffer, dma, dma);
1953 return 0;
1954 }
1955
1956 /* This function requires __netif_tx_lock is held by the caller. */
1957 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
1958 struct xdp_frame *xdpf)
1959 {
1960 struct igc_tx_buffer *buffer;
1961 union igc_adv_tx_desc *desc;
1962 u32 cmd_type, olinfo_status;
1963 int err;
1964
1965 if (!igc_desc_unused(ring))
1966 return -EBUSY;
1967
1968 buffer = &ring->tx_buffer_info[ring->next_to_use];
1969 err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
1970 if (err)
1971 return err;
1972
1973 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
1974 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
1975 buffer->bytecount;
1976 olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
1977
1978 desc = IGC_TX_DESC(ring, ring->next_to_use);
1979 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1980 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1981 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
1982
1983 netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
1984
1985 buffer->next_to_watch = desc;
1986
1987 ring->next_to_use++;
1988 if (ring->next_to_use == ring->count)
1989 ring->next_to_use = 0;
1990
1991 return 0;
1992 }
1993
1994 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
1995 int cpu)
1996 {
1997 int index = cpu;
1998
1999 if (unlikely(index < 0))
2000 index = 0;
2001
2002 while (index >= adapter->num_tx_queues)
2003 index -= adapter->num_tx_queues;
2004
2005 return adapter->tx_ring[index];
2006 }
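
/* Equivalent formulation (illustrative): the subtraction loop above is
 * just a modulo, i.e. index = cpu % adapter->num_tx_queues for a
 * non-negative cpu. With num_tx_queues = 4:
 *
 *   cpu 0 -> tx_ring[0], cpu 5 -> tx_ring[1], cpu 11 -> tx_ring[3]
 */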
2007
2008 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2009 {
2010 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2011 int cpu = smp_processor_id();
2012 struct netdev_queue *nq;
2013 struct igc_ring *ring;
2014 int res;
2015
2016 if (unlikely(!xdpf))
2017 return -EFAULT;
2018
2019 ring = igc_xdp_get_tx_ring(adapter, cpu);
2020 nq = txring_txq(ring);
2021
2022 __netif_tx_lock(nq, cpu);
2023 res = igc_xdp_init_tx_descriptor(ring, xdpf);
2024 __netif_tx_unlock(nq);
2025 return res;
2026 }
2027
2028 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2029 struct xdp_buff *xdp)
2030 {
2031 struct bpf_prog *prog;
2032 int res;
2033 u32 act;
2034
2035 rcu_read_lock();
2036
2037 prog = READ_ONCE(adapter->xdp_prog);
2038 if (!prog) {
2039 res = IGC_XDP_PASS;
2040 goto unlock;
2041 }
2042
2043 act = bpf_prog_run_xdp(prog, xdp);
2044 switch (act) {
2045 case XDP_PASS:
2046 res = IGC_XDP_PASS;
2047 break;
2048 case XDP_TX:
2049 if (igc_xdp_xmit_back(adapter, xdp) < 0)
2050 res = IGC_XDP_CONSUMED;
2051 else
2052 res = IGC_XDP_TX;
2053 break;
2054 case XDP_REDIRECT:
2055 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
2056 res = IGC_XDP_CONSUMED;
2057 else
2058 res = IGC_XDP_REDIRECT;
2059 break;
2060 default:
2061 bpf_warn_invalid_xdp_action(act);
2062 fallthrough;
2063 case XDP_ABORTED:
2064 trace_xdp_exception(adapter->netdev, prog, act);
2065 fallthrough;
2066 case XDP_DROP:
2067 res = IGC_XDP_CONSUMED;
2068 break;
2069 }
2070
2071 unlock:
2072 rcu_read_unlock();
2073 return ERR_PTR(-res);
2074 }
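
/* Return convention sketch: the XDP verdict is folded into the skb
 * pointer as ERR_PTR(-IGC_XDP_*), so the Rx cleanup path can recover it
 * without an extra out-parameter:
 *
 *   skb = igc_xdp_run_prog(adapter, &xdp);
 *   if (IS_ERR(skb))
 *           xdp_res = -PTR_ERR(skb);   (IGC_XDP_TX/REDIRECT/CONSUMED)
 *
 * Since IGC_XDP_PASS is 0, a pass comes back as a NULL skb and falls
 * through to the normal skb construction path in igc_clean_rx_irq().
 */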
2075
2076 /* This function assumes __netif_tx_lock is held by the caller. */
2077 static void igc_flush_tx_descriptors(struct igc_ring *ring)
2078 {
2079 /* Once tail pointer is updated, hardware can fetch the descriptors
2080 * any time so we issue a write memory barrier here to ensure all memory
2081 * writes are complete before the tail pointer is updated.
2082 */
2083 wmb();
2084 writel(ring->next_to_use, ring->tail);
2085 }
2086
2087 static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2088 {
2089 int cpu = smp_processor_id();
2090 struct netdev_queue *nq;
2091 struct igc_ring *ring;
2092
2093 if (status & IGC_XDP_TX) {
2094 ring = igc_xdp_get_tx_ring(adapter, cpu);
2095 nq = txring_txq(ring);
2096
2097 __netif_tx_lock(nq, cpu);
2098 igc_flush_tx_descriptors(ring);
2099 __netif_tx_unlock(nq);
2100 }
2101
2102 if (status & IGC_XDP_REDIRECT)
2103 xdp_do_flush();
2104 }
2105
2106 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2107 {
2108 unsigned int total_bytes = 0, total_packets = 0;
2109 struct igc_adapter *adapter = q_vector->adapter;
2110 struct igc_ring *rx_ring = q_vector->rx.ring;
2111 struct sk_buff *skb = rx_ring->skb;
2112 u16 cleaned_count = igc_desc_unused(rx_ring);
2113 int xdp_status = 0, rx_buffer_pgcnt;
2114
2115 while (likely(total_packets < budget)) {
2116 union igc_adv_rx_desc *rx_desc;
2117 struct igc_rx_buffer *rx_buffer;
2118 unsigned int size, truesize;
2119 ktime_t timestamp = 0;
2120 struct xdp_buff xdp;
2121 int pkt_offset = 0;
2122 void *pktbuf;
2123
2124 /* return some buffers to hardware, one at a time is too slow */
2125 if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
2126 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2127 cleaned_count = 0;
2128 }
2129
2130 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
2131 size = le16_to_cpu(rx_desc->wb.upper.length);
2132 if (!size)
2133 break;
2134
2135 /* This memory barrier is needed to keep us from reading
2136 * any other fields out of the rx_desc until we know the
2137 * descriptor has been written back
2138 */
2139 dma_rmb();
2140
2141 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2142 truesize = igc_get_rx_frame_truesize(rx_ring, size);
2143
2144 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
2145
2146 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
2147 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2148 pktbuf);
2149 pkt_offset = IGC_TS_HDR_LEN;
2150 size -= IGC_TS_HDR_LEN;
2151 }
2152
2153 if (!skb) {
2154 xdp.data = pktbuf + pkt_offset;
2155 xdp.data_end = xdp.data + size;
2156 xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
2157 xdp_set_data_meta_invalid(&xdp);
2158 xdp.frame_sz = truesize;
2159 xdp.rxq = &rx_ring->xdp_rxq;
2160
2161 skb = igc_xdp_run_prog(adapter, &xdp);
2162 }
2163
2164 if (IS_ERR(skb)) {
2165 unsigned int xdp_res = -PTR_ERR(skb);
2166
2167 switch (xdp_res) {
2168 case IGC_XDP_CONSUMED:
2169 rx_buffer->pagecnt_bias++;
2170 break;
2171 case IGC_XDP_TX:
2172 case IGC_XDP_REDIRECT:
2173 igc_rx_buffer_flip(rx_buffer, truesize);
2174 xdp_status |= xdp_res;
2175 break;
2176 }
2177
2178 total_packets++;
2179 total_bytes += size;
2180 } else if (skb)
2181 igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
2182 else if (ring_uses_build_skb(rx_ring))
2183 skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
2184 else
2185 skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
2186 timestamp);
2187
2188 /* exit if we failed to retrieve a buffer */
2189 if (!skb) {
2190 rx_ring->rx_stats.alloc_failed++;
2191 rx_buffer->pagecnt_bias++;
2192 break;
2193 }
2194
2195 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2196 cleaned_count++;
2197
2198 /* fetch next buffer in frame if non-eop */
2199 if (igc_is_non_eop(rx_ring, rx_desc))
2200 continue;
2201
2202 /* verify the packet layout is correct */
2203 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
2204 skb = NULL;
2205 continue;
2206 }
2207
2208 /* probably a little skewed due to removing CRC */
2209 total_bytes += skb->len;
2210
2211 /* populate checksum, VLAN, and protocol */
2212 igc_process_skb_fields(rx_ring, rx_desc, skb);
2213
2214 napi_gro_receive(&q_vector->napi, skb);
2215
2216 /* reset skb pointer */
2217 skb = NULL;
2218
2219 /* update budget accounting */
2220 total_packets++;
2221 }
2222
2223 if (xdp_status)
2224 igc_finalize_xdp(adapter, xdp_status);
2225
2226 /* place incomplete frames back on ring for completion */
2227 rx_ring->skb = skb;
2228
2229 u64_stats_update_begin(&rx_ring->rx_syncp);
2230 rx_ring->rx_stats.packets += total_packets;
2231 rx_ring->rx_stats.bytes += total_bytes;
2232 u64_stats_update_end(&rx_ring->rx_syncp);
2233 q_vector->rx.total_packets += total_packets;
2234 q_vector->rx.total_bytes += total_bytes;
2235
2236 if (cleaned_count)
2237 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2238
2239 return total_packets;
2240 }
2241
2242 /**
2243 * igc_clean_tx_irq - Reclaim resources after transmit completes
2244 * @q_vector: pointer to q_vector containing needed info
2245 * @napi_budget: Used to determine if we are in netpoll
2246 *
2247 * returns true if ring is completely cleaned
2248 */
2249 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2250 {
2251 struct igc_adapter *adapter = q_vector->adapter;
2252 unsigned int total_bytes = 0, total_packets = 0;
2253 unsigned int budget = q_vector->tx.work_limit;
2254 struct igc_ring *tx_ring = q_vector->tx.ring;
2255 unsigned int i = tx_ring->next_to_clean;
2256 struct igc_tx_buffer *tx_buffer;
2257 union igc_adv_tx_desc *tx_desc;
2258
2259 if (test_bit(__IGC_DOWN, &adapter->state))
2260 return true;
2261
2262 tx_buffer = &tx_ring->tx_buffer_info[i];
2263 tx_desc = IGC_TX_DESC(tx_ring, i);
2264 i -= tx_ring->count;
2265
2266 do {
2267 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2268
2269 /* if next_to_watch is not set then there is no work pending */
2270 if (!eop_desc)
2271 break;
2272
2273 /* prevent any other reads prior to eop_desc */
2274 smp_rmb();
2275
2276 /* if DD is not set pending work has not been completed */
2277 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2278 break;
2279
2280 /* clear next_to_watch to prevent false hangs */
2281 tx_buffer->next_to_watch = NULL;
2282
2283 /* update the statistics for this packet */
2284 total_bytes += tx_buffer->bytecount;
2285 total_packets += tx_buffer->gso_segs;
2286
2287 if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
2288 xdp_return_frame(tx_buffer->xdpf);
2289 else
2290 napi_consume_skb(tx_buffer->skb, napi_budget);
2291
2292 /* unmap skb header data */
2293 dma_unmap_single(tx_ring->dev,
2294 dma_unmap_addr(tx_buffer, dma),
2295 dma_unmap_len(tx_buffer, len),
2296 DMA_TO_DEVICE);
2297
2298 /* clear tx_buffer data */
2299 dma_unmap_len_set(tx_buffer, len, 0);
2300
2301 /* clear last DMA location and unmap remaining buffers */
2302 while (tx_desc != eop_desc) {
2303 tx_buffer++;
2304 tx_desc++;
2305 i++;
2306 if (unlikely(!i)) {
2307 i -= tx_ring->count;
2308 tx_buffer = tx_ring->tx_buffer_info;
2309 tx_desc = IGC_TX_DESC(tx_ring, 0);
2310 }
2311
2312 /* unmap any remaining paged data */
2313 if (dma_unmap_len(tx_buffer, len)) {
2314 dma_unmap_page(tx_ring->dev,
2315 dma_unmap_addr(tx_buffer, dma),
2316 dma_unmap_len(tx_buffer, len),
2317 DMA_TO_DEVICE);
2318 dma_unmap_len_set(tx_buffer, len, 0);
2319 }
2320 }
2321
2322 /* move us one more past the eop_desc for start of next pkt */
2323 tx_buffer++;
2324 tx_desc++;
2325 i++;
2326 if (unlikely(!i)) {
2327 i -= tx_ring->count;
2328 tx_buffer = tx_ring->tx_buffer_info;
2329 tx_desc = IGC_TX_DESC(tx_ring, 0);
2330 }
2331
2332 /* issue prefetch for next Tx descriptor */
2333 prefetch(tx_desc);
2334
2335 /* update budget accounting */
2336 budget--;
2337 } while (likely(budget));
2338
2339 netdev_tx_completed_queue(txring_txq(tx_ring),
2340 total_packets, total_bytes);
2341
2342 i += tx_ring->count;
2343 tx_ring->next_to_clean = i;
2344 u64_stats_update_begin(&tx_ring->tx_syncp);
2345 tx_ring->tx_stats.bytes += total_bytes;
2346 tx_ring->tx_stats.packets += total_packets;
2347 u64_stats_update_end(&tx_ring->tx_syncp);
2348 q_vector->tx.total_bytes += total_bytes;
2349 q_vector->tx.total_packets += total_packets;
2350
2351 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
2352 struct igc_hw *hw = &adapter->hw;
2353
2354 /* Detect a transmit hang in hardware; this serializes the
2355 * check with the clearing of time_stamp and movement of i
2356 */
2357 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2358 if (tx_buffer->next_to_watch &&
2359 time_after(jiffies, tx_buffer->time_stamp +
2360 (adapter->tx_timeout_factor * HZ)) &&
2361 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
2362 /* detected Tx unit hang */
2363 netdev_err(tx_ring->netdev,
2364 "Detected Tx Unit Hang\n"
2365 " Tx Queue <%d>\n"
2366 " TDH <%x>\n"
2367 " TDT <%x>\n"
2368 " next_to_use <%x>\n"
2369 " next_to_clean <%x>\n"
2370 "buffer_info[next_to_clean]\n"
2371 " time_stamp <%lx>\n"
2372 " next_to_watch <%p>\n"
2373 " jiffies <%lx>\n"
2374 " desc.status <%x>\n",
2375 tx_ring->queue_index,
2376 rd32(IGC_TDH(tx_ring->reg_idx)),
2377 readl(tx_ring->tail),
2378 tx_ring->next_to_use,
2379 tx_ring->next_to_clean,
2380 tx_buffer->time_stamp,
2381 tx_buffer->next_to_watch,
2382 jiffies,
2383 tx_buffer->next_to_watch->wb.status);
2384 netif_stop_subqueue(tx_ring->netdev,
2385 tx_ring->queue_index);
2386
2387 /* we are about to reset, no point in enabling stuff */
2388 return true;
2389 }
2390 }
2391
2392 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
2393 if (unlikely(total_packets &&
2394 netif_carrier_ok(tx_ring->netdev) &&
2395 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
2396 /* Make sure that anybody stopping the queue after this
2397 * sees the new next_to_clean.
2398 */
2399 smp_mb();
2400 if (__netif_subqueue_stopped(tx_ring->netdev,
2401 tx_ring->queue_index) &&
2402 !(test_bit(__IGC_DOWN, &adapter->state))) {
2403 netif_wake_subqueue(tx_ring->netdev,
2404 tx_ring->queue_index);
2405
2406 u64_stats_update_begin(&tx_ring->tx_syncp);
2407 tx_ring->tx_stats.restart_queue++;
2408 u64_stats_update_end(&tx_ring->tx_syncp);
2409 }
2410 }
2411
2412 return !!budget;
2413 }
2414
2415 static int igc_find_mac_filter(struct igc_adapter *adapter,
2416 enum igc_mac_filter_type type, const u8 *addr)
2417 {
2418 struct igc_hw *hw = &adapter->hw;
2419 int max_entries = hw->mac.rar_entry_count;
2420 u32 ral, rah;
2421 int i;
2422
2423 for (i = 0; i < max_entries; i++) {
2424 ral = rd32(IGC_RAL(i));
2425 rah = rd32(IGC_RAH(i));
2426
2427 if (!(rah & IGC_RAH_AV))
2428 continue;
2429 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
2430 continue;
2431 if ((rah & IGC_RAH_RAH_MASK) !=
2432 le16_to_cpup((__le16 *)(addr + 4)))
2433 continue;
2434 if (ral != le32_to_cpup((__le32 *)(addr)))
2435 continue;
2436
2437 return i;
2438 }
2439
2440 return -1;
2441 }
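
/* Address packing sketch (assuming the usual RAL/RAH split, low 32 bits
 * of the address in RAL, high 16 bits in RAH): for 00:1b:21:aa:bb:cc the
 * comparison above matches when
 *
 *   RAL == 0xaa211b00                    (le32 of addr[0..3])
 *   (RAH & IGC_RAH_RAH_MASK) == 0xccbb   (le16 of addr[4..5])
 */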
2442
2443 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
2444 {
2445 struct igc_hw *hw = &adapter->hw;
2446 int max_entries = hw->mac.rar_entry_count;
2447 u32 rah;
2448 int i;
2449
2450 for (i = 0; i < max_entries; i++) {
2451 rah = rd32(IGC_RAH(i));
2452
2453 if (!(rah & IGC_RAH_AV))
2454 return i;
2455 }
2456
2457 return -1;
2458 }
2459
2460 /**
2461 * igc_add_mac_filter() - Add MAC address filter
2462 * @adapter: Pointer to adapter where the filter should be added
2463 * @type: MAC address filter type (source or destination)
2464 * @addr: MAC address
2465 * @queue: If non-negative, queue assignment feature is enabled and frames
2466 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2467 * assignment is disabled.
2468 *
2469 * Return: 0 in case of success, negative errno code otherwise.
2470 */
2471 static int igc_add_mac_filter(struct igc_adapter *adapter,
2472 enum igc_mac_filter_type type, const u8 *addr,
2473 int queue)
2474 {
2475 struct net_device *dev = adapter->netdev;
2476 int index;
2477
2478 index = igc_find_mac_filter(adapter, type, addr);
2479 if (index >= 0)
2480 goto update_filter;
2481
2482 index = igc_get_avail_mac_filter_slot(adapter);
2483 if (index < 0)
2484 return -ENOSPC;
2485
2486 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
2487 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2488 addr, queue);
2489
2490 update_filter:
2491 igc_set_mac_filter_hw(adapter, index, type, addr, queue);
2492 return 0;
2493 }
2494
2495 /**
2496 * igc_del_mac_filter() - Delete MAC address filter
2497 * @adapter: Pointer to adapter where the filter should be deleted from
2498 * @type: MAC address filter type (source or destination)
2499 * @addr: MAC address
2500 */
2501 static void igc_del_mac_filter(struct igc_adapter *adapter,
2502 enum igc_mac_filter_type type, const u8 *addr)
2503 {
2504 struct net_device *dev = adapter->netdev;
2505 int index;
2506
2507 index = igc_find_mac_filter(adapter, type, addr);
2508 if (index < 0)
2509 return;
2510
2511 if (index == 0) {
2512 /* If this is the default filter, we don't actually delete it.
2513 * We just reset it to its default value, i.e. disable queue
2514 * assignment.
2515 */
2516 netdev_dbg(dev, "Disable default MAC filter queue assignment");
2517
2518 igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
2519 } else {
2520 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
2521 index,
2522 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2523 addr);
2524
2525 igc_clear_mac_filter_hw(adapter, index);
2526 }
2527 }
2528
2529 /**
2530 * igc_add_vlan_prio_filter() - Add VLAN priority filter
2531 * @adapter: Pointer to adapter where the filter should be added
2532 * @prio: VLAN priority value
2533 * @queue: Queue number which matching frames are assigned to
2534 *
2535 * Return: 0 in case of success, negative errno code otherwise.
2536 */
2537 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
2538 int queue)
2539 {
2540 struct net_device *dev = adapter->netdev;
2541 struct igc_hw *hw = &adapter->hw;
2542 u32 vlanpqf;
2543
2544 vlanpqf = rd32(IGC_VLANPQF);
2545
2546 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
2547 netdev_dbg(dev, "VLAN priority filter already in use\n");
2548 return -EEXIST;
2549 }
2550
2551 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
2552 vlanpqf |= IGC_VLANPQF_VALID(prio);
2553
2554 wr32(IGC_VLANPQF, vlanpqf);
2555
2556 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
2557 prio, queue);
2558 return 0;
2559 }
2560
2561 /**
2562 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
2563 * @adapter: Pointer to adapter where the filter should be deleted from
2564 * @prio: VLAN priority value
2565 */
2566 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
2567 {
2568 struct igc_hw *hw = &adapter->hw;
2569 u32 vlanpqf;
2570
2571 vlanpqf = rd32(IGC_VLANPQF);
2572
2573 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
2574 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
2575
2576 wr32(IGC_VLANPQF, vlanpqf);
2577
2578 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
2579 prio);
2580 }
2581
2582 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2583 {
2584 struct igc_hw *hw = &adapter->hw;
2585 int i;
2586
2587 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2588 u32 etqf = rd32(IGC_ETQF(i));
2589
2590 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
2591 return i;
2592 }
2593
2594 return -1;
2595 }
2596
2597 /**
2598 * igc_add_etype_filter() - Add ethertype filter
2599 * @adapter: Pointer to adapter where the filter should be added
2600 * @etype: Ethertype value
2601 * @queue: If non-negative, queue assignment feature is enabled and frames
2602 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2603 * assignment is disabled.
2604 *
2605 * Return: 0 in case of success, negative errno code otherwise.
2606 */
2607 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
2608 int queue)
2609 {
2610 struct igc_hw *hw = &adapter->hw;
2611 int index;
2612 u32 etqf;
2613
2614 index = igc_get_avail_etype_filter_slot(adapter);
2615 if (index < 0)
2616 return -ENOSPC;
2617
2618 etqf = rd32(IGC_ETQF(index));
2619
2620 etqf &= ~IGC_ETQF_ETYPE_MASK;
2621 etqf |= etype;
2622
2623 if (queue >= 0) {
2624 etqf &= ~IGC_ETQF_QUEUE_MASK;
2625 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
2626 etqf |= IGC_ETQF_QUEUE_ENABLE;
2627 }
2628
2629 etqf |= IGC_ETQF_FILTER_ENABLE;
2630
2631 wr32(IGC_ETQF(index), etqf);
2632
2633 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
2634 etype, queue);
2635 return 0;
2636 }
2637
2638 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
2639 {
2640 struct igc_hw *hw = &adapter->hw;
2641 int i;
2642
2643 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2644 u32 etqf = rd32(IGC_ETQF(i));
2645
2646 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
2647 return i;
2648 }
2649
2650 return -1;
2651 }
2652
2653 /**
2654 * igc_del_etype_filter() - Delete ethertype filter
2655 * @adapter: Pointer to adapter where the filter should be deleted from
2656 * @etype: Ethertype value
2657 */
2658 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
2659 {
2660 struct igc_hw *hw = &adapter->hw;
2661 int index;
2662
2663 index = igc_find_etype_filter(adapter, etype);
2664 if (index < 0)
2665 return;
2666
2667 wr32(IGC_ETQF(index), 0);
2668
2669 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
2670 etype);
2671 }
2672
2673 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
2674 const struct igc_nfc_rule *rule)
2675 {
2676 int err;
2677
2678 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
2679 err = igc_add_etype_filter(adapter, rule->filter.etype,
2680 rule->action);
2681 if (err)
2682 return err;
2683 }
2684
2685 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
2686 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2687 rule->filter.src_addr, rule->action);
2688 if (err)
2689 return err;
2690 }
2691
2692 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
2693 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2694 rule->filter.dst_addr, rule->action);
2695 if (err)
2696 return err;
2697 }
2698
2699 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2700 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2701 VLAN_PRIO_SHIFT;
2702
2703 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
2704 if (err)
2705 return err;
2706 }
2707
2708 return 0;
2709 }
2710
2711 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
2712 const struct igc_nfc_rule *rule)
2713 {
2714 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
2715 igc_del_etype_filter(adapter, rule->filter.etype);
2716
2717 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2718 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2719 VLAN_PRIO_SHIFT;
2720
2721 igc_del_vlan_prio_filter(adapter, prio);
2722 }
2723
2724 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
2725 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2726 rule->filter.src_addr);
2727
2728 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
2729 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2730 rule->filter.dst_addr);
2731 }
2732
2733 /**
2734 * igc_get_nfc_rule() - Get NFC rule
2735 * @adapter: Pointer to adapter
2736 * @location: Rule location
2737 *
2738 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2739 *
2740 * Return: Pointer to NFC rule at @location. If not found, NULL.
2741 */
2742 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
2743 u32 location)
2744 {
2745 struct igc_nfc_rule *rule;
2746
2747 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
2748 if (rule->location == location)
2749 return rule;
2750 if (rule->location > location)
2751 break;
2752 }
2753
2754 return NULL;
2755 }
2756
2757 /**
2758 * igc_del_nfc_rule() - Delete NFC rule
2759 * @adapter: Pointer to adapter
2760 * @rule: Pointer to rule to be deleted
2761 *
2762 * Disable NFC rule in hardware and delete it from adapter.
2763 *
2764 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2765 */
2766 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2767 {
2768 igc_disable_nfc_rule(adapter, rule);
2769
2770 list_del(&rule->list);
2771 adapter->nfc_rule_count--;
2772
2773 kfree(rule);
2774 }
2775
2776 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
2777 {
2778 struct igc_nfc_rule *rule, *tmp;
2779
2780 mutex_lock(&adapter->nfc_rule_lock);
2781
2782 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
2783 igc_del_nfc_rule(adapter, rule);
2784
2785 mutex_unlock(&adapter->nfc_rule_lock);
2786 }
2787
2788 /**
2789 * igc_add_nfc_rule() - Add NFC rule
2790 * @adapter: Pointer to adapter
2791 * @rule: Pointer to rule to be added
2792 *
2793 * Enable NFC rule in hardware and add it to adapter.
2794 *
2795 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2796 *
2797 * Return: 0 on success, negative errno on failure.
2798 */
2799 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2800 {
2801 struct igc_nfc_rule *pred, *cur;
2802 int err;
2803
2804 err = igc_enable_nfc_rule(adapter, rule);
2805 if (err)
2806 return err;
2807
2808 pred = NULL;
2809 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
2810 if (cur->location >= rule->location)
2811 break;
2812 pred = cur;
2813 }
2814
2815 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
2816 adapter->nfc_rule_count++;
2817 return 0;
2818 }
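
/* Ordering sketch: the walk above keeps nfc_rule_list sorted by
 * ascending location. Inserting location 20 into [10, 30, 40] stops at
 * 30 with pred at 10, and list_add() yields [10, 20, 30, 40]; this is
 * what lets igc_get_nfc_rule() bail out early once rule->location
 * exceeds the requested location.
 */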
2819
2820 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
2821 {
2822 struct igc_nfc_rule *rule;
2823
2824 mutex_lock(&adapter->nfc_rule_lock);
2825
2826 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
2827 igc_enable_nfc_rule(adapter, rule);
2828
2829 mutex_unlock(&adapter->nfc_rule_lock);
2830 }
2831
2832 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
2833 {
2834 struct igc_adapter *adapter = netdev_priv(netdev);
2835
2836 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
2837 }
2838
2839 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
2840 {
2841 struct igc_adapter *adapter = netdev_priv(netdev);
2842
2843 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
2844 return 0;
2845 }
2846
2847 /**
2848 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2849 * @netdev: network interface device structure
2850 *
2851 * The set_rx_mode entry point is called whenever the unicast or multicast
2852 * address lists or the network interface flags are updated. This routine is
2853 * responsible for configuring the hardware for proper unicast, multicast,
2854 * promiscuous mode, and all-multi behavior.
2855 */
2856 static void igc_set_rx_mode(struct net_device *netdev)
2857 {
2858 struct igc_adapter *adapter = netdev_priv(netdev);
2859 struct igc_hw *hw = &adapter->hw;
2860 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
2861 int count;
2862
2863 /* Check for Promiscuous and All Multicast modes */
2864 if (netdev->flags & IFF_PROMISC) {
2865 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
2866 } else {
2867 if (netdev->flags & IFF_ALLMULTI) {
2868 rctl |= IGC_RCTL_MPE;
2869 } else {
2870 /* Write addresses to the MTA; if the attempt fails
2871 * then we should just turn on promiscuous mode so
2872 * that we can at least receive multicast traffic
2873 */
2874 count = igc_write_mc_addr_list(netdev);
2875 if (count < 0)
2876 rctl |= IGC_RCTL_MPE;
2877 }
2878 }
2879
2880 /* Write addresses to available RAR registers, if there is not
2881 * sufficient space to store all the addresses then enable
2882 * unicast promiscuous mode
2883 */
2884 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
2885 rctl |= IGC_RCTL_UPE;
2886
2887 /* update state of unicast and multicast */
2888 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
2889 wr32(IGC_RCTL, rctl);
2890
2891 #if (PAGE_SIZE < 8192)
2892 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
2893 rlpml = IGC_MAX_FRAME_BUILD_SKB;
2894 #endif
2895 wr32(IGC_RLPML, rlpml);
2896 }
2897
2898 /**
2899 * igc_configure - configure the hardware for RX and TX
2900 * @adapter: private board structure
2901 */
2902 static void igc_configure(struct igc_adapter *adapter)
2903 {
2904 struct net_device *netdev = adapter->netdev;
2905 int i = 0;
2906
2907 igc_get_hw_control(adapter);
2908 igc_set_rx_mode(netdev);
2909
2910 igc_setup_tctl(adapter);
2911 igc_setup_mrqc(adapter);
2912 igc_setup_rctl(adapter);
2913
2914 igc_set_default_mac_filter(adapter);
2915 igc_restore_nfc_rules(adapter);
2916
2917 igc_configure_tx(adapter);
2918 igc_configure_rx(adapter);
2919
2920 igc_rx_fifo_flush_base(&adapter->hw);
2921
2922 /* call igc_desc_unused which always leaves
2923 * at least 1 descriptor unused to make sure
2924 * next_to_use != next_to_clean
2925 */
2926 for (i = 0; i < adapter->num_rx_queues; i++) {
2927 struct igc_ring *ring = adapter->rx_ring[i];
2928
2929 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
2930 }
2931 }
2932
2933 /**
2934 * igc_write_ivar - configure ivar for given MSI-X vector
2935 * @hw: pointer to the HW structure
2936 * @msix_vector: vector number we are allocating to a given ring
2937 * @index: row index of IVAR register to write within IVAR table
2938 * @offset: column offset in IVAR, should be a multiple of 8
2939 *
2940 * The IVAR table consists of 2 columns,
2941 * each containing a cause allocation for an Rx and Tx ring, and a
2942 * variable number of rows depending on the number of queues supported.
2943 */
2944 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
2945 int index, int offset)
2946 {
2947 u32 ivar = array_rd32(IGC_IVAR0, index);
2948
2949 /* clear any bits that are currently set */
2950 ivar &= ~((u32)0xFF << offset);
2951
2952 /* write vector and valid bit */
2953 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
2954
2955 array_wr32(IGC_IVAR0, index, ivar);
2956 }
2957
2958 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
2959 {
2960 struct igc_adapter *adapter = q_vector->adapter;
2961 struct igc_hw *hw = &adapter->hw;
2962 int rx_queue = IGC_N0_QUEUE;
2963 int tx_queue = IGC_N0_QUEUE;
2964
2965 if (q_vector->rx.ring)
2966 rx_queue = q_vector->rx.ring->reg_idx;
2967 if (q_vector->tx.ring)
2968 tx_queue = q_vector->tx.ring->reg_idx;
2969
2970 switch (hw->mac.type) {
2971 case igc_i225:
2972 if (rx_queue > IGC_N0_QUEUE)
2973 igc_write_ivar(hw, msix_vector,
2974 rx_queue >> 1,
2975 (rx_queue & 0x1) << 4);
2976 if (tx_queue > IGC_N0_QUEUE)
2977 igc_write_ivar(hw, msix_vector,
2978 tx_queue >> 1,
2979 ((tx_queue & 0x1) << 4) + 8);
2980 q_vector->eims_value = BIT(msix_vector);
2981 break;
2982 default:
2983 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
2984 break;
2985 }
2986
2987 /* add q_vector eims value to global eims_enable_mask */
2988 adapter->eims_enable_mask |= q_vector->eims_value;
2989
2990 /* configure q_vector to set itr on first interrupt */
2991 q_vector->set_itr = 1;
2992 }
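
/* Worked example (derived from the shifts above): each IVAR row packs
 * two queues, one byte per interrupt cause. For rx_queue = 3 the vector
 * is written at row 3 >> 1 = 1, bit offset (3 & 1) << 4 = 16; the Tx
 * cause for tx_queue = 3 lands in the same row at offset 16 + 8 = 24.
 */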
2993
2994 /**
2995 * igc_configure_msix - Configure MSI-X hardware
2996 * @adapter: Pointer to adapter structure
2997 *
2998 * igc_configure_msix sets up the hardware to properly
2999 * generate MSI-X interrupts.
3000 */
3001 static void igc_configure_msix(struct igc_adapter *adapter)
3002 {
3003 struct igc_hw *hw = &adapter->hw;
3004 int i, vector = 0;
3005 u32 tmp;
3006
3007 adapter->eims_enable_mask = 0;
3008
3009 /* set vector for other causes, i.e. link changes */
3010 switch (hw->mac.type) {
3011 case igc_i225:
3012 /* Turn on MSI-X capability first, or our settings
3013 * won't stick. And it will take days to debug.
3014 */
3015 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
3016 IGC_GPIE_PBA | IGC_GPIE_EIAME |
3017 IGC_GPIE_NSICR);
3018
3019 /* enable msix_other interrupt */
3020 adapter->eims_other = BIT(vector);
3021 tmp = (vector++ | IGC_IVAR_VALID) << 8;
3022
3023 wr32(IGC_IVAR_MISC, tmp);
3024 break;
3025 default:
3026 /* do nothing, since nothing else supports MSI-X */
3027 break;
3028 } /* switch (hw->mac.type) */
3029
3030 adapter->eims_enable_mask |= adapter->eims_other;
3031
3032 for (i = 0; i < adapter->num_q_vectors; i++)
3033 igc_assign_vector(adapter->q_vector[i], vector++);
3034
3035 wrfl();
3036 }
3037
3038 /**
3039 * igc_irq_enable - Enable default interrupt generation settings
3040 * @adapter: board private structure
3041 */
3042 static void igc_irq_enable(struct igc_adapter *adapter)
3043 {
3044 struct igc_hw *hw = &adapter->hw;
3045
3046 if (adapter->msix_entries) {
3047 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
3048 u32 regval = rd32(IGC_EIAC);
3049
3050 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
3051 regval = rd32(IGC_EIAM);
3052 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
3053 wr32(IGC_EIMS, adapter->eims_enable_mask);
3054 wr32(IGC_IMS, ims);
3055 } else {
3056 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3057 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3058 }
3059 }
3060
3061 /**
3062 * igc_irq_disable - Mask off interrupt generation on the NIC
3063 * @adapter: board private structure
3064 */
3065 static void igc_irq_disable(struct igc_adapter *adapter)
3066 {
3067 struct igc_hw *hw = &adapter->hw;
3068
3069 if (adapter->msix_entries) {
3070 u32 regval = rd32(IGC_EIAM);
3071
3072 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
3073 wr32(IGC_EIMC, adapter->eims_enable_mask);
3074 regval = rd32(IGC_EIAC);
3075 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
3076 }
3077
3078 wr32(IGC_IAM, 0);
3079 wr32(IGC_IMC, ~0);
3080 wrfl();
3081
3082 if (adapter->msix_entries) {
3083 int vector = 0, i;
3084
3085 synchronize_irq(adapter->msix_entries[vector++].vector);
3086
3087 for (i = 0; i < adapter->num_q_vectors; i++)
3088 synchronize_irq(adapter->msix_entries[vector++].vector);
3089 } else {
3090 synchronize_irq(adapter->pdev->irq);
3091 }
3092 }
3093
3094 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
3095 const u32 max_rss_queues)
3096 {
3097 /* Determine if we need to pair queues. If rss_queues > half of
3098 * max_rss_queues, pair the queues in order to conserve interrupts
3099 * due to limited supply.
3100 */
3101 if (adapter->rss_queues > (max_rss_queues / 2))
3102 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3103 else
3104 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
3105 }
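
/* Example (values illustrative): with max_rss_queues = 4, requesting 1
 * or 2 RSS queues leaves Tx and Rx on separate vectors, while 3 or 4
 * sets IGC_FLAG_QUEUE_PAIRS so one vector services a Tx/Rx pair and the
 * MSI-X vector demand is roughly halved.
 */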
3106
3107 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
3108 {
3109 return IGC_MAX_RX_QUEUES;
3110 }
3111
3112 static void igc_init_queue_configuration(struct igc_adapter *adapter)
3113 {
3114 u32 max_rss_queues;
3115
3116 max_rss_queues = igc_get_max_rss_queues(adapter);
3117 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3118
3119 igc_set_flag_queue_pairs(adapter, max_rss_queues);
3120 }
3121
3122 /**
3123 * igc_reset_q_vector - Reset config for interrupt vector
3124 * @adapter: board private structure to initialize
3125 * @v_idx: Index of vector to be reset
3126 *
3127 * If NAPI is enabled it will delete any references to the
3128 * NAPI struct. This is preparation for igc_free_q_vector.
3129 */
3130 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
3131 {
3132 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
3133
3134 /* if we're coming from igc_set_interrupt_capability, the vectors are
3135 * not yet allocated
3136 */
3137 if (!q_vector)
3138 return;
3139
3140 if (q_vector->tx.ring)
3141 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
3142
3143 if (q_vector->rx.ring)
3144 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
3145
3146 netif_napi_del(&q_vector->napi);
3147 }
3148
3149 /**
3150 * igc_free_q_vector - Free memory allocated for specific interrupt vector
3151 * @adapter: board private structure to initialize
3152 * @v_idx: Index of vector to be freed
3153 *
3154 * This function frees the memory allocated to the q_vector.
3155 */
3156 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
3157 {
3158 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
3159
3160 adapter->q_vector[v_idx] = NULL;
3161
3162 /* igc_get_stats64() might access the rings on this vector,
3163 * so we must wait a grace period before freeing it.
3164 */
3165 if (q_vector)
3166 kfree_rcu(q_vector, rcu);
3167 }
3168
3169 /**
3170 * igc_free_q_vectors - Free memory allocated for interrupt vectors
3171 * @adapter: board private structure to initialize
3172 *
3173 * This function frees the memory allocated to the q_vectors. In addition if
3174 * NAPI is enabled it will delete any references to the NAPI struct prior
3175 * to freeing the q_vector.
3176 */
3177 static void igc_free_q_vectors(struct igc_adapter *adapter)
3178 {
3179 int v_idx = adapter->num_q_vectors;
3180
3181 adapter->num_tx_queues = 0;
3182 adapter->num_rx_queues = 0;
3183 adapter->num_q_vectors = 0;
3184
3185 while (v_idx--) {
3186 igc_reset_q_vector(adapter, v_idx);
3187 igc_free_q_vector(adapter, v_idx);
3188 }
3189 }
3190
3191 /**
3192 * igc_update_itr - update the dynamic ITR value based on statistics
3193 * @q_vector: pointer to q_vector
3194 * @ring_container: ring info to update the itr for
3195 *
3196 * Stores a new ITR value based on packets and byte
3197 * counts during the last interrupt. The advantage of per interrupt
3198 * computation is faster updates and more accurate ITR for the current
3199 * traffic pattern. Constants in this function were computed
3200 * based on theoretical maximum wire speed and thresholds were set based
3201 * on testing data as well as attempting to minimize response time
3202 * while increasing bulk throughput.
3203 * NOTE: These calculations are only valid when operating in a single-
3204 * queue environment.
3205 */
3206 static void igc_update_itr(struct igc_q_vector *q_vector,
3207 struct igc_ring_container *ring_container)
3208 {
3209 unsigned int packets = ring_container->total_packets;
3210 unsigned int bytes = ring_container->total_bytes;
3211 u8 itrval = ring_container->itr;
3212
3213 /* no packets, exit with status unchanged */
3214 if (packets == 0)
3215 return;
3216
3217 switch (itrval) {
3218 case lowest_latency:
3219 /* handle TSO and jumbo frames */
3220 if (bytes / packets > 8000)
3221 itrval = bulk_latency;
3222 else if ((packets < 5) && (bytes > 512))
3223 itrval = low_latency;
3224 break;
3225 case low_latency: /* 50 usec aka 20000 ints/s */
3226 if (bytes > 10000) {
3227 /* this if handles the TSO accounting */
3228 if (bytes / packets > 8000)
3229 itrval = bulk_latency;
3230 else if ((packets < 10) || ((bytes / packets) > 1200))
3231 itrval = bulk_latency;
3232 else if ((packets > 35))
3233 itrval = lowest_latency;
3234 } else if (bytes / packets > 2000) {
3235 itrval = bulk_latency;
3236 } else if (packets <= 2 && bytes < 512) {
3237 itrval = lowest_latency;
3238 }
3239 break;
3240 case bulk_latency: /* 250 usec aka 4000 ints/s */
3241 if (bytes > 25000) {
3242 if (packets > 35)
3243 itrval = low_latency;
3244 } else if (bytes < 1500) {
3245 itrval = low_latency;
3246 }
3247 break;
3248 }
3249
3250 /* clear work counters since we have the values we need */
3251 ring_container->total_bytes = 0;
3252 ring_container->total_packets = 0;
3253
3254 /* write updated itr to ring container */
3255 ring_container->itr = itrval;
3256 }
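
/* Worked example (traffic numbers assumed): from low_latency, an
 * interval of 100 packets totalling 900000 bytes gives 9000 bytes/packet
 * (> 8000), so the TSO/jumbo test promotes the container to
 * bulk_latency; 40 packets totalling 20000 bytes (500 bytes/packet)
 * would instead hit the "packets > 35" test and drop to lowest_latency.
 */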
3257
3258 static void igc_set_itr(struct igc_q_vector *q_vector)
3259 {
3260 struct igc_adapter *adapter = q_vector->adapter;
3261 u32 new_itr = q_vector->itr_val;
3262 u8 current_itr = 0;
3263
3264 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3265 switch (adapter->link_speed) {
3266 case SPEED_10:
3267 case SPEED_100:
3268 current_itr = 0;
3269 new_itr = IGC_4K_ITR;
3270 goto set_itr_now;
3271 default:
3272 break;
3273 }
3274
3275 igc_update_itr(q_vector, &q_vector->tx);
3276 igc_update_itr(q_vector, &q_vector->rx);
3277
3278 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
3279
3280 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3281 if (current_itr == lowest_latency &&
3282 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3283 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3284 current_itr = low_latency;
3285
3286 switch (current_itr) {
3287 /* counts and packets in update_itr are dependent on these numbers */
3288 case lowest_latency:
3289 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
3290 break;
3291 case low_latency:
3292 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
3293 break;
3294 case bulk_latency:
3295 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
3296 break;
3297 default:
3298 break;
3299 }
3300
3301 set_itr_now:
3302 if (new_itr != q_vector->itr_val) {
3303 /* this attempts to bias the interrupt rate towards Bulk
3304 * by adding intermediate steps when interrupt rate is
3305 * increasing
3306 */
3307 new_itr = new_itr > q_vector->itr_val ?
3308 max((new_itr * q_vector->itr_val) /
3309 (new_itr + (q_vector->itr_val >> 2)),
3310 new_itr) : new_itr;
3311 /* Don't write the value here; it resets the adapter's
3312 * internal timer, and causes us to delay far longer than
3313 * we should between interrupts. Instead, we write the ITR
3314 * value at the beginning of the next interrupt so the timing
3315 * ends up being correct.
3316 */
3317 q_vector->itr_val = new_itr;
3318 q_vector->set_itr = 1;
3319 }
3320 }
3321
3322 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
3323 {
3324 int v_idx = adapter->num_q_vectors;
3325
3326 if (adapter->msix_entries) {
3327 pci_disable_msix(adapter->pdev);
3328 kfree(adapter->msix_entries);
3329 adapter->msix_entries = NULL;
3330 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
3331 pci_disable_msi(adapter->pdev);
3332 }
3333
3334 while (v_idx--)
3335 igc_reset_q_vector(adapter, v_idx);
3336 }
3337
3338 /**
3339 * igc_set_interrupt_capability - set MSI or MSI-X if supported
3340 * @adapter: Pointer to adapter structure
3341 * @msix: boolean value for MSI-X capability
3342 *
3343 * Attempt to configure interrupts using the best available
3344 * capabilities of the hardware and kernel.
3345 */
3346 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3347 bool msix)
3348 {
3349 int numvecs, i;
3350 int err;
3351
3352 if (!msix)
3353 goto msi_only;
3354 adapter->flags |= IGC_FLAG_HAS_MSIX;
3355
3356 /* Number of supported queues. */
3357 adapter->num_rx_queues = adapter->rss_queues;
3358
3359 adapter->num_tx_queues = adapter->rss_queues;
3360
3361 /* start with one vector for every Rx queue */
3362 numvecs = adapter->num_rx_queues;
3363
3364 /* if Tx handler is separate add 1 for every Tx queue */
3365 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3366 numvecs += adapter->num_tx_queues;
3367
3368 /* store the number of vectors reserved for queues */
3369 adapter->num_q_vectors = numvecs;
3370
3371 /* add 1 vector for link status interrupts */
3372 numvecs++;
3373
3374 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3375 GFP_KERNEL);
3376
3377 if (!adapter->msix_entries)
3378 return;
3379
3380 /* populate entry values */
3381 for (i = 0; i < numvecs; i++)
3382 adapter->msix_entries[i].entry = i;
3383
3384 err = pci_enable_msix_range(adapter->pdev,
3385 adapter->msix_entries,
3386 numvecs,
3387 numvecs);
3388 if (err > 0)
3389 return;
3390
3391 kfree(adapter->msix_entries);
3392 adapter->msix_entries = NULL;
3393
3394 igc_reset_interrupt_capability(adapter);
3395
3396 msi_only:
3397 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3398
3399 adapter->rss_queues = 1;
3400 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3401 adapter->num_rx_queues = 1;
3402 adapter->num_tx_queues = 1;
3403 adapter->num_q_vectors = 1;
3404 if (!pci_enable_msi(adapter->pdev))
3405 adapter->flags |= IGC_FLAG_HAS_MSI;
3406 }
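
/* Vector budget sketch: with rss_queues = 4 and queue pairs enabled,
 * this requests 4 queue vectors plus 1 link vector = 5 MSI-X entries;
 * unpaired, each Tx queue adds a vector for 4 + 4 + 1 = 9. If the full
 * range cannot be granted, the code above falls back to plain MSI with
 * a single paired queue.
 */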
3407
3408 /**
3409 * igc_update_ring_itr - update the dynamic ITR value based on packet size
3410 * @q_vector: pointer to q_vector
3411 *
3412 * Stores a new ITR value based strictly on packet size. This
3413 * algorithm is less sophisticated than that used in igc_update_itr,
3414 * due to the difficulty of synchronizing statistics across multiple
3415 * receive rings. The divisors and thresholds used by this function
3416 * were determined based on theoretical maximum wire speed and testing
3417 * data, in order to minimize response time while increasing bulk
3418 * throughput.
3419 * NOTE: This function is called only when operating in a multiqueue
3420 * receive environment.
3421 */
3422 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
3423 {
3424 struct igc_adapter *adapter = q_vector->adapter;
3425 int new_val = q_vector->itr_val;
3426 int avg_wire_size = 0;
3427 unsigned int packets;
3428
3429 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3430 * ints/sec - ITR timer value of 120 ticks.
3431 */
3432 switch (adapter->link_speed) {
3433 case SPEED_10:
3434 case SPEED_100:
3435 new_val = IGC_4K_ITR;
3436 goto set_itr_val;
3437 default:
3438 break;
3439 }
3440
3441 packets = q_vector->rx.total_packets;
3442 if (packets)
3443 avg_wire_size = q_vector->rx.total_bytes / packets;
3444
3445 packets = q_vector->tx.total_packets;
3446 if (packets)
3447 avg_wire_size = max_t(u32, avg_wire_size,
3448 q_vector->tx.total_bytes / packets);
3449
3450 /* if avg_wire_size isn't set no work was done */
3451 if (!avg_wire_size)
3452 goto clear_counts;
3453
3454 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3455 avg_wire_size += 24;
3456
3457 /* Don't starve jumbo frames */
3458 avg_wire_size = min(avg_wire_size, 3000);
3459
3460 /* Give a little boost to mid-size frames */
3461 if (avg_wire_size > 300 && avg_wire_size < 1200)
3462 new_val = avg_wire_size / 3;
3463 else
3464 new_val = avg_wire_size / 2;
3465
3466 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3467 if (new_val < IGC_20K_ITR &&
3468 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3469 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3470 new_val = IGC_20K_ITR;
3471
3472 set_itr_val:
3473 if (new_val != q_vector->itr_val) {
3474 q_vector->itr_val = new_val;
3475 q_vector->set_itr = 1;
3476 }
3477 clear_counts:
3478 q_vector->rx.total_bytes = 0;
3479 q_vector->rx.total_packets = 0;
3480 q_vector->tx.total_bytes = 0;
3481 q_vector->tx.total_packets = 0;
3482 }
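
/* Worked example (numbers assumed): 100 Rx packets totalling 150000
 * bytes give avg_wire_size = 1500; adding 24 for CRC/preamble/IPG makes
 * 1524, which lies outside the 300..1200 boost window, so the new ITR
 * value is 1524 / 2 = 762.
 */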
3483
3484 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
3485 {
3486 struct igc_adapter *adapter = q_vector->adapter;
3487 struct igc_hw *hw = &adapter->hw;
3488
3489 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
3490 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
3491 if (adapter->num_q_vectors == 1)
3492 igc_set_itr(q_vector);
3493 else
3494 igc_update_ring_itr(q_vector);
3495 }
3496
3497 if (!test_bit(__IGC_DOWN, &adapter->state)) {
3498 if (adapter->msix_entries)
3499 wr32(IGC_EIMS, q_vector->eims_value);
3500 else
3501 igc_irq_enable(adapter);
3502 }
3503 }
3504
3505 static void igc_add_ring(struct igc_ring *ring,
3506 struct igc_ring_container *head)
3507 {
3508 head->ring = ring;
3509 head->count++;
3510 }
3511
3512 /**
3513 * igc_cache_ring_register - Descriptor ring to register mapping
3514 * @adapter: board private structure to initialize
3515 *
3516 * Once we know the feature-set enabled for the device, we'll cache
3517 * the register offset the descriptor ring is assigned to.
3518 */
3519 static void igc_cache_ring_register(struct igc_adapter *adapter)
3520 {
3521 int i = 0, j = 0;
3522
3523 switch (adapter->hw.mac.type) {
3524 case igc_i225:
3525 default:
3526 for (; i < adapter->num_rx_queues; i++)
3527 adapter->rx_ring[i]->reg_idx = i;
3528 for (; j < adapter->num_tx_queues; j++)
3529 adapter->tx_ring[j]->reg_idx = j;
3530 break;
3531 }
3532 }
3533
3534 /**
3535 * igc_poll - NAPI Rx polling callback
3536 * @napi: napi polling structure
3537 * @budget: count of how many packets we should handle
3538 */
3539 static int igc_poll(struct napi_struct *napi, int budget)
3540 {
3541 struct igc_q_vector *q_vector = container_of(napi,
3542 struct igc_q_vector,
3543 napi);
3544 bool clean_complete = true;
3545 int work_done = 0;
3546
3547 if (q_vector->tx.ring)
3548 clean_complete = igc_clean_tx_irq(q_vector, budget);
3549
3550 if (q_vector->rx.ring) {
3551 int cleaned = igc_clean_rx_irq(q_vector, budget);
3552
3553 work_done += cleaned;
3554 if (cleaned >= budget)
3555 clean_complete = false;
3556 }
3557
3558 /* If all work not completed, return budget and keep polling */
3559 if (!clean_complete)
3560 return budget;
3561
3562 /* Exit the polling mode, but don't re-enable interrupts if stack might
3563 * poll us due to busy-polling
3564 */
3565 if (likely(napi_complete_done(napi, work_done)))
3566 igc_ring_irq_enable(q_vector);
3567
3568 return min(work_done, budget - 1);
3569 }
3570
3571 /**
3572 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
3573 * @adapter: board private structure to initialize
3574 * @v_count: q_vectors allocated on adapter, used for ring interleaving
3575 * @v_idx: index of vector in adapter struct
3576 * @txr_count: total number of Tx rings to allocate
3577 * @txr_idx: index of first Tx ring to allocate
3578 * @rxr_count: total number of Rx rings to allocate
3579 * @rxr_idx: index of first Rx ring to allocate
3580 *
3581 * We allocate one q_vector. If allocation fails we return -ENOMEM.
3582 */
3583 static int igc_alloc_q_vector(struct igc_adapter *adapter,
3584 unsigned int v_count, unsigned int v_idx,
3585 unsigned int txr_count, unsigned int txr_idx,
3586 unsigned int rxr_count, unsigned int rxr_idx)
3587 {
3588 struct igc_q_vector *q_vector;
3589 struct igc_ring *ring;
3590 int ring_count;
3591
3592 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
3593 if (txr_count > 1 || rxr_count > 1)
3594 return -ENOMEM;
3595
3596 ring_count = txr_count + rxr_count;
3597
3598 /* allocate q_vector and rings */
3599 q_vector = adapter->q_vector[v_idx];
3600 if (!q_vector)
3601 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
3602 GFP_KERNEL);
3603 else
3604 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
3605 if (!q_vector)
3606 return -ENOMEM;
3607
3608 /* initialize NAPI */
3609 netif_napi_add(adapter->netdev, &q_vector->napi,
3610 igc_poll, 64);
3611
3612 /* tie q_vector and adapter together */
3613 adapter->q_vector[v_idx] = q_vector;
3614 q_vector->adapter = adapter;
3615
3616 /* initialize work limits */
3617 q_vector->tx.work_limit = adapter->tx_work_limit;
3618
3619 /* initialize ITR configuration */
3620 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
3621 q_vector->itr_val = IGC_START_ITR;
3622
3623 /* initialize pointer to rings */
3624 ring = q_vector->ring;
3625
3626 /* initialize ITR */
3627 if (rxr_count) {
3628 /* rx or rx/tx vector */
3629 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
3630 q_vector->itr_val = adapter->rx_itr_setting;
3631 } else {
3632 /* tx only vector */
3633 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
3634 q_vector->itr_val = adapter->tx_itr_setting;
3635 }
3636
3637 if (txr_count) {
3638 /* assign generic ring traits */
3639 ring->dev = &adapter->pdev->dev;
3640 ring->netdev = adapter->netdev;
3641
3642 /* configure backlink on ring */
3643 ring->q_vector = q_vector;
3644
3645 /* update q_vector Tx values */
3646 igc_add_ring(ring, &q_vector->tx);
3647
3648 /* apply Tx specific ring traits */
3649 ring->count = adapter->tx_ring_count;
3650 ring->queue_index = txr_idx;
3651
3652 /* assign ring to adapter */
3653 adapter->tx_ring[txr_idx] = ring;
3654
3655 /* push pointer to next ring */
3656 ring++;
3657 }
3658
3659 if (rxr_count) {
3660 /* assign generic ring traits */
3661 ring->dev = &adapter->pdev->dev;
3662 ring->netdev = adapter->netdev;
3663
3664 /* configure backlink on ring */
3665 ring->q_vector = q_vector;
3666
3667 /* update q_vector Rx values */
3668 igc_add_ring(ring, &q_vector->rx);
3669
3670 /* apply Rx specific ring traits */
3671 ring->count = adapter->rx_ring_count;
3672 ring->queue_index = rxr_idx;
3673
3674 /* assign ring to adapter */
3675 adapter->rx_ring[rxr_idx] = ring;
3676 }
3677
3678 return 0;
3679 }
3680
3681 /**
3682 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
3683 * @adapter: board private structure to initialize
3684 *
3685 * We allocate one q_vector per queue interrupt. If allocation fails we
3686 * return -ENOMEM.
3687 */
igc_alloc_q_vectors(struct igc_adapter * adapter)3688 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
3689 {
3690 int rxr_remaining = adapter->num_rx_queues;
3691 int txr_remaining = adapter->num_tx_queues;
3692 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
3693 int q_vectors = adapter->num_q_vectors;
3694 int err;
3695
3696 if (q_vectors >= (rxr_remaining + txr_remaining)) {
3697 for (; rxr_remaining; v_idx++) {
3698 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3699 0, 0, 1, rxr_idx);
3700
3701 if (err)
3702 goto err_out;
3703
3704 /* update counts and index */
3705 rxr_remaining--;
3706 rxr_idx++;
3707 }
3708 }
3709
3710 for (; v_idx < q_vectors; v_idx++) {
3711 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
3712 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
3713
3714 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3715 tqpv, txr_idx, rqpv, rxr_idx);
3716
3717 if (err)
3718 goto err_out;
3719
3720 /* update counts and index */
3721 rxr_remaining -= rqpv;
3722 txr_remaining -= tqpv;
3723 rxr_idx++;
3724 txr_idx++;
3725 }
3726
3727 return 0;
3728
3729 err_out:
3730 adapter->num_tx_queues = 0;
3731 adapter->num_rx_queues = 0;
3732 adapter->num_q_vectors = 0;
3733
3734 while (v_idx--)
3735 igc_free_q_vector(adapter, v_idx);
3736
3737 return -ENOMEM;
3738 }
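
/* Worked example for the distribution above (editor's arithmetic, not
 * driver code), with 4 Rx and 4 Tx queues:
 *
 * - num_q_vectors = 8: the first loop gives vectors 0-3 one Rx ring
 *   each; the second loop then sees rqpv = 0 and
 *   tqpv = DIV_ROUND_UP(4, 4) = 1, so vectors 4-7 get one Tx ring each.
 * - num_q_vectors = 4: the first loop is skipped (4 < 8); every pass of
 *   the second loop computes rqpv = tqpv = DIV_ROUND_UP(4, 4) = 1, so
 *   each vector services one Tx/Rx ring pair.
 *
 * In both cases rqpv and tqpv never exceed 1, which is what allows
 * igc_alloc_q_vector() to reject counts above 1 outright.
 */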

/**
 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: Pointer to adapter structure
 * @msix: boolean for MSI-X capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 */
static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
{
	struct net_device *dev = adapter->netdev;
	int err = 0;

	igc_set_interrupt_capability(adapter, msix);

	err = igc_alloc_q_vectors(adapter);
	if (err) {
		netdev_err(dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igc_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igc_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igc_sw_init - Initialize general software structures (struct igc_adapter)
 * @adapter: board private structure to initialize
 *
 * igc_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int igc_sw_init(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGC_DEFAULT_TXD;
	adapter->rx_ring_count = IGC_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGC_DEFAULT_ITR;
	adapter->tx_itr_setting = IGC_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;

	/* adjust max frame to be at least the size of a standard frame */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	mutex_init(&adapter->nfc_rule_lock);
	INIT_LIST_HEAD(&adapter->nfc_rule_list);
	adapter->nfc_rule_count = 0;

	spin_lock_init(&adapter->stats64_lock);
	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
	adapter->flags |= IGC_FLAG_HAS_MSIX;

	igc_init_queue_configuration(adapter);

	/* This call may decrease the number of queues */
	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igc_irq_disable(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	return 0;
}
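
/* Example of the sizing math above (editor's arithmetic): with the
 * default MTU of 1500, max_frame_size = 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes, and min_frame_size =
 * ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes, the shortest legal
 * Ethernet frame on the wire.
 */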

/**
 * igc_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 */
void igc_up(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i = 0;

	/* hardware has been reset, we need to reload some things */
	igc_configure(adapter);

	clear_bit(__IGC_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	if (adapter->msix_entries)
		igc_configure_msix(adapter);
	else
		igc_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = true;
	schedule_work(&adapter->watchdog_task);
}

/**
 * igc_update_stats - Update the board statistics counters
 * @adapter: board private structure
 */
void igc_update_stats(struct igc_adapter *adapter)
{
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	u64 _bytes, _packets;
	u64 bytes, packets;
	unsigned int start;
	u32 mpc;
	int i;

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	packets = 0;
	bytes = 0;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];
		u32 rqdpc = rd32(IGC_RQDPC(i));

		if (hw->mac.type >= igc_i225)
			wr32(IGC_RQDPC(i), 0);

		if (rqdpc) {
			ring->rx_stats.drops += rqdpc;
			net_stats->rx_fifo_errors += rqdpc;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	packets = 0;
	bytes = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;
	rcu_read_unlock();

	/* read stats registers */
	adapter->stats.crcerrs += rd32(IGC_CRCERRS);
	adapter->stats.gprc += rd32(IGC_GPRC);
	adapter->stats.gorc += rd32(IGC_GORCL);
	rd32(IGC_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(IGC_BPRC);
	adapter->stats.mprc += rd32(IGC_MPRC);
	adapter->stats.roc += rd32(IGC_ROC);

	adapter->stats.prc64 += rd32(IGC_PRC64);
	adapter->stats.prc127 += rd32(IGC_PRC127);
	adapter->stats.prc255 += rd32(IGC_PRC255);
	adapter->stats.prc511 += rd32(IGC_PRC511);
	adapter->stats.prc1023 += rd32(IGC_PRC1023);
	adapter->stats.prc1522 += rd32(IGC_PRC1522);
	adapter->stats.tlpic += rd32(IGC_TLPIC);
	adapter->stats.rlpic += rd32(IGC_RLPIC);
	adapter->stats.hgptc += rd32(IGC_HGPTC);

	mpc = rd32(IGC_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(IGC_SCC);
	adapter->stats.ecol += rd32(IGC_ECOL);
	adapter->stats.mcc += rd32(IGC_MCC);
	adapter->stats.latecol += rd32(IGC_LATECOL);
	adapter->stats.dc += rd32(IGC_DC);
	adapter->stats.rlec += rd32(IGC_RLEC);
	adapter->stats.xonrxc += rd32(IGC_XONRXC);
	adapter->stats.xontxc += rd32(IGC_XONTXC);
	adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
	adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
	adapter->stats.fcruc += rd32(IGC_FCRUC);
	adapter->stats.gptc += rd32(IGC_GPTC);
	adapter->stats.gotc += rd32(IGC_GOTCL);
	rd32(IGC_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(IGC_RNBC);
	adapter->stats.ruc += rd32(IGC_RUC);
	adapter->stats.rfc += rd32(IGC_RFC);
	adapter->stats.rjc += rd32(IGC_RJC);
	adapter->stats.tor += rd32(IGC_TORH);
	adapter->stats.tot += rd32(IGC_TOTH);
	adapter->stats.tpr += rd32(IGC_TPR);

	adapter->stats.ptc64 += rd32(IGC_PTC64);
	adapter->stats.ptc127 += rd32(IGC_PTC127);
	adapter->stats.ptc255 += rd32(IGC_PTC255);
	adapter->stats.ptc511 += rd32(IGC_PTC511);
	adapter->stats.ptc1023 += rd32(IGC_PTC1023);
	adapter->stats.ptc1522 += rd32(IGC_PTC1522);

	adapter->stats.mptc += rd32(IGC_MPTC);
	adapter->stats.bptc += rd32(IGC_BPTC);

	adapter->stats.tpt += rd32(IGC_TPT);
	adapter->stats.colc += rd32(IGC_COLC);
	adapter->stats.colc += rd32(IGC_RERC);

	adapter->stats.algnerrc += rd32(IGC_ALGNERRC);

	adapter->stats.tsctc += rd32(IGC_TSCTC);

	adapter->stats.iac += rd32(IGC_IAC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += rd32(IGC_MGTPTC);
	adapter->stats.mgprc += rd32(IGC_MGTPRC);
	adapter->stats.mgpdc += rd32(IGC_MGTPDC);
}
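
/* Editor's sketch (not driver code): the fetch/retry loops above are the
 * standard u64_stats pattern for tearing-free 64-bit counter reads on
 * 32-bit hosts. A minimal reader of one ring's Rx packet count:
 *
 *	unsigned int start;
 *	u64 packets;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
 *		packets = ring->rx_stats.packets;
 *	} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
 *
 * The loop simply retries if a writer updated the stats mid-read.
 */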

/**
 * igc_down - Close the interface
 * @adapter: board private structure
 */
void igc_down(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i = 0;

	set_bit(__IGC_DOWN, &adapter->state);

	igc_ptp_suspend(adapter);

	/* disable receives in the hardware */
	rctl = rd32(IGC_RCTL);
	wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
	/* flush and sleep below */

	/* set trans_start so we don't get spurious watchdogs during reset */
	netif_trans_update(netdev);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_EN;
	wr32(IGC_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	usleep_range(10000, 20000);

	igc_irq_disable(adapter);

	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igc_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igc_reset(adapter);

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;

	igc_clean_all_tx_rings(adapter);
	igc_clean_all_rx_rings(adapter);
}

void igc_reinit_locked(struct igc_adapter *adapter)
{
	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igc_down(adapter);
	igc_up(adapter);
	clear_bit(__IGC_RESETTING, &adapter->state);
}

static void igc_reset_task(struct work_struct *work)
{
	struct igc_adapter *adapter;

	adapter = container_of(work, struct igc_adapter, reset_task);

	rtnl_lock();
	/* If we're already down or resetting, just bail */
	if (test_bit(__IGC_DOWN, &adapter->state) ||
	    test_bit(__IGC_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	igc_rings_dump(adapter);
	igc_regs_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igc_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * igc_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(netdev, "Jumbo frames not supported with XDP\n");
		return -EINVAL;
	}

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igc_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igc_down(adapter);

	netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igc_up(adapter);
	else
		igc_reset(adapter);

	clear_bit(__IGC_RESETTING, &adapter->state);

	return 0;
}
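
/* Example (editor's arithmetic): igc_change_mtu(netdev, 9000) computes
 * max_frame = 9000 + 14 + 4 + 4 = 9022 bytes, while a tiny MTU such as
 * 68 yields 90 and is rounded up to a full standard frame,
 * ETH_FRAME_LEN (1514) + ETH_FCS_LEN (4) = 1518. With an XDP program
 * loaded, any new_mtu above ETH_DATA_LEN (1500) is rejected with
 * -EINVAL before any of this runs.
 */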

/**
 * igc_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 * Returns the address of the device statistics structure.
 * The statistics are updated here and also from the timer callback.
 */
static void igc_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	if (!test_bit(__IGC_RESETTING, &adapter->state))
		igc_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);
}

static netdev_features_t igc_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int igc_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* Add VLAN support */
	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
		return 0;

	if (!(features & NETIF_F_NTUPLE))
		igc_flush_nfc_rules(adapter);

	netdev->features = features;

	if (netif_running(netdev))
		igc_reinit_locked(adapter);
	else
		igc_reset(adapter);

	return 1;
}

static netdev_features_t
igc_features_check(struct sk_buff *skb, struct net_device *dev,
		   netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPv4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
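
/* Example (editor's sketch) of what the checks above catch: a plain
 * IPv4/TCP frame has mac_hdr_len = 14 and network_hdr_len = 20, well
 * within the context descriptor limits, so features pass through
 * unchanged. An skb whose L2 header exceeds IGC_MAX_MAC_HDR_LEN loses
 * checksum, SCTP CRC, VLAN insertion and TSO offloads for that packet
 * only; the stack then falls back to software for those operations.
 */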

static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
	u32 ack, tsauxc, sec, nsec, tsicr;
	struct igc_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
	struct timespec64 ts;

	tsicr = rd32(IGC_TSICR);
	ack = 0;

	if (tsicr & IGC_TSICR_SYS_WRAP) {
		event.type = PTP_CLOCK_PPS;
		if (adapter->ptp_caps.pps)
			ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_SYS_WRAP;
	}

	if (tsicr & IGC_TSICR_TXTS) {
		/* retrieve hardware timestamp */
		schedule_work(&adapter->ptp_tx_work);
		ack |= IGC_TSICR_TXTS;
	}

	if (tsicr & IGC_TSICR_TT0) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[0].start,
				    adapter->perout[0].period);
		wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
		wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
		tsauxc = rd32(IGC_TSAUXC);
		tsauxc |= IGC_TSAUXC_EN_TT0;
		wr32(IGC_TSAUXC, tsauxc);
		adapter->perout[0].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= IGC_TSICR_TT0;
	}

	if (tsicr & IGC_TSICR_TT1) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[1].start,
				    adapter->perout[1].period);
		wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
		wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
		tsauxc = rd32(IGC_TSAUXC);
		tsauxc |= IGC_TSAUXC_EN_TT1;
		wr32(IGC_TSAUXC, tsauxc);
		adapter->perout[1].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= IGC_TSICR_TT1;
	}

	if (tsicr & IGC_TSICR_AUTT0) {
		nsec = rd32(IGC_AUXSTMPL0);
		sec = rd32(IGC_AUXSTMPH0);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = sec * NSEC_PER_SEC + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_AUTT0;
	}

	if (tsicr & IGC_TSICR_AUTT1) {
		nsec = rd32(IGC_AUXSTMPL1);
		sec = rd32(IGC_AUXSTMPH1);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 1;
		event.timestamp = sec * NSEC_PER_SEC + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_AUTT1;
	}

	/* acknowledge the interrupts */
	wr32(IGC_TSICR, ack);
}

/**
 * igc_msix_other - msix other interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t igc_msix_other(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_hw *hw = &adapter->hw;
	u32 icr = rd32(IGC_ICR);

	/* reading ICR causes bit 31 of EICR to be cleared */
	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & IGC_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & IGC_ICR_TS)
		igc_tsync_interrupt(adapter);

	wr32(IGC_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igc_write_itr(struct igc_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = IGC_ITR_VAL_MASK;

	itr_val |= IGC_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igc_msix_ring(int irq, void *data)
{
	struct igc_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igc_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igc_request_msix - Initialize MSI-X interrupts
 * @adapter: Pointer to adapter structure
 *
 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 */
static int igc_request_msix(struct igc_adapter *adapter)
{
	int i = 0, err = 0, vector = 0, free_vector = 0;
	struct net_device *netdev = adapter->netdev;

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igc_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igc_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igc_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igc_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

/**
 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: Pointer to adapter structure
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
{
	igc_free_q_vectors(adapter);
	igc_reset_interrupt_capability(adapter);
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void igc_update_phy_info(struct timer_list *t)
{
	struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);

	igc_get_phy_info(&adapter->hw);
}

/**
 * igc_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 */
bool igc_has_link(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt. get_link_status will stay
	 * false until the igc_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case igc_media_type_copper:
		if (!hw->mac.get_link_status)
			return true;
		hw->mac.ops.check_for_link(hw);
		link_active = !hw->mac.get_link_status;
		break;
	default:
	case igc_media_type_unknown:
		break;
	}

	if (hw->mac.type == igc_i225 &&
	    hw->phy.id == I225_I_PHY_ID) {
		if (!netif_carrier_ok(adapter->netdev)) {
			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
		} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
			adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
			adapter->link_check_timeout = jiffies;
		}
	}

	return link_active;
}

/**
 * igc_watchdog - Timer Call-back
 * @t: timer for the watchdog
 */
static void igc_watchdog(struct timer_list *t)
{
	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igc_watchdog_task(struct work_struct *work)
{
	struct igc_adapter *adapter = container_of(work,
						   struct igc_adapter,
						   watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_phy_info *phy = &hw->phy;
	u16 phy_data, retry_count = 20;
	u32 link;
	int i;

	link = igc_has_link(adapter);

	if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
		else
			link = false;
	}

	if (link) {
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;

			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(IGC_CTRL);
			/* Link status message must follow this format */
			netdev_info(netdev,
				    "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half",
				    (ctrl & IGC_CTRL_TFCE) &&
				    (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
				    (ctrl & IGC_CTRL_RFCE) ? "RX" :
				    (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");

			/* disable EEE if enabled */
			if ((adapter->flags & IGC_FLAG_EEE) &&
			    adapter->link_duplex == HALF_DUPLEX) {
				netdev_info(netdev,
					    "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
				adapter->hw.dev_spec._base.eee_enable = false;
				adapter->flags &= ~IGC_FLAG_EEE;
			}

			/* check if SmartSpeed worked */
			igc_check_downshift(hw);
			if (phy->speed_downgraded)
				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			if (adapter->link_speed != SPEED_1000)
				goto no_wait;

			/* wait for Remote receiver status OK */
retry_read_status:
			if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
					      &phy_data)) {
				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
				    retry_count) {
					msleep(100);
					retry_count--;
					goto retry_read_status;
				} else if (!retry_count) {
					netdev_err(netdev, "exceeded max 2 second wait for remote receiver status\n");
				}
			} else {
				netdev_err(netdev, "failed to read 1000Base-T Status Reg\n");
			}
no_wait:
			netif_carrier_on(netdev);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGC_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* Link status message must follow this format */
			netdev_info(netdev, "NIC Link is Down\n");
			netif_carrier_off(netdev);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGC_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			/* link is down, time to check for alternate media */
			if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
				if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
					schedule_work(&adapter->reset_task);
					/* return immediately */
					return;
				}
			}
			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);

		/* also check for alternate media here */
		} else if (!netif_carrier_ok(netdev) &&
			   (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
			if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
				schedule_work(&adapter->reset_task);
				/* return immediately */
				return;
			}
		}
	}

	spin_lock(&adapter->stats64_lock);
	igc_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *tx_ring = adapter->tx_ring[i];

		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		u32 eics = 0;

		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(IGC_EICS, eics);
	} else {
		wr32(IGC_ICS, IGC_ICS_RXDMT0);
	}

	igc_ptp_tx_hang(adapter);

	/* Reset the timer */
	if (!test_bit(__IGC_DOWN, &adapter->state)) {
		if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + HZ));
		else
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 2 * HZ));
	}
}

/**
 * igc_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t igc_intr_msi(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_q_vector *q_vector = adapter->q_vector[0];
	struct igc_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(IGC_ICR);

	igc_write_itr(q_vector);

	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		hw->mac.get_link_status = true;
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igc_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t igc_intr(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_q_vector *q_vector = adapter->q_vector[0];
	struct igc_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
	 * need for the IMC write
	 */
	u32 icr = rd32(IGC_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & IGC_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igc_write_itr(q_vector);

	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static void igc_free_irq(struct igc_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igc_request_irq - initialize interrupts
 * @adapter: Pointer to adapter structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
static int igc_request_irq(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		err = igc_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igc_free_all_tx_resources(adapter);
		igc_free_all_rx_resources(adapter);

		igc_clear_interrupt_scheme(adapter);
		err = igc_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;
		igc_setup_all_tx_resources(adapter);
		igc_setup_all_rx_resources(adapter);
		igc_configure(adapter);
	}

	igc_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGC_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, &igc_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igc_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGC_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		netdev_err(netdev, "Error %d getting interrupt\n", err);

request_done:
	return err;
}

/**
 * __igc_open - Called when a network interface is made active
 * @netdev: network interface device structure
 * @resuming: boolean indicating if the device is resuming
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int __igc_open(struct net_device *netdev, bool resuming)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	int err = 0;
	int i = 0;

	/* disallow open during test */

	if (test_bit(__IGC_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igc_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igc_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igc_power_up_link(adapter);

	igc_configure(adapter);

	err = igc_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
	if (err)
		goto err_set_queues;

	clear_bit(__IGC_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	if (!resuming)
		pm_runtime_put(&pdev->dev);

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = true;
	schedule_work(&adapter->watchdog_task);

	return IGC_SUCCESS;

err_set_queues:
	igc_free_irq(adapter);
err_req_irq:
	igc_release_hw_control(adapter);
	igc_power_down_phy_copper_base(&adapter->hw);
	igc_free_all_rx_resources(adapter);
err_setup_rx:
	igc_free_all_tx_resources(adapter);
err_setup_tx:
	igc_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);

	return err;
}

int igc_open(struct net_device *netdev)
{
	return __igc_open(netdev, false);
}

/**
 * __igc_close - Disables a network interface
 * @netdev: network interface device structure
 * @suspending: boolean indicating the device is suspending
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int __igc_close(struct net_device *netdev, bool suspending)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igc_down(adapter);

	igc_release_hw_control(adapter);

	igc_free_irq(adapter);

	igc_free_all_tx_resources(adapter);
	igc_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);

	return 0;
}

int igc_close(struct net_device *netdev)
{
	if (netif_device_present(netdev) || netdev->dismantle)
		return __igc_close(netdev, false);
	return 0;
}

/**
 * igc_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return igc_ptp_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return igc_ptp_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
				      bool enable)
{
	struct igc_ring *ring;
	int i;

	if (queue < 0 || queue >= adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[queue];
	ring->launchtime_enable = enable;

	if (adapter->base_time)
		return 0;

	adapter->cycle_time = NSEC_PER_SEC;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		ring->start_time = 0;
		ring->end_time = NSEC_PER_SEC;
	}

	return 0;
}

static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
{
	struct timespec64 b;

	b = ktime_to_timespec64(base_time);

	return timespec64_compare(now, &b) > 0;
}

static bool validate_schedule(struct igc_adapter *adapter,
			      const struct tc_taprio_qopt_offload *qopt)
{
	int queue_uses[IGC_MAX_TX_QUEUES] = { };
	struct timespec64 now;
	size_t n;

	if (qopt->cycle_time_extension)
		return false;

	igc_ptp_read(adapter, &now);

	/* If we program the controller's BASET registers with a time
	 * in the future, it will hold all the packets until that
	 * time, causing a lot of TX Hangs, so to avoid that, we
	 * reject schedules that would start in the future.
	 */
	if (!is_base_time_past(qopt->base_time, &now))
		return false;

	for (n = 0; n < qopt->num_entries; n++) {
		const struct tc_taprio_sched_entry *e;
		int i;

		e = &qopt->entries[n];

		/* i225 only supports "global" frame preemption
		 * settings.
		 */
		if (e->command != TC_TAPRIO_CMD_SET_GATES)
			return false;

		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
			if (e->gate_mask & BIT(i))
				queue_uses[i]++;

			if (queue_uses[i] > 1)
				return false;
		}
	}

	return true;
}
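
/* Example of a schedule that passes validate_schedule() (editor's
 * sketch): base_time already in the past, no cycle_time_extension, all
 * entries set gates, and each queue's gate opens in exactly one entry:
 *
 *	entries[0]: command = TC_TAPRIO_CMD_SET_GATES,
 *		    gate_mask = BIT(0), interval = 500000 ns
 *	entries[1]: command = TC_TAPRIO_CMD_SET_GATES,
 *		    gate_mask = BIT(1) | BIT(2) | BIT(3),
 *		    interval = 500000 ns
 *
 * Opening queue 0 in both entries would push queue_uses[0] past 1 and
 * the schedule would be rejected.
 */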

static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
				     struct tc_etf_qopt_offload *qopt)
{
	struct igc_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != igc_i225)
		return -EOPNOTSUPP;

	err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
	if (err)
		return err;

	return igc_tsn_offload_apply(adapter);
}

static int igc_save_qbv_schedule(struct igc_adapter *adapter,
				 struct tc_taprio_qopt_offload *qopt)
{
	u32 start_time = 0, end_time = 0;
	size_t n;

	if (!qopt->enable) {
		adapter->base_time = 0;
		return 0;
	}

	if (adapter->base_time)
		return -EALREADY;

	if (!validate_schedule(adapter, qopt))
		return -EINVAL;

	adapter->cycle_time = qopt->cycle_time;
	adapter->base_time = qopt->base_time;

	/* FIXME: be a little smarter about cases when the gate for a
	 * queue stays open for more than one entry.
	 */
	for (n = 0; n < qopt->num_entries; n++) {
		struct tc_taprio_sched_entry *e = &qopt->entries[n];
		int i;

		end_time += e->interval;

		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
			struct igc_ring *ring = adapter->tx_ring[i];

			if (!(e->gate_mask & BIT(i)))
				continue;

			ring->start_time = start_time;
			ring->end_time = end_time;
		}

		start_time += e->interval;
	}

	return 0;
}
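
/* Worked example for the loop above (editor's arithmetic), using the
 * two-entry schedule sketched after validate_schedule() with
 * cycle_time = 1000000 ns:
 *
 *	entry 0 (gate_mask BIT(0), interval 500000 ns):
 *		tx_ring[0]: start_time = 0, end_time = 500000
 *	entry 1 (gate_mask BIT(1)..BIT(3), interval 500000 ns):
 *		tx_ring[1..3]: start_time = 500000, end_time = 1000000
 *
 * start_time only advances after an entry's rings are stamped, so the
 * per-queue windows tile the cycle back to back.
 */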

static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
					 struct tc_taprio_qopt_offload *qopt)
{
	struct igc_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != igc_i225)
		return -EOPNOTSUPP;

	err = igc_save_qbv_schedule(adapter, qopt);
	if (err)
		return err;

	return igc_tsn_offload_apply(adapter);
}

static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct igc_adapter *adapter = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		return igc_tsn_enable_qbv_scheduling(adapter, type_data);

	case TC_SETUP_QDISC_ETF:
		return igc_tsn_enable_launchtime(adapter, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct igc_adapter *adapter = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
	default:
		return -EOPNOTSUPP;
	}
}

static int igc_xdp_xmit(struct net_device *dev, int num_frames,
			struct xdp_frame **frames, u32 flags)
{
	struct igc_adapter *adapter = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;
	int i, drops;

	if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	ring = igc_xdp_get_tx_ring(adapter, cpu);
	nq = txring_txq(ring);

	__netif_tx_lock(nq, cpu);

	drops = 0;
	for (i = 0; i < num_frames; i++) {
		int err;
		struct xdp_frame *xdpf = frames[i];

		err = igc_xdp_init_tx_descriptor(ring, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (flags & XDP_XMIT_FLUSH)
		igc_flush_tx_descriptors(ring);

	__netif_tx_unlock(nq);

	return num_frames - drops;
}
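
/* Example of the .ndo_xdp_xmit contract above (editor's sketch): a
 * caller redirecting eight frames with igc_xdp_xmit(dev, 8, frames,
 * XDP_XMIT_FLUSH) that hits two descriptor failures gets 6 back. The
 * two failed frames were already released here via
 * xdp_return_frame_rx_napi(), so the caller must not free them again.
 */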

static const struct net_device_ops igc_netdev_ops = {
	.ndo_open		= igc_open,
	.ndo_stop		= igc_close,
	.ndo_start_xmit		= igc_xmit_frame,
	.ndo_set_rx_mode	= igc_set_rx_mode,
	.ndo_set_mac_address	= igc_set_mac,
	.ndo_change_mtu		= igc_change_mtu,
	.ndo_get_stats64	= igc_get_stats64,
	.ndo_fix_features	= igc_fix_features,
	.ndo_set_features	= igc_set_features,
	.ndo_features_check	= igc_features_check,
	.ndo_do_ioctl		= igc_ioctl,
	.ndo_setup_tc		= igc_setup_tc,
	.ndo_bpf		= igc_bpf,
	.ndo_xdp_xmit		= igc_xdp_xmit,
};

/* PCIe configuration access */
void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	if (!pci_is_pcie(adapter->pdev))
		return -IGC_ERR_CONFIG;

	pcie_capability_read_word(adapter->pdev, reg, value);

	return IGC_SUCCESS;
}

s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	if (!pci_is_pcie(adapter->pdev))
		return -IGC_ERR_CONFIG;

	pcie_capability_write_word(adapter->pdev, reg, *value);

	return IGC_SUCCESS;
}

u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
	struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igc->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
		WARN(pci_device_is_present(igc->pdev),
		     "igc: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}
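
/* Editor's note on the all-ones test above: a surprise-removed PCIe
 * function reads as 0xFFFFFFFF. Because a single register can
 * legitimately read all ones, the device is only declared gone when
 * register offset 0 reads all ones too (or reg itself is 0, in which
 * case no second read is needed). On detach, hw->hw_addr is set to
 * NULL so later users can notice the device is gone before touching it.
 */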

int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
{
	struct igc_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = false;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = true;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
		goto err_inval;
	case SPEED_2500 + DUPLEX_FULL:
		mac->autoneg = true;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
		break;
	case SPEED_2500 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
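
/* Example of the spd + dplx encoding above (editor's arithmetic):
 * SPEED_100 (100) has its low bit clear and DUPLEX_FULL is 1, so the
 * guard passes and 100 + 1 = 101 uniquely selects the
 * SPEED_100 + DUPLEX_FULL case. The guard is what prevents an odd
 * speed such as 101 with DUPLEX_HALF (0) from aliasing that same sum.
 */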
5361
5362 /**
5363 * igc_probe - Device Initialization Routine
5364 * @pdev: PCI device information struct
5365 * @ent: entry in igc_pci_tbl
5366 *
5367 * Returns 0 on success, negative on failure
5368 *
5369 * igc_probe initializes an adapter identified by a pci_dev structure.
5370 * The OS initialization, configuring the adapter private structure,
5371 * and a hardware reset occur.
5372 */
igc_probe(struct pci_dev * pdev,const struct pci_device_id * ent)5373 static int igc_probe(struct pci_dev *pdev,
5374 const struct pci_device_id *ent)
5375 {
5376 struct igc_adapter *adapter;
5377 struct net_device *netdev;
5378 struct igc_hw *hw;
5379 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
5380 int err, pci_using_dac;
5381
5382 err = pci_enable_device_mem(pdev);
5383 if (err)
5384 return err;
5385
	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, igc_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
				   IGC_MAX_TX_QUEUES);

	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->port_num = hw->bus.func;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = pci_save_state(pdev);
	if (err)
		goto err_ioremap;

	err = -EIO;
	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!adapter->io_addr)
		goto err_ioremap;

	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
	hw->hw_addr = adapter->io_addr;

	netdev->netdev_ops = &igc_netdev_ops;
	igc_ethtool_set_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC and PHY function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* Add supported features to the features list */
	netdev->features |= NETIF_F_SG;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_TSO_ECN;
	netdev->features |= NETIF_F_RXCSUM;
	netdev->features |= NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SCTP_CRC;
	netdev->features |= NETIF_F_HW_TC;

#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				  NETIF_F_GSO_GRE_CSUM | \
				  NETIF_F_GSO_IPXIP4 | \
				  NETIF_F_GSO_IPXIP6 | \
				  NETIF_F_GSO_UDP_TUNNEL | \
				  NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;

	/* setup the private structure */
	err = igc_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= netdev->features;
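	/* Anything in hw_features can be toggled at runtime with ethtool,
	 * e.g. (sketch, interface name assumed):
	 *   ethtool -K eth0 ntuple on
	 */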

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

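	/* Only parts backed by an external flash carry an NVM checksum
	 * to validate; flashless (blank-NVM) parts skip this check.
	 */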
	if (igc_get_flash_presence_i225(hw)) {
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* configure RXPBSIZE and TXPBSIZE */
	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);

	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igc_reset_task);
	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
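	/* 0xaf advertises every supported speed/duplex combination:
	 * 10/100 half and full duplex, 1000 full and 2500 full duplex.
	 */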
	hw->phy.autoneg_advertised = 0xaf;

	hw->fc.requested_mode = igc_fc_default;
	hw->fc.current_mode = igc_fc_default;

	/* By default, support wake on port A */
	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
		adapter->wol |= IGC_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);

	igc_ptp_init(adapter);

	/* reset the hardware with the new settings */
	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

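	/* "eth%d" is a template; register_netdev() replaces %d with the
	 * first free interface index.
	 */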
	strncpy(netdev->name, "eth%d", IFNAMSIZ);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* keep a copy of the board-specific info for later use */
	adapter->ei = *ei;

	/* print pcie link status and MAC address */
	pcie_print_link_status(pdev);
	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	/* Disable EEE for internal PHY devices */
	hw->dev_spec._base.eee_enable = false;
	adapter->flags &= ~IGC_FLAG_EEE;
	igc_set_eee_i225(hw, false, false, false);

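	/* The PCI core takes a runtime-PM reference before calling probe;
	 * drop it here (without idling) so the device may runtime-suspend
	 * once it goes unused.
	 */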
	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	igc_release_hw_control(adapter);
err_eeprom:
	if (!igc_check_reset_block(hw))
		igc_reset_phy(hw);
err_sw_init:
	igc_clear_interrupt_scheme(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

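	/* Take a runtime-PM reference so the device cannot runtime-suspend
	 * while it is being torn down.
	 */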
	pm_runtime_get_noresume(&pdev->dev);

	igc_flush_nfc_rules(adapter);

	igc_ptp_stop(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);
	unregister_netdev(netdev);

	igc_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, adapter->io_addr);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

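	/* Do not arm the link-change wake filter if the link is already up */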
	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;

	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

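	/* Keep the PHY powered while any wake source or management
	 * pass-through is active; otherwise power it down to save energy.
	 */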
	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}

static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}

static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 val;
	int err;

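	/* Return to D0 and restore config space, then re-save it so a
	 * later suspend starts from a valid snapshot.
	 */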
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}

static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}

static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}

static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

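	/* Never suspend from the idle callback itself: if there is no
	 * link, schedule a runtime suspend five seconds out, and return
	 * -EBUSY either way so the PM core keeps the device active now.
	 */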
	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}
#endif /* CONFIG_PM */

static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igc_resume routine.
 **/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}

/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igc_resume routine.
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};

#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
#endif

static struct pci_driver igc_driver = {
	.name = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe = igc_probe,
	.remove = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};

/**
 * igc_reinit_queues - reinitialize the interrupt scheme and queues
 * @adapter: pointer to adapter structure
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}

/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}

/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);
	return ret;
}

module_init(igc_init_module);

/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);
/* igc_main.c */