// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
		"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
				bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif

#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0444);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { __le64 a; __le64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name state trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;

		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15    0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;

			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1, buffer_info->skb->data,
					       dma_unmap_len(buffer_info, len),
					       true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;

			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
					"R ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS,
						       16, 1,
						       page_address(buffer_info->page) +
						       buffer_info->page_offset,
						       igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_i2c_data - Reads the I2C SDA data bit
 * @data: opaque pointer to adapter struct
 *
 * Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}

/**
 * igb_set_i2c_data - Sets the I2C data bit
 * @data: opaque pointer to adapter struct
 * @state: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_DATA_OE_N;
		i2cctl &= ~E1000_I2C_DATA_OUT;
	}

	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_set_i2c_clk - Sets the I2C SCL clock
 * @data: opaque pointer to adapter struct
 * @state: state to set clock
 *
 * Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_get_i2c_clk - Gets the I2C SCL clock state
 * @data: opaque pointer to adapter struct
 *
 * Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

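/* The bit-banged interface below drives SDA/SCL through I2CPARAMS. With
 * i2c-algo-bit, .udelay is roughly half the SCL period in microseconds,
 * so .udelay = 5 gives an SCL clock of about 100 kHz; .timeout is in
 * jiffies.
 */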
static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};

/**
 * igb_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

static struct pci_driver igb_driver;

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s\n", igb_driver_string);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

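/* Q_IDX_82576() interleaves ring indices across the two halves of the
 * 82576 queue space: Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8,
 * Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, and so on, matching the VF
 * queue pairing described in igb_cache_ring_register() below.
 */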
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		fallthrough;
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

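/**
 * igb_rd32 - read a device register, tolerating surprise removal
 * @hw: pointer to the HW structure
 * @reg: register offset to read
 *
 * Returns the register value, or all 1's if the adapter has been removed
 * from the bus, in which case the mapping is torn down so that subsequent
 * accesses fail fast.
 **/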
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
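	/* if the failing read was not already of register 0, re-read offset 0
	 * to distinguish device removal from a register that legitimately
	 * reads back as all 1's
	 */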
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;

		hw->hw_addr = NULL;
		netdev_err(netdev, "PCIe link lost\n");
		WARN(pci_device_is_present(igb->pdev),
		     "igb: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset within IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer. The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers. To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows. The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
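		/* e.g. rx_queue 10 maps to IVAR row 10 & 0x7 = 2 with
		 * column offset (10 & 0x8) << 1 = 16
		 */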
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major. So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
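		/* e.g. rx_queue 5 maps to IVAR row 5 >> 1 = 2 with
		 * column offset (5 & 0x1) << 4 = 16
		 */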
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure to initialize
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick. And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure to initialize
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	unsigned int num_q_vectors = adapter->num_q_vectors;
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	if (num_q_vectors > MAX_Q_VECTORS) {
		num_q_vectors = MAX_Q_VECTORS;
		dev_warn(&adapter->pdev->dev,
			 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
			 adapter->num_q_vectors, MAX_Q_VECTORS);
	}
	for (i = 0; i < num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

/**
 * igb_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 * igb_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated. So, q_vector is NULL so we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: board private structure to initialize
 *
 * This function resets the device so that it has 0 Rx queues, Tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
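	/* e.g. 4 RSS queues with queue pairing disabled request
	 * 4 Rx + 4 Tx + 1 link-status = 9 MSI-X vectors here
	 */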
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;

		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count;
	size_t size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = kmalloc_size_roundup(struct_size(q_vector, ring, ring_count));
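	/* struct_size() covers the flexible ring[] array; rounding the
	 * request up to the slab bucket size keeps the ksize()-based reuse
	 * check below in sync with what was actually allocated
	 */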

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector) {
		q_vector = kzalloc(size, GFP_KERNEL);
	} else if (size > ksize(q_vector)) {
		struct igb_q_vector *new_q_vector;

		new_q_vector = kzalloc(size, GFP_KERNEL);
		if (new_q_vector)
			kfree_rcu(q_vector, rcu);
		q_vector = new_q_vector;
	} else {
		memset(q_vector, 0, size);
	}
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;
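	/* the Tx ring, when requested, occupies ring[0]; the Rx ring follows
	 * it (note the ring++ at the end of the Tx block below)
	 */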

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		ring->cbs_enable = false;
		ring->idleslope = 0;
		ring->sendslope = 0;
		ring->hicredit = 0;
		ring->locredit = 0;

		u64_stats_init(&ring->tx_syncp);
		u64_stats_init(&ring->tx_syncp2);

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, i354, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		u64_stats_init(&ring->rx_syncp);

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

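	/* with at least as many vectors as queues, give each Rx queue its
	 * own vector first; the loop below then spreads any remaining Rx
	 * and Tx queues over the vectors that are left
	 */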
	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 * @adapter: board private structure to initialize
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* we need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 regval = rd32(E1000_EIAM);

		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int i;

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, pf_id, true, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, vid, pf_id, false, true);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

static void enable_fqtss(struct igb_adapter *adapter, bool enable)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(hw->mac.type != e1000_i210);

	if (enable)
		adapter->flags |= IGB_FLAG_FQTSS;
	else
		adapter->flags &= ~IGB_FLAG_FQTSS;

	if (netif_running(netdev))
		schedule_work(&adapter->reset_task);
}

static bool is_fqtss_enabled(struct igb_adapter *adapter)
{
	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
}

static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
				   enum tx_queue_prio prio)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 3);

	val = rd32(E1000_I210_TXDCTL(queue));

	if (prio == TX_QUEUE_PRIO_HIGH)
		val |= E1000_TXDCTL_PRIORITY;
	else
		val &= ~E1000_TXDCTL_PRIORITY;

	wr32(E1000_I210_TXDCTL(queue), val);
}

static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	val = rd32(E1000_I210_TQAVCC(queue));

	if (mode == QUEUE_MODE_STREAM_RESERVATION)
		val |= E1000_TQAVCC_QUEUEMODE;
	else
		val &= ~E1000_TQAVCC_QUEUEMODE;

	wr32(E1000_I210_TQAVCC(queue), val);
}

static bool is_any_cbs_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->cbs_enable)
			return true;
	}

	return false;
}

static bool is_any_txtime_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->launchtime_enable)
			return true;
	}

	return false;
}

/**
 * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 * @adapter: pointer to adapter struct
 * @queue: queue number
 *
 * Configure CBS and Launchtime for a given hardware queue.
 * Parameters are retrieved from the correct Tx ring, so
 * igb_save_cbs_params() and igb_save_txtime_params() should be used
 * for setting those correctly prior to this function being called.
 **/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_ring *ring;
	u32 tqavcc, tqavctrl;
	u16 value;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);
	ring = adapter->tx_ring[queue];

	/* If any of the Qav features is enabled, configure queues as SR and
	 * with HIGH PRIO. If none is, then configure them with LOW PRIO and
	 * as SP.
	 */
	if (ring->cbs_enable || ring->launchtime_enable) {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
	} else {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
	}

	/* If CBS is enabled, set DataTranARB and configure its parameters. */
	if (ring->cbs_enable || queue == 0) {
		/* i210 does not allow the queue 0 to be in the Strict
		 * Priority mode while the Qav mode is enabled, so,
		 * instead of disabling strict priority mode, we give
		 * queue 0 the maximum of credits possible.
		 *
		 * See section 8.12.19 of the i210 datasheet, "Note:
		 * Queue0 QueueMode must be set to 1b when
		 * TransmitMode is set to Qav."
		 */
		if (queue == 0 && !ring->cbs_enable) {
			/* max "linkspeed" idleslope in kbps */
			ring->idleslope = 1000000;
			ring->hicredit = ETH_FRAME_LEN;
		}

		/* Always set data transfer arbitration to credit-based
		 * shaper algorithm on TQAVCTRL if CBS is enabled for any of
		 * the queues.
		 */
		tqavctrl = rd32(E1000_I210_TQAVCTRL);
		tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
		wr32(E1000_I210_TQAVCTRL, tqavctrl);

		/* According to i210 datasheet section 7.2.7.7, we should set
		 * the 'idleSlope' field from TQAVCC register following the
		 * equation:
		 *
		 * For 100 Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 0.2                        (E1)
		 *
		 * For 1000Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 2                          (E2)
		 *
		 * E1 and E2 can be merged into one equation as shown below.
		 * Note that 'link-speed' is in Mbps.
		 *
		 *     value = BW * 0x7735 * 2 * link-speed
		 *                           --------------             (E3)
		 *                                1000
		 *
		 * 'BW' is the percentage bandwidth out of full link speed
		 * which can be found with the following equation. Note that
		 * idleSlope here is the parameter from this function which
		 * is in kbps.
		 *
		 *     BW =     idleSlope
		 *          -----------------                           (E4)
		 *          link-speed * 1000
		 *
		 * That said, we can come up with a generic equation to
1738 * calculate the value we should set it TQAVCC register by
1739 * replacing 'BW' in E3 by E4. The resulting equation is:
1740 *
1741 * value = idleSlope * 0x7735 * 2 * link-speed
1742 * ----------------- -------------- (E5)
1743 * link-speed * 1000 1000
1744 *
1745 * 'link-speed' is present in both sides of the fraction so
1746 * it is canceled out. The final equation is the following:
1747 *
1748 * value = idleSlope * 61034
1749 * ----------------- (E6)
1750 * 1000000
1751 *
1752 * NOTE: For i210, given the above, we can see that idleslope
1753 * is represented in 16.38431 kbps units by the value at
1754 * the TQAVCC register (1Gbps / 61034), which reduces
1755 * the granularity for idleslope increments.
1756 * For instance, if you want to configure a 2576kbps
1757 * idleslope, the value to be written on the register
1758 * would have to be 157.23. If rounded down, you end
1759 * up with less bandwidth available than originally
1760 * required (~2572 kbps). If rounded up, you end up
1761 * with a higher bandwidth (~2589 kbps). The approach
1762 * we take below is to always round up the calculated
1763 * value, so the resulting bandwidth might be slightly
1764 * higher for some configurations.
1765 */
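/* Worked example (illustrative numbers, continuing the note above):
 * for ring->idleslope = 2576 kbps, DIV_ROUND_UP_ULL(2576 * 61034,
 * 1000000) = DIV_ROUND_UP(157223584, 1000000) = 158, which reserves
 * roughly 158 * 16.38431 ~= 2588.7 kbps, slightly above the request.
 */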
1766 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1767
1768 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1769 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1770 tqavcc |= value;
1771 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1772
1773 wr32(E1000_I210_TQAVHC(queue),
1774 0x80000000 + ring->hicredit * 0x7735);
1775 } else {
1777 /* Set idleSlope to zero. */
1778 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1779 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1780 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1781
1782 /* Set hiCredit to zero. */
1783 wr32(E1000_I210_TQAVHC(queue), 0);
1784
1785 /* If CBS is not enabled for any queues anymore, then return to
1786 * the default state of Data Transmission Arbitration on
1787 * TQAVCTRL.
1788 */
1789 if (!is_any_cbs_enabled(adapter)) {
1790 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1791 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1792 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1793 }
1794 }
1795
1796 /* If LaunchTime is enabled, set DataTranTIM. */
1797 if (ring->launchtime_enable) {
1798 /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
1799 * for any of the SR queues, and configure fetchtime delta.
1800 * XXX NOTE:
1801 * - LaunchTime will be enabled for all SR queues.
1802 * - A fixed offset can be added relative to the launch
1803 * time of all packets if configured at reg LAUNCH_OS0.
1804 * We are keeping it as 0 for now (default value).
1805 */
1806 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1807 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1808 E1000_TQAVCTRL_FETCHTIME_DELTA;
1809 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1810 } else {
1811 /* If Launchtime is not enabled for any SR queues anymore,
1812 * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
1813 * effectively disabling Launchtime.
1814 */
1815 if (!is_any_txtime_enabled(adapter)) {
1816 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1817 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1818 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1819 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1820 }
1821 }
1822
1823 /* XXX: In i210 controller the sendSlope and loCredit parameters from
1824 * CBS are not configurable by software so we don't do any 'controller
1825 * configuration' in respect to these parameters.
1826 */
1827
1828 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hicredit %d locredit %d\n",
1829 ring->cbs_enable ? "enabled" : "disabled",
1830 ring->launchtime_enable ? "enabled" : "disabled",
1831 queue,
1832 ring->idleslope, ring->sendslope,
1833 ring->hicredit, ring->locredit);
1834 }
1835
1836 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1837 bool enable)
1838 {
1839 struct igb_ring *ring;
1840
1841 if (queue < 0 || queue >= adapter->num_tx_queues)
1842 return -EINVAL;
1843
1844 ring = adapter->tx_ring[queue];
1845 ring->launchtime_enable = enable;
1846
1847 return 0;
1848 }
1849
1850 static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1851 bool enable, int idleslope, int sendslope,
1852 int hicredit, int locredit)
1853 {
1854 struct igb_ring *ring;
1855
1856 if (queue < 0 || queue >= adapter->num_tx_queues)
1857 return -EINVAL;
1858
1859 ring = adapter->tx_ring[queue];
1860
1861 ring->cbs_enable = enable;
1862 ring->idleslope = idleslope;
1863 ring->sendslope = sendslope;
1864 ring->hicredit = hicredit;
1865 ring->locredit = locredit;
1866
1867 return 0;
1868 }
1869
1870 /**
1871 * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
1872 * @adapter: pointer to adapter struct
1873 *
1874 * Configure TQAVCTRL register switching the controller's Tx mode
1875 * if FQTSS mode is enabled or disabled. Additionally, will issue
1876 * a call to igb_config_tx_modes() per queue so any previously saved
1877 * Tx parameters are applied.
1878 **/
1879 static void igb_setup_tx_mode(struct igb_adapter *adapter)
1880 {
1881 struct net_device *netdev = adapter->netdev;
1882 struct e1000_hw *hw = &adapter->hw;
1883 u32 val;
1884
1885 /* Only i210 controller supports changing the transmission mode. */
1886 if (hw->mac.type != e1000_i210)
1887 return;
1888
1889 if (is_fqtss_enabled(adapter)) {
1890 int i, max_queue;
1891
1892 /* Configure TQAVCTRL register: set transmit mode to 'Qav',
1893 * set data fetch arbitration to 'round robin', set SP_WAIT_SR
1894 * so SP queues wait for SR ones.
1895 */
1896 val = rd32(E1000_I210_TQAVCTRL);
1897 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1898 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1899 wr32(E1000_I210_TQAVCTRL, val);
1900
1901 /* Configure Tx and Rx packet buffers sizes as described in
1902 * i210 datasheet section 7.2.7.7.
1903 */
1904 val = rd32(E1000_TXPBS);
1905 val &= ~I210_TXPBSIZE_MASK;
1906 val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB |
1907 I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB;
1908 wr32(E1000_TXPBS, val);
1909
1910 val = rd32(E1000_RXPBS);
1911 val &= ~I210_RXPBSIZE_MASK;
1912 val |= I210_RXPBSIZE_PB_30KB;
1913 wr32(E1000_RXPBS, val);
1914
1915 /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
1916 * register should not exceed the buffer size programmed in
1917 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
1918 * so according to the datasheet we should set MAX_TPKT_SIZE to
1919 * 4kB / 64.
1920 *
1921 * However, when we do so, no frames from queues 2 and 3 are
1922 * transmitted. It seems MAX_TPKT_SIZE must be strictly less
1923 * than the buffer size programmed in TXPBS. For this
1924 * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.
1925 */
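/* With integer division the expression below evaluates to 63, i.e. a
 * cap of 63 * 64 = 4032 bytes given the register's 64 byte units.
 */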
1926 val = (4096 - 1) / 64;
1927 wr32(E1000_I210_DTXMXPKTSZ, val);
1928
1929 /* Since FQTSS mode is enabled, apply any CBS configuration
1930 * previously set. If no previous CBS configuration has been
1931 * done, then the initial configuration is applied, which means
1932 * CBS is disabled.
1933 */
1934 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1935 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1936
1937 for (i = 0; i < max_queue; i++)
1938 igb_config_tx_modes(adapter, i);
1940 } else {
1941 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1942 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1943 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1944
1945 val = rd32(E1000_I210_TQAVCTRL);
1946 /* According to Section 8.12.21, the other flags we've set when
1947 * enabling FQTSS are not relevant when disabling FQTSS, so we
1948 * don't clear them here.
1949 */
1950 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1951 wr32(E1000_I210_TQAVCTRL, val);
1952 }
1953
1954 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1955 "enabled" : "disabled");
1956 }
1957
1958 /**
1959 * igb_configure - configure the hardware for RX and TX
1960 * @adapter: private board structure
1961 **/
1962 static void igb_configure(struct igb_adapter *adapter)
1963 {
1964 struct net_device *netdev = adapter->netdev;
1965 int i;
1966
1967 igb_get_hw_control(adapter);
1968 igb_set_rx_mode(netdev);
1969 igb_setup_tx_mode(adapter);
1970
1971 igb_restore_vlan(adapter);
1972
1973 igb_setup_tctl(adapter);
1974 igb_setup_mrqc(adapter);
1975 igb_setup_rctl(adapter);
1976
1977 igb_nfc_filter_restore(adapter);
1978 igb_configure_tx(adapter);
1979 igb_configure_rx(adapter);
1980
1981 igb_rx_fifo_flush_82575(&adapter->hw);
1982
1983 /* call igb_desc_unused which always leaves
1984 * at least 1 descriptor unused to make sure
1985 * next_to_use != next_to_clean
1986 */
1987 for (i = 0; i < adapter->num_rx_queues; i++) {
1988 struct igb_ring *ring = adapter->rx_ring[i];
1989 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
1990 }
1991 }
1992
1993 /**
1994 * igb_power_up_link - Power up the phy/serdes link
1995 * @adapter: address of board private structure
1996 **/
1997 void igb_power_up_link(struct igb_adapter *adapter)
1998 {
1999 igb_reset_phy(&adapter->hw);
2000
2001 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2002 igb_power_up_phy_copper(&adapter->hw);
2003 else
2004 igb_power_up_serdes_link_82575(&adapter->hw);
2005
2006 igb_setup_link(&adapter->hw);
2007 }
2008
2009 /**
2010 * igb_power_down_link - Power down the phy/serdes link
2011 * @adapter: address of board private structure
2012 */
2013 static void igb_power_down_link(struct igb_adapter *adapter)
2014 {
2015 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2016 igb_power_down_phy_copper_82575(&adapter->hw);
2017 else
2018 igb_shutdown_serdes_link_82575(&adapter->hw);
2019 }
2020
2021 /**
2022 * igb_check_swap_media - Detect and switch function for Media Auto Sense
2023 * @adapter: address of the board private structure
2024 **/
2025 static void igb_check_swap_media(struct igb_adapter *adapter)
2026 {
2027 struct e1000_hw *hw = &adapter->hw;
2028 u32 ctrl_ext, connsw;
2029 bool swap_now = false;
2030
2031 ctrl_ext = rd32(E1000_CTRL_EXT);
2032 connsw = rd32(E1000_CONNSW);
2033
2034 /* We need to live-swap if the current media is copper and we have
2035 * fiber/serdes to switch to.
2036 */
2037
2038 if ((hw->phy.media_type == e1000_media_type_copper) &&
2039 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2040 swap_now = true;
2041 } else if ((hw->phy.media_type != e1000_media_type_copper) &&
2042 !(connsw & E1000_CONNSW_SERDESD)) {
2043 /* copper signal takes time to appear */
2044 if (adapter->copper_tries < 4) {
2045 adapter->copper_tries++;
2046 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2047 wr32(E1000_CONNSW, connsw);
2048 return;
2049 } else {
2050 adapter->copper_tries = 0;
2051 if ((connsw & E1000_CONNSW_PHYSD) &&
2052 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2053 swap_now = true;
2054 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2055 wr32(E1000_CONNSW, connsw);
2056 }
2057 }
2058 }
2059
2060 if (!swap_now)
2061 return;
2062
2063 switch (hw->phy.media_type) {
2064 case e1000_media_type_copper:
2065 netdev_info(adapter->netdev,
2066 "MAS: changing media to fiber/serdes\n");
2067 ctrl_ext |=
2068 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2069 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2070 adapter->copper_tries = 0;
2071 break;
2072 case e1000_media_type_internal_serdes:
2073 case e1000_media_type_fiber:
2074 netdev_info(adapter->netdev,
2075 "MAS: changing media to copper\n");
2076 ctrl_ext &=
2077 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2078 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2079 break;
2080 default:
2081 /* shouldn't get here during regular operation */
2082 netdev_err(adapter->netdev,
2083 "AMS: Invalid media type found, returning\n");
2084 break;
2085 }
2086 wr32(E1000_CTRL_EXT, ctrl_ext);
2087 }
2088
2089 /**
2090 * igb_up - Open the interface and prepare it to handle traffic
2091 * @adapter: board private structure
2092 **/
2093 int igb_up(struct igb_adapter *adapter)
2094 {
2095 struct e1000_hw *hw = &adapter->hw;
2096 int i;
2097
2098 /* hardware has been reset, we need to reload some things */
2099 igb_configure(adapter);
2100
2101 clear_bit(__IGB_DOWN, &adapter->state);
2102
2103 for (i = 0; i < adapter->num_q_vectors; i++)
2104 napi_enable(&(adapter->q_vector[i]->napi));
2105
2106 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2107 igb_configure_msix(adapter);
2108 else
2109 igb_assign_vector(adapter->q_vector[0], 0);
2110
2111 /* Clear any pending interrupts. */
2112 rd32(E1000_TSICR);
2113 rd32(E1000_ICR);
2114 igb_irq_enable(adapter);
2115
2116 /* notify VFs that reset has been completed */
2117 if (adapter->vfs_allocated_count) {
2118 u32 reg_data = rd32(E1000_CTRL_EXT);
2119
2120 reg_data |= E1000_CTRL_EXT_PFRSTD;
2121 wr32(E1000_CTRL_EXT, reg_data);
2122 }
2123
2124 netif_tx_start_all_queues(adapter->netdev);
2125
2126 /* start the watchdog. */
2127 hw->mac.get_link_status = 1;
2128 schedule_work(&adapter->watchdog_task);
2129
2130 if ((adapter->flags & IGB_FLAG_EEE) &&
2131 (!hw->dev_spec._82575.eee_disable))
2132 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2133
2134 return 0;
2135 }
2136
2137 void igb_down(struct igb_adapter *adapter)
2138 {
2139 struct net_device *netdev = adapter->netdev;
2140 struct e1000_hw *hw = &adapter->hw;
2141 u32 tctl, rctl;
2142 int i;
2143
2144 /* signal that we're down so the interrupt handler does not
2145 * reschedule our watchdog timer
2146 */
2147 set_bit(__IGB_DOWN, &adapter->state);
2148
2149 /* disable receives in the hardware */
2150 rctl = rd32(E1000_RCTL);
2151 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2152 /* flush and sleep below */
2153
2154 igb_nfc_filter_exit(adapter);
2155
2156 netif_carrier_off(netdev);
2157 netif_tx_stop_all_queues(netdev);
2158
2159 /* disable transmits in the hardware */
2160 tctl = rd32(E1000_TCTL);
2161 tctl &= ~E1000_TCTL_EN;
2162 wr32(E1000_TCTL, tctl);
2163 /* flush both disables and wait for them to finish */
2164 wrfl();
2165 usleep_range(10000, 11000);
2166
2167 igb_irq_disable(adapter);
2168
2169 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2170
2171 for (i = 0; i < adapter->num_q_vectors; i++) {
2172 if (adapter->q_vector[i]) {
2173 napi_synchronize(&adapter->q_vector[i]->napi);
2174 napi_disable(&adapter->q_vector[i]->napi);
2175 }
2176 }
2177
2178 del_timer_sync(&adapter->watchdog_timer);
2179 del_timer_sync(&adapter->phy_info_timer);
2180
2181 /* record the stats before reset */
2182 spin_lock(&adapter->stats64_lock);
2183 igb_update_stats(adapter);
2184 spin_unlock(&adapter->stats64_lock);
2185
2186 adapter->link_speed = 0;
2187 adapter->link_duplex = 0;
2188
2189 if (!pci_channel_offline(adapter->pdev))
2190 igb_reset(adapter);
2191
2192 /* clear VLAN promisc flag so VFTA will be updated if necessary */
2193 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2194
2195 igb_clean_all_tx_rings(adapter);
2196 igb_clean_all_rx_rings(adapter);
2197 #ifdef CONFIG_IGB_DCA
2198
2199 /* since we reset the hardware, DCA settings were cleared */
2200 igb_setup_dca(adapter);
2201 #endif
2202 }
2203
2204 void igb_reinit_locked(struct igb_adapter *adapter)
2205 {
2206 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2207 usleep_range(1000, 2000);
2208 igb_down(adapter);
2209 igb_up(adapter);
2210 clear_bit(__IGB_RESETTING, &adapter->state);
2211 }
2212
2213 /**
2214 * igb_enable_mas - Media Autosense re-enable after swap
2215 * @adapter: adapter struct
2216 **/
2217 static void igb_enable_mas(struct igb_adapter *adapter)
2218 {
2219 struct e1000_hw *hw = &adapter->hw;
2220 u32 connsw = rd32(E1000_CONNSW);
2221
2222 /* configure for SerDes media detect */
2223 if ((hw->phy.media_type == e1000_media_type_copper) &&
2224 (!(connsw & E1000_CONNSW_SERDESD))) {
2225 connsw |= E1000_CONNSW_ENRGSRC;
2226 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2227 wr32(E1000_CONNSW, connsw);
2228 wrfl();
2229 }
2230 }
2231
2232 #ifdef CONFIG_IGB_HWMON
2233 /**
2234 * igb_set_i2c_bb - Init I2C interface
2235 * @hw: pointer to hardware structure
2236 **/
2237 static void igb_set_i2c_bb(struct e1000_hw *hw)
2238 {
2239 u32 ctrl_ext;
2240 u32 i2cctl;
2241
2242 ctrl_ext = rd32(E1000_CTRL_EXT);
2243 ctrl_ext |= E1000_CTRL_I2C_ENA;
2244 wr32(E1000_CTRL_EXT, ctrl_ext);
2245 wrfl();
2246
2247 i2cctl = rd32(E1000_I2CPARAMS);
2248 i2cctl |= E1000_I2CBB_EN
2249 | E1000_I2C_CLK_OE_N
2250 | E1000_I2C_DATA_OE_N;
2251 wr32(E1000_I2CPARAMS, i2cctl);
2252 wrfl();
2253 }
2254 #endif
2255
2256 void igb_reset(struct igb_adapter *adapter)
2257 {
2258 struct pci_dev *pdev = adapter->pdev;
2259 struct e1000_hw *hw = &adapter->hw;
2260 struct e1000_mac_info *mac = &hw->mac;
2261 struct e1000_fc_info *fc = &hw->fc;
2262 u32 pba, hwm;
2263
2264 /* Repartition PBA for greater than 9k MTU.
2265 * To take effect, CTRL.RST is required.
2266 */
2267 switch (mac->type) {
2268 case e1000_i350:
2269 case e1000_i354:
2270 case e1000_82580:
2271 pba = rd32(E1000_RXPBS);
2272 pba = igb_rxpbs_adjust_82580(pba);
2273 break;
2274 case e1000_82576:
2275 pba = rd32(E1000_RXPBS);
2276 pba &= E1000_RXPBS_SIZE_MASK_82576;
2277 break;
2278 case e1000_82575:
2279 case e1000_i210:
2280 case e1000_i211:
2281 default:
2282 pba = E1000_PBA_34K;
2283 break;
2284 }
2285
2286 if (mac->type == e1000_82575) {
2287 u32 min_rx_space, min_tx_space, needed_tx_space;
2288
2289 /* write Rx PBA so that hardware can report correct Tx PBA */
2290 wr32(E1000_PBA, pba);
2291
2292 /* To maintain wire speed transmits, the Tx FIFO should be
2293 * large enough to accommodate two full transmit packets,
2294 * rounded up to the next 1KB and expressed in KB. Likewise,
2295 * the Rx FIFO should be large enough to accommodate at least
2296 * one full receive packet and is similarly rounded up and
2297 * expressed in KB.
2298 */
2299 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2300
2301 /* The Tx FIFO also stores 16 bytes of information about the Tx
2302 * frame, but doesn't include the Ethernet FCS because hardware
2303 * appends it. We only need to round up to the nearest 512 byte
2304 * block count since the value we care about is 2 frames, not 1.
2305 */
2306 min_tx_space = adapter->max_frame_size;
2307 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2308 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2309
2310 /* the upper 16 bits hold the Tx packet buffer allocation size in KB */
2311 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2312
2313 /* If current Tx allocation is less than the min Tx FIFO size,
2314 * and the min Tx FIFO size is less than the current Rx FIFO
2315 * allocation, take space away from current Rx allocation.
2316 */
2317 if (needed_tx_space < pba) {
2318 pba -= needed_tx_space;
2319
2320 /* if short on Rx space, Rx wins and must trump Tx
2321 * adjustment
2322 */
2323 if (pba < min_rx_space)
2324 pba = min_rx_space;
2325 }
2326
2327 /* adjust PBA for jumbo frames */
2328 wr32(E1000_PBA, pba);
2329 }
2330
2331 /* flow control settings
2332 * The high water mark must be low enough to fit one full frame
2333 * after transmitting the pause frame. As such we must have enough
2334 * space to allow for us to complete our current transmit and then
2335 * receive the frame that is in progress from the link partner.
2336 * Set it to:
2337 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
2338 */
2339 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2340
2341 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
2342 fc->low_water = fc->high_water - 16;
2343 fc->pause_time = 0xFFFF;
2344 fc->send_xon = 1;
2345 fc->current_mode = fc->requested_mode;
2346
2347 /* disable receive for all VFs */
2348 if (adapter->vfs_allocated_count) {
2349 int i;
2350
2351 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2352 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2353
2354 /* ping all the active vfs to let them know we are going down */
2355 igb_ping_all_vfs(adapter);
2356
2357 /* disable transmits and receives */
2358 wr32(E1000_VFRE, 0);
2359 wr32(E1000_VFTE, 0);
2360 }
2361
2362 /* Allow time for pending master requests to run */
2363 hw->mac.ops.reset_hw(hw);
2364 wr32(E1000_WUC, 0);
2365
2366 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2367 /* need to resetup here after media swap */
2368 adapter->ei.get_invariants(hw);
2369 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2370 }
2371 if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2372 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2373 igb_enable_mas(adapter);
2374 }
2375 if (hw->mac.ops.init_hw(hw))
2376 dev_err(&pdev->dev, "Hardware Error\n");
2377
2378 /* RAR registers were cleared during init_hw, clear mac table */
2379 igb_flush_mac_table(adapter);
2380 __dev_uc_unsync(adapter->netdev, NULL);
2381
2382 /* Recover default RAR entry */
2383 igb_set_default_mac_filter(adapter);
2384
2385 /* Flow control settings reset on hardware reset, so guarantee flow
2386 * control is off when forcing speed.
2387 */
2388 if (!hw->mac.autoneg)
2389 igb_force_mac_fc(hw);
2390
2391 igb_init_dmac(adapter, pba);
2392 #ifdef CONFIG_IGB_HWMON
2393 /* Re-initialize the thermal sensor on i350 devices. */
2394 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2395 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2396 /* If present, re-initialize the external thermal sensor
2397 * interface.
2398 */
2399 if (adapter->ets)
2400 igb_set_i2c_bb(hw);
2401 mac->ops.init_thermal_sensor_thresh(hw);
2402 }
2403 }
2404 #endif
2405 /* Re-establish EEE setting */
2406 if (hw->phy.media_type == e1000_media_type_copper) {
2407 switch (mac->type) {
2408 case e1000_i350:
2409 case e1000_i210:
2410 case e1000_i211:
2411 igb_set_eee_i350(hw, true, true);
2412 break;
2413 case e1000_i354:
2414 igb_set_eee_i354(hw, true, true);
2415 break;
2416 default:
2417 break;
2418 }
2419 }
2420 if (!netif_running(adapter->netdev))
2421 igb_power_down_link(adapter);
2422
2423 igb_update_mng_vlan(adapter);
2424
2425 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2426 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2427
2428 /* Re-enable PTP, where applicable. */
2429 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2430 igb_ptp_reset(adapter);
2431
2432 igb_get_phy_info(hw);
2433 }
2434
2435 static netdev_features_t igb_fix_features(struct net_device *netdev,
2436 netdev_features_t features)
2437 {
2438 /* Since there is no support for separate Rx/Tx vlan accel
2439 * enable/disable, make sure the Tx flag is always in the same state as Rx.
2440 */
2441 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2442 features |= NETIF_F_HW_VLAN_CTAG_TX;
2443 else
2444 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2445
2446 return features;
2447 }
2448
2449 static int igb_set_features(struct net_device *netdev,
2450 netdev_features_t features)
2451 {
2452 netdev_features_t changed = netdev->features ^ features;
2453 struct igb_adapter *adapter = netdev_priv(netdev);
2454
2455 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2456 igb_vlan_mode(netdev, features);
2457
2458 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2459 return 0;
2460
2461 if (!(features & NETIF_F_NTUPLE)) {
2462 struct hlist_node *node2;
2463 struct igb_nfc_filter *rule;
2464
2465 spin_lock(&adapter->nfc_lock);
2466 hlist_for_each_entry_safe(rule, node2,
2467 &adapter->nfc_filter_list, nfc_node) {
2468 igb_erase_filter(adapter, rule);
2469 hlist_del(&rule->nfc_node);
2470 kfree(rule);
2471 }
2472 spin_unlock(&adapter->nfc_lock);
2473 adapter->nfc_filter_count = 0;
2474 }
2475
2476 netdev->features = features;
2477
2478 if (netif_running(netdev))
2479 igb_reinit_locked(adapter);
2480 else
2481 igb_reset(adapter);
2482
2483 return 1;
2484 }
2485
2486 static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2487 struct net_device *dev,
2488 const unsigned char *addr, u16 vid,
2489 u16 flags,
2490 struct netlink_ext_ack *extack)
2491 {
2492 /* guarantee we can provide a unique filter for the unicast address */
2493 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2494 struct igb_adapter *adapter = netdev_priv(dev);
2495 int vfn = adapter->vfs_allocated_count;
2496
2497 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2498 return -ENOMEM;
2499 }
2500
2501 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2502 }
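/* For reference, this handler backs userspace 'bridge fdb' management;
 * an illustrative invocation (device name and address are examples
 * only):
 *
 *   bridge fdb add 00:11:22:33:44:55 dev eth0
 */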
2503
2504 #define IGB_MAX_MAC_HDR_LEN 127
2505 #define IGB_MAX_NETWORK_HDR_LEN 511
2506
2507 static netdev_features_t
2508 igb_features_check(struct sk_buff *skb, struct net_device *dev,
2509 netdev_features_t features)
2510 {
2511 unsigned int network_hdr_len, mac_hdr_len;
2512
2513 /* Make certain the headers can be described by a context descriptor */
2514 mac_hdr_len = skb_network_offset(skb);
2515 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2516 return features & ~(NETIF_F_HW_CSUM |
2517 NETIF_F_SCTP_CRC |
2518 NETIF_F_GSO_UDP_L4 |
2519 NETIF_F_HW_VLAN_CTAG_TX |
2520 NETIF_F_TSO |
2521 NETIF_F_TSO6);
2522
2523 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2524 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2525 return features & ~(NETIF_F_HW_CSUM |
2526 NETIF_F_SCTP_CRC |
2527 NETIF_F_GSO_UDP_L4 |
2528 NETIF_F_TSO |
2529 NETIF_F_TSO6);
2530
2531 /* We can only support IPv4 TSO in tunnels if we can mangle the
2532 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2533 */
2534 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2535 features &= ~NETIF_F_TSO;
2536
2537 return features;
2538 }
2539
2540 static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2541 {
2542 if (!is_fqtss_enabled(adapter)) {
2543 enable_fqtss(adapter, true);
2544 return;
2545 }
2546
2547 igb_config_tx_modes(adapter, queue);
2548
2549 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2550 enable_fqtss(adapter, false);
2551 }
2552
2553 static int igb_offload_cbs(struct igb_adapter *adapter,
2554 struct tc_cbs_qopt_offload *qopt)
2555 {
2556 struct e1000_hw *hw = &adapter->hw;
2557 int err;
2558
2559 /* CBS offloading is only supported by i210 controller. */
2560 if (hw->mac.type != e1000_i210)
2561 return -EOPNOTSUPP;
2562
2563 /* CBS offloading is only supported by queue 0 and queue 1. */
2564 if (qopt->queue < 0 || qopt->queue > 1)
2565 return -EINVAL;
2566
2567 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2568 qopt->idleslope, qopt->sendslope,
2569 qopt->hicredit, qopt->locredit);
2570 if (err)
2571 return err;
2572
2573 igb_offload_apply(adapter, qopt->queue);
2574
2575 return 0;
2576 }
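/* For reference, CBS offload is normally driven from userspace via the
 * tc cbs qdisc, assuming an mqprio root (here handle 100:) already maps
 * traffic classes to queues. Device name and numbers are illustrative
 * only:
 *
 *   tc qdisc replace dev eth0 parent 100:1 cbs \
 *       idleslope 2576 sendslope -997424 hicredit 30 locredit -1470 \
 *       offload 1
 */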
2577
2578 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2579 #define VLAN_PRIO_FULL_MASK (0x07)
2580
2581 static int igb_parse_cls_flower(struct igb_adapter *adapter,
2582 struct flow_cls_offload *f,
2583 int traffic_class,
2584 struct igb_nfc_filter *input)
2585 {
2586 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2587 struct flow_dissector *dissector = rule->match.dissector;
2588 struct netlink_ext_ack *extack = f->common.extack;
2589
2590 if (dissector->used_keys &
2591 ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
2592 BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
2593 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2594 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
2595 NL_SET_ERR_MSG_MOD(extack,
2596 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2597 return -EOPNOTSUPP;
2598 }
2599
2600 if (flow_rule_match_has_control_flags(rule, extack))
2601 return -EOPNOTSUPP;
2602
2603 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2604 struct flow_match_eth_addrs match;
2605
2606 flow_rule_match_eth_addrs(rule, &match);
2607 if (!is_zero_ether_addr(match.mask->dst)) {
2608 if (!is_broadcast_ether_addr(match.mask->dst)) {
2609 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2610 return -EINVAL;
2611 }
2612
2613 input->filter.match_flags |=
2614 IGB_FILTER_FLAG_DST_MAC_ADDR;
2615 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2616 }
2617
2618 if (!is_zero_ether_addr(match.mask->src)) {
2619 if (!is_broadcast_ether_addr(match.mask->src)) {
2620 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2621 return -EINVAL;
2622 }
2623
2624 input->filter.match_flags |=
2625 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2626 ether_addr_copy(input->filter.src_addr, match.key->src);
2627 }
2628 }
2629
2630 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2631 struct flow_match_basic match;
2632
2633 flow_rule_match_basic(rule, &match);
2634 if (match.mask->n_proto) {
2635 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2636 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2637 return -EINVAL;
2638 }
2639
2640 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2641 input->filter.etype = match.key->n_proto;
2642 }
2643 }
2644
2645 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2646 struct flow_match_vlan match;
2647
2648 flow_rule_match_vlan(rule, &match);
2649 if (match.mask->vlan_priority) {
2650 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2651 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2652 return -EINVAL;
2653 }
2654
2655 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2656 input->filter.vlan_tci =
2657 (__force __be16)match.key->vlan_priority;
2658 }
2659 }
2660
2661 input->action = traffic_class;
2662 input->cookie = f->cookie;
2663
2664 return 0;
2665 }
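/* For reference, filters parsed here typically originate from tc
 * flower with the hw_tc option; an illustrative example (device name
 * and values are placeholders):
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: flower skip_sw \
 *       dst_mac aa:bb:cc:dd:ee:ff hw_tc 1
 */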
2666
2667 static int igb_configure_clsflower(struct igb_adapter *adapter,
2668 struct flow_cls_offload *cls_flower)
2669 {
2670 struct netlink_ext_ack *extack = cls_flower->common.extack;
2671 struct igb_nfc_filter *filter, *f;
2672 int err, tc;
2673
2674 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2675 if (tc < 0) {
2676 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2677 return -EINVAL;
2678 }
2679
2680 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2681 if (!filter)
2682 return -ENOMEM;
2683
2684 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2685 if (err < 0)
2686 goto err_parse;
2687
2688 spin_lock(&adapter->nfc_lock);
2689
2690 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2691 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2692 err = -EEXIST;
2693 NL_SET_ERR_MSG_MOD(extack,
2694 "This filter is already set in ethtool");
2695 goto err_locked;
2696 }
2697 }
2698
2699 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2700 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2701 err = -EEXIST;
2702 NL_SET_ERR_MSG_MOD(extack,
2703 "This filter is already set in cls_flower");
2704 goto err_locked;
2705 }
2706 }
2707
2708 err = igb_add_filter(adapter, filter);
2709 if (err < 0) {
2710 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2711 goto err_locked;
2712 }
2713
2714 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2715
2716 spin_unlock(&adapter->nfc_lock);
2717
2718 return 0;
2719
2720 err_locked:
2721 spin_unlock(&adapter->nfc_lock);
2722
2723 err_parse:
2724 kfree(filter);
2725
2726 return err;
2727 }
2728
2729 static int igb_delete_clsflower(struct igb_adapter *adapter,
2730 struct flow_cls_offload *cls_flower)
2731 {
2732 struct igb_nfc_filter *filter;
2733 int err;
2734
2735 spin_lock(&adapter->nfc_lock);
2736
2737 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2738 if (filter->cookie == cls_flower->cookie)
2739 break;
2740
2741 if (!filter) {
2742 err = -ENOENT;
2743 goto out;
2744 }
2745
2746 err = igb_erase_filter(adapter, filter);
2747 if (err < 0)
2748 goto out;
2749
2750 hlist_del(&filter->nfc_node);
2751 kfree(filter);
2752
2753 out:
2754 spin_unlock(&adapter->nfc_lock);
2755
2756 return err;
2757 }
2758
2759 static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2760 struct flow_cls_offload *cls_flower)
2761 {
2762 switch (cls_flower->command) {
2763 case FLOW_CLS_REPLACE:
2764 return igb_configure_clsflower(adapter, cls_flower);
2765 case FLOW_CLS_DESTROY:
2766 return igb_delete_clsflower(adapter, cls_flower);
2767 case FLOW_CLS_STATS:
2768 return -EOPNOTSUPP;
2769 default:
2770 return -EOPNOTSUPP;
2771 }
2772 }
2773
2774 static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2775 void *cb_priv)
2776 {
2777 struct igb_adapter *adapter = cb_priv;
2778
2779 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2780 return -EOPNOTSUPP;
2781
2782 switch (type) {
2783 case TC_SETUP_CLSFLOWER:
2784 return igb_setup_tc_cls_flower(adapter, type_data);
2785
2786 default:
2787 return -EOPNOTSUPP;
2788 }
2789 }
2790
2791 static int igb_offload_txtime(struct igb_adapter *adapter,
2792 struct tc_etf_qopt_offload *qopt)
2793 {
2794 struct e1000_hw *hw = &adapter->hw;
2795 int err;
2796
2797 /* Launchtime offloading is only supported by i210 controller. */
2798 if (hw->mac.type != e1000_i210)
2799 return -EOPNOTSUPP;
2800
2801 /* Launchtime offloading is only supported by queues 0 and 1. */
2802 if (qopt->queue < 0 || qopt->queue > 1)
2803 return -EINVAL;
2804
2805 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2806 if (err)
2807 return err;
2808
2809 igb_offload_apply(adapter, qopt->queue);
2810
2811 return 0;
2812 }
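/* For reference, Launchtime offload corresponds to the tc etf qdisc;
 * an illustrative invocation (device name, clockid and delta are
 * examples only):
 *
 *   tc qdisc replace dev eth0 parent 100:1 etf \
 *       clockid CLOCK_TAI delta 300000 offload
 */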
2813
2814 static int igb_tc_query_caps(struct igb_adapter *adapter,
2815 struct tc_query_caps_base *base)
2816 {
2817 switch (base->type) {
2818 case TC_SETUP_QDISC_TAPRIO: {
2819 struct tc_taprio_caps *caps = base->caps;
2820
2821 caps->broken_mqprio = true;
2822
2823 return 0;
2824 }
2825 default:
2826 return -EOPNOTSUPP;
2827 }
2828 }
2829
2830 static LIST_HEAD(igb_block_cb_list);
2831
2832 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2833 void *type_data)
2834 {
2835 struct igb_adapter *adapter = netdev_priv(dev);
2836
2837 switch (type) {
2838 case TC_QUERY_CAPS:
2839 return igb_tc_query_caps(adapter, type_data);
2840 case TC_SETUP_QDISC_CBS:
2841 return igb_offload_cbs(adapter, type_data);
2842 case TC_SETUP_BLOCK:
2843 return flow_block_cb_setup_simple(type_data,
2844 &igb_block_cb_list,
2845 igb_setup_tc_block_cb,
2846 adapter, adapter, true);
2847
2848 case TC_SETUP_QDISC_ETF:
2849 return igb_offload_txtime(adapter, type_data);
2850
2851 default:
2852 return -EOPNOTSUPP;
2853 }
2854 }
2855
2856 static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
2857 {
2858 int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
2859 struct igb_adapter *adapter = netdev_priv(dev);
2860 struct bpf_prog *prog = bpf->prog, *old_prog;
2861 bool running = netif_running(dev);
2862 bool need_reset;
2863
2864 /* verify igb ring attributes are sufficient for XDP */
2865 for (i = 0; i < adapter->num_rx_queues; i++) {
2866 struct igb_ring *ring = adapter->rx_ring[i];
2867
2868 if (frame_size > igb_rx_bufsz(ring)) {
2869 NL_SET_ERR_MSG_MOD(bpf->extack,
2870 "The RX buffer size is too small for the frame size");
2871 netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
2872 igb_rx_bufsz(ring), frame_size);
2873 return -EINVAL;
2874 }
2875 }
2876
2877 old_prog = xchg(&adapter->xdp_prog, prog);
2878 need_reset = (!!prog != !!old_prog);
2879
2880 /* device is up and bpf is added/removed, must set up the RX queues */
2881 if (need_reset && running) {
2882 igb_close(dev);
2883 } else {
2884 for (i = 0; i < adapter->num_rx_queues; i++)
2885 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
2886 adapter->xdp_prog);
2887 }
2888
2889 if (old_prog)
2890 bpf_prog_put(old_prog);
2891
2892 /* bpf is just replaced, RXQ and MTU are already set up */
2893 if (!need_reset) {
2894 return 0;
2895 } else {
2896 if (prog)
2897 xdp_features_set_redirect_target(dev, true);
2898 else
2899 xdp_features_clear_redirect_target(dev);
2900 }
2901
2902 if (running)
2903 igb_open(dev);
2904
2905 return 0;
2906 }
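/* For reference, this setup path is exercised when a program is
 * attached or detached from userspace, e.g. (illustrative):
 *
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *   ip link set dev eth0 xdp off
 */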
2907
2908 static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2909 {
2910 switch (xdp->command) {
2911 case XDP_SETUP_PROG:
2912 return igb_xdp_setup(dev, xdp);
2913 default:
2914 return -EINVAL;
2915 }
2916 }
2917
2918 static void igb_xdp_ring_update_tail(struct igb_ring *ring)
2919 {
2920 /* Force memory writes to complete before letting h/w know there
2921 * are new descriptors to fetch.
2922 */
2923 wmb();
2924 writel(ring->next_to_use, ring->tail);
2925 }
2926
2927 static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
2928 {
2929 unsigned int r_idx = smp_processor_id();
2930
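/* Prefer a direct CPU-to-queue mapping and fall back to a modulo only
 * when the CPU id exceeds the queue count, keeping the common case
 * free of a division.
 */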
2931 if (r_idx >= adapter->num_tx_queues)
2932 r_idx = r_idx % adapter->num_tx_queues;
2933
2934 return adapter->tx_ring[r_idx];
2935 }
2936
2937 static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
2938 {
2939 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2940 int cpu = smp_processor_id();
2941 struct igb_ring *tx_ring;
2942 struct netdev_queue *nq;
2943 u32 ret;
2944
2945 if (unlikely(!xdpf))
2946 return IGB_XDP_CONSUMED;
2947
2948 /* During program transitions it's possible adapter->xdp_prog is assigned
2949 * but the ring has not been configured yet. In this case simply abort xmit.
2950 */
2951 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2952 if (unlikely(!tx_ring))
2953 return IGB_XDP_CONSUMED;
2954
2955 nq = txring_txq(tx_ring);
2956 __netif_tx_lock(nq, cpu);
2957 /* Avoid transmit queue timeout since we share it with the slow path */
2958 txq_trans_cond_update(nq);
2959 ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2960 __netif_tx_unlock(nq);
2961
2962 return ret;
2963 }
2964
2965 static int igb_xdp_xmit(struct net_device *dev, int n,
2966 struct xdp_frame **frames, u32 flags)
2967 {
2968 struct igb_adapter *adapter = netdev_priv(dev);
2969 int cpu = smp_processor_id();
2970 struct igb_ring *tx_ring;
2971 struct netdev_queue *nq;
2972 int nxmit = 0;
2973 int i;
2974
2975 if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
2976 return -ENETDOWN;
2977
2978 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2979 return -EINVAL;
2980
2981 /* During program transitions it's possible adapter->xdp_prog is assigned
2982 * but the ring has not been configured yet. In this case simply abort xmit.
2983 */
2984 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2985 if (unlikely(!tx_ring))
2986 return -ENXIO;
2987
2988 nq = txring_txq(tx_ring);
2989 __netif_tx_lock(nq, cpu);
2990
2991 /* Avoid transmit queue timeout since we share it with the slow path */
2992 txq_trans_cond_update(nq);
2993
2994 for (i = 0; i < n; i++) {
2995 struct xdp_frame *xdpf = frames[i];
2996 int err;
2997
2998 err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2999 if (err != IGB_XDP_TX)
3000 break;
3001 nxmit++;
3002 }
3003
3004 __netif_tx_unlock(nq);
3005
3006 if (unlikely(flags & XDP_XMIT_FLUSH))
3007 igb_xdp_ring_update_tail(tx_ring);
3008
3009 return nxmit;
3010 }
3011
3012 static const struct net_device_ops igb_netdev_ops = {
3013 .ndo_open = igb_open,
3014 .ndo_stop = igb_close,
3015 .ndo_start_xmit = igb_xmit_frame,
3016 .ndo_get_stats64 = igb_get_stats64,
3017 .ndo_set_rx_mode = igb_set_rx_mode,
3018 .ndo_set_mac_address = igb_set_mac,
3019 .ndo_change_mtu = igb_change_mtu,
3020 .ndo_eth_ioctl = igb_ioctl,
3021 .ndo_tx_timeout = igb_tx_timeout,
3022 .ndo_validate_addr = eth_validate_addr,
3023 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
3024 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
3025 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
3026 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
3027 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
3028 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
3029 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
3030 .ndo_get_vf_config = igb_ndo_get_vf_config,
3031 .ndo_fix_features = igb_fix_features,
3032 .ndo_set_features = igb_set_features,
3033 .ndo_fdb_add = igb_ndo_fdb_add,
3034 .ndo_features_check = igb_features_check,
3035 .ndo_setup_tc = igb_setup_tc,
3036 .ndo_bpf = igb_xdp,
3037 .ndo_xdp_xmit = igb_xdp_xmit,
3038 };
3039
3040 /**
3041 * igb_set_fw_version - Configure version string for ethtool
3042 * @adapter: adapter struct
3043 **/
3044 void igb_set_fw_version(struct igb_adapter *adapter)
3045 {
3046 struct e1000_hw *hw = &adapter->hw;
3047 struct e1000_fw_version fw;
3048
3049 igb_get_fw_version(hw, &fw);
3050
3051 switch (hw->mac.type) {
3052 case e1000_i210:
3053 case e1000_i211:
3054 if (!(igb_get_flash_presence_i210(hw))) {
3055 snprintf(adapter->fw_version,
3056 sizeof(adapter->fw_version),
3057 "%2d.%2d-%d",
3058 fw.invm_major, fw.invm_minor,
3059 fw.invm_img_type);
3060 break;
3061 }
3062 fallthrough;
3063 default:
3064 /* if option rom is valid, display its version too */
3065 if (fw.or_valid) {
3066 snprintf(adapter->fw_version,
3067 sizeof(adapter->fw_version),
3068 "%d.%d, 0x%08x, %d.%d.%d",
3069 fw.eep_major, fw.eep_minor, fw.etrack_id,
3070 fw.or_major, fw.or_build, fw.or_patch);
3071 /* no option rom */
3072 } else if (fw.etrack_id != 0x0000) {
3073 snprintf(adapter->fw_version,
3074 sizeof(adapter->fw_version),
3075 "%d.%d, 0x%08x",
3076 fw.eep_major, fw.eep_minor, fw.etrack_id);
3077 } else {
3078 snprintf(adapter->fw_version,
3079 sizeof(adapter->fw_version),
3080 "%d.%d.%d",
3081 fw.eep_major, fw.eep_minor, fw.eep_build);
3082 }
3083 break;
3084 }
3085 }
3086
3087 /**
3088 * igb_init_mas - init Media Autosense feature if enabled in the NVM
3089 * @adapter: adapter struct
3091 **/
3092 static void igb_init_mas(struct igb_adapter *adapter)
3093 {
3094 struct e1000_hw *hw = &adapter->hw;
3095 u16 eeprom_data;
3096
3097 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
3098 switch (hw->bus.func) {
3099 case E1000_FUNC_0:
3100 if (eeprom_data & IGB_MAS_ENABLE_0) {
3101 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3102 netdev_info(adapter->netdev,
3103 "MAS: Enabling Media Autosense for port %d\n",
3104 hw->bus.func);
3105 }
3106 break;
3107 case E1000_FUNC_1:
3108 if (eeprom_data & IGB_MAS_ENABLE_1) {
3109 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3110 netdev_info(adapter->netdev,
3111 "MAS: Enabling Media Autosense for port %d\n",
3112 hw->bus.func);
3113 }
3114 break;
3115 case E1000_FUNC_2:
3116 if (eeprom_data & IGB_MAS_ENABLE_2) {
3117 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3118 netdev_info(adapter->netdev,
3119 "MAS: Enabling Media Autosense for port %d\n",
3120 hw->bus.func);
3121 }
3122 break;
3123 case E1000_FUNC_3:
3124 if (eeprom_data & IGB_MAS_ENABLE_3) {
3125 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3126 netdev_info(adapter->netdev,
3127 "MAS: Enabling Media Autosense for port %d\n",
3128 hw->bus.func);
3129 }
3130 break;
3131 default:
3132 /* Shouldn't get here */
3133 netdev_err(adapter->netdev,
3134 "MAS: Invalid port configuration, returning\n");
3135 break;
3136 }
3137 }
3138
3139 /**
3140 * igb_init_i2c - Init I2C interface
3141 * @adapter: pointer to adapter structure
3142 **/
3143 static s32 igb_init_i2c(struct igb_adapter *adapter)
3144 {
3145 s32 status = 0;
3146
3147 /* I2C interface is only supported on i350 devices */
3148 if (adapter->hw.mac.type != e1000_i350)
3149 return 0;
3150
3151 /* Initialize the i2c bus which is controlled by the registers.
3152 * This bus will use the i2c_algo_bit structure that implements
3153 * the protocol through toggling of the 4 bits in the register.
3154 */
3155 adapter->i2c_adap.owner = THIS_MODULE;
3156 adapter->i2c_algo = igb_i2c_algo;
3157 adapter->i2c_algo.data = adapter;
3158 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
3159 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
3160 strscpy(adapter->i2c_adap.name, "igb BB",
3161 sizeof(adapter->i2c_adap.name));
3162 status = i2c_bit_add_bus(&adapter->i2c_adap);
3163 return status;
3164 }
3165
3166 /**
3167 * igb_probe - Device Initialization Routine
3168 * @pdev: PCI device information struct
3169 * @ent: entry in igb_pci_tbl
3170 *
3171 * Returns 0 on success, negative on failure
3172 *
3173 * igb_probe initializes an adapter identified by a pci_dev structure.
3174 * The OS initialization, configuring of the adapter private structure,
3175 * and a hardware reset occur.
3176 **/
3177 static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3178 {
3179 struct net_device *netdev;
3180 struct igb_adapter *adapter;
3181 struct e1000_hw *hw;
3182 u16 eeprom_data = 0;
3183 s32 ret_val;
3184 static int global_quad_port_a; /* global quad port a indication */
3185 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3186 u8 part_str[E1000_PBANUM_LENGTH];
3187 int err;
3188
3189 /* Catch broken hardware that put the wrong VF device ID in
3190 * the PCIe SR-IOV capability.
3191 */
3192 if (pdev->is_virtfn) {
3193 WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n",
3194 pci_name(pdev), pdev->vendor, pdev->device);
3195 return -EINVAL;
3196 }
3197
3198 err = pci_enable_device_mem(pdev);
3199 if (err)
3200 return err;
3201
3202 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3203 if (err) {
3204 dev_err(&pdev->dev,
3205 "No usable DMA configuration, aborting\n");
3206 goto err_dma;
3207 }
3208
3209 err = pci_request_mem_regions(pdev, igb_driver_name);
3210 if (err)
3211 goto err_pci_reg;
3212
3213 pci_set_master(pdev);
3214 pci_save_state(pdev);
3215
3216 err = -ENOMEM;
3217 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3218 IGB_MAX_TX_QUEUES);
3219 if (!netdev)
3220 goto err_alloc_etherdev;
3221
3222 SET_NETDEV_DEV(netdev, &pdev->dev);
3223
3224 pci_set_drvdata(pdev, netdev);
3225 adapter = netdev_priv(netdev);
3226 adapter->netdev = netdev;
3227 adapter->pdev = pdev;
3228 hw = &adapter->hw;
3229 hw->back = adapter;
3230 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3231
3232 err = -EIO;
3233 adapter->io_addr = pci_iomap(pdev, 0, 0);
3234 if (!adapter->io_addr)
3235 goto err_ioremap;
3236 /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
3237 hw->hw_addr = adapter->io_addr;
3238
3239 netdev->netdev_ops = &igb_netdev_ops;
3240 igb_set_ethtool_ops(netdev);
3241 netdev->watchdog_timeo = 5 * HZ;
3242
3243 strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
3244
3245 netdev->mem_start = pci_resource_start(pdev, 0);
3246 netdev->mem_end = pci_resource_end(pdev, 0);
3247
3248 /* PCI config space info */
3249 hw->vendor_id = pdev->vendor;
3250 hw->device_id = pdev->device;
3251 hw->revision_id = pdev->revision;
3252 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3253 hw->subsystem_device_id = pdev->subsystem_device;
3254
3255 /* Copy the default MAC, PHY and NVM function pointers */
3256 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3257 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3258 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3259 /* Initialize skew-specific constants */
3260 err = ei->get_invariants(hw);
3261 if (err)
3262 goto err_sw_init;
3263
3264 /* setup the private structure */
3265 err = igb_sw_init(adapter);
3266 if (err)
3267 goto err_sw_init;
3268
3269 igb_get_bus_info_pcie(hw);
3270
3271 hw->phy.autoneg_wait_to_complete = false;
3272
3273 /* Copper options */
3274 if (hw->phy.media_type == e1000_media_type_copper) {
3275 hw->phy.mdix = AUTO_ALL_MODES;
3276 hw->phy.disable_polarity_correction = false;
3277 hw->phy.ms_type = e1000_ms_hw_default;
3278 }
3279
3280 if (igb_check_reset_block(hw))
3281 dev_info(&pdev->dev,
3282 "PHY reset is blocked due to SOL/IDER session.\n");
3283
3284 /* features is initialized to 0 in allocation, but it might have bits
3285 * set by igb_sw_init, so we should use an OR instead of an
3286 * assignment.
3287 */
3288 netdev->features |= NETIF_F_SG |
3289 NETIF_F_TSO |
3290 NETIF_F_TSO6 |
3291 NETIF_F_RXHASH |
3292 NETIF_F_RXCSUM |
3293 NETIF_F_HW_CSUM;
3294
3295 if (hw->mac.type >= e1000_82576)
3296 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
3297
3298 if (hw->mac.type >= e1000_i350)
3299 netdev->features |= NETIF_F_HW_TC;
3300
3301 #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3302 NETIF_F_GSO_GRE_CSUM | \
3303 NETIF_F_GSO_IPXIP4 | \
3304 NETIF_F_GSO_IPXIP6 | \
3305 NETIF_F_GSO_UDP_TUNNEL | \
3306 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3307
3308 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3309 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3310
3311 /* copy netdev features into list of user selectable features */
3312 netdev->hw_features |= netdev->features |
3313 NETIF_F_HW_VLAN_CTAG_RX |
3314 NETIF_F_HW_VLAN_CTAG_TX |
3315 NETIF_F_RXALL;
3316
3317 if (hw->mac.type >= e1000_i350)
3318 netdev->hw_features |= NETIF_F_NTUPLE;
3319
3320 netdev->features |= NETIF_F_HIGHDMA;
3321
3322 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3323 netdev->mpls_features |= NETIF_F_HW_CSUM;
3324 netdev->hw_enc_features |= netdev->vlan_features;
3325
3326 /* set this bit last since it cannot be part of vlan_features */
3327 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3328 NETIF_F_HW_VLAN_CTAG_RX |
3329 NETIF_F_HW_VLAN_CTAG_TX;
3330
3331 netdev->priv_flags |= IFF_SUPP_NOFCS;
3332
3333 netdev->priv_flags |= IFF_UNICAST_FLT;
3334 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
3335
3336 /* MTU range: 68 - 9216 */
3337 netdev->min_mtu = ETH_MIN_MTU;
3338 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3339
3340 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3341
3342 /* before reading the NVM, reset the controller to put the device in a
3343 * known good starting state
3344 */
3345 hw->mac.ops.reset_hw(hw);
3346
3347 /* make sure the NVM is good; i210/i211 parts can have a special NVM
3348 * that doesn't contain a checksum
3349 */
3350 switch (hw->mac.type) {
3351 case e1000_i210:
3352 case e1000_i211:
3353 if (igb_get_flash_presence_i210(hw)) {
3354 if (hw->nvm.ops.validate(hw) < 0) {
3355 dev_err(&pdev->dev,
3356 "The NVM Checksum Is Not Valid\n");
3357 err = -EIO;
3358 goto err_eeprom;
3359 }
3360 }
3361 break;
3362 default:
3363 if (hw->nvm.ops.validate(hw) < 0) {
3364 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3365 err = -EIO;
3366 goto err_eeprom;
3367 }
3368 break;
3369 }
3370
3371 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3372 /* copy the MAC address out of the NVM */
3373 if (hw->mac.ops.read_mac_addr(hw))
3374 dev_err(&pdev->dev, "NVM Read Error\n");
3375 }
3376
3377 eth_hw_addr_set(netdev, hw->mac.addr);
3378
3379 if (!is_valid_ether_addr(netdev->dev_addr)) {
3380 dev_err(&pdev->dev, "Invalid MAC Address\n");
3381 err = -EIO;
3382 goto err_eeprom;
3383 }
3384
3385 igb_set_default_mac_filter(adapter);
3386
3387 /* get firmware version for ethtool -i */
3388 igb_set_fw_version(adapter);
3389
3390 /* configure RXPBSIZE and TXPBSIZE */
3391 if (hw->mac.type == e1000_i210) {
3392 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3393 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3394 }
3395
3396 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3397 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3398
3399 INIT_WORK(&adapter->reset_task, igb_reset_task);
3400 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3401
3402 /* Initialize link properties that are user-changeable */
3403 adapter->fc_autoneg = true;
3404 hw->mac.autoneg = true;
3405 hw->phy.autoneg_advertised = 0x2f;
3406
3407 hw->fc.requested_mode = e1000_fc_default;
3408 hw->fc.current_mode = e1000_fc_default;
3409
3410 igb_validate_mdi_setting(hw);
3411
3412 /* By default, support wake on port A */
3413 if (hw->bus.func == 0)
3414 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3415
3416 /* Check the NVM for wake support on non-port A ports */
3417 if (hw->mac.type >= e1000_82580)
3418 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3419 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3420 &eeprom_data);
3421 else if (hw->bus.func == 1)
3422 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3423
3424 if (eeprom_data & IGB_EEPROM_APME)
3425 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3426
3427 /* now that we have the eeprom settings, apply the special cases where
3428 * the eeprom may be wrong or the board simply won't support wake on
3429 * lan on a particular port
3430 */
3431 switch (pdev->device) {
3432 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3433 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3434 break;
3435 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3436 case E1000_DEV_ID_82576_FIBER:
3437 case E1000_DEV_ID_82576_SERDES:
3438 /* Wake events only supported on port A for dual fiber
3439 * regardless of eeprom setting
3440 */
3441 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3442 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3443 break;
3444 case E1000_DEV_ID_82576_QUAD_COPPER:
3445 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3446 /* if quad port adapter, disable WoL on all but port A */
3447 if (global_quad_port_a != 0)
3448 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3449 else
3450 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3451 /* Reset for multiple quad port adapters */
3452 if (++global_quad_port_a == 4)
3453 global_quad_port_a = 0;
3454 break;
3455 default:
3456 /* If the device can't wake, don't set software support */
3457 if (!device_can_wakeup(&adapter->pdev->dev))
3458 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3459 }
3460
3461 /* initialize the wol settings based on the eeprom settings */
3462 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3463 adapter->wol |= E1000_WUFC_MAG;
3464
3465 /* Some vendors want WoL disabled by default, but still supported */
3466 if ((hw->mac.type == e1000_i350) &&
3467 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3468 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3469 adapter->wol = 0;
3470 }
3471
3472 /* Some vendors want the ability to use the EEPROM setting as
3473 * enable/disable only, and not for capability
3474 */
3475 if (((hw->mac.type == e1000_i350) ||
3476 (hw->mac.type == e1000_i354)) &&
3477 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3478 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3479 adapter->wol = 0;
3480 }
3481 if (hw->mac.type == e1000_i350) {
3482 if (((pdev->subsystem_device == 0x5001) ||
3483 (pdev->subsystem_device == 0x5002)) &&
3484 (hw->bus.func == 0)) {
3485 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3486 adapter->wol = 0;
3487 }
3488 if (pdev->subsystem_device == 0x1F52)
3489 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3490 }
3491
3492 device_set_wakeup_enable(&adapter->pdev->dev,
3493 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3494
3495 /* reset the hardware with the new settings */
3496 igb_reset(adapter);
3497
3498 /* Init the I2C interface */
3499 err = igb_init_i2c(adapter);
3500 if (err) {
3501 dev_err(&pdev->dev, "failed to init i2c interface\n");
3502 goto err_eeprom;
3503 }
3504
3505 /* let the f/w know that the h/w is now under the control of the
3506 * driver.
3507 */
3508 igb_get_hw_control(adapter);
3509
3510 strcpy(netdev->name, "eth%d");
3511 err = register_netdev(netdev);
3512 if (err)
3513 goto err_register;
3514
3515 /* carrier off reporting is important to ethtool even BEFORE open */
3516 netif_carrier_off(netdev);
3517
3518 #ifdef CONFIG_IGB_DCA
3519 if (dca_add_requester(&pdev->dev) == 0) {
3520 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3521 dev_info(&pdev->dev, "DCA enabled\n");
3522 igb_setup_dca(adapter);
3523 }
3524
3525 #endif
3526 #ifdef CONFIG_IGB_HWMON
3527 /* Initialize the thermal sensor on i350 devices. */
3528 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3529 u16 ets_word;
3530
3531 /* Read the NVM to determine if this i350 device supports an
3532 * external thermal sensor.
3533 */
3534 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3535 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3536 adapter->ets = true;
3537 else
3538 adapter->ets = false;
3539 /* Only enable I2C bit banging if an external thermal
3540 * sensor is supported.
3541 */
3542 if (adapter->ets)
3543 igb_set_i2c_bb(hw);
3544 hw->mac.ops.init_thermal_sensor_thresh(hw);
3545 if (igb_sysfs_init(adapter))
3546 dev_err(&pdev->dev,
3547 "failed to allocate sysfs resources\n");
3548 } else {
3549 adapter->ets = false;
3550 }
3551 #endif
3552 /* Check if Media Autosense is enabled */
3553 adapter->ei = *ei;
3554 if (hw->dev_spec._82575.mas_capable)
3555 igb_init_mas(adapter);
3556
3557 /* do hw tstamp init after resetting */
3558 igb_ptp_init(adapter);
3559
3560 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3561 /* print bus type/speed/width info, not applicable to i354 */
3562 if (hw->mac.type != e1000_i354) {
3563 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3564 netdev->name,
3565 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3566 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3567 "unknown"),
3568 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3569 "Width x4" :
3570 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3571 "Width x2" :
3572 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3573 "Width x1" : "unknown"), netdev->dev_addr);
3574 }
3575
3576 if ((hw->mac.type == e1000_82576 &&
3577 rd32(E1000_EECD) & E1000_EECD_PRES) ||
3578 (hw->mac.type >= e1000_i210 ||
3579 igb_get_flash_presence_i210(hw))) {
3580 ret_val = igb_read_part_string(hw, part_str,
3581 E1000_PBANUM_LENGTH);
3582 } else {
3583 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3584 }
3585
3586 if (ret_val)
3587 strcpy(part_str, "Unknown");
3588 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3589 dev_info(&pdev->dev,
3590 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3591 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3592 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3593 adapter->num_rx_queues, adapter->num_tx_queues);
3594 if (hw->phy.media_type == e1000_media_type_copper) {
3595 switch (hw->mac.type) {
3596 case e1000_i350:
3597 case e1000_i210:
3598 case e1000_i211:
3599 /* Enable EEE for internal copper PHY devices */
3600 err = igb_set_eee_i350(hw, true, true);
3601 if ((!err) &&
3602 (!hw->dev_spec._82575.eee_disable)) {
3603 adapter->eee_advert =
3604 MDIO_EEE_100TX | MDIO_EEE_1000T;
3605 adapter->flags |= IGB_FLAG_EEE;
3606 }
3607 break;
3608 case e1000_i354:
3609 if ((rd32(E1000_CTRL_EXT) &
3610 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3611 err = igb_set_eee_i354(hw, true, true);
3612 if ((!err) &&
3613 (!hw->dev_spec._82575.eee_disable)) {
3614 adapter->eee_advert =
3615 MDIO_EEE_100TX | MDIO_EEE_1000T;
3616 adapter->flags |= IGB_FLAG_EEE;
3617 }
3618 }
3619 break;
3620 default:
3621 break;
3622 }
3623 }
3624
3625 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
3626
3627 pm_runtime_put_noidle(&pdev->dev);
3628 return 0;
3629
3630 err_register:
3631 igb_release_hw_control(adapter);
3632 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3633 err_eeprom:
3634 if (!igb_check_reset_block(hw))
3635 igb_reset_phy(hw);
3636
3637 if (hw->flash_address)
3638 iounmap(hw->flash_address);
3639 err_sw_init:
3640 kfree(adapter->mac_table);
3641 kfree(adapter->shadow_vfta);
3642 igb_clear_interrupt_scheme(adapter);
3643 #ifdef CONFIG_PCI_IOV
3644 igb_disable_sriov(pdev, false);
3645 #endif
3646 pci_iounmap(pdev, adapter->io_addr);
3647 err_ioremap:
3648 free_netdev(netdev);
3649 err_alloc_etherdev:
3650 pci_release_mem_regions(pdev);
3651 err_pci_reg:
3652 err_dma:
3653 pci_disable_device(pdev);
3654 return err;
3655 }
3656
3657 #ifdef CONFIG_PCI_IOV
3658 static int igb_sriov_reinit(struct pci_dev *dev)
3659 {
3660 struct net_device *netdev = pci_get_drvdata(dev);
3661 struct igb_adapter *adapter = netdev_priv(netdev);
3662 struct pci_dev *pdev = adapter->pdev;
3663
3664 rtnl_lock();
3665
3666 if (netif_running(netdev))
3667 igb_close(netdev);
3668 else
3669 igb_reset(adapter);
3670
3671 igb_clear_interrupt_scheme(adapter);
3672
3673 igb_init_queue_configuration(adapter);
3674
3675 if (igb_init_interrupt_scheme(adapter, true)) {
3676 rtnl_unlock();
3677 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3678 return -ENOMEM;
3679 }
3680
3681 if (netif_running(netdev))
3682 igb_open(netdev);
3683
3684 rtnl_unlock();
3685
3686 return 0;
3687 }
3688
3689 static int igb_disable_sriov(struct pci_dev *pdev, bool reinit)
3690 {
3691 struct net_device *netdev = pci_get_drvdata(pdev);
3692 struct igb_adapter *adapter = netdev_priv(netdev);
3693 struct e1000_hw *hw = &adapter->hw;
3694 unsigned long flags;
3695
3696 /* reclaim resources allocated to VFs */
3697 if (adapter->vf_data) {
3698 /* disable iov and allow time for transactions to clear */
3699 if (pci_vfs_assigned(pdev)) {
3700 dev_warn(&pdev->dev,
3701 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3702 return -EPERM;
3703 } else {
3704 pci_disable_sriov(pdev);
3705 msleep(500);
3706 }
3707 spin_lock_irqsave(&adapter->vfs_lock, flags);
3708 kfree(adapter->vf_mac_list);
3709 adapter->vf_mac_list = NULL;
3710 kfree(adapter->vf_data);
3711 adapter->vf_data = NULL;
3712 adapter->vfs_allocated_count = 0;
3713 spin_unlock_irqrestore(&adapter->vfs_lock, flags);
3714 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3715 wrfl();
3716 msleep(100);
3717 dev_info(&pdev->dev, "IOV Disabled\n");
3718
3719 /* Re-enable DMA Coalescing flag since IOV is turned off */
3720 adapter->flags |= IGB_FLAG_DMAC;
3721 }
3722
3723 return reinit ? igb_sriov_reinit(pdev) : 0;
3724 }
3725
3726 static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
3727 {
3728 struct net_device *netdev = pci_get_drvdata(pdev);
3729 struct igb_adapter *adapter = netdev_priv(netdev);
3730 int old_vfs = pci_num_vf(pdev);
3731 struct vf_mac_filter *mac_list;
3732 int err = 0;
3733 int num_vf_mac_filters, i;
3734
3735 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3736 err = -EPERM;
3737 goto out;
3738 }
3739 if (!num_vfs)
3740 goto out;
3741
3742 if (old_vfs) {
3743 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3744 old_vfs, max_vfs);
3745 adapter->vfs_allocated_count = old_vfs;
3746 } else
3747 adapter->vfs_allocated_count = num_vfs;
3748
3749 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3750 sizeof(struct vf_data_storage), GFP_KERNEL);
3751
3752 /* if allocation failed then we do not support SR-IOV */
3753 if (!adapter->vf_data) {
3754 adapter->vfs_allocated_count = 0;
3755 err = -ENOMEM;
3756 goto out;
3757 }
3758
3759 /* Due to the limited number of RAR entries calculate potential
3760 * number of MAC filters available for the VFs. Reserve entries
3761 * for PF default MAC, PF MAC filters and at least one RAR entry
3762 * for each VF for VF MAC.
3763 */
3764 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3765 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3766 adapter->vfs_allocated_count);
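/* Illustrative budget for the computation above (counts assumed, not
 * read from hardware): with a rar_entry_count of 24, an
 * IGB_PF_MAC_FILTERS_RESERVED of 3 and 7 VFs, this leaves
 * 24 - (1 + 3 + 7) = 13 RAR entries to hand out as shared VF MAC
 * filters in the list built below.
 */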
3767
3768 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3769 sizeof(struct vf_mac_filter),
3770 GFP_KERNEL);
3771
3772 mac_list = adapter->vf_mac_list;
3773 INIT_LIST_HEAD(&adapter->vf_macs.l);
3774
3775 if (adapter->vf_mac_list) {
3776 /* Initialize list of VF MAC filters */
3777 for (i = 0; i < num_vf_mac_filters; i++) {
3778 mac_list->vf = -1;
3779 mac_list->free = true;
3780 list_add(&mac_list->l, &adapter->vf_macs.l);
3781 mac_list++;
3782 }
3783 } else {
3784 /* If we could not allocate memory for the VF MAC filters
3785 * we can continue without this feature but warn user.
3786 */
3787 dev_err(&pdev->dev,
3788 "Unable to allocate memory for VF MAC filter list\n");
3789 }
3790
3791 dev_info(&pdev->dev, "%d VFs allocated\n",
3792 adapter->vfs_allocated_count);
3793 for (i = 0; i < adapter->vfs_allocated_count; i++)
3794 igb_vf_configure(adapter, i);
3795
3796 /* DMA Coalescing is not supported in IOV mode. */
3797 adapter->flags &= ~IGB_FLAG_DMAC;
3798
3799 if (reinit) {
3800 err = igb_sriov_reinit(pdev);
3801 if (err)
3802 goto err_out;
3803 }
3804
3805 /* only call pci_enable_sriov() if no VFs are allocated already */
3806 if (!old_vfs) {
3807 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3808 if (err)
3809 goto err_out;
3810 }
3811
3812 goto out;
3813
3814 err_out:
3815 kfree(adapter->vf_mac_list);
3816 adapter->vf_mac_list = NULL;
3817 kfree(adapter->vf_data);
3818 adapter->vf_data = NULL;
3819 adapter->vfs_allocated_count = 0;
3820 out:
3821 return err;
3822 }
3823
3824 #endif
3825 /**
3826 * igb_remove_i2c - Cleanup I2C interface
3827 * @adapter: pointer to adapter structure
3828 **/
3829 static void igb_remove_i2c(struct igb_adapter *adapter)
3830 {
3831 /* free the adapter bus structure */
3832 i2c_del_adapter(&adapter->i2c_adap);
3833 }
3834
3835 /**
3836 * igb_remove - Device Removal Routine
3837 * @pdev: PCI device information struct
3838 *
3839 * igb_remove is called by the PCI subsystem to alert the driver
3840 * that it should release a PCI device. This could be caused by a
3841 * Hot-Plug event, or because the driver is going to be removed from
3842 * memory.
3843 **/
3844 static void igb_remove(struct pci_dev *pdev)
3845 {
3846 struct net_device *netdev = pci_get_drvdata(pdev);
3847 struct igb_adapter *adapter = netdev_priv(netdev);
3848 struct e1000_hw *hw = &adapter->hw;
3849
3850 pm_runtime_get_noresume(&pdev->dev);
3851 #ifdef CONFIG_IGB_HWMON
3852 igb_sysfs_exit(adapter);
3853 #endif
3854 igb_remove_i2c(adapter);
3855 igb_ptp_stop(adapter);
3856 /* The watchdog timer may be rescheduled, so explicitly
3857 * disable watchdog from being rescheduled.
3858 */
3859 set_bit(__IGB_DOWN, &adapter->state);
3860 del_timer_sync(&adapter->watchdog_timer);
3861 del_timer_sync(&adapter->phy_info_timer);
3862
3863 cancel_work_sync(&adapter->reset_task);
3864 cancel_work_sync(&adapter->watchdog_task);
3865
3866 #ifdef CONFIG_IGB_DCA
3867 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3868 dev_info(&pdev->dev, "DCA disabled\n");
3869 dca_remove_requester(&pdev->dev);
3870 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3871 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3872 }
3873 #endif
3874
3875 /* Release control of h/w to f/w. If f/w is AMT enabled, this
3876 * would have already happened in close and is redundant.
3877 */
3878 igb_release_hw_control(adapter);
3879
3880 #ifdef CONFIG_PCI_IOV
3881 igb_disable_sriov(pdev, false);
3882 #endif
3883
3884 unregister_netdev(netdev);
3885
3886 igb_clear_interrupt_scheme(adapter);
3887
3888 pci_iounmap(pdev, adapter->io_addr);
3889 if (hw->flash_address)
3890 iounmap(hw->flash_address);
3891 pci_release_mem_regions(pdev);
3892
3893 kfree(adapter->mac_table);
3894 kfree(adapter->shadow_vfta);
3895 free_netdev(netdev);
3896
3897 pci_disable_device(pdev);
3898 }
3899
3900 /**
3901 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3902 * @adapter: board private structure to initialize
3903 *
3904 * This function initializes the vf specific data storage and then attempts to
3905 * allocate the VFs. The reason for ordering it this way is because it is much
3906 * more expensive time-wise to disable SR-IOV than it is to allocate and free
3907 * the memory for the VFs.
3908 **/
3909 static void igb_probe_vfs(struct igb_adapter *adapter)
3910 {
3911 #ifdef CONFIG_PCI_IOV
3912 struct pci_dev *pdev = adapter->pdev;
3913 struct e1000_hw *hw = &adapter->hw;
3914
3915 /* Virtualization features not supported on i210 and 82580 family. */
3916 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
3917 (hw->mac.type == e1000_82580))
3918 return;
3919
3920 /* Of the below we really only want the effect of getting
3921 * IGB_FLAG_HAS_MSIX set (if available), without which
3922 * igb_enable_sriov() has no effect.
3923 */
3924 igb_set_interrupt_capability(adapter, true);
3925 igb_reset_interrupt_capability(adapter);
3926
3927 pci_sriov_set_totalvfs(pdev, 7);
3928 igb_enable_sriov(pdev, max_vfs, false);
3929
3930 #endif /* CONFIG_PCI_IOV */
3931 }
3932
3933 unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3934 {
3935 struct e1000_hw *hw = &adapter->hw;
3936 unsigned int max_rss_queues;
3937
3938 /* Determine the maximum number of RSS queues supported. */
3939 switch (hw->mac.type) {
3940 case e1000_i211:
3941 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3942 break;
3943 case e1000_82575:
3944 case e1000_i210:
3945 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3946 break;
3947 case e1000_i350:
3948 /* I350 cannot do RSS and SR-IOV at the same time */
3949 if (!!adapter->vfs_allocated_count) {
3950 max_rss_queues = 1;
3951 break;
3952 }
3953 fallthrough;
3954 case e1000_82576:
3955 if (!!adapter->vfs_allocated_count) {
3956 max_rss_queues = 2;
3957 break;
3958 }
3959 fallthrough;
3960 case e1000_82580:
3961 case e1000_i354:
3962 default:
3963 max_rss_queues = IGB_MAX_RX_QUEUES;
3964 break;
3965 }
3966
3967 return max_rss_queues;
3968 }
3969
3970 static void igb_init_queue_configuration(struct igb_adapter *adapter)
3971 {
3972 u32 max_rss_queues;
3973
3974 max_rss_queues = igb_get_max_rss_queues(adapter);
3975 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3976
3977 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3978 }
3979
3980 void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3981 const u32 max_rss_queues)
3982 {
3983 struct e1000_hw *hw = &adapter->hw;
3984
3985 /* Determine if we need to pair queues. */
3986 switch (hw->mac.type) {
3987 case e1000_82575:
3988 case e1000_i211:
3989 /* Device supports enough interrupts without queue pairing. */
3990 break;
3991 case e1000_82576:
3992 case e1000_82580:
3993 case e1000_i350:
3994 case e1000_i354:
3995 case e1000_i210:
3996 default:
3997 /* If rss_queues > half of max_rss_queues, pair the queues in
3998 * order to conserve interrupts due to limited supply.
3999 */
4000 if (adapter->rss_queues > (max_rss_queues / 2))
4001 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
4002 else
4003 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
4004 break;
4005 }
4006 }
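/* Illustrative effect of the pairing heuristic above (numbers assumed):
 * on a part whose max_rss_queues is 8, rss_queues of 4 or fewer keeps a
 * separate MSI-X vector per Tx and per Rx queue, while rss_queues of
 * 5..8 sets IGB_FLAG_QUEUE_PAIRS so each Tx/Rx pair shares one vector,
 * halving the number of vectors the device has to supply.
 */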
4007
4008 /**
4009 * igb_sw_init - Initialize general software structures (struct igb_adapter)
4010 * @adapter: board private structure to initialize
4011 *
4012 * igb_sw_init initializes the Adapter private data structure.
4013 * Fields are initialized based on PCI device information and
4014 * OS network device settings (MTU size).
4015 **/
4016 static int igb_sw_init(struct igb_adapter *adapter)
4017 {
4018 struct e1000_hw *hw = &adapter->hw;
4019 struct net_device *netdev = adapter->netdev;
4020 struct pci_dev *pdev = adapter->pdev;
4021
4022 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4023
4024 /* set default ring sizes */
4025 adapter->tx_ring_count = IGB_DEFAULT_TXD;
4026 adapter->rx_ring_count = IGB_DEFAULT_RXD;
4027
4028 /* set default ITR values */
4029 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
4030 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
4031
4032 /* set default work limits */
4033 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
4034
4035 adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
4036 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4037
4038 spin_lock_init(&adapter->nfc_lock);
4039 spin_lock_init(&adapter->stats64_lock);
4040
4041 /* init spinlock to avoid concurrency of VF resources */
4042 spin_lock_init(&adapter->vfs_lock);
4043 #ifdef CONFIG_PCI_IOV
4044 switch (hw->mac.type) {
4045 case e1000_82576:
4046 case e1000_i350:
4047 if (max_vfs > 7) {
4048 dev_warn(&pdev->dev,
4049 "Maximum of 7 VFs per PF, using max\n");
4050 max_vfs = adapter->vfs_allocated_count = 7;
4051 } else
4052 adapter->vfs_allocated_count = max_vfs;
4053 if (adapter->vfs_allocated_count)
4054 dev_warn(&pdev->dev,
4055 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
4056 break;
4057 default:
4058 break;
4059 }
4060 #endif /* CONFIG_PCI_IOV */
4061
4062 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
4063 adapter->flags |= IGB_FLAG_HAS_MSIX;
4064
4065 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
4066 sizeof(struct igb_mac_addr),
4067 GFP_KERNEL);
4068 if (!adapter->mac_table)
4069 return -ENOMEM;
4070
4071 igb_probe_vfs(adapter);
4072
4073 igb_init_queue_configuration(adapter);
4074
4075 /* Setup and initialize a copy of the hw vlan table array */
4076 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
4077 GFP_KERNEL);
4078 if (!adapter->shadow_vfta)
4079 return -ENOMEM;
4080
4081 /* This call may decrease the number of queues */
4082 if (igb_init_interrupt_scheme(adapter, true)) {
4083 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4084 return -ENOMEM;
4085 }
4086
4087 /* Explicitly disable IRQ since the NIC can be in any state. */
4088 igb_irq_disable(adapter);
4089
4090 if (hw->mac.type >= e1000_i350)
4091 adapter->flags &= ~IGB_FLAG_DMAC;
4092
4093 set_bit(__IGB_DOWN, &adapter->state);
4094 return 0;
4095 }
4096
4097 /**
4098 * __igb_open - Called when a network interface is made active
4099 * @netdev: network interface device structure
4100 * @resuming: indicates whether we are in a resume call
4101 *
4102 * Returns 0 on success, negative value on failure
4103 *
4104 * The open entry point is called when a network interface is made
4105 * active by the system (IFF_UP). At this point all resources needed
4106 * for transmit and receive operations are allocated, the interrupt
4107 * handler is registered with the OS, the watchdog timer is started,
4108 * and the stack is notified that the interface is ready.
4109 **/
4110 static int __igb_open(struct net_device *netdev, bool resuming)
4111 {
4112 struct igb_adapter *adapter = netdev_priv(netdev);
4113 struct e1000_hw *hw = &adapter->hw;
4114 struct pci_dev *pdev = adapter->pdev;
4115 int err;
4116 int i;
4117
4118 /* disallow open during test */
4119 if (test_bit(__IGB_TESTING, &adapter->state)) {
4120 WARN_ON(resuming);
4121 return -EBUSY;
4122 }
4123
4124 if (!resuming)
4125 pm_runtime_get_sync(&pdev->dev);
4126
4127 netif_carrier_off(netdev);
4128
4129 /* allocate transmit descriptors */
4130 err = igb_setup_all_tx_resources(adapter);
4131 if (err)
4132 goto err_setup_tx;
4133
4134 /* allocate receive descriptors */
4135 err = igb_setup_all_rx_resources(adapter);
4136 if (err)
4137 goto err_setup_rx;
4138
4139 igb_power_up_link(adapter);
4140
4141 /* before we allocate an interrupt, we must be ready to handle it.
4142 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4143 * as soon as we call pci_request_irq, so we have to setup our
4144 * clean_rx handler before we do so.
4145 */
4146 igb_configure(adapter);
4147
4148 err = igb_request_irq(adapter);
4149 if (err)
4150 goto err_req_irq;
4151
4152 /* Notify the stack of the actual queue counts. */
4153 err = netif_set_real_num_tx_queues(adapter->netdev,
4154 adapter->num_tx_queues);
4155 if (err)
4156 goto err_set_queues;
4157
4158 err = netif_set_real_num_rx_queues(adapter->netdev,
4159 adapter->num_rx_queues);
4160 if (err)
4161 goto err_set_queues;
4162
4163 /* From here on the code is the same as igb_up() */
4164 clear_bit(__IGB_DOWN, &adapter->state);
4165
4166 for (i = 0; i < adapter->num_q_vectors; i++)
4167 napi_enable(&(adapter->q_vector[i]->napi));
4168
4169 /* Clear any pending interrupts. */
4170 rd32(E1000_TSICR);
4171 rd32(E1000_ICR);
4172
4173 igb_irq_enable(adapter);
4174
4175 /* notify VFs that reset has been completed */
4176 if (adapter->vfs_allocated_count) {
4177 u32 reg_data = rd32(E1000_CTRL_EXT);
4178
4179 reg_data |= E1000_CTRL_EXT_PFRSTD;
4180 wr32(E1000_CTRL_EXT, reg_data);
4181 }
4182
4183 netif_tx_start_all_queues(netdev);
4184
4185 if (!resuming)
4186 pm_runtime_put(&pdev->dev);
4187
4188 /* start the watchdog. */
4189 hw->mac.get_link_status = 1;
4190 schedule_work(&adapter->watchdog_task);
4191
4192 return 0;
4193
4194 err_set_queues:
4195 igb_free_irq(adapter);
4196 err_req_irq:
4197 igb_release_hw_control(adapter);
4198 igb_power_down_link(adapter);
4199 igb_free_all_rx_resources(adapter);
4200 err_setup_rx:
4201 igb_free_all_tx_resources(adapter);
4202 err_setup_tx:
4203 igb_reset(adapter);
4204 if (!resuming)
4205 pm_runtime_put(&pdev->dev);
4206
4207 return err;
4208 }
4209
4210 int igb_open(struct net_device *netdev)
4211 {
4212 return __igb_open(netdev, false);
4213 }
4214
4215 /**
4216 * __igb_close - Disables a network interface
4217 * @netdev: network interface device structure
4218 * @suspending: indicates we are in a suspend call
4219 *
4220 * Returns 0, this is not allowed to fail
4221 *
4222 * The close entry point is called when an interface is de-activated
4223 * by the OS. The hardware is still under the driver's control, but
4224 * needs to be disabled. A global MAC reset is issued to stop the
4225 * hardware, and all transmit and receive resources are freed.
4226 **/
4227 static int __igb_close(struct net_device *netdev, bool suspending)
4228 {
4229 struct igb_adapter *adapter = netdev_priv(netdev);
4230 struct pci_dev *pdev = adapter->pdev;
4231
4232 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4233
4234 if (!suspending)
4235 pm_runtime_get_sync(&pdev->dev);
4236
4237 igb_down(adapter);
4238 igb_free_irq(adapter);
4239
4240 igb_free_all_tx_resources(adapter);
4241 igb_free_all_rx_resources(adapter);
4242
4243 if (!suspending)
4244 pm_runtime_put_sync(&pdev->dev);
4245 return 0;
4246 }
4247
4248 int igb_close(struct net_device *netdev)
4249 {
4250 if (netif_device_present(netdev) || netdev->dismantle)
4251 return __igb_close(netdev, false);
4252 return 0;
4253 }
4254
4255 /**
4256 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
4257 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4258 *
4259 * Return 0 on success, negative on failure
4260 **/
4261 int igb_setup_tx_resources(struct igb_ring *tx_ring)
4262 {
4263 struct device *dev = tx_ring->dev;
4264 int size;
4265
4266 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4267
4268 tx_ring->tx_buffer_info = vmalloc(size);
4269 if (!tx_ring->tx_buffer_info)
4270 goto err;
4271
4272 /* round up to nearest 4K */
4273 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4274 tx_ring->size = ALIGN(tx_ring->size, 4096);
4275
4276 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4277 &tx_ring->dma, GFP_KERNEL);
4278 if (!tx_ring->desc)
4279 goto err;
4280
4281 tx_ring->next_to_use = 0;
4282 tx_ring->next_to_clean = 0;
4283
4284 return 0;
4285
4286 err:
4287 vfree(tx_ring->tx_buffer_info);
4288 tx_ring->tx_buffer_info = NULL;
4289 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4290 return -ENOMEM;
4291 }
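/* Sizing note for the allocation above: union e1000_adv_tx_desc is
 * 16 bytes, so a default ring of 256 descriptors needs exactly 4096
 * bytes, and ALIGN(..., 4096) keeps the ring on the 4 KiB boundary the
 * TDBAL/TDBAH base address registers expect.
 */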
4292
4293 /**
4294 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
4295 * (Descriptors) for all queues
4296 * @adapter: board private structure
4297 *
4298 * Return 0 on success, negative on failure
4299 **/
4300 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4301 {
4302 struct pci_dev *pdev = adapter->pdev;
4303 int i, err = 0;
4304
4305 for (i = 0; i < adapter->num_tx_queues; i++) {
4306 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4307 if (err) {
4308 dev_err(&pdev->dev,
4309 "Allocation for Tx Queue %u failed\n", i);
4310 for (i--; i >= 0; i--)
4311 igb_free_tx_resources(adapter->tx_ring[i]);
4312 break;
4313 }
4314 }
4315
4316 return err;
4317 }
4318
4319 /**
4320 * igb_setup_tctl - configure the transmit control registers
4321 * @adapter: Board private structure
4322 **/
4323 void igb_setup_tctl(struct igb_adapter *adapter)
4324 {
4325 struct e1000_hw *hw = &adapter->hw;
4326 u32 tctl;
4327
4328 /* disable queue 0 which is enabled by default on 82575 and 82576 */
4329 wr32(E1000_TXDCTL(0), 0);
4330
4331 /* Program the Transmit Control Register */
4332 tctl = rd32(E1000_TCTL);
4333 tctl &= ~E1000_TCTL_CT;
4334 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4335 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4336
4337 igb_config_collision_dist(hw);
4338
4339 /* Enable transmits */
4340 tctl |= E1000_TCTL_EN;
4341
4342 wr32(E1000_TCTL, tctl);
4343 }
4344
4345 /**
4346 * igb_configure_tx_ring - Configure transmit ring after Reset
4347 * @adapter: board private structure
4348 * @ring: tx ring to configure
4349 *
4350 * Configure a transmit ring after a reset.
4351 **/
4352 void igb_configure_tx_ring(struct igb_adapter *adapter,
4353 struct igb_ring *ring)
4354 {
4355 struct e1000_hw *hw = &adapter->hw;
4356 u32 txdctl = 0;
4357 u64 tdba = ring->dma;
4358 int reg_idx = ring->reg_idx;
4359
4360 wr32(E1000_TDLEN(reg_idx),
4361 ring->count * sizeof(union e1000_adv_tx_desc));
4362 wr32(E1000_TDBAL(reg_idx),
4363 tdba & 0x00000000ffffffffULL);
4364 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4365
4366 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4367 wr32(E1000_TDH(reg_idx), 0);
4368 writel(0, ring->tail);
4369
4370 txdctl |= IGB_TX_PTHRESH;
4371 txdctl |= IGB_TX_HTHRESH << 8;
4372 txdctl |= IGB_TX_WTHRESH << 16;
4373
4374 /* reinitialize tx_buffer_info */
4375 memset(ring->tx_buffer_info, 0,
4376 sizeof(struct igb_tx_buffer) * ring->count);
4377
4378 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4379 wr32(E1000_TXDCTL(reg_idx), txdctl);
4380 }
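/* TXDCTL field layout assumed above (per the 82575/82576-family
 * datasheets): PTHRESH in bits 5:0, HTHRESH in bits 13:8 and WTHRESH in
 * bits 21:16, which is why the three IGB_TX_*THRESH values are OR'ed in
 * at shifts 0, 8 and 16 before the queue-enable bit is set.
 */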
4381
4382 /**
4383 * igb_configure_tx - Configure transmit Unit after Reset
4384 * @adapter: board private structure
4385 *
4386 * Configure the Tx unit of the MAC after a reset.
4387 **/
4388 static void igb_configure_tx(struct igb_adapter *adapter)
4389 {
4390 struct e1000_hw *hw = &adapter->hw;
4391 int i;
4392
4393 /* disable the queues */
4394 for (i = 0; i < adapter->num_tx_queues; i++)
4395 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4396
4397 wrfl();
4398 usleep_range(10000, 20000);
4399
4400 for (i = 0; i < adapter->num_tx_queues; i++)
4401 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4402 }
4403
4404 /**
4405 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
4406 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
4407 *
4408 * Returns 0 on success, negative on failure
4409 **/
4410 int igb_setup_rx_resources(struct igb_ring *rx_ring)
4411 {
4412 struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
4413 struct device *dev = rx_ring->dev;
4414 int size, res;
4415
4416 /* XDP RX-queue info */
4417 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
4418 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4419 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
4420 rx_ring->queue_index, 0);
4421 if (res < 0) {
4422 dev_err(dev, "Failed to register xdp_rxq index %u\n",
4423 rx_ring->queue_index);
4424 return res;
4425 }
4426
4427 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4428
4429 rx_ring->rx_buffer_info = vmalloc(size);
4430 if (!rx_ring->rx_buffer_info)
4431 goto err;
4432
4433 /* Round up to nearest 4K */
4434 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4435 rx_ring->size = ALIGN(rx_ring->size, 4096);
4436
4437 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4438 &rx_ring->dma, GFP_KERNEL);
4439 if (!rx_ring->desc)
4440 goto err;
4441
4442 rx_ring->next_to_alloc = 0;
4443 rx_ring->next_to_clean = 0;
4444 rx_ring->next_to_use = 0;
4445
4446 rx_ring->xdp_prog = adapter->xdp_prog;
4447
4448 return 0;
4449
4450 err:
4451 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4452 vfree(rx_ring->rx_buffer_info);
4453 rx_ring->rx_buffer_info = NULL;
4454 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4455 return -ENOMEM;
4456 }
4457
4458 /**
4459 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
4460 * (Descriptors) for all queues
4461 * @adapter: board private structure
4462 *
4463 * Return 0 on success, negative on failure
4464 **/
4465 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4466 {
4467 struct pci_dev *pdev = adapter->pdev;
4468 int i, err = 0;
4469
4470 for (i = 0; i < adapter->num_rx_queues; i++) {
4471 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4472 if (err) {
4473 dev_err(&pdev->dev,
4474 "Allocation for Rx Queue %u failed\n", i);
4475 for (i--; i >= 0; i--)
4476 igb_free_rx_resources(adapter->rx_ring[i]);
4477 break;
4478 }
4479 }
4480
4481 return err;
4482 }
4483
4484 /**
4485 * igb_setup_mrqc - configure the multiple receive queue control registers
4486 * @adapter: Board private structure
4487 **/
4488 static void igb_setup_mrqc(struct igb_adapter *adapter)
4489 {
4490 struct e1000_hw *hw = &adapter->hw;
4491 u32 mrqc, rxcsum;
4492 u32 j, num_rx_queues;
4493 u32 rss_key[10];
4494
4495 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4496 for (j = 0; j < 10; j++)
4497 wr32(E1000_RSSRK(j), rss_key[j]);
4498
4499 num_rx_queues = adapter->rss_queues;
4500
4501 switch (hw->mac.type) {
4502 case e1000_82576:
4503 /* 82576 supports 2 RSS queues for SR-IOV */
4504 if (adapter->vfs_allocated_count)
4505 num_rx_queues = 2;
4506 break;
4507 default:
4508 break;
4509 }
4510
4511 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4512 for (j = 0; j < IGB_RETA_SIZE; j++)
4513 adapter->rss_indir_tbl[j] =
4514 (j * num_rx_queues) / IGB_RETA_SIZE;
4515 adapter->rss_indir_tbl_init = num_rx_queues;
4516 }
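/* Illustrative spread from the formula above: with IGB_RETA_SIZE of 128
 * and num_rx_queues of 4, indirection entries 0..31 map to queue 0,
 * 32..63 to queue 1, and so on, giving each queue an equal slice of the
 * RSS hash space.
 */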
4517 igb_write_rss_indir_tbl(adapter);
4518
4519 /* Disable raw packet checksumming so that RSS hash is placed in
4520 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
4521 * offloads as they are enabled by default
4522 */
4523 rxcsum = rd32(E1000_RXCSUM);
4524 rxcsum |= E1000_RXCSUM_PCSD;
4525
4526 if (adapter->hw.mac.type >= e1000_82576)
4527 /* Enable Receive Checksum Offload for SCTP */
4528 rxcsum |= E1000_RXCSUM_CRCOFL;
4529
4530 /* Don't need to set TUOFL or IPOFL, they default to 1 */
4531 wr32(E1000_RXCSUM, rxcsum);
4532
4533 /* Generate RSS hash based on packet types, TCP/UDP
4534 * port numbers and/or IPv4/v6 src and dst addresses
4535 */
4536 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4537 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4538 E1000_MRQC_RSS_FIELD_IPV6 |
4539 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4540 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4541
4542 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4543 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4544 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4545 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4546
4547 /* If VMDq is enabled then we set the appropriate mode for that, else
4548 * we default to RSS so that an RSS hash is calculated per packet even
4549 * if we are only using one queue
4550 */
4551 if (adapter->vfs_allocated_count) {
4552 if (hw->mac.type > e1000_82575) {
4553 /* Set the default pool for the PF's first queue */
4554 u32 vtctl = rd32(E1000_VT_CTL);
4555
4556 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4557 E1000_VT_CTL_DISABLE_DEF_POOL);
4558 vtctl |= adapter->vfs_allocated_count <<
4559 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4560 wr32(E1000_VT_CTL, vtctl);
4561 }
4562 if (adapter->rss_queues > 1)
4563 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4564 else
4565 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4566 } else {
4567 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4568 }
4569 igb_vmm_control(adapter);
4570
4571 wr32(E1000_MRQC, mrqc);
4572 }
4573
4574 /**
4575 * igb_setup_rctl - configure the receive control registers
4576 * @adapter: Board private structure
4577 **/
4578 void igb_setup_rctl(struct igb_adapter *adapter)
4579 {
4580 struct e1000_hw *hw = &adapter->hw;
4581 u32 rctl;
4582
4583 rctl = rd32(E1000_RCTL);
4584
4585 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4586 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4587
4588 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4589 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4590
4591 /* enable stripping of CRC. It's unlikely this will break BMC
4592 * redirection as it did with e1000. Newer features require
4593 * that the HW strips the CRC.
4594 */
4595 rctl |= E1000_RCTL_SECRC;
4596
4597 /* disable store bad packets and clear size bits. */
4598 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4599
4600 /* enable LPE to allow for reception of jumbo frames */
4601 rctl |= E1000_RCTL_LPE;
4602
4603 /* disable queue 0 to prevent tail write w/o re-config */
4604 wr32(E1000_RXDCTL(0), 0);
4605
4606 /* Attention!!! For SR-IOV PF driver operations you must enable
4607 * queue drop for all VF and PF queues to prevent head of line blocking
4608 * if an un-trusted VF does not provide descriptors to hardware.
4609 */
4610 if (adapter->vfs_allocated_count) {
4611 /* set all queue drop enable bits */
4612 wr32(E1000_QDE, ALL_QUEUES);
4613 }
4614
4615 /* This is useful for sniffing bad packets. */
4616 if (adapter->netdev->features & NETIF_F_RXALL) {
4617 /* UPE and MPE will be handled by normal PROMISC logic
4618 * in e1000e_set_rx_mode
4619 */
4620 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
4621 E1000_RCTL_BAM | /* RX All Bcast Pkts */
4622 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
4623
4624 rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
4625 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
4626 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
4627 * and that breaks VLANs.
4628 */
4629 }
4630
4631 wr32(E1000_RCTL, rctl);
4632 }
4633
4634 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4635 int vfn)
4636 {
4637 struct e1000_hw *hw = &adapter->hw;
4638 u32 vmolr;
4639
4640 if (size > MAX_JUMBO_FRAME_SIZE)
4641 size = MAX_JUMBO_FRAME_SIZE;
4642
4643 vmolr = rd32(E1000_VMOLR(vfn));
4644 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4645 vmolr |= size | E1000_VMOLR_LPE;
4646 wr32(E1000_VMOLR(vfn), vmolr);
4647
4648 return 0;
4649 }
4650
4651 static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4652 int vfn, bool enable)
4653 {
4654 struct e1000_hw *hw = &adapter->hw;
4655 u32 val, reg;
4656
4657 if (hw->mac.type < e1000_82576)
4658 return;
4659
4660 if (hw->mac.type == e1000_i350)
4661 reg = E1000_DVMOLR(vfn);
4662 else
4663 reg = E1000_VMOLR(vfn);
4664
4665 val = rd32(reg);
4666 if (enable)
4667 val |= E1000_VMOLR_STRVLAN;
4668 else
4669 val &= ~(E1000_VMOLR_STRVLAN);
4670 wr32(reg, val);
4671 }
4672
4673 static inline void igb_set_vmolr(struct igb_adapter *adapter,
4674 int vfn, bool aupe)
4675 {
4676 struct e1000_hw *hw = &adapter->hw;
4677 u32 vmolr;
4678
4679 /* This register exists only on 82576 and newer so if we are older then
4680 * we should exit and do nothing
4681 */
4682 if (hw->mac.type < e1000_82576)
4683 return;
4684
4685 vmolr = rd32(E1000_VMOLR(vfn));
4686 if (aupe)
4687 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
4688 else
4689 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
4690
4691 /* clear all bits that might not be set */
4692 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4693
4694 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4695 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
4696 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
4697 * multicast packets
4698 */
4699 if (vfn <= adapter->vfs_allocated_count)
4700 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
4701
4702 wr32(E1000_VMOLR(vfn), vmolr);
4703 }
4704
4705 /**
4706 * igb_setup_srrctl - configure the split and replication receive control
4707 * registers
4708 * @adapter: Board private structure
4709 * @ring: receive ring to be configured
4710 **/
4711 void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4712 {
4713 struct e1000_hw *hw = &adapter->hw;
4714 int reg_idx = ring->reg_idx;
4715 u32 srrctl = 0;
4716
4717 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4718 if (ring_uses_large_buffer(ring))
4719 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4720 else
4721 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4722 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4723 if (hw->mac.type >= e1000_82580)
4724 srrctl |= E1000_SRRCTL_TIMESTAMP;
4725 /* Only set Drop Enable if VFs allocated, or we are supporting multiple
4726 * queues and rx flow control is disabled
4727 */
4728 if (adapter->vfs_allocated_count ||
4729 (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4730 adapter->num_rx_queues > 1))
4731 srrctl |= E1000_SRRCTL_DROP_EN;
4732
4733 wr32(E1000_SRRCTL(reg_idx), srrctl);
4734 }
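/* Note on the buffer-size math above: E1000_SRRCTL_BSIZEPKT_SHIFT is 10,
 * so packet buffer sizes are programmed in 1 KiB units -
 * IGB_RXBUFFER_2048 >> 10 gives 2 and IGB_RXBUFFER_3072 >> 10 gives 3.
 */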
4735
4736 /**
4737 * igb_configure_rx_ring - Configure a receive ring after Reset
4738 * @adapter: board private structure
4739 * @ring: receive ring to be configured
4740 *
4741 * Configure the Rx unit of the MAC after a reset.
4742 **/
4743 void igb_configure_rx_ring(struct igb_adapter *adapter,
4744 struct igb_ring *ring)
4745 {
4746 struct e1000_hw *hw = &adapter->hw;
4747 union e1000_adv_rx_desc *rx_desc;
4748 u64 rdba = ring->dma;
4749 int reg_idx = ring->reg_idx;
4750 u32 rxdctl = 0;
4751
4752 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4753 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4754 MEM_TYPE_PAGE_SHARED, NULL));
4755
4756 /* disable the queue */
4757 wr32(E1000_RXDCTL(reg_idx), 0);
4758
4759 /* Set DMA base address registers */
4760 wr32(E1000_RDBAL(reg_idx),
4761 rdba & 0x00000000ffffffffULL);
4762 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4763 wr32(E1000_RDLEN(reg_idx),
4764 ring->count * sizeof(union e1000_adv_rx_desc));
4765
4766 /* initialize head and tail */
4767 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4768 wr32(E1000_RDH(reg_idx), 0);
4769 writel(0, ring->tail);
4770
4771 /* set descriptor configuration */
4772 igb_setup_srrctl(adapter, ring);
4773
4774 /* set filtering for VMDQ pools */
4775 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4776
4777 rxdctl |= IGB_RX_PTHRESH;
4778 rxdctl |= IGB_RX_HTHRESH << 8;
4779 rxdctl |= IGB_RX_WTHRESH << 16;
4780
4781 /* initialize rx_buffer_info */
4782 memset(ring->rx_buffer_info, 0,
4783 sizeof(struct igb_rx_buffer) * ring->count);
4784
4785 /* initialize Rx descriptor 0 */
4786 rx_desc = IGB_RX_DESC(ring, 0);
4787 rx_desc->wb.upper.length = 0;
4788
4789 /* enable receive descriptor fetching */
4790 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4791 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4792 }
4793
4794 static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4795 struct igb_ring *rx_ring)
4796 {
4797 #if (PAGE_SIZE < 8192)
4798 struct e1000_hw *hw = &adapter->hw;
4799 #endif
4800
4801 /* set build_skb and buffer size flags */
4802 clear_ring_build_skb_enabled(rx_ring);
4803 clear_ring_uses_large_buffer(rx_ring);
4804
4805 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4806 return;
4807
4808 set_ring_build_skb_enabled(rx_ring);
4809
4810 #if (PAGE_SIZE < 8192)
4811 if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
4812 rd32(E1000_RCTL) & E1000_RCTL_SBP)
4813 set_ring_uses_large_buffer(rx_ring);
4814 #endif
4815 }
4816
4817 /**
4818 * igb_configure_rx - Configure receive Unit after Reset
4819 * @adapter: board private structure
4820 *
4821 * Configure the Rx unit of the MAC after a reset.
4822 **/
4823 static void igb_configure_rx(struct igb_adapter *adapter)
4824 {
4825 int i;
4826
4827 /* set the correct pool for the PF default MAC address in entry 0 */
4828 igb_set_default_mac_filter(adapter);
4829
4830 /* Setup the HW Rx Head and Tail Descriptor Pointers and
4831 * the Base and Length of the Rx Descriptor Ring
4832 */
4833 for (i = 0; i < adapter->num_rx_queues; i++) {
4834 struct igb_ring *rx_ring = adapter->rx_ring[i];
4835
4836 igb_set_rx_buffer_len(adapter, rx_ring);
4837 igb_configure_rx_ring(adapter, rx_ring);
4838 }
4839 }
4840
4841 /**
4842 * igb_free_tx_resources - Free Tx Resources per Queue
4843 * @tx_ring: Tx descriptor ring for a specific queue
4844 *
4845 * Free all transmit software resources
4846 **/
4847 void igb_free_tx_resources(struct igb_ring *tx_ring)
4848 {
4849 igb_clean_tx_ring(tx_ring);
4850
4851 vfree(tx_ring->tx_buffer_info);
4852 tx_ring->tx_buffer_info = NULL;
4853
4854 /* if not set, then don't free */
4855 if (!tx_ring->desc)
4856 return;
4857
4858 dma_free_coherent(tx_ring->dev, tx_ring->size,
4859 tx_ring->desc, tx_ring->dma);
4860
4861 tx_ring->desc = NULL;
4862 }
4863
4864 /**
4865 * igb_free_all_tx_resources - Free Tx Resources for All Queues
4866 * @adapter: board private structure
4867 *
4868 * Free all transmit software resources
4869 **/
4870 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4871 {
4872 int i;
4873
4874 for (i = 0; i < adapter->num_tx_queues; i++)
4875 if (adapter->tx_ring[i])
4876 igb_free_tx_resources(adapter->tx_ring[i]);
4877 }
4878
4879 /**
4880 * igb_clean_tx_ring - Free Tx Buffers
4881 * @tx_ring: ring to be cleaned
4882 **/
4883 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4884 {
4885 u16 i = tx_ring->next_to_clean;
4886 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4887
4888 while (i != tx_ring->next_to_use) {
4889 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4890
4891 /* Free all the Tx ring sk_buffs or xdp frames */
4892 if (tx_buffer->type == IGB_TYPE_SKB)
4893 dev_kfree_skb_any(tx_buffer->skb);
4894 else
4895 xdp_return_frame(tx_buffer->xdpf);
4896
4897 /* unmap skb header data */
4898 dma_unmap_single(tx_ring->dev,
4899 dma_unmap_addr(tx_buffer, dma),
4900 dma_unmap_len(tx_buffer, len),
4901 DMA_TO_DEVICE);
4902
4903 /* check for eop_desc to determine the end of the packet */
4904 eop_desc = tx_buffer->next_to_watch;
4905 tx_desc = IGB_TX_DESC(tx_ring, i);
4906
4907 /* unmap remaining buffers */
4908 while (tx_desc != eop_desc) {
4909 tx_buffer++;
4910 tx_desc++;
4911 i++;
4912 if (unlikely(i == tx_ring->count)) {
4913 i = 0;
4914 tx_buffer = tx_ring->tx_buffer_info;
4915 tx_desc = IGB_TX_DESC(tx_ring, 0);
4916 }
4917
4918 /* unmap any remaining paged data */
4919 if (dma_unmap_len(tx_buffer, len))
4920 dma_unmap_page(tx_ring->dev,
4921 dma_unmap_addr(tx_buffer, dma),
4922 dma_unmap_len(tx_buffer, len),
4923 DMA_TO_DEVICE);
4924 }
4925
4926 tx_buffer->next_to_watch = NULL;
4927
4928 /* move us one more past the eop_desc for start of next pkt */
4929 tx_buffer++;
4930 i++;
4931 if (unlikely(i == tx_ring->count)) {
4932 i = 0;
4933 tx_buffer = tx_ring->tx_buffer_info;
4934 }
4935 }
4936
4937 /* reset BQL for queue */
4938 netdev_tx_reset_queue(txring_txq(tx_ring));
4939
4940 /* reset next_to_use and next_to_clean */
4941 tx_ring->next_to_use = 0;
4942 tx_ring->next_to_clean = 0;
4943 }
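/* The walk above is two nested loops: the outer loop advances one packet
 * at a time (the first buffer of a packet owns the skb or xdp frame and
 * the header mapping), while the inner loop unmaps any extra page
 * fragments until it reaches the descriptor saved in next_to_watch,
 * which marks the end of that packet.
 */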
4944
4945 /**
4946 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
4947 * @adapter: board private structure
4948 **/
4949 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4950 {
4951 int i;
4952
4953 for (i = 0; i < adapter->num_tx_queues; i++)
4954 if (adapter->tx_ring[i])
4955 igb_clean_tx_ring(adapter->tx_ring[i]);
4956 }
4957
4958 /**
4959 * igb_free_rx_resources - Free Rx Resources
4960 * @rx_ring: ring to clean the resources from
4961 *
4962 * Free all receive software resources
4963 **/
4964 void igb_free_rx_resources(struct igb_ring *rx_ring)
4965 {
4966 igb_clean_rx_ring(rx_ring);
4967
4968 rx_ring->xdp_prog = NULL;
4969 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4970 vfree(rx_ring->rx_buffer_info);
4971 rx_ring->rx_buffer_info = NULL;
4972
4973 /* if not set, then don't free */
4974 if (!rx_ring->desc)
4975 return;
4976
4977 dma_free_coherent(rx_ring->dev, rx_ring->size,
4978 rx_ring->desc, rx_ring->dma);
4979
4980 rx_ring->desc = NULL;
4981 }
4982
4983 /**
4984 * igb_free_all_rx_resources - Free Rx Resources for All Queues
4985 * @adapter: board private structure
4986 *
4987 * Free all receive software resources
4988 **/
4989 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4990 {
4991 int i;
4992
4993 for (i = 0; i < adapter->num_rx_queues; i++)
4994 if (adapter->rx_ring[i])
4995 igb_free_rx_resources(adapter->rx_ring[i]);
4996 }
4997
4998 /**
4999 * igb_clean_rx_ring - Free Rx Buffers per Queue
5000 * @rx_ring: ring to free buffers from
5001 **/
5002 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
5003 {
5004 u16 i = rx_ring->next_to_clean;
5005
5006 dev_kfree_skb(rx_ring->skb);
5007 rx_ring->skb = NULL;
5008
5009 /* Free all the Rx ring sk_buffs */
5010 while (i != rx_ring->next_to_alloc) {
5011 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
5012
5013 /* Invalidate cache lines that may have been written to by
5014 * device so that we avoid corrupting memory.
5015 */
5016 dma_sync_single_range_for_cpu(rx_ring->dev,
5017 buffer_info->dma,
5018 buffer_info->page_offset,
5019 igb_rx_bufsz(rx_ring),
5020 DMA_FROM_DEVICE);
5021
5022 /* free resources associated with mapping */
5023 dma_unmap_page_attrs(rx_ring->dev,
5024 buffer_info->dma,
5025 igb_rx_pg_size(rx_ring),
5026 DMA_FROM_DEVICE,
5027 IGB_RX_DMA_ATTR);
5028 __page_frag_cache_drain(buffer_info->page,
5029 buffer_info->pagecnt_bias);
5030
5031 i++;
5032 if (i == rx_ring->count)
5033 i = 0;
5034 }
5035
5036 rx_ring->next_to_alloc = 0;
5037 rx_ring->next_to_clean = 0;
5038 rx_ring->next_to_use = 0;
5039 }
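/* The unmap above passes IGB_RX_DMA_ATTR, which (as defined in igb.h)
 * skips the implicit CPU sync, so the explicit
 * dma_sync_single_range_for_cpu() beforehand is what invalidates cache
 * lines the device may have written; __page_frag_cache_drain() then
 * releases the references tracked in pagecnt_bias in one call.
 */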
5040
5041 /**
5042 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
5043 * @adapter: board private structure
5044 **/
5045 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
5046 {
5047 int i;
5048
5049 for (i = 0; i < adapter->num_rx_queues; i++)
5050 if (adapter->rx_ring[i])
5051 igb_clean_rx_ring(adapter->rx_ring[i]);
5052 }
5053
5054 /**
5055 * igb_set_mac - Change the Ethernet Address of the NIC
5056 * @netdev: network interface device structure
5057 * @p: pointer to an address structure
5058 *
5059 * Returns 0 on success, negative on failure
5060 **/
5061 static int igb_set_mac(struct net_device *netdev, void *p)
5062 {
5063 struct igb_adapter *adapter = netdev_priv(netdev);
5064 struct e1000_hw *hw = &adapter->hw;
5065 struct sockaddr *addr = p;
5066
5067 if (!is_valid_ether_addr(addr->sa_data))
5068 return -EADDRNOTAVAIL;
5069
5070 eth_hw_addr_set(netdev, addr->sa_data);
5071 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5072
5073 /* set the correct pool for the new PF MAC address in entry 0 */
5074 igb_set_default_mac_filter(adapter);
5075
5076 return 0;
5077 }
5078
5079 /**
5080 * igb_write_mc_addr_list - write multicast addresses to MTA
5081 * @netdev: network interface device structure
5082 *
5083 * Writes multicast address list to the MTA hash table.
5084 * Returns: -ENOMEM on failure
5085 * 0 on no addresses written
5086 * X on writing X addresses to MTA
5087 **/
5088 static int igb_write_mc_addr_list(struct net_device *netdev)
5089 {
5090 struct igb_adapter *adapter = netdev_priv(netdev);
5091 struct e1000_hw *hw = &adapter->hw;
5092 struct netdev_hw_addr *ha;
5093 u8 *mta_list;
5094 int i;
5095
5096 if (netdev_mc_empty(netdev)) {
5097 /* nothing to program, so clear mc list */
5098 igb_update_mc_addr_list(hw, NULL, 0);
5099 igb_restore_vf_multicasts(adapter);
5100 return 0;
5101 }
5102
5103 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
5104 if (!mta_list)
5105 return -ENOMEM;
5106
5107 /* The shared function expects a packed array of only addresses. */
5108 i = 0;
5109 netdev_for_each_mc_addr(ha, netdev)
5110 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
5111
5112 igb_update_mc_addr_list(hw, mta_list, i);
5113 kfree(mta_list);
5114
5115 return netdev_mc_count(netdev);
5116 }
5117
5118 static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
5119 {
5120 struct e1000_hw *hw = &adapter->hw;
5121 u32 i, pf_id;
5122
5123 switch (hw->mac.type) {
5124 case e1000_i210:
5125 case e1000_i211:
5126 case e1000_i350:
5127 /* VLAN filtering needed for VLAN prio filter */
5128 if (adapter->netdev->features & NETIF_F_NTUPLE)
5129 break;
5130 fallthrough;
5131 case e1000_82576:
5132 case e1000_82580:
5133 case e1000_i354:
5134 /* VLAN filtering needed for pool filtering */
5135 if (adapter->vfs_allocated_count)
5136 break;
5137 fallthrough;
5138 default:
5139 return 1;
5140 }
5141
5142 /* We are already in VLAN promisc, nothing to do */
5143 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
5144 return 0;
5145
5146 if (!adapter->vfs_allocated_count)
5147 goto set_vfta;
5148
5149 /* Add PF to all active pools */
5150 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5151
5152 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5153 u32 vlvf = rd32(E1000_VLVF(i));
5154
5155 vlvf |= BIT(pf_id);
5156 wr32(E1000_VLVF(i), vlvf);
5157 }
5158
5159 set_vfta:
5160 /* Set all bits in the VLAN filter table array */
5161 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
5162 hw->mac.ops.write_vfta(hw, i, ~0U);
5163
5164 /* Set flag so we don't redo unnecessary work */
5165 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
5166
5167 return 0;
5168 }
5169
5170 #define VFTA_BLOCK_SIZE 8
5171 static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
5172 {
5173 struct e1000_hw *hw = &adapter->hw;
5174 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
5175 u32 vid_start = vfta_offset * 32;
5176 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
5177 u32 i, vid, word, bits, pf_id;
5178
5179 /* guarantee that we don't scrub out management VLAN */
5180 vid = adapter->mng_vlan_id;
5181 if (vid >= vid_start && vid < vid_end)
5182 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5183
5184 if (!adapter->vfs_allocated_count)
5185 goto set_vfta;
5186
5187 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5188
5189 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5190 u32 vlvf = rd32(E1000_VLVF(i));
5191
5192 /* pull VLAN ID from VLVF */
5193 vid = vlvf & VLAN_VID_MASK;
5194
5195 /* only concern ourselves with a certain range */
5196 if (vid < vid_start || vid >= vid_end)
5197 continue;
5198
5199 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
5200 /* record VLAN ID in VFTA */
5201 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5202
5203 /* if PF is part of this then continue */
5204 if (test_bit(vid, adapter->active_vlans))
5205 continue;
5206 }
5207
5208 /* remove PF from the pool */
5209 bits = ~BIT(pf_id);
5210 bits &= rd32(E1000_VLVF(i));
5211 wr32(E1000_VLVF(i), bits);
5212 }
5213
5214 set_vfta:
5215 /* extract values from active_vlans and write back to VFTA */
5216 for (i = VFTA_BLOCK_SIZE; i--;) {
5217 vid = (vfta_offset + i) * 32;
5218 word = vid / BITS_PER_LONG;
5219 bits = vid % BITS_PER_LONG;
5220
5221 vfta[i] |= adapter->active_vlans[word] >> bits;
5222
5223 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
5224 }
5225 }
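/* VFTA indexing used above, with a worked example: the table is 32-bit
 * words with one bit per VLAN ID, so VID 1234 lives in word 1234 / 32 =
 * 38 at bit 1234 % 32 = 18. Each scrub pass covers VFTA_BLOCK_SIZE (8)
 * words, i.e. a window of 256 VIDs starting at vid_start.
 */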
5226
5227 static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
5228 {
5229 u32 i;
5230
5231 /* We are not in VLAN promisc, nothing to do */
5232 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
5233 return;
5234
5235 /* Set flag so we don't redo unnecessary work */
5236 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
5237
5238 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
5239 igb_scrub_vfta(adapter, i);
5240 }
5241
5242 /**
5243 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
5244 * @netdev: network interface device structure
5245 *
5246 * The set_rx_mode entry point is called whenever the unicast or multicast
5247 * address lists or the network interface flags are updated. This routine is
5248 * responsible for configuring the hardware for proper unicast, multicast,
5249 * promiscuous mode, and all-multi behavior.
5250 **/
5251 static void igb_set_rx_mode(struct net_device *netdev)
5252 {
5253 struct igb_adapter *adapter = netdev_priv(netdev);
5254 struct e1000_hw *hw = &adapter->hw;
5255 unsigned int vfn = adapter->vfs_allocated_count;
5256 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
5257 int count;
5258
5259 /* Check for Promiscuous and All Multicast modes */
5260 if (netdev->flags & IFF_PROMISC) {
5261 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5262 vmolr |= E1000_VMOLR_MPME;
5263
5264 /* enable use of UTA filter to force packets to default pool */
5265 if (hw->mac.type == e1000_82576)
5266 vmolr |= E1000_VMOLR_ROPE;
5267 } else {
5268 if (netdev->flags & IFF_ALLMULTI) {
5269 rctl |= E1000_RCTL_MPE;
5270 vmolr |= E1000_VMOLR_MPME;
5271 } else {
5272 /* Write addresses to the MTA, if the attempt fails
5273 * then we should just turn on promiscuous mode so
5274 * that we can at least receive multicast traffic
5275 */
5276 count = igb_write_mc_addr_list(netdev);
5277 if (count < 0) {
5278 rctl |= E1000_RCTL_MPE;
5279 vmolr |= E1000_VMOLR_MPME;
5280 } else if (count) {
5281 vmolr |= E1000_VMOLR_ROMPE;
5282 }
5283 }
5284 }
5285
5286 /* Write addresses to available RAR registers, if there is not
5287 * sufficient space to store all the addresses then enable
5288 * unicast promiscuous mode
5289 */
5290 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5291 rctl |= E1000_RCTL_UPE;
5292 vmolr |= E1000_VMOLR_ROPE;
5293 }
5294
5295 /* enable VLAN filtering by default */
5296 rctl |= E1000_RCTL_VFE;
5297
5298 /* disable VLAN filtering for modes that require it */
5299 if ((netdev->flags & IFF_PROMISC) ||
5300 (netdev->features & NETIF_F_RXALL)) {
5301 /* if we fail to set all rules then just clear VFE */
5302 if (igb_vlan_promisc_enable(adapter))
5303 rctl &= ~E1000_RCTL_VFE;
5304 } else {
5305 igb_vlan_promisc_disable(adapter);
5306 }
5307
5308 /* update state of unicast, multicast, and VLAN filtering modes */
5309 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5310 E1000_RCTL_VFE);
5311 wr32(E1000_RCTL, rctl);
5312
5313 #if (PAGE_SIZE < 8192)
5314 if (!adapter->vfs_allocated_count) {
5315 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5316 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5317 }
5318 #endif
5319 wr32(E1000_RLPML, rlpml);
5320
5321 /* In order to support SR-IOV and eventually VMDq it is necessary to set
5322 * the VMOLR to enable the appropriate modes. Without this workaround
5323 * we will have issues with VLAN tag stripping not being done for frames
5324 * that are only arriving because we are the default pool
5325 */
5326 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5327 return;
5328
5329 /* set UTA to appropriate mode */
5330 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5331
5332 vmolr |= rd32(E1000_VMOLR(vfn)) &
5333 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5334
5335 /* enable Rx jumbo frames, restrict as needed to support build_skb */
5336 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5337 #if (PAGE_SIZE < 8192)
5338 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5339 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5340 else
5341 #endif
5342 vmolr |= MAX_JUMBO_FRAME_SIZE;
5343 vmolr |= E1000_VMOLR_LPE;
5344
5345 wr32(E1000_VMOLR(vfn), vmolr);
5346
5347 igb_restore_vf_multicasts(adapter);
5348 }
5349
5350 static void igb_check_wvbr(struct igb_adapter *adapter)
5351 {
5352 struct e1000_hw *hw = &adapter->hw;
5353 u32 wvbr = 0;
5354
5355 switch (hw->mac.type) {
5356 case e1000_82576:
5357 case e1000_i350:
5358 wvbr = rd32(E1000_WVBR);
5359 if (!wvbr)
5360 return;
5361 break;
5362 default:
5363 break;
5364 }
5365
5366 adapter->wvbr |= wvbr;
5367 }
5368
5369 #define IGB_STAGGERED_QUEUE_OFFSET 8
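/* Sketch of the WVBR layout as used below (inferred from the bit checks,
 * not from the datasheet): a spoof event for VF j can be flagged at bit j
 * or at bit j + IGB_STAGGERED_QUEUE_OFFSET, presumably one bit per queue
 * of the VF's queue pair; VF 2, for example, is checked at bits 2 and 10.
 */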
5370
5371 static void igb_spoof_check(struct igb_adapter *adapter)
5372 {
5373 int j;
5374
5375 if (!adapter->wvbr)
5376 return;
5377
5378 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5379 if (adapter->wvbr & BIT(j) ||
5380 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5381 dev_warn(&adapter->pdev->dev,
5382 "Spoof event(s) detected on VF %d\n", j);
5383 adapter->wvbr &=
5384 ~(BIT(j) |
5385 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5386 }
5387 }
5388 }
5389
5390 /* Need to wait a few seconds after link up to get diagnostic information from
5391 * the phy
5392 */
5393 static void igb_update_phy_info(struct timer_list *t)
5394 {
5395 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5396 igb_get_phy_info(&adapter->hw);
5397 }
5398
5399 /**
5400 * igb_has_link - check shared code for link and determine up/down
5401 * @adapter: pointer to driver private info
5402 **/
5403 bool igb_has_link(struct igb_adapter *adapter)
5404 {
5405 struct e1000_hw *hw = &adapter->hw;
5406 bool link_active = false;
5407
5408 /* get_link_status is set on LSC (link status) interrupt or
5409 * rx sequence error interrupt. get_link_status will stay
5410 * true until the e1000_check_for_link establishes link
5411 * for copper adapters ONLY
5412 */
5413 switch (hw->phy.media_type) {
5414 case e1000_media_type_copper:
5415 if (!hw->mac.get_link_status)
5416 return true;
5417 fallthrough;
5418 case e1000_media_type_internal_serdes:
5419 hw->mac.ops.check_for_link(hw);
5420 link_active = !hw->mac.get_link_status;
5421 break;
5422 default:
5423 case e1000_media_type_unknown:
5424 break;
5425 }
5426
5427 if (((hw->mac.type == e1000_i210) ||
5428 (hw->mac.type == e1000_i211)) &&
5429 (hw->phy.id == I210_I_PHY_ID)) {
5430 if (!netif_carrier_ok(adapter->netdev)) {
5431 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5432 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5433 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5434 adapter->link_check_timeout = jiffies;
5435 }
5436 }
5437
5438 return link_active;
5439 }
5440
5441 static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5442 {
5443 bool ret = false;
5444 u32 ctrl_ext, thstat;
5445
5446 /* check for thermal sensor event on i350 copper only */
5447 if (hw->mac.type == e1000_i350) {
5448 thstat = rd32(E1000_THSTAT);
5449 ctrl_ext = rd32(E1000_CTRL_EXT);
5450
5451 if ((hw->phy.media_type == e1000_media_type_copper) &&
5452 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5453 ret = !!(thstat & event);
5454 }
5455
5456 return ret;
5457 }
5458
5459 /**
5460 * igb_check_lvmmc - check for malformed packets received
5461 * and indicated in LVMMC register
5462 * @adapter: pointer to adapter
5463 **/
5464 static void igb_check_lvmmc(struct igb_adapter *adapter)
5465 {
5466 struct e1000_hw *hw = &adapter->hw;
5467 u32 lvmmc;
5468
5469 lvmmc = rd32(E1000_LVMMC);
5470 if (lvmmc) {
5471 if (unlikely(net_ratelimit())) {
5472 netdev_warn(adapter->netdev,
5473 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5474 lvmmc);
5475 }
5476 }
5477 }
5478
5479 /**
5480 * igb_watchdog - Timer Call-back
5481 * @t: pointer to timer_list containing our private info pointer
5482 **/
5483 static void igb_watchdog(struct timer_list *t)
5484 {
5485 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5486 /* Do the rest outside of interrupt context */
5487 schedule_work(&adapter->watchdog_task);
5488 }
5489
5490 static void igb_watchdog_task(struct work_struct *work)
5491 {
5492 struct igb_adapter *adapter = container_of(work,
5493 struct igb_adapter,
5494 watchdog_task);
5495 struct e1000_hw *hw = &adapter->hw;
5496 struct e1000_phy_info *phy = &hw->phy;
5497 struct net_device *netdev = adapter->netdev;
5498 u32 link;
5499 int i;
5500 u32 connsw;
5501 u16 phy_data, retry_count = 20;
5502
5503 link = igb_has_link(adapter);
5504
5505 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5506 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5507 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5508 else
5509 link = false;
5510 }
5511
5512 /* Force link down if we have fiber to swap to */
5513 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5514 if (hw->phy.media_type == e1000_media_type_copper) {
5515 connsw = rd32(E1000_CONNSW);
5516 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5517 link = 0;
5518 }
5519 }
5520 if (link) {
5521 /* Perform a reset if the media type changed. */
5522 if (hw->dev_spec._82575.media_changed) {
5523 hw->dev_spec._82575.media_changed = false;
5524 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5525 igb_reset(adapter);
5526 }
5527 /* Cancel scheduled suspend requests. */
5528 pm_runtime_resume(netdev->dev.parent);
5529
5530 if (!netif_carrier_ok(netdev)) {
5531 u32 ctrl;
5532
5533 hw->mac.ops.get_speed_and_duplex(hw,
5534 &adapter->link_speed,
5535 &adapter->link_duplex);
5536
5537 ctrl = rd32(E1000_CTRL);
5538 /* Link status message must follow this format */
5539 netdev_info(netdev,
5540 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5541 netdev->name,
5542 adapter->link_speed,
5543 adapter->link_duplex == FULL_DUPLEX ?
5544 "Full" : "Half",
5545 (ctrl & E1000_CTRL_TFCE) &&
5546 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5547 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5548 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5549
5550 /* disable EEE if enabled */
5551 if ((adapter->flags & IGB_FLAG_EEE) &&
5552 (adapter->link_duplex == HALF_DUPLEX)) {
5553 dev_info(&adapter->pdev->dev,
5554 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5555 adapter->hw.dev_spec._82575.eee_disable = true;
5556 adapter->flags &= ~IGB_FLAG_EEE;
5557 }
5558
5559 /* check if SmartSpeed worked */
5560 igb_check_downshift(hw);
5561 if (phy->speed_downgraded)
5562 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5563
5564 /* check for thermal sensor event */
5565 if (igb_thermal_sensor_event(hw,
5566 E1000_THSTAT_LINK_THROTTLE))
5567 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5568
5569 /* adjust timeout factor according to speed/duplex */
5570 adapter->tx_timeout_factor = 1;
5571 switch (adapter->link_speed) {
5572 case SPEED_10:
5573 adapter->tx_timeout_factor = 14;
5574 break;
5575 case SPEED_100:
5576 /* maybe add some timeout factor ? */
5577 break;
5578 }
5579
5580 if (adapter->link_speed != SPEED_1000 ||
5581 !hw->phy.ops.read_reg)
5582 goto no_wait;
5583
5584 /* wait for Remote receiver status OK */
5585 retry_read_status:
5586 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5587 &phy_data)) {
5588 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5589 retry_count) {
5590 msleep(100);
5591 retry_count--;
5592 goto retry_read_status;
5593 } else if (!retry_count) {
5594 dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for Remote receiver status OK\n");
5595 }
5596 } else {
5597 dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status Reg\n");
5598 }
5599 no_wait:
5600 netif_carrier_on(netdev);
5601
5602 igb_ping_all_vfs(adapter);
5603 igb_check_vf_rate_limit(adapter);
5604
5605 /* link state has changed, schedule phy info update */
5606 if (!test_bit(__IGB_DOWN, &adapter->state))
5607 mod_timer(&adapter->phy_info_timer,
5608 round_jiffies(jiffies + 2 * HZ));
5609 }
5610 } else {
5611 if (netif_carrier_ok(netdev)) {
5612 adapter->link_speed = 0;
5613 adapter->link_duplex = 0;
5614
5615 /* check for thermal sensor event */
5616 if (igb_thermal_sensor_event(hw,
5617 E1000_THSTAT_PWR_DOWN)) {
5618 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5619 }
5620
5621 /* Link status message must follow this format */
5622 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5623 netdev->name);
5624 netif_carrier_off(netdev);
5625
5626 igb_ping_all_vfs(adapter);
5627
5628 /* link state has changed, schedule phy info update */
5629 if (!test_bit(__IGB_DOWN, &adapter->state))
5630 mod_timer(&adapter->phy_info_timer,
5631 round_jiffies(jiffies + 2 * HZ));
5632
5633 /* link is down, time to check for alternate media */
5634 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5635 igb_check_swap_media(adapter);
5636 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5637 schedule_work(&adapter->reset_task);
5638 /* return immediately */
5639 return;
5640 }
5641 }
5642 pm_schedule_suspend(netdev->dev.parent,
5643 MSEC_PER_SEC * 5);
5644
5645 /* also check for alternate media here */
5646 } else if (!netif_carrier_ok(netdev) &&
5647 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5648 igb_check_swap_media(adapter);
5649 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5650 schedule_work(&adapter->reset_task);
5651 /* return immediately */
5652 return;
5653 }
5654 }
5655 }
5656
5657 spin_lock(&adapter->stats64_lock);
5658 igb_update_stats(adapter);
5659 spin_unlock(&adapter->stats64_lock);
5660
5661 for (i = 0; i < adapter->num_tx_queues; i++) {
5662 struct igb_ring *tx_ring = adapter->tx_ring[i];
5663 if (!netif_carrier_ok(netdev)) {
5664 /* We've lost link, so the controller stops DMA,
5665 * but we've got queued Tx work that's never going
5666 * to get done, so reset controller to flush Tx.
5667 * (Do the reset outside of interrupt context).
5668 */
5669 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5670 adapter->tx_timeout_count++;
5671 schedule_work(&adapter->reset_task);
5672 /* return immediately since reset is imminent */
5673 return;
5674 }
5675 }
5676
5677 /* Force detection of hung controller every watchdog period */
5678 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5679 }
5680
5681 /* Cause software interrupt to ensure Rx ring is cleaned */
5682 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5683 u32 eics = 0;
5684
5685 for (i = 0; i < adapter->num_q_vectors; i++)
5686 eics |= adapter->q_vector[i]->eims_value;
5687 wr32(E1000_EICS, eics);
5688 } else {
5689 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5690 }
5691
5692 igb_spoof_check(adapter);
5693 igb_ptp_rx_hang(adapter);
5694 igb_ptp_tx_hang(adapter);
5695
5696 /* Check LVMMC register on i350/i354 only */
5697 if ((adapter->hw.mac.type == e1000_i350) ||
5698 (adapter->hw.mac.type == e1000_i354))
5699 igb_check_lvmmc(adapter);
5700
5701 /* Reset the timer */
5702 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5703 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5704 mod_timer(&adapter->watchdog_timer,
5705 round_jiffies(jiffies + HZ));
5706 else
5707 mod_timer(&adapter->watchdog_timer,
5708 round_jiffies(jiffies + 2 * HZ));
5709 }
5710 }
5711
5712 enum latency_range {
5713 lowest_latency = 0,
5714 low_latency = 1,
5715 bulk_latency = 2,
5716 latency_invalid = 255
5717 };
5718
5719 /**
5720 * igb_update_ring_itr - update the dynamic ITR value based on packet size
5721 * @q_vector: pointer to q_vector
5722 *
5723 * Stores a new ITR value based strictly on packet size. This
5724 * algorithm is less sophisticated than that used in igb_update_itr,
5725 * due to the difficulty of synchronizing statistics across multiple
5726 * receive rings. The divisors and thresholds used by this function
5727 * were determined based on theoretical maximum wire speed and testing
5728 * data, in order to minimize response time while increasing bulk
5729 * throughput.
5730 * This functionality is controlled by ethtool's coalescing settings.
5731 * NOTE: This function is called only when operating in a multiqueue
5732 * receive environment.
5733 **/
5734 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5735 {
5736 int new_val = q_vector->itr_val;
5737 int avg_wire_size = 0;
5738 struct igb_adapter *adapter = q_vector->adapter;
5739 unsigned int packets;
5740
5741 /* For non-gigabit speeds, just fix the interrupt rate at 4000
5742 * ints/sec - ITR timer value of 120 ticks.
5743 */
5744 if (adapter->link_speed != SPEED_1000) {
5745 new_val = IGB_4K_ITR;
5746 goto set_itr_val;
5747 }
5748
5749 packets = q_vector->rx.total_packets;
5750 if (packets)
5751 avg_wire_size = q_vector->rx.total_bytes / packets;
5752
5753 packets = q_vector->tx.total_packets;
5754 if (packets)
5755 avg_wire_size = max_t(u32, avg_wire_size,
5756 q_vector->tx.total_bytes / packets);
5757
5758 /* if avg_wire_size isn't set no work was done */
5759 if (!avg_wire_size)
5760 goto clear_counts;
5761
5762 /* Add 24 bytes to size to account for CRC, preamble, and gap */
5763 avg_wire_size += 24;
5764
5765 /* Don't starve jumbo frames */
5766 avg_wire_size = min(avg_wire_size, 3000);
5767
5768 /* Give a little boost to mid-size frames */
5769 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5770 new_val = avg_wire_size / 3;
5771 else
5772 new_val = avg_wire_size / 2;
5773
5774 /* conservative mode (itr 3) eliminates the lowest_latency setting */
5775 if (new_val < IGB_20K_ITR &&
5776 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5777 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5778 new_val = IGB_20K_ITR;
5779
5780 set_itr_val:
5781 if (new_val != q_vector->itr_val) {
5782 q_vector->itr_val = new_val;
5783 q_vector->set_itr = 1;
5784 }
5785 clear_counts:
5786 q_vector->rx.total_bytes = 0;
5787 q_vector->rx.total_packets = 0;
5788 q_vector->tx.total_bytes = 0;
5789 q_vector->tx.total_packets = 0;
5790 }
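/* Worked example of the heuristic above: standard 1500-byte frames give
 * avg_wire_size = 1500 + 24 = 1524, which misses the (300, 1200) mid-size
 * boost range, so new_val = 1524 / 2 = 762 ITR ticks -- a longer interval
 * (fewer interrupts) than small-packet traffic would get, trading a
 * little latency for bulk throughput.
 */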
5791
5792 /**
5793 * igb_update_itr - update the dynamic ITR value based on statistics
5794 * @q_vector: pointer to q_vector
5795 * @ring_container: ring info to update the itr for
5796 *
5797 * Stores a new ITR value based on packets and byte
5798 * counts during the last interrupt. The advantage of per interrupt
5799 * computation is faster updates and more accurate ITR for the current
5800 * traffic pattern. Constants in this function were computed
5801 * based on theoretical maximum wire speed and thresholds were set based
5802 * on testing data as well as attempting to minimize response time
5803 * while increasing bulk throughput.
5804 * This functionality is controlled by ethtool's coalescing settings.
5805 * NOTE: These calculations are only valid when operating in a single-
5806 * queue environment.
5807 **/
5808 static void igb_update_itr(struct igb_q_vector *q_vector,
5809 struct igb_ring_container *ring_container)
5810 {
5811 unsigned int packets = ring_container->total_packets;
5812 unsigned int bytes = ring_container->total_bytes;
5813 u8 itrval = ring_container->itr;
5814
5815 /* no packets, exit with status unchanged */
5816 if (packets == 0)
5817 return;
5818
5819 switch (itrval) {
5820 case lowest_latency:
5821 /* handle TSO and jumbo frames */
5822 if (bytes/packets > 8000)
5823 itrval = bulk_latency;
5824 else if ((packets < 5) && (bytes > 512))
5825 itrval = low_latency;
5826 break;
5827 case low_latency: /* 50 usec aka 20000 ints/s */
5828 if (bytes > 10000) {
5829 /* this if handles the TSO accounting */
5830 if (bytes/packets > 8000)
5831 itrval = bulk_latency;
5832 else if ((packets < 10) || ((bytes/packets) > 1200))
5833 itrval = bulk_latency;
5834 else if ((packets > 35))
5835 itrval = lowest_latency;
5836 } else if (bytes/packets > 2000) {
5837 itrval = bulk_latency;
5838 } else if (packets <= 2 && bytes < 512) {
5839 itrval = lowest_latency;
5840 }
5841 break;
5842 case bulk_latency: /* 250 usec aka 4000 ints/s */
5843 if (bytes > 25000) {
5844 if (packets > 35)
5845 itrval = low_latency;
5846 } else if (bytes < 1500) {
5847 itrval = low_latency;
5848 }
5849 break;
5850 }
5851
5852 /* clear work counters since we have the values we need */
5853 ring_container->total_bytes = 0;
5854 ring_container->total_packets = 0;
5855
5856 /* write updated itr to ring container */
5857 ring_container->itr = itrval;
5858 }
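/* Example walk of the table above: a queue in low_latency that saw
 * 40 packets / 48000 bytes since the last interrupt has bytes > 10000
 * and bytes/packets == 1200 (neither > 8000 nor > 1200), so packets > 35
 * promotes it to lowest_latency; a TSO-dominated mix averaging more than
 * 8000 bytes/packet would instead drop it to bulk_latency.
 */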
5859
5860 static void igb_set_itr(struct igb_q_vector *q_vector)
5861 {
5862 struct igb_adapter *adapter = q_vector->adapter;
5863 u32 new_itr = q_vector->itr_val;
5864 u8 current_itr = 0;
5865
5866 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
5867 if (adapter->link_speed != SPEED_1000) {
5868 current_itr = 0;
5869 new_itr = IGB_4K_ITR;
5870 goto set_itr_now;
5871 }
5872
5873 igb_update_itr(q_vector, &q_vector->tx);
5874 igb_update_itr(q_vector, &q_vector->rx);
5875
5876 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5877
5878 /* conservative mode (itr 3) eliminates the lowest_latency setting */
5879 if (current_itr == lowest_latency &&
5880 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5881 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5882 current_itr = low_latency;
5883
5884 switch (current_itr) {
5885 /* counts and packets in update_itr are dependent on these numbers */
5886 case lowest_latency:
5887 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
5888 break;
5889 case low_latency:
5890 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
5891 break;
5892 case bulk_latency:
5893 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
5894 break;
5895 default:
5896 break;
5897 }
5898
5899 set_itr_now:
5900 if (new_itr != q_vector->itr_val) {
5901 /* this attempts to bias the interrupt rate towards Bulk
5902 * by adding intermediate steps when interrupt rate is
5903 * increasing
5904 */
5905 new_itr = new_itr > q_vector->itr_val ?
5906 max((new_itr * q_vector->itr_val) /
5907 (new_itr + (q_vector->itr_val >> 2)),
5908 new_itr) : new_itr;
5909 /* Don't write the value here; it resets the adapter's
5910 * internal timer, and causes us to delay far longer than
5911 * we should between interrupts. Instead, we write the ITR
5912 * value at the beginning of the next interrupt so the timing
5913 * ends up being correct.
5914 */
5915 q_vector->itr_val = new_itr;
5916 q_vector->set_itr = 1;
5917 }
5918 }
5919
5920 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5921 struct igb_tx_buffer *first,
5922 u32 vlan_macip_lens, u32 type_tucmd,
5923 u32 mss_l4len_idx)
5924 {
5925 struct e1000_adv_tx_context_desc *context_desc;
5926 u16 i = tx_ring->next_to_use;
5927 struct timespec64 ts;
5928
5929 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5930
5931 i++;
5932 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5933
5934 /* set bits to identify this as an advanced context descriptor */
5935 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5936
5937 /* For 82575, context index must be unique per ring. */
5938 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5939 mss_l4len_idx |= tx_ring->reg_idx << 4;
5940
5941 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5942 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5943 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5944
5945 /* We assume there is always a valid tx time available. Invalid times
5946 * should have been handled by the upper layers.
5947 */
5948 if (tx_ring->launchtime_enable) {
5949 ts = ktime_to_timespec64(first->skb->tstamp);
5950 skb_txtime_consumed(first->skb);
5951 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5952 } else {
5953 context_desc->seqnum_seed = 0;
5954 }
5955 }
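/* Note on the launch-time encoding above: seqnum_seed is written as
 * tv_nsec / 32, i.e. the hardware appears to take the transmit time in
 * 32 ns units, so a 640000 ns offset, for example, is programmed as
 * 20000.
 */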
5956
5957 static int igb_tso(struct igb_ring *tx_ring,
5958 struct igb_tx_buffer *first,
5959 u8 *hdr_len)
5960 {
5961 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5962 struct sk_buff *skb = first->skb;
5963 union {
5964 struct iphdr *v4;
5965 struct ipv6hdr *v6;
5966 unsigned char *hdr;
5967 } ip;
5968 union {
5969 struct tcphdr *tcp;
5970 struct udphdr *udp;
5971 unsigned char *hdr;
5972 } l4;
5973 u32 paylen, l4_offset;
5974 int err;
5975
5976 if (skb->ip_summed != CHECKSUM_PARTIAL)
5977 return 0;
5978
5979 if (!skb_is_gso(skb))
5980 return 0;
5981
5982 err = skb_cow_head(skb, 0);
5983 if (err < 0)
5984 return err;
5985
5986 ip.hdr = skb_network_header(skb);
5987 l4.hdr = skb_checksum_start(skb);
5988
5989 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5990 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5991 E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
5992
5993 /* initialize outer IP header fields */
5994 if (ip.v4->version == 4) {
5995 unsigned char *csum_start = skb_checksum_start(skb);
5996 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5997
5998 /* IP header will have to cancel out any data that
5999 * is not a part of the outer IP header
6000 */
6001 ip.v4->check = csum_fold(csum_partial(trans_start,
6002 csum_start - trans_start,
6003 0));
6004 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
6005
6006 ip.v4->tot_len = 0;
6007 first->tx_flags |= IGB_TX_FLAGS_TSO |
6008 IGB_TX_FLAGS_CSUM |
6009 IGB_TX_FLAGS_IPV4;
6010 } else {
6011 ip.v6->payload_len = 0;
6012 first->tx_flags |= IGB_TX_FLAGS_TSO |
6013 IGB_TX_FLAGS_CSUM;
6014 }
6015
6016 /* determine offset of inner transport header */
6017 l4_offset = l4.hdr - skb->data;
6018
6019 /* remove payload length from inner checksum */
6020 paylen = skb->len - l4_offset;
6021 if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
6022 /* compute length of segmentation header */
6023 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
6024 csum_replace_by_diff(&l4.tcp->check,
6025 (__force __wsum)htonl(paylen));
6026 } else {
6027 /* compute length of segmentation header */
6028 *hdr_len = sizeof(*l4.udp) + l4_offset;
6029 csum_replace_by_diff(&l4.udp->check,
6030 (__force __wsum)htonl(paylen));
6031 }
6032
6033 /* update gso size and bytecount with header size */
6034 first->gso_segs = skb_shinfo(skb)->gso_segs;
6035 first->bytecount += (first->gso_segs - 1) * *hdr_len;
6036
6037 /* MSS L4LEN IDX */
6038 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
6039 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
6040
6041 /* VLAN MACLEN IPLEN */
6042 vlan_macip_lens = l4.hdr - ip.hdr;
6043 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
6044 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
6045
6046 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
6047 type_tucmd, mss_l4len_idx);
6048
6049 return 1;
6050 }
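/* Sketch of the context fields built above: for a TCP segment with a
 * 20-byte TCP header and an MSS of 1460 the packing is
 *
 *   mss_l4len_idx = (20 << E1000_ADVTXD_L4LEN_SHIFT) |
 *                   (1460 << E1000_ADVTXD_MSS_SHIFT);
 *
 * while *hdr_len covers MAC + IP + TCP headers, and first->bytecount has
 * been inflated by (gso_segs - 1) copies of that header so the byte
 * accounting matches what actually hits the wire.
 */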
6051
6052 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
6053 {
6054 struct sk_buff *skb = first->skb;
6055 u32 vlan_macip_lens = 0;
6056 u32 type_tucmd = 0;
6057
6058 if (skb->ip_summed != CHECKSUM_PARTIAL) {
6059 csum_failed:
6060 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
6061 !tx_ring->launchtime_enable)
6062 return;
6063 goto no_csum;
6064 }
6065
6066 switch (skb->csum_offset) {
6067 case offsetof(struct tcphdr, check):
6068 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
6069 fallthrough;
6070 case offsetof(struct udphdr, check):
6071 break;
6072 case offsetof(struct sctphdr, checksum):
6073 /* validate that this is actually an SCTP request */
6074 if (skb_csum_is_sctp(skb)) {
6075 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
6076 break;
6077 }
6078 fallthrough;
6079 default:
6080 skb_checksum_help(skb);
6081 goto csum_failed;
6082 }
6083
6084 /* update TX checksum flag */
6085 first->tx_flags |= IGB_TX_FLAGS_CSUM;
6086 vlan_macip_lens = skb_checksum_start_offset(skb) -
6087 skb_network_offset(skb);
6088 no_csum:
6089 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
6090 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
6091
6092 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
6093 }
6094
6095 #define IGB_SET_FLAG(_input, _flag, _result) \
6096 ((_flag <= _result) ? \
6097 ((u32)(_input & _flag) * (_result / _flag)) : \
6098 ((u32)(_input & _flag) / (_flag / _result)))
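/* IGB_SET_FLAG() relocates a flag bit into a register word by scaling
 * with a power-of-two ratio instead of branching: with _flag = BIT(1)
 * and _result = BIT(30), for example, the masked input is multiplied by
 * BIT(29), so a set flag becomes BIT(30) and a clear flag stays 0; the
 * constant ratio folds down to a single shift at compile time.
 */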
6099
6100 static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
6101 {
6102 /* set type for advanced descriptor with frame checksum insertion */
6103 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
6104 E1000_ADVTXD_DCMD_DEXT |
6105 E1000_ADVTXD_DCMD_IFCS;
6106
6107 /* set HW vlan bit if vlan is present */
6108 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
6109 (E1000_ADVTXD_DCMD_VLE));
6110
6111 /* set segmentation bits for TSO */
6112 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
6113 (E1000_ADVTXD_DCMD_TSE));
6114
6115 /* set timestamp bit if present */
6116 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
6117 (E1000_ADVTXD_MAC_TSTAMP));
6118
6119 /* clear the IFCS (insert frame checksum) bit when skb->no_fcs is set */
6120 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
6121
6122 return cmd_type;
6123 }
6124
6125 static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
6126 union e1000_adv_tx_desc *tx_desc,
6127 u32 tx_flags, unsigned int paylen)
6128 {
6129 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
6130
6131 /* 82575 requires a unique index per ring */
6132 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6133 olinfo_status |= tx_ring->reg_idx << 4;
6134
6135 /* insert L4 checksum */
6136 olinfo_status |= IGB_SET_FLAG(tx_flags,
6137 IGB_TX_FLAGS_CSUM,
6138 (E1000_TXD_POPTS_TXSM << 8));
6139
6140 /* insert IPv4 checksum */
6141 olinfo_status |= IGB_SET_FLAG(tx_flags,
6142 IGB_TX_FLAGS_IPV4,
6143 (E1000_TXD_POPTS_IXSM << 8));
6144
6145 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6146 }
6147
6148 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6149 {
6150 struct net_device *netdev = tx_ring->netdev;
6151
6152 netif_stop_subqueue(netdev, tx_ring->queue_index);
6153
6154 /* Herbert's original patch had:
6155 * smp_mb__after_netif_stop_queue();
6156 * but since that doesn't exist yet, just open code it.
6157 */
6158 smp_mb();
6159
6160 /* We need to check again in a case another CPU has just
6161 * made room available.
6162 */
6163 if (igb_desc_unused(tx_ring) < size)
6164 return -EBUSY;
6165
6166 /* A reprieve! */
6167 netif_wake_subqueue(netdev, tx_ring->queue_index);
6168
6169 u64_stats_update_begin(&tx_ring->tx_syncp2);
6170 tx_ring->tx_stats.restart_queue2++;
6171 u64_stats_update_end(&tx_ring->tx_syncp2);
6172
6173 return 0;
6174 }
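/* The smp_mb() above implements the usual stop/wake handshake: stop the
 * queue first, then re-check free descriptors, so a cleanup pass racing
 * in between either is observed here (and the queue is restarted above)
 * or itself sees the stopped queue and wakes it once space is reclaimed.
 */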
6175
6176 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6177 {
6178 if (igb_desc_unused(tx_ring) >= size)
6179 return 0;
6180 return __igb_maybe_stop_tx(tx_ring, size);
6181 }
6182
6183 static int igb_tx_map(struct igb_ring *tx_ring,
6184 struct igb_tx_buffer *first,
6185 const u8 hdr_len)
6186 {
6187 struct sk_buff *skb = first->skb;
6188 struct igb_tx_buffer *tx_buffer;
6189 union e1000_adv_tx_desc *tx_desc;
6190 skb_frag_t *frag;
6191 dma_addr_t dma;
6192 unsigned int data_len, size;
6193 u32 tx_flags = first->tx_flags;
6194 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
6195 u16 i = tx_ring->next_to_use;
6196
6197 tx_desc = IGB_TX_DESC(tx_ring, i);
6198
6199 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
6200
6201 size = skb_headlen(skb);
6202 data_len = skb->data_len;
6203
6204 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6205
6206 tx_buffer = first;
6207
6208 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6209 if (dma_mapping_error(tx_ring->dev, dma))
6210 goto dma_error;
6211
6212 /* record length, and DMA address */
6213 dma_unmap_len_set(tx_buffer, len, size);
6214 dma_unmap_addr_set(tx_buffer, dma, dma);
6215
6216 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6217
6218 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
6219 tx_desc->read.cmd_type_len =
6220 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
6221
6222 i++;
6223 tx_desc++;
6224 if (i == tx_ring->count) {
6225 tx_desc = IGB_TX_DESC(tx_ring, 0);
6226 i = 0;
6227 }
6228 tx_desc->read.olinfo_status = 0;
6229
6230 dma += IGB_MAX_DATA_PER_TXD;
6231 size -= IGB_MAX_DATA_PER_TXD;
6232
6233 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6234 }
6235
6236 if (likely(!data_len))
6237 break;
6238
6239 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6240
6241 i++;
6242 tx_desc++;
6243 if (i == tx_ring->count) {
6244 tx_desc = IGB_TX_DESC(tx_ring, 0);
6245 i = 0;
6246 }
6247 tx_desc->read.olinfo_status = 0;
6248
6249 size = skb_frag_size(frag);
6250 data_len -= size;
6251
6252 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6253 size, DMA_TO_DEVICE);
6254
6255 tx_buffer = &tx_ring->tx_buffer_info[i];
6256 }
6257
6258 /* write last descriptor with RS and EOP bits */
6259 cmd_type |= size | IGB_TXD_DCMD;
6260 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6261
6262 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6263
6264 /* set the timestamp */
6265 first->time_stamp = jiffies;
6266
6267 skb_tx_timestamp(skb);
6268
6269 /* Force memory writes to complete before letting h/w know there
6270 * are new descriptors to fetch. (Only applicable for weak-ordered
6271 * memory model archs, such as IA-64).
6272 *
6273 * We also need this memory barrier to make certain all of the
6274 * status bits have been updated before next_to_watch is written.
6275 */
6276 dma_wmb();
6277
6278 /* set next_to_watch value indicating a packet is present */
6279 first->next_to_watch = tx_desc;
6280
6281 i++;
6282 if (i == tx_ring->count)
6283 i = 0;
6284
6285 tx_ring->next_to_use = i;
6286
6287 /* Make sure there is space in the ring for the next send. */
6288 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6289
6290 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6291 writel(i, tx_ring->tail);
6292 }
6293 return 0;
6294
6295 dma_error:
6296 dev_err(tx_ring->dev, "TX DMA map failed\n");
6297 tx_buffer = &tx_ring->tx_buffer_info[i];
6298
6299 /* clear dma mappings for failed tx_buffer_info map */
6300 while (tx_buffer != first) {
6301 if (dma_unmap_len(tx_buffer, len))
6302 dma_unmap_page(tx_ring->dev,
6303 dma_unmap_addr(tx_buffer, dma),
6304 dma_unmap_len(tx_buffer, len),
6305 DMA_TO_DEVICE);
6306 dma_unmap_len_set(tx_buffer, len, 0);
6307
6308 if (i-- == 0)
6309 i += tx_ring->count;
6310 tx_buffer = &tx_ring->tx_buffer_info[i];
6311 }
6312
6313 if (dma_unmap_len(tx_buffer, len))
6314 dma_unmap_single(tx_ring->dev,
6315 dma_unmap_addr(tx_buffer, dma),
6316 dma_unmap_len(tx_buffer, len),
6317 DMA_TO_DEVICE);
6318 dma_unmap_len_set(tx_buffer, len, 0);
6319
6320 dev_kfree_skb_any(tx_buffer->skb);
6321 tx_buffer->skb = NULL;
6322
6323 tx_ring->next_to_use = i;
6324
6325 return -1;
6326 }
6327
6328 int igb_xmit_xdp_ring(struct igb_adapter *adapter,
6329 struct igb_ring *tx_ring,
6330 struct xdp_frame *xdpf)
6331 {
6332 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
6333 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
6334 u16 count, i, index = tx_ring->next_to_use;
6335 struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
6336 struct igb_tx_buffer *tx_buffer = tx_head;
6337 union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
6338 u32 len = xdpf->len, cmd_type, olinfo_status;
6339 void *data = xdpf->data;
6340
6341 count = TXD_USE_COUNT(len);
6342 for (i = 0; i < nr_frags; i++)
6343 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
6344
6345 if (igb_maybe_stop_tx(tx_ring, count + 3))
6346 return IGB_XDP_CONSUMED;
6347
6348 i = 0;
6349 /* record the location of the first descriptor for this packet */
6350 tx_head->bytecount = xdp_get_frame_len(xdpf);
6351 tx_head->type = IGB_TYPE_XDP;
6352 tx_head->gso_segs = 1;
6353 tx_head->xdpf = xdpf;
6354
6355 olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT;
6356 /* 82575 requires a unique index per ring */
6357 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6358 olinfo_status |= tx_ring->reg_idx << 4;
6359 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6360
6361 for (;;) {
6362 dma_addr_t dma;
6363
6364 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
6365 if (dma_mapping_error(tx_ring->dev, dma))
6366 goto unmap;
6367
6368 /* record length, and DMA address */
6369 dma_unmap_len_set(tx_buffer, len, len);
6370 dma_unmap_addr_set(tx_buffer, dma, dma);
6371
6372 /* put descriptor type bits */
6373 cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
6374 E1000_ADVTXD_DCMD_IFCS | len;
6375
6376 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6377 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6378
6379 tx_buffer->protocol = 0;
6380
6381 if (++index == tx_ring->count)
6382 index = 0;
6383
6384 if (i == nr_frags)
6385 break;
6386
6387 tx_buffer = &tx_ring->tx_buffer_info[index];
6388 tx_desc = IGB_TX_DESC(tx_ring, index);
6389 tx_desc->read.olinfo_status = 0;
6390
6391 data = skb_frag_address(&sinfo->frags[i]);
6392 len = skb_frag_size(&sinfo->frags[i]);
6393 i++;
6394 }
6395 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
6396
6397 netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
6398 /* set the timestamp */
6399 tx_head->time_stamp = jiffies;
6400
6401 /* Avoid any potential race with xdp_xmit and cleanup */
6402 smp_wmb();
6403
6404 /* set next_to_watch value indicating a packet is present */
6405 tx_head->next_to_watch = tx_desc;
6406 tx_ring->next_to_use = index;
6407
6408 /* Make sure there is space in the ring for the next send. */
6409 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6410
6411 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6412 writel(index, tx_ring->tail);
6413
6414 return IGB_XDP_TX;
6415
6416 unmap:
6417 for (;;) {
6418 tx_buffer = &tx_ring->tx_buffer_info[index];
6419 if (dma_unmap_len(tx_buffer, len))
6420 dma_unmap_page(tx_ring->dev,
6421 dma_unmap_addr(tx_buffer, dma),
6422 dma_unmap_len(tx_buffer, len),
6423 DMA_TO_DEVICE);
6424 dma_unmap_len_set(tx_buffer, len, 0);
6425 if (tx_buffer == tx_head)
6426 break;
6427
6428 if (!index)
6429 index += tx_ring->count;
6430 index--;
6431 }
6432
6433 return IGB_XDP_CONSUMED;
6434 }
6435
6436 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6437 struct igb_ring *tx_ring)
6438 {
6439 struct igb_tx_buffer *first;
6440 int tso;
6441 u32 tx_flags = 0;
6442 unsigned short f;
6443 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6444 __be16 protocol = vlan_get_protocol(skb);
6445 u8 hdr_len = 0;
6446
6447 /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
6448 * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
6449 * + 2 desc gap to keep tail from touching head,
6450 * + 1 desc for context descriptor,
6451 * otherwise try next time
6452 */
6453 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6454 count += TXD_USE_COUNT(skb_frag_size(
6455 &skb_shinfo(skb)->frags[f]));
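/* Worked example of the budget above, assuming IGB_MAX_DATA_PER_TXD is
 * the 32K used elsewhere in this driver: an skb with a 1400-byte linear
 * area and one 16K fragment needs TXD_USE_COUNT(1400) +
 * TXD_USE_COUNT(16384) = 2 data descriptors, so the check below asks for
 * 2 + 3 = 5 free slots (context descriptor plus the two-slot gap).
 */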
6456
6457 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6458 /* this is a hard error */
6459 return NETDEV_TX_BUSY;
6460 }
6461
6462 /* record the location of the first descriptor for this packet */
6463 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6464 first->type = IGB_TYPE_SKB;
6465 first->skb = skb;
6466 first->bytecount = skb->len;
6467 first->gso_segs = 1;
6468
6469 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6470 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6471
6472 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6473 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6474 &adapter->state)) {
6475 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6476 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6477
6478 adapter->ptp_tx_skb = skb_get(skb);
6479 adapter->ptp_tx_start = jiffies;
6480 if (adapter->hw.mac.type == e1000_82576)
6481 schedule_work(&adapter->ptp_tx_work);
6482 } else {
6483 adapter->tx_hwtstamp_skipped++;
6484 }
6485 }
6486
6487 if (skb_vlan_tag_present(skb)) {
6488 tx_flags |= IGB_TX_FLAGS_VLAN;
6489 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6490 }
6491
6492 /* record initial flags and protocol */
6493 first->tx_flags = tx_flags;
6494 first->protocol = protocol;
6495
6496 tso = igb_tso(tx_ring, first, &hdr_len);
6497 if (tso < 0)
6498 goto out_drop;
6499 else if (!tso)
6500 igb_tx_csum(tx_ring, first);
6501
6502 if (igb_tx_map(tx_ring, first, hdr_len))
6503 goto cleanup_tx_tstamp;
6504
6505 return NETDEV_TX_OK;
6506
6507 out_drop:
6508 dev_kfree_skb_any(first->skb);
6509 first->skb = NULL;
6510 cleanup_tx_tstamp:
6511 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6512 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6513
6514 dev_kfree_skb_any(adapter->ptp_tx_skb);
6515 adapter->ptp_tx_skb = NULL;
6516 if (adapter->hw.mac.type == e1000_82576)
6517 cancel_work_sync(&adapter->ptp_tx_work);
6518 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6519 }
6520
6521 return NETDEV_TX_OK;
6522 }
6523
6524 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6525 struct sk_buff *skb)
6526 {
6527 unsigned int r_idx = skb->queue_mapping;
6528
6529 if (r_idx >= adapter->num_tx_queues)
6530 r_idx = r_idx % adapter->num_tx_queues;
6531
6532 return adapter->tx_ring[r_idx];
6533 }
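/* e.g. with 4 Tx queues a stack-selected queue_mapping of 6 folds to
 * ring 6 % 4 == 2, so out-of-range mappings still land on a valid ring.
 */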
6534
6535 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6536 struct net_device *netdev)
6537 {
6538 struct igb_adapter *adapter = netdev_priv(netdev);
6539
6540 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
6541 * in order to meet this minimum size requirement.
6542 */
6543 if (skb_put_padto(skb, 17))
6544 return NETDEV_TX_OK;
6545
6546 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6547 }
6548
6549 /**
6550 * igb_tx_timeout - Respond to a Tx Hang
6551 * @netdev: network interface device structure
6552 * @txqueue: number of the Tx queue that hung (unused)
6553 **/
6554 static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6555 {
6556 struct igb_adapter *adapter = netdev_priv(netdev);
6557 struct e1000_hw *hw = &adapter->hw;
6558
6559 /* Do the reset outside of interrupt context */
6560 adapter->tx_timeout_count++;
6561
6562 if (hw->mac.type >= e1000_82580)
6563 hw->dev_spec._82575.global_device_reset = true;
6564
6565 schedule_work(&adapter->reset_task);
6566 wr32(E1000_EICS,
6567 (adapter->eims_enable_mask & ~adapter->eims_other));
6568 }
6569
6570 static void igb_reset_task(struct work_struct *work)
6571 {
6572 struct igb_adapter *adapter;
6573 adapter = container_of(work, struct igb_adapter, reset_task);
6574
6575 rtnl_lock();
6576 /* If we're already down or resetting, just bail */
6577 if (test_bit(__IGB_DOWN, &adapter->state) ||
6578 test_bit(__IGB_RESETTING, &adapter->state)) {
6579 rtnl_unlock();
6580 return;
6581 }
6582
6583 igb_dump(adapter);
6584 netdev_err(adapter->netdev, "Reset adapter\n");
6585 igb_reinit_locked(adapter);
6586 rtnl_unlock();
6587 }
6588
6589 /**
6590 * igb_get_stats64 - Get System Network Statistics
6591 * @netdev: network interface device structure
6592 * @stats: rtnl_link_stats64 pointer
6593 **/
6594 static void igb_get_stats64(struct net_device *netdev,
6595 struct rtnl_link_stats64 *stats)
6596 {
6597 struct igb_adapter *adapter = netdev_priv(netdev);
6598
6599 spin_lock(&adapter->stats64_lock);
6600 igb_update_stats(adapter);
6601 memcpy(stats, &adapter->stats64, sizeof(*stats));
6602 spin_unlock(&adapter->stats64_lock);
6603 }
6604
6605 /**
6606 * igb_change_mtu - Change the Maximum Transfer Unit
6607 * @netdev: network interface device structure
6608 * @new_mtu: new value for maximum frame size
6609 *
6610 * Returns 0 on success, negative on failure
6611 **/
6612 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6613 {
6614 struct igb_adapter *adapter = netdev_priv(netdev);
6615 int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
6616
6617 if (adapter->xdp_prog) {
6618 int i;
6619
6620 for (i = 0; i < adapter->num_rx_queues; i++) {
6621 struct igb_ring *ring = adapter->rx_ring[i];
6622
6623 if (max_frame > igb_rx_bufsz(ring)) {
6624 netdev_warn(adapter->netdev,
6625 "Requested MTU size is not supported with XDP. Max frame size is %d\n",
6626 max_frame);
6627 return -EINVAL;
6628 }
6629 }
6630 }
6631
6632 /* adjust max frame to be at least the size of a standard frame */
6633 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6634 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6635
6636 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6637 usleep_range(1000, 2000);
6638
6639 /* igb_down has a dependency on max_frame_size */
6640 adapter->max_frame_size = max_frame;
6641
6642 if (netif_running(netdev))
6643 igb_down(adapter);
6644
6645 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6646 netdev->mtu, new_mtu);
6647 WRITE_ONCE(netdev->mtu, new_mtu);
6648
6649 if (netif_running(netdev))
6650 igb_up(adapter);
6651 else
6652 igb_reset(adapter);
6653
6654 clear_bit(__IGB_RESETTING, &adapter->state);
6655
6656 return 0;
6657 }
6658
6659 /**
6660 * igb_update_stats - Update the board statistics counters
6661 * @adapter: board private structure
6662 **/
6663 void igb_update_stats(struct igb_adapter *adapter)
6664 {
6665 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6666 struct e1000_hw *hw = &adapter->hw;
6667 struct pci_dev *pdev = adapter->pdev;
6668 u32 reg, mpc;
6669 int i;
6670 u64 bytes, packets;
6671 unsigned int start;
6672 u64 _bytes, _packets;
6673
6674 /* Prevent stats update while adapter is being reset, or if the pci
6675 * connection is down.
6676 */
6677 if (adapter->link_speed == 0)
6678 return;
6679 if (pci_channel_offline(pdev))
6680 return;
6681
6682 bytes = 0;
6683 packets = 0;
6684
6685 rcu_read_lock();
6686 for (i = 0; i < adapter->num_rx_queues; i++) {
6687 struct igb_ring *ring = adapter->rx_ring[i];
6688 u32 rqdpc = rd32(E1000_RQDPC(i));
6689 if (hw->mac.type >= e1000_i210)
6690 wr32(E1000_RQDPC(i), 0);
6691
6692 if (rqdpc) {
6693 ring->rx_stats.drops += rqdpc;
6694 net_stats->rx_fifo_errors += rqdpc;
6695 }
6696
6697 do {
6698 start = u64_stats_fetch_begin(&ring->rx_syncp);
6699 _bytes = ring->rx_stats.bytes;
6700 _packets = ring->rx_stats.packets;
6701 } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
6702 bytes += _bytes;
6703 packets += _packets;
6704 }
6705
6706 net_stats->rx_bytes = bytes;
6707 net_stats->rx_packets = packets;
6708
6709 bytes = 0;
6710 packets = 0;
6711 for (i = 0; i < adapter->num_tx_queues; i++) {
6712 struct igb_ring *ring = adapter->tx_ring[i];
6713 do {
6714 start = u64_stats_fetch_begin(&ring->tx_syncp);
6715 _bytes = ring->tx_stats.bytes;
6716 _packets = ring->tx_stats.packets;
6717 } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
6718 bytes += _bytes;
6719 packets += _packets;
6720 }
6721 net_stats->tx_bytes = bytes;
6722 net_stats->tx_packets = packets;
6723 rcu_read_unlock();
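/* The u64_stats_fetch_begin()/retry() loops above make the 64-bit
 * per-ring byte/packet reads tear-free on 32-bit hosts: if the writer
 * bumps the seqcount mid-read, the snapshot is simply retried.
 */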
6724
6725 /* read stats registers */
6726 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6727 adapter->stats.gprc += rd32(E1000_GPRC);
6728 adapter->stats.gorc += rd32(E1000_GORCL);
6729 rd32(E1000_GORCH); /* clear GORCL */
6730 adapter->stats.bprc += rd32(E1000_BPRC);
6731 adapter->stats.mprc += rd32(E1000_MPRC);
6732 adapter->stats.roc += rd32(E1000_ROC);
6733
6734 adapter->stats.prc64 += rd32(E1000_PRC64);
6735 adapter->stats.prc127 += rd32(E1000_PRC127);
6736 adapter->stats.prc255 += rd32(E1000_PRC255);
6737 adapter->stats.prc511 += rd32(E1000_PRC511);
6738 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6739 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6740 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6741 adapter->stats.sec += rd32(E1000_SEC);
6742
6743 mpc = rd32(E1000_MPC);
6744 adapter->stats.mpc += mpc;
6745 net_stats->rx_fifo_errors += mpc;
6746 adapter->stats.scc += rd32(E1000_SCC);
6747 adapter->stats.ecol += rd32(E1000_ECOL);
6748 adapter->stats.mcc += rd32(E1000_MCC);
6749 adapter->stats.latecol += rd32(E1000_LATECOL);
6750 adapter->stats.dc += rd32(E1000_DC);
6751 adapter->stats.rlec += rd32(E1000_RLEC);
6752 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6753 adapter->stats.xontxc += rd32(E1000_XONTXC);
6754 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6755 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6756 adapter->stats.fcruc += rd32(E1000_FCRUC);
6757 adapter->stats.gptc += rd32(E1000_GPTC);
6758 adapter->stats.gotc += rd32(E1000_GOTCL);
6759 rd32(E1000_GOTCH); /* clear GOTCL */
6760 adapter->stats.rnbc += rd32(E1000_RNBC);
6761 adapter->stats.ruc += rd32(E1000_RUC);
6762 adapter->stats.rfc += rd32(E1000_RFC);
6763 adapter->stats.rjc += rd32(E1000_RJC);
6764 adapter->stats.tor += rd32(E1000_TORH);
6765 adapter->stats.tot += rd32(E1000_TOTH);
6766 adapter->stats.tpr += rd32(E1000_TPR);
6767
6768 adapter->stats.ptc64 += rd32(E1000_PTC64);
6769 adapter->stats.ptc127 += rd32(E1000_PTC127);
6770 adapter->stats.ptc255 += rd32(E1000_PTC255);
6771 adapter->stats.ptc511 += rd32(E1000_PTC511);
6772 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6773 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6774
6775 adapter->stats.mptc += rd32(E1000_MPTC);
6776 adapter->stats.bptc += rd32(E1000_BPTC);
6777
6778 adapter->stats.tpt += rd32(E1000_TPT);
6779 adapter->stats.colc += rd32(E1000_COLC);
6780
6781 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6782 /* read internal phy specific stats */
6783 reg = rd32(E1000_CTRL_EXT);
6784 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6785 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6786
6787 /* this stat has invalid values on i210/i211 */
6788 if ((hw->mac.type != e1000_i210) &&
6789 (hw->mac.type != e1000_i211))
6790 adapter->stats.tncrs += rd32(E1000_TNCRS);
6791 }
6792
6793 adapter->stats.tsctc += rd32(E1000_TSCTC);
6794 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6795
6796 adapter->stats.iac += rd32(E1000_IAC);
6797 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6798 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6799 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6800 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6801 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6802 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6803 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6804 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6805
6806 /* Fill out the OS statistics structure */
6807 net_stats->multicast = adapter->stats.mprc;
6808 net_stats->collisions = adapter->stats.colc;
6809
6810 /* Rx Errors */
6811
6812 /* RLEC on some newer hardware can be incorrect so build
6813 * our own version based on RUC and ROC
6814 */
6815 net_stats->rx_errors = adapter->stats.rxerrc +
6816 adapter->stats.crcerrs + adapter->stats.algnerrc +
6817 adapter->stats.ruc + adapter->stats.roc +
6818 adapter->stats.cexterr;
6819 net_stats->rx_length_errors = adapter->stats.ruc +
6820 adapter->stats.roc;
6821 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6822 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6823 net_stats->rx_missed_errors = adapter->stats.mpc;
6824
6825 /* Tx Errors */
6826 net_stats->tx_errors = adapter->stats.ecol +
6827 adapter->stats.latecol;
6828 net_stats->tx_aborted_errors = adapter->stats.ecol;
6829 net_stats->tx_window_errors = adapter->stats.latecol;
6830 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6831
6832 /* Tx Dropped needs to be maintained elsewhere */
6833
6834 /* Management Stats */
6835 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6836 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6837 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6838
6839 /* OS2BMC Stats */
6840 reg = rd32(E1000_MANC);
6841 if (reg & E1000_MANC_EN_BMC2OS) {
6842 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6843 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6844 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6845 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6846 }
6847 }
6848
6849 static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
6850 {
6851 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt);
6852 struct e1000_hw *hw = &adapter->hw;
6853 struct timespec64 ts;
6854 u32 tsauxc;
6855
6856 if (pin < 0 || pin >= IGB_N_SDP)
6857 return;
6858
6859 spin_lock(&adapter->tmreg_lock);
6860
6861 if (hw->mac.type == e1000_82580 ||
6862 hw->mac.type == e1000_i354 ||
6863 hw->mac.type == e1000_i350) {
6864 s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
6865 u32 systiml, systimh, level_mask, level, rem;
6866 u64 systim, now;
6867
6868 /* read systim registers in sequence */
6869 rd32(E1000_SYSTIMR);
6870 systiml = rd32(E1000_SYSTIML);
6871 systimh = rd32(E1000_SYSTIMH);
6872 systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
6873 now = timecounter_cyc2time(&adapter->tc, systim);
6874
6875 if (pin < 2) {
6876 level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000;
6877 level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
6878 } else {
6879 level_mask = (tsintr_tt == 1) ? 0x80 : 0x40;
6880 level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
6881 }
6882
6883 div_u64_rem(now, ns, &rem);
6884 systim = systim + (ns - rem);
6885
6886 /* synchronize pin level with rising/falling edges */
6887 div_u64_rem(now, ns << 1, &rem);
6888 if (rem < ns) {
6889 /* first half of period */
6890 if (level == 0) {
6891 /* output is already low, skip this period */
6892 systim += ns;
6893 pr_notice("igb: periodic output on %s missed falling edge\n",
6894 adapter->sdp_config[pin].name);
6895 }
6896 } else {
6897 /* second half of period */
6898 if (level == 1) {
6899 /* output is already high, skip this period */
6900 systim += ns;
6901 pr_notice("igb: periodic output on %s missed rising edge\n",
6902 adapter->sdp_config[pin].name);
6903 }
6904 }
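/* The two div_u64_rem() calls above first advance the target past "now"
 * to a whole period boundary, then test the remainder against twice the
 * period to find the current half-cycle: with ns = 500 and now = 1720,
 * for example, 1720 % 1000 = 720 >= 500 puts us in the second half, so
 * an already-high output means the rising edge was missed and a period
 * is skipped.
 */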
6905
6906 /* on this chip family tv_sec carries the raw upper bits of the
6907 * 40-bit SYSTIM value rather than seconds
6908 */
6909 ts.tv_nsec = (u32)systim;
6910 ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
6911 } else {
6912 ts = timespec64_add(adapter->perout[tsintr_tt].start,
6913 adapter->perout[tsintr_tt].period);
6914 }
6915
6916 /* u32 conversion of tv_sec is safe until y2106 */
6917 wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec);
6918 wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec);
6919 tsauxc = rd32(E1000_TSAUXC);
6920 tsauxc |= TSAUXC_EN_TT0;
6921 wr32(E1000_TSAUXC, tsauxc);
6922 adapter->perout[tsintr_tt].start = ts;
6923
6924 spin_unlock(&adapter->tmreg_lock);
6925 }
6926
6927 static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
6928 {
6929 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt);
6930 int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0;
6931 int auxstmph = (tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0;
6932 struct e1000_hw *hw = &adapter->hw;
6933 struct ptp_clock_event event;
6934 struct timespec64 ts;
6935 unsigned long flags;
6936
6937 if (pin < 0 || pin >= IGB_N_SDP)
6938 return;
6939
6940 if (hw->mac.type == e1000_82580 ||
6941 hw->mac.type == e1000_i354 ||
6942 hw->mac.type == e1000_i350) {
6943 u64 ns = rd32(auxstmpl);
6944
6945 ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32;
6946 spin_lock_irqsave(&adapter->tmreg_lock, flags);
6947 ns = timecounter_cyc2time(&adapter->tc, ns);
6948 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
6949 ts = ns_to_timespec64(ns);
6950 } else {
6951 ts.tv_nsec = rd32(auxstmpl);
6952 ts.tv_sec = rd32(auxstmph);
6953 }
6954
6955 event.type = PTP_CLOCK_EXTTS;
6956 event.index = tsintr_tt;
6957 event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
6958 ptp_clock_event(adapter->ptp_clock, &event);
6959 }
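/* On 82580/i350/i354 the auxiliary timestamp above is a raw 40-bit
 * SYSTIM sample -- 32 bits from AUXSTMPL plus the low 8 bits of
 * AUXSTMPH -- hence the (rd32(auxstmph) & 0xFF) << 32 before converting
 * through the timecounter; the other MACs handled here latch nanoseconds
 * and seconds directly.
 */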
6960
6961 static void igb_tsync_interrupt(struct igb_adapter *adapter)
6962 {
6963 struct e1000_hw *hw = &adapter->hw;
6964 u32 tsicr = rd32(E1000_TSICR);
6965 struct ptp_clock_event event;
6966
6967 if (tsicr & TSINTR_SYS_WRAP) {
6968 event.type = PTP_CLOCK_PPS;
6969 if (adapter->ptp_caps.pps)
6970 ptp_clock_event(adapter->ptp_clock, &event);
6971 }
6972
6973 if (tsicr & E1000_TSICR_TXTS) {
6974 /* retrieve hardware timestamp */
6975 schedule_work(&adapter->ptp_tx_work);
6976 }
6977
6978 if (tsicr & TSINTR_TT0)
6979 igb_perout(adapter, 0);
6980
6981 if (tsicr & TSINTR_TT1)
6982 igb_perout(adapter, 1);
6983
6984 if (tsicr & TSINTR_AUTT0)
6985 igb_extts(adapter, 0);
6986
6987 if (tsicr & TSINTR_AUTT1)
6988 igb_extts(adapter, 1);
6989 }
6990
6991 static irqreturn_t igb_msix_other(int irq, void *data)
6992 {
6993 struct igb_adapter *adapter = data;
6994 struct e1000_hw *hw = &adapter->hw;
6995 u32 icr = rd32(E1000_ICR);
6996 /* reading ICR causes bit 31 of EICR to be cleared */
6997
6998 if (icr & E1000_ICR_DRSTA)
6999 schedule_work(&adapter->reset_task);
7000
7001 if (icr & E1000_ICR_DOUTSYNC) {
7002 /* HW is reporting DMA is out of sync */
7003 adapter->stats.doosync++;
7004 /* The DMA Out of Sync is also indication of a spoof event
7005 * in IOV mode. Check the Wrong VM Behavior register to
7006 * see if it is really a spoof event.
7007 */
7008 igb_check_wvbr(adapter);
7009 }
7010
7011 /* Check for a mailbox event */
7012 if (icr & E1000_ICR_VMMB)
7013 igb_msg_task(adapter);
7014
7015 if (icr & E1000_ICR_LSC) {
7016 hw->mac.get_link_status = 1;
7017 /* guard against interrupt when we're going down */
7018 if (!test_bit(__IGB_DOWN, &adapter->state))
7019 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7020 }
7021
7022 if (icr & E1000_ICR_TS)
7023 igb_tsync_interrupt(adapter);
7024
7025 wr32(E1000_EIMS, adapter->eims_other);
7026
7027 return IRQ_HANDLED;
7028 }
7029
7030 static void igb_write_itr(struct igb_q_vector *q_vector)
7031 {
7032 struct igb_adapter *adapter = q_vector->adapter;
7033 u32 itr_val = q_vector->itr_val & 0x7FFC;
7034
7035 if (!q_vector->set_itr)
7036 return;
7037
7038 if (!itr_val)
7039 itr_val = 0x4;
7040
7041 if (adapter->hw.mac.type == e1000_82575)
7042 itr_val |= itr_val << 16;
7043 else
7044 itr_val |= E1000_EITR_CNT_IGNR;
7045
7046 writel(itr_val, q_vector->itr_register);
7047 q_vector->set_itr = 0;
7048 }
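/* Per the mask above the EITR interval field appears to occupy bits
 * 2..14 (hence itr_val & 0x7FFC, with 0x4 as the minimum non-zero
 * interval); 82575 mirrors the interval into the upper half of the
 * register, while later MACs set E1000_EITR_CNT_IGNR so the new value
 * can take effect without waiting for the current countdown to expire.
 */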
7049
7050 static irqreturn_t igb_msix_ring(int irq, void *data)
7051 {
7052 struct igb_q_vector *q_vector = data;
7053
7054 /* Write the ITR value calculated from the previous interrupt. */
7055 igb_write_itr(q_vector);
7056
7057 napi_schedule(&q_vector->napi);
7058
7059 return IRQ_HANDLED;
7060 }
7061
7062 #ifdef CONFIG_IGB_DCA
7063 static void igb_update_tx_dca(struct igb_adapter *adapter,
7064 struct igb_ring *tx_ring,
7065 int cpu)
7066 {
7067 struct e1000_hw *hw = &adapter->hw;
7068 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
7069
7070 if (hw->mac.type != e1000_82575)
7071 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
7072
7073 /* We can enable relaxed ordering for reads, but not writes when
7074 * DCA is enabled. This is due to a known issue in some chipsets
7075 * which will cause the DCA tag to be cleared.
7076 */
7077 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
7078 E1000_DCA_TXCTRL_DATA_RRO_EN |
7079 E1000_DCA_TXCTRL_DESC_DCA_EN;
7080
7081 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
7082 }
7083
7084 static void igb_update_rx_dca(struct igb_adapter *adapter,
7085 struct igb_ring *rx_ring,
7086 int cpu)
7087 {
7088 struct e1000_hw *hw = &adapter->hw;
7089 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
7090
7091 if (hw->mac.type != e1000_82575)
7092 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
7093
7094 /* We can enable relaxed ordering for reads, but not writes when
7095 * DCA is enabled. This is due to a known issue in some chipsets
7096 * which will cause the DCA tag to be cleared.
7097 */
7098 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
7099 E1000_DCA_RXCTRL_DESC_DCA_EN;
7100
7101 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
7102 }
7103
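/**
 * igb_update_dca - retarget DCA tags at the CPU servicing this vector
 * @q_vector: pointer to q_vector whose rings should be updated
 **/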
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring)
		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);

	if (q_vector->rx.ring)
		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

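/**
 * igb_setup_dca - initialize DCA for all queue vectors
 * @adapter: board private structure
 **/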
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		fallthrough; /* since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model
			 */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];

	eth_zero_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	/* By default spoof check is enabled for all VFs */
	adapter->vf_data[vf].spoofchk_enabled = true;

	/* By default VFs are not trusted */
	adapter->vf_data[vf].trusted = false;

	return 0;
}

#endif
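/**
 * igb_ping_all_vfs - send a control message to every allocated VF
 * @adapter: board private structure
 **/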
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/* if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;

			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));

		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

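/**
 * igb_clear_vf_vfta - remove a VF from all VLAN filter pools
 * @adapter: board private structure
 * @vf: VF whose pool bits should be cleared
 *
 * Walks the VLVF array, drops the VF from every pool it is a member of
 * and clears the VFTA bit when no other pool still needs the VLAN.
 **/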
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, vlvf_mask, i;

	/* create mask for VF and other pools */
	pool_mask = E1000_VLVF_POOLSEL_MASK;
	vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);

	/* drop PF from pool bits */
	pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
			  adapter->vfs_allocated_count);

	/* Find the vlan filter for this id */
	for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
		u32 vlvf = rd32(E1000_VLVF(i));
		u32 vfta_mask, vid, vfta;

		/* remove the vf from the pool */
		if (!(vlvf & vlvf_mask))
			continue;

		/* clear out bit from VLVF */
		vlvf ^= vlvf_mask;

		/* if other pools are present, just remove ourselves */
		if (vlvf & pool_mask)
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (vlvf & E1000_VLVF_POOLSEL_MASK)
			goto update_vlvf;

		vid = vlvf & E1000_VLVF_VLANID_MASK;
		vfta_mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = adapter->shadow_vfta[vid / 32];
		if (vfta & vfta_mask)
			hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
update_vlvf:
		/* clear pool selection enable */
		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
			vlvf &= E1000_VLVF_POOLSEL_MASK;
		else
			vlvf = 0;
update_vlvfb:
		/* clear pool bits */
		wr32(E1000_VLVF(i), vlvf);
	}
}

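/**
 * igb_find_vlvf_entry - locate the VLVF register index for a VLAN id
 * @hw: pointer to the HW structure
 * @vlan: VLAN id to search for
 *
 * Returns the matching VLVF index, or 0 when @vlan is 0 or no entry
 * was found.
 **/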
static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
{
	u32 vlvf;
	int idx;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the VLAN id in the VLVF entries */
	for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
		vlvf = rd32(E1000_VLVF(idx));
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	return idx;
}

static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 bits, pf_id;
	int idx;

	idx = igb_find_vlvf_entry(hw, vid);
	if (!idx)
		return;

	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
	bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
	bits &= rd32(E1000_VLVF(idx));

	/* Disable the filter so this falls into the default pool. */
	if (!bits) {
		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
			wr32(E1000_VLVF(idx), BIT(pf_id));
		else
			wr32(E1000_VLVF(idx), 0);
	}
}

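/**
 * igb_set_vf_vlan - add or remove a VLAN filter on behalf of a VF
 * @adapter: board private structure
 * @vid: VLAN id to add or remove
 * @add: true to add the filter, false to remove it
 * @vf: VF requesting the change
 **/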
static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
			   bool add, u32 vf)
{
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* If VLAN overlaps with one the PF is currently monitoring make
	 * sure that we are able to allocate a VLVF entry. This may be
	 * redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = igb_vfta_set(hw, vid, pf_id, true, false);
		if (err)
			return err;
	}

	err = igb_vfta_set(hw, vid, vf, add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_update_pf_vlvf(adapter, vid);

	return err;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
				u16 vlan, u8 qos)
{
	int err;

	err = igb_set_vf_vlan(adapter, vlan, true, vf);
	if (err)
		return err;

	igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vlan);

	/* revoke access to previous VLAN */
	if (vlan != adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = vlan;
	adapter->vf_data[vf].pf_qos = qos;
	igb_set_vf_vlan_strip(adapter, vf, true);
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

	return err;
}

static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
{
	/* Restore tagless access via VLAN 0 */
	igb_set_vf_vlan(adapter, 0, true, vf);

	igb_set_vmvir(adapter, 0, vf);
	igb_set_vmolr(adapter, vf, true);

	/* Remove any PF assigned VLAN */
	if (adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = 0;
	adapter->vf_data[vf].pf_qos = 0;
	igb_set_vf_vlan_strip(adapter, vf, false);

	return 0;
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
			       u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
			       igb_disable_port_vlan(adapter, vf);
}

static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
	int ret;

	if (adapter->vf_data[vf].pf_vlan)
		return -1;

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
	if (!ret)
		igb_set_vf_vlan_strip(adapter, vf, !!vid);
	return ret;
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	/* clear flags - except flag that indicates PF has set the MAC */
	vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
	vf_data->last_nack = jiffies;

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
	igb_set_vmvir(adapter, vf_data->pf_vlan |
			       (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
	igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* clear mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		eth_zero_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

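/**
 * igb_vf_reset_msg - reply to a VF reset request
 * @adapter: board private structure
 * @vf: VF that sent the reset message
 *
 * Performs the function-level reset work, re-enables transmit and
 * receive for the VF, then replies with an ACK carrying the VF MAC
 * address, or a NACK if no address is assigned. Unlocks the mailbox.
 **/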
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	u32 reg, msgbuf[3] = {};
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_set_vf_mac(adapter, vf, vf_mac);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | BIT(vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | BIT(vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
	}
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static void igb_flush_mac_table(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.rar_entry_count; i++) {
		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
		eth_zero_addr(adapter->mac_table[i].addr);
		adapter->mac_table[i].queue = 0;
		igb_rar_set_index(adapter, i);
	}
}

static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
{
	struct e1000_hw *hw = &adapter->hw;
	/* do not count rar entries reserved for VFs MAC addresses */
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i, count = 0;

	for (i = 0; i < rar_entries; i++) {
		/* do not count default entries */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
			continue;

		/* do not count "in use" entries for different queues */
		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
		    (adapter->mac_table[i].queue != queue))
			continue;

		count++;
	}

	return count;
}

/* Set default MAC address for the PF in the first RAR entry */
static void igb_set_default_mac_filter(struct igb_adapter *adapter)
{
	struct igb_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->queue = adapter->vfs_allocated_count;
	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;

	igb_rar_set_index(adapter, 0);
}

/* If the filter to be added and an already existing filter express
 * the same address and address type, it should be possible to only
 * override the other configurations, for example the queue to steer
 * traffic.
 */
static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
				      const u8 *addr, const u8 flags)
{
	if (!(entry->state & IGB_MAC_STATE_IN_USE))
		return true;

	if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
	    (flags & IGB_MAC_STATE_SRC_ADDR))
		return false;

	if (!ether_addr_equal(addr, entry->addr))
		return false;

	return true;
}

/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
 * 'flags' is used to indicate what kind of match is made, match is by
 * default for the destination address, if matching by source address
 * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used.
 */
static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for the first empty entry in the MAC table.
	 * Do not touch entries at the end of the table reserved for the VF MAC
	 * addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
					       addr, flags))
			continue;

		ether_addr_copy(adapter->mac_table[i].addr, addr);
		adapter->mac_table[i].queue = queue;
		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;

		igb_rar_set_index(adapter, i);
		return i;
	}

	return -ENOSPC;
}

static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
}

/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue', 'flags' is used to indicate what kind of match need to be
 * removed, match is by default for the destination address, if
 * matching by source address is to be removed the flag
 * IGB_MAC_STATE_SRC_ADDR can be used.
 */
static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for matching entry in the MAC table based on given address
	 * and queue. Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
			continue;
		if ((adapter->mac_table[i].state & flags) != flags)
			continue;
		if (adapter->mac_table[i].queue != queue)
			continue;
		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
			continue;

		/* When a filter for the default address is "deleted",
		 * we return it to its initial configuration
		 */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
			adapter->mac_table[i].state =
				IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
			adapter->mac_table[i].queue =
				adapter->vfs_allocated_count;
		} else {
			adapter->mac_table[i].state = 0;
			adapter->mac_table[i].queue = 0;
			eth_zero_addr(adapter->mac_table[i].addr);
		}

		igb_rar_set_index(adapter, i);
		return 0;
	}

	return -ENOENT;
}

static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
}

int igb_add_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;

	/* In theory, this should be supported on 82575 as well, but
	 * that part wasn't easily accessible during development.
	 */
	if (hw->mac.type != e1000_i210)
		return -EOPNOTSUPP;

	return igb_add_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

int igb_del_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	return igb_del_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int ret;

	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return min_t(int, ret, 0);
}

static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return 0;
}

static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
				 const u32 info, const u8 *addr)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	struct vf_mac_filter *entry;
	bool found = false;
	int ret = 0;

	if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
	    !vf_data->trusted) {
		dev_warn(&pdev->dev,
			 "VF %d requested MAC filter but is administratively denied\n",
			 vf);
		return -EINVAL;
	}
	if (!is_valid_ether_addr(addr)) {
		dev_warn(&pdev->dev,
			 "VF %d attempted to set invalid MAC filter\n",
			 vf);
		return -EINVAL;
	}

	switch (info) {
	case E1000_VF_MAC_FILTER_CLR:
		/* remove all unicast MAC filters related to the current VF */
		list_for_each_entry(entry, &adapter->vf_macs.l, l) {
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				igb_del_mac_filter(adapter, entry->vf_mac, vf);
			}
		}
		break;
	case E1000_VF_MAC_FILTER_ADD:
		/* try to find empty slot in the list */
		list_for_each_entry(entry, &adapter->vf_macs.l, l) {
			if (entry->free) {
				found = true;
				break;
			}
		}

		if (found) {
			entry->free = false;
			entry->vf = vf;
			ether_addr_copy(entry->vf_mac, addr);

			ret = igb_add_mac_filter(adapter, addr, vf);
			ret = min_t(int, ret, 0);
		} else {
			ret = -ENOSPC;
		}

		if (ret == -ENOSPC)
			dev_warn(&pdev->dev,
				 "VF %d has requested MAC filter but there is no space for it\n",
				 vf);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;

	/* The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int ret = 0;

	if (!info) {
		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
		    !vf_data->trusted) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
				 vf);
			return -EINVAL;
		}

		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC\n",
				 vf);
			return -EINVAL;
		}

		ret = igb_set_vf_mac(adapter, vf, addr);
	} else {
		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
	}

	return ret;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

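/**
 * igb_rcv_msg_from_vf - process one mailbox message from a VF
 * @adapter: board private structure
 * @vf: VF that sent the message
 *
 * Dispatches the message to the appropriate handler and replies with
 * an ACK or NACK. The mailbox is unlocked either by the reply or
 * explicitly when the message is ignored.
 **/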
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		goto unlock;

	/* until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		/* unlocks mailbox */
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
				 vf);
		else
			retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	/* unlocks mailbox */
	igb_write_mbx(hw, msgbuf, 1, vf);
	return;

unlock:
	igb_unlock_mbx(hw, vf);
}

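/**
 * igb_msg_task - service pending mailbox events for all VFs
 * @adapter: board private structure
 **/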
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;
	u32 vf;

	spin_lock_irqsave(&adapter->vfs_lock, flags);
	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 * @set: boolean indicating if we are setting or clearing bits
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter, bool set)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 uta = set ? ~0 : 0;
	int i;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = hw->mac.uta_reg_count; i--;)
		array_wr32(E1000_UTA, i, uta);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
	 * need for the IMC write
	 */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

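/**
 * igb_ring_irq_enable - update ITR and re-arm interrupts after polling
 * @q_vector: q_vector whose interrupt should be re-enabled
 **/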
static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_HAS_MSIX)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;
	int work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector, budget);

	if (q_vector->rx.ring) {
		int cleaned = igb_clean_rx_irq(q_vector, budget);

		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		igb_ring_irq_enable(q_vector);

	return work_done;
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		if (tx_buffer->type == IGB_TYPE_SKB)
			napi_consume_skb(tx_buffer->skb, napi_budget);
		else
			xdp_return_frame(tx_buffer->xdpf);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				tx_buffer->next_to_watch,
				jiffies,
				tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

/**
 * igb_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *old_buff)
{
	struct igb_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

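/**
 * igb_can_reuse_rx_page - check whether a page can be recycled
 * @rx_buffer: buffer containing the page to check
 * @rx_buf_pgcnt: page count snapshot taken when the buffer was fetched
 *
 * A page can only be reused if it is local and not pfmemalloc, and if
 * the driver still holds the only reference to it (or, on large-page
 * systems, if there is still room for another buffer in the page).
 **/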
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
				  int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define IGB_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
static void igb_add_rx_frag(struct igb_ring *rx_ring,
			    struct igb_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

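/**
 * igb_construct_skb - build an skb around an Rx buffer fragment
 * @rx_ring: rx descriptor ring the buffer came from
 * @rx_buffer: buffer containing the page to attach
 * @xdp: xdp_buff describing the received data
 * @timestamp: hardware receive timestamp, or 0 if none
 *
 * Allocates a new skb, copies the packet headers into it and attaches
 * the remainder of the packet as a page fragment.
 **/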
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
					 struct igb_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGB_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(xdp->data + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

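/**
 * igb_build_skb - build an skb directly around the Rx buffer
 * @rx_ring: rx descriptor ring the buffer came from
 * @rx_buffer: buffer containing the page to wrap
 * @xdp: xdp_buff describing the received data
 * @timestamp: hardware receive timestamp, or 0 if none
 *
 * Avoids a copy by constructing the skb in place around the page
 * buffer the hardware wrote into.
 **/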
static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
				     struct igb_rx_buffer *rx_buffer,
				     struct xdp_buff *xdp,
				     ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);

	if (metasize)
		skb_metadata_set(skb, metasize);

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

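/**
 * igb_run_xdp - run the attached XDP program on a received buffer
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring the buffer came from
 * @xdp: xdp_buff describing the received data
 *
 * Returns NULL when no program is attached or the verdict is XDP_PASS,
 * otherwise an ERR_PTR encoding the consumed/TX/redirect result.
 **/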
igb_run_xdp(struct igb_adapter * adapter,struct igb_ring * rx_ring,struct xdp_buff * xdp)8573 static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
8574 struct igb_ring *rx_ring,
8575 struct xdp_buff *xdp)
8576 {
8577 int err, result = IGB_XDP_PASS;
8578 struct bpf_prog *xdp_prog;
8579 u32 act;
8580
8581 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
8582
8583 if (!xdp_prog)
8584 goto xdp_out;
8585
8586 prefetchw(xdp->data_hard_start); /* xdp_frame write */
8587
8588 act = bpf_prog_run_xdp(xdp_prog, xdp);
8589 switch (act) {
8590 case XDP_PASS:
8591 break;
8592 case XDP_TX:
8593 result = igb_xdp_xmit_back(adapter, xdp);
8594 if (result == IGB_XDP_CONSUMED)
8595 goto out_failure;
8596 break;
8597 case XDP_REDIRECT:
8598 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
8599 if (err)
8600 goto out_failure;
8601 result = IGB_XDP_REDIR;
8602 break;
8603 default:
8604 bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
8605 fallthrough;
8606 case XDP_ABORTED:
8607 out_failure:
8608 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
8609 fallthrough;
8610 case XDP_DROP:
8611 result = IGB_XDP_CONSUMED;
8612 break;
8613 }
8614 xdp_out:
8615 return ERR_PTR(-result);
8616 }
8617
igb_rx_frame_truesize(struct igb_ring * rx_ring,unsigned int size)8618 static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
8619 unsigned int size)
8620 {
8621 unsigned int truesize;
8622
8623 #if (PAGE_SIZE < 8192)
8624 truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
8625 #else
8626 truesize = ring_uses_build_skb(rx_ring) ?
8627 SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
8628 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
8629 SKB_DATA_ALIGN(size);
8630 #endif
8631 return truesize;
8632 }
8633
igb_rx_buffer_flip(struct igb_ring * rx_ring,struct igb_rx_buffer * rx_buffer,unsigned int size)8634 static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
8635 struct igb_rx_buffer *rx_buffer,
8636 unsigned int size)
8637 {
8638 unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
8639 #if (PAGE_SIZE < 8192)
8640 rx_buffer->page_offset ^= truesize;
8641 #else
8642 rx_buffer->page_offset += truesize;
8643 #endif
8644 }
8645
igb_rx_checksum(struct igb_ring * ring,union e1000_adv_rx_desc * rx_desc,struct sk_buff * skb)8646 static inline void igb_rx_checksum(struct igb_ring *ring,
8647 union e1000_adv_rx_desc *rx_desc,
8648 struct sk_buff *skb)
8649 {
8650 skb_checksum_none_assert(skb);
8651
8652 /* Ignore Checksum bit is set */
8653 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8654 return;
8655
8656 /* Rx checksum disabled via ethtool */
8657 if (!(ring->netdev->features & NETIF_F_RXCSUM))
8658 return;
8659
8660 /* TCP/UDP checksum error bit is set */
8661 if (igb_test_staterr(rx_desc,
8662 E1000_RXDEXT_STATERR_TCPE |
8663 E1000_RXDEXT_STATERR_IPE)) {
8664 /* work around errata with sctp packets where the TCPE aka
8665 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
8666 * packets, (aka let the stack check the crc32c)
8667 */
8668 if (!((skb->len == 60) &&
8669 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8670 u64_stats_update_begin(&ring->rx_syncp);
8671 ring->rx_stats.csum_err++;
8672 u64_stats_update_end(&ring->rx_syncp);
8673 }
8674 /* let the stack verify checksum errors */
8675 return;
8676 }
8677 /* It must be a TCP or UDP packet with a valid checksum */
8678 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8679 E1000_RXD_STAT_UDPCS))
8680 skb->ip_summed = CHECKSUM_UNNECESSARY;
8681
8682 dev_dbg(ring->dev, "cksum success: bits %08X\n",
8683 le32_to_cpu(rx_desc->wb.upper.status_error));
8684 }
8685
igb_rx_hash(struct igb_ring * ring,union e1000_adv_rx_desc * rx_desc,struct sk_buff * skb)8686 static inline void igb_rx_hash(struct igb_ring *ring,
8687 union e1000_adv_rx_desc *rx_desc,
8688 struct sk_buff *skb)
8689 {
8690 if (ring->netdev->features & NETIF_F_RXHASH)
8691 skb_set_hash(skb,
8692 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8693 PKT_HASH_TYPE_L3);
8694 }
8695
8696 /**
8697 * igb_is_non_eop - process handling of non-EOP buffers
8698 * @rx_ring: Rx ring being processed
8699 * @rx_desc: Rx descriptor for current buffer
8700 *
8701 * This function updates next to clean. If the buffer is an EOP buffer
8702 * this function exits returning false, otherwise it will place the
8703 * sk_buff in the next buffer to be chained and return true indicating
8704 * that this is in fact a non-EOP buffer.
8705 **/
igb_is_non_eop(struct igb_ring * rx_ring,union e1000_adv_rx_desc * rx_desc)8706 static bool igb_is_non_eop(struct igb_ring *rx_ring,
8707 union e1000_adv_rx_desc *rx_desc)
8708 {
8709 u32 ntc = rx_ring->next_to_clean + 1;
8710
8711 /* fetch, update, and store next to clean */
8712 ntc = (ntc < rx_ring->count) ? ntc : 0;
8713 rx_ring->next_to_clean = ntc;
8714
8715 prefetch(IGB_RX_DESC(rx_ring, ntc));
8716
8717 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8718 return false;
8719
8720 return true;
8721 }
8722
8723 /**
8724 * igb_cleanup_headers - Correct corrupted or empty headers
8725 * @rx_ring: rx descriptor ring packet is being transacted on
8726 * @rx_desc: pointer to the EOP Rx descriptor
8727 * @skb: pointer to current skb being fixed
8728 *
8729 * Address the case where we are pulling data in on pages only
8730 * and as such no data is present in the skb header.
8731 *
8732 * In addition if skb is not at least 60 bytes we need to pad it so that
8733 * it is large enough to qualify as a valid Ethernet frame.
8734 *
8735 * Returns true if an error was encountered and skb was freed.
8736 **/
igb_cleanup_headers(struct igb_ring * rx_ring,union e1000_adv_rx_desc * rx_desc,struct sk_buff * skb)8737 static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8738 union e1000_adv_rx_desc *rx_desc,
8739 struct sk_buff *skb)
8740 {
8741 /* XDP packets use error pointer so abort at this point */
8742 if (IS_ERR(skb))
8743 return true;
8744
8745 if (unlikely((igb_test_staterr(rx_desc,
8746 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8747 struct net_device *netdev = rx_ring->netdev;
8748 if (!(netdev->features & NETIF_F_RXALL)) {
8749 dev_kfree_skb_any(skb);
8750 return true;
8751 }
8752 }
8753
8754 /* if eth_skb_pad returns an error the skb was freed */
8755 if (eth_skb_pad(skb))
8756 return true;
8757
8758 return false;
8759 }
8760
8761 /**
8762 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
8763 * @rx_ring: rx descriptor ring packet is being transacted on
8764 * @rx_desc: pointer to the EOP Rx descriptor
8765 * @skb: pointer to current skb being populated
8766 *
8767 * This function checks the ring, descriptor, and packet information in
8768 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
8769 * other fields within the skb.
8770 **/
igb_process_skb_fields(struct igb_ring * rx_ring,union e1000_adv_rx_desc * rx_desc,struct sk_buff * skb)8771 static void igb_process_skb_fields(struct igb_ring *rx_ring,
8772 union e1000_adv_rx_desc *rx_desc,
8773 struct sk_buff *skb)
8774 {
8775 struct net_device *dev = rx_ring->netdev;
8776
8777 igb_rx_hash(rx_ring, rx_desc, skb);
8778
8779 igb_rx_checksum(rx_ring, rx_desc, skb);
8780
8781 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8782 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8783 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8784
8785 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8786 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8787 u16 vid;
8788
8789 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8790 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8791 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
8792 else
8793 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8794
8795 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8796 }
8797
8798 skb_record_rx_queue(skb, rx_ring->queue_index);
8799
8800 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8801 }
8802
igb_rx_offset(struct igb_ring * rx_ring)8803 static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8804 {
8805 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8806 }
8807
igb_get_rx_buffer(struct igb_ring * rx_ring,const unsigned int size,int * rx_buf_pgcnt)8808 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8809 const unsigned int size, int *rx_buf_pgcnt)
8810 {
8811 struct igb_rx_buffer *rx_buffer;
8812
8813 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8814 *rx_buf_pgcnt =
8815 #if (PAGE_SIZE < 8192)
8816 page_count(rx_buffer->page);
8817 #else
8818 0;
8819 #endif
8820 prefetchw(rx_buffer->page);
8821
8822 /* we are reusing so sync this buffer for CPU use */
8823 dma_sync_single_range_for_cpu(rx_ring->dev,
8824 rx_buffer->dma,
8825 rx_buffer->page_offset,
8826 size,
8827 DMA_FROM_DEVICE);
8828
8829 rx_buffer->pagecnt_bias--;
8830
8831 return rx_buffer;
8832 }
8833
static void igb_put_rx_buffer(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
{
	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		igb_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGB_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

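/**
 * igb_clean_rx_irq - Process received frames within the NAPI budget
 * @q_vector: vector whose Rx ring is being cleaned
 * @budget: maximum number of packets to process
 *
 * Runs the attached XDP program (if any) on each completed frame,
 * builds skbs for traffic destined for the stack, hands them to GRO,
 * and refills the ring with fresh buffers. Returns the number of
 * packets processed.
 **/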
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	unsigned int xdp_xmit = 0;
	struct xdp_buff xdp;
	u32 frame_sz = 0;
	int rx_buf_pgcnt;

	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = igb_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	while (likely(total_packets < budget)) {
		union e1000_adv_rx_desc *rx_desc;
		struct igb_rx_buffer *rx_buffer;
		ktime_t timestamp = 0;
		int pkt_offset = 0;
		unsigned int size;
		void *pktbuf;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;

		/* pull rx packet timestamp if available and valid */
		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
			int ts_hdr_len;

			ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
							 pktbuf, &timestamp);

			pkt_offset += ts_hdr_len;
			size -= ts_hdr_len;
		}

		/* retrieve a buffer from the ring */
		if (!skb) {
			unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
			unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);

			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
			xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on the received length */
			xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
#endif
			skb = igb_run_xdp(adapter, rx_ring, &xdp);
		}

		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);

			if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				igb_rx_buffer_flip(rx_ring, rx_buffer, size);
			} else {
				rx_buffer->pagecnt_bias++;
			}
			total_packets++;
			total_bytes += size;
		} else if (skb)
			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
					    timestamp);
		else
			skb = igb_construct_skb(rx_ring, rx_buffer,
						&xdp, timestamp);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igb_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		igb_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	if (xdp_xmit & IGB_XDP_REDIR)
		xdp_do_flush();

	if (xdp_xmit & IGB_XDP_TX) {
		struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);

		igb_xdp_ring_update_tail(tx_ring);
	}

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}

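/**
 * igb_alloc_mapped_page - Ensure an Rx buffer has a DMA-mapped page
 * @rx_ring: rx descriptor ring the buffer belongs to
 * @bi: buffer info structure to populate
 *
 * Allocates and maps a new page only when the buffer does not already
 * hold one, seeding pagecnt_bias with USHRT_MAX references up front so
 * recycling avoids per-packet atomic refcount updates.
 **/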
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igb_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGB_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, igb_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igb_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * igb_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: rx descriptor ring to allocate new receive buffers
 * @cleaned_count: count of buffers to allocate
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igb_rx_bufsz(rx_ring);

	do {
		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		dma_wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - Handle MII ioctls on the copper PHY
 * @netdev: pointer to netdev struct
 * @ifr: interface structure
 * @cmd: ioctl command to execute
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_ioctl - Dispatch MII and hardware timestamping ioctls
 * @netdev: pointer to netdev struct
 * @ifr: interface structure
 * @cmd: ioctl command to execute
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCGHWTSTAMP:
		return igb_ptp_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return igb_ptp_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_read_word(adapter->pdev, reg, value))
		return -E1000_ERR_CONFIG;

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_write_word(adapter->pdev, reg, *value))
		return -E1000_ERR_CONFIG;

	return 0;
}

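/**
 * igb_vlan_mode - Enable or disable hardware VLAN tag stripping
 * @netdev: network interface device structure
 * @features: netdev feature set, checked for NETIF_F_HW_VLAN_CTAG_RX
 **/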
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev,
			       __be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_vfta_set(hw, vid, pf_id, true, !!vid);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_vfta_set(hw, vid, pf_id, false, true);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid = 1;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
	igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

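/**
 * igb_set_spd_dplx - Force link speed and duplex
 * @adapter: board private structure
 * @spd: requested speed (SPEED_10/100/1000)
 * @dplx: requested duplex (DUPLEX_HALF or DUPLEX_FULL)
 *
 * Returns 0 on success or -EINVAL for combinations the hardware cannot
 * support; 1000 Mbps full duplex is configured via autoneg advertisement
 * rather than forced.
 **/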
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps Full duplex
	 * and 100 Mbps Full duplex for 100BaseFX SFPs
	 */
	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		switch (spd + dplx) {
		case SPEED_10 + DUPLEX_HALF:
		case SPEED_10 + DUPLEX_FULL:
		case SPEED_100 + DUPLEX_HALF:
			goto err_inval;
		default:
			break;
		}
	}

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

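/**
 * __igb_shutdown - Common suspend/shutdown path
 * @pdev: PCI device information struct
 * @enable_wake: optionally receives whether wake-up is armed
 * @runtime: true when called for runtime suspend, in which case only
 *	link-change wake-up is programmed
 **/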
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_ptp_suspend(adapter);

	igb_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

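/**
 * igb_deliver_wake_packet - Pass the packet that woke the system up
 * @netdev: network interface device structure
 *
 * Reconstructs the wake packet from the WUPM registers and injects it
 * via netif_rx() so it is not lost; packets larger than the 128 bytes
 * the hardware stores are skipped.
 **/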
static void igb_deliver_wake_packet(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
		return;

	skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}

static int igb_suspend(struct device *dev)
{
	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}

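/**
 * __igb_resume - Common resume path
 * @dev: generic device backing the PCI device
 * @rpm: true when called for runtime resume, in which case the rtnl
 *	lock may already be held by the caller and is not taken here
 **/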
static int __igb_resume(struct device *dev, bool rpm)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);

	val = rd32(E1000_WUS);
	if (val & WAKE_PKT_WUS)
		igb_deliver_wake_packet(netdev);

	wr32(E1000_WUS, ~0);

	if (!rpm)
		rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igb_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	if (!rpm)
		rtnl_unlock();

	return err;
}

static int igb_resume(struct device *dev)
{
	return __igb_resume(dev, false);
}

static int igb_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int igb_runtime_suspend(struct device *dev)
{
	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}

static int igb_runtime_resume(struct device *dev)
{
	return __igb_resume(dev, true);
}

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

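/**
 * igb_pci_sriov_configure - Enable or disable SR-IOV via sysfs
 * @dev: PCI device information struct
 * @num_vfs: number of VFs requested, or 0 to disable SR-IOV
 *
 * Returns the number of VFs enabled, or a negative errno on failure.
 **/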
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	int err;

	if (num_vfs == 0) {
		return igb_disable_sriov(dev, true);
	} else {
		err = igb_enable_sriov(dev, num_vfs, true);
		return err ? err : num_vfs;
	}
#endif
	return 0;
}

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (state == pci_channel_io_normal) {
		dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n");
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the __igb_resume routine.
 **/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, the adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the __igb_resume routine.
 **/
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);
}

/**
 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 * @adapter: Pointer to adapter structure
 * @index: Index of the RAR entry which needs to be synced with MAC table
 **/
static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rar_low, rar_high;
	u8 *addr = adapter->mac_table[index].addr;

	/* HW expects these to be in network order when they are plugged
	 * into the registers which are little endian. In order to guarantee
	 * that ordering we need to do an leXX_to_cpup here in order to be
	 * ready for the byteswap that occurs with writel
	 */
	rar_low = le32_to_cpup((__le32 *)(addr));
	rar_high = le16_to_cpup((__le16 *)(addr + 4));

	/* Indicate to hardware the Address is Valid. */
	if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
		if (is_valid_ether_addr(addr))
			rar_high |= E1000_RAH_AV;

		if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
			rar_high |= E1000_RAH_ASEL_SRC_ADDR;

		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_i210:
			if (adapter->mac_table[index].state &
			    IGB_MAC_STATE_QUEUE_STEERING)
				rar_high |= E1000_RAH_QSEL_ENABLE;

			rar_high |= E1000_RAH_POOL_1 *
				    adapter->mac_table[index].queue;
			break;
		default:
			rar_high |= E1000_RAH_POOL_1 <<
				    adapter->mac_table[index].queue;
			break;
		}
	}

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

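/**
 * igb_set_vf_mac - Program a MAC address filter for a VF
 * @adapter: board private structure
 * @vf: VF identifier
 * @mac_addr: MAC address to assign to the VF
 **/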
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first; as a result a collision should not be
	 * possible
	 */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;

	ether_addr_copy(vf_mac_addr, mac_addr);
	ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
	adapter->mac_table[rar_entry].queue = vf;
	adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
	igb_rar_set_index(adapter, rar_entry);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
	 * flag and allows overwriting the MAC via the VF netdev. This
	 * is necessary to allow libvirt a way to restore the original
	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
	 * down a VM.
	 */
	if (is_zero_ether_addr(mac)) {
		adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
		dev_info(&adapter->pdev->dev,
			 "remove administratively set MAC on VF %d\n",
			 vf);
	} else if (is_valid_ether_addr(mac)) {
		adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
			 mac, vf);
		dev_info(&adapter->pdev->dev,
			 "Reload the VF driver to make this change effective.\n");
		/* Generate additional warning if PF is down */
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF MAC address has been set, but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before attempting to use the VF device.\n");
		}
	} else {
		return -EINVAL;
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

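/**
 * igb_set_vf_rate_limit - Program the Tx rate limiter for a VF queue
 * @hw: pointer to hardware structure
 * @vf: VF identifier (also selects the queue, since vf X uses queue X)
 * @tx_rate: rate limit in Mbps, or 0 to disable limiting
 * @link_speed: current link speed in Mbps
 *
 * The rate factor written to RTTBCNRC is link_speed / tx_rate, split
 * into its integer and fractional parts.
 **/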
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
			 tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= FIELD_PREP(E1000_RTTBCNRC_RF_INT_MASK, rf_int);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(E1000_RTTBCNRM, 0x14);
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

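/**
 * igb_check_vf_rate_limit - Re-validate VF rate limits after link change
 * @adapter: board private structure
 *
 * VF rate limits are calculated against the link speed, so a speed
 * change invalidates them; in that case all VF rates are reset and the
 * limiter is reprogrammed for every allocated VF.
 **/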
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	if (min_tx_rate)
		return -EINVAL;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (max_tx_rate < 0) ||
	    (max_tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);

	return 0;
}

static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_val, reg_offset;

	if (!adapter->vfs_allocated_count)
		return -EOPNOTSUPP;

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
	reg_val = rd32(reg_offset);
	if (setting)
		reg_val |= (BIT(vf) |
			    BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
	else
		reg_val &= ~(BIT(vf) |
			     BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
	wr32(reg_offset, reg_val);

	adapter->vf_data[vf].spoofchk_enabled = setting;
	return 0;
}

static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	if (adapter->vf_data[vf].trusted == setting)
		return 0;

	adapter->vf_data[vf].trusted = setting;

	dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
		 vf, setting ? "" : "not ");
	return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
	ivi->trusted = adapter->vf_data[vf].trusted;
	return 0;
}

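/**
 * igb_vmm_control - Configure VMDq loopback, replication and anti-spoofing
 * @adapter: board private structure
 *
 * Applies the per-MAC-type VLAN tag handling quirks and then enables
 * or disables the virtualization offloads depending on whether any
 * VFs are allocated.
 **/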
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	case e1000_i354:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		fallthrough;
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		fallthrough;
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

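/**
 * igb_init_dmac - Configure DMA coalescing
 * @adapter: board private structure
 * @pba: packet buffer allocation, in KB, used to derive the thresholds
 *
 * On parts newer than 82580 with IGB_FLAG_DMAC set this programs the
 * DMA coalescing watermarks and watchdog; on 82580 it disables the
 * feature.
 **/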
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;
	u32 reg;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/* DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
			hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= FIELD_PREP(E1000_FCRTC_RTH_COAL_MASK, hwm);
			wr32(E1000_FCRTC, reg);

			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
			dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= FIELD_PREP(E1000_DMACR_DMACTHR_MASK, dmac_thr);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = 1000 usec, in 32 usec intervals */
			reg |= (1000 >> 5);

			/* Disable BMC-to-OS Watchdog Enable */
			if (hw->mac.type != e1000_i354)
				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable
			 * coalescing (smart FIFO) - UTRESH=0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/* free space in Tx packet buffer to wake from
			 * DMA coalescing
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
		}

		if (hw->mac.type >= e1000_i210 ||
		    (adapter->flags & IGB_FLAG_DMAC)) {
			reg = rd32(E1000_PCIEMISC);
			reg |= E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);

		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}

/**
 * igb_read_i2c_byte - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 *
 * Performs byte read operation over I2C interface at
 * a specified device address.
 **/
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		      u8 dev_addr, u8 *data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = 0;

	if (!this_client)
		return E1000_ERR_I2C;

	swfw_mask = E1000_SWFW_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;

	status = i2c_smbus_read_byte_data(this_client, byte_offset);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
}

/**
 * igb_write_i2c_byte - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 *
 * Performs byte write operation over I2C interface at
 * a specified device address.
 **/
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		       u8 dev_addr, u8 data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

	if (!this_client)
		return E1000_ERR_I2C;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;
	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status)
		return E1000_ERR_I2C;

	return 0;
}

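/**
 * igb_reinit_queues - Rebuild the interrupt scheme and queues
 * @adapter: board private structure
 *
 * Closes the interface if it is running, reallocates the interrupt
 * and queue configuration, and reopens the interface. Returns 0 on
 * success or a negative errno.
 **/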
int igb_reinit_queues(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (netif_running(netdev))
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igb_open(netdev);

	return err;
}

static void igb_nfc_filter_exit(struct igb_adapter *adapter)
{
	struct igb_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igb_erase_filter(adapter, rule);

	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
		igb_erase_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}

static void igb_nfc_filter_restore(struct igb_adapter *adapter)
{
	struct igb_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igb_add_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}

static _DEFINE_DEV_PM_OPS(igb_pm_ops, igb_suspend, igb_resume,
			  igb_runtime_suspend, igb_runtime_resume,
			  igb_runtime_idle);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
	.driver.pm = pm_ptr(&igb_pm_ops),
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

/* igb_main.c */