// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
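
/* Usage sketch (illustrative, not part of the driver): the static key makes
 * the "is the XDP Tx ring shared?" test nearly free on the hot path. Transmit
 * paths that may share an XDP ring test the key before taking the per-ring
 * lock, along the lines of:
 *
 *	if (static_branch_unlikely(&ice_xdp_locking_key))
 *		spin_lock(&xdp_ring->tx_lock);
 */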

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}
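
/* Usage sketch (illustrative): helpers that only see a struct ice_hw, such as
 * low-level admin queue code, can still log against the PCI device:
 *
 *	dev_dbg(ice_hw_to_dev(hw), "AQ command completed\n");
 */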

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
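
/* Worked example (illustrative): with ring->count = 256, head = 250 and
 * tail = 10, the ring has wrapped, so the number of descriptors still
 * pending is 10 + 256 - 250 = 16.
 */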

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
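
/* Usage sketch (illustrative): both callbacks are handed to the kernel
 * address-list sync helpers under the netdev addr_list_lock, exactly as done
 * in ice_vsi_sync_fltr() below:
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 */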

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 *
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 *
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error; instead continue processing the rest of the
	 * function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents that from happening and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* To be on the safe side, reset orig_rss_size so that the normal flow
	 * of deciding rss_size can take precedence.
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset types, channel rebuild is not
			 * supported, so reset the related info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw, false);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what
	 * type of reset is pending and sets the corresponding bit in pf->state
	 * along with ICE_RESET_OICR_RECV. If the latter bit is set, prepare
	 * for the pending reset unless already prepared: for PF
	 * software-initiated global resets the driver is already prepared, as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated by
	 * firmware or by software on other PFs, that bit is not set, so
	 * prepare now. Then poll for reset completion, rebuild, and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		speed = "200 G";
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed; may be false if autoneg is not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get the requested FEC mode from the PHY caps of the last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether the link is now up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error here should only be logged with a dbg message, and the
 * driver should continue on with the rebuild/re-enable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail on failure due to other bookkeeping that is needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link state and speed match the new ones, nothing to do */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}
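
/* Note (illustrative): the mask above is inverted because, in the
 * set-event-mask AQ command, a bit that is *set* suppresses delivery of the
 * corresponding link event; the bitwise NOT therefore leaves only the events
 * listed above enabled.
 */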

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_get_fwlog_data - copy the FW log data from ARQ event
 * @pf: PF that the FW log event is associated with
 * @event: event structure containing FW log data
 */
static void
ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_fwlog_data *fwlog;
	struct ice_hw *hw = &pf->hw;

	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];

	memset(fwlog->data, 0, PAGE_SIZE);
	fwlog->data_size = le16_to_cpu(event->desc.datalen);

	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);

	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
		/* the rings are full so bump the head to create room */
		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
					 hw->fwlog_ring.size);
	}
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
 *
 * The calls are separated to allow the caller to register for the event
 * before sending the command, which mitigates a race between registering and
 * the FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a NULL
 * msg_buf. If the complete data buffer is desired, allocate
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}
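
/* Usage sketch (illustrative; the opcode, descriptor and timeout are
 * placeholder choices, not a prescribed sequence): register for the
 * completion first, then send the command, then wait:
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);
 *	err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, 3 * HZ);
 */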

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf is only copied if the waiting task registered a
 * destination buffer with enough space ahead of time. Otherwise, only the
 * descriptor and message length are copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			data.num_msg_proc = i;
			data.num_pending_arq = pending;
			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

			ice_vc_process_vf_msg(pf, &event, &data);
			break;
		case ice_aqc_opc_fw_logs_event:
			ice_get_fwlog_data(pf, &event);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* if mac_type is not generic, sideband is not supported
	 * and there's nothing to do here
	 */
	if (!ice_is_generic_mac(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for the suspend and resume flows (e.g. the WoL
 * scenario).
1732 */
ice_service_task_restart(struct ice_pf * pf)1733 static void ice_service_task_restart(struct ice_pf *pf)
1734 {
1735 clear_bit(ICE_SERVICE_DIS, pf->state);
1736 ice_service_task_schedule(pf);
1737 }
1738
1739 /**
1740 * ice_service_timer - timer callback to schedule service task
1741 * @t: pointer to timer_list
1742 */
ice_service_timer(struct timer_list * t)1743 static void ice_service_timer(struct timer_list *t)
1744 {
1745 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1746
1747 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1748 ice_service_task_schedule(pf);
1749 }
1750
1751 /**
1752 * ice_mdd_maybe_reset_vf - reset VF after MDD event
1753 * @pf: pointer to the PF structure
1754 * @vf: pointer to the VF structure
1755 * @reset_vf_tx: whether Tx MDD has occurred
1756 * @reset_vf_rx: whether Rx MDD has occurred
1757 *
1758 * Since the queue can get stuck on VF MDD events, the PF can be configured to
1759 * automatically reset the VF by enabling the private ethtool flag
1760 * mdd-auto-reset-vf.
1761 */
1762 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
1763 bool reset_vf_tx, bool reset_vf_rx)
1764 {
1765 struct device *dev = ice_pf_to_dev(pf);
1766
1767 if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
1768 return;
1769
1770 /* VF MDD event counters will be cleared by reset, so print the event
1771 * prior to reset.
1772 */
1773 if (reset_vf_tx)
1774 ice_print_vf_tx_mdd_event(vf);
1775
1776 if (reset_vf_rx)
1777 ice_print_vf_rx_mdd_event(vf);
1778
1779 dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
1780 pf->hw.pf_id, vf->vf_id);
1781 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1782 }
1783
1784 /**
1785 * ice_handle_mdd_event - handle malicious driver detect event
1786 * @pf: pointer to the PF structure
1787 *
1788 * Called from service task. OICR interrupt handler indicates MDD event.
1789 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1790 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1791 * disable the queue, the PF can be configured to reset the VF using ethtool
1792 * private flag mdd-auto-reset-vf.
1793 */
1794 static void ice_handle_mdd_event(struct ice_pf *pf)
1795 {
1796 struct device *dev = ice_pf_to_dev(pf);
1797 struct ice_hw *hw = &pf->hw;
1798 struct ice_vf *vf;
1799 unsigned int bkt;
1800 u32 reg;
1801
1802 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1803 /* Since the VF MDD event logging is rate limited, check if
1804 * there are pending MDD events.
1805 */
1806 ice_print_vfs_mdd_events(pf);
1807 return;
1808 }
1809
1810 /* find what triggered an MDD event */
1811 reg = rd32(hw, GL_MDET_TX_PQM);
1812 if (reg & GL_MDET_TX_PQM_VALID_M) {
1813 u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1814 u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1815 u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1816 u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1817
1818 if (netif_msg_tx_err(pf))
1819 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1820 event, queue, pf_num, vf_num);
1821 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1822 }
1823
1824 reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1825 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1826 u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1827 u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1828 u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1829 u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1830
1831 if (netif_msg_tx_err(pf))
1832 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1833 event, queue, pf_num, vf_num);
1834 wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1835 }
1836
1837 reg = rd32(hw, GL_MDET_RX);
1838 if (reg & GL_MDET_RX_VALID_M) {
1839 u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1840 u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1841 u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1842 u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1843
1844 if (netif_msg_rx_err(pf))
1845 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1846 event, queue, pf_num, vf_num);
1847 wr32(hw, GL_MDET_RX, 0xffffffff);
1848 }
1849
1850 /* check to see if this PF caused an MDD event */
1851 reg = rd32(hw, PF_MDET_TX_PQM);
1852 if (reg & PF_MDET_TX_PQM_VALID_M) {
1853 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1854 if (netif_msg_tx_err(pf))
1855 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1856 }
1857
1858 reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1859 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1860 wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1861 if (netif_msg_tx_err(pf))
1862 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1863 }
1864
1865 reg = rd32(hw, PF_MDET_RX);
1866 if (reg & PF_MDET_RX_VALID_M) {
1867 wr32(hw, PF_MDET_RX, 0xFFFF);
1868 if (netif_msg_rx_err(pf))
1869 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1870 }
1871
1872 /* Check to see if one of the VFs caused an MDD event, and then
1873 * increment counters and set print pending
1874 */
1875 mutex_lock(&pf->vfs.table_lock);
1876 ice_for_each_vf(pf, bkt, vf) {
1877 bool reset_vf_tx = false, reset_vf_rx = false;
1878
1879 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1880 if (reg & VP_MDET_TX_PQM_VALID_M) {
1881 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1882 vf->mdd_tx_events.count++;
1883 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1884 if (netif_msg_tx_err(pf))
1885 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1886 vf->vf_id);
1887
1888 reset_vf_tx = true;
1889 }
1890
1891 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1892 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1893 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1894 vf->mdd_tx_events.count++;
1895 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1896 if (netif_msg_tx_err(pf))
1897 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1898 vf->vf_id);
1899
1900 reset_vf_tx = true;
1901 }
1902
1903 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1904 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1905 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1906 vf->mdd_tx_events.count++;
1907 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1908 if (netif_msg_tx_err(pf))
1909 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1910 vf->vf_id);
1911
1912 reset_vf_tx = true;
1913 }
1914
1915 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1916 if (reg & VP_MDET_RX_VALID_M) {
1917 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1918 vf->mdd_rx_events.count++;
1919 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1920 if (netif_msg_rx_err(pf))
1921 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1922 vf->vf_id);
1923
1924 reset_vf_rx = true;
1925 }
1926
1927 if (reset_vf_tx || reset_vf_rx)
1928 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
1929 reset_vf_rx);
1930 }
1931 mutex_unlock(&pf->vfs.table_lock);
1932
1933 ice_print_vfs_mdd_events(pf);
1934 }
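
/* Sketch of the register-decode pattern used above, factored into a
 * hypothetical helper (not in the driver) for one Tx PQM event. The masks
 * are the same GL_MDET_TX_PQM_* masks ice_handle_mdd_event() already uses.
 */
static inline bool ice_decode_tx_pqm_mdd(u32 reg, u8 *pf_num, u16 *vf_num,
					 u8 *event, u16 *queue)
{
	if (!(reg & GL_MDET_TX_PQM_VALID_M))
		return false;

	*pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
	*vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
	*event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
	*queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
	return true;
}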
1935
1936 /**
1937 * ice_force_phys_link_state - Force the physical link state
1938 * @vsi: VSI to force the physical link state to up/down
1939 * @link_up: true/false indicates to set the physical link to up/down
1940 *
1941 * Force the physical link state by getting the current PHY capabilities from
1942 * hardware and setting the PHY config based on the determined capabilities. If
1943 * link changes, a link event will be triggered because both the Enable Automatic
1944 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1945 *
1946 * Returns 0 on success, negative on failure
1947 */
1948 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1949 {
1950 struct ice_aqc_get_phy_caps_data *pcaps;
1951 struct ice_aqc_set_phy_cfg_data *cfg;
1952 struct ice_port_info *pi;
1953 struct device *dev;
1954 int retcode;
1955
1956 if (!vsi || !vsi->port_info || !vsi->back)
1957 return -EINVAL;
1958 if (vsi->type != ICE_VSI_PF)
1959 return 0;
1960
1961 dev = ice_pf_to_dev(vsi->back);
1962
1963 pi = vsi->port_info;
1964
1965 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1966 if (!pcaps)
1967 return -ENOMEM;
1968
1969 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1970 NULL);
1971 if (retcode) {
1972 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1973 vsi->vsi_num, retcode);
1974 retcode = -EIO;
1975 goto out;
1976 }
1977
1978 /* No change in link */
1979 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1980 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1981 goto out;
1982
1983 /* Use the current user PHY configuration. The current user PHY
1984 * configuration is initialized during probe from PHY capabilities
1985 * software mode, and updated on set PHY configuration.
1986 */
1987 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1988 if (!cfg) {
1989 retcode = -ENOMEM;
1990 goto out;
1991 }
1992
1993 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1994 if (link_up)
1995 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1996 else
1997 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1998
1999 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
2000 if (retcode) {
2001 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2002 vsi->vsi_num, retcode);
2003 retcode = -EIO;
2004 }
2005
2006 kfree(cfg);
2007 out:
2008 kfree(pcaps);
2009 return retcode;
2010 }
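
/* Minimal usage sketch (hypothetical helper, not in the driver): the
 * link-down-on-close flow reduces to a single call to the helper above.
 */
static inline void ice_link_down_sketch(struct ice_vsi *vsi)
{
	int err = ice_force_phys_link_state(vsi, false);

	if (err)
		dev_dbg(ice_pf_to_dev(vsi->back),
			"failed to force link down: %d\n", err);
}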
2011
2012 /**
2013 * ice_init_nvm_phy_type - Initialize the NVM PHY type
2014 * @pi: port info structure
2015 *
2016 * Initialize nvm_phy_type_[low|high] for link lenient mode support
2017 */
2018 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
2019 {
2020 struct ice_aqc_get_phy_caps_data *pcaps;
2021 struct ice_pf *pf = pi->hw->back;
2022 int err;
2023
2024 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2025 if (!pcaps)
2026 return -ENOMEM;
2027
2028 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
2029 pcaps, NULL);
2030
2031 if (err) {
2032 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2033 goto out;
2034 }
2035
2036 pf->nvm_phy_type_hi = pcaps->phy_type_high;
2037 pf->nvm_phy_type_lo = pcaps->phy_type_low;
2038
2039 out:
2040 kfree(pcaps);
2041 return err;
2042 }
2043
2044 /**
2045 * ice_init_link_dflt_override - Initialize link default override
2046 * @pi: port info structure
2047 *
2048 * Initialize link default override and PHY total port shutdown during probe
2049 */
2050 static void ice_init_link_dflt_override(struct ice_port_info *pi)
2051 {
2052 struct ice_link_default_override_tlv *ldo;
2053 struct ice_pf *pf = pi->hw->back;
2054
2055 ldo = &pf->link_dflt_override;
2056 if (ice_get_link_default_override(ldo, pi))
2057 return;
2058
2059 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2060 return;
2061
2062 /* Enable Total Port Shutdown (override/replace link-down-on-close
2063 * ethtool private flag) for ports with Port Disable bit set.
2064 */
2065 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2066 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2067 }
2068
2069 /**
2070 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2071 * @pi: port info structure
2072 *
2073 * If default override is enabled, initialize the user PHY cfg speed and FEC
2074 * settings using the default override mask from the NVM.
2075 *
2076 * The PHY should only be configured with the default override settings the
2077 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2078 * is used to indicate that the user PHY cfg default override is initialized
2079 * and the PHY has not been configured with the default override settings. The
2080 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2081 * configured.
2082 *
2083 * This function should be called only if the FW doesn't support default
2084 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2085 */
2086 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2087 {
2088 struct ice_link_default_override_tlv *ldo;
2089 struct ice_aqc_set_phy_cfg_data *cfg;
2090 struct ice_phy_info *phy = &pi->phy;
2091 struct ice_pf *pf = pi->hw->back;
2092
2093 ldo = &pf->link_dflt_override;
2094
2095 /* If link default override is enabled, use it to mask the NVM PHY
2096 * capabilities for speed and FEC default configuration.
2097 */
2098 cfg = &phy->curr_user_phy_cfg;
2099
2100 if (ldo->phy_type_low || ldo->phy_type_high) {
2101 cfg->phy_type_low = pf->nvm_phy_type_lo &
2102 cpu_to_le64(ldo->phy_type_low);
2103 cfg->phy_type_high = pf->nvm_phy_type_hi &
2104 cpu_to_le64(ldo->phy_type_high);
2105 }
2106 cfg->link_fec_opt = ldo->fec_options;
2107 phy->curr_user_fec_req = ICE_FEC_AUTO;
2108
2109 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2110 }
2111
2112 /**
2113 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2114 * @pi: port info structure
2115 *
2116 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2117 * mode to default. The PHY defaults are from get PHY capabilities topology
2118 * with media so call when media is first available. An error is returned if
2119 * called when media is not available. The PHY initialization completed state is
2120 * set here.
2121 *
2122 * These configurations are used later when setting the PHY
2123 * configuration. The user PHY configuration is updated on each set PHY
2124 * configuration call. Returns 0 on success, negative on failure
2125 */
2126 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2127 {
2128 struct ice_aqc_get_phy_caps_data *pcaps;
2129 struct ice_phy_info *phy = &pi->phy;
2130 struct ice_pf *pf = pi->hw->back;
2131 int err;
2132
2133 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2134 return -EIO;
2135
2136 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2137 if (!pcaps)
2138 return -ENOMEM;
2139
2140 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2141 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2142 pcaps, NULL);
2143 else
2144 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2145 pcaps, NULL);
2146 if (err) {
2147 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2148 goto err_out;
2149 }
2150
2151 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2152
2153 /* check if lenient mode is supported and enabled */
2154 if (ice_fw_supports_link_override(pi->hw) &&
2155 !(pcaps->module_compliance_enforcement &
2156 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2157 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2158
2159 /* if the FW supports default PHY configuration mode, then the driver
2160 * does not have to apply link override settings. If not,
2161 * initialize user PHY configuration with link override values
2162 */
2163 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2164 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2165 ice_init_phy_cfg_dflt_override(pi);
2166 goto out;
2167 }
2168 }
2169
2170 /* if link default override is not enabled, set user flow control and
2171 * FEC settings based on what get_phy_caps returned
2172 */
2173 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2174 pcaps->link_fec_options);
2175 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2176
2177 out:
2178 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2179 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2180 err_out:
2181 kfree(pcaps);
2182 return err;
2183 }
2184
2185 /**
2186 * ice_configure_phy - configure PHY
2187 * @vsi: VSI of PHY
2188 *
2189 * Set the PHY configuration. If the current PHY configuration is the same as
2190 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise,
2191 * configure the PHY based on the get PHY capabilities for topology with media.
2192 */
2193 static int ice_configure_phy(struct ice_vsi *vsi)
2194 {
2195 struct device *dev = ice_pf_to_dev(vsi->back);
2196 struct ice_port_info *pi = vsi->port_info;
2197 struct ice_aqc_get_phy_caps_data *pcaps;
2198 struct ice_aqc_set_phy_cfg_data *cfg;
2199 struct ice_phy_info *phy = &pi->phy;
2200 struct ice_pf *pf = vsi->back;
2201 int err;
2202
2203 /* Ensure we have media as we cannot configure a medialess port */
2204 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2205 return -ENOMEDIUM;
2206
2207 ice_print_topo_conflict(vsi);
2208
2209 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2210 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2211 return -EPERM;
2212
2213 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2214 return ice_force_phys_link_state(vsi, true);
2215
2216 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2217 if (!pcaps)
2218 return -ENOMEM;
2219
2220 /* Get current PHY config */
2221 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2222 NULL);
2223 if (err) {
2224 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2225 vsi->vsi_num, err);
2226 goto done;
2227 }
2228
2229 /* If PHY enable link is configured and configuration has not changed,
2230 * there's nothing to do
2231 */
2232 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2233 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2234 goto done;
2235
2236 /* Use PHY topology as baseline for configuration */
2237 memset(pcaps, 0, sizeof(*pcaps));
2238 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2239 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2240 pcaps, NULL);
2241 else
2242 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2243 pcaps, NULL);
2244 if (err) {
2245 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2246 vsi->vsi_num, err);
2247 goto done;
2248 }
2249
2250 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2251 if (!cfg) {
2252 err = -ENOMEM;
2253 goto done;
2254 }
2255
2256 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2257
2258 /* Speed - If default override pending, use curr_user_phy_cfg set in
2259 * ice_init_phy_cfg_dflt_override().
2260 */
2261 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2262 vsi->back->state)) {
2263 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2264 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2265 } else {
2266 u64 phy_low = 0, phy_high = 0;
2267
2268 ice_update_phy_type(&phy_low, &phy_high,
2269 pi->phy.curr_user_speed_req);
2270 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2271 cfg->phy_type_high = pcaps->phy_type_high &
2272 cpu_to_le64(phy_high);
2273 }
2274
2275 /* Can't provide what was requested; use PHY capabilities */
2276 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2277 cfg->phy_type_low = pcaps->phy_type_low;
2278 cfg->phy_type_high = pcaps->phy_type_high;
2279 }
2280
2281 /* FEC */
2282 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2283
2284 /* Can't provide what was requested; use PHY capabilities */
2285 if (cfg->link_fec_opt !=
2286 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2287 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2288 cfg->link_fec_opt = pcaps->link_fec_options;
2289 }
2290
2291 /* Flow Control - always supported; no need to check against
2292 * capabilities
2293 */
2294 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2295
2296 /* Enable link and link update */
2297 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2298
2299 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2300 if (err)
2301 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2302 vsi->vsi_num, err);
2303
2304 kfree(cfg);
2305 done:
2306 kfree(pcaps);
2307 return err;
2308 }
2309
2310 /**
2311 * ice_check_media_subtask - Check for media
2312 * @pf: pointer to PF struct
2313 *
2314 * If media is available, then initialize the PHY user configuration if it has
2315 * not been done yet, and configure the PHY if the interface is up.
2316 */
2317 static void ice_check_media_subtask(struct ice_pf *pf)
2318 {
2319 struct ice_port_info *pi;
2320 struct ice_vsi *vsi;
2321 int err;
2322
2323 /* No need to check for media if it's already present */
2324 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2325 return;
2326
2327 vsi = ice_get_main_vsi(pf);
2328 if (!vsi)
2329 return;
2330
2331 /* Refresh link info and check if media is present */
2332 pi = vsi->port_info;
2333 err = ice_update_link_info(pi);
2334 if (err)
2335 return;
2336
2337 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2338
2339 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2340 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2341 ice_init_phy_user_cfg(pi);
2342
2343 /* PHY settings are reset on media insertion, reconfigure
2344 * PHY to preserve settings.
2345 */
2346 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2347 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2348 return;
2349
2350 err = ice_configure_phy(vsi);
2351 if (!err)
2352 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2353
2354 /* A Link Status Event will be generated; the event handler
2355 * will complete bringing the interface up
2356 */
2357 }
2358 }
2359
2360 /**
2361 * ice_service_task - manage and run subtasks
2362 * @work: pointer to work_struct contained by the PF struct
2363 */
2364 static void ice_service_task(struct work_struct *work)
2365 {
2366 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2367 unsigned long start_time = jiffies;
2368
2369 /* subtasks */
2370
2371 /* process reset requests first */
2372 ice_reset_subtask(pf);
2373
2374 /* bail if a reset/recovery cycle is pending or rebuild failed */
2375 if (ice_is_reset_in_progress(pf->state) ||
2376 test_bit(ICE_SUSPENDED, pf->state) ||
2377 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2378 ice_service_task_complete(pf);
2379 return;
2380 }
2381
2382 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2383 struct iidc_event *event;
2384
2385 event = kzalloc(sizeof(*event), GFP_KERNEL);
2386 if (event) {
2387 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2388 /* report the entire OICR value to AUX driver */
2389 swap(event->reg, pf->oicr_err_reg);
2390 ice_send_event_to_aux(pf, event);
2391 kfree(event);
2392 }
2393 }
2394
2395 /* Unplug the aux device per request; if an unplug request came in
2396 * while a plug request was being processed, this handles it
2397 */
2398 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2399 ice_unplug_aux_dev(pf);
2400
2401 /* Plug aux device per request */
2402 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2403 ice_plug_aux_dev(pf);
2404
2405 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2406 struct iidc_event *event;
2407
2408 event = kzalloc(sizeof(*event), GFP_KERNEL);
2409 if (event) {
2410 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2411 ice_send_event_to_aux(pf, event);
2412 kfree(event);
2413 }
2414 }
2415
2416 ice_clean_adminq_subtask(pf);
2417 ice_check_media_subtask(pf);
2418 ice_check_for_hang_subtask(pf);
2419 ice_sync_fltr_subtask(pf);
2420 ice_handle_mdd_event(pf);
2421 ice_watchdog_subtask(pf);
2422
2423 if (ice_is_safe_mode(pf)) {
2424 ice_service_task_complete(pf);
2425 return;
2426 }
2427
2428 ice_process_vflr_event(pf);
2429 ice_clean_mailboxq_subtask(pf);
2430 ice_clean_sbq_subtask(pf);
2431 ice_sync_arfs_fltrs(pf);
2432 ice_flush_fdir_ctx(pf);
2433
2434 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2435 ice_service_task_complete(pf);
2436
2437 /* If the tasks have taken longer than one service timer period
2438 * or there is more work to be done, reset the service timer to
2439 * schedule the service task now.
2440 */
2441 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2442 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2443 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2444 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2445 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2446 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2447 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2448 mod_timer(&pf->serv_tmr, jiffies);
2449 }
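
/* A new subtask would follow the same produce/consume pattern as the
 * handlers above (sketch only; ICE_EXAMPLE_EVENT_PENDING and
 * ice_handle_example_event() are hypothetical names):
 *
 *	producer, typically from IRQ context:
 *		set_bit(ICE_EXAMPLE_EVENT_PENDING, pf->state);
 *		ice_service_task_schedule(pf);
 *
 *	consumer, called from ice_service_task():
 *		if (test_and_clear_bit(ICE_EXAMPLE_EVENT_PENDING, pf->state))
 *			ice_handle_example_event(pf);
 */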
2450
2451 /**
2452 * ice_set_ctrlq_len - helper function to set controlq length
2453 * @hw: pointer to the HW instance
2454 */
2455 static void ice_set_ctrlq_len(struct ice_hw *hw)
2456 {
2457 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2458 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2459 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2460 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2461 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2462 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2463 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2464 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2465 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2466 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2467 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2468 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2469 }
2470
2471 /**
2472 * ice_schedule_reset - schedule a reset
2473 * @pf: board private structure
2474 * @reset: reset being requested
2475 */
2476 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2477 {
2478 struct device *dev = ice_pf_to_dev(pf);
2479
2480 /* bail out if earlier reset has failed */
2481 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2482 dev_dbg(dev, "earlier reset has failed\n");
2483 return -EIO;
2484 }
2485 /* bail if reset/recovery already in progress */
2486 if (ice_is_reset_in_progress(pf->state)) {
2487 dev_dbg(dev, "Reset already in progress\n");
2488 return -EBUSY;
2489 }
2490
2491 switch (reset) {
2492 case ICE_RESET_PFR:
2493 set_bit(ICE_PFR_REQ, pf->state);
2494 break;
2495 case ICE_RESET_CORER:
2496 set_bit(ICE_CORER_REQ, pf->state);
2497 break;
2498 case ICE_RESET_GLOBR:
2499 set_bit(ICE_GLOBR_REQ, pf->state);
2500 break;
2501 default:
2502 return -EINVAL;
2503 }
2504
2505 ice_service_task_schedule(pf);
2506 return 0;
2507 }
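
/* Caller sketch (hypothetical, not in the driver): scheduling a PF reset
 * and tolerating the case where one is already in flight.
 */
static inline void ice_try_schedule_pfr(struct ice_pf *pf)
{
	int err = ice_schedule_reset(pf, ICE_RESET_PFR);

	if (err == -EBUSY)
		dev_dbg(ice_pf_to_dev(pf), "reset already in progress\n");
	else if (err)
		dev_dbg(ice_pf_to_dev(pf), "reset not scheduled: %d\n", err);
}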
2508
2509 /**
2510 * ice_irq_affinity_notify - Callback for affinity changes
2511 * @notify: context as to what irq was changed
2512 * @mask: the new affinity mask
2513 *
2514 * This is a callback function used by the irq_set_affinity_notifier function
2515 * so that we may register to receive changes to the irq affinity masks.
2516 */
2517 static void
2518 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2519 const cpumask_t *mask)
2520 {
2521 struct ice_q_vector *q_vector =
2522 container_of(notify, struct ice_q_vector, affinity_notify);
2523
2524 cpumask_copy(&q_vector->affinity_mask, mask);
2525 }
2526
2527 /**
2528 * ice_irq_affinity_release - Callback for affinity notifier release
2529 * @ref: internal core kernel usage
2530 *
2531 * This is a callback function used by the irq_set_affinity_notifier function
2532 * to inform the current notification subscriber that they will no longer
2533 * receive notifications.
2534 */
2535 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2536
2537 /**
2538 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2539 * @vsi: the VSI being configured
2540 */
2541 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2542 {
2543 struct ice_hw *hw = &vsi->back->hw;
2544 int i;
2545
2546 ice_for_each_q_vector(vsi, i)
2547 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2548
2549 ice_flush(hw);
2550 return 0;
2551 }
2552
2553 /**
2554 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2555 * @vsi: the VSI being configured
2556 * @basename: name for the vector
2557 */
2558 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2559 {
2560 int q_vectors = vsi->num_q_vectors;
2561 struct ice_pf *pf = vsi->back;
2562 struct device *dev;
2563 int rx_int_idx = 0;
2564 int tx_int_idx = 0;
2565 int vector, err;
2566 int irq_num;
2567
2568 dev = ice_pf_to_dev(pf);
2569 for (vector = 0; vector < q_vectors; vector++) {
2570 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2571
2572 irq_num = q_vector->irq.virq;
2573
2574 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2575 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2576 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2577 tx_int_idx++;
2578 } else if (q_vector->rx.rx_ring) {
2579 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2580 "%s-%s-%d", basename, "rx", rx_int_idx++);
2581 } else if (q_vector->tx.tx_ring) {
2582 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2583 "%s-%s-%d", basename, "tx", tx_int_idx++);
2584 } else {
2585 /* skip this unused q_vector */
2586 continue;
2587 }
2588 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2589 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2590 IRQF_SHARED, q_vector->name,
2591 q_vector);
2592 else
2593 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2594 0, q_vector->name, q_vector);
2595 if (err) {
2596 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2597 err);
2598 goto free_q_irqs;
2599 }
2600
2601 /* register for affinity change notifications */
2602 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2603 struct irq_affinity_notify *affinity_notify;
2604
2605 affinity_notify = &q_vector->affinity_notify;
2606 affinity_notify->notify = ice_irq_affinity_notify;
2607 affinity_notify->release = ice_irq_affinity_release;
2608 irq_set_affinity_notifier(irq_num, affinity_notify);
2609 }
2610
2611 /* assign the mask for this irq */
2612 irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
2613 }
2614
2615 err = ice_set_cpu_rx_rmap(vsi);
2616 if (err) {
2617 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2618 vsi->vsi_num, ERR_PTR(err));
2619 goto free_q_irqs;
2620 }
2621
2622 vsi->irqs_ready = true;
2623 return 0;
2624
2625 free_q_irqs:
2626 while (vector--) {
2627 irq_num = vsi->q_vectors[vector]->irq.virq;
2628 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2629 irq_set_affinity_notifier(irq_num, NULL);
2630 irq_update_affinity_hint(irq_num, NULL);
2631 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2632 }
2633 return err;
2634 }
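
/* For reference: with a basename such as "ice-eth0" (callers typically
 * build it from the driver and netdev names), the snprintf() calls above
 * yield per-vector IRQ names like "ice-eth0-TxRx-0", "ice-eth0-TxRx-1",
 * and so on, which is what appears in /proc/interrupts.
 */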
2635
2636 /**
2637 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2638 * @vsi: VSI to setup Tx rings used by XDP
2639 *
2640 * Return 0 on success and negative value on error
2641 */
2642 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2643 {
2644 struct device *dev = ice_pf_to_dev(vsi->back);
2645 struct ice_tx_desc *tx_desc;
2646 int i, j;
2647
2648 ice_for_each_xdp_txq(vsi, i) {
2649 u16 xdp_q_idx = vsi->alloc_txq + i;
2650 struct ice_ring_stats *ring_stats;
2651 struct ice_tx_ring *xdp_ring;
2652
2653 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2654 if (!xdp_ring)
2655 goto free_xdp_rings;
2656
2657 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2658 if (!ring_stats) {
2659 ice_free_tx_ring(xdp_ring);
2660 goto free_xdp_rings;
2661 }
2662
2663 xdp_ring->ring_stats = ring_stats;
2664 xdp_ring->q_index = xdp_q_idx;
2665 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2666 xdp_ring->vsi = vsi;
2667 xdp_ring->netdev = NULL;
2668 xdp_ring->dev = dev;
2669 xdp_ring->count = vsi->num_tx_desc;
2670 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2671 if (ice_setup_tx_ring(xdp_ring))
2672 goto free_xdp_rings;
2673 ice_set_ring_xdp(xdp_ring);
2674 spin_lock_init(&xdp_ring->tx_lock);
2675 for (j = 0; j < xdp_ring->count; j++) {
2676 tx_desc = ICE_TX_DESC(xdp_ring, j);
2677 tx_desc->cmd_type_offset_bsz = 0;
2678 }
2679 }
2680
2681 return 0;
2682
2683 free_xdp_rings:
2684 for (; i >= 0; i--) {
2685 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2686 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2687 vsi->xdp_rings[i]->ring_stats = NULL;
2688 ice_free_tx_ring(vsi->xdp_rings[i]);
2689 }
2690 }
2691 return -ENOMEM;
2692 }
2693
2694 /**
2695 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2696 * @vsi: VSI to set the bpf prog on
2697 * @prog: the bpf prog pointer
2698 */
2699 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2700 {
2701 struct bpf_prog *old_prog;
2702 int i;
2703
2704 old_prog = xchg(&vsi->xdp_prog, prog);
2705 ice_for_each_rxq(vsi, i)
2706 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2707
2708 if (old_prog)
2709 bpf_prog_put(old_prog);
2710 }
2711
2712 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2713 {
2714 struct ice_q_vector *q_vector;
2715 struct ice_tx_ring *ring;
2716
2717 if (static_key_enabled(&ice_xdp_locking_key))
2718 return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2719
2720 q_vector = vsi->rx_rings[qid]->q_vector;
2721 ice_for_each_tx_ring(ring, q_vector->tx)
2722 if (ice_ring_is_xdp(ring))
2723 return ring;
2724
2725 return NULL;
2726 }
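
/* Sketch (hypothetical helper, not in the driver): how a transmit path
 * pairs ring lookup with the optional per-ring lock. When the static key
 * is enabled, XDP rings are shared between CPUs and must be serialized.
 */
static inline void ice_xdp_xmit_sketch(struct ice_vsi *vsi, int qid)
{
	struct ice_tx_ring *xdp_ring = ice_xdp_ring_from_qid(vsi, qid);

	if (!xdp_ring)
		return;

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_lock(&xdp_ring->tx_lock);
	/* ...post descriptors to xdp_ring here... */
	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);
}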
2727
2728 /**
2729 * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2730 * @vsi: the VSI with XDP rings being configured
2731 *
2732 * Map XDP rings to interrupt vectors and perform the configuration steps
2733 * dependent on the mapping.
2734 */
2735 void ice_map_xdp_rings(struct ice_vsi *vsi)
2736 {
2737 int xdp_rings_rem = vsi->num_xdp_txq;
2738 int v_idx, q_idx;
2739
2740 /* follow the logic from ice_vsi_map_rings_to_vectors */
2741 ice_for_each_q_vector(vsi, v_idx) {
2742 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2743 int xdp_rings_per_v, q_id, q_base;
2744
2745 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2746 vsi->num_q_vectors - v_idx);
2747 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2748
2749 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2750 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2751
2752 xdp_ring->q_vector = q_vector;
2753 xdp_ring->next = q_vector->tx.tx_ring;
2754 q_vector->tx.tx_ring = xdp_ring;
2755 }
2756 xdp_rings_rem -= xdp_rings_per_v;
2757 }
2758
2759 ice_for_each_rxq(vsi, q_idx) {
2760 vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2761 q_idx);
2762 ice_tx_xsk_pool(vsi, q_idx);
2763 }
2764 }
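
/* Worked example for the distribution above: with 4 q_vectors and 6 XDP
 * rings, DIV_ROUND_UP() hands out 2, 2, 1, 1 rings per vector; the
 * remainder is absorbed by the earliest vectors.
 */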
2765
2766 /**
2767 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2768 * @vsi: VSI to bring up Tx rings used by XDP
2769 * @prog: bpf program that will be assigned to VSI
2770 * @cfg_type: create from scratch or restore the existing configuration
2771 *
2772 * Return 0 on success and negative value on error
2773 */
2774 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2775 enum ice_xdp_cfg cfg_type)
2776 {
2777 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2778 struct ice_pf *pf = vsi->back;
2779 struct ice_qs_cfg xdp_qs_cfg = {
2780 .qs_mutex = &pf->avail_q_mutex,
2781 .pf_map = pf->avail_txqs,
2782 .pf_map_size = pf->max_pf_txqs,
2783 .q_count = vsi->num_xdp_txq,
2784 .scatter_count = ICE_MAX_SCATTER_TXQS,
2785 .vsi_map = vsi->txq_map,
2786 .vsi_map_offset = vsi->alloc_txq,
2787 .mapping_mode = ICE_VSI_MAP_CONTIG
2788 };
2789 struct device *dev;
2790 int status, i;
2791
2792 dev = ice_pf_to_dev(pf);
2793 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2794 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2795 if (!vsi->xdp_rings)
2796 return -ENOMEM;
2797
2798 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2799 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2800 goto err_map_xdp;
2801
2802 if (static_key_enabled(&ice_xdp_locking_key))
2803 netdev_warn(vsi->netdev,
2804 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2805
2806 if (ice_xdp_alloc_setup_rings(vsi))
2807 goto clear_xdp_rings;
2808
2809 /* omit the scheduler update if in reset path; XDP queues will be
2810 * taken into account at the end of ice_vsi_rebuild, where
2811 * ice_cfg_vsi_lan is being called
2812 */
2813 if (cfg_type == ICE_XDP_CFG_PART)
2814 return 0;
2815
2816 ice_map_xdp_rings(vsi);
2817
2818 /* tell the Tx scheduler that right now we have
2819 * additional queues
2820 */
2821 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2822 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2823
2824 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2825 max_txqs);
2826 if (status) {
2827 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2828 status);
2829 goto clear_xdp_rings;
2830 }
2831
2832 /* assign the prog only when it's not already present on VSI;
2833 * this flow is exercised by both the ethtool -L and ndo_bpf flows;
2834 * VSI rebuild that happens under ethtool -L can expose us to
2835 * the bpf_prog refcount issues as we would be swapping same
2836 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2837 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2838 * this is not harmful as dev_xdp_install bumps the refcount
2839 * before calling the op exposed by the driver;
2840 */
2841 if (!ice_is_xdp_ena_vsi(vsi))
2842 ice_vsi_assign_bpf_prog(vsi, prog);
2843
2844 return 0;
2845 clear_xdp_rings:
2846 ice_for_each_xdp_txq(vsi, i)
2847 if (vsi->xdp_rings[i]) {
2848 kfree_rcu(vsi->xdp_rings[i], rcu);
2849 vsi->xdp_rings[i] = NULL;
2850 }
2851
2852 err_map_xdp:
2853 mutex_lock(&pf->avail_q_mutex);
2854 ice_for_each_xdp_txq(vsi, i) {
2855 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2856 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2857 }
2858 mutex_unlock(&pf->avail_q_mutex);
2859
2860 devm_kfree(dev, vsi->xdp_rings);
2861 return -ENOMEM;
2862 }
2863
2864 /**
2865 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2866 * @vsi: VSI to remove XDP rings
2867 * @cfg_type: disable XDP permanently or allow it to be restored later
2868 *
2869 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2870 * resources
2871 */
2872 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2873 {
2874 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2875 struct ice_pf *pf = vsi->back;
2876 int i, v_idx;
2877
2878 /* q_vectors are freed in reset path so there's no point in detaching
2879 * rings
2880 */
2881 if (cfg_type == ICE_XDP_CFG_PART)
2882 goto free_qmap;
2883
2884 ice_for_each_q_vector(vsi, v_idx) {
2885 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2886 struct ice_tx_ring *ring;
2887
2888 ice_for_each_tx_ring(ring, q_vector->tx)
2889 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2890 break;
2891
2892 /* restore the value of last node prior to XDP setup */
2893 q_vector->tx.tx_ring = ring;
2894 }
2895
2896 free_qmap:
2897 mutex_lock(&pf->avail_q_mutex);
2898 ice_for_each_xdp_txq(vsi, i) {
2899 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2900 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2901 }
2902 mutex_unlock(&pf->avail_q_mutex);
2903
2904 ice_for_each_xdp_txq(vsi, i)
2905 if (vsi->xdp_rings[i]) {
2906 if (vsi->xdp_rings[i]->desc) {
2907 synchronize_rcu();
2908 ice_free_tx_ring(vsi->xdp_rings[i]);
2909 }
2910 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2911 vsi->xdp_rings[i]->ring_stats = NULL;
2912 kfree_rcu(vsi->xdp_rings[i], rcu);
2913 vsi->xdp_rings[i] = NULL;
2914 }
2915
2916 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2917 vsi->xdp_rings = NULL;
2918
2919 if (static_key_enabled(&ice_xdp_locking_key))
2920 static_branch_dec(&ice_xdp_locking_key);
2921
2922 if (cfg_type == ICE_XDP_CFG_PART)
2923 return 0;
2924
2925 ice_vsi_assign_bpf_prog(vsi, NULL);
2926
2927 /* notify Tx scheduler that we destroyed XDP queues and bring
2928 * back the old number of child nodes
2929 */
2930 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2931 max_txqs[i] = vsi->num_txq;
2932
2933 /* change number of XDP Tx queues to 0 */
2934 vsi->num_xdp_txq = 0;
2935
2936 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2937 max_txqs);
2938 }
2939
2940 /**
2941 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2942 * @vsi: VSI to schedule napi on
2943 */
2944 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2945 {
2946 int i;
2947
2948 ice_for_each_rxq(vsi, i) {
2949 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2950
2951 if (READ_ONCE(rx_ring->xsk_pool))
2952 napi_schedule(&rx_ring->q_vector->napi);
2953 }
2954 }
2955
2956 /**
2957 * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2958 * @vsi: VSI to determine the count of XDP Tx qs
2959 *
2960 * Returns 0 if the available Tx queue count is at least half the CPU count,
2961 * -ENOMEM otherwise
2962 */
2963 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2964 {
2965 u16 avail = ice_get_avail_txq_count(vsi->back);
2966 u16 cpus = num_possible_cpus();
2967
2968 if (avail < cpus / 2)
2969 return -ENOMEM;
2970
2971 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2972
2973 if (vsi->num_xdp_txq < cpus)
2974 static_branch_inc(&ice_xdp_locking_key);
2975
2976 return 0;
2977 }
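
/* Worked example: on a system with 16 possible CPUs, fewer than 8
 * available Tx queues fails with -ENOMEM; 12 available queues yields
 * vsi->num_xdp_txq = 12, and since 12 < 16 the locking static key is
 * enabled so rings can be shared safely between CPUs.
 */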
2978
2979 /**
2980 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2981 * @vsi: Pointer to VSI structure
2982 */
2983 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2984 {
2985 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2986 return ICE_RXBUF_1664;
2987 else
2988 return ICE_RXBUF_3072;
2989 }
2990
2991 /**
2992 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2993 * @vsi: VSI to setup XDP for
2994 * @prog: XDP program
2995 * @extack: netlink extended ack
2996 */
2997 static int
2998 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2999 struct netlink_ext_ack *extack)
3000 {
3001 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
3002 bool if_running = netif_running(vsi->netdev);
3003 int ret = 0, xdp_ring_err = 0;
3004
3005 if (prog && !prog->aux->xdp_has_frags) {
3006 if (frame_size > ice_max_xdp_frame_size(vsi)) {
3007 NL_SET_ERR_MSG_MOD(extack,
3008 "MTU is too large for linear frames and XDP prog does not support frags");
3009 return -EOPNOTSUPP;
3010 }
3011 }
3012
3013 /* hot swap progs and avoid toggling link */
3014 if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
3015 ice_vsi_assign_bpf_prog(vsi, prog);
3016 return 0;
3017 }
3018
3019 /* need to stop netdev while setting up the program for Rx rings */
3020 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
3021 ret = ice_down(vsi);
3022 if (ret) {
3023 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3024 return ret;
3025 }
3026 }
3027
3028 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3029 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3030 if (xdp_ring_err) {
3031 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3032 } else {
3033 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3034 ICE_XDP_CFG_FULL);
3035 if (xdp_ring_err)
3036 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3037 }
3038 xdp_features_set_redirect_target(vsi->netdev, true);
3039 /* reallocate Rx queues that are used for zero-copy */
3040 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
3041 if (xdp_ring_err)
3042 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
3043 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3044 xdp_features_clear_redirect_target(vsi->netdev);
3045 xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3046 if (xdp_ring_err)
3047 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3048 /* reallocate Rx queues that were used for zero-copy */
3049 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
3050 if (xdp_ring_err)
3051 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
3052 }
3053
3054 if (if_running)
3055 ret = ice_up(vsi);
3056
3057 if (!ret && prog)
3058 ice_vsi_rx_napi_schedule(vsi);
3059
3060 return (ret || xdp_ring_err) ? -ENOMEM : 0;
3061 }
3062
3063 /**
3064 * ice_xdp_safe_mode - XDP handler for safe mode
3065 * @dev: netdevice
3066 * @xdp: XDP command
3067 */
3068 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3069 struct netdev_bpf *xdp)
3070 {
3071 NL_SET_ERR_MSG_MOD(xdp->extack,
3072 "Please provide working DDP firmware package in order to use XDP\n"
3073 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3074 return -EOPNOTSUPP;
3075 }
3076
3077 /**
3078 * ice_xdp - implements XDP handler
3079 * @dev: netdevice
3080 * @xdp: XDP command
3081 */
3082 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3083 {
3084 struct ice_netdev_priv *np = netdev_priv(dev);
3085 struct ice_vsi *vsi = np->vsi;
3086
3087 if (vsi->type != ICE_VSI_PF) {
3088 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3089 return -EINVAL;
3090 }
3091
3092 switch (xdp->command) {
3093 case XDP_SETUP_PROG:
3094 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3095 case XDP_SETUP_XSK_POOL:
3096 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3097 xdp->xsk.queue_id);
3098 default:
3099 return -EINVAL;
3100 }
3101 }
3102
3103 /**
3104 * ice_ena_misc_vector - enable the non-queue interrupts
3105 * @pf: board private structure
3106 */
3107 static void ice_ena_misc_vector(struct ice_pf *pf)
3108 {
3109 struct ice_hw *hw = &pf->hw;
3110 u32 pf_intr_start_offset;
3111 u32 val;
3112
3113 /* Disable anti-spoof detection interrupt to prevent spurious event
3114 * interrupts during a function reset. Anti-spoof functionality is
3115 * still supported.
3116 */
3117 val = rd32(hw, GL_MDCK_TX_TDPU);
3118 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3119 wr32(hw, GL_MDCK_TX_TDPU, val);
3120
3121 /* clear things first */
3122 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3123 rd32(hw, PFINT_OICR); /* read to clear */
3124
3125 val = (PFINT_OICR_ECC_ERR_M |
3126 PFINT_OICR_MAL_DETECT_M |
3127 PFINT_OICR_GRST_M |
3128 PFINT_OICR_PCI_EXCEPTION_M |
3129 PFINT_OICR_VFLR_M |
3130 PFINT_OICR_HMC_ERR_M |
3131 PFINT_OICR_PE_PUSH_M |
3132 PFINT_OICR_PE_CRITERR_M);
3133
3134 wr32(hw, PFINT_OICR_ENA, val);
3135
3136 /* SW_ITR_IDX = 0, but don't change INTENA */
3137 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3138 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3139
3140 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3141 return;
3142 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3143 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3144 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3145 }
3146
3147 /**
3148 * ice_ll_ts_intr - ll_ts interrupt handler
3149 * @irq: interrupt number
3150 * @data: pointer to a q_vector
3151 */
3152 static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3153 {
3154 struct ice_pf *pf = data;
3155 u32 pf_intr_start_offset;
3156 struct ice_ptp_tx *tx;
3157 unsigned long flags;
3158 struct ice_hw *hw;
3159 u32 val;
3160 u8 idx;
3161
3162 hw = &pf->hw;
3163 tx = &pf->ptp.port.tx;
3164 spin_lock_irqsave(&tx->lock, flags);
3165 ice_ptp_complete_tx_single_tstamp(tx);
3166
3167 idx = find_next_bit_wrap(tx->in_use, tx->len,
3168 tx->last_ll_ts_idx_read + 1);
3169 if (idx != tx->len)
3170 ice_ptp_req_tx_single_tstamp(tx, idx);
3171 spin_unlock_irqrestore(&tx->lock, flags);
3172
3173 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3174 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3175 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3176 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3177 val);
3178
3179 return IRQ_HANDLED;
3180 }
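
/* Note on the scan above: find_next_bit_wrap() searches tx->in_use
 * circularly starting at tx->last_ll_ts_idx_read + 1 and returns tx->len
 * when no bit is set, so "idx != tx->len" means another timestamp is
 * still outstanding. A minimal sketch of the same pattern:
 *
 *	idx = find_next_bit_wrap(bitmap, len, start);
 *	if (idx != len)
 *		process(idx);	// hypothetical consumer
 */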
3181
3182 /**
3183 * ice_misc_intr - misc interrupt handler
3184 * @irq: interrupt number
3185 * @data: pointer to a q_vector
3186 */
3187 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3188 {
3189 struct ice_pf *pf = (struct ice_pf *)data;
3190 irqreturn_t ret = IRQ_HANDLED;
3191 struct ice_hw *hw = &pf->hw;
3192 struct device *dev;
3193 u32 oicr, ena_mask;
3194
3195 dev = ice_pf_to_dev(pf);
3196 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3197 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3198 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3199
3200 oicr = rd32(hw, PFINT_OICR);
3201 ena_mask = rd32(hw, PFINT_OICR_ENA);
3202
3203 if (oicr & PFINT_OICR_SWINT_M) {
3204 ena_mask &= ~PFINT_OICR_SWINT_M;
3205 pf->sw_int_count++;
3206 }
3207
3208 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3209 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3210 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3211 }
3212 if (oicr & PFINT_OICR_VFLR_M) {
3213 /* disable any further VFLR event notifications */
3214 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3215 u32 reg = rd32(hw, PFINT_OICR_ENA);
3216
3217 reg &= ~PFINT_OICR_VFLR_M;
3218 wr32(hw, PFINT_OICR_ENA, reg);
3219 } else {
3220 ena_mask &= ~PFINT_OICR_VFLR_M;
3221 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3222 }
3223 }
3224
3225 if (oicr & PFINT_OICR_GRST_M) {
3226 u32 reset;
3227
3228 /* we have a reset warning */
3229 ena_mask &= ~PFINT_OICR_GRST_M;
3230 reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3231 rd32(hw, GLGEN_RSTAT));
3232
3233 if (reset == ICE_RESET_CORER)
3234 pf->corer_count++;
3235 else if (reset == ICE_RESET_GLOBR)
3236 pf->globr_count++;
3237 else if (reset == ICE_RESET_EMPR)
3238 pf->empr_count++;
3239 else
3240 dev_dbg(dev, "Invalid reset type %d\n", reset);
3241
3242 /* If a reset cycle isn't already in progress, we set a bit in
3243 * pf->state so that the service task can start a reset/rebuild.
3244 */
3245 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3246 if (reset == ICE_RESET_CORER)
3247 set_bit(ICE_CORER_RECV, pf->state);
3248 else if (reset == ICE_RESET_GLOBR)
3249 set_bit(ICE_GLOBR_RECV, pf->state);
3250 else
3251 set_bit(ICE_EMPR_RECV, pf->state);
3252
3253 /* There are a couple of different bits at play here.
3254 * hw->reset_ongoing indicates whether the hardware is
3255 * in reset. This is set to true when a reset interrupt
3256 * is received and set back to false after the driver
3257 * has determined that the hardware is out of reset.
3258 *
3259 * ICE_RESET_OICR_RECV in pf->state indicates
3260 * that a post reset rebuild is required before the
3261 * driver is operational again. This is set above.
3262 *
3263 * As this is the start of the reset/rebuild cycle, set
3264 * both to indicate that.
3265 */
3266 hw->reset_ongoing = true;
3267 }
3268 }
3269
3270 if (oicr & PFINT_OICR_TSYN_TX_M) {
3271 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3272 if (ice_pf_state_is_nominal(pf) &&
3273 pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3274 struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3275 unsigned long flags;
3276 u8 idx;
3277
3278 spin_lock_irqsave(&tx->lock, flags);
3279 idx = find_next_bit_wrap(tx->in_use, tx->len,
3280 tx->last_ll_ts_idx_read + 1);
3281 if (idx != tx->len)
3282 ice_ptp_req_tx_single_tstamp(tx, idx);
3283 spin_unlock_irqrestore(&tx->lock, flags);
3284 } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3285 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3286 ret = IRQ_WAKE_THREAD;
3287 }
3288 }
3289
3290 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3291 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3292 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3293
3294 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3295
3296 if (ice_pf_src_tmr_owned(pf)) {
3297 /* Save EVENTs from GLTSYN register */
3298 pf->ptp.ext_ts_irq |= gltsyn_stat &
3299 (GLTSYN_STAT_EVENT0_M |
3300 GLTSYN_STAT_EVENT1_M |
3301 GLTSYN_STAT_EVENT2_M);
3302
3303 ice_ptp_extts_event(pf);
3304 }
3305 }
3306
3307 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3308 if (oicr & ICE_AUX_CRIT_ERR) {
3309 pf->oicr_err_reg |= oicr;
3310 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3311 ena_mask &= ~ICE_AUX_CRIT_ERR;
3312 }
3313
3314 /* Report any remaining unexpected interrupts */
3315 oicr &= ena_mask;
3316 if (oicr) {
3317 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3318 /* If a critical error is pending there is no choice but to
3319 * reset the device.
3320 */
3321 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3322 PFINT_OICR_ECC_ERR_M)) {
3323 set_bit(ICE_PFR_REQ, pf->state);
3324 }
3325 }
3326 ice_service_task_schedule(pf);
3327 if (ret == IRQ_HANDLED)
3328 ice_irq_dynamic_ena(hw, NULL, NULL);
3329
3330 return ret;
3331 }
3332
3333 /**
3334 * ice_misc_intr_thread_fn - misc interrupt thread function
3335 * @irq: interrupt number
3336 * @data: pointer to a q_vector
3337 */
3338 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3339 {
3340 struct ice_pf *pf = data;
3341 struct ice_hw *hw;
3342
3343 hw = &pf->hw;
3344
3345 if (ice_is_reset_in_progress(pf->state))
3346 goto skip_irq;
3347
3348 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3349 /* Process outstanding Tx timestamps. If there is more work,
3350 * re-arm the interrupt to trigger again.
3351 */
3352 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3353 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3354 ice_flush(hw);
3355 }
3356 }
3357
3358 skip_irq:
3359 ice_irq_dynamic_ena(hw, NULL, NULL);
3360
3361 return IRQ_HANDLED;
3362 }
3363
3364 /**
3365 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3366 * @hw: pointer to HW structure
3367 */
3368 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3369 {
3370 /* disable Admin queue Interrupt causes */
3371 wr32(hw, PFINT_FW_CTL,
3372 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3373
3374 /* disable Mailbox queue Interrupt causes */
3375 wr32(hw, PFINT_MBX_CTL,
3376 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3377
3378 wr32(hw, PFINT_SB_CTL,
3379 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3380
3381 /* disable Control queue Interrupt causes */
3382 wr32(hw, PFINT_OICR_CTL,
3383 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3384
3385 ice_flush(hw);
3386 }
3387
3388 /**
3389 * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3390 * @pf: board private structure
3391 */
3392 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3393 {
3394 int irq_num = pf->ll_ts_irq.virq;
3395
3396 synchronize_irq(irq_num);
3397 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3398
3399 ice_free_irq(pf, pf->ll_ts_irq);
3400 }
3401
3402 /**
3403 * ice_free_irq_msix_misc - Unroll misc vector setup
3404 * @pf: board private structure
3405 */
3406 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3407 {
3408 int misc_irq_num = pf->oicr_irq.virq;
3409 struct ice_hw *hw = &pf->hw;
3410
3411 ice_dis_ctrlq_interrupts(hw);
3412
3413 /* disable OICR interrupt */
3414 wr32(hw, PFINT_OICR_ENA, 0);
3415 ice_flush(hw);
3416
3417 synchronize_irq(misc_irq_num);
3418 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3419
3420 ice_free_irq(pf, pf->oicr_irq);
3421 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3422 ice_free_irq_msix_ll_ts(pf);
3423 }
3424
3425 /**
3426 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3427 * @hw: pointer to HW structure
3428 * @reg_idx: HW vector index to associate the control queue interrupts with
3429 */
3430 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3431 {
3432 u32 val;
3433
3434 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3435 PFINT_OICR_CTL_CAUSE_ENA_M);
3436 wr32(hw, PFINT_OICR_CTL, val);
3437
3438 /* enable Admin queue Interrupt causes */
3439 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3440 PFINT_FW_CTL_CAUSE_ENA_M);
3441 wr32(hw, PFINT_FW_CTL, val);
3442
3443 /* enable Mailbox queue Interrupt causes */
3444 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3445 PFINT_MBX_CTL_CAUSE_ENA_M);
3446 wr32(hw, PFINT_MBX_CTL, val);
3447
3448 if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3449 /* enable Sideband queue Interrupt causes */
3450 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3451 PFINT_SB_CTL_CAUSE_ENA_M);
3452 wr32(hw, PFINT_SB_CTL, val);
3453 }
3454
3455 ice_flush(hw);
3456 }
3457
3458 /**
3459 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3460 * @pf: board private structure
3461 *
3462 * This sets up the handler for MSIX 0, which is used to manage the
3463 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3464 * when in MSI or Legacy interrupt mode.
3465 */
3466 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3467 {
3468 struct device *dev = ice_pf_to_dev(pf);
3469 struct ice_hw *hw = &pf->hw;
3470 u32 pf_intr_start_offset;
3471 struct msi_map irq;
3472 int err = 0;
3473
3474 if (!pf->int_name[0])
3475 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3476 dev_driver_string(dev), dev_name(dev));
3477
3478 if (!pf->int_name_ll_ts[0])
3479 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3480 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3481 /* Do not request IRQ but do enable OICR interrupt since settings are
3482 * lost during reset. Note that this function is called only during
3483 * rebuild path and not while reset is in progress.
3484 */
3485 if (ice_is_reset_in_progress(pf->state))
3486 goto skip_req_irq;
3487
3488 /* reserve one vector in irq_tracker for misc interrupts */
3489 irq = ice_alloc_irq(pf, false);
3490 if (irq.index < 0)
3491 return irq.index;
3492
3493 pf->oicr_irq = irq;
3494 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3495 ice_misc_intr_thread_fn, 0,
3496 pf->int_name, pf);
3497 if (err) {
3498 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3499 pf->int_name, err);
3500 ice_free_irq(pf, pf->oicr_irq);
3501 return err;
3502 }
3503
3504 /* reserve one vector in irq_tracker for ll_ts interrupt */
3505 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3506 goto skip_req_irq;
3507
3508 irq = ice_alloc_irq(pf, false);
3509 if (irq.index < 0)
3510 return irq.index;
3511
3512 pf->ll_ts_irq = irq;
3513 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3514 pf->int_name_ll_ts, pf);
3515 if (err) {
3516 dev_err(dev, "devm_request_irq for %s failed: %d\n",
3517 pf->int_name_ll_ts, err);
3518 ice_free_irq(pf, pf->ll_ts_irq);
3519 return err;
3520 }
3521
3522 skip_req_irq:
3523 ice_ena_misc_vector(pf);
3524
3525 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3526 /* This enables LL TS interrupt */
3527 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3528 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3529 wr32(hw, PFINT_SB_CTL,
3530 ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3531 PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3532 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3533 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3534
3535 ice_flush(hw);
3536 ice_irq_dynamic_ena(hw, NULL, NULL);
3537
3538 return 0;
3539 }

/**
 * ice_napi_add - register NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be registered
 *
 * This function is only called in the driver's load path. Registering the NAPI
 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
 * reset/rebuild, etc.)
 */
static void ice_napi_add(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx) {
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll);
		__ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
	}
}
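
/* Sketch: ice_for_each_q_vector() is assumed to expand to a plain counting
 * loop over the VSI's vectors, roughly
 *
 *	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
 *		...
 *
 * so every queue vector gets one NAPI context tied to the same netdev.
 */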

/**
 * ice_set_ops - set netdev and ethtool ops for the given netdev
 * @vsi: the VSI associated with the new netdev
 */
static void ice_set_ops(struct ice_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_is_safe_mode(pf)) {
		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
		ice_set_ethtool_safe_mode_ops(netdev);
		return;
	}

	netdev->netdev_ops = &ice_netdev_ops;
	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
	ice_set_ethtool_ops(netdev);

	if (vsi->type != ICE_VSI_PF)
		return;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
			       NETDEV_XDP_ACT_RX_SG;
	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
}

/**
 * ice_set_netdev_features - set features for the given netdev
 * @netdev: netdev instance
 */
static void ice_set_netdev_features(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;

	if (ice_is_safe_mode(pf)) {
		/* safe mode */
		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
		netdev->hw_features = netdev->features;
		return;
	}

	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA |
			NETIF_F_NTUPLE |
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM |
			 NETIF_F_IP_CSUM |
			 NETIF_F_SCTP_CRC |
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;

	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
	if (is_dvm_ena)
		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;

	tso_features = NETIF_F_TSO |
		       NETIF_F_TSO_ECN |
		       NETIF_F_TSO6 |
		       NETIF_F_GSO_GRE |
		       NETIF_F_GSO_UDP_TUNNEL |
		       NETIF_F_GSO_GRE_CSUM |
		       NETIF_F_GSO_UDP_TUNNEL_CSUM |
		       NETIF_F_GSO_PARTIAL |
		       NETIF_F_GSO_IPXIP4 |
		       NETIF_F_GSO_IPXIP6 |
		       NETIF_F_GSO_UDP_L4;

	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_GRE_CSUM;
	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* add support for HW_CSUM on packets with MPLS header */
	netdev->mpls_features = NETIF_F_HW_CSUM |
				NETIF_F_TSO |
				NETIF_F_TSO6;

	/* enable features */
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_HW_TC;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	/* advertise support but don't enable by default since only one type of
	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
	 * type turns on, the other has to be turned off. This is enforced by
	 * the ice_fix_features() ndo callback.
	 */
	if (is_dvm_ena)
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
				       NETIF_F_HW_VLAN_STAG_TX;

	/* Leave CRC / FCS stripping enabled by default, but allow the value to
	 * be changed at runtime
	 */
	netdev->hw_features |= NETIF_F_RXFCS;

	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
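
/* Usage note (illustrative): features placed only in hw_features start
 * disabled but remain user-toggleable. For example, NETIF_F_RXFCS added
 * above can later be flipped from userspace with something like
 * `ethtool -K <ifname> rx-fcs on`, subject to the ice_fix_features()
 * callback's constraints.
 */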

/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
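
/* Worked example: with rss_table_size = 8 and rss_size = 3 the LUT becomes
 *
 *	lut[] = { 0, 1, 2, 0, 1, 2, 0, 1 }
 *
 * spreading hash buckets round-robin across the 3 configured Rx queues.
 */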

/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_PF;
	params.port_info = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
		   struct ice_channel *ch)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_CHNL;
	params.port_info = pi;
	params.ch = ch;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_ctrl_vsi_setup - Set up a control VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_CTRL;
	params.port_info = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}

/**
 * ice_lb_vsi_setup - Set up a loopback VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_LB;
	params.port_info = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
}
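
/* The *_vsi_setup() wrappers above differ only in the params they fill in.
 * An equivalent construction using a designated initializer (sketch, not a
 * behavior change):
 *
 *	struct ice_vsi_cfg_params params = {
 *		.type      = ICE_VSI_LB,
 *		.port_info = pi,
 *		.flags     = ICE_VSI_FLAG_INIT,
 *	};
 *
 *	return ice_vsi_setup(pf, &params);
 */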

/**
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID
 * @vid: VLAN ID to be added
 *
 * net_device_ops implementation for adding VLAN IDs
 */
static int
ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vsi *vsi = np->vsi;
	struct ice_vlan vlan;
	int ret;

	/* VLAN 0 is added by default during load/reset */
	if (!vid)
		return 0;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	/* Add multicast promisc rule for the VLAN ID to be added if
	 * all-multicast is currently enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					       ICE_MCAST_VLAN_PROMISC_BITS,
					       vid);
		if (ret)
			goto finish;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
	 * packets aren't pruned by the device's internal switch on Rx
	 */
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
	ret = vlan_ops->add_vlan(vsi, &vlan);
	if (ret)
		goto finish;

	/* If all-multicast is currently enabled and this VLAN ID is the only
	 * one besides VLAN 0, we have to update the look-up type of the
	 * multicast promisc rule for VLAN 0 from ICE_SW_LKUP_PROMISC to
	 * ICE_SW_LKUP_PROMISC_VLAN.
	 */
	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_PROMISC_BITS, 0);
		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return ret;
}

/**
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID
 * @vid: VLAN ID to be removed
 *
 * net_device_ops implementation for removing VLAN IDs
 */
static int
ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_vsi *vsi = np->vsi;
	struct ice_vlan vlan;
	int ret;

	/* don't allow removal of VLAN 0 */
	if (!vid)
		return 0;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
	if (ret) {
		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
			   vsi->vsi_num);
		vsi->current_netdev_flags |= IFF_ALLMULTI;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Make sure VLAN delete is successful before updating VLAN
	 * information
	 */
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
	ret = vlan_ops->del_vlan(vsi, &vlan);
	if (ret)
		goto finish;

	/* Remove multicast promisc rule for the removed VLAN ID if
	 * all-multicast is enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI)
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_VLAN_PROMISC_BITS, vid);

	if (!ice_vsi_has_non_zero_vlans(vsi)) {
		/* Update look-up type of multicast promisc rule for VLAN 0
		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
		 */
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						   ICE_MCAST_VLAN_PROMISC_BITS,
						   0);
			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						 ICE_MCAST_PROMISC_BITS, 0);
		}
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return ret;
}

/**
 * ice_rep_indr_tc_block_unbind - unbind indirect TC block callback
 * @cb_priv: indirection block private data
 */
static void ice_rep_indr_tc_block_unbind(void *cb_priv)
{
	struct ice_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

/**
 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
 * @vsi: VSI struct which has the netdev
 */
static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);

	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
				 ice_rep_indr_tc_block_unbind);
}

/**
 * ice_tc_indir_block_register - Register TC indirect block notifications
 * @vsi: VSI struct which has the netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_tc_indir_block_register(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np;

	if (!vsi || !vsi->netdev)
		return -EINVAL;

	np = netdev_priv(vsi->netdev);

	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
}

/**
 * ice_get_avail_q_count - Get count of available queues
 * @pf_qmap: bitmap to get queue use count from
 * @lock: pointer to a mutex that protects access to pf_qmap
 * @size: size of the bitmap
 */
static u16
ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
{
	unsigned long bit;
	u16 count = 0;

	mutex_lock(lock);
	for_each_clear_bit(bit, pf_qmap, size)
		count++;
	mutex_unlock(lock);

	return count;
}
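
/* Worked example: for an 8-bit qmap of 0b00001111 (queues 0-3 marked in
 * use), for_each_clear_bit() visits bits 4-7 and the function returns 4
 * available queues.
 */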

/**
 * ice_get_avail_txq_count - Get count of available Tx queues
 * @pf: pointer to an ice_pf instance
 */
u16 ice_get_avail_txq_count(struct ice_pf *pf)
{
	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
				     pf->max_pf_txqs);
}

/**
 * ice_get_avail_rxq_count - Get count of available Rx queues
 * @pf: pointer to an ice_pf instance
 */
u16 ice_get_avail_rxq_count(struct ice_pf *pf)
{
	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
				     pf->max_pf_rxqs);
}

/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to deinitialize
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	ice_service_task_stop(pf);
	mutex_destroy(&pf->lag_mutex);
	mutex_destroy(&pf->adev_mutex);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->tc_mutex);
	mutex_destroy(&pf->avail_q_mutex);
	mutex_destroy(&pf->vfs.table_lock);

	if (pf->avail_txqs) {
		bitmap_free(pf->avail_txqs);
		pf->avail_txqs = NULL;
	}

	if (pf->avail_rxqs) {
		bitmap_free(pf->avail_rxqs);
		pf->avail_rxqs = NULL;
	}

	if (pf->ptp.clock)
		ptp_clock_unregister(pf->ptp.clock);
}

/**
 * ice_set_pf_caps - set PF's capability flags
 * @pf: pointer to the PF instance
 */
static void ice_set_pf_caps(struct ice_pf *pf)
{
	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;

	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
	if (func_caps->common_cap.rdma)
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
	if (func_caps->common_cap.dcb)
		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
	if (func_caps->common_cap.sr_iov_1_1) {
		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
					      ICE_MAX_SRIOV_VFS);
	}
	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
	if (func_caps->common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
		u16 unused;

		/* ctrl_vsi_idx will be set to a valid value when flow director
		 * is setup by ice_init_fdir
		 */
		pf->ctrl_vsi_idx = ICE_NO_VSI;
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		/* force guaranteed filter pool for PF */
		ice_alloc_fd_guar_item(&pf->hw, &unused,
				       func_caps->fd_fltr_guar);
		/* force shared filter pool for PF */
		ice_alloc_fd_shrd_item(&pf->hw, &unused,
				       func_caps->fd_fltr_best_effort);
	}

	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
	if (func_caps->common_cap.ieee_1588 &&
	    !(pf->hw.mac_type == ICE_MAC_E830))
		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);

	pf->max_pf_txqs = func_caps->common_cap.num_txq;
	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}

/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static int ice_init_pf(struct ice_pf *pf)
{
	ice_set_pf_caps(pf);

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->tc_mutex);
	mutex_init(&pf->adev_mutex);
	mutex_init(&pf->lag_mutex);

	INIT_HLIST_HEAD(&pf->aq_wait_list);
	spin_lock_init(&pf->aq_wait_lock);
	init_waitqueue_head(&pf->aq_wait_queue);

	init_waitqueue_head(&pf->reset_wait_queue);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(ICE_SERVICE_SCHED, pf->state);

	mutex_init(&pf->avail_q_mutex);
	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
	if (!pf->avail_txqs)
		return -ENOMEM;

	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
	if (!pf->avail_rxqs) {
		bitmap_free(pf->avail_txqs);
		pf->avail_txqs = NULL;
		return -ENOMEM;
	}

	mutex_init(&pf->vfs.table_lock);
	hash_init(pf->vfs.table);
	ice_mbx_init_snapshot(&pf->hw);

	return 0;
}

/**
 * ice_is_wol_supported - check if WoL is supported
 * @hw: pointer to hardware info
 *
 * Check if WoL is supported based on the HW configuration.
 * Returns true if NVM supports and enables WoL for this port, false otherwise
 */
bool ice_is_wol_supported(struct ice_hw *hw)
{
	u16 wol_ctrl;

	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
	 * word) indicates WoL is not supported on the corresponding PF ID.
	 */
	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
		return false;

	return !(BIT(hw->port_info->lport) & wol_ctrl);
}
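
/* Worked example (NVM word value assumed): if the WoL control word reads
 * 0x0005, the bits for lports 0 and 2 are set, so WoL is reported as
 * unsupported there; for lport 1, BIT(1) & 0x0005 == 0 and the function
 * returns true.
 */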

/**
 * ice_vsi_recfg_qs - Change the number of queues on a VSI
 * @vsi: VSI being changed
 * @new_rx: new number of Rx queues
 * @new_tx: new number of Tx queues
 * @locked: is adev device_lock held
 *
 * Only change the number of queues if new_tx or new_rx is non-zero.
 *
 * Returns 0 on success.
 */
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
	struct ice_pf *pf = vsi->back;
	int i, err = 0, timeout = 50;

	if (!new_rx && !new_tx)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	if (new_tx)
		vsi->req_txq = (u16)new_tx;
	if (new_rx)
		vsi->req_rxq = (u16)new_rx;

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
		if (err)
			goto rebuild_err;
		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
		goto done;
	}

	ice_vsi_close(vsi);
	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
	if (err)
		goto rebuild_err;

	ice_for_each_traffic_class(i) {
		if (vsi->tc_cfg.ena_tc & BIT(i))
			netdev_set_tc_queue(vsi->netdev,
					    vsi->tc_cfg.tc_info[i].netdev_tc,
					    vsi->tc_cfg.tc_info[i].qcount_tx,
					    vsi->tc_cfg.tc_info[i].qoffset);
	}
	ice_pf_dcb_recfg(pf, locked);
	ice_vsi_open(vsi);
	goto done;

rebuild_err:
	dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
		err);
done:
	clear_bit(ICE_CFG_BUSY, pf->state);
	return err;
}
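
/* Back-off math: the ICE_CFG_BUSY bit is retried up to 50 times with a
 * 1-2 ms sleep per attempt, so a concurrent configuration path gets roughly
 * 50-100 ms to finish before this function gives up with -EBUSY.
 */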

/**
 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
 * @pf: PF to configure
 *
 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
 * VSI can still Tx/Rx VLAN tagged packets.
 */
static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	struct ice_vsi_ctx *ctxt;
	struct ice_hw *hw;
	int status;

	if (!vsi)
		return;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	hw = &pf->hw;
	ctxt->info = vsi->info;

	ctxt->info.valid_sections =
		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
			    ICE_AQ_VSI_PROP_SECURITY_VALID |
			    ICE_AQ_VSI_PROP_SW_VALID);

	/* disable VLAN anti-spoof */
	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);

	/* disable VLAN pruning and keep all other settings */
	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	/* allow all VLANs on Tx and don't strip on Rx */
	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
			status, ice_aq_str(hw->adminq.sq_last_status));
	} else {
		vsi->info.sec_flags = ctxt->info.sec_flags;
		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
	}

	kfree(ctxt);
}

/**
 * ice_log_pkg_init - log result of DDP package load
 * @hw: pointer to hardware info
 * @state: state of package load
 */
static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
{
	struct ice_pf *pf = hw->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
		dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
			hw->active_pkg_name,
			hw->active_pkg_ver.major,
			hw->active_pkg_ver.minor,
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft,
			 hw->pkg_name,
			 hw->pkg_ver.major,
			 hw->pkg_ver.minor,
			 hw->pkg_ver.update,
			 hw->pkg_ver.draft);
		break;
	case ICE_DDP_PKG_FW_MISMATCH:
		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
		break;
	case ICE_DDP_PKG_INVALID_FILE:
		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
		dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
		dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
		dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_LOAD_ERROR:
		dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
		/* poll for reset to complete */
		if (ice_check_reset(hw))
			dev_err(dev, "Error resetting device. Please reload the driver\n");
		break;
	case ICE_DDP_PKG_ERR:
	default:
		dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
		break;
	}
}

/**
 * ice_load_pkg - load/reload the DDP Package file
 * @firmware: firmware structure when firmware requested or NULL for reload
 * @pf: pointer to the PF instance
 *
 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
 * initialize HW tables.
 */
static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	/* Load DDP Package */
	if (firmware && !hw->pkg_copy) {
		state = ice_copy_and_init_pkg(hw, firmware->data,
					      firmware->size);
		ice_log_pkg_init(hw, state);
	} else if (!firmware && hw->pkg_copy) {
		/* Reload package during rebuild after CORER/GLOBR reset */
		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
		ice_log_pkg_init(hw, state);
	} else {
		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
	}

	if (!ice_is_init_pkg_successful(state)) {
		/* Safe Mode */
		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
		return;
	}

	/* A successful package download is the precondition for advanced
	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
	 */
	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
 * @pf: pointer to the PF structure
 *
 * There is no error returned here because the driver should be able to handle
 * 128 Byte cache lines, so we only print a warning in case issues are seen,
 * specifically with Tx.
 */
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
			 ICE_CACHE_LINE_BYTES);
}

/**
 * ice_send_version - update firmware with driver version
 * @pf: PF struct
 *
 * Returns 0 on success, else error code
 */
static int ice_send_version(struct ice_pf *pf)
{
	struct ice_driver_ver dv;

	dv.major_ver = 0xff;
	dv.minor_ver = 0xff;
	dv.build_ver = 0xff;
	dv.subbuild_ver = 0;
	strscpy((char *)dv.driver_string, UTS_RELEASE,
		sizeof(dv.driver_string));
	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}

/**
 * ice_init_fdir - Initialize flow director VSI and configuration
 * @pf: pointer to the PF instance
 *
 * returns 0 on success, negative on error
 */
static int ice_init_fdir(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *ctrl_vsi;
	int err;

	/* Side Band Flow Director needs to have a control VSI.
	 * Allocate it and store it in the PF.
	 */
	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
	if (!ctrl_vsi) {
		dev_dbg(dev, "could not create control VSI\n");
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "could not open control VSI\n");
		goto err_vsi_open;
	}

	mutex_init(&pf->hw.fdir_fltr_lock);

	err = ice_fdir_create_dflt_rules(pf);
	if (err)
		goto err_fdir_rule;

	return 0;

err_fdir_rule:
	ice_fdir_release_flows(&pf->hw);
	ice_vsi_close(ctrl_vsi);
err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[pf->ctrl_vsi_idx] = NULL;
		pf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

static void ice_deinit_fdir(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);

	if (!vsi)
		return;

	ice_vsi_manage_fdir(vsi, false);
	ice_vsi_release(vsi);
	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[pf->ctrl_vsi_idx] = NULL;
		pf->ctrl_vsi_idx = ICE_NO_VSI;
	}

	mutex_destroy(&pf->hw.fdir_fltr_lock);
}

/**
 * ice_get_opt_fw_name - return optional firmware file name or NULL
 * @pf: pointer to the PF instance
 */
static char *ice_get_opt_fw_name(struct ice_pf *pf)
{
	/* Optional firmware name same as default with additional dash
	 * followed by an EUI-64 identifier (PCIe Device Serial Number)
	 */
	struct pci_dev *pdev = pf->pdev;
	char *opt_fw_filename;
	u64 dsn;

	/* Determine the name of the optional file using the DSN (two
	 * dwords following the start of the DSN Capability).
	 */
	dsn = pci_get_dsn(pdev);
	if (!dsn)
		return NULL;

	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
	if (!opt_fw_filename)
		return NULL;

	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
		 ICE_DDP_PKG_PATH, dsn);

	return opt_fw_filename;
}
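
/* Worked example (DSN value assumed): for dsn = 0x0123456789abcdef the
 * optional package name produced above is
 *
 *	"intel/ice/ddp/ice-0123456789abcdef.pkg"
 *
 * which ice_request_fw() tries before falling back to ICE_DDP_PKG_FILE.
 */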

/**
 * ice_request_fw - Device initialization routine
 * @pf: pointer to the PF instance
 * @firmware: double pointer to firmware struct
 *
 * Return: zero when successful, negative values otherwise.
 */
static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
{
	char *opt_fw_filename = ice_get_opt_fw_name(pf);
	struct device *dev = ice_pf_to_dev(pf);
	int err = 0;

	/* optional device-specific DDP (if present) overrides the default DDP
	 * package file. The kernel logs a debug message if the file doesn't
	 * exist, and warning messages for other errors.
	 */
	if (opt_fw_filename) {
		err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
		kfree(opt_fw_filename);
		if (!err)
			return err;
	}
	err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
	if (err)
		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");

	return err;
}

/**
 * ice_init_tx_topology - performs Tx topology initialization
 * @hw: pointer to the hardware structure
 * @firmware: pointer to firmware structure
 *
 * Return: zero when init was successful, negative values otherwise.
 */
static int
ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
{
	u8 num_tx_sched_layers = hw->num_tx_sched_layers;
	struct ice_pf *pf = hw->back;
	struct device *dev;
	u8 *buf_copy;
	int err;

	dev = ice_pf_to_dev(pf);
	/* ice_cfg_tx_topo buf argument is not a constant,
	 * so we have to make a copy
	 */
	buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
	if (!buf_copy)
		return -ENOMEM;

	err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
	if (!err) {
		if (hw->num_tx_sched_layers > num_tx_sched_layers)
			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
		else
			dev_info(dev, "Tx scheduling layers switching feature enabled\n");
		/* if there was a change in topology ice_cfg_tx_topo triggered
		 * a CORER and we need to re-init hw
		 */
		ice_deinit_hw(hw);
		err = ice_init_hw(hw);

		return err;
	} else if (err == -EIO) {
		dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
	}

	return 0;
}

/**
 * ice_init_ddp_config - DDP related configuration
 * @hw: pointer to the hardware structure
 * @pf: pointer to pf structure
 *
 * This function loads the DDP file from disk, then initializes the Tx
 * topology. Finally, the DDP package is loaded onto the card.
 *
 * Return: zero when init was successful, negative values otherwise.
 */
static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	const struct firmware *firmware = NULL;
	int err;

	err = ice_request_fw(pf, &firmware);
	if (err) {
		dev_err(dev, "Failed to request FW: %d\n", err);
		return err;
	}

	err = ice_init_tx_topology(hw, firmware);
	if (err) {
		dev_err(dev, "Failed to initialize Tx topology: %d\n",
			err);
		release_firmware(firmware);
		return err;
	}

	/* Download firmware to device */
	ice_load_pkg(firmware, pf);
	release_firmware(firmware);

	return 0;
}

/**
 * ice_print_wake_reason - show the wake up cause in the log
 * @pf: pointer to the PF struct
 */
static void ice_print_wake_reason(struct ice_pf *pf)
{
	u32 wus = pf->wakeup_reason;
	const char *wake_str;

	/* if no wake event, nothing to print */
	if (!wus)
		return;

	if (wus & PFPM_WUS_LNKC_M)
		wake_str = "Link\n";
	else if (wus & PFPM_WUS_MAG_M)
		wake_str = "Magic Packet\n";
	else if (wus & PFPM_WUS_MNG_M)
		wake_str = "Management\n";
	else if (wus & PFPM_WUS_FW_RST_WK_M)
		wake_str = "Firmware Reset\n";
	else
		wake_str = "Unknown\n";

	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
}

/**
 * ice_pf_fwlog_update_module - update 1 module
 * @pf: pointer to the PF struct
 * @log_level: log_level to use for the @module
 * @module: module to update
 */
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
{
	struct ice_hw *hw = &pf->hw;

	hw->fwlog_cfg.module_entries[module].log_level = log_level;
}

/**
 * ice_register_netdev - register netdev
 * @vsi: pointer to the VSI struct
 */
static int ice_register_netdev(struct ice_vsi *vsi)
{
	int err;

	if (!vsi || !vsi->netdev)
		return -EIO;

	err = register_netdev(vsi->netdev);
	if (err)
		return err;

	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
	netif_carrier_off(vsi->netdev);
	netif_tx_stop_all_queues(vsi->netdev);

	return 0;
}

static void ice_unregister_netdev(struct ice_vsi *vsi)
{
	if (!vsi || !vsi->netdev)
		return;

	unregister_netdev(vsi->netdev);
	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}

/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];

	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
				    vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	ice_set_netdev_features(netdev);
	ice_set_ops(vsi);

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
		eth_hw_addr_set(netdev, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Setup netdev TC information */
	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);

	netdev->max_mtu = ICE_MAX_MTU;

	return 0;
}

static void ice_decfg_netdev(struct ice_vsi *vsi)
{
	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	free_netdev(vsi->netdev);
	vsi->netdev = NULL;
}

/**
 * ice_wait_for_fw - wait for full FW readiness
 * @hw: pointer to the hardware structure
 * @timeout: milliseconds that can elapse before timing out
 */
static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
{
	int fw_loading;
	u32 elapsed = 0;

	while (elapsed <= timeout) {
		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;

		/* firmware was not yet loaded, we have to wait more */
		if (fw_loading) {
			elapsed += 100;
			msleep(100);
			continue;
		}
		return 0;
	}

	return -ETIMEDOUT;
}
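
/* Polling math: each loop iteration sleeps 100 ms and adds 100 to elapsed,
 * so a timeout of 30000 (as passed for C827 devices below) allows roughly
 * 300 polls, i.e. about 30 seconds of wall-clock wait before -ETIMEDOUT.
 */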

int ice_init_dev(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int err;

	err = ice_init_hw(hw);
	if (err) {
		dev_err(dev, "ice_init_hw failed: %d\n", err);
		return err;
	}

	/* Some cards require longer initialization times
	 * due to necessity of loading FW from an external source.
	 * This can take even half a minute.
	 */
	if (ice_is_pf_c827(hw)) {
		err = ice_wait_for_fw(hw, 30000);
		if (err) {
			dev_err(dev, "ice_wait_for_fw timed out\n");
			return err;
		}
	}

	ice_init_feature_support(pf);

	err = ice_init_ddp_config(hw, pf);
	if (err)
		return err;

	/* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
	 * set in pf->flags, which will cause ice_is_safe_mode to return
	 * true
	 */
	if (ice_is_safe_mode(pf)) {
		/* we already got function/device capabilities but these don't
		 * reflect what the driver needs to do in safe mode. Instead of
		 * adding conditional logic everywhere to ignore these
		 * device/function capabilities, override them.
		 */
		ice_set_safe_mode_caps(hw);
	}

	err = ice_init_pf(pf);
	if (err) {
		dev_err(dev, "ice_init_pf failed: %d\n", err);
		goto err_init_pf;
	}

	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
		pf->hw.udp_tunnel_nic.tables[0].n_entries =
			pf->hw.tnl.valid_count[TNL_VXLAN];
		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
			UDP_TUNNEL_TYPE_VXLAN;
	}
	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
		pf->hw.udp_tunnel_nic.tables[1].n_entries =
			pf->hw.tnl.valid_count[TNL_GENEVE];
		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
			UDP_TUNNEL_TYPE_GENEVE;
	}

	err = ice_init_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
		err = -EIO;
		goto err_init_interrupt_scheme;
	}

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "setup of misc vector failed: %d\n", err);
		goto err_req_irq_msix_misc;
	}

	return 0;

err_req_irq_msix_misc:
	ice_clear_interrupt_scheme(pf);
err_init_interrupt_scheme:
	ice_deinit_pf(pf);
err_init_pf:
	ice_deinit_hw(hw);
	return err;
}

void ice_deinit_dev(struct ice_pf *pf)
{
	ice_free_irq_msix_misc(pf);
	ice_deinit_pf(pf);
	ice_deinit_hw(&pf->hw);

	/* Service task is already stopped, so call reset directly. */
	ice_reset(&pf->hw, ICE_RESET_PFR);
	pci_wait_for_pending_transaction(pf->pdev);
	ice_clear_interrupt_scheme(pf);
}

static void ice_init_features(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (ice_is_safe_mode(pf))
		return;

	/* initialize DDP driven features */
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_init(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_init(pf);

	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
		ice_dpll_init(pf);

	/* Note: Flow director init failure is non-fatal to load */
	if (ice_init_fdir(pf))
		dev_err(dev, "could not initialize flow director\n");

	/* Note: DCB init failure is non-fatal to load */
	if (ice_init_pf_dcb(pf, false)) {
		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	} else {
		ice_cfg_lldp_mib_change(&pf->hw, true);
	}

	if (ice_init_lag(pf))
		dev_warn(dev, "Failed to init link aggregation support\n");

	ice_hwmon_init(pf);
}

static void ice_deinit_features(struct ice_pf *pf)
{
	if (ice_is_safe_mode(pf))
		return;

	ice_deinit_lag(pf);
	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
		ice_cfg_lldp_mib_change(&pf->hw, false);
	ice_deinit_fdir(pf);
	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_release(pf);
	if (test_bit(ICE_FLAG_DPLL, pf->flags))
		ice_dpll_deinit(pf);
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		xa_destroy(&pf->eswitch.reprs);
}

static void ice_init_wakeup(struct ice_pf *pf)
{
	/* Save wakeup reason register for later use */
	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);

	/* check for a power management event */
	ice_print_wake_reason(pf);

	/* clear wake status, all bits */
	wr32(&pf->hw, PFPM_WUS, U32_MAX);

	/* Disable WoL at init, wait for user to enable */
	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
}

static int ice_init_link(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_init_link_events(pf->hw.port_info);
	if (err) {
		dev_err(dev, "ice_init_link_events failed: %d\n", err);
		return err;
	}

	/* not a fatal error if this fails */
	err = ice_init_nvm_phy_type(pf->hw.port_info);
	if (err)
		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);

	/* not a fatal error if this fails */
	err = ice_update_link_info(pf->hw.port_info);
	if (err)
		dev_err(dev, "ice_update_link_info failed: %d\n", err);

	ice_init_link_dflt_override(pf->hw.port_info);

	ice_check_link_cfg_err(pf,
			       pf->hw.port_info->phy.link_info.link_cfg_err);

	/* if media available, initialize PHY settings */
	if (pf->hw.port_info->phy.link_info.link_info &
	    ICE_AQ_MEDIA_AVAILABLE) {
		/* not a fatal error if this fails */
		err = ice_init_phy_user_cfg(pf->hw.port_info);
		if (err)
			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);

		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
			struct ice_vsi *vsi = ice_get_main_vsi(pf);

			if (vsi)
				ice_configure_phy(vsi);
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
	}

	return err;
}

static int ice_init_pf_sw(struct ice_pf *pf)
{
	bool dvm = ice_is_dvm_ena(&pf->hw);
	struct ice_vsi *vsi;
	int err;

	/* create switch struct for the switch element created by FW on boot */
	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
	if (!pf->first_sw)
		return -ENOMEM;

	if (pf->hw.evb_veb)
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
	else
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;

	pf->first_sw->pf = pf;

	/* record the sw_id available for later use */
	pf->first_sw->sw_id = pf->hw.port_info->sw_id;

	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
	if (err)
		goto err_aq_set_port_params;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi) {
		err = -ENOMEM;
		goto err_pf_vsi_setup;
	}

	return 0;

err_pf_vsi_setup:
err_aq_set_port_params:
	kfree(pf->first_sw);
	return err;
}

static void ice_deinit_pf_sw(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);

	if (!vsi)
		return;

	ice_vsi_release(vsi);
	kfree(pf->first_sw);
}

static int ice_alloc_vsis(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
	if (!pf->num_alloc_vsi)
		return -EIO;

	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
		dev_warn(dev,
			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
	}

	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
			       GFP_KERNEL);
	if (!pf->vsi)
		return -ENOMEM;

	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
				     sizeof(*pf->vsi_stats), GFP_KERNEL);
	if (!pf->vsi_stats) {
		devm_kfree(dev, pf->vsi);
		return -ENOMEM;
	}

	return 0;
}

static void ice_dealloc_vsis(struct ice_pf *pf)
{
	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
	pf->vsi_stats = NULL;

	pf->num_alloc_vsi = 0;
	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
	pf->vsi = NULL;
}

static int ice_init_devlink(struct ice_pf *pf)
{
	int err;

	err = ice_devlink_register_params(pf);
	if (err)
		return err;

	ice_devlink_init_regions(pf);
	ice_devlink_register(pf);

	return 0;
}

static void ice_deinit_devlink(struct ice_pf *pf)
{
	ice_devlink_unregister(pf);
	ice_devlink_destroy_regions(pf);
	ice_devlink_unregister_params(pf);
}

static int ice_init(struct ice_pf *pf)
{
	int err;

	err = ice_init_dev(pf);
	if (err)
		return err;

	err = ice_alloc_vsis(pf);
	if (err)
		goto err_alloc_vsis;

	err = ice_init_pf_sw(pf);
	if (err)
		goto err_init_pf_sw;

	ice_init_wakeup(pf);

	err = ice_init_link(pf);
	if (err)
		goto err_init_link;

	err = ice_send_version(pf);
	if (err)
		goto err_init_link;

	ice_verify_cacheline_size(pf);

	if (ice_is_safe_mode(pf))
		ice_set_safe_mode_vlan_cfg(pf);
	else
		/* print PCI link speed and width */
		pcie_print_link_status(pf->pdev);

	/* ready to go, so clear down state bit */
	clear_bit(ICE_DOWN, pf->state);
	clear_bit(ICE_SERVICE_DIS, pf->state);

	/* since everything is good, start the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;

err_init_link:
	ice_deinit_pf_sw(pf);
err_init_pf_sw:
	ice_dealloc_vsis(pf);
err_alloc_vsis:
	ice_deinit_dev(pf);
	return err;
}

static void ice_deinit(struct ice_pf *pf)
{
	set_bit(ICE_SERVICE_DIS, pf->state);
	set_bit(ICE_DOWN, pf->state);

	ice_deinit_pf_sw(pf);
	ice_dealloc_vsis(pf);
	ice_deinit_dev(pf);
}

/**
 * ice_load - load the PF by initializing HW and starting the VSI
 * @pf: pointer to the pf instance
 *
 * This function has to be called under devl_lock.
 */
int ice_load(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	int err;

	devl_assert_locked(priv_to_devlink(pf));

	vsi = ice_get_main_vsi(pf);

	/* init channel list */
	INIT_LIST_HEAD(&vsi->ch_list);

	err = ice_cfg_netdev(vsi);
	if (err)
		return err;

	/* Setup DCB netlink interface */
	ice_dcbnl_setup(vsi);

	err = ice_init_mac_fltr(pf);
	if (err)
		goto err_init_mac_fltr;

	err = ice_devlink_create_pf_port(pf);
	if (err)
		goto err_devlink_create_pf_port;

	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);

	err = ice_register_netdev(vsi);
	if (err)
		goto err_register_netdev;

	err = ice_tc_indir_block_register(vsi);
	if (err)
		goto err_tc_indir_block_register;

	ice_napi_add(vsi);

	err = ice_init_rdma(pf);
	if (err)
		goto err_init_rdma;

	ice_init_features(pf);
	ice_service_task_restart(pf);

	clear_bit(ICE_DOWN, pf->state);

	return 0;

err_init_rdma:
	ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
	ice_unregister_netdev(vsi);
err_register_netdev:
	ice_devlink_destroy_pf_port(pf);
err_devlink_create_pf_port:
err_init_mac_fltr:
	ice_decfg_netdev(vsi);
	return err;
}

/**
 * ice_unload - unload the PF by stopping the VSI and deinitializing HW
 * @pf: pointer to the pf instance
 *
 * This function has to be called under devl_lock.
 */
void ice_unload(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);

	devl_assert_locked(priv_to_devlink(pf));

	ice_deinit_features(pf);
	ice_deinit_rdma(pf);
	ice_tc_indir_block_unregister(vsi);
	ice_unregister_netdev(vsi);
	ice_devlink_destroy_pf_port(pf);
	ice_decfg_netdev(vsi);
}

/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct ice_adapter *adapter;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	if (pdev->is_virtfn) {
		dev_err(dev, "can't probe a virtual function\n");
		return -EINVAL;
	}

	/* when under a kdump kernel initiate a reset before enabling the
	 * device in order to clear out any pending DMA transactions. These
	 * transactions can cause some systems to machine check when doing
	 * the pcim_enable_device() below.
	 */
	if (is_kdump_kernel()) {
		pci_save_state(pdev);
		pci_clear_master(pdev);
		err = pcie_flr(pdev);
		if (err)
			return err;
		pci_restore_state(pdev);
	}

	/* this driver uses devres, see
	 * Documentation/driver-api/driver-model/devres.rst
	 */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
	if (err) {
		dev_err(dev, "BAR0 I/O map error %d\n", err);
		return err;
	}

	pf = ice_allocate_pf(dev);
	if (!pf)
		return -ENOMEM;

	/* initialize Auxiliary index to invalid value */
	pf->aux_idx = -1;

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
		return err;
	}

	pci_set_master(pdev);

	adapter = ice_adapter_get(pdev);
	if (IS_ERR(adapter))
		return PTR_ERR(adapter);

	pf->pdev = pdev;
	pf->adapter = adapter;
	pci_set_drvdata(pdev, pf);
	set_bit(ICE_DOWN, pf->state);
	/* Disable service task until DOWN bit is cleared */
	set_bit(ICE_SERVICE_DIS, pf->state);

	hw = &pf->hw;
	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
	pci_save_state(pdev);

	hw->back = pf;
	hw->port_info = NULL;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	ice_set_ctrlq_len(hw);

	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (debug < -1)
		hw->debug_mask = debug;
#endif

	err = ice_init(pf);
	if (err)
		goto err_init;

	devl_lock(priv_to_devlink(pf));
	err = ice_load(pf);
	if (err)
		goto err_load;

	err = ice_init_devlink(pf);
	if (err)
		goto err_init_devlink;
	devl_unlock(priv_to_devlink(pf));

	return 0;

err_init_devlink:
	ice_unload(pf);
err_load:
	devl_unlock(priv_to_devlink(pf));
	ice_deinit(pf);
err_init:
	ice_adapter_put(pdev);
	pci_disable_device(pdev);
	return err;
}
5354
5355 /**
5356 * ice_set_wake - enable or disable Wake on LAN
5357 * @pf: pointer to the PF struct
5358 *
5359 * Simple helper for WoL control
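 *
 * WoL is normally requested from userspace ahead of time via ethtool; a
 * hypothetical invocation that would set pf->wol_ena and take effect here
 * on shutdown/suspend (illustrative, 'g' = magic-packet wake):
 *
 *   ethtool -s <iface> wol g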
5360 */
5361 static void ice_set_wake(struct ice_pf *pf)
5362 {
5363 struct ice_hw *hw = &pf->hw;
5364 bool wol = pf->wol_ena;
5365
5366 /* clear wake state, otherwise new wake events won't fire */
5367 wr32(hw, PFPM_WUS, U32_MAX);
5368
5369 /* enable / disable APM wake up, no RMW needed */
5370 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5371
5372 /* enable or disable the magic packet wake filter */
5373 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5374 }
5375
5376 /**
5377 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5378 * @pf: pointer to the PF struct
5379 *
5380 * Issue firmware command to enable multicast magic wake, making
5381 * sure that any locally administered address (LAA) is used for
5382 * wake, and that PF reset doesn't undo the LAA.
5383 */
5384 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5385 {
5386 struct device *dev = ice_pf_to_dev(pf);
5387 struct ice_hw *hw = &pf->hw;
5388 u8 mac_addr[ETH_ALEN];
5389 struct ice_vsi *vsi;
5390 int status;
5391 u8 flags;
5392
5393 if (!pf->wol_ena)
5394 return;
5395
5396 vsi = ice_get_main_vsi(pf);
5397 if (!vsi)
5398 return;
5399
5400 /* Get current MAC address in case it's an LAA */
5401 if (vsi->netdev)
5402 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5403 else
5404 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5405
5406 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5407 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5408 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5409
5410 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5411 if (status)
5412 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5413 status, ice_aq_str(hw->adminq.sq_last_status));
5414 }
5415
5416 /**
5417 * ice_remove - Device removal routine
5418 * @pdev: PCI device information struct
5419 */
5420 static void ice_remove(struct pci_dev *pdev)
5421 {
5422 struct ice_pf *pf = pci_get_drvdata(pdev);
5423 int i;
5424
5425 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5426 if (!ice_is_reset_in_progress(pf->state))
5427 break;
5428 msleep(100);
5429 }
5430
5431 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5432 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5433 ice_free_vfs(pf);
5434 }
5435
5436 ice_hwmon_exit(pf);
5437
5438 ice_service_task_stop(pf);
5439 ice_aq_cancel_waiting_tasks(pf);
5440 set_bit(ICE_DOWN, pf->state);
5441
5442 if (!ice_is_safe_mode(pf))
5443 ice_remove_arfs(pf);
5444
5445 devl_lock(priv_to_devlink(pf));
5446 ice_deinit_devlink(pf);
5447
5448 ice_unload(pf);
5449 devl_unlock(priv_to_devlink(pf));
5450
5451 ice_deinit(pf);
5452 ice_vsi_release_all(pf);
5453
5454 ice_setup_mc_magic_wake(pf);
5455 ice_set_wake(pf);
5456
5457 ice_adapter_put(pdev);
5458 pci_disable_device(pdev);
5459 }
5460
5461 /**
5462 * ice_shutdown - PCI callback for shutting down device
5463 * @pdev: PCI device information struct
5464 */
5465 static void ice_shutdown(struct pci_dev *pdev)
5466 {
5467 struct ice_pf *pf = pci_get_drvdata(pdev);
5468
5469 ice_remove(pdev);
5470
5471 if (system_state == SYSTEM_POWER_OFF) {
5472 pci_wake_from_d3(pdev, pf->wol_ena);
5473 pci_set_power_state(pdev, PCI_D3hot);
5474 }
5475 }
5476
5477 /**
5478 * ice_prepare_for_shutdown - prep for PCI shutdown
5479 * @pf: board private structure
5480 *
5481 * Inform or close all dependent features in prep for PCI device shutdown
5482 */
5483 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5484 {
5485 struct ice_hw *hw = &pf->hw;
5486 u32 v;
5487
5488 /* Notify VFs of impending reset */
5489 if (ice_check_sq_alive(hw, &hw->mailboxq))
5490 ice_vc_notify_reset(pf);
5491
5492 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5493
5494 /* disable the VSIs and their queues that are not already DOWN */
5495 ice_pf_dis_all_vsi(pf, false);
5496
5497 ice_for_each_vsi(pf, v)
5498 if (pf->vsi[v])
5499 pf->vsi[v]->vsi_num = 0;
5500
5501 ice_shutdown_all_ctrlq(hw, true);
5502 }
5503
5504 /**
5505 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5506 * @pf: board private structure to reinitialize
5507 *
5508 * This routine reinitializes the interrupt scheme that was cleared during
5509 * the power management suspend callback.
5510 *
5511 * This should be called during the resume routine to re-allocate the q_vectors
5512 * and reacquire interrupts.
5513 */
5514 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5515 {
5516 struct device *dev = ice_pf_to_dev(pf);
5517 int ret, v;
5518
5519 /* Since we clear MSIX flag during suspend, we need to
5520 * set it back during resume...
5521 */
5522
5523 ret = ice_init_interrupt_scheme(pf);
5524 if (ret) {
5525 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5526 return ret;
5527 }
5528
5529 /* Remap vectors and rings after interrupts have been successfully re-initialized */
5530 ice_for_each_vsi(pf, v) {
5531 if (!pf->vsi[v])
5532 continue;
5533
5534 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5535 if (ret)
5536 goto err_reinit;
5537 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5538 ice_vsi_set_napi_queues(pf->vsi[v]);
5539 }
5540
5541 ret = ice_req_irq_msix_misc(pf);
5542 if (ret) {
5543 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5544 ret);
5545 goto err_reinit;
5546 }
5547
5548 return 0;
5549
5550 err_reinit:
5551 while (v--)
5552 if (pf->vsi[v])
5553 ice_vsi_free_q_vectors(pf->vsi[v]);
5554
5555 return ret;
5556 }
5557
5558 /**
5559 * ice_suspend - PM suspend callback
5560 * @dev: generic device information structure
5561 *
5562 * Power Management callback to quiesce the device and prepare
5563 * for D3 transition.
5564 */
5565 static int ice_suspend(struct device *dev)
5566 {
5567 struct pci_dev *pdev = to_pci_dev(dev);
5568 struct ice_pf *pf;
5569 int disabled, v;
5570
5571 pf = pci_get_drvdata(pdev);
5572
5573 if (!ice_pf_state_is_nominal(pf)) {
5574 dev_err(dev, "Device is not ready, no need to suspend it\n");
5575 return -EBUSY;
5576 }
5577
5578 /* Stop watchdog tasks until resume completion.
5579 * Even though it is most likely that the service task is
5580 * disabled if the device is suspended or down, the service task's
5581 * state is controlled by a different state bit, and we should
5582 * store and honor whatever state that bit is in at this point.
5583 */
5584 disabled = ice_service_task_stop(pf);
5585
5586 ice_deinit_rdma(pf);
5587
5588 /* Already suspended? Then there is nothing to do */
5589 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5590 if (!disabled)
5591 ice_service_task_restart(pf);
5592 return 0;
5593 }
5594
5595 if (test_bit(ICE_DOWN, pf->state) ||
5596 ice_is_reset_in_progress(pf->state)) {
5597 dev_err(dev, "can't suspend device in reset or already down\n");
5598 if (!disabled)
5599 ice_service_task_restart(pf);
5600 return 0;
5601 }
5602
5603 ice_setup_mc_magic_wake(pf);
5604
5605 ice_prepare_for_shutdown(pf);
5606
5607 ice_set_wake(pf);
5608
5609 /* Free vectors, clear the interrupt scheme and release IRQs
5610 * for proper hibernation, especially with large number of CPUs.
5611 * Otherwise hibernation might fail when mapping all the vectors back
5612 * to CPU0.
5613 */
5614 ice_free_irq_msix_misc(pf);
5615 ice_for_each_vsi(pf, v) {
5616 if (!pf->vsi[v])
5617 continue;
5618 ice_vsi_free_q_vectors(pf->vsi[v]);
5619 }
5620 ice_clear_interrupt_scheme(pf);
5621
5622 pci_save_state(pdev);
5623 pci_wake_from_d3(pdev, pf->wol_ena);
5624 pci_set_power_state(pdev, PCI_D3hot);
5625 return 0;
5626 }
5627
5628 /**
5629 * ice_resume - PM callback for waking up from D3
5630 * @dev: generic device information structure
5631 */
5632 static int ice_resume(struct device *dev)
5633 {
5634 struct pci_dev *pdev = to_pci_dev(dev);
5635 enum ice_reset_req reset_type;
5636 struct ice_pf *pf;
5637 struct ice_hw *hw;
5638 int ret;
5639
5640 pci_set_power_state(pdev, PCI_D0);
5641 pci_restore_state(pdev);
5642 pci_save_state(pdev);
5643
5644 if (!pci_device_is_present(pdev))
5645 return -ENODEV;
5646
5647 ret = pci_enable_device_mem(pdev);
5648 if (ret) {
5649 dev_err(dev, "Cannot enable device after suspend\n");
5650 return ret;
5651 }
5652
5653 pf = pci_get_drvdata(pdev);
5654 hw = &pf->hw;
5655
5656 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5657 ice_print_wake_reason(pf);
5658
5659 /* We cleared the interrupt scheme when we suspended, so we need to
5660 * restore it now to resume device functionality.
5661 */
5662 ret = ice_reinit_interrupt_scheme(pf);
5663 if (ret)
5664 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5665
5666 ret = ice_init_rdma(pf);
5667 if (ret)
5668 dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5669 ret);
5670
5671 clear_bit(ICE_DOWN, pf->state);
5672 /* Now perform PF reset and rebuild */
5673 reset_type = ICE_RESET_PFR;
5674 /* re-enable service task for reset, but allow reset to schedule it */
5675 clear_bit(ICE_SERVICE_DIS, pf->state);
5676
5677 if (ice_schedule_reset(pf, reset_type))
5678 dev_err(dev, "Reset during resume failed.\n");
5679
5680 clear_bit(ICE_SUSPENDED, pf->state);
5681 ice_service_task_restart(pf);
5682
5683 /* Restart the service task */
5684 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5685
5686 return 0;
5687 }
5688
5689 /**
5690 * ice_pci_err_detected - warning that PCI error has been detected
5691 * @pdev: PCI device information struct
5692 * @err: the type of PCI error
5693 *
5694 * Called to warn that something happened on the PCI bus and the error handling
5695 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5696 */
5697 static pci_ers_result_t
5698 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5699 {
5700 struct ice_pf *pf = pci_get_drvdata(pdev);
5701
5702 if (!pf) {
5703 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5704 __func__, err);
5705 return PCI_ERS_RESULT_DISCONNECT;
5706 }
5707
5708 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5709 ice_service_task_stop(pf);
5710
5711 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5712 set_bit(ICE_PFR_REQ, pf->state);
5713 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5714 }
5715 }
5716
5717 return PCI_ERS_RESULT_NEED_RESET;
5718 }
5719
5720 /**
5721 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5722 * @pdev: PCI device information struct
5723 *
5724 * Called to determine if the driver can recover from the PCI slot reset by
5725 * using a register read to determine if the device is recoverable.
5726 */
5727 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5728 {
5729 struct ice_pf *pf = pci_get_drvdata(pdev);
5730 pci_ers_result_t result;
5731 int err;
5732 u32 reg;
5733
5734 err = pci_enable_device_mem(pdev);
5735 if (err) {
5736 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5737 err);
5738 result = PCI_ERS_RESULT_DISCONNECT;
5739 } else {
5740 pci_set_master(pdev);
5741 pci_restore_state(pdev);
5742 pci_save_state(pdev);
5743 pci_wake_from_d3(pdev, false);
5744
5745 /* Check for life */
5746 reg = rd32(&pf->hw, GLGEN_RTRIG);
5747 if (!reg)
5748 result = PCI_ERS_RESULT_RECOVERED;
5749 else
5750 result = PCI_ERS_RESULT_DISCONNECT;
5751 }
5752
5753 return result;
5754 }
5755
5756 /**
5757 * ice_pci_err_resume - restart operations after PCI error recovery
5758 * @pdev: PCI device information struct
5759 *
5760 * Called to allow the driver to bring things back up after PCI error and/or
5761 * reset recovery have finished
5762 */
5763 static void ice_pci_err_resume(struct pci_dev *pdev)
5764 {
5765 struct ice_pf *pf = pci_get_drvdata(pdev);
5766
5767 if (!pf) {
5768 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5769 __func__);
5770 return;
5771 }
5772
5773 if (test_bit(ICE_SUSPENDED, pf->state)) {
5774 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5775 __func__);
5776 return;
5777 }
5778
5779 ice_restore_all_vfs_msi_state(pf);
5780
5781 ice_do_reset(pf, ICE_RESET_PFR);
5782 ice_service_task_restart(pf);
5783 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5784 }
5785
5786 /**
5787 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5788 * @pdev: PCI device information struct
5789 */
5790 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5791 {
5792 struct ice_pf *pf = pci_get_drvdata(pdev);
5793
5794 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5795 ice_service_task_stop(pf);
5796
5797 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5798 set_bit(ICE_PFR_REQ, pf->state);
5799 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5800 }
5801 }
5802 }
5803
5804 /**
5805 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5806 * @pdev: PCI device information struct
5807 */
5808 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5809 {
5810 ice_pci_err_resume(pdev);
5811 }
5812
5813 /* ice_pci_tbl - PCI Device ID Table
5814 *
5815 * Wildcard entries (PCI_ANY_ID) should come last
5816 * Last entry must be all 0s
5817 *
5818 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5819 * Class, Class Mask, private data (not used) }
5820 */
5821 static const struct pci_device_id ice_pci_tbl[] = {
5822 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5823 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5824 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5825 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5826 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5827 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5828 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5829 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5830 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5831 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5832 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5833 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5834 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5835 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5836 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5837 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5838 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5839 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5840 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5841 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5842 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5843 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5844 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5845 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5846 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5847 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5848 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
5849 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
5850 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
5851 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
5852 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
5853 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
5854 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
5855 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
5856 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
5857 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
5858 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
5859 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
5860 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
5861 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
5862 /* required last entry */
5863 {}
5864 };
5865 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5866
5867 static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5868
5869 static const struct pci_error_handlers ice_pci_err_handler = {
5870 .error_detected = ice_pci_err_detected,
5871 .slot_reset = ice_pci_err_slot_reset,
5872 .reset_prepare = ice_pci_err_reset_prepare,
5873 .reset_done = ice_pci_err_reset_done,
5874 .resume = ice_pci_err_resume
5875 };
5876
5877 static struct pci_driver ice_driver = {
5878 .name = KBUILD_MODNAME,
5879 .id_table = ice_pci_tbl,
5880 .probe = ice_probe,
5881 .remove = ice_remove,
5882 .driver.pm = pm_sleep_ptr(&ice_pm_ops),
5883 .shutdown = ice_shutdown,
5884 .sriov_configure = ice_sriov_configure,
5885 .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5886 .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5887 .err_handler = &ice_pci_err_handler
5888 };
5889
5890 /**
5891 * ice_module_init - Driver registration routine
5892 *
5893 * ice_module_init is the first routine called when the driver is
5894 * loaded. All it does is register with the PCI subsystem.
5895 */
5896 static int __init ice_module_init(void)
5897 {
5898 int status = -ENOMEM;
5899
5900 pr_info("%s\n", ice_driver_string);
5901 pr_info("%s\n", ice_copyright);
5902
5903 ice_adv_lnk_speed_maps_init();
5904
5905 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5906 if (!ice_wq) {
5907 pr_err("Failed to create workqueue\n");
5908 return status;
5909 }
5910
5911 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5912 if (!ice_lag_wq) {
5913 pr_err("Failed to create LAG workqueue\n");
5914 goto err_dest_wq;
5915 }
5916
5917 ice_debugfs_init();
5918
5919 status = pci_register_driver(&ice_driver);
5920 if (status) {
5921 pr_err("failed to register PCI driver, err %d\n", status);
5922 goto err_dest_lag_wq;
5923 }
5924
5925 return 0;
5926
5927 err_dest_lag_wq:
5928 destroy_workqueue(ice_lag_wq);
5929 ice_debugfs_exit();
5930 err_dest_wq:
5931 destroy_workqueue(ice_wq);
5932 return status;
5933 }
5934 module_init(ice_module_init);
5935
5936 /**
5937 * ice_module_exit - Driver exit cleanup routine
5938 *
5939 * ice_module_exit is called just before the driver is removed
5940 * from memory.
5941 */
5942 static void __exit ice_module_exit(void)
5943 {
5944 pci_unregister_driver(&ice_driver);
5945 ice_debugfs_exit();
5946 destroy_workqueue(ice_wq);
5947 destroy_workqueue(ice_lag_wq);
5948 pr_info("module unloaded\n");
5949 }
5950 module_exit(ice_module_exit);
5951
5952 /**
5953 * ice_set_mac_address - NDO callback to set MAC address
5954 * @netdev: network interface device structure
5955 * @pi: pointer to an address structure
5956 *
5957 * Returns 0 on success, negative on failure
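 *
 * Typically reached from userspace via the RTM_SETLINK netlink path; an
 * illustrative iproute2 invocation (interface name and MAC are examples):
 *
 *   ip link set dev <iface> address 00:11:22:33:44:55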
5958 */
5959 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5960 {
5961 struct ice_netdev_priv *np = netdev_priv(netdev);
5962 struct ice_vsi *vsi = np->vsi;
5963 struct ice_pf *pf = vsi->back;
5964 struct ice_hw *hw = &pf->hw;
5965 struct sockaddr *addr = pi;
5966 u8 old_mac[ETH_ALEN];
5967 u8 flags = 0;
5968 u8 *mac;
5969 int err;
5970
5971 mac = (u8 *)addr->sa_data;
5972
5973 if (!is_valid_ether_addr(mac))
5974 return -EADDRNOTAVAIL;
5975
5976 if (test_bit(ICE_DOWN, pf->state) ||
5977 ice_is_reset_in_progress(pf->state)) {
5978 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5979 mac);
5980 return -EBUSY;
5981 }
5982
5983 if (ice_chnl_dmac_fltr_cnt(pf)) {
5984 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5985 mac);
5986 return -EAGAIN;
5987 }
5988
5989 netif_addr_lock_bh(netdev);
5990 ether_addr_copy(old_mac, netdev->dev_addr);
5991 /* change the netdev's MAC address */
5992 eth_hw_addr_set(netdev, mac);
5993 netif_addr_unlock_bh(netdev);
5994
5995 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5996 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5997 if (err && err != -ENOENT) {
5998 err = -EADDRNOTAVAIL;
5999 goto err_update_filters;
6000 }
6001
6002 /* Add filter for new MAC. If filter exists, return success */
6003 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
6004 if (err == -EEXIST) {
6005 /* Although this MAC filter is already present in hardware it's
6006 * possible in some cases (e.g. bonding) that dev_addr was
6007 * modified outside of the driver and needs to be restored back
6008 * to this value.
6009 */
6010 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
6011
6012 return 0;
6013 } else if (err) {
6014 /* error if the new filter addition failed */
6015 err = -EADDRNOTAVAIL;
6016 }
6017
6018 err_update_filters:
6019 if (err) {
6020 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
6021 mac);
6022 netif_addr_lock_bh(netdev);
6023 eth_hw_addr_set(netdev, old_mac);
6024 netif_addr_unlock_bh(netdev);
6025 return err;
6026 }
6027
6028 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
6029 netdev->dev_addr);
6030
6031 /* write new MAC address to the firmware */
6032 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
6033 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
6034 if (err) {
6035 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
6036 mac, err);
6037 }
6038 return 0;
6039 }
6040
6041 /**
6042 * ice_set_rx_mode - NDO callback to set the netdev filters
6043 * @netdev: network interface device structure
6044 */
6045 static void ice_set_rx_mode(struct net_device *netdev)
6046 {
6047 struct ice_netdev_priv *np = netdev_priv(netdev);
6048 struct ice_vsi *vsi = np->vsi;
6049
6050 if (!vsi || ice_is_switchdev_running(vsi->back))
6051 return;
6052
6053 /* Set the flags to synchronize filters
6054 * ndo_set_rx_mode may be triggered even without a change in netdev
6055 * flags
6056 */
6057 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
6058 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
6059 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
6060
6061 /* schedule our worker thread which will take care of
6062 * applying the new filter changes
6063 */
6064 ice_service_task_schedule(vsi->back);
6065 }
6066
6067 /**
6068 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6069 * @netdev: network interface device structure
6070 * @queue_index: Queue ID
6071 * @maxrate: maximum bandwidth in Mbps
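 *
 * This NDO is normally driven through the per-queue sysfs attribute; an
 * illustrative invocation capping queue 0 at 1000 Mbps:
 *
 *   echo 1000 > /sys/class/net/<iface>/queues/tx-0/tx_maxrate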
6072 */
6073 static int
6074 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
6075 {
6076 struct ice_netdev_priv *np = netdev_priv(netdev);
6077 struct ice_vsi *vsi = np->vsi;
6078 u16 q_handle;
6079 int status;
6080 u8 tc;
6081
6082 /* Validate maxrate requested is within permitted range */
6083 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
6084 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
6085 maxrate, queue_index);
6086 return -EINVAL;
6087 }
6088
6089 q_handle = vsi->tx_rings[queue_index]->q_handle;
6090 tc = ice_dcb_get_tc(vsi, queue_index);
6091
6092 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
6093 if (!vsi) {
6094 netdev_err(netdev, "Invalid VSI for given queue %d\n",
6095 queue_index);
6096 return -EINVAL;
6097 }
6098
6099 /* Set BW back to default when the user sets maxrate to 0 */
6100 if (!maxrate)
6101 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
6102 q_handle, ICE_MAX_BW);
6103 else
6104 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6105 q_handle, ICE_MAX_BW, maxrate * 1000);
6106 if (status)
6107 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
6108 status);
6109
6110 return status;
6111 }
6112
6113 /**
6114 * ice_fdb_add - add an entry to the hardware database
6115 * @ndm: the input from the stack
6116 * @tb: pointer to array of nladdr (unused)
6117 * @dev: the net device pointer
6118 * @addr: the MAC address entry being added
6119 * @vid: VLAN ID
6120 * @flags: instructions from stack about fdb operation
6121 * @extack: netlink extended ack
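 *
 * Typically driven from userspace via iproute2; a hypothetical static
 * multicast entry added against this device (illustrative only):
 *
 *   bridge fdb add 33:33:00:00:00:01 dev <iface> self permanent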
6122 */
6123 static int
6124 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
6125 struct net_device *dev, const unsigned char *addr, u16 vid,
6126 u16 flags, struct netlink_ext_ack __always_unused *extack)
6127 {
6128 int err;
6129
6130 if (vid) {
6131 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
6132 return -EINVAL;
6133 }
6134 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6135 netdev_err(dev, "FDB only supports static addresses\n");
6136 return -EINVAL;
6137 }
6138
6139 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6140 err = dev_uc_add_excl(dev, addr);
6141 else if (is_multicast_ether_addr(addr))
6142 err = dev_mc_add_excl(dev, addr);
6143 else
6144 err = -EINVAL;
6145
6146 /* Only return duplicate errors if NLM_F_EXCL is set */
6147 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6148 err = 0;
6149
6150 return err;
6151 }
6152
6153 /**
6154 * ice_fdb_del - delete an entry from the hardware database
6155 * @ndm: the input from the stack
6156 * @tb: pointer to array of nladdr (unused)
6157 * @dev: the net device pointer
6158 * @addr: the MAC address entry being removed
6159 * @vid: VLAN ID
6160 * @extack: netlink extended ack
6161 */
6162 static int
6163 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6164 struct net_device *dev, const unsigned char *addr,
6165 __always_unused u16 vid, struct netlink_ext_ack *extack)
6166 {
6167 int err;
6168
6169 if (ndm->ndm_state & NUD_PERMANENT) {
6170 netdev_err(dev, "FDB only supports static addresses\n");
6171 return -EINVAL;
6172 }
6173
6174 if (is_unicast_ether_addr(addr))
6175 err = dev_uc_del(dev, addr);
6176 else if (is_multicast_ether_addr(addr))
6177 err = dev_mc_del(dev, addr);
6178 else
6179 err = -EINVAL;
6180
6181 return err;
6182 }
6183
6184 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
6185 NETIF_F_HW_VLAN_CTAG_TX | \
6186 NETIF_F_HW_VLAN_STAG_RX | \
6187 NETIF_F_HW_VLAN_STAG_TX)
6188
6189 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
6190 NETIF_F_HW_VLAN_STAG_RX)
6191
6192 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
6193 NETIF_F_HW_VLAN_STAG_FILTER)
6194
6195 /**
6196 * ice_fix_features - fix the netdev features flags based on device limitations
6197 * @netdev: ptr to the netdev that flags are being fixed on
6198 * @features: features that need to be checked and possibly fixed
6199 *
6200 * Make sure any fixups are made to features in this callback. This enables the
6201 * driver to not have to check unsupported configurations throughout the driver
6202 * because that's the responsibility of this callback.
6203 *
6204 * Single VLAN Mode (SVM) Supported Features:
6205 * NETIF_F_HW_VLAN_CTAG_FILTER
6206 * NETIF_F_HW_VLAN_CTAG_RX
6207 * NETIF_F_HW_VLAN_CTAG_TX
6208 *
6209 * Double VLAN Mode (DVM) Supported Features:
6210 * NETIF_F_HW_VLAN_CTAG_FILTER
6211 * NETIF_F_HW_VLAN_CTAG_RX
6212 * NETIF_F_HW_VLAN_CTAG_TX
6213 *
6214 * NETIF_F_HW_VLAN_STAG_FILTER
6215 * NETIF_F_HW_VLAN_STAG_RX
6216 * NETIF_F_HW_VLAN_STAG_TX
6217 *
6218 * Features that need fixing:
6219 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6220 * These are mutually exclusive as the VSI context cannot support multiple
6221 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
6222 * is not done, then default to clearing the requested STAG offload
6223 * settings.
6224 *
6225 * All supported filtering has to be enabled or disabled together. For
6226 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6227 * together. If this is not done, then default to VLAN filtering disabled.
6228 * These are mutually exclusive as there is currently no way to
6229 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6230 * prune rules.
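 *
 * Illustrative example of the fixup above: in DVM, a request that enables
 * only NETIF_F_HW_VLAN_CTAG_FILTER while STAG filtering stays off is fixed
 * up so that both CTAG and STAG filtering end up enabled, since the prune
 * rules cannot discriminate by VLAN ethertype.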
6231 */
6232 static netdev_features_t
6233 ice_fix_features(struct net_device *netdev, netdev_features_t features)
6234 {
6235 struct ice_netdev_priv *np = netdev_priv(netdev);
6236 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6237 bool cur_ctag, cur_stag, req_ctag, req_stag;
6238
6239 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6240 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6241 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6242
6243 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6244 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6245 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6246
6247 if (req_vlan_fltr != cur_vlan_fltr) {
6248 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6249 if (req_ctag && req_stag) {
6250 features |= NETIF_VLAN_FILTERING_FEATURES;
6251 } else if (!req_ctag && !req_stag) {
6252 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6253 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
6254 (!cur_stag && req_stag && !cur_ctag)) {
6255 features |= NETIF_VLAN_FILTERING_FEATURES;
6256 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6257 } else if ((cur_ctag && !req_ctag && cur_stag) ||
6258 (cur_stag && !req_stag && cur_ctag)) {
6259 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6260 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6261 }
6262 } else {
6263 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6264 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6265
6266 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6267 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6268 }
6269 }
6270
6271 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6272 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6273 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6274 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6275 NETIF_F_HW_VLAN_STAG_TX);
6276 }
6277
6278 if (!(netdev->features & NETIF_F_RXFCS) &&
6279 (features & NETIF_F_RXFCS) &&
6280 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6281 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6282 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6283 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6284 }
6285
6286 return features;
6287 }
6288
6289 /**
6290 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6291 * @vsi: PF's VSI
6292 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6293 *
6294 * Store current stripped VLAN proto in ring packet context,
6295 * so it can be accessed more efficiently by packet processing code.
6296 */
6297 static void
6298 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6299 {
6300 u16 i;
6301
6302 ice_for_each_alloc_rxq(vsi, i)
6303 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6304 }
6305
6306 /**
6307 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6308 * @vsi: PF's VSI
6309 * @features: features used to determine VLAN offload settings
6310 *
6311 * First, determine the vlan_ethertype based on the VLAN offload bits in
6312 * features. Then determine if stripping and insertion should be enabled or
6313 * disabled. Finally enable or disable VLAN stripping and insertion.
6314 */
6315 static int
6316 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6317 {
6318 bool enable_stripping = true, enable_insertion = true;
6319 struct ice_vsi_vlan_ops *vlan_ops;
6320 int strip_err = 0, insert_err = 0;
6321 u16 vlan_ethertype = 0;
6322
6323 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6324
6325 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6326 vlan_ethertype = ETH_P_8021AD;
6327 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6328 vlan_ethertype = ETH_P_8021Q;
6329
6330 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6331 enable_stripping = false;
6332 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6333 enable_insertion = false;
6334
6335 if (enable_stripping)
6336 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6337 else
6338 strip_err = vlan_ops->dis_stripping(vsi);
6339
6340 if (enable_insertion)
6341 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6342 else
6343 insert_err = vlan_ops->dis_insertion(vsi);
6344
6345 if (strip_err || insert_err)
6346 return -EIO;
6347
6348 ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6349 htons(vlan_ethertype) : 0);
6350
6351 return 0;
6352 }
6353
6354 /**
6355 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6356 * @vsi: PF's VSI
6357 * @features: features used to determine VLAN filtering settings
6358 *
6359 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6360 * features.
6361 */
6362 static int
6363 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6364 {
6365 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6366 int err = 0;
6367
6368 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6369 * if either bit is set
6370 */
6371 if (features &
6372 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6373 err = vlan_ops->ena_rx_filtering(vsi);
6374 else
6375 err = vlan_ops->dis_rx_filtering(vsi);
6376
6377 return err;
6378 }
6379
6380 /**
6381 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6382 * @netdev: ptr to the netdev being adjusted
6383 * @features: the feature set that the stack is suggesting
6384 *
6385 * Only update VLAN settings if the requested_vlan_features are different from
6386 * the current_vlan_features.
6387 */
6388 static int
6389 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6390 {
6391 netdev_features_t current_vlan_features, requested_vlan_features;
6392 struct ice_netdev_priv *np = netdev_priv(netdev);
6393 struct ice_vsi *vsi = np->vsi;
6394 int err;
6395
6396 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6397 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6398 if (current_vlan_features ^ requested_vlan_features) {
6399 if ((features & NETIF_F_RXFCS) &&
6400 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6401 dev_err(ice_pf_to_dev(vsi->back),
6402 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6403 return -EIO;
6404 }
6405
6406 err = ice_set_vlan_offload_features(vsi, features);
6407 if (err)
6408 return err;
6409 }
6410
6411 current_vlan_features = netdev->features &
6412 NETIF_VLAN_FILTERING_FEATURES;
6413 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6414 if (current_vlan_features ^ requested_vlan_features) {
6415 err = ice_set_vlan_filtering_features(vsi, features);
6416 if (err)
6417 return err;
6418 }
6419
6420 return 0;
6421 }
6422
6423 /**
6424 * ice_set_loopback - turn on/off loopback mode on underlying PF
6425 * @vsi: ptr to VSI
6426 * @ena: flag to indicate the on/off setting
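 *
 * Reached when NETIF_F_LOOPBACK is toggled, typically from userspace; an
 * illustrative invocation:
 *
 *   ethtool -K <iface> loopback on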
6427 */
6428 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6429 {
6430 bool if_running = netif_running(vsi->netdev);
6431 int ret;
6432
6433 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6434 ret = ice_down(vsi);
6435 if (ret) {
6436 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6437 return ret;
6438 }
6439 }
6440 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6441 if (ret)
6442 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6443 if (if_running)
6444 ret = ice_up(vsi);
6445
6446 return ret;
6447 }
6448
6449 /**
6450 * ice_set_features - set the netdev feature flags
6451 * @netdev: ptr to the netdev being adjusted
6452 * @features: the feature set that the stack is suggesting
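 *
 * Normally reached via ethtool feature toggles; an illustrative invocation
 * that flips several of the flags handled below:
 *
 *   ethtool -K <iface> rxhash off ntuple on rx-fcs on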
6453 */
6454 static int
6455 ice_set_features(struct net_device *netdev, netdev_features_t features)
6456 {
6457 netdev_features_t changed = netdev->features ^ features;
6458 struct ice_netdev_priv *np = netdev_priv(netdev);
6459 struct ice_vsi *vsi = np->vsi;
6460 struct ice_pf *pf = vsi->back;
6461 int ret = 0;
6462
6463 /* Don't set any netdev advanced features with device in Safe Mode */
6464 if (ice_is_safe_mode(pf)) {
6465 dev_err(ice_pf_to_dev(pf),
6466 "Device is in Safe Mode - not enabling advanced netdev features\n");
6467 return ret;
6468 }
6469
6470 /* Do not change setting during reset */
6471 if (ice_is_reset_in_progress(pf->state)) {
6472 dev_err(ice_pf_to_dev(pf),
6473 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6474 return -EBUSY;
6475 }
6476
6477 /* Multiple features can be changed in one call so keep features in
6478 * separate if/else statements to guarantee each feature is checked
6479 */
6480 if (changed & NETIF_F_RXHASH)
6481 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6482
6483 ret = ice_set_vlan_features(netdev, features);
6484 if (ret)
6485 return ret;
6486
6487 /* Turn on reception of the FCS, aka CRC; after setting this
6488 * flag the packet data will have the 4-byte CRC appended
6489 */
6490 if (changed & NETIF_F_RXFCS) {
6491 if ((features & NETIF_F_RXFCS) &&
6492 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6493 dev_err(ice_pf_to_dev(vsi->back),
6494 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6495 return -EIO;
6496 }
6497
6498 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6499 ret = ice_down_up(vsi);
6500 if (ret)
6501 return ret;
6502 }
6503
6504 if (changed & NETIF_F_NTUPLE) {
6505 bool ena = !!(features & NETIF_F_NTUPLE);
6506
6507 ice_vsi_manage_fdir(vsi, ena);
6508 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6509 }
6510
6511 /* don't turn off hw_tc_offload when ADQ is already enabled */
6512 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6513 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6514 return -EACCES;
6515 }
6516
6517 if (changed & NETIF_F_HW_TC) {
6518 bool ena = !!(features & NETIF_F_HW_TC);
6519
6520 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6521 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6522 }
6523
6524 if (changed & NETIF_F_LOOPBACK)
6525 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6526
6527 return ret;
6528 }
6529
6530 /**
6531 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6532 * @vsi: VSI to setup VLAN properties for
6533 */
6534 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6535 {
6536 int err;
6537
6538 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6539 if (err)
6540 return err;
6541
6542 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6543 if (err)
6544 return err;
6545
6546 return ice_vsi_add_vlan_zero(vsi);
6547 }
6548
6549 /**
6550 * ice_vsi_cfg_lan - Setup the VSI's LAN-related config
6551 * @vsi: the VSI being configured
6552 *
6553 * Return 0 on success and negative value on error
6554 */
6555 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6556 {
6557 int err;
6558
6559 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6560 ice_set_rx_mode(vsi->netdev);
6561
6562 err = ice_vsi_vlan_setup(vsi);
6563 if (err)
6564 return err;
6565 }
6566 ice_vsi_cfg_dcb_rings(vsi);
6567
6568 err = ice_vsi_cfg_lan_txqs(vsi);
6569 if (!err && ice_is_xdp_ena_vsi(vsi))
6570 err = ice_vsi_cfg_xdp_txqs(vsi);
6571 if (!err)
6572 err = ice_vsi_cfg_rxqs(vsi);
6573
6574 return err;
6575 }
6576
6577 /* THEORY OF MODERATION:
6578 * The ice driver hardware works differently from the hardware that DIMLIB was
6579 * originally made for. ice hardware doesn't have packet count limits that
6580 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6581 * which is hard-coded to a limit of 250,000 ints/second.
6582 * If not using dynamic moderation, the INTRL value can be modified
6583 * by ethtool rx-usecs-high.
6584 */
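/* A hypothetical ethtool tuning session against the scheme above (interface
 * name and values are illustrative only):
 *
 *   ethtool -C <iface> adaptive-rx off rx-usecs 50    # fixed 50us Rx ITR
 *   ethtool -C <iface> rx-usecs-high 20               # adjust the INTRL cap
 *
 * With adaptive (dynamic) moderation left on, the rx_profile/tx_profile
 * tables below drive the ITR instead.
 */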
6585 struct ice_dim {
6586 /* the throttle rate for interrupts, basically worst case delay before
6587 * an initial interrupt fires, value is stored in microseconds.
6588 */
6589 u16 itr;
6590 };
6591
6592 /* Make a different profile for Rx that doesn't allow quite so aggressive
6593 * moderation at the high end (it maxes out at 126us, or about 8k interrupts a
6594 * second).
6595 */
6596 static const struct ice_dim rx_profile[] = {
6597 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6598 {8}, /* 125,000 ints/s */
6599 {16}, /* 62,500 ints/s */
6600 {62}, /* 16,129 ints/s */
6601 {126} /* 7,936 ints/s */
6602 };
6603
6604 /* The transmit profile has the same sorts of values
6605 * as the Rx profile above.
6606 */
6607 static const struct ice_dim tx_profile[] = {
6608 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6609 {8}, /* 125,000 ints/s */
6610 {40}, /* 25,000 ints/s */
6611 {128}, /* 7,812 ints/s */
6612 {256} /* 3,906 ints/s */
6613 };
6614
6615 static void ice_tx_dim_work(struct work_struct *work)
6616 {
6617 struct ice_ring_container *rc;
6618 struct dim *dim;
6619 u16 itr;
6620
6621 dim = container_of(work, struct dim, work);
6622 rc = dim->priv;
6623
6624 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6625
6626 /* look up the values in our local table */
6627 itr = tx_profile[dim->profile_ix].itr;
6628
6629 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6630 ice_write_itr(rc, itr);
6631
6632 dim->state = DIM_START_MEASURE;
6633 }
6634
6635 static void ice_rx_dim_work(struct work_struct *work)
6636 {
6637 struct ice_ring_container *rc;
6638 struct dim *dim;
6639 u16 itr;
6640
6641 dim = container_of(work, struct dim, work);
6642 rc = dim->priv;
6643
6644 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6645
6646 /* look up the values in our local table */
6647 itr = rx_profile[dim->profile_ix].itr;
6648
6649 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6650 ice_write_itr(rc, itr);
6651
6652 dim->state = DIM_START_MEASURE;
6653 }
6654
6655 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6656
6657 /**
6658 * ice_init_moderation - set up interrupt moderation
6659 * @q_vector: the vector containing rings to be configured
6660 *
6661 * Set up interrupt moderation registers, with the intent to do the right thing
6662 * when called from reset or from probe, whether or not dynamic moderation
6663 * is enabled. Take special care to write all the registers whether dynamic
6664 * moderation is enabled or not, in order to make sure the hardware is in a
6665 * known state.
6666 */
6667 static void ice_init_moderation(struct ice_q_vector *q_vector)
6668 {
6669 struct ice_ring_container *rc;
6670 bool tx_dynamic, rx_dynamic;
6671
6672 rc = &q_vector->tx;
6673 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6674 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6675 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6676 rc->dim.priv = rc;
6677 tx_dynamic = ITR_IS_DYNAMIC(rc);
6678
6679 /* set the initial TX ITR to match the above */
6680 ice_write_itr(rc, tx_dynamic ?
6681 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6682
6683 rc = &q_vector->rx;
6684 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6685 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6686 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6687 rc->dim.priv = rc;
6688 rx_dynamic = ITR_IS_DYNAMIC(rc);
6689
6690 /* set the initial RX ITR to match the above */
6691 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6692 rc->itr_setting);
6693
6694 ice_set_q_vector_intrl(q_vector);
6695 }
6696
6697 /**
6698 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6699 * @vsi: the VSI being configured
6700 */
6701 static void ice_napi_enable_all(struct ice_vsi *vsi)
6702 {
6703 int q_idx;
6704
6705 if (!vsi->netdev)
6706 return;
6707
6708 ice_for_each_q_vector(vsi, q_idx) {
6709 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6710
6711 ice_init_moderation(q_vector);
6712
6713 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6714 napi_enable(&q_vector->napi);
6715 }
6716 }
6717
6718 /**
6719 * ice_up_complete - Finish the last steps of bringing up a connection
6720 * @vsi: The VSI being configured
6721 *
6722 * Return 0 on success and negative value on error
6723 */
6724 static int ice_up_complete(struct ice_vsi *vsi)
6725 {
6726 struct ice_pf *pf = vsi->back;
6727 int err;
6728
6729 ice_vsi_cfg_msix(vsi);
6730
6731 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6732 * Tx queue group list was configured and the context bits were
6733 * programmed using ice_vsi_cfg_txqs
6734 */
6735 err = ice_vsi_start_all_rx_rings(vsi);
6736 if (err)
6737 return err;
6738
6739 clear_bit(ICE_VSI_DOWN, vsi->state);
6740 ice_napi_enable_all(vsi);
6741 ice_vsi_ena_irq(vsi);
6742
6743 if (vsi->port_info &&
6744 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6745 vsi->netdev && vsi->type == ICE_VSI_PF) {
6746 ice_print_link_msg(vsi, true);
6747 netif_tx_start_all_queues(vsi->netdev);
6748 netif_carrier_on(vsi->netdev);
6749 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6750 }
6751
6752 /* Perform an initial read of the statistics registers now to
6753 * set the baseline so counters are ready when interface is up
6754 */
6755 ice_update_eth_stats(vsi);
6756
6757 if (vsi->type == ICE_VSI_PF)
6758 ice_service_task_schedule(pf);
6759
6760 return 0;
6761 }
6762
6763 /**
6764 * ice_up - Bring the connection back up after being down
6765 * @vsi: VSI being configured
6766 */
6767 int ice_up(struct ice_vsi *vsi)
6768 {
6769 int err;
6770
6771 err = ice_vsi_cfg_lan(vsi);
6772 if (!err)
6773 err = ice_up_complete(vsi);
6774
6775 return err;
6776 }
6777
6778 /**
6779 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6780 * @syncp: pointer to u64_stats_sync
6781 * @stats: stats that pkts and bytes count will be taken from
6782 * @pkts: packets stats counter
6783 * @bytes: bytes stats counter
6784 *
6785 * This function fetches stats from the ring, taking into account the atomic
6786 * operations that need to be performed to read u64 values on a 32-bit machine.
6787 */
6788 void
6789 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6790 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6791 {
6792 unsigned int start;
6793
6794 do {
6795 start = u64_stats_fetch_begin(syncp);
6796 *pkts = stats.pkts;
6797 *bytes = stats.bytes;
6798 } while (u64_stats_fetch_retry(syncp, start));
6799 }
6800
6801 /**
6802 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6803 * @vsi: the VSI to be updated
6804 * @vsi_stats: the stats struct to be updated
6805 * @rings: rings to work on
6806 * @count: number of rings
6807 */
6808 static void
6809 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6810 struct rtnl_link_stats64 *vsi_stats,
6811 struct ice_tx_ring **rings, u16 count)
6812 {
6813 u16 i;
6814
6815 for (i = 0; i < count; i++) {
6816 struct ice_tx_ring *ring;
6817 u64 pkts = 0, bytes = 0;
6818
6819 ring = READ_ONCE(rings[i]);
6820 if (!ring || !ring->ring_stats)
6821 continue;
6822 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6823 ring->ring_stats->stats, &pkts,
6824 &bytes);
6825 vsi_stats->tx_packets += pkts;
6826 vsi_stats->tx_bytes += bytes;
6827 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6828 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6829 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6830 }
6831 }
6832
6833 /**
6834 * ice_update_vsi_ring_stats - Update VSI stats counters
6835 * @vsi: the VSI to be updated
6836 */
6837 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6838 {
6839 struct rtnl_link_stats64 *net_stats, *stats_prev;
6840 struct rtnl_link_stats64 *vsi_stats;
6841 struct ice_pf *pf = vsi->back;
6842 u64 pkts, bytes;
6843 int i;
6844
6845 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6846 if (!vsi_stats)
6847 return;
6848
6849 /* reset non-netdev (extended) stats */
6850 vsi->tx_restart = 0;
6851 vsi->tx_busy = 0;
6852 vsi->tx_linearize = 0;
6853 vsi->rx_buf_failed = 0;
6854 vsi->rx_page_failed = 0;
6855
6856 rcu_read_lock();
6857
6858 /* update Tx rings counters */
6859 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6860 vsi->num_txq);
6861
6862 /* update Rx rings counters */
6863 ice_for_each_rxq(vsi, i) {
6864 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6865 struct ice_ring_stats *ring_stats;
6866
6867 ring_stats = ring->ring_stats;
6868 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6869 ring_stats->stats, &pkts,
6870 &bytes);
6871 vsi_stats->rx_packets += pkts;
6872 vsi_stats->rx_bytes += bytes;
6873 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6874 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6875 }
6876
6877 /* update XDP Tx rings counters */
6878 if (ice_is_xdp_ena_vsi(vsi))
6879 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6880 vsi->num_xdp_txq);
6881
6882 rcu_read_unlock();
6883
6884 net_stats = &vsi->net_stats;
6885 stats_prev = &vsi->net_stats_prev;
6886
6887 /* Update netdev counters, but keep in mind that values could start at a
6888 * random value after a PF reset. Since we increase the reported stat by the
6889 * diff of Cur - Prev, we need to be sure that Prev is valid. If it's not,
6890 * let's skip this round.
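 * (Illustrative scenario: after a reset re-bases the ring counters,
 * Prev could exceed Cur and the unsigned diff would wrap; skipping
 * one round lets Prev be re-seeded from Cur just below.)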
6891 */
6892 if (likely(pf->stat_prev_loaded)) {
6893 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6894 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6895 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6896 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6897 }
6898
6899 stats_prev->tx_packets = vsi_stats->tx_packets;
6900 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6901 stats_prev->rx_packets = vsi_stats->rx_packets;
6902 stats_prev->rx_bytes = vsi_stats->rx_bytes;
6903
6904 kfree(vsi_stats);
6905 }
6906
6907 /**
6908 * ice_update_vsi_stats - Update VSI stats counters
6909 * @vsi: the VSI to be updated
6910 */
6911 void ice_update_vsi_stats(struct ice_vsi *vsi)
6912 {
6913 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6914 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6915 struct ice_pf *pf = vsi->back;
6916
6917 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6918 test_bit(ICE_CFG_BUSY, pf->state))
6919 return;
6920
6921 /* get stats as recorded by Tx/Rx rings */
6922 ice_update_vsi_ring_stats(vsi);
6923
6924 /* get VSI stats as recorded by the hardware */
6925 ice_update_eth_stats(vsi);
6926
6927 cur_ns->tx_errors = cur_es->tx_errors;
6928 cur_ns->rx_dropped = cur_es->rx_discards;
6929 cur_ns->tx_dropped = cur_es->tx_discards;
6930 cur_ns->multicast = cur_es->rx_multicast;
6931
6932 /* update some more netdev stats if this is main VSI */
6933 if (vsi->type == ICE_VSI_PF) {
6934 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6935 cur_ns->rx_errors = pf->stats.crc_errors +
6936 pf->stats.illegal_bytes +
6937 pf->stats.rx_undersize +
6938 pf->hw_csum_rx_error +
6939 pf->stats.rx_jabber +
6940 pf->stats.rx_fragments +
6941 pf->stats.rx_oversize;
6942 /* record drops from the port level */
6943 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6944 }
6945 }
6946
6947 /**
6948 * ice_update_pf_stats - Update PF port stats counters
6949 * @pf: PF whose stats need to be updated
6950 */
6951 void ice_update_pf_stats(struct ice_pf *pf)
6952 {
6953 struct ice_hw_port_stats *prev_ps, *cur_ps;
6954 struct ice_hw *hw = &pf->hw;
6955 u16 fd_ctr_base;
6956 u8 port;
6957
6958 port = hw->port_info->lport;
6959 prev_ps = &pf->stats_prev;
6960 cur_ps = &pf->stats;
6961
6962 if (ice_is_reset_in_progress(pf->state))
6963 pf->stat_prev_loaded = false;
6964
6965 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6966 &prev_ps->eth.rx_bytes,
6967 &cur_ps->eth.rx_bytes);
6968
6969 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6970 &prev_ps->eth.rx_unicast,
6971 &cur_ps->eth.rx_unicast);
6972
6973 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6974 &prev_ps->eth.rx_multicast,
6975 &cur_ps->eth.rx_multicast);
6976
6977 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6978 &prev_ps->eth.rx_broadcast,
6979 &cur_ps->eth.rx_broadcast);
6980
6981 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6982 &prev_ps->eth.rx_discards,
6983 &cur_ps->eth.rx_discards);
6984
6985 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6986 &prev_ps->eth.tx_bytes,
6987 &cur_ps->eth.tx_bytes);
6988
6989 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6990 &prev_ps->eth.tx_unicast,
6991 &cur_ps->eth.tx_unicast);
6992
6993 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6994 &prev_ps->eth.tx_multicast,
6995 &cur_ps->eth.tx_multicast);
6996
6997 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6998 &prev_ps->eth.tx_broadcast,
6999 &cur_ps->eth.tx_broadcast);
7000
7001 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
7002 &prev_ps->tx_dropped_link_down,
7003 &cur_ps->tx_dropped_link_down);
7004
7005 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
7006 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
7007
7008 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
7009 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
7010
7011 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
7012 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
7013
7014 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
7015 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
7016
7017 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
7018 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
7019
7020 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
7021 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
7022
7023 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
7024 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
7025
7026 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
7027 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
7028
7029 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
7030 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
7031
7032 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
7033 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
7034
7035 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
7036 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
7037
7038 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
7039 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
7040
7041 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
7042 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
7043
7044 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
7045 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
7046
7047 fd_ctr_base = hw->fd_ctr_base;
7048
7049 ice_stat_update40(hw,
7050 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
7051 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
7052 &cur_ps->fd_sb_match);
7053 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
7054 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
7055
7056 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
7057 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
7058
7059 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
7060 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
7061
7062 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
7063 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
7064
7065 ice_update_dcb_stats(pf);
7066
7067 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
7068 &prev_ps->crc_errors, &cur_ps->crc_errors);
7069
7070 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
7071 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
7072
7073 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
7074 &prev_ps->mac_local_faults,
7075 &cur_ps->mac_local_faults);
7076
7077 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
7078 &prev_ps->mac_remote_faults,
7079 &cur_ps->mac_remote_faults);
7080
7081 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
7082 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
7083
7084 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
7085 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
7086
7087 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
7088 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
7089
7090 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
7091 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
7092
7093 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
7094
7095 pf->stat_prev_loaded = true;
7096 }
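
/* Note on the update helpers used above: the GLPRT_* counters are
 * free-running and wrap, so ice_stat_update40()/ice_stat_update32() keep a
 * previous snapshot and only accumulate deltas. A minimal sketch of the
 * 40-bit variant (illustrative only; the driver's real helper is the
 * authoritative version):
 *
 *	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
 *
 *	if (!prev_stat_loaded)
 *		*prev_stat = new_data;
 *	if (new_data >= *prev_stat)
 *		*cur_stat += new_data - *prev_stat;
 *	else
 *		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
 *	*prev_stat = new_data;
 *
 * This is also why ice_update_pf_stats() drops the snapshot (clears
 * pf->stat_prev_loaded) while a reset is in progress.
 */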
7097
7098 /**
7099 * ice_get_stats64 - get statistics for network device structure
7100 * @netdev: network interface device structure
7101 * @stats: main device statistics structure
7102 */
7103 static
7104 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
7105 {
7106 struct ice_netdev_priv *np = netdev_priv(netdev);
7107 struct rtnl_link_stats64 *vsi_stats;
7108 struct ice_vsi *vsi = np->vsi;
7109
7110 vsi_stats = &vsi->net_stats;
7111
7112 if (!vsi->num_txq || !vsi->num_rxq)
7113 return;
7114
7115 	/* netdev packet/byte stats come from the per-ring counters. They are
7116 	 * obtained by summing up the ring counters (done by
7117 	 * ice_update_vsi_ring_stats()). Only call that update routine and
7118 	 * read the ring registers if the VSI is not down.
7119 	 */
7120 if (!test_bit(ICE_VSI_DOWN, vsi->state))
7121 ice_update_vsi_ring_stats(vsi);
7122 stats->tx_packets = vsi_stats->tx_packets;
7123 stats->tx_bytes = vsi_stats->tx_bytes;
7124 stats->rx_packets = vsi_stats->rx_packets;
7125 stats->rx_bytes = vsi_stats->rx_bytes;
7126
7127 	/* The rest of the stats could be read from the hardware, but instead
7128 	 * we just return the values that the watchdog task has already
7129 	 * obtained.
7130 	 */
7131 stats->multicast = vsi_stats->multicast;
7132 stats->tx_errors = vsi_stats->tx_errors;
7133 stats->tx_dropped = vsi_stats->tx_dropped;
7134 stats->rx_errors = vsi_stats->rx_errors;
7135 stats->rx_dropped = vsi_stats->rx_dropped;
7136 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
7137 stats->rx_length_errors = vsi_stats->rx_length_errors;
7138 }
7139
7140 /**
7141 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7142 * @vsi: VSI having NAPI disabled
7143 */
7144 static void ice_napi_disable_all(struct ice_vsi *vsi)
7145 {
7146 int q_idx;
7147
7148 if (!vsi->netdev)
7149 return;
7150
7151 ice_for_each_q_vector(vsi, q_idx) {
7152 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7153
7154 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7155 napi_disable(&q_vector->napi);
7156
7157 cancel_work_sync(&q_vector->tx.dim.work);
7158 cancel_work_sync(&q_vector->rx.dim.work);
7159 }
7160 }
7161
7162 /**
7163 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7164 * @vsi: the VSI being un-configured
7165 */
7166 static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7167 {
7168 struct ice_pf *pf = vsi->back;
7169 struct ice_hw *hw = &pf->hw;
7170 u32 val;
7171 int i;
7172
7173 /* disable interrupt causation from each Rx queue; Tx queues are
7174 * handled in ice_vsi_stop_tx_ring()
7175 */
7176 if (vsi->rx_rings) {
7177 ice_for_each_rxq(vsi, i) {
7178 if (vsi->rx_rings[i]) {
7179 u16 reg;
7180
7181 reg = vsi->rx_rings[i]->reg_idx;
7182 val = rd32(hw, QINT_RQCTL(reg));
7183 val &= ~QINT_RQCTL_CAUSE_ENA_M;
7184 wr32(hw, QINT_RQCTL(reg), val);
7185 }
7186 }
7187 }
7188
7189 /* disable each interrupt */
7190 ice_for_each_q_vector(vsi, i) {
7191 if (!vsi->q_vectors[i])
7192 continue;
7193 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7194 }
7195
7196 ice_flush(hw);
7197
7198 	/* don't call synchronize_irq() for VFs from the host */
7199 if (vsi->type == ICE_VSI_VF)
7200 return;
7201
7202 ice_for_each_q_vector(vsi, i)
7203 synchronize_irq(vsi->q_vectors[i]->irq.virq);
7204 }
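
/* For reference, the inverse operation (re-arming a queue's Rx interrupt
 * cause when the VSI is brought back up) is roughly the same
 * read-modify-write with the enable bit set (illustrative sketch):
 *
 *	val = rd32(hw, QINT_RQCTL(reg));
 *	val |= QINT_RQCTL_CAUSE_ENA_M;
 *	wr32(hw, QINT_RQCTL(reg), val);
 */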
7205
7206 /**
7207 * ice_down - Shutdown the connection
7208 * @vsi: The VSI being stopped
7209 *
7210  * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
7211 */
7212 int ice_down(struct ice_vsi *vsi)
7213 {
7214 int i, tx_err, rx_err, vlan_err = 0;
7215
7216 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7217
7218 if (vsi->netdev) {
7219 vlan_err = ice_vsi_del_vlan_zero(vsi);
7220 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
7221 netif_carrier_off(vsi->netdev);
7222 netif_tx_disable(vsi->netdev);
7223 }
7224
7225 ice_vsi_dis_irq(vsi);
7226
7227 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7228 if (tx_err)
7229 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7230 vsi->vsi_num, tx_err);
7231 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
7232 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7233 if (tx_err)
7234 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7235 vsi->vsi_num, tx_err);
7236 }
7237
7238 rx_err = ice_vsi_stop_all_rx_rings(vsi);
7239 if (rx_err)
7240 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7241 vsi->vsi_num, rx_err);
7242
7243 ice_napi_disable_all(vsi);
7244
7245 ice_for_each_txq(vsi, i)
7246 ice_clean_tx_ring(vsi->tx_rings[i]);
7247
7248 if (ice_is_xdp_ena_vsi(vsi))
7249 ice_for_each_xdp_txq(vsi, i)
7250 ice_clean_tx_ring(vsi->xdp_rings[i]);
7251
7252 ice_for_each_rxq(vsi, i)
7253 ice_clean_rx_ring(vsi->rx_rings[i]);
7254
7255 if (tx_err || rx_err || vlan_err) {
7256 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7257 vsi->vsi_num, vsi->vsw->sw_id);
7258 return -EIO;
7259 }
7260
7261 return 0;
7262 }
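
/* Typical caller pattern for ice_down() (illustrative sketch): the DOWN bit
 * is owned by the caller, so a close path does roughly
 *
 *	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
 *		ice_down(vsi);
 *
 * which also keeps the WARN_ON() at the top of ice_down() quiet.
 */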
7263
7264 /**
7265 * ice_down_up - shutdown the VSI connection and bring it up
7266 * @vsi: the VSI to be reconnected
7267 */
7268 int ice_down_up(struct ice_vsi *vsi)
7269 {
7270 int ret;
7271
7272 /* if DOWN already set, nothing to do */
7273 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7274 return 0;
7275
7276 ret = ice_down(vsi);
7277 if (ret)
7278 return ret;
7279
7280 ret = ice_up(vsi);
7281 if (ret) {
7282 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7283 return ret;
7284 }
7285
7286 return 0;
7287 }
7288
7289 /**
7290 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7291 * @vsi: VSI having resources allocated
7292 *
7293 * Return 0 on success, negative on failure
7294 */
7295 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7296 {
7297 int i, err = 0;
7298
7299 if (!vsi->num_txq) {
7300 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7301 vsi->vsi_num);
7302 return -EINVAL;
7303 }
7304
7305 ice_for_each_txq(vsi, i) {
7306 struct ice_tx_ring *ring = vsi->tx_rings[i];
7307
7308 if (!ring)
7309 return -EINVAL;
7310
7311 if (vsi->netdev)
7312 ring->netdev = vsi->netdev;
7313 err = ice_setup_tx_ring(ring);
7314 if (err)
7315 break;
7316 }
7317
7318 return err;
7319 }
7320
7321 /**
7322 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7323 * @vsi: VSI having resources allocated
7324 *
7325 * Return 0 on success, negative on failure
7326 */
7327 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7328 {
7329 int i, err = 0;
7330
7331 if (!vsi->num_rxq) {
7332 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7333 vsi->vsi_num);
7334 return -EINVAL;
7335 }
7336
7337 ice_for_each_rxq(vsi, i) {
7338 struct ice_rx_ring *ring = vsi->rx_rings[i];
7339
7340 if (!ring)
7341 return -EINVAL;
7342
7343 if (vsi->netdev)
7344 ring->netdev = vsi->netdev;
7345 err = ice_setup_rx_ring(ring);
7346 if (err)
7347 break;
7348 }
7349
7350 return err;
7351 }
7352
7353 /**
7354 * ice_vsi_open_ctrl - open control VSI for use
7355 * @vsi: the VSI to open
7356 *
7357 * Initialization of the Control VSI
7358 *
7359 * Returns 0 on success, negative value on error
7360 */
7361 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7362 {
7363 char int_name[ICE_INT_NAME_STR_LEN];
7364 struct ice_pf *pf = vsi->back;
7365 struct device *dev;
7366 int err;
7367
7368 dev = ice_pf_to_dev(pf);
7369 /* allocate descriptors */
7370 err = ice_vsi_setup_tx_rings(vsi);
7371 if (err)
7372 goto err_setup_tx;
7373
7374 err = ice_vsi_setup_rx_rings(vsi);
7375 if (err)
7376 goto err_setup_rx;
7377
7378 err = ice_vsi_cfg_lan(vsi);
7379 if (err)
7380 goto err_setup_rx;
7381
7382 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7383 dev_driver_string(dev), dev_name(dev));
7384 err = ice_vsi_req_irq_msix(vsi, int_name);
7385 if (err)
7386 goto err_setup_rx;
7387
7388 ice_vsi_cfg_msix(vsi);
7389
7390 err = ice_vsi_start_all_rx_rings(vsi);
7391 if (err)
7392 goto err_up_complete;
7393
7394 clear_bit(ICE_VSI_DOWN, vsi->state);
7395 ice_vsi_ena_irq(vsi);
7396
7397 return 0;
7398
7399 err_up_complete:
7400 ice_down(vsi);
7401 err_setup_rx:
7402 ice_vsi_free_rx_rings(vsi);
7403 err_setup_tx:
7404 ice_vsi_free_tx_rings(vsi);
7405
7406 return err;
7407 }
7408
7409 /**
7410 * ice_vsi_open - Called when a network interface is made active
7411 * @vsi: the VSI to open
7412 *
7413 * Initialization of the VSI
7414 *
7415 * Returns 0 on success, negative value on error
7416 */
7417 int ice_vsi_open(struct ice_vsi *vsi)
7418 {
7419 char int_name[ICE_INT_NAME_STR_LEN];
7420 struct ice_pf *pf = vsi->back;
7421 int err;
7422
7423 /* allocate descriptors */
7424 err = ice_vsi_setup_tx_rings(vsi);
7425 if (err)
7426 goto err_setup_tx;
7427
7428 err = ice_vsi_setup_rx_rings(vsi);
7429 if (err)
7430 goto err_setup_rx;
7431
7432 err = ice_vsi_cfg_lan(vsi);
7433 if (err)
7434 goto err_setup_rx;
7435
7436 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7437 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7438 err = ice_vsi_req_irq_msix(vsi, int_name);
7439 if (err)
7440 goto err_setup_rx;
7441
7442 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7443
7444 if (vsi->type == ICE_VSI_PF) {
7445 /* Notify the stack of the actual queue counts. */
7446 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7447 if (err)
7448 goto err_set_qs;
7449
7450 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7451 if (err)
7452 goto err_set_qs;
7453 }
7454
7455 err = ice_up_complete(vsi);
7456 if (err)
7457 goto err_up_complete;
7458
7459 return 0;
7460
7461 err_up_complete:
7462 ice_down(vsi);
7463 err_set_qs:
7464 ice_vsi_free_irq(vsi);
7465 err_setup_rx:
7466 ice_vsi_free_rx_rings(vsi);
7467 err_setup_tx:
7468 ice_vsi_free_tx_rings(vsi);
7469
7470 return err;
7471 }
7472
7473 /**
7474 * ice_vsi_release_all - Delete all VSIs
7475 * @pf: PF from which all VSIs are being removed
7476 */
7477 static void ice_vsi_release_all(struct ice_pf *pf)
7478 {
7479 int err, i;
7480
7481 if (!pf->vsi)
7482 return;
7483
7484 ice_for_each_vsi(pf, i) {
7485 if (!pf->vsi[i])
7486 continue;
7487
7488 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7489 continue;
7490
7491 err = ice_vsi_release(pf->vsi[i]);
7492 if (err)
7493 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7494 i, err, pf->vsi[i]->vsi_num);
7495 }
7496 }
7497
7498 /**
7499 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7500 * @pf: pointer to the PF instance
7501 * @type: VSI type to rebuild
7502 *
7503 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7504 */
7505 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7506 {
7507 struct device *dev = ice_pf_to_dev(pf);
7508 int i, err;
7509
7510 ice_for_each_vsi(pf, i) {
7511 struct ice_vsi *vsi = pf->vsi[i];
7512
7513 if (!vsi || vsi->type != type)
7514 continue;
7515
7516 /* rebuild the VSI */
7517 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7518 if (err) {
7519 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7520 err, vsi->idx, ice_vsi_type_str(type));
7521 return err;
7522 }
7523
7524 /* replay filters for the VSI */
7525 err = ice_replay_vsi(&pf->hw, vsi->idx);
7526 if (err) {
7527 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7528 err, vsi->idx, ice_vsi_type_str(type));
7529 return err;
7530 }
7531
7532 /* Re-map HW VSI number, using VSI handle that has been
7533 * previously validated in ice_replay_vsi() call above
7534 */
7535 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7536
7537 /* enable the VSI */
7538 err = ice_ena_vsi(vsi, false);
7539 if (err) {
7540 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7541 err, vsi->idx, ice_vsi_type_str(type));
7542 return err;
7543 }
7544
7545 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7546 ice_vsi_type_str(type));
7547 }
7548
7549 return 0;
7550 }
7551
7552 /**
7553 * ice_update_pf_netdev_link - Update PF netdev link status
7554 * @pf: pointer to the PF instance
7555 */
7556 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7557 {
7558 bool link_up;
7559 int i;
7560
7561 ice_for_each_vsi(pf, i) {
7562 struct ice_vsi *vsi = pf->vsi[i];
7563
7564 if (!vsi || vsi->type != ICE_VSI_PF)
7565 return;
7566
7567 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7568 if (link_up) {
7569 netif_carrier_on(pf->vsi[i]->netdev);
7570 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7571 } else {
7572 netif_carrier_off(pf->vsi[i]->netdev);
7573 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7574 }
7575 }
7576 }
7577
7578 /**
7579 * ice_rebuild - rebuild after reset
7580 * @pf: PF to rebuild
7581 * @reset_type: type of reset
7582 *
7583  * Do not rebuild VF VSIs in this flow because that is already handled via
7584  * ice_reset_all_vfs(). The requirements for resetting a VF after a
7585  * PFR/CORER/GLOBR/etc. differ from the normal flow, and we also don't want
7586  * to reset/rebuild all the VF VSIs twice.
7587 */
7588 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7589 {
7590 struct device *dev = ice_pf_to_dev(pf);
7591 struct ice_hw *hw = &pf->hw;
7592 bool dvm;
7593 int err;
7594
7595 if (test_bit(ICE_DOWN, pf->state))
7596 goto clear_recovery;
7597
7598 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7599
7600 #define ICE_EMP_RESET_SLEEP_MS 5000
7601 if (reset_type == ICE_RESET_EMPR) {
7602 /* If an EMP reset has occurred, any previously pending flash
7603 * update will have completed. We no longer know whether or
7604 * not the NVM update EMP reset is restricted.
7605 */
7606 pf->fw_emp_reset_disabled = false;
7607
7608 msleep(ICE_EMP_RESET_SLEEP_MS);
7609 }
7610
7611 err = ice_init_all_ctrlq(hw);
7612 if (err) {
7613 dev_err(dev, "control queues init failed %d\n", err);
7614 goto err_init_ctrlq;
7615 }
7616
7617 /* if DDP was previously loaded successfully */
7618 if (!ice_is_safe_mode(pf)) {
7619 /* reload the SW DB of filter tables */
7620 if (reset_type == ICE_RESET_PFR)
7621 ice_fill_blk_tbls(hw);
7622 else
7623 /* Reload DDP Package after CORER/GLOBR reset */
7624 ice_load_pkg(NULL, pf);
7625 }
7626
7627 err = ice_clear_pf_cfg(hw);
7628 if (err) {
7629 dev_err(dev, "clear PF configuration failed %d\n", err);
7630 goto err_init_ctrlq;
7631 }
7632
7633 ice_clear_pxe_mode(hw);
7634
7635 err = ice_init_nvm(hw);
7636 if (err) {
7637 dev_err(dev, "ice_init_nvm failed %d\n", err);
7638 goto err_init_ctrlq;
7639 }
7640
7641 err = ice_get_caps(hw);
7642 if (err) {
7643 dev_err(dev, "ice_get_caps failed %d\n", err);
7644 goto err_init_ctrlq;
7645 }
7646
7647 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7648 if (err) {
7649 dev_err(dev, "set_mac_cfg failed %d\n", err);
7650 goto err_init_ctrlq;
7651 }
7652
7653 dvm = ice_is_dvm_ena(hw);
7654
7655 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7656 if (err)
7657 goto err_init_ctrlq;
7658
7659 err = ice_sched_init_port(hw->port_info);
7660 if (err)
7661 goto err_sched_init_port;
7662
7663 /* start misc vector */
7664 err = ice_req_irq_msix_misc(pf);
7665 if (err) {
7666 dev_err(dev, "misc vector setup failed: %d\n", err);
7667 goto err_sched_init_port;
7668 }
7669
7670 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7671 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7672 if (!rd32(hw, PFQF_FD_SIZE)) {
7673 u16 unused, guar, b_effort;
7674
7675 guar = hw->func_caps.fd_fltr_guar;
7676 b_effort = hw->func_caps.fd_fltr_best_effort;
7677
7678 /* force guaranteed filter pool for PF */
7679 ice_alloc_fd_guar_item(hw, &unused, guar);
7680 /* force shared filter pool for PF */
7681 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7682 }
7683 }
7684
7685 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7686 ice_dcb_rebuild(pf);
7687
7688 /* If the PF previously had enabled PTP, PTP init needs to happen before
7689 * the VSI rebuild. If not, this causes the PTP link status events to
7690 * fail.
7691 */
7692 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7693 ice_ptp_rebuild(pf, reset_type);
7694
7695 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7696 ice_gnss_init(pf);
7697
7698 /* rebuild PF VSI */
7699 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7700 if (err) {
7701 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7702 goto err_vsi_rebuild;
7703 }
7704
7705 if (reset_type == ICE_RESET_PFR) {
7706 err = ice_rebuild_channels(pf);
7707 if (err) {
7708 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7709 err);
7710 goto err_vsi_rebuild;
7711 }
7712 }
7713
7714 /* If Flow Director is active */
7715 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7716 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7717 if (err) {
7718 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7719 goto err_vsi_rebuild;
7720 }
7721
7722 /* replay HW Flow Director recipes */
7723 if (hw->fdir_prof)
7724 ice_fdir_replay_flows(hw);
7725
7726 /* replay Flow Director filters */
7727 ice_fdir_replay_fltrs(pf);
7728
7729 ice_rebuild_arfs(pf);
7730 }
7731
7732 ice_update_pf_netdev_link(pf);
7733
7734 /* tell the firmware we are up */
7735 err = ice_send_version(pf);
7736 if (err) {
7737 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7738 err);
7739 goto err_vsi_rebuild;
7740 }
7741
7742 ice_replay_post(hw);
7743
7744 /* if we get here, reset flow is successful */
7745 clear_bit(ICE_RESET_FAILED, pf->state);
7746
7747 ice_plug_aux_dev(pf);
7748 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7749 ice_lag_rebuild(pf);
7750
7751 /* Restore timestamp mode settings after VSI rebuild */
7752 ice_ptp_restore_timestamp_mode(pf);
7753 return;
7754
7755 err_vsi_rebuild:
7756 err_sched_init_port:
7757 ice_sched_cleanup_all(hw);
7758 err_init_ctrlq:
7759 ice_shutdown_all_ctrlq(hw, false);
7760 set_bit(ICE_RESET_FAILED, pf->state);
7761 clear_recovery:
7762 /* set this bit in PF state to control service task scheduling */
7763 set_bit(ICE_NEEDS_RESTART, pf->state);
7764 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7765 }
7766
7767 /**
7768 * ice_change_mtu - NDO callback to change the MTU
7769 * @netdev: network interface device structure
7770 * @new_mtu: new value for maximum frame size
7771 *
7772 * Returns 0 on success, negative on failure
7773 */
7774 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7775 {
7776 struct ice_netdev_priv *np = netdev_priv(netdev);
7777 struct ice_vsi *vsi = np->vsi;
7778 struct ice_pf *pf = vsi->back;
7779 struct bpf_prog *prog;
7780 u8 count = 0;
7781 int err = 0;
7782
7783 if (new_mtu == (int)netdev->mtu) {
7784 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7785 return 0;
7786 }
7787
7788 prog = vsi->xdp_prog;
7789 if (prog && !prog->aux->xdp_has_frags) {
7790 int frame_size = ice_max_xdp_frame_size(vsi);
7791
7792 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7793 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7794 frame_size - ICE_ETH_PKT_HDR_PAD);
7795 return -EINVAL;
7796 }
7797 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7798 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7799 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7800 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7801 return -EINVAL;
7802 }
7803 }
7804
7805 /* if a reset is in progress, wait for some time for it to complete */
7806 do {
7807 if (ice_is_reset_in_progress(pf->state)) {
7808 count++;
7809 usleep_range(1000, 2000);
7810 } else {
7811 break;
7812 }
7813
7814 } while (count < 100);
7815
7816 if (count == 100) {
7817 netdev_err(netdev, "can't change MTU. Device is busy\n");
7818 return -EBUSY;
7819 }
7820
7821 WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
7822 err = ice_down_up(vsi);
7823 if (err)
7824 return err;
7825
7826 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7827 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7828
7829 return err;
7830 }
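
/* Sketch of the XDP bound enforced above: with a single-buffer XDP program
 * attached, the largest MTU this callback accepts is
 *
 *	max_mtu = ice_max_xdp_frame_size(vsi) - ICE_ETH_PKT_HDR_PAD;
 *
 * so e.g. "ip link set dev <ifname> mtu 9000" returns -EINVAL unless the
 * program was loaded with multi-buffer (xdp_has_frags) support.
 */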
7831
7832 /**
7833 * ice_eth_ioctl - Access the hwtstamp interface
7834 * @netdev: network interface device structure
7835 * @ifr: interface request data
7836 * @cmd: ioctl command
7837 */
7838 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7839 {
7840 struct ice_netdev_priv *np = netdev_priv(netdev);
7841 struct ice_pf *pf = np->vsi->back;
7842
7843 switch (cmd) {
7844 case SIOCGHWTSTAMP:
7845 return ice_ptp_get_ts_config(pf, ifr);
7846 case SIOCSHWTSTAMP:
7847 return ice_ptp_set_ts_config(pf, ifr);
7848 default:
7849 return -EOPNOTSUPP;
7850 }
7851 }
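
/* Userspace reaches ice_eth_ioctl() through the standard hwtstamp ioctls.
 * Illustrative sketch (hypothetical userspace code, not part of the driver),
 * assuming an open socket fd and an interface named "eth0":
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * SIOCSHWTSTAMP lands in ice_ptp_set_ts_config(); SIOCGHWTSTAMP reads the
 * current config back via ice_ptp_get_ts_config().
 */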
7852
7853 /**
7854 * ice_aq_str - convert AQ err code to a string
7855 * @aq_err: the AQ error code to convert
7856 */
7857 const char *ice_aq_str(enum ice_aq_err aq_err)
7858 {
7859 switch (aq_err) {
7860 case ICE_AQ_RC_OK:
7861 return "OK";
7862 case ICE_AQ_RC_EPERM:
7863 return "ICE_AQ_RC_EPERM";
7864 case ICE_AQ_RC_ENOENT:
7865 return "ICE_AQ_RC_ENOENT";
7866 case ICE_AQ_RC_ENOMEM:
7867 return "ICE_AQ_RC_ENOMEM";
7868 case ICE_AQ_RC_EBUSY:
7869 return "ICE_AQ_RC_EBUSY";
7870 case ICE_AQ_RC_EEXIST:
7871 return "ICE_AQ_RC_EEXIST";
7872 case ICE_AQ_RC_EINVAL:
7873 return "ICE_AQ_RC_EINVAL";
7874 case ICE_AQ_RC_ENOSPC:
7875 return "ICE_AQ_RC_ENOSPC";
7876 case ICE_AQ_RC_ENOSYS:
7877 return "ICE_AQ_RC_ENOSYS";
7878 case ICE_AQ_RC_EMODE:
7879 return "ICE_AQ_RC_EMODE";
7880 case ICE_AQ_RC_ENOSEC:
7881 return "ICE_AQ_RC_ENOSEC";
7882 case ICE_AQ_RC_EBADSIG:
7883 return "ICE_AQ_RC_EBADSIG";
7884 case ICE_AQ_RC_ESVN:
7885 return "ICE_AQ_RC_ESVN";
7886 case ICE_AQ_RC_EBADMAN:
7887 return "ICE_AQ_RC_EBADMAN";
7888 case ICE_AQ_RC_EBADBUF:
7889 return "ICE_AQ_RC_EBADBUF";
7890 }
7891
7892 return "ICE_AQ_RC_UNKNOWN";
7893 }
7894
7895 /**
7896 * ice_set_rss_lut - Set RSS LUT
7897 * @vsi: Pointer to VSI structure
7898 * @lut: Lookup table
7899 * @lut_size: Lookup table size
7900 *
7901 * Returns 0 on success, negative on failure
7902 */
7903 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7904 {
7905 struct ice_aq_get_set_rss_lut_params params = {};
7906 struct ice_hw *hw = &vsi->back->hw;
7907 int status;
7908
7909 if (!lut)
7910 return -EINVAL;
7911
7912 params.vsi_handle = vsi->idx;
7913 params.lut_size = lut_size;
7914 params.lut_type = vsi->rss_lut_type;
7915 params.lut = lut;
7916
7917 	status = ice_aq_set_rss_lut(hw, &params);
7918 if (status)
7919 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7920 status, ice_aq_str(hw->adminq.sq_last_status));
7921
7922 return status;
7923 }
7924
7925 /**
7926 * ice_set_rss_key - Set RSS key
7927 * @vsi: Pointer to the VSI structure
7928 * @seed: RSS hash seed
7929 *
7930 * Returns 0 on success, negative on failure
7931 */
7932 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7933 {
7934 struct ice_hw *hw = &vsi->back->hw;
7935 int status;
7936
7937 if (!seed)
7938 return -EINVAL;
7939
7940 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7941 if (status)
7942 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7943 status, ice_aq_str(hw->adminq.sq_last_status));
7944
7945 return status;
7946 }
7947
7948 /**
7949 * ice_get_rss_lut - Get RSS LUT
7950 * @vsi: Pointer to VSI structure
7951 * @lut: Buffer to store the lookup table entries
7952 * @lut_size: Size of buffer to store the lookup table entries
7953 *
7954 * Returns 0 on success, negative on failure
7955 */
7956 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7957 {
7958 struct ice_aq_get_set_rss_lut_params params = {};
7959 struct ice_hw *hw = &vsi->back->hw;
7960 int status;
7961
7962 if (!lut)
7963 return -EINVAL;
7964
7965 params.vsi_handle = vsi->idx;
7966 params.lut_size = lut_size;
7967 params.lut_type = vsi->rss_lut_type;
7968 params.lut = lut;
7969
7970 	status = ice_aq_get_rss_lut(hw, &params);
7971 if (status)
7972 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7973 status, ice_aq_str(hw->adminq.sq_last_status));
7974
7975 return status;
7976 }
7977
7978 /**
7979 * ice_get_rss_key - Get RSS key
7980 * @vsi: Pointer to VSI structure
7981 * @seed: Buffer to store the key in
7982 *
7983 * Returns 0 on success, negative on failure
7984 */
7985 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7986 {
7987 struct ice_hw *hw = &vsi->back->hw;
7988 int status;
7989
7990 if (!seed)
7991 return -EINVAL;
7992
7993 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7994 if (status)
7995 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7996 status, ice_aq_str(hw->adminq.sq_last_status));
7997
7998 return status;
7999 }
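
/* Usage sketch for the RSS helpers above (illustrative; this mirrors the
 * kind of thing the ethtool -X paths do, and "n" is a hypothetical queue
 * count). Spreading flows evenly across the first n queues might look
 * roughly like:
 *
 *	u8 *lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 *	int i, err;
 *
 *	if (!lut)
 *		return -ENOMEM;
 *	for (i = 0; i < vsi->rss_table_size; i++)
 *		lut[i] = i % n;
 *	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
 *	kfree(lut);
 */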
8000
8001 /**
8002 * ice_set_rss_hfunc - Set RSS HASH function
8003 * @vsi: Pointer to VSI structure
8004 * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
8005 *
8006 * Returns 0 on success, negative on failure
8007 */
8008 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
8009 {
8010 struct ice_hw *hw = &vsi->back->hw;
8011 struct ice_vsi_ctx *ctx;
8012 bool symm;
8013 int err;
8014
8015 if (hfunc == vsi->rss_hfunc)
8016 return 0;
8017
8018 if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
8019 hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
8020 return -EOPNOTSUPP;
8021
8022 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8023 if (!ctx)
8024 return -ENOMEM;
8025
8026 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
8027 ctx->info.q_opt_rss = vsi->info.q_opt_rss;
8028 ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
8029 ctx->info.q_opt_rss |=
8030 FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
8031 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
8032 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
8033
8034 err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
8035 if (err) {
8036 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
8037 vsi->vsi_num, err);
8038 } else {
8039 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
8040 vsi->rss_hfunc = hfunc;
8041 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
8042 hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
8043 "Symmetric " : "");
8044 }
8045 kfree(ctx);
8046 if (err)
8047 return err;
8048
8049 /* Fix the symmetry setting for all existing RSS configurations */
8050 symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
8051 return ice_set_rss_cfg_symm(hw, vsi, symm);
8052 }
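
/* Usage sketch: switching the VSI to symmetric Toeplitz hashing (so both
 * directions of a flow hash to the same queue) would be roughly
 *
 *	err = ice_set_rss_hfunc(vsi, ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
 *
 * Note the helper also re-applies the symmetry setting to all existing RSS
 * configurations via ice_set_rss_cfg_symm().
 */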
8053
8054 /**
8055 * ice_bridge_getlink - Get the hardware bridge mode
8056 * @skb: skb buff
8057 * @pid: process ID
8058 * @seq: RTNL message seq
8059 * @dev: the netdev being configured
8060 * @filter_mask: filter mask passed in
8061 * @nlflags: netlink flags passed in
8062 *
8063 * Return the bridge mode (VEB/VEPA)
8064 */
8065 static int
8066 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8067 struct net_device *dev, u32 filter_mask, int nlflags)
8068 {
8069 struct ice_netdev_priv *np = netdev_priv(dev);
8070 struct ice_vsi *vsi = np->vsi;
8071 struct ice_pf *pf = vsi->back;
8072 u16 bmode;
8073
8074 bmode = pf->first_sw->bridge_mode;
8075
8076 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
8077 filter_mask, NULL);
8078 }
8079
8080 /**
8081 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8082 * @vsi: Pointer to VSI structure
8083 * @bmode: Hardware bridge mode (VEB/VEPA)
8084 *
8085 * Returns 0 on success, negative on failure
8086 */
8087 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
8088 {
8089 struct ice_aqc_vsi_props *vsi_props;
8090 struct ice_hw *hw = &vsi->back->hw;
8091 struct ice_vsi_ctx *ctxt;
8092 int ret;
8093
8094 vsi_props = &vsi->info;
8095
8096 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
8097 if (!ctxt)
8098 return -ENOMEM;
8099
8100 ctxt->info = vsi->info;
8101
8102 if (bmode == BRIDGE_MODE_VEB)
8103 /* change from VEPA to VEB mode */
8104 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8105 else
8106 /* change from VEB to VEPA mode */
8107 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8108 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
8109
8110 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
8111 if (ret) {
8112 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
8113 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
8114 goto out;
8115 }
8116 /* Update sw flags for book keeping */
8117 vsi_props->sw_flags = ctxt->info.sw_flags;
8118
8119 out:
8120 kfree(ctxt);
8121 return ret;
8122 }
8123
8124 /**
8125 * ice_bridge_setlink - Set the hardware bridge mode
8126 * @dev: the netdev being configured
8127 * @nlh: RTNL message
8128 * @flags: bridge setlink flags
8129 * @extack: netlink extended ack
8130 *
8131  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
8132  * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
8133  * not already set) for all VSIs connected to this switch, and also updates
8134  * the unicast switch filter rules for the corresponding switch of the netdev.
8135 */
8136 static int
8137 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
8138 u16 __always_unused flags,
8139 struct netlink_ext_ack __always_unused *extack)
8140 {
8141 struct ice_netdev_priv *np = netdev_priv(dev);
8142 struct ice_pf *pf = np->vsi->back;
8143 struct nlattr *attr, *br_spec;
8144 struct ice_hw *hw = &pf->hw;
8145 struct ice_sw *pf_sw;
8146 int rem, v, err = 0;
8147
8148 pf_sw = pf->first_sw;
8149 /* find the attribute in the netlink message */
8150 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8151 if (!br_spec)
8152 return -EINVAL;
8153
8154 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
8155 __u16 mode = nla_get_u16(attr);
8156
8157 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
8158 return -EINVAL;
8159 /* Continue if bridge mode is not being flipped */
8160 if (mode == pf_sw->bridge_mode)
8161 continue;
8162 /* Iterates through the PF VSI list and update the loopback
8163 * mode of the VSI
8164 */
8165 ice_for_each_vsi(pf, v) {
8166 if (!pf->vsi[v])
8167 continue;
8168 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8169 if (err)
8170 return err;
8171 }
8172
8173 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
8174 /* Update the unicast switch filter rules for the corresponding
8175 * switch of the netdev
8176 */
8177 err = ice_update_sw_rule_bridge_mode(hw);
8178 if (err) {
8179 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
8180 mode, err,
8181 ice_aq_str(hw->adminq.sq_last_status));
8182 /* revert hw->evb_veb */
8183 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
8184 return err;
8185 }
8186
8187 pf_sw->bridge_mode = mode;
8188 }
8189
8190 return 0;
8191 }
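
/* The setlink/getlink callbacks above are exercised with iproute2, e.g.
 * (illustrative; <ifname> is the PF netdev):
 *
 *	bridge link set dev <ifname> hwmode veb
 *	bridge link show dev <ifname>
 *
 * VEB allows local loopback switching between VSIs on the same uplink;
 * VEPA forces traffic out to the adjacent switch.
 */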
8192
8193 /**
8194 * ice_tx_timeout - Respond to a Tx Hang
8195 * @netdev: network interface device structure
8196 * @txqueue: Tx queue
8197 */
8198 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
8199 {
8200 struct ice_netdev_priv *np = netdev_priv(netdev);
8201 struct ice_tx_ring *tx_ring = NULL;
8202 struct ice_vsi *vsi = np->vsi;
8203 struct ice_pf *pf = vsi->back;
8204 u32 i;
8205
8206 pf->tx_timeout_count++;
8207
8208 	/* Check if PFC is enabled for the TC to which the queue belongs.
8209 	 * If yes, then the Tx timeout is not caused by a hung queue and
8210 	 * there is no need to reset and rebuild
8211 	 */
8212 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8213 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8214 txqueue);
8215 return;
8216 }
8217
8218 /* now that we have an index, find the tx_ring struct */
8219 ice_for_each_txq(vsi, i)
8220 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8221 if (txqueue == vsi->tx_rings[i]->q_index) {
8222 tx_ring = vsi->tx_rings[i];
8223 break;
8224 }
8225
8226 /* Reset recovery level if enough time has elapsed after last timeout.
8227 * Also ensure no new reset action happens before next timeout period.
8228 */
8229 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8230 pf->tx_timeout_recovery_level = 1;
8231 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8232 netdev->watchdog_timeo)))
8233 return;
8234
8235 if (tx_ring) {
8236 struct ice_hw *hw = &pf->hw;
8237 u32 head, val = 0;
8238
8239 head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
8240 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8241 /* Read interrupt register */
8242 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8243
8244 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8245 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8246 head, tx_ring->next_to_use, val);
8247 }
8248
8249 pf->tx_timeout_last_recovery = jiffies;
8250 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8251 pf->tx_timeout_recovery_level, txqueue);
8252
8253 switch (pf->tx_timeout_recovery_level) {
8254 case 1:
8255 set_bit(ICE_PFR_REQ, pf->state);
8256 break;
8257 case 2:
8258 set_bit(ICE_CORER_REQ, pf->state);
8259 break;
8260 case 3:
8261 set_bit(ICE_GLOBR_REQ, pf->state);
8262 break;
8263 default:
8264 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8265 set_bit(ICE_DOWN, pf->state);
8266 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8267 set_bit(ICE_SERVICE_DIS, pf->state);
8268 break;
8269 }
8270
8271 ice_service_task_schedule(pf);
8272 pf->tx_timeout_recovery_level++;
8273 }
8274
8275 /**
8276 * ice_setup_tc_cls_flower - flower classifier offloads
8277 * @np: net device to configure
8278 * @filter_dev: device on which filter is added
8279 * @cls_flower: offload data
8280 */
8281 static int
8282 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8283 struct net_device *filter_dev,
8284 struct flow_cls_offload *cls_flower)
8285 {
8286 struct ice_vsi *vsi = np->vsi;
8287
8288 if (cls_flower->common.chain_index)
8289 return -EOPNOTSUPP;
8290
8291 switch (cls_flower->command) {
8292 case FLOW_CLS_REPLACE:
8293 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
8294 case FLOW_CLS_DESTROY:
8295 return ice_del_cls_flower(vsi, cls_flower);
8296 default:
8297 return -EINVAL;
8298 }
8299 }
8300
8301 /**
8302 * ice_setup_tc_block_cb - callback handler registered for TC block
8303 * @type: TC SETUP type
8304 * @type_data: TC flower offload data that contains user input
8305 * @cb_priv: netdev private data
8306 */
8307 static int
8308 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
8309 {
8310 struct ice_netdev_priv *np = cb_priv;
8311
8312 switch (type) {
8313 case TC_SETUP_CLSFLOWER:
8314 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8315 type_data);
8316 default:
8317 return -EOPNOTSUPP;
8318 }
8319 }
8320
8321 /**
8322 * ice_validate_mqprio_qopt - Validate TCF input parameters
8323 * @vsi: Pointer to VSI
8324 * @mqprio_qopt: input parameters for mqprio queue configuration
8325 *
8326  * This function validates the MQPRIO params, such as qcount (a power of 2
8327  * wherever needed), and makes sure the user doesn't specify a qcount or BW
8328  * rate limit for more TCs than "num_tc"
8329 */
8330 static int
8331 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8332 struct tc_mqprio_qopt_offload *mqprio_qopt)
8333 {
8334 int non_power_of_2_qcount = 0;
8335 struct ice_pf *pf = vsi->back;
8336 int max_rss_q_cnt = 0;
8337 u64 sum_min_rate = 0;
8338 struct device *dev;
8339 int i, speed;
8340 u8 num_tc;
8341
8342 if (vsi->type != ICE_VSI_PF)
8343 return -EINVAL;
8344
8345 if (mqprio_qopt->qopt.offset[0] != 0 ||
8346 mqprio_qopt->qopt.num_tc < 1 ||
8347 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8348 return -EINVAL;
8349
8350 dev = ice_pf_to_dev(pf);
8351 vsi->ch_rss_size = 0;
8352 num_tc = mqprio_qopt->qopt.num_tc;
8353 speed = ice_get_link_speed_kbps(vsi);
8354
8355 for (i = 0; num_tc; i++) {
8356 int qcount = mqprio_qopt->qopt.count[i];
8357 u64 max_rate, min_rate, rem;
8358
8359 if (!qcount)
8360 return -EINVAL;
8361
8362 if (is_power_of_2(qcount)) {
8363 if (non_power_of_2_qcount &&
8364 qcount > non_power_of_2_qcount) {
8365 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8366 qcount, non_power_of_2_qcount);
8367 return -EINVAL;
8368 }
8369 if (qcount > max_rss_q_cnt)
8370 max_rss_q_cnt = qcount;
8371 } else {
8372 if (non_power_of_2_qcount &&
8373 qcount != non_power_of_2_qcount) {
8374 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8375 qcount, non_power_of_2_qcount);
8376 return -EINVAL;
8377 }
8378 if (qcount < max_rss_q_cnt) {
8379 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8380 qcount, max_rss_q_cnt);
8381 return -EINVAL;
8382 }
8383 max_rss_q_cnt = qcount;
8384 non_power_of_2_qcount = qcount;
8385 }
8386
8387 		/* The TC command takes input in K/M/Gbps or K/M/Gbit etc. but
8388 		 * converts the bandwidth rate limit into Bytes/s when
8389 		 * passing it down to the driver. So convert the input
8390 		 * bandwidth from Bytes/s back to Kbps here
8391 		 */
8392 max_rate = mqprio_qopt->max_rate[i];
8393 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8394
8395 /* min_rate is minimum guaranteed rate and it can't be zero */
8396 min_rate = mqprio_qopt->min_rate[i];
8397 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8398 sum_min_rate += min_rate;
8399
8400 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8401 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8402 min_rate, ICE_MIN_BW_LIMIT);
8403 return -EINVAL;
8404 }
8405
8406 if (max_rate && max_rate > speed) {
8407 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8408 i, max_rate, speed);
8409 return -EINVAL;
8410 }
8411
8412 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8413 if (rem) {
8414 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8415 i, ICE_MIN_BW_LIMIT);
8416 return -EINVAL;
8417 }
8418
8419 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8420 if (rem) {
8421 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8422 i, ICE_MIN_BW_LIMIT);
8423 return -EINVAL;
8424 }
8425
8426 /* min_rate can't be more than max_rate, except when max_rate
8427 * is zero (implies max_rate sought is max line rate). In such
8428 * a case min_rate can be more than max.
8429 */
8430 if (max_rate && min_rate > max_rate) {
8431 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8432 min_rate, max_rate);
8433 return -EINVAL;
8434 }
8435
8436 if (i >= mqprio_qopt->qopt.num_tc - 1)
8437 break;
8438 if (mqprio_qopt->qopt.offset[i + 1] !=
8439 (mqprio_qopt->qopt.offset[i] + qcount))
8440 return -EINVAL;
8441 }
8442 if (vsi->num_rxq <
8443 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8444 return -EINVAL;
8445 if (vsi->num_txq <
8446 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8447 return -EINVAL;
8448
8449 if (sum_min_rate && sum_min_rate > (u64)speed) {
8450 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8451 sum_min_rate, speed);
8452 return -EINVAL;
8453 }
8454
8455 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8456 vsi->ch_rss_size = max_rss_q_cnt;
8457
8458 return 0;
8459 }
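
/* Example of an mqprio request that passes the validation above
 * (illustrative tc invocation; per-TC queue counts must be powers of two
 * with at most one exception, offsets must be contiguous, and rates must be
 * multiples of ICE_MIN_BW_LIMIT):
 *
 *	tc qdisc add dev <ifname> root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 8@4 hw 1 mode channel \
 *		shaper bw_rlimit max_rate 2Gbit 4Gbit
 */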
8460
8461 /**
8462 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8463 * @pf: ptr to PF device
8464 * @vsi: ptr to VSI
8465 */
8466 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8467 {
8468 struct device *dev = ice_pf_to_dev(pf);
8469 bool added = false;
8470 struct ice_hw *hw;
8471 int flow;
8472
8473 if (!(vsi->num_gfltr || vsi->num_bfltr))
8474 return -EINVAL;
8475
8476 hw = &pf->hw;
8477 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8478 struct ice_fd_hw_prof *prof;
8479 int tun, status;
8480 u64 entry_h;
8481
8482 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8483 hw->fdir_prof[flow]->cnt))
8484 continue;
8485
8486 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8487 enum ice_flow_priority prio;
8488
8489 /* add this VSI to FDir profile for this flow */
8490 prio = ICE_FLOW_PRIO_NORMAL;
8491 prof = hw->fdir_prof[flow];
8492 status = ice_flow_add_entry(hw, ICE_BLK_FD,
8493 prof->prof_id[tun],
8494 prof->vsi_h[0], vsi->idx,
8495 prio, prof->fdir_seg[tun],
8496 &entry_h);
8497 if (status) {
8498 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8499 vsi->idx, flow);
8500 continue;
8501 }
8502
8503 prof->entry_h[prof->cnt][tun] = entry_h;
8504 }
8505
8506 /* store VSI for filter replay and delete */
8507 prof->vsi_h[prof->cnt] = vsi->idx;
8508 prof->cnt++;
8509
8510 added = true;
8511 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8512 flow);
8513 }
8514
8515 if (!added)
8516 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8517
8518 return 0;
8519 }
8520
8521 /**
8522 * ice_add_channel - add a channel by adding VSI
8523 * @pf: ptr to PF device
8524 * @sw_id: underlying HW switching element ID
8525 * @ch: ptr to channel structure
8526 *
8527 * Add a channel (VSI) using add_vsi and queue_map
8528 */
8529 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8530 {
8531 struct device *dev = ice_pf_to_dev(pf);
8532 struct ice_vsi *vsi;
8533
8534 if (ch->type != ICE_VSI_CHNL) {
8535 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8536 return -EINVAL;
8537 }
8538
8539 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8540 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8541 dev_err(dev, "create chnl VSI failure\n");
8542 return -EINVAL;
8543 }
8544
8545 ice_add_vsi_to_fdir(pf, vsi);
8546
8547 ch->sw_id = sw_id;
8548 ch->vsi_num = vsi->vsi_num;
8549 ch->info.mapping_flags = vsi->info.mapping_flags;
8550 ch->ch_vsi = vsi;
8551 /* set the back pointer of channel for newly created VSI */
8552 vsi->ch = ch;
8553
8554 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8555 sizeof(vsi->info.q_mapping));
8556 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8557 sizeof(vsi->info.tc_mapping));
8558
8559 return 0;
8560 }
8561
8562 /**
8563  * ice_chnl_cfg_res - configure channel specific resources
8564 * @vsi: the VSI being setup
8565 * @ch: ptr to channel structure
8566 *
8567 * Configure channel specific resources such as rings, vector.
8568 */
8569 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8570 {
8571 int i;
8572
8573 for (i = 0; i < ch->num_txq; i++) {
8574 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8575 struct ice_ring_container *rc;
8576 struct ice_tx_ring *tx_ring;
8577 struct ice_rx_ring *rx_ring;
8578
8579 tx_ring = vsi->tx_rings[ch->base_q + i];
8580 rx_ring = vsi->rx_rings[ch->base_q + i];
8581 if (!tx_ring || !rx_ring)
8582 continue;
8583
8584 /* setup ring being channel enabled */
8585 tx_ring->ch = ch;
8586 rx_ring->ch = ch;
8587
8588 /* following code block sets up vector specific attributes */
8589 tx_q_vector = tx_ring->q_vector;
8590 rx_q_vector = rx_ring->q_vector;
8591 if (!tx_q_vector && !rx_q_vector)
8592 continue;
8593
8594 if (tx_q_vector) {
8595 tx_q_vector->ch = ch;
8596 /* setup Tx and Rx ITR setting if DIM is off */
8597 rc = &tx_q_vector->tx;
8598 if (!ITR_IS_DYNAMIC(rc))
8599 ice_write_itr(rc, rc->itr_setting);
8600 }
8601 if (rx_q_vector) {
8602 rx_q_vector->ch = ch;
8603 /* setup Tx and Rx ITR setting if DIM is off */
8604 rc = &rx_q_vector->rx;
8605 if (!ITR_IS_DYNAMIC(rc))
8606 ice_write_itr(rc, rc->itr_setting);
8607 }
8608 }
8609
8610 	/* If the channel has a non-zero num_txq or num_rxq, the GLINT_ITR
8611 	 * register will have been written above to perform an in-context
8612 	 * update, hence perform a flush here
8613 	 */
8614 if (ch->num_txq || ch->num_rxq)
8615 ice_flush(&vsi->back->hw);
8616 }
8617
8618 /**
8619 * ice_cfg_chnl_all_res - configure channel resources
8620  * @vsi: ptr to main_vsi
8621 * @ch: ptr to channel structure
8622 *
8623 * This function configures channel specific resources such as flow-director
8624 * counter index, and other resources such as queues, vectors, ITR settings
8625 */
8626 static void
8627 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8628 {
8629 /* configure channel (aka ADQ) resources such as queues, vectors,
8630 * ITR settings for channel specific vectors and anything else
8631 */
8632 ice_chnl_cfg_res(vsi, ch);
8633 }
8634
8635 /**
8636 * ice_setup_hw_channel - setup new channel
8637 * @pf: ptr to PF device
8638 * @vsi: the VSI being setup
8639 * @ch: ptr to channel structure
8640 * @sw_id: underlying HW switching element ID
8641 * @type: type of channel to be created (VMDq2/VF)
8642 *
8643 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8644 * and configures Tx rings accordingly
8645 */
8646 static int
8647 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8648 struct ice_channel *ch, u16 sw_id, u8 type)
8649 {
8650 struct device *dev = ice_pf_to_dev(pf);
8651 int ret;
8652
8653 ch->base_q = vsi->next_base_q;
8654 ch->type = type;
8655
8656 ret = ice_add_channel(pf, sw_id, ch);
8657 if (ret) {
8658 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8659 return ret;
8660 }
8661
8662 /* configure/setup ADQ specific resources */
8663 ice_cfg_chnl_all_res(vsi, ch);
8664
8665 /* make sure to update the next_base_q so that subsequent channel's
8666 * (aka ADQ) VSI queue map is correct
8667 */
8668 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8669 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8670 ch->num_rxq);
8671
8672 return 0;
8673 }
8674
8675 /**
8676 * ice_setup_channel - setup new channel using uplink element
8677 * @pf: ptr to PF device
8678 * @vsi: the VSI being setup
8679 * @ch: ptr to channel structure
8680 *
8681 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8682 * and uplink switching element
8683 */
8684 static bool
8685 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8686 struct ice_channel *ch)
8687 {
8688 struct device *dev = ice_pf_to_dev(pf);
8689 u16 sw_id;
8690 int ret;
8691
8692 if (vsi->type != ICE_VSI_PF) {
8693 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8694 return false;
8695 }
8696
8697 sw_id = pf->first_sw->sw_id;
8698
8699 /* create channel (VSI) */
8700 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8701 if (ret) {
8702 dev_err(dev, "failed to setup hw_channel\n");
8703 return false;
8704 }
8705 dev_dbg(dev, "successfully created channel()\n");
8706
8707 return ch->ch_vsi ? true : false;
8708 }
8709
8710 /**
8711 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8712 * @vsi: VSI to be configured
8713 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8714 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8715 */
8716 static int
8717 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8718 {
8719 int err;
8720
8721 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8722 if (err)
8723 return err;
8724
8725 return ice_set_max_bw_limit(vsi, max_tx_rate);
8726 }
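
/* Usage sketch: both rates are in Kbps, so capping a channel VSI at 1 Gbps
 * with a 100 Mbps guaranteed minimum would be roughly
 *
 *	err = ice_set_bw_limit(ch->ch_vsi, 1000000, 100000);
 *
 * and passing 0 for either rate leaves that direction unlimited.
 */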
8727
8728 /**
8729 * ice_create_q_channel - function to create channel
8730 * @vsi: VSI to be configured
8731 * @ch: ptr to channel (it contains channel specific params)
8732 *
8733 * This function creates channel (VSI) using num_queues specified by user,
8734 * reconfigs RSS if needed.
8735 */
8736 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8737 {
8738 struct ice_pf *pf = vsi->back;
8739 struct device *dev;
8740
8741 if (!ch)
8742 return -EINVAL;
8743
8744 dev = ice_pf_to_dev(pf);
8745 if (!ch->num_txq || !ch->num_rxq) {
8746 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8747 return -EINVAL;
8748 }
8749
8750 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8751 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8752 vsi->cnt_q_avail, ch->num_txq);
8753 return -EINVAL;
8754 }
8755
8756 if (!ice_setup_channel(pf, vsi, ch)) {
8757 dev_info(dev, "Failed to setup channel\n");
8758 return -EINVAL;
8759 }
8760 /* configure BW rate limit */
8761 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8762 int ret;
8763
8764 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8765 ch->min_tx_rate);
8766 if (ret)
8767 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8768 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8769 else
8770 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8771 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8772 }
8773
8774 vsi->cnt_q_avail -= ch->num_txq;
8775
8776 return 0;
8777 }
8778
8779 /**
8780 * ice_rem_all_chnl_fltrs - removes all channel filters
8781  * @pf: ptr to PF; TC-flower based filters are tracked at the PF level
8782 *
8783 * Remove all advanced switch filters only if they are channel specific
8784 * tc-flower based filter
8785 */
8786 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8787 {
8788 struct ice_tc_flower_fltr *fltr;
8789 struct hlist_node *node;
8790
8791 /* to remove all channel filters, iterate an ordered list of filters */
8792 hlist_for_each_entry_safe(fltr, node,
8793 &pf->tc_flower_fltr_list,
8794 tc_flower_node) {
8795 struct ice_rule_query_data rule;
8796 int status;
8797
8798 /* for now process only channel specific filters */
8799 if (!ice_is_chnl_fltr(fltr))
8800 continue;
8801
8802 rule.rid = fltr->rid;
8803 rule.rule_id = fltr->rule_id;
8804 rule.vsi_handle = fltr->dest_vsi_handle;
8805 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8806 if (status) {
8807 if (status == -ENOENT)
8808 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8809 rule.rule_id);
8810 else
8811 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8812 status);
8813 } else if (fltr->dest_vsi) {
8814 /* update advanced switch filter count */
8815 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8816 u32 flags = fltr->flags;
8817
8818 fltr->dest_vsi->num_chnl_fltr--;
8819 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8820 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8821 pf->num_dmac_chnl_fltrs--;
8822 }
8823 }
8824
8825 hlist_del(&fltr->tc_flower_node);
8826 kfree(fltr);
8827 }
8828 }
8829
8830 /**
8831 * ice_remove_q_channels - Remove queue channels for the TCs
8832 * @vsi: VSI to be configured
8833 * @rem_fltr: delete advanced switch filter or not
8834 *
8835 * Remove queue channels for the TCs
8836 */
8837 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8838 {
8839 struct ice_channel *ch, *ch_tmp;
8840 struct ice_pf *pf = vsi->back;
8841 int i;
8842
8843 	/* remove all tc-flower based filters if they are channel filters only */
8844 if (rem_fltr)
8845 ice_rem_all_chnl_fltrs(pf);
8846
8847 /* remove ntuple filters since queue configuration is being changed */
8848 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8849 struct ice_hw *hw = &pf->hw;
8850
8851 mutex_lock(&hw->fdir_fltr_lock);
8852 ice_fdir_del_all_fltrs(vsi);
8853 mutex_unlock(&hw->fdir_fltr_lock);
8854 }
8855
8856 /* perform cleanup for channels if they exist */
8857 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8858 struct ice_vsi *ch_vsi;
8859
8860 list_del(&ch->list);
8861 ch_vsi = ch->ch_vsi;
8862 if (!ch_vsi) {
8863 kfree(ch);
8864 continue;
8865 }
8866
8867 /* Reset queue contexts */
8868 for (i = 0; i < ch->num_rxq; i++) {
8869 struct ice_tx_ring *tx_ring;
8870 struct ice_rx_ring *rx_ring;
8871
8872 tx_ring = vsi->tx_rings[ch->base_q + i];
8873 rx_ring = vsi->rx_rings[ch->base_q + i];
8874 if (tx_ring) {
8875 tx_ring->ch = NULL;
8876 if (tx_ring->q_vector)
8877 tx_ring->q_vector->ch = NULL;
8878 }
8879 if (rx_ring) {
8880 rx_ring->ch = NULL;
8881 if (rx_ring->q_vector)
8882 rx_ring->q_vector->ch = NULL;
8883 }
8884 }
8885
8886 /* Release FD resources for the channel VSI */
8887 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8888
8889 /* clear the VSI from scheduler tree */
8890 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8891
8892 /* Delete VSI from FW, PF and HW VSI arrays */
8893 ice_vsi_delete(ch->ch_vsi);
8894
8895 /* free the channel */
8896 kfree(ch);
8897 }
8898
8899 /* clear the channel VSI map which is stored in main VSI */
8900 ice_for_each_chnl_tc(i)
8901 vsi->tc_map_vsi[i] = NULL;
8902
8903 /* reset main VSI's all TC information */
8904 vsi->all_enatc = 0;
8905 vsi->all_numtc = 0;
8906 }
8907
8908 /**
8909  * ice_rebuild_channels - rebuild channels
8910 * @pf: ptr to PF
8911 *
8912 * Recreate channel VSIs and replay filters
8913 */
8914 static int ice_rebuild_channels(struct ice_pf *pf)
8915 {
8916 struct device *dev = ice_pf_to_dev(pf);
8917 struct ice_vsi *main_vsi;
8918 bool rem_adv_fltr = true;
8919 struct ice_channel *ch;
8920 struct ice_vsi *vsi;
8921 int tc_idx = 1;
8922 int i, err;
8923
8924 main_vsi = ice_get_main_vsi(pf);
8925 if (!main_vsi)
8926 return 0;
8927
8928 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8929 main_vsi->old_numtc == 1)
8930 return 0; /* nothing to be done */
8931
8932 	/* reconfigure main VSI based on the old TC value and the cached
8933 	 * MQPRIO opts
8934 	 */
8935 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8936 if (err) {
8937 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8938 main_vsi->old_ena_tc, main_vsi->vsi_num);
8939 return err;
8940 }
8941
8942 /* rebuild ADQ VSIs */
8943 ice_for_each_vsi(pf, i) {
8944 enum ice_vsi_type type;
8945
8946 vsi = pf->vsi[i];
8947 if (!vsi || vsi->type != ICE_VSI_CHNL)
8948 continue;
8949
8950 type = vsi->type;
8951
8952 /* rebuild ADQ VSI */
8953 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8954 if (err) {
8955 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8956 ice_vsi_type_str(type), vsi->idx, err);
8957 goto cleanup;
8958 }
8959
8960 		/* Re-map HW VSI number, using the VSI handle that was just
8961 		 * validated by the ice_vsi_rebuild() call above
8962 		 */
8963 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8964
8965 /* replay filters for the VSI */
8966 err = ice_replay_vsi(&pf->hw, vsi->idx);
8967 if (err) {
8968 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8969 ice_vsi_type_str(type), err, vsi->idx);
8970 rem_adv_fltr = false;
8971 goto cleanup;
8972 }
8973 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8974 ice_vsi_type_str(type), vsi->idx);
8975
8976 /* store ADQ VSI at correct TC index in main VSI's
8977 * map of TC to VSI
8978 */
8979 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8980 }
8981
8982 	/* ADQ VSIs have been rebuilt successfully, so set up channels
8983 	 * for the main VSI's Tx and Rx rings
8984 	 */
8985 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8986 struct ice_vsi *ch_vsi;
8987
8988 ch_vsi = ch->ch_vsi;
8989 if (!ch_vsi)
8990 continue;
8991
8992 /* reconfig channel resources */
8993 ice_cfg_chnl_all_res(main_vsi, ch);
8994
8995 /* replay BW rate limit if it is non-zero */
8996 if (!ch->max_tx_rate && !ch->min_tx_rate)
8997 continue;
8998
8999 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
9000 ch->min_tx_rate);
9001 if (err)
9002 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9003 err, ch->max_tx_rate, ch->min_tx_rate,
9004 ch_vsi->vsi_num);
9005 else
9006 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9007 ch->max_tx_rate, ch->min_tx_rate,
9008 ch_vsi->vsi_num);
9009 }
9010
9011 /* reconfig RSS for main VSI */
9012 if (main_vsi->ch_rss_size)
9013 ice_vsi_cfg_rss_lut_key(main_vsi);
9014
9015 return 0;
9016
9017 cleanup:
9018 ice_remove_q_channels(main_vsi, rem_adv_fltr);
9019 return err;
9020 }
9021
9022 /**
9023 * ice_create_q_channels - Add queue channel for the given TCs
9024 * @vsi: VSI to be configured
9025 *
9026 * Configures queue channel mapping to the given TCs
9027 */
9028 static int ice_create_q_channels(struct ice_vsi *vsi)
9029 {
9030 struct ice_pf *pf = vsi->back;
9031 struct ice_channel *ch;
9032 int ret = 0, i;
9033
9034 ice_for_each_chnl_tc(i) {
9035 if (!(vsi->all_enatc & BIT(i)))
9036 continue;
9037
9038 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
9039 if (!ch) {
9040 ret = -ENOMEM;
9041 goto err_free;
9042 }
9043 INIT_LIST_HEAD(&ch->list);
9044 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
9045 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
9046 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
9047 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
9048 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
9049
9050 /* convert to Kbits/s */
9051 if (ch->max_tx_rate)
9052 ch->max_tx_rate = div_u64(ch->max_tx_rate,
9053 ICE_BW_KBPS_DIVISOR);
9054 if (ch->min_tx_rate)
9055 ch->min_tx_rate = div_u64(ch->min_tx_rate,
9056 ICE_BW_KBPS_DIVISOR);
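		/* Worked example (hypothetical numbers, assuming
		 * ICE_BW_KBPS_DIVISOR is 125): mqprio hands the driver these
		 * rates in bytes/s, so a 1 Gbps cap arrives as 125000000 B/s
		 * and 125000000 / 125 = 1000000 Kbps.
		 */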
9057
9058 ret = ice_create_q_channel(vsi, ch);
9059 if (ret) {
9060 dev_err(ice_pf_to_dev(pf),
9061 "failed creating channel TC:%d\n", i);
9062 kfree(ch);
9063 goto err_free;
9064 }
9065 list_add_tail(&ch->list, &vsi->ch_list);
9066 vsi->tc_map_vsi[i] = ch->ch_vsi;
9067 dev_dbg(ice_pf_to_dev(pf),
9068 "successfully created channel: VSI %pK\n", ch->ch_vsi);
9069 }
9070 return 0;
9071
9072 err_free:
9073 ice_remove_q_channels(vsi, false);
9074
9075 return ret;
9076 }
9077
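/* A channel-mode mqprio request typically reaches the handler below from
 * userspace via tc; a hypothetical example (interface name and rates are
 * illustrative only):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 4@0 4@4 hw 1 mode channel shaper bw_rlimit \
 *      min_rate 1Gbit 2Gbit max_rate 3Gbit 4Gbit
 *
 * Deleting the qdisc ("tc qdisc del dev eth0 root") arrives with
 * qopt.hw == 0 and takes the teardown path.
 */
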
9078 /**
9079 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9080 * @netdev: net device to configure
9081 * @type_data: TC offload data
9082 */
9083 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
9084 {
9085 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9086 struct ice_netdev_priv *np = netdev_priv(netdev);
9087 struct ice_vsi *vsi = np->vsi;
9088 struct ice_pf *pf = vsi->back;
9089 u16 mode, ena_tc_qdisc = 0;
9090 int cur_txq, cur_rxq;
9091 u8 hw = 0, num_tcf;
9092 struct device *dev;
9093 int ret, i;
9094
9095 dev = ice_pf_to_dev(pf);
9096 num_tcf = mqprio_qopt->qopt.num_tc;
9097 hw = mqprio_qopt->qopt.hw;
9098 mode = mqprio_qopt->mode;
9099 if (!hw) {
9100 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9101 vsi->ch_rss_size = 0;
9102 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9103 goto config_tcf;
9104 }
9105
9106 /* Generate queue region map for number of TCF requested */
9107 for (i = 0; i < num_tcf; i++)
9108 ena_tc_qdisc |= BIT(i);
9109
9110 switch (mode) {
9111 case TC_MQPRIO_MODE_CHANNEL:
9112
9113 if (pf->hw.port_info->is_custom_tx_enabled) {
9114 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
9115 return -EBUSY;
9116 }
9117 ice_tear_down_devlink_rate_tree(pf);
9118
9119 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
9120 if (ret) {
9121 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
9122 ret);
9123 return ret;
9124 }
9125 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9126 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9127 		/* don't assume the state of hw_tc_offload during driver load;
9128 		 * set the TC flower filter flag only if hw_tc_offload is
9129 		 * already ON
9130 		 */
9131 if (vsi->netdev->features & NETIF_F_HW_TC)
9132 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
9133 break;
9134 default:
9135 return -EINVAL;
9136 }
9137
9138 config_tcf:
9139
9140 /* Requesting same TCF configuration as already enabled */
9141 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
9142 mode != TC_MQPRIO_MODE_CHANNEL)
9143 return 0;
9144
9145 /* Pause VSI queues */
9146 ice_dis_vsi(vsi, true);
9147
9148 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
9149 ice_remove_q_channels(vsi, true);
9150
9151 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9152 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9153 num_online_cpus());
9154 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9155 num_online_cpus());
9156 } else {
9157 		/* logic to rebuild VSI, same as for ethtool -L */
9158 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
9159
9160 for (i = 0; i < num_tcf; i++) {
9161 if (!(ena_tc_qdisc & BIT(i)))
9162 continue;
9163
9164 offset = vsi->mqprio_qopt.qopt.offset[i];
9165 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9166 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9167 }
9168 vsi->req_txq = offset + qcount_tx;
9169 vsi->req_rxq = offset + qcount_rx;
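		/* e.g. for a hypothetical mqprio mapping "queues 4@0 8@4", the
		 * last enabled TC has offset 4 and count 8, so the main VSI
		 * is rebuilt with req_txq = req_rxq = 12 queues
		 */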
9170
9171 		/* store away the original rss_size info, so that it gets
9172 		 * reused by ice_vsi_rebuild during the tc-qdisc delete stage
9173 		 * to determine what the rss_size for the main VSI should be
9174 		 */
9175 vsi->orig_rss_size = vsi->rss_size;
9176 }
9177
9178 	/* save the current number of Tx and Rx queues before calling VSI
9179 	 * rebuild, so they can be restored if the rebuild fails
9180 	 */
9181 cur_txq = vsi->num_txq;
9182 cur_rxq = vsi->num_rxq;
9183
9184 	/* proceed with rebuilding the main VSI using the correct number of queues */
9185 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9186 if (ret) {
9187 /* fallback to current number of queues */
9188 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
9189 vsi->req_txq = cur_txq;
9190 vsi->req_rxq = cur_rxq;
9191 clear_bit(ICE_RESET_FAILED, pf->state);
9192 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9193 dev_err(dev, "Rebuild of main VSI failed again\n");
9194 return ret;
9195 }
9196 }
9197
9198 vsi->all_numtc = num_tcf;
9199 vsi->all_enatc = ena_tc_qdisc;
9200 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9201 if (ret) {
9202 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
9203 vsi->vsi_num);
9204 goto exit;
9205 }
9206
9207 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9208 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9209 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9210
9211 /* set TC0 rate limit if specified */
9212 if (max_tx_rate || min_tx_rate) {
9213 /* convert to Kbits/s */
9214 if (max_tx_rate)
9215 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
9216 if (min_tx_rate)
9217 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
9218
9219 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9220 if (!ret) {
9221 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
9222 max_tx_rate, min_tx_rate, vsi->vsi_num);
9223 } else {
9224 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
9225 max_tx_rate, min_tx_rate, vsi->vsi_num);
9226 goto exit;
9227 }
9228 }
9229 ret = ice_create_q_channels(vsi);
9230 if (ret) {
9231 netdev_err(netdev, "failed configuring queue channels\n");
9232 goto exit;
9233 } else {
9234 netdev_dbg(netdev, "successfully configured channels\n");
9235 }
9236 }
9237
9238 if (vsi->ch_rss_size)
9239 ice_vsi_cfg_rss_lut_key(vsi);
9240
9241 exit:
9242 	/* on error, reset all_numtc and all_enatc */
9243 if (ret) {
9244 vsi->all_numtc = 0;
9245 vsi->all_enatc = 0;
9246 }
9247 /* resume VSI */
9248 ice_ena_vsi(vsi, true);
9249
9250 return ret;
9251 }
9252
9253 static LIST_HEAD(ice_block_cb_list);
9254
9255 static int
9256 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9257 void *type_data)
9258 {
9259 struct ice_netdev_priv *np = netdev_priv(netdev);
9260 struct ice_pf *pf = np->vsi->back;
9261 bool locked = false;
9262 int err;
9263
9264 switch (type) {
9265 case TC_SETUP_BLOCK:
9266 return flow_block_cb_setup_simple(type_data,
9267 &ice_block_cb_list,
9268 ice_setup_tc_block_cb,
9269 np, np, true);
9270 case TC_SETUP_QDISC_MQPRIO:
9271 if (ice_is_eswitch_mode_switchdev(pf)) {
9272 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
9273 return -EOPNOTSUPP;
9274 }
9275
9276 if (pf->adev) {
9277 mutex_lock(&pf->adev_mutex);
9278 device_lock(&pf->adev->dev);
9279 locked = true;
9280 if (pf->adev->dev.driver) {
9281 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
9282 err = -EBUSY;
9283 goto adev_unlock;
9284 }
9285 }
9286
9287 /* setup traffic classifier for receive side */
9288 mutex_lock(&pf->tc_mutex);
9289 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9290 mutex_unlock(&pf->tc_mutex);
9291
9292 adev_unlock:
9293 if (locked) {
9294 device_unlock(&pf->adev->dev);
9295 mutex_unlock(&pf->adev_mutex);
9296 }
9297 return err;
9298 default:
9299 return -EOPNOTSUPP;
9300 }
9301 return -EOPNOTSUPP;
9302 }
9303
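/* Indirect TC blocks let the PF offload flower rules installed on other
 * netdevs whose traffic flows through this port, such as tunnel devices or
 * VLAN uppers. A hypothetical rule that would be steered through the
 * callbacks below:
 *
 *   tc filter add dev vxlan0 ingress protocol ip flower \
 *      enc_key_id 42 enc_dst_port 4789 action drop
 */
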
9304 static struct ice_indr_block_priv *
9305 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9306 struct net_device *netdev)
9307 {
9308 struct ice_indr_block_priv *cb_priv;
9309
9310 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9311 if (!cb_priv->netdev)
9312 return NULL;
9313 if (cb_priv->netdev == netdev)
9314 return cb_priv;
9315 }
9316 return NULL;
9317 }
9318
9319 static int
9320 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9321 void *indr_priv)
9322 {
9323 struct ice_indr_block_priv *priv = indr_priv;
9324 struct ice_netdev_priv *np = priv->np;
9325
9326 switch (type) {
9327 case TC_SETUP_CLSFLOWER:
9328 return ice_setup_tc_cls_flower(np, priv->netdev,
9329 (struct flow_cls_offload *)
9330 type_data);
9331 default:
9332 return -EOPNOTSUPP;
9333 }
9334 }
9335
9336 static int
9337 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9338 struct ice_netdev_priv *np,
9339 struct flow_block_offload *f, void *data,
9340 void (*cleanup)(struct flow_block_cb *block_cb))
9341 {
9342 struct ice_indr_block_priv *indr_priv;
9343 struct flow_block_cb *block_cb;
9344
9345 if (!ice_is_tunnel_supported(netdev) &&
9346 !(is_vlan_dev(netdev) &&
9347 vlan_dev_real_dev(netdev) == np->vsi->netdev))
9348 return -EOPNOTSUPP;
9349
9350 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9351 return -EOPNOTSUPP;
9352
9353 switch (f->command) {
9354 case FLOW_BLOCK_BIND:
9355 indr_priv = ice_indr_block_priv_lookup(np, netdev);
9356 if (indr_priv)
9357 return -EEXIST;
9358
9359 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9360 if (!indr_priv)
9361 return -ENOMEM;
9362
9363 indr_priv->netdev = netdev;
9364 indr_priv->np = np;
9365 list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9366
9367 block_cb =
9368 flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9369 indr_priv, indr_priv,
9370 ice_rep_indr_tc_block_unbind,
9371 f, netdev, sch, data, np,
9372 cleanup);
9373
9374 if (IS_ERR(block_cb)) {
9375 list_del(&indr_priv->list);
9376 kfree(indr_priv);
9377 return PTR_ERR(block_cb);
9378 }
9379 flow_block_cb_add(block_cb, f);
9380 list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9381 break;
9382 case FLOW_BLOCK_UNBIND:
9383 indr_priv = ice_indr_block_priv_lookup(np, netdev);
9384 if (!indr_priv)
9385 return -ENOENT;
9386
9387 block_cb = flow_block_cb_lookup(f->block,
9388 ice_indr_setup_block_cb,
9389 indr_priv);
9390 if (!block_cb)
9391 return -ENOENT;
9392
9393 flow_indr_block_cb_remove(block_cb, f);
9394
9395 list_del(&block_cb->driver_list);
9396 break;
9397 default:
9398 return -EOPNOTSUPP;
9399 }
9400 return 0;
9401 }
9402
9403 static int
9404 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9405 void *cb_priv, enum tc_setup_type type, void *type_data,
9406 void *data,
9407 void (*cleanup)(struct flow_block_cb *block_cb))
9408 {
9409 switch (type) {
9410 case TC_SETUP_BLOCK:
9411 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9412 data, cleanup);
9413
9414 default:
9415 return -EOPNOTSUPP;
9416 }
9417 }
9418
9419 /**
9420 * ice_open - Called when a network interface becomes active
9421 * @netdev: network interface device structure
9422 *
9423 * The open entry point is called when a network interface is made
9424 * active by the system (IFF_UP). At this point all resources needed
9425 * for transmit and receive operations are allocated, the interrupt
9426 * handler is registered with the OS, the netdev watchdog is enabled,
9427 * and the stack is notified that the interface is ready.
9428 *
9429 * Returns 0 on success, negative value on failure
9430 */
9431 int ice_open(struct net_device *netdev)
9432 {
9433 struct ice_netdev_priv *np = netdev_priv(netdev);
9434 struct ice_pf *pf = np->vsi->back;
9435
9436 if (ice_is_reset_in_progress(pf->state)) {
9437 netdev_err(netdev, "can't open net device while reset is in progress");
9438 return -EBUSY;
9439 }
9440
9441 return ice_open_internal(netdev);
9442 }
9443
9444 /**
9445 * ice_open_internal - Called when a network interface becomes active
9446 * @netdev: network interface device structure
9447 *
9448  * Internal ice_open implementation. Should not be called directly except
9449  * from ice_open and the reset handling routine.
9450 *
9451 * Returns 0 on success, negative value on failure
9452 */
9453 int ice_open_internal(struct net_device *netdev)
9454 {
9455 struct ice_netdev_priv *np = netdev_priv(netdev);
9456 struct ice_vsi *vsi = np->vsi;
9457 struct ice_pf *pf = vsi->back;
9458 struct ice_port_info *pi;
9459 int err;
9460
9461 if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9462 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9463 return -EIO;
9464 }
9465
9466 netif_carrier_off(netdev);
9467
9468 pi = vsi->port_info;
9469 err = ice_update_link_info(pi);
9470 if (err) {
9471 netdev_err(netdev, "Failed to get link info, error %d\n", err);
9472 return err;
9473 }
9474
9475 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9476
9477 /* Set PHY if there is media, otherwise, turn off PHY */
9478 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9479 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9480 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9481 err = ice_init_phy_user_cfg(pi);
9482 if (err) {
9483 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9484 err);
9485 return err;
9486 }
9487 }
9488
9489 err = ice_configure_phy(vsi);
9490 if (err) {
9491 netdev_err(netdev, "Failed to set physical link up, error %d\n",
9492 err);
9493 return err;
9494 }
9495 } else {
9496 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9497 ice_set_link(vsi, false);
9498 }
9499
9500 err = ice_vsi_open(vsi);
9501 if (err)
9502 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9503 vsi->vsi_num, vsi->vsw->sw_id);
9504
9505 /* Update existing tunnels information */
9506 udp_tunnel_get_rx_info(netdev);
9507
9508 return err;
9509 }
9510
9511 /**
9512 * ice_stop - Disables a network interface
9513 * @netdev: network interface device structure
9514 *
9515 * The stop entry point is called when an interface is de-activated by the OS,
9516 * and the netdevice enters the DOWN state. The hardware is still under the
9517 * driver's control, but the netdev interface is disabled.
9518 *
9519 * Returns success only - not allowed to fail
9520 */
9521 int ice_stop(struct net_device *netdev)
9522 {
9523 struct ice_netdev_priv *np = netdev_priv(netdev);
9524 struct ice_vsi *vsi = np->vsi;
9525 struct ice_pf *pf = vsi->back;
9526
9527 if (ice_is_reset_in_progress(pf->state)) {
9528 netdev_err(netdev, "can't stop net device while reset is in progress");
9529 return -EBUSY;
9530 }
9531
9532 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9533 int link_err = ice_force_phys_link_state(vsi, false);
9534
9535 if (link_err) {
9536 if (link_err == -ENOMEDIUM)
9537 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9538 vsi->vsi_num);
9539 else
9540 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9541 vsi->vsi_num, link_err);
9542
9543 ice_vsi_close(vsi);
9544 return -EIO;
9545 }
9546 }
9547
9548 ice_vsi_close(vsi);
9549
9550 return 0;
9551 }
9552
9553 /**
9554 * ice_features_check - Validate encapsulated packet conforms to limits
9555 * @skb: skb buffer
9556 * @netdev: This port's netdev
9557 * @features: Offload features that the stack believes apply
9558 */
9559 static netdev_features_t
9560 ice_features_check(struct sk_buff *skb,
9561 struct net_device __always_unused *netdev,
9562 netdev_features_t features)
9563 {
9564 bool gso = skb_is_gso(skb);
9565 size_t len;
9566
9567 /* No point in doing any of this if neither checksum nor GSO are
9568 * being requested for this frame. We can rule out both by just
9569 * checking for CHECKSUM_PARTIAL
9570 */
9571 if (skb->ip_summed != CHECKSUM_PARTIAL)
9572 return features;
9573
9574 /* We cannot support GSO if the MSS is going to be less than
9575 * 64 bytes. If it is then we need to drop support for GSO.
9576 */
9577 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9578 features &= ~NETIF_F_GSO_MASK;
9579
9580 len = skb_network_offset(skb);
9581 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9582 goto out_rm_features;
9583
9584 len = skb_network_header_len(skb);
9585 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9586 goto out_rm_features;
9587
9588 if (skb->encapsulation) {
9589 /* this must work for VXLAN frames AND IPIP/SIT frames, and in
9590 * the case of IPIP frames, the transport header pointer is
9591 * after the inner header! So check to make sure that this
9592 * is a GRE or UDP_TUNNEL frame before doing that math.
9593 */
9594 if (gso && (skb_shinfo(skb)->gso_type &
9595 (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9596 len = skb_inner_network_header(skb) -
9597 skb_transport_header(skb);
9598 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9599 goto out_rm_features;
9600 }
9601
9602 len = skb_inner_network_header_len(skb);
9603 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9604 goto out_rm_features;
9605 }
9606
9607 return features;
9608 out_rm_features:
9609 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9610 }
9611
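/* Worked example for the checks above (hypothetical VXLAN TSO frame):
 * skb_network_offset() = 14 (outer Ethernet), skb_network_header_len() = 20
 * (outer IPv4), inner network header minus transport header = 8 (UDP) +
 * 8 (VXLAN) + 14 (inner Ethernet) = 30, inner network header len = 20.
 * All lengths are even and within the Tx descriptor limits, so the
 * checksum/GSO offloads are retained for this frame.
 */
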
9612 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9613 .ndo_open = ice_open,
9614 .ndo_stop = ice_stop,
9615 .ndo_start_xmit = ice_start_xmit,
9616 .ndo_set_mac_address = ice_set_mac_address,
9617 .ndo_validate_addr = eth_validate_addr,
9618 .ndo_change_mtu = ice_change_mtu,
9619 .ndo_get_stats64 = ice_get_stats64,
9620 .ndo_tx_timeout = ice_tx_timeout,
9621 .ndo_bpf = ice_xdp_safe_mode,
9622 };
9623
9624 static const struct net_device_ops ice_netdev_ops = {
9625 .ndo_open = ice_open,
9626 .ndo_stop = ice_stop,
9627 .ndo_start_xmit = ice_start_xmit,
9628 .ndo_select_queue = ice_select_queue,
9629 .ndo_features_check = ice_features_check,
9630 .ndo_fix_features = ice_fix_features,
9631 .ndo_set_rx_mode = ice_set_rx_mode,
9632 .ndo_set_mac_address = ice_set_mac_address,
9633 .ndo_validate_addr = eth_validate_addr,
9634 .ndo_change_mtu = ice_change_mtu,
9635 .ndo_get_stats64 = ice_get_stats64,
9636 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
9637 .ndo_eth_ioctl = ice_eth_ioctl,
9638 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9639 .ndo_set_vf_mac = ice_set_vf_mac,
9640 .ndo_get_vf_config = ice_get_vf_cfg,
9641 .ndo_set_vf_trust = ice_set_vf_trust,
9642 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
9643 .ndo_set_vf_link_state = ice_set_vf_link_state,
9644 .ndo_get_vf_stats = ice_get_vf_stats,
9645 .ndo_set_vf_rate = ice_set_vf_bw,
9646 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9647 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9648 .ndo_setup_tc = ice_setup_tc,
9649 .ndo_set_features = ice_set_features,
9650 .ndo_bridge_getlink = ice_bridge_getlink,
9651 .ndo_bridge_setlink = ice_bridge_setlink,
9652 .ndo_fdb_add = ice_fdb_add,
9653 .ndo_fdb_del = ice_fdb_del,
9654 #ifdef CONFIG_RFS_ACCEL
9655 .ndo_rx_flow_steer = ice_rx_flow_steer,
9656 #endif
9657 .ndo_tx_timeout = ice_tx_timeout,
9658 .ndo_bpf = ice_xdp,
9659 .ndo_xdp_xmit = ice_xdp_xmit,
9660 .ndo_xsk_wakeup = ice_xsk_wakeup,
9661 };
9662