// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_vsi_vlan_ops.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
	switch (vsi_type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_CTRL:
		return "ICE_VSI_CTRL";
	case ICE_VSI_CHNL:
		return "ICE_VSI_CHNL";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This will
 * let all of the register writes complete when enabling/disabling the Rx rings
 * before waiting for the change in hardware to complete.
 */
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int ret = 0;
	u16 i;

	ice_for_each_rxq(vsi, i)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

	ice_flush(&vsi->back->hw);

	ice_for_each_rxq(vsi, i) {
		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* txq_map needs to have enough space to track both Tx (stack) rings
	 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
	 * so use num_possible_cpus() as we want to always provide XDP ring
	 * per CPU, regardless of queue count settings from user that might
	 * have come from ethtool's set_channels() callback;
	 */
	vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}
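
/* Sizing example (illustrative): with alloc_txq = 16 on a system with 8
 * possible CPUs, txq_map above gets 16 + 8 = 24 entries, leaving room to
 * track one XDP ring per CPU alongside the stack Tx rings.
 */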

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
		/* a user could change the values of num_[tr]x_desc using
		 * ethtool -G so we should keep those values instead of
		 * overwriting them with the defaults.
		 */
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	enum ice_vsi_type vsi_type = vsi->type;
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = vsi->vf;

	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
		return;

	switch (vsi_type) {
	case ICE_VSI_PF:
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		} else {
			vsi->alloc_txq = min3(pf->num_lan_msix,
					      ice_get_avail_txq_count(pf),
					      (u16)num_online_cpus());
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			} else {
				vsi->alloc_rxq = min3(pf->num_lan_msix,
						      ice_get_avail_rxq_count(pf),
						      (u16)num_online_cpus());
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
					   max_t(int, vsi->alloc_rxq,
						 vsi->alloc_txq));
		break;
	case ICE_VSI_VF:
		if (vf->num_req_qs)
			vf->num_vf_qs = vf->num_req_qs;
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* vf->num_msix includes the VF miscellaneous vector and the
		 * data queue interrupts. Since vsi->num_q_vectors is the
		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
		 * from the original vector count.
		 */
		vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_CTRL:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_CHNL:
		vsi->alloc_txq = 0;
		vsi->alloc_rxq = 0;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}
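
/* Illustrative PF sizing: with 64 LAN MSI-X vectors, 256 available Tx queues
 * and 16 online CPUs, alloc_txq = min3(64, 256, 16) = 16, and the vector
 * count becomes min(64, max(alloc_rxq, alloc_txq)).
 */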

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}
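
/* Illustrative example: with array = { A, B, NULL, C }, size = 4 and curr = 1,
 * the fast path sees that slot 2 is NULL and returns 2. With curr = 3 (the
 * last slot), the linear scan runs and again finds index 2. If every slot
 * were occupied, ICE_NO_VSI would be returned instead.
 */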

/**
 * ice_vsi_delete_from_hw - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	int status;

	ice_fltr_remove_all(vsi);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, status);

	kfree(ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* free the ring and vector containers */
	devm_kfree(dev, vsi->q_vectors);
	vsi->q_vectors = NULL;
	devm_kfree(dev, vsi->tx_rings);
	vsi->tx_rings = NULL;
	devm_kfree(dev, vsi->rx_rings);
	vsi->rx_rings = NULL;
	devm_kfree(dev, vsi->txq_map);
	vsi->txq_map = NULL;
	devm_kfree(dev, vsi->rxq_map);
	vsi->rxq_map = NULL;
}

/**
 * ice_vsi_free_stats - Free the ring statistics structures
 * @vsi: VSI pointer
 */
static void ice_vsi_free_stats(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;
	int i;

	if (vsi->type == ICE_VSI_CHNL)
		return;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	ice_for_each_alloc_txq(vsi, i) {
		if (vsi_stat->tx_ring_stats[i]) {
			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
		}
	}

	ice_for_each_alloc_rxq(vsi, i) {
		if (vsi_stat->rx_ring_stats[i]) {
			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
		}
	}

	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat->rx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
}

/**
 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
 * @vsi: VSI which is having stats allocated
 */
static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
{
	struct ice_ring_stats **tx_ring_stats;
	struct ice_ring_stats **rx_ring_stats;
	struct ice_vsi_stats *vsi_stats;
	struct ice_pf *pf = vsi->back;
	u16 i;

	vsi_stats = pf->vsi_stats[vsi->idx];
	tx_ring_stats = vsi_stats->tx_ring_stats;
	rx_ring_stats = vsi_stats->rx_ring_stats;

	/* Allocate Tx ring stats */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_tx_ring *ring;

		ring = vsi->tx_rings[i];
		ring_stats = tx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
			if (!ring_stats)
				goto err_out;

			WRITE_ONCE(tx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	/* Allocate Rx ring stats */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_ring_stats *ring_stats;
		struct ice_rx_ring *ring;

		ring = vsi->rx_rings[i];
		ring_stats = rx_ring_stats[i];

		if (!ring_stats) {
			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
			if (!ring_stats)
				goto err_out;

			WRITE_ONCE(rx_ring_stats[i], ring_stats);
		}

		ring->ring_stats = ring_stats;
	}

	return 0;

err_out:
	ice_vsi_free_stats(vsi);
	return -ENOMEM;
}
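
/* Note: any ring stats already present are reused rather than reallocated
 * above, so the counters persist across a VSI rebuild.
 */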

/**
 * ice_vsi_free - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 */
static void ice_vsi_free(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi || !vsi->back)
		return;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	pf->next_vsi = vsi->idx;

	ice_vsi_free_stats(vsi);
	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);
}

void ice_vsi_delete(struct ice_vsi *vsi)
{
	ice_vsi_delete_from_hw(vsi);
	ice_vsi_free(vsi);
}

/**
 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring)
		return IRQ_HANDLED;

#define FDIR_RX_DESC_CLEAN_BUDGET 64
	ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
	ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);

	return IRQ_HANDLED;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	q_vector->total_events++;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
 * @vsi: VSI pointer
 */
static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;
	if (!pf->vsi_stats)
		return -ENOENT;

	if (pf->vsi_stats[vsi->idx])
		/* realloc will happen in rebuild path */
		return 0;

	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
	if (!vsi_stat)
		return -ENOMEM;

	vsi_stat->tx_ring_stats =
		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
			GFP_KERNEL);
	if (!vsi_stat->tx_ring_stats)
		goto err_alloc_tx;

	vsi_stat->rx_ring_stats =
		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
			GFP_KERNEL);
	if (!vsi_stat->rx_ring_stats)
		goto err_alloc_rx;

	pf->vsi_stats[vsi->idx] = vsi_stat;

	return 0;

err_alloc_rx:
	kfree(vsi_stat->rx_ring_stats);
err_alloc_tx:
	kfree(vsi_stat->tx_ring_stats);
	kfree(vsi_stat);
	pf->vsi_stats[vsi->idx] = NULL;
	return -ENOMEM;
}

/**
 * ice_vsi_alloc_def - set default values for already allocated VSI
 * @vsi: ptr to VSI
 * @ch: ptr to channel
 */
static int
ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
{
	if (vsi->type != ICE_VSI_CHNL) {
		ice_vsi_set_num_qs(vsi);
		if (ice_vsi_alloc_arrays(vsi))
			return -ENOMEM;
	}

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_CTRL:
		/* Setup ctrl VSI MSIX irq handler */
		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
		break;
	case ICE_VSI_CHNL:
		if (!ch)
			return -EINVAL;

		vsi->num_rxq = ch->num_rxq;
		vsi->num_txq = ch->num_txq;
		vsi->next_base_q = ch->base_q;
		break;
	case ICE_VSI_VF:
	case ICE_VSI_LB:
		break;
	default:
		ice_vsi_free_arrays(vsi);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 *
 * Reserves a VSI index from the PF and allocates an empty VSI structure
 * without a type. The VSI structure must later be initialized by calling
 * ice_vsi_cfg().
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->back = pf;
	set_bit(ICE_VSI_DOWN, vsi->state);

	/* fill slot and make note of the index */
	vsi->idx = pf->next_vsi;
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);

unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the FD resources
 *
 * Returns 0 on success, -EPERM on no-op or -EIO on failure
 */
static int ice_alloc_fd_res(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u32 g_val, b_val;

	/* Flow Director filters are only allocated/assigned to the PF VSI or
	 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
	 * add/delete filters so resources are not allocated to it
	 */
	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
	      vsi->type == ICE_VSI_CHNL))
		return -EPERM;

	/* FD filters from guaranteed pool per VSI */
	g_val = pf->hw.func_caps.fd_fltr_guar;
	if (!g_val)
		return -EPERM;

	/* FD filters from best effort pool */
	b_val = pf->hw.func_caps.fd_fltr_best_effort;
	if (!b_val)
		return -EPERM;

	/* PF main VSI gets only 64 FD resources from guaranteed pool
	 * when ADQ is configured.
	 */
#define ICE_PF_VSI_GFLTR	64

	/* determine FD filter resources per VSI from shared (best effort) and
	 * dedicated pool
	 */
	if (vsi->type == ICE_VSI_PF) {
		vsi->num_gfltr = g_val;
		/* if MQPRIO is configured, main VSI doesn't get all FD
		 * resources from guaranteed pool. PF VSI gets 64 FD resources
		 */
		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
			if (g_val < ICE_PF_VSI_GFLTR)
				return -EPERM;
			/* allow bare minimum entries for PF VSI */
			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
		}

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else if (vsi->type == ICE_VSI_VF) {
		vsi->num_gfltr = 0;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else {
		struct ice_vsi *main_vsi;
		int numtc;

		main_vsi = ice_get_main_vsi(pf);
		if (!main_vsi)
			return -EPERM;

		if (!main_vsi->all_numtc)
			return -EINVAL;

		/* figure out ADQ numtc */
		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;

		/* only one TC while still asking for channel resources is an
		 * invalid config
		 */
		if (numtc < ICE_CHNL_START_TC)
			return -EPERM;

		g_val -= ICE_PF_VSI_GFLTR;
		/* each channel VSI gets an equal share of the guaranteed pool */
		vsi->num_gfltr = g_val / numtc;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	}

	return 0;
}
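
/* Worked example (illustrative, assuming ICE_CHNL_START_TC is 1): with
 * fd_fltr_guar = 128 and ADQ configured with all_numtc = 3 (numtc = 2
 * channel TCs), the PF VSI keeps ICE_PF_VSI_GFLTR = 64 guaranteed filters
 * and each channel VSI gets (128 - 64) / 2 = 32, while every VSI shares
 * the same best-effort quota.
 */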

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int ret;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (ret)
		return ret;
	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	ret = __ice_vsi_get_qs(&rx_qs_cfg);
	if (ret)
		return ret;
	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

	return 0;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
static void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	ice_for_each_alloc_txq(vsi, i) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	ice_for_each_alloc_rxq(vsi, i) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_is_rdma_ena
 * @pf: pointer to the PF struct
 *
 * returns true if RDMA is currently supported, false otherwise
 */
bool ice_is_rdma_ena(struct ice_pf *pf)
{
	return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}

/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes RSS input set for all flows that were configured
 * for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int status;

	if (ice_is_safe_mode(pf))
		return;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	devm_kfree(dev, vsi->rss_hkey_user);
	devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;
	u16 max_rss_size;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	max_rss_size = BIT(cap->rss_table_entry_width);
	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = (u16)cap->rss_table_size;
		if (vsi->type == ICE_VSI_CHNL)
			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
		else
			vsi->rss_size = min_t(u16, num_online_cpus(),
					      max_rss_size);
		vsi->rss_lut_type = ICE_LUT_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes.
		 */
		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
		vsi->rss_lut_type = ICE_LUT_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @hw: HW structure used to determine the VLAN mode of the device
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from the shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* allow all untagged/tagged packets by default on Tx */
	ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
						 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
	 *
	 * DVM - leave inner VLAN in packet by default
	 */
	if (ice_is_dvm_ena(hw)) {
		ctxt->info.inner_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
		ctxt->info.outer_vlan_flags =
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
				   ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
	}
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support; outer_tag_flags remains zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
	u16 num_txq_per_tc, num_rxq_per_tc;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u8 netdev_tc = 0;
	int i;

	if (!vsi->tc_cfg.numtc) {
		/* at least TC0 should be enabled by default */
		vsi->tc_cfg.numtc = 1;
		vsi->tc_cfg.ena_tc = 1;
	}

	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
	if (!num_rxq_per_tc)
		num_rxq_per_tc = 1;
	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!num_txq_per_tc)
		num_txq_per_tc = 1;

	/* find the (rounded up) power-of-2 of qcount */
	pow = (u16)order_base_2(num_rxq_per_tc);

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for queue offset for TC0, next 4 bits for the
	 * number of queues allocated to TC0. The number of queues is a
	 * power-of-2.
	 *
	 * If a TC is not enabled, the queue offset is set to 0 and one queue
	 * is allocated; this way, traffic for the given TC will be sent to
	 * the default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
		qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
		offset += num_rxq_per_tc;
		tx_count += num_txq_per_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* if offset is non-zero, it was calculated from the enabled TCs of
	 * this VSI; otherwise fall back to num_rxq_per_tc, which is always
	 * correct and non-zero because it is based on the VSI's allocated
	 * Rx queues, of which there is at least 1 (hence tx_count is also
	 * at least 1)
	 */
	if (offset)
		rx_count = offset;
	else
		rx_count = num_rxq_per_tc;

	if (rx_count > vsi->alloc_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
			rx_count, vsi->alloc_rxq);
		return -EINVAL;
	}

	if (tx_count > vsi->alloc_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
			tx_count, vsi->alloc_txq);
		return -EINVAL;
	}

	vsi->num_txq = tx_count;
	vsi->num_rxq = rx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);

	return 0;
}
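
/* Illustrative encoding: with ena_tc = 0x3 (numtc = 2) and alloc_rxq = 8,
 * num_rxq_per_tc = 4 and pow = order_base_2(4) = 2, so TC0 gets
 * qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, 0) |
 *        FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, 2)
 * and TC1 the same with offset 4, i.e. queues 0-3 and 4-7 respectively.
 */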

/**
 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 dflt_q_group, dflt_q_prio;
	u16 dflt_q, report_q, val;

	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
		return;

	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
	ctxt->info.valid_sections |= cpu_to_le16(val);
	dflt_q = 0;
	dflt_q_group = 0;
	report_q = 0;
	dflt_q_prio = 0;

	/* enable flow director filtering/programming */
	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
	ctxt->info.fd_options = cpu_to_le16(val);
	/* max of allocated flow director filters */
	ctxt->info.max_fd_fltr_dedicated =
		cpu_to_le16(vsi->num_gfltr);
	/* max of shared flow director filters any VSI may program */
	ctxt->info.max_fd_fltr_shared =
		cpu_to_le16(vsi->num_bfltr);
	/* default queue index within the VSI of the default FD */
	val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q);
	/* target queue or queue group to the FD filter */
	val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group);
	ctxt->info.fd_def_q = cpu_to_le16(val);
	/* queue index on which FD filter completion is reported */
	val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q);
	/* priority of the default qindex action */
	val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio);
	ctxt->info.fd_report_opt = cpu_to_le16(val);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct device *dev;
	struct ice_pf *pf;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table, which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		break;
	default:
		dev_dbg(dev, "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	}

	hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
	vsi->rss_hfunc = hash_type;

	ctxt->info.q_opt_rss =
		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
}

static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	struct ice_pf *pf = vsi->back;
	u16 qcount, qmap;
	u8 offset = 0;
	int pow;

	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);

	pow = order_base_2(qcount);
	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
	qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);

	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
}
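
/* Example (illustrative): a channel with num_rxq = 6 on a PF with only 4 LAN
 * MSI-X vectors maps qcount = 4 queues, so pow = 2 and the TC0 entry
 * advertises 2^2 queues starting at vsi->next_base_q.
 */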

/**
 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
 * @vsi: VSI to check whether or not VLAN pruning is enabled.
 *
 * returns true if Rx VLAN pruning is enabled and false otherwise.
 */
static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @vsi_flags: VSI configuration flags
 *
 * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
 * reconfigure an existing context.
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_CHNL:
		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
	 * prune enabled
	 */
	if (vsi->type == ICE_VSI_CHNL) {
		struct ice_vsi *main_vsi;

		main_vsi = ice_get_main_vsi(pf);
		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
			ctxt->info.sw_flags2 |=
				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
		else
			ctxt->info.sw_flags2 &=
				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	ice_set_dflt_vsi_ctx(hw, ctxt);
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		ice_set_fd_vsi_ctx(ctxt, vsi);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
	    vsi->type != ICE_VSI_CTRL) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating the VSI context, make sure to set valid_sections
		 * to indicate which section of the VSI context is being updated
		 */
		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	if (vsi->type == ICE_VSI_CHNL) {
		ice_chnl_vsi_setup_q_map(vsi, ctxt);
	} else {
		ret = ice_vsi_setup_q_map(vsi, ctxt);
		if (ret)
			goto out;

		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			/* the VSI is being updated, so indicate which
			 * sections of the VSI context are being modified
			 */
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	if (vsi_flags & ICE_VSI_FLAG_INIT) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	/* Avoid stale references by clearing map from vector to ring */
	if (vsi->q_vectors) {
		ice_for_each_q_vector(vsi, i) {
			struct ice_q_vector *q_vector = vsi->q_vectors[i];

			if (q_vector) {
				q_vector->tx.tx_ring = NULL;
				q_vector->rx.rx_ring = NULL;
			}
		}
	}

	if (vsi->tx_rings) {
		ice_for_each_alloc_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				WRITE_ONCE(vsi->tx_rings[i], NULL);
			}
		}
	}
	if (vsi->rx_rings) {
		ice_for_each_alloc_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				WRITE_ONCE(vsi->rx_rings[i], NULL);
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_tx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->vsi = vsi;
		ring->tx_tstamps = &pf->ptp.port.tx;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		ring->txq_teid = ICE_INVAL_TEID;
		if (dvm_ena)
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
		else
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
		WRITE_ONCE(vsi->tx_rings[i], ring);
	}

	/* Allocate Rx rings */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_rx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		ring->cached_phctime = pf->ptp.cached_phc_time;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
}
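
/* When enabling, ice_fill_rss_lut() spreads traffic across the active queues
 * (effectively lut[i] = i % rss_size); disabling simply writes an all-zero
 * LUT so every hash lands on queue 0.
 */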

/**
 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
 * @vsi: VSI to be configured
 * @disable: set to true to have FCS / CRC in the frame data
 */
void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
{
	int i;

	ice_for_each_rxq(vsi, i)
		if (disable)
			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
		else
			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u8 *lut, *key;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
	} else {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);

		/* If orig_rss_size is valid and less than the current main
		 * VSI rss_size, update the main VSI's rss_size to be
		 * orig_rss_size so that when tc-qdisc is deleted, the main
		 * VSI RSS table gets programmed to be correct (whatever it
		 * was prior to setup-tc for the ADQ config).
		 */
		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
		    vsi->orig_rss_size <= vsi->num_rxq) {
			vsi->rss_size = vsi->orig_rss_size;
			/* now orig_rss_size is used, reset it to zero */
			vsi->orig_rss_size = 0;
		}
	}

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	if (err) {
		dev_err(dev, "set_rss_lut failed, error %d\n", err);
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	err = ice_set_rss_key(vsi, key);
	if (err)
		dev_err(dev, "set_rss_key failed, error %d\n", err);

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}

/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
	if (status)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
	/* configure RSS for IPv4 with input set IP src/dst */
	{ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for IPv6 with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for sctp4 with input set IP src/dst - only support
	 * RSS on SCTPv4 on outer headers (non-tunneled)
	 */
	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpc4 with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpc4t with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu4 with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu4e with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu4u with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu4d with input set IPv4 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},

	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for sctp6 with input set IPv6 src/dst - only support
	 * RSS on SCTPv6 on outer headers (non-tunneled)
	 */
	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
	{ICE_FLOW_SEG_HDR_ESP,
	 ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpc6 with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpc6t with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu6 with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu6e with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu6u with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for gtpu6d with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
};

/**
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called after successful download package call
 * during initialization of PF. Since the downloaded package will erase the
 * RSS section, this function will configure RSS input sets for different
 * flow types. The last profile added has the highest priority, therefore 2
 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
 * (i.e. IPv4 src/dst TCP src/dst port).
 */
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
	u16 vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int status;
	u32 i;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi_num);
		return;
	}
	for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
		const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];

		status = ice_add_rss_cfg(hw, vsi, cfg);
		if (status)
			dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
				cfg->addl_hdrs, cfg->hash_flds,
				cfg->hdr_type, cfg->symm);
	}
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state, false otherwise
 */
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_pf *pf = vsi->back;
	u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	if (ice_is_reset_in_progress(pf->state))
		vsi->stat_offsets_loaded = false;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
 * @hw: HW pointer
 * @pf_q: index of the Rx queue in the PF's queue space
 * @rxdid: flexible descriptor RXDID
 * @prio: priority for the RXDID for this queue
 * @ena_ts: true to enable timestamp and false to disable timestamp
 */
void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
			bool ena_ts)
{
	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));

	/* clear any previous values */
	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
		    QRXFLXP_CNTXT_RXDID_PRIO_M |
		    QRXFLXP_CNTXT_TS_M);

	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid);
	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);

	if (ena_ts)
		/* Enable TimeSync on this queue */
		regval |= QRXFLXP_CNTXT_TS_M;

	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;

	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}
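
/* Example (illustrative, assuming a 4 usec granularity): intrl = 8 yields
 * val = 2 with GLINT_RATE_INTRL_ENA_M set, while intrl = 0 returns 0 and
 * leaves rate limiting disabled.
 */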
1777
1778 /**
1779 * ice_write_intrl - write throttle rate limit to interrupt specific register
1780 * @q_vector: pointer to interrupt specific structure
1781 * @intrl: throttle rate limit in microseconds to write
1782 */
ice_write_intrl(struct ice_q_vector * q_vector,u8 intrl)1783 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
1784 {
1785 struct ice_hw *hw = &q_vector->vsi->back->hw;
1786
1787 wr32(hw, GLINT_RATE(q_vector->reg_idx),
1788 ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
1789 }
1790
ice_pull_qvec_from_rc(struct ice_ring_container * rc)1791 static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
1792 {
1793 switch (rc->type) {
1794 case ICE_RX_CONTAINER:
1795 if (rc->rx_ring)
1796 return rc->rx_ring->q_vector;
1797 break;
1798 case ICE_TX_CONTAINER:
1799 if (rc->tx_ring)
1800 return rc->tx_ring->q_vector;
1801 break;
1802 default:
1803 break;
1804 }
1805
1806 return NULL;
1807 }
1808
1809 /**
1810 * __ice_write_itr - write throttle rate to register
1811 * @q_vector: pointer to interrupt data structure
1812 * @rc: pointer to ring container
1813 * @itr: throttle rate in microseconds to write
1814 */
1815 static void __ice_write_itr(struct ice_q_vector *q_vector,
1816 struct ice_ring_container *rc, u16 itr)
1817 {
1818 struct ice_hw *hw = &q_vector->vsi->back->hw;
1819
1820 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1821 ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
1822 }
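/* Illustration of the encoding above, assuming the driver's 2 usec ITR
 * granularity (i.e. ICE_ITR_GRAN_S == 1): an itr of 50 usecs is aligned
 * to the register granularity by ITR_REG_ALIGN() and then written to
 * GLINT_ITR as 50 >> 1 == 25 interval units.
 */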
1823
1824 /**
1825 * ice_write_itr - write throttle rate to queue specific register
1826 * @rc: pointer to ring container
1827 * @itr: throttle rate in microseconds to write
1828 */
1829 void ice_write_itr(struct ice_ring_container *rc, u16 itr)
1830 {
1831 struct ice_q_vector *q_vector;
1832
1833 q_vector = ice_pull_qvec_from_rc(rc);
1834 if (!q_vector)
1835 return;
1836
1837 __ice_write_itr(q_vector, rc, itr);
1838 }
1839
1840 /**
1841 * ice_set_q_vector_intrl - set up interrupt rate limiting
1842 * @q_vector: the vector to be configured
1843 *
1844 * Interrupt rate limiting is local to the vector, not per-queue, so we must
1845 * detect if either ring container has dynamic moderation enabled to decide
1846 * what to set the interrupt rate limit to via INTRL settings. In the case that
1847 * dynamic moderation is disabled on both, write the value with the cached
1848 * setting to make sure INTRL register matches the user visible value.
1849 */
1850 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
1851 {
1852 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
1853 /* when dynamic moderation is enabled, cap each vector at
1854 * 4 us, i.e. no more than 250,000 ints/sec. This allows low
1855 * latency while still staying below 500,000 interrupts per
1856 * second, which reduces CPU load a bit at the lowest latency
1857 * setting. The 4 here is a value in microseconds.
1858 */
1859 ice_write_intrl(q_vector, 4);
1860 } else {
1861 ice_write_intrl(q_vector, q_vector->intrl);
1862 }
1863 }
1864
1865 /**
1866 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1867 * @vsi: the VSI being configured
1868 *
1869 * This configures MSIX mode interrupts for the PF VSI, and should not be used
1870 * for the VF VSI.
1871 */
1872 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1873 {
1874 struct ice_pf *pf = vsi->back;
1875 struct ice_hw *hw = &pf->hw;
1876 u16 txq = 0, rxq = 0;
1877 int i, q;
1878
1879 ice_for_each_q_vector(vsi, i) {
1880 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1881 u16 reg_idx = q_vector->reg_idx;
1882
1883 ice_cfg_itr(hw, q_vector);
1884
1885 /* Both the Transmit Queue Interrupt Cause Control register
1886 * and the Receive Queue Interrupt Cause Control register
1887 * expect the MSIX_INDX field to be the vector index
1888 * within the function space, not the absolute
1889 * vector index across the PF or across the device.
1890 * For SR-IOV VF VSIs the queue vector index always starts
1891 * at 1, since the first vector index (0) is used for OICR
1892 * in VF space. Since VMDq and other PF VSIs are within
1893 * the PF function space, use the vector index that is
1894 * tracked for this PF.
1895 */
1896 for (q = 0; q < q_vector->num_ring_tx; q++) {
1897 ice_cfg_txq_interrupt(vsi, txq, reg_idx,
1898 q_vector->tx.itr_idx);
1899 txq++;
1900 }
1901
1902 for (q = 0; q < q_vector->num_ring_rx; q++) {
1903 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
1904 q_vector->rx.itr_idx);
1905 rxq++;
1906 }
1907 }
1908 }
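/* As an illustration of the mapping above (hypothetical configuration):
 * a PF VSI with 2 q_vectors and 4 Tx/4 Rx rings, distributed evenly by
 * ice_vsi_map_rings_to_vectors(), programs txq/rxq 0-1 with vector 0's
 * reg_idx and txq/rxq 2-3 with vector 1's reg_idx.
 */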
1909
1910 /**
1911 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
1912 * @vsi: the VSI whose rings are to be enabled
1913 *
1914 * Returns 0 on success and a negative value on error
1915 */
1916 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
1917 {
1918 return ice_vsi_ctrl_all_rx_rings(vsi, true);
1919 }
1920
1921 /**
1922 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
1923 * @vsi: the VSI whose rings are to be disabled
1924 *
1925 * Returns 0 on success and a negative value on error
1926 */
1927 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
1928 {
1929 return ice_vsi_ctrl_all_rx_rings(vsi, false);
1930 }
1931
1932 /**
1933 * ice_vsi_stop_tx_rings - Disable Tx rings
1934 * @vsi: the VSI being configured
1935 * @rst_src: reset source
1936 * @rel_vmvf_num: Relative ID of VF/VM
1937 * @rings: Tx ring array to be stopped
1938 * @count: number of Tx ring array elements
1939 */
1940 static int
1941 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1942 u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
1943 {
1944 u16 q_idx;
1945
1946 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
1947 return -EINVAL;
1948
1949 for (q_idx = 0; q_idx < count; q_idx++) {
1950 struct ice_txq_meta txq_meta = { };
1951 int status;
1952
1953 if (!rings || !rings[q_idx])
1954 return -EINVAL;
1955
1956 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
1957 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
1958 rings[q_idx], &txq_meta);
1959
1960 if (status)
1961 return status;
1962 }
1963
1964 return 0;
1965 }
1966
1967 /**
1968 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
1969 * @vsi: the VSI being configured
1970 * @rst_src: reset source
1971 * @rel_vmvf_num: Relative ID of VF/VM
1972 */
1973 int
1974 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1975 u16 rel_vmvf_num)
1976 {
1977 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
1978 }
1979
1980 /**
1981 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
1982 * @vsi: the VSI being configured
1983 */
1984 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
1985 {
1986 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
1987 }
1988
1989 /**
1990 * ice_vsi_is_rx_queue_active - check if at least one Rx queue is active
1991 * @vsi: the VSI being configured
1992 *
1993 * Return true if at least one queue is active.
1994 */
1995 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
1996 {
1997 struct ice_pf *pf = vsi->back;
1998 struct ice_hw *hw = &pf->hw;
1999 int i;
2000
2001 ice_for_each_rxq(vsi, i) {
2002 u32 rx_reg;
2003 int pf_q;
2004
2005 pf_q = vsi->rxq_map[i];
2006 rx_reg = rd32(hw, QRX_CTRL(pf_q));
2007 if (rx_reg & QRX_CTRL_QENA_STAT_M)
2008 return true;
2009 }
2010
2011 return false;
2012 }
2013
2014 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2015 {
2016 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
2017 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
2018 vsi->tc_cfg.numtc = 1;
2019 return;
2020 }
2021
2022 /* set VSI TC information based on DCB config */
2023 ice_vsi_set_dcb_tc_cfg(vsi);
2024 }
2025
2026 /**
2027 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2028 * @vsi: the VSI being configured
2029 * @tx: bool to determine Tx or Rx rule
2030 * @create: bool to determine create or remove Rule
2031 */
2032 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2033 {
2034 int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
2035 enum ice_sw_fwd_act_type act);
2036 struct ice_pf *pf = vsi->back;
2037 struct device *dev;
2038 int status;
2039
2040 dev = ice_pf_to_dev(pf);
2041 eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
2042
2043 if (tx) {
2044 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
2045 ICE_DROP_PACKET);
2046 } else {
2047 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
2048 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
2049 create);
2050 } else {
2051 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
2052 ICE_FWD_TO_VSI);
2053 }
2054 }
2055
2056 if (status)
2057 dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
2058 create ? "adding" : "removing", tx ? "TX" : "RX",
2059 vsi->vsi_num, status);
2060 }
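/* Typical usage, as seen later in this file: ice_vsi_setup() calls
 * ice_cfg_sw_lldp(vsi, true, true) to add the Tx drop rule for the PF
 * VSI, and ice_vsi_decfg() calls ice_cfg_sw_lldp(vsi, false, false) to
 * remove the Rx rule when the FW LLDP agent is not running.
 */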
2061
2062 /**
2063 * ice_set_agg_vsi - sets up scheduler aggregator node and moves VSI into it
2064 * @vsi: pointer to the VSI
2065 *
2066 * This function will allocate a new scheduler aggregator node if needed and
2067 * will move the specified VSI into it.
2068 */
2069 static void ice_set_agg_vsi(struct ice_vsi *vsi)
2070 {
2071 struct device *dev = ice_pf_to_dev(vsi->back);
2072 struct ice_agg_node *agg_node_iter = NULL;
2073 u32 agg_id = ICE_INVALID_AGG_NODE_ID;
2074 struct ice_agg_node *agg_node = NULL;
2075 int node_offset, max_agg_nodes = 0;
2076 struct ice_port_info *port_info;
2077 struct ice_pf *pf = vsi->back;
2078 u32 agg_node_id_start = 0;
2079 int status;
2080
2081 /* create (as needed) scheduler aggregator node and move VSI into
2082 * corresponding aggregator node
2083 * - PF aggregator node contains VSIs of type _PF and _CTRL
2084 * - VF aggregator nodes will contain VF VSIs
2085 */
2086 port_info = pf->hw.port_info;
2087 if (!port_info)
2088 return;
2089
2090 switch (vsi->type) {
2091 case ICE_VSI_CTRL:
2092 case ICE_VSI_CHNL:
2093 case ICE_VSI_LB:
2094 case ICE_VSI_PF:
2095 max_agg_nodes = ICE_MAX_PF_AGG_NODES;
2096 agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
2097 agg_node_iter = &pf->pf_agg_node[0];
2098 break;
2099 case ICE_VSI_VF:
2100 /* a user can create 'n' VFs on a given PF, but an aggregator
2101 * node can have at most 64 children. The following code handles
2102 * aggregator(s) for VF VSIs: it either selects an agg_node that
2103 * was already created, provided num_vsis < 64, or selects the
2104 * next available node, which will then be created
2105 */
2106 max_agg_nodes = ICE_MAX_VF_AGG_NODES;
2107 agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
2108 agg_node_iter = &pf->vf_agg_node[0];
2109 break;
2110 default:
2111 /* other VSI type, handle later if needed */
2112 dev_dbg(dev, "unexpected VSI type %s\n",
2113 ice_vsi_type_str(vsi->type));
2114 return;
2115 }
2116
2117 /* find the appropriate aggregator node */
2118 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
2119 /* see if we can find space in previously created
2120 * node if num_vsis < 64, otherwise skip
2121 */
2122 if (agg_node_iter->num_vsis &&
2123 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2124 agg_node_iter++;
2125 continue;
2126 }
2127
2128 if (agg_node_iter->valid &&
2129 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2130 agg_id = agg_node_iter->agg_id;
2131 agg_node = agg_node_iter;
2132 break;
2133 }
2134
2135 /* find unclaimed agg_id */
2136 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2137 agg_id = node_offset + agg_node_id_start;
2138 agg_node = agg_node_iter;
2139 break;
2140 }
2141 /* move to next agg_node */
2142 agg_node_iter++;
2143 }
2144
2145 if (!agg_node)
2146 return;
2147
2148 /* if selected aggregator node was not created, create it */
2149 if (!agg_node->valid) {
2150 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
2151 (u8)vsi->tc_cfg.ena_tc);
2152 if (status) {
2153 dev_err(dev, "unable to create aggregator node with agg_id %u\n",
2154 agg_id);
2155 return;
2156 }
2157 /* aggregator node is created, store the needed info */
2158 agg_node->valid = true;
2159 agg_node->agg_id = agg_id;
2160 }
2161
2162 /* move VSI to corresponding aggregator node */
2163 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2164 (u8)vsi->tc_cfg.ena_tc);
2165 if (status) {
2166 dev_err(dev, "unable to move VSI idx %u into aggregator %u node",
2167 vsi->idx, agg_id);
2168 return;
2169 }
2170
2171 /* keep active children count for aggregator node */
2172 agg_node->num_vsis++;
2173
2174 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved
2175 * to aggregator node
2176 */
2177 vsi->agg_node = agg_node;
2178 dev_dbg(dev, "successfully moved VSI idx %u tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
2179 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2180 vsi->agg_node->num_vsis);
2181 }
2182
2183 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
2184 {
2185 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2186 struct device *dev = ice_pf_to_dev(pf);
2187 int ret, i;
2188
2189 /* configure VSI nodes based on number of queues and TC's */
2190 ice_for_each_traffic_class(i) {
2191 if (!(vsi->tc_cfg.ena_tc & BIT(i)))
2192 continue;
2193
2194 if (vsi->type == ICE_VSI_CHNL) {
2195 if (!vsi->alloc_txq && vsi->num_txq)
2196 max_txqs[i] = vsi->num_txq;
2197 else
2198 max_txqs[i] = pf->num_lan_tx;
2199 } else {
2200 max_txqs[i] = vsi->alloc_txq;
2201 }
2202
2203 if (vsi->type == ICE_VSI_PF)
2204 max_txqs[i] += vsi->num_xdp_txq;
2205 }
2206
2207 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
2208 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2209 max_txqs);
2210 if (ret) {
2211 dev_err(dev, "VSI %d failed lan queue config, error %d\n",
2212 vsi->vsi_num, ret);
2213 return ret;
2214 }
2215
2216 return 0;
2217 }
2218
2219 /**
2220 * ice_vsi_cfg_def - configure default VSI based on the type
2221 * @vsi: pointer to VSI
2222 */
2223 static int ice_vsi_cfg_def(struct ice_vsi *vsi)
2224 {
2225 struct device *dev = ice_pf_to_dev(vsi->back);
2226 struct ice_pf *pf = vsi->back;
2227 int ret;
2228
2229 vsi->vsw = pf->first_sw;
2230
2231 ret = ice_vsi_alloc_def(vsi, vsi->ch);
2232 if (ret)
2233 return ret;
2234
2235 /* allocate memory for Tx/Rx ring stat pointers */
2236 ret = ice_vsi_alloc_stat_arrays(vsi);
2237 if (ret)
2238 goto unroll_vsi_alloc;
2239
2240 ice_alloc_fd_res(vsi);
2241
2242 ret = ice_vsi_get_qs(vsi);
2243 if (ret) {
2244 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2245 vsi->idx);
2246 goto unroll_vsi_alloc_stat;
2247 }
2248
2249 /* set RSS capabilities */
2250 ice_vsi_set_rss_params(vsi);
2251
2252 /* set TC configuration */
2253 ice_vsi_set_tc_cfg(vsi);
2254
2255 /* create the VSI */
2256 ret = ice_vsi_init(vsi, vsi->flags);
2257 if (ret)
2258 goto unroll_get_qs;
2259
2260 ice_vsi_init_vlan_ops(vsi);
2261
2262 switch (vsi->type) {
2263 case ICE_VSI_CTRL:
2264 case ICE_VSI_PF:
2265 ret = ice_vsi_alloc_q_vectors(vsi);
2266 if (ret)
2267 goto unroll_vsi_init;
2268
2269 ret = ice_vsi_alloc_rings(vsi);
2270 if (ret)
2271 goto unroll_vector_base;
2272
2273 ret = ice_vsi_alloc_ring_stats(vsi);
2274 if (ret)
2275 goto unroll_vector_base;
2276
2277 if (ice_is_xdp_ena_vsi(vsi)) {
2278 ret = ice_vsi_determine_xdp_res(vsi);
2279 if (ret)
2280 goto unroll_vector_base;
2281 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
2282 ICE_XDP_CFG_PART);
2283 if (ret)
2284 goto unroll_vector_base;
2285 }
2286
2287 ice_vsi_map_rings_to_vectors(vsi);
2288
2289 /* Associate q_vector rings to napi */
2290 ice_vsi_set_napi_queues(vsi);
2291
2292 vsi->stat_offsets_loaded = false;
2293
2294 /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
2295 if (vsi->type != ICE_VSI_CTRL)
2296 /* Do not exit if configuring RSS had an issue, so that we at
2297 * least receive traffic on the first queue. Hence there is
2298 * no need to capture the return value
2299 */
2300 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2301 ice_vsi_cfg_rss_lut_key(vsi);
2302 ice_vsi_set_rss_flow_fld(vsi);
2303 }
2304 ice_init_arfs(vsi);
2305 break;
2306 case ICE_VSI_CHNL:
2307 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2308 ice_vsi_cfg_rss_lut_key(vsi);
2309 ice_vsi_set_rss_flow_fld(vsi);
2310 }
2311 break;
2312 case ICE_VSI_VF:
2313 /* The VF driver will take care of creating a netdev for this type
2314 * and of mapping queues to vectors through Virtchnl; the PF driver
2315 * only creates a VSI and the corresponding structures for
2316 * bookkeeping purposes
2317 */
2318 ret = ice_vsi_alloc_q_vectors(vsi);
2319 if (ret)
2320 goto unroll_vsi_init;
2321
2322 ret = ice_vsi_alloc_rings(vsi);
2323 if (ret)
2324 goto unroll_alloc_q_vector;
2325
2326 ret = ice_vsi_alloc_ring_stats(vsi);
2327 if (ret)
2328 goto unroll_vector_base;
2329
2330 vsi->stat_offsets_loaded = false;
2331
2332 /* Do not exit if configuring RSS had an issue, so that we at least
2333 * receive traffic on the first queue. Hence there is no need to
2334 * capture the return value
2335 */
2336 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2337 ice_vsi_cfg_rss_lut_key(vsi);
2338 ice_vsi_set_vf_rss_flow_fld(vsi);
2339 }
2340 break;
2341 case ICE_VSI_LB:
2342 ret = ice_vsi_alloc_rings(vsi);
2343 if (ret)
2344 goto unroll_vsi_init;
2345
2346 ret = ice_vsi_alloc_ring_stats(vsi);
2347 if (ret)
2348 goto unroll_vector_base;
2349
2350 break;
2351 default:
2352 /* clean up the resources and exit */
2353 ret = -EINVAL;
2354 goto unroll_vsi_init;
2355 }
2356
2357 return 0;
2358
2359 unroll_vector_base:
2360 /* reclaim SW interrupts back to the common pool */
2361 unroll_alloc_q_vector:
2362 ice_vsi_free_q_vectors(vsi);
2363 unroll_vsi_init:
2364 ice_vsi_delete_from_hw(vsi);
2365 unroll_get_qs:
2366 ice_vsi_put_qs(vsi);
2367 unroll_vsi_alloc_stat:
2368 ice_vsi_free_stats(vsi);
2369 unroll_vsi_alloc:
2370 ice_vsi_free_arrays(vsi);
2371 return ret;
2372 }
2373
2374 /**
2375 * ice_vsi_cfg - configure a previously allocated VSI
2376 * @vsi: pointer to VSI
2377 */
2378 int ice_vsi_cfg(struct ice_vsi *vsi)
2379 {
2380 struct ice_pf *pf = vsi->back;
2381 int ret;
2382
2383 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
2384 return -EINVAL;
2385
2386 ret = ice_vsi_cfg_def(vsi);
2387 if (ret)
2388 return ret;
2389
2390 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
2391 if (ret)
2392 ice_vsi_decfg(vsi);
2393
2394 if (vsi->type == ICE_VSI_CTRL) {
2395 if (vsi->vf) {
2396 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
2397 vsi->vf->ctrl_vsi_idx = vsi->idx;
2398 } else {
2399 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
2400 pf->ctrl_vsi_idx = vsi->idx;
2401 }
2402 }
2403
2404 return ret;
2405 }
2406
2407 /**
2408 * ice_vsi_decfg - remove all VSI configuration
2409 * @vsi: pointer to VSI
2410 */
2411 void ice_vsi_decfg(struct ice_vsi *vsi)
2412 {
2413 struct ice_pf *pf = vsi->back;
2414 int err;
2415
2416 /* The Rx rule will only exist (and need removing) if the
2417 * LLDP FW engine is currently stopped
2418 */
2419 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
2420 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
2421 ice_cfg_sw_lldp(vsi, false, false);
2422
2423 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2424 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
2425 if (err)
2426 dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
2427 vsi->vsi_num, err);
2428
2429 if (ice_is_xdp_ena_vsi(vsi))
2430 /* return value check can be skipped here, it always returns
2431 * 0 if reset is in progress
2432 */
2433 ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
2434
2435 ice_vsi_clear_rings(vsi);
2436 ice_vsi_free_q_vectors(vsi);
2437 ice_vsi_put_qs(vsi);
2438 ice_vsi_free_arrays(vsi);
2439
2440 /* SR-IOV determines needed MSIX resources all at once instead of per
2441 * VSI since when VFs are spawned we know how many VFs there are and how
2442 * many interrupts each VF needs. SR-IOV MSIX resources are also
2443 * cleared in the same manner.
2444 */
2445
2446 if (vsi->type == ICE_VSI_VF &&
2447 vsi->agg_node && vsi->agg_node->valid)
2448 vsi->agg_node->num_vsis--;
2449 }
2450
2451 /**
2452 * ice_vsi_setup - Set up a VSI by a given type
2453 * @pf: board private structure
2454 * @params: parameters to use when creating the VSI
2455 *
2456 * This allocates the sw VSI structure and its queue resources.
2457 *
2458 * Returns pointer to the successfully allocated and configured VSI sw struct on
2459 * success, NULL on failure.
2460 */
2461 struct ice_vsi *
2462 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
2463 {
2464 struct device *dev = ice_pf_to_dev(pf);
2465 struct ice_vsi *vsi;
2466 int ret;
2467
2468 /* ice_vsi_setup can only initialize a new VSI, and we must have
2469 * a port_info structure for it.
2470 */
2471 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
2472 WARN_ON(!params->port_info))
2473 return NULL;
2474
2475 vsi = ice_vsi_alloc(pf);
2476 if (!vsi) {
2477 dev_err(dev, "could not allocate VSI\n");
2478 return NULL;
2479 }
2480
2481 vsi->params = *params;
2482 ret = ice_vsi_cfg(vsi);
2483 if (ret)
2484 goto err_vsi_cfg;
2485
2486 /* Add a switch rule of lookup type ETHERTYPE to drop all Tx Flow
2487 * Control Frames from VSIs, and restrict malicious VFs from sending
2488 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
2489 * The rule is added once for the PF VSI in order to create the
2490 * appropriate recipe, since the VSI/VSI list is ignored with a drop
2491 * action. Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
2492 * be dropped so that VFs cannot send LLDP packets to reconfig DCB
2493 * settings in the HW.
2494 */
2495 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
2496 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
2497 ICE_DROP_PACKET);
2498 ice_cfg_sw_lldp(vsi, true, true);
2499 }
2500
2501 if (!vsi->agg_node)
2502 ice_set_agg_vsi(vsi);
2503
2504 return vsi;
2505
2506 err_vsi_cfg:
2507 ice_vsi_free(vsi);
2508
2509 return NULL;
2510 }
2511
2512 /**
2513 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2514 * @vsi: the VSI being cleaned up
2515 */
2516 static void ice_vsi_release_msix(struct ice_vsi *vsi)
2517 {
2518 struct ice_pf *pf = vsi->back;
2519 struct ice_hw *hw = &pf->hw;
2520 u32 txq = 0;
2521 u32 rxq = 0;
2522 int i, q;
2523
2524 ice_for_each_q_vector(vsi, i) {
2525 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2526
2527 ice_write_intrl(q_vector, 0);
2528 for (q = 0; q < q_vector->num_ring_tx; q++) {
2529 ice_write_itr(&q_vector->tx, 0);
2530 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2531 if (ice_is_xdp_ena_vsi(vsi)) {
2532 u32 xdp_txq = txq + vsi->num_xdp_txq;
2533
2534 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2535 }
2536 txq++;
2537 }
2538
2539 for (q = 0; q < q_vector->num_ring_rx; q++) {
2540 ice_write_itr(&q_vector->rx, 0);
2541 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2542 rxq++;
2543 }
2544 }
2545
2546 ice_flush(hw);
2547 }
2548
2549 /**
2550 * ice_vsi_free_irq - Free the IRQ association with the OS
2551 * @vsi: the VSI being configured
2552 */
2553 void ice_vsi_free_irq(struct ice_vsi *vsi)
2554 {
2555 struct ice_pf *pf = vsi->back;
2556 int i;
2557
2558 if (!vsi->q_vectors || !vsi->irqs_ready)
2559 return;
2560
2561 ice_vsi_release_msix(vsi);
2562 if (vsi->type == ICE_VSI_VF)
2563 return;
2564
2565 vsi->irqs_ready = false;
2566 ice_free_cpu_rx_rmap(vsi);
2567
2568 ice_for_each_q_vector(vsi, i) {
2569 int irq_num;
2570
2571 irq_num = vsi->q_vectors[i]->irq.virq;
2572
2573 /* free only the irqs that were actually requested */
2574 if (!vsi->q_vectors[i] ||
2575 !(vsi->q_vectors[i]->num_ring_tx ||
2576 vsi->q_vectors[i]->num_ring_rx))
2577 continue;
2578
2579 /* clear the affinity notifier in the IRQ descriptor */
2580 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2581 irq_set_affinity_notifier(irq_num, NULL);
2582
2583 /* clear the affinity_hint in the IRQ descriptor */
2584 irq_update_affinity_hint(irq_num, NULL);
2585 synchronize_irq(irq_num);
2586 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
2587 }
2588 }
2589
2590 /**
2591 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2592 * @vsi: the VSI having resources freed
2593 */
2594 void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2595 {
2596 int i;
2597
2598 if (!vsi->tx_rings)
2599 return;
2600
2601 ice_for_each_txq(vsi, i)
2602 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2603 ice_free_tx_ring(vsi->tx_rings[i]);
2604 }
2605
2606 /**
2607 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2608 * @vsi: the VSI having resources freed
2609 */
2610 void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2611 {
2612 int i;
2613
2614 if (!vsi->rx_rings)
2615 return;
2616
2617 ice_for_each_rxq(vsi, i)
2618 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2619 ice_free_rx_ring(vsi->rx_rings[i]);
2620 }
2621
2622 /**
2623 * ice_vsi_close - Shut down a VSI
2624 * @vsi: the VSI being shut down
2625 */
2626 void ice_vsi_close(struct ice_vsi *vsi)
2627 {
2628 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
2629 ice_down(vsi);
2630
2631 ice_vsi_free_irq(vsi);
2632 ice_vsi_free_tx_rings(vsi);
2633 ice_vsi_free_rx_rings(vsi);
2634 }
2635
2636 /**
2637 * ice_ena_vsi - resume a VSI
2638 * @vsi: the VSI being resumed
2639 * @locked: is the rtnl_lock already held
2640 */
2641 int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
2642 {
2643 int err = 0;
2644
2645 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
2646 return 0;
2647
2648 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2649
2650 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
2651 if (netif_running(vsi->netdev)) {
2652 if (!locked)
2653 rtnl_lock();
2654
2655 err = ice_open_internal(vsi->netdev);
2656
2657 if (!locked)
2658 rtnl_unlock();
2659 }
2660 } else if (vsi->type == ICE_VSI_CTRL) {
2661 err = ice_vsi_open_ctrl(vsi);
2662 }
2663
2664 return err;
2665 }
2666
2667 /**
2668 * ice_dis_vsi - pause a VSI
2669 * @vsi: the VSI being paused
2670 * @locked: is the rtnl_lock already held
2671 */
2672 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
2673 {
2674 if (test_bit(ICE_VSI_DOWN, vsi->state))
2675 return;
2676
2677 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2678
2679 if (vsi->type == ICE_VSI_PF && vsi->netdev) {
2680 if (netif_running(vsi->netdev)) {
2681 if (!locked)
2682 rtnl_lock();
2683
2684 ice_vsi_close(vsi);
2685
2686 if (!locked)
2687 rtnl_unlock();
2688 } else {
2689 ice_vsi_close(vsi);
2690 }
2691 } else if (vsi->type == ICE_VSI_CTRL) {
2692 ice_vsi_close(vsi);
2693 }
2694 }
2695
2696 /**
2697 * __ice_queue_set_napi - Set the napi instance for the queue
2698 * @dev: device to which NAPI and queue belong
2699 * @queue_index: Index of queue
2700 * @type: queue type as RX or TX
2701 * @napi: NAPI context
2702 * @locked: is the rtnl_lock already held
2703 *
2704 * Set the napi instance for the queue. Caller indicates the lock status.
2705 */
2706 static void
2707 __ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
2708 enum netdev_queue_type type, struct napi_struct *napi,
2709 bool locked)
2710 {
2711 if (!locked)
2712 rtnl_lock();
2713 netif_queue_set_napi(dev, queue_index, type, napi);
2714 if (!locked)
2715 rtnl_unlock();
2716 }
2717
2718 /**
2719 * ice_queue_set_napi - Set the napi instance for the queue
2720 * @vsi: VSI being configured
2721 * @queue_index: Index of queue
2722 * @type: queue type as RX or TX
2723 * @napi: NAPI context
2724 *
2725 * Set the napi instance for the queue. The rtnl lock state is derived from the
2726 * execution path.
2727 */
2728 void
2729 ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
2730 enum netdev_queue_type type, struct napi_struct *napi)
2731 {
2732 struct ice_pf *pf = vsi->back;
2733
2734 if (!vsi->netdev)
2735 return;
2736
2737 if (current_work() == &pf->serv_task ||
2738 test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
2739 test_bit(ICE_DOWN, pf->state) ||
2740 test_bit(ICE_SUSPENDED, pf->state))
2741 __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
2742 false);
2743 else
2744 __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
2745 true);
2746 }
2747
2748 /**
2749 * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
2750 * @q_vector: q_vector pointer
2751 * @locked: is the rtnl_lock already held
2752 *
2753 * Associate the q_vector napi with all the queue[s] on the vector.
2754 * Caller indicates the lock status.
2755 */
2756 void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
2757 {
2758 struct ice_rx_ring *rx_ring;
2759 struct ice_tx_ring *tx_ring;
2760
2761 ice_for_each_rx_ring(rx_ring, q_vector->rx)
2762 __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
2763 NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
2764 locked);
2765
2766 ice_for_each_tx_ring(tx_ring, q_vector->tx)
2767 __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
2768 NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
2769 locked);
2770 /* Also set the interrupt number for the NAPI */
2771 netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
2772 }
2773
2774 /**
2775 * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
2776 * @q_vector: q_vector pointer
2777 *
2778 * Associate the q_vector napi with all the queue[s] on the vector
2779 */
2780 void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
2781 {
2782 struct ice_rx_ring *rx_ring;
2783 struct ice_tx_ring *tx_ring;
2784
2785 ice_for_each_rx_ring(rx_ring, q_vector->rx)
2786 ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
2787 NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
2788
2789 ice_for_each_tx_ring(tx_ring, q_vector->tx)
2790 ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
2791 NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
2792 /* Also set the interrupt number for the NAPI */
2793 netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
2794 }
2795
2796 /**
2797 * ice_vsi_set_napi_queues - associate queue[s] with napi for all vectors
2798 * @vsi: VSI pointer
2799 *
2800 * Associate queue[s] with napi for all vectors
2801 */
2802 void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
2803 {
2804 int i;
2805
2806 if (!vsi->netdev)
2807 return;
2808
2809 ice_for_each_q_vector(vsi, i)
2810 ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
2811 }
2812
2813 /**
2814 * ice_vsi_release - Delete a VSI and free its resources
2815 * @vsi: the VSI being removed
2816 *
2817 * Returns 0 on success or < 0 on error
2818 */
2819 int ice_vsi_release(struct ice_vsi *vsi)
2820 {
2821 struct ice_pf *pf;
2822
2823 if (!vsi->back)
2824 return -ENODEV;
2825 pf = vsi->back;
2826
2827 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2828 ice_rss_clean(vsi);
2829
2830 ice_vsi_close(vsi);
2831 ice_vsi_decfg(vsi);
2832
2833 /* retain the SW VSI data structure since it is needed to unregister
2834 * and free the VSI netdev when the PF is not in a reset recovery
2835 * pending state, e.g. during rmmod.
2836 */
2837 if (!ice_is_reset_in_progress(pf->state))
2838 ice_vsi_delete(vsi);
2839
2840 return 0;
2841 }
2842
2843 /**
2844 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
2845 * @vsi: VSI connected with q_vectors
2846 * @coalesce: array of struct with stored coalesce
2847 *
2848 * Returns array size.
2849 */
2850 static int
2851 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
2852 struct ice_coalesce_stored *coalesce)
2853 {
2854 int i;
2855
2856 ice_for_each_q_vector(vsi, i) {
2857 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2858
2859 coalesce[i].itr_tx = q_vector->tx.itr_settings;
2860 coalesce[i].itr_rx = q_vector->rx.itr_settings;
2861 coalesce[i].intrl = q_vector->intrl;
2862
2863 if (i < vsi->num_txq)
2864 coalesce[i].tx_valid = true;
2865 if (i < vsi->num_rxq)
2866 coalesce[i].rx_valid = true;
2867 }
2868
2869 return vsi->num_q_vectors;
2870 }
2871
2872 /**
2873 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
2874 * @vsi: VSI connected with q_vectors
2875 * @coalesce: pointer to array of struct with stored coalesce
2876 * @size: size of coalesce array
2877 *
2878 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save
2879 * the ITR params in arrays. If size is 0 or coalesce wasn't stored, set the
2880 * coalesce settings to the default value.
2881 */
2882 static void
2883 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
2884 struct ice_coalesce_stored *coalesce, int size)
2885 {
2886 struct ice_ring_container *rc;
2887 int i;
2888
2889 if ((size && !coalesce) || !vsi)
2890 return;
2891
2892 /* There are a couple of cases that have to be handled here:
2893 * 1. The case where the number of queue vectors stays the same, but
2894 * the number of Tx or Rx rings changes (the first for loop)
2895 * 2. The case where the number of queue vectors increased (the
2896 * second for loop)
2897 */
2898 for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
2899 /* There are 2 cases to handle here and they are the same for
2900 * both Tx and Rx:
2901 * if the entry was valid previously (coalesce[i].[tr]x_valid)
2902 * and the loop variable is less than the number of rings
2903 * allocated, then write the previous values
2904 *
2905 * if the entry was not valid previously, but the index is
2906 * less than the number of rings allocated (this means the
2907 * number of rings increased from previously), then write out
2908 * the values from the first element
2909 *
2910 * Also, always write the ITR, even if in ITR_IS_DYNAMIC
2911 * as there is no harm because the dynamic algorithm
2912 * will just overwrite.
2913 */
2914 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
2915 rc = &vsi->q_vectors[i]->rx;
2916 rc->itr_settings = coalesce[i].itr_rx;
2917 ice_write_itr(rc, rc->itr_setting);
2918 } else if (i < vsi->alloc_rxq) {
2919 rc = &vsi->q_vectors[i]->rx;
2920 rc->itr_settings = coalesce[0].itr_rx;
2921 ice_write_itr(rc, rc->itr_setting);
2922 }
2923
2924 if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
2925 rc = &vsi->q_vectors[i]->tx;
2926 rc->itr_settings = coalesce[i].itr_tx;
2927 ice_write_itr(rc, rc->itr_setting);
2928 } else if (i < vsi->alloc_txq) {
2929 rc = &vsi->q_vectors[i]->tx;
2930 rc->itr_settings = coalesce[0].itr_tx;
2931 ice_write_itr(rc, rc->itr_setting);
2932 }
2933
2934 vsi->q_vectors[i]->intrl = coalesce[i].intrl;
2935 ice_set_q_vector_intrl(vsi->q_vectors[i]);
2936 }
2937
2938 /* the number of queue vectors increased so write whatever is in
2939 * the first element
2940 */
2941 for (; i < vsi->num_q_vectors; i++) {
2942 /* transmit */
2943 rc = &vsi->q_vectors[i]->tx;
2944 rc->itr_settings = coalesce[0].itr_tx;
2945 ice_write_itr(rc, rc->itr_setting);
2946
2947 /* receive */
2948 rc = &vsi->q_vectors[i]->rx;
2949 rc->itr_settings = coalesce[0].itr_rx;
2950 ice_write_itr(rc, rc->itr_setting);
2951
2952 vsi->q_vectors[i]->intrl = coalesce[0].intrl;
2953 ice_set_q_vector_intrl(vsi->q_vectors[i]);
2954 }
2955 }
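/* A concrete example of the two loops above: rebuilding from 4 to 6
 * q_vectors with size == 4 leaves vectors 0-3 with their previously
 * saved ITR/INTRL settings, while vectors 4 and 5 inherit whatever was
 * stored in coalesce[0].
 */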
2956
2957 /**
2958 * ice_vsi_realloc_stat_arrays - Free unused stat structures or allocate new ones
2959 * @vsi: VSI pointer
2960 */
2961 static int
2962 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
2963 {
2964 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
2965 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
2966 struct ice_ring_stats **tx_ring_stats;
2967 struct ice_ring_stats **rx_ring_stats;
2968 struct ice_vsi_stats *vsi_stat;
2969 struct ice_pf *pf = vsi->back;
2970 u16 prev_txq = vsi->alloc_txq;
2971 u16 prev_rxq = vsi->alloc_rxq;
2972 int i;
2973
2974 vsi_stat = pf->vsi_stats[vsi->idx];
2975
2976 if (req_txq < prev_txq) {
2977 for (i = req_txq; i < prev_txq; i++) {
2978 if (vsi_stat->tx_ring_stats[i]) {
2979 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
2980 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
2981 }
2982 }
2983 }
2984
2985 tx_ring_stats = vsi_stat->tx_ring_stats;
2986 vsi_stat->tx_ring_stats =
2987 krealloc_array(vsi_stat->tx_ring_stats, req_txq,
2988 sizeof(*vsi_stat->tx_ring_stats),
2989 GFP_KERNEL | __GFP_ZERO);
2990 if (!vsi_stat->tx_ring_stats) {
2991 vsi_stat->tx_ring_stats = tx_ring_stats;
2992 return -ENOMEM;
2993 }
2994
2995 if (req_rxq < prev_rxq) {
2996 for (i = req_rxq; i < prev_rxq; i++) {
2997 if (vsi_stat->rx_ring_stats[i]) {
2998 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
2999 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
3000 }
3001 }
3002 }
3003
3004 rx_ring_stats = vsi_stat->rx_ring_stats;
3005 vsi_stat->rx_ring_stats =
3006 krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
3007 sizeof(*vsi_stat->rx_ring_stats),
3008 GFP_KERNEL | __GFP_ZERO);
3009 if (!vsi_stat->rx_ring_stats) {
3010 vsi_stat->rx_ring_stats = rx_ring_stats;
3011 return -ENOMEM;
3012 }
3013
3014 return 0;
3015 }
3016
3017 /**
3018 * ice_vsi_rebuild - Rebuild VSI after reset
3019 * @vsi: VSI to be rebuilt
3020 * @vsi_flags: flags used for VSI rebuild flow
3021 *
3022 * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
3023 * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
3024 *
3025 * Returns 0 on success and negative value on failure
3026 */
3027 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
3028 {
3029 struct ice_coalesce_stored *coalesce;
3030 int prev_num_q_vectors;
3031 struct ice_pf *pf;
3032 int ret;
3033
3034 if (!vsi)
3035 return -EINVAL;
3036
3037 vsi->flags = vsi_flags;
3038 pf = vsi->back;
3039 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
3040 return -EINVAL;
3041
3042 ret = ice_vsi_realloc_stat_arrays(vsi);
3043 if (ret)
3044 goto err_vsi_cfg;
3045
3046 ice_vsi_decfg(vsi);
3047 ret = ice_vsi_cfg_def(vsi);
3048 if (ret)
3049 goto err_vsi_cfg;
3050
3051 coalesce = kcalloc(vsi->num_q_vectors,
3052 sizeof(struct ice_coalesce_stored), GFP_KERNEL);
3053 if (!coalesce)
3054 return -ENOMEM;
3055
3056 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
3057
3058 ret = ice_vsi_cfg_tc_lan(pf, vsi);
3059 if (ret) {
3060 if (vsi_flags & ICE_VSI_FLAG_INIT) {
3061 ret = -EIO;
3062 goto err_vsi_cfg_tc_lan;
3063 }
3064
3065 kfree(coalesce);
3066 return ice_schedule_reset(pf, ICE_RESET_PFR);
3067 }
3068
3069 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
3070 kfree(coalesce);
3071
3072 return 0;
3073
3074 err_vsi_cfg_tc_lan:
3075 ice_vsi_decfg(vsi);
3076 kfree(coalesce);
3077 err_vsi_cfg:
3078 return ret;
3079 }
3080
3081 /**
3082 * ice_is_reset_in_progress - check for a reset in progress
3083 * @state: PF state field
3084 */
3085 bool ice_is_reset_in_progress(unsigned long *state)
3086 {
3087 return test_bit(ICE_RESET_OICR_RECV, state) ||
3088 test_bit(ICE_PFR_REQ, state) ||
3089 test_bit(ICE_CORER_REQ, state) ||
3090 test_bit(ICE_GLOBR_REQ, state);
3091 }
3092
3093 /**
3094 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3095 * @pf: pointer to the PF structure
3096 * @timeout: length of time to wait, in jiffies
3097 *
3098 * Wait (sleep) for a short time until the driver finishes cleaning up from
3099 * a device reset. The caller must be able to sleep. Use this to delay
3100 * operations that could fail while the driver is cleaning up after a device
3101 * reset.
3102 *
3103 * Returns 0 on success, -EBUSY if the reset is not finished within the
3104 * timeout, and -ERESTARTSYS if the thread was interrupted.
3105 */
3106 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
3107 {
3108 long ret;
3109
3110 ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
3111 !ice_is_reset_in_progress(pf->state),
3112 timeout);
3113 if (ret < 0)
3114 return ret;
3115 else if (!ret)
3116 return -EBUSY;
3117 else
3118 return 0;
3119 }
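/* A minimal usage sketch (hypothetical caller): wait up to 10 seconds
 * for a pending reset to finish before touching the device:
 *
 *	err = ice_wait_for_reset(pf, 10 * HZ);
 *	if (err)
 *		dev_err(ice_pf_to_dev(pf), "reset did not complete\n");
 */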
3120
3121 /**
3122 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3123 * @vsi: VSI being configured
3124 * @ctx: the context buffer returned from AQ VSI update command
3125 */
3126 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3127 {
3128 vsi->info.mapping_flags = ctx->info.mapping_flags;
3129 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3130 sizeof(vsi->info.q_mapping));
3131 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3132 sizeof(vsi->info.tc_mapping));
3133 }
3134
3135 /**
3136 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3137 * @vsi: the VSI being configured
3138 * @ena_tc: TC map to be enabled
3139 */
3140 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
3141 {
3142 struct net_device *netdev = vsi->netdev;
3143 struct ice_pf *pf = vsi->back;
3144 int numtc = vsi->tc_cfg.numtc;
3145 struct ice_dcbx_cfg *dcbcfg;
3146 u8 netdev_tc;
3147 int i;
3148
3149 if (!netdev)
3150 return;
3151
3152 /* CHNL VSI doesn't have its own netdev, hence no netdev_tc */
3153 if (vsi->type == ICE_VSI_CHNL)
3154 return;
3155
3156 if (!ena_tc) {
3157 netdev_reset_tc(netdev);
3158 return;
3159 }
3160
3161 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
3162 numtc = vsi->all_numtc;
3163
3164 if (netdev_set_num_tc(netdev, numtc))
3165 return;
3166
3167 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
3168
3169 ice_for_each_traffic_class(i)
3170 if (vsi->tc_cfg.ena_tc & BIT(i))
3171 netdev_set_tc_queue(netdev,
3172 vsi->tc_cfg.tc_info[i].netdev_tc,
3173 vsi->tc_cfg.tc_info[i].qcount_tx,
3174 vsi->tc_cfg.tc_info[i].qoffset);
3175 /* setup TC queue map for CHNL TCs */
3176 ice_for_each_chnl_tc(i) {
3177 if (!(vsi->all_enatc & BIT(i)))
3178 break;
3179 if (!vsi->mqprio_qopt.qopt.count[i])
3180 break;
3181 netdev_set_tc_queue(netdev, i,
3182 vsi->mqprio_qopt.qopt.count[i],
3183 vsi->mqprio_qopt.qopt.offset[i]);
3184 }
3185
3186 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3187 return;
3188
3189 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
3190 u8 ets_tc = dcbcfg->etscfg.prio_table[i];
3191
3192 /* Get the mapped netdev TC# for the UP */
3193 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3194 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3195 }
3196 }
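/* For illustration: netdev_set_tc_queue(netdev, tc, count, offset)
 * tells the stack that queues [offset, offset + count) belong to netdev
 * TC 'tc', and netdev_set_prio_tc_map() then maps each user priority to
 * a TC; e.g. with prio_table[0] == 1, UP 0 traffic is steered to
 * whichever netdev TC was assigned to ETS TC 1.
 */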
3197
3198 /**
3199 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
3200 * @vsi: the VSI being configured
3201 * @ctxt: VSI context structure
3202 * @ena_tc: number of traffic classes to enable
3203 *
3204 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
3205 */
3206 static int
3207 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
3208 u8 ena_tc)
3209 {
3210 u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
3211 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
3212 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3213 u16 new_txq, new_rxq;
3214 u8 netdev_tc = 0;
3215 int i;
3216
3217 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
3218
3219 pow = order_base_2(tc0_qcount);
3220 qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);
3221 qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
3222
3223 ice_for_each_traffic_class(i) {
3224 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
3225 /* TC is not enabled */
3226 vsi->tc_cfg.tc_info[i].qoffset = 0;
3227 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
3228 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
3229 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
3230 ctxt->info.tc_mapping[i] = 0;
3231 continue;
3232 }
3233
3234 offset = vsi->mqprio_qopt.qopt.offset[i];
3235 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3236 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3237 vsi->tc_cfg.tc_info[i].qoffset = offset;
3238 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
3239 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
3240 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
3241 }
3242
3243 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
3244 ice_for_each_chnl_tc(i) {
3245 if (!(vsi->all_enatc & BIT(i)))
3246 continue;
3247 offset = vsi->mqprio_qopt.qopt.offset[i];
3248 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3249 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3250 }
3251 }
3252
3253 new_txq = offset + qcount_tx;
3254 if (new_txq > vsi->alloc_txq) {
3255 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
3256 new_txq, vsi->alloc_txq);
3257 return -EINVAL;
3258 }
3259
3260 new_rxq = offset + qcount_rx;
3261 if (new_rxq > vsi->alloc_rxq) {
3262 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
3263 new_rxq, vsi->alloc_rxq);
3264 return -EINVAL;
3265 }
3266
3267 /* Set actual Tx/Rx queue pairs */
3268 vsi->num_txq = new_txq;
3269 vsi->num_rxq = new_rxq;
3270
3271 /* Setup queue TC[0].qmap for given VSI context */
3272 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
3273 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
3274 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
3275
3276 /* Find the queue count and starting offset available for channel
3277 * VSIs
3278 */
3279 if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
3280 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
3281 vsi->next_base_q = tc0_qcount;
3282 }
3283 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
3284 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
3285 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
3286 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3287
3288 return 0;
3289 }
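/* A worked example of the TC0 qmap math above (hypothetical mqprio
 * config): with qopt.count[0] == 4 and qopt.offset[0] == 0,
 * order_base_2(4) yields pow == 2, so the map advertises queues
 * [0, 2^2) for TC0, and channel VSIs start at next_base_q == 4 when
 * more Rx queues than tc0_qcount are available.
 */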
3290
3291 /**
3292 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3293 * @vsi: VSI to be configured
3294 * @ena_tc: TC bitmap
3295 *
3296 * VSI queues expected to be quiesced before calling this function
3297 */
3298 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3299 {
3300 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3301 struct ice_pf *pf = vsi->back;
3302 struct ice_tc_cfg old_tc_cfg;
3303 struct ice_vsi_ctx *ctx;
3304 struct device *dev;
3305 int i, ret = 0;
3306 u8 num_tc = 0;
3307
3308 dev = ice_pf_to_dev(pf);
3309 if (vsi->tc_cfg.ena_tc == ena_tc &&
3310 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3311 return 0;
3312
3313 ice_for_each_traffic_class(i) {
3314 /* build bitmap of enabled TCs */
3315 if (ena_tc & BIT(i))
3316 num_tc++;
3317 /* populate max_txqs per TC */
3318 max_txqs[i] = vsi->alloc_txq;
3319 /* Update max_txqs if it is a CHNL VSI, because alloc_t[r]xq are
3320 * zero for a CHNL VSI, hence use num_txq instead as max_txqs
3321 */
3322 if (vsi->type == ICE_VSI_CHNL &&
3323 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3324 max_txqs[i] = vsi->num_txq;
3325 }
3326
3327 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
3328 vsi->tc_cfg.ena_tc = ena_tc;
3329 vsi->tc_cfg.numtc = num_tc;
3330
3331 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3332 if (!ctx)
3333 return -ENOMEM;
3334
3335 ctx->vf_num = 0;
3336 ctx->info = vsi->info;
3337
3338 if (vsi->type == ICE_VSI_PF &&
3339 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3340 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
3341 else
3342 ret = ice_vsi_setup_q_map(vsi, ctx);
3343
3344 if (ret) {
3345 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3346 goto out;
3347 }
3348
3349 /* must indicate which sections of the VSI context are being modified */
3350 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3351 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3352 if (ret) {
3353 dev_info(dev, "Failed VSI Update\n");
3354 goto out;
3355 }
3356
3357 if (vsi->type == ICE_VSI_PF &&
3358 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3359 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
3360 else
3361 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
3362 vsi->tc_cfg.ena_tc, max_txqs);
3363
3364 if (ret) {
3365 dev_err(dev, "VSI %d failed TC config, error %d\n",
3366 vsi->vsi_num, ret);
3367 goto out;
3368 }
3369 ice_vsi_update_q_map(vsi, ctx);
3370 vsi->info.valid_sections = 0;
3371
3372 ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3373 out:
3374 kfree(ctx);
3375 return ret;
3376 }
3377
3378 /**
3379 * ice_update_ring_stats - Update ring statistics
3380 * @stats: stats to be updated
3381 * @pkts: number of processed packets
3382 * @bytes: number of processed bytes
3383 *
3384 * This function assumes that caller has acquired a u64_stats_sync lock.
3385 */
3386 static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
3387 {
3388 stats->bytes += bytes;
3389 stats->pkts += pkts;
3390 }
3391
3392 /**
3393 * ice_update_tx_ring_stats - Update Tx ring specific counters
3394 * @tx_ring: ring to update
3395 * @pkts: number of processed packets
3396 * @bytes: number of processed bytes
3397 */
3398 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
3399 {
3400 u64_stats_update_begin(&tx_ring->ring_stats->syncp);
3401 ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
3402 u64_stats_update_end(&tx_ring->ring_stats->syncp);
3403 }
3404
3405 /**
3406 * ice_update_rx_ring_stats - Update Rx ring specific counters
3407 * @rx_ring: ring to update
3408 * @pkts: number of processed packets
3409 * @bytes: number of processed bytes
3410 */
3411 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
3412 {
3413 u64_stats_update_begin(&rx_ring->ring_stats->syncp);
3414 ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
3415 u64_stats_update_end(&rx_ring->ring_stats->syncp);
3416 }
3417
3418 /**
3419 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3420 * @pi: port info of the switch with default VSI
3421 *
3422 * Return true if there is a single VSI in the default forwarding VSI list
3423 */
3424 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
3425 {
3426 bool exists = false;
3427
3428 ice_check_if_dflt_vsi(pi, 0, &exists);
3429 return exists;
3430 }
3431
3432 /**
3433 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3434 * @vsi: VSI to compare against default forwarding VSI
3435 *
3436 * If the VSI passed in is the default forwarding VSI then return true, else
3437 * return false
3438 */
3439 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3440 {
3441 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3442 }
3443
3444 /**
3445 * ice_set_dflt_vsi - set the default forwarding VSI
3446 * @vsi: VSI getting set as the default forwarding VSI on the switch
3447 *
3448 * If the VSI passed in is already the default VSI and it's enabled, just return
3449 * success.
3450 *
3451 * Otherwise try to set the VSI passed in as the switch's default VSI and
3452 * return the result.
3453 */
3454 int ice_set_dflt_vsi(struct ice_vsi *vsi)
3455 {
3456 struct device *dev;
3457 int status;
3458
3459 if (!vsi)
3460 return -EINVAL;
3461
3462 dev = ice_pf_to_dev(vsi->back);
3463
3464 if (ice_lag_is_switchdev_running(vsi->back)) {
3465 dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
3466 vsi->vsi_num);
3467 return 0;
3468 }
3469
3470 /* the VSI passed in is already the default VSI */
3471 if (ice_is_vsi_dflt_vsi(vsi)) {
3472 dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
3473 vsi->vsi_num);
3474 return 0;
3475 }
3476
3477 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3478 if (status) {
3479 dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
3480 vsi->vsi_num, status);
3481 return status;
3482 }
3483
3484 return 0;
3485 }
3486
3487 /**
3488 * ice_clear_dflt_vsi - clear the default forwarding VSI
3489 * @vsi: VSI to remove from filter list
3490 *
3491 * If the switch has no default VSI or it's not enabled then return error.
3492 *
3493 * Otherwise try to clear the default VSI and return the result.
3494 */
3495 int ice_clear_dflt_vsi(struct ice_vsi *vsi)
3496 {
3497 struct device *dev;
3498 int status;
3499
3500 if (!vsi)
3501 return -EINVAL;
3502
3503 dev = ice_pf_to_dev(vsi->back);
3504
3505 /* there is no default VSI configured */
3506 if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3507 return -ENODEV;
3508
3509 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3510 ICE_FLTR_RX);
3511 if (status) {
3512 dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
3513 vsi->vsi_num, status);
3514 return -EIO;
3515 }
3516
3517 return 0;
3518 }
3519
3520 /**
3521 * ice_get_link_speed_mbps - get link speed in Mbps
3522 * @vsi: the VSI whose link speed is being queried
3523 *
3524 * Return the current VSI link speed, or 0 if the speed is unknown.
3525 */
3526 int ice_get_link_speed_mbps(struct ice_vsi *vsi)
3527 {
3528 unsigned int link_speed;
3529
3530 link_speed = vsi->port_info->phy.link_info.link_speed;
3531
3532 return (int)ice_get_link_speed(fls(link_speed) - 1);
3533 }
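/* A small worked example of the fls() conversion above: link_speed is
 * a one-hot AQ speed bitmap, so a speed bit of BIT(4) gives
 * fls(BIT(4)) - 1 == 4, which ice_get_link_speed() translates to the
 * Mbps value for that bit position (0 if the speed is unknown).
 */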
3534
3535 /**
3536 * ice_get_link_speed_kbps - get link speed in Kbps
3537 * @vsi: the VSI whose link speed is being queried
3538 *
3539 * Return the current VSI link speed, or 0 if the speed is unknown.
3540 */
3541 int ice_get_link_speed_kbps(struct ice_vsi *vsi)
3542 {
3543 int speed_mbps;
3544
3545 speed_mbps = ice_get_link_speed_mbps(vsi);
3546
3547 return speed_mbps * 1000;
3548 }
3549
3550 /**
3551 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
3552 * @vsi: VSI to be configured
3553 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
3554 *
3555 * If min_tx_rate is specified as 0, the minimum BW limit profile is cleared;
3556 * otherwise a non-zero value will force a minimum BW limit for the VSI on
3557 * TC 0.
3558 */
3559 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
3560 {
3561 struct ice_pf *pf = vsi->back;
3562 struct device *dev;
3563 int status;
3564 int speed;
3565
3566 dev = ice_pf_to_dev(pf);
3567 if (!vsi->port_info) {
3568 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
3569 vsi->idx, vsi->type);
3570 return -EINVAL;
3571 }
3572
3573 speed = ice_get_link_speed_kbps(vsi);
3574 if (min_tx_rate > (u64)speed) {
3575 dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
3576 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3577 speed);
3578 return -EINVAL;
3579 }
3580
3581 /* Configure min BW for VSI limit */
3582 if (min_tx_rate) {
3583 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3584 ICE_MIN_BW, min_tx_rate);
3585 if (status) {
3586 dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
3587 min_tx_rate, ice_vsi_type_str(vsi->type),
3588 vsi->idx);
3589 return status;
3590 }
3591
3592 dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
3593 min_tx_rate, ice_vsi_type_str(vsi->type));
3594 } else {
3595 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3596 vsi->idx, 0,
3597 ICE_MIN_BW);
3598 if (status) {
3599 dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
3600 ice_vsi_type_str(vsi->type), vsi->idx);
3601 return status;
3602 }
3603
3604 dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
3605 ice_vsi_type_str(vsi->type), vsi->idx);
3606 }
3607
3608 return 0;
3609 }
3610
3611 /**
3612 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
3613 * @vsi: VSI to be configured
3614 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
3615 *
3616 * If max_tx_rate is specified as 0, the maximum BW limit profile is cleared;
3617 * otherwise a non-zero value will force a maximum BW limit for the VSI on
3618 * TC 0.
3619 */
3620 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
3621 {
3622 struct ice_pf *pf = vsi->back;
3623 struct device *dev;
3624 int status;
3625 int speed;
3626
3627 dev = ice_pf_to_dev(pf);
3628 if (!vsi->port_info) {
3629 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
3630 vsi->idx, vsi->type);
3631 return -EINVAL;
3632 }
3633
3634 speed = ice_get_link_speed_kbps(vsi);
3635 if (max_tx_rate > (u64)speed) {
3636 dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
3637 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3638 speed);
3639 return -EINVAL;
3640 }
3641
3642 /* Configure max BW for VSI limit */
3643 if (max_tx_rate) {
3644 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3645 ICE_MAX_BW, max_tx_rate);
3646 if (status) {
3647 dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
3648 max_tx_rate, ice_vsi_type_str(vsi->type),
3649 vsi->idx);
3650 return status;
3651 }
3652
3653 dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
3654 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
3655 } else {
3656 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3657 vsi->idx, 0,
3658 ICE_MAX_BW);
3659 if (status) {
3660 dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
3661 ice_vsi_type_str(vsi->type), vsi->idx);
3662 return status;
3663 }
3664
3665 dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
3666 ice_vsi_type_str(vsi->type), vsi->idx);
3667 }
3668
3669 return 0;
3670 }
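
/* Illustrative sketch (hypothetical): pairing the two rate helpers above the
 * way a VF rate-configuration path might, keeping the floor at or below the
 * cap. The wrapper name and parameters are examples only.
 */
static int ice_example_vf_rates(struct ice_vsi *vsi, u64 min_kbps, u64 max_kbps)
{
	int err;

	/* a non-zero floor above a non-zero cap can never be satisfied */
	if (max_kbps && min_kbps > max_kbps)
		return -EINVAL;

	err = ice_set_min_bw_limit(vsi, min_kbps);
	if (err)
		return err;

	return ice_set_max_bw_limit(vsi, max_kbps);
}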

/**
 * ice_set_link - turn on/off physical link
 * @vsi: VSI to modify physical link on
 * @ena: turn on/off physical link
 */
int ice_set_link(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_hw *hw = pi->hw;
	int status;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	status = ice_aq_set_link_restart_an(pi, ena, NULL);

	/* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
	 * This is not a fatal error, so print a warning message and return
	 * a success code. Return an error if FW returns an error code other
	 * than ICE_AQ_RC_EMODE.
	 */
	if (status == -EIO) {
		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
				(ena ? "ON" : "OFF"), status,
				ice_aq_str(hw->adminq.sq_last_status));
	} else if (status) {
		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
			(ena ? "ON" : "OFF"), status,
			ice_aq_str(hw->adminq.sq_last_status));
		return status;
	}

	return 0;
}
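
/* Illustrative sketch (hypothetical): bouncing the physical link on a PF VSI.
 * Note that ice_set_link() already absorbs ICE_AQ_RC_EMODE (link owned by
 * manageability) as success, so no special handling is needed here.
 */
static int ice_example_bounce_link(struct ice_vsi *vsi)
{
	int err = ice_set_link(vsi, false);

	if (err)
		return err;

	return ice_set_link(vsi, true);
}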

/**
 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
 * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't
 * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
 *
 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
 * traffic in SVM, since the VLAN TPID isn't part of filtering.
 *
 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
 * part of filtering.
 */
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	return 0;
}
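
/* Illustrative sketch (hypothetical): a setup path making sure untagged and
 * VLAN 0 priority-tagged traffic still pass once VLAN pruning is enabled.
 * Whether one or two filters get installed depends on SVM vs. DVM, as
 * described in the kernel-doc above.
 */
static int ice_example_allow_untagged(struct ice_vsi *vsi)
{
	int err = ice_vsi_add_vlan_zero(vsi);

	if (err)
		dev_err(ice_pf_to_dev(vsi->back),
			"failed to add VLAN 0 filters for VSI %d, err %d\n",
			vsi->idx, err);
	return err;
}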

/**
 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to delete VLAN filters
 *
 * Delete the VLAN 0 filters in the same manner that they were added in
 * ice_vsi_add_vlan_zero.
 */
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* when deleting the last VLAN filter, make sure to disable the VLAN
	 * promisc mode so the filter isn't left by accident
	 */
	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				     ICE_MCAST_VLAN_PROMISC_BITS, 0);
}

/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode
 *
 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
 */
static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
{
#define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
#define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
	/* no VLAN 0 filter is created when a port VLAN is active */
	if (vsi->type == ICE_VSI_VF) {
		if (WARN_ON(!vsi->vf))
			return 0;

		if (ice_vf_is_port_vlan_ena(vsi->vf))
			return 0;
	}

	if (ice_is_dvm_ena(&vsi->back->hw))
		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
	else
		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
}

/**
 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
 * @vsi: VSI used to determine if any non-zero VLANs have been added
 */
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
 * @vsi: VSI used to get the number of non-zero VLANs added
 */
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
}
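
/* Illustrative sketch (hypothetical): the accounting relationship between the
 * helpers above. With DVM enabled and vsi->num_vlan == 3, two of those are
 * the VLAN 0 filters, so exactly one non-zero VLAN remains.
 */
static void ice_example_vlan_accounting(struct ice_vsi *vsi)
{
	u16 total = vsi->num_vlan;
	u16 zero = ice_vsi_num_zero_vlans(vsi);

	/* num_non_zero == total - zero; has_non_zero == (total > zero) */
	dev_dbg(ice_pf_to_dev(vsi->back),
		"VSI %d: %u VLANs total, %u VLAN 0 filters, %u non-zero\n",
		vsi->idx, total, zero, ice_vsi_num_non_zero_vlans(vsi));
}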

/**
 * ice_is_feature_supported - check if a feature is supported by the PF
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to be checked
 *
 * Return: true if the feature is supported, false otherwise.
 */
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return false;

	return test_bit(f, pf->features);
}
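
/* Illustrative sketch (hypothetical): gating an optional code path on a
 * feature bit. ICE_F_DSCP is a real feature enum used later in this file;
 * the wrapper name is an example only.
 */
static bool ice_example_can_offload_dscp(struct ice_pf *pf)
{
	/* false both for out-of-range enums and for features never set */
	return ice_is_feature_supported(pf, ICE_F_DSCP);
}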

/**
 * ice_set_feature_support - mark a feature as supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to set
 */
void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	set_bit(f, pf->features);
}

/**
 * ice_clear_feature_support - mark a feature as unsupported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to clear
 */
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	clear_bit(f, pf->features);
}

/**
 * ice_init_feature_support - set up the supported features
 * @pf: pointer to the struct ice_pf instance
 *
 * Called during init to set up the features supported by this device.
 */
void ice_init_feature_support(struct ice_pf *pf)
{
	switch (pf->hw.device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		ice_set_feature_support(pf, ICE_F_DSCP);
		if (ice_is_phy_rclk_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_PHY_RCLK);
		/* If we don't own the timer - don't enable other caps */
		if (!ice_pf_src_tmr_owned(pf))
			break;
		if (ice_is_cgu_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_CGU);
		if (ice_is_clock_mux_in_netlist(&pf->hw))
			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
		if (ice_gnss_is_gps_present(&pf->hw))
			ice_set_feature_support(pf, ICE_F_GNSS);
		break;
	default:
		break;
	}
}
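
/* Illustrative sketch (hypothetical): the intended life cycle of a feature
 * bit. It is set from ice_init_feature_support() based on device/netlist
 * probing, may be cleared later if bring-up of the subsystem fails, and is
 * tested at the points that depend on it. ICE_F_GNSS is a real feature enum;
 * the helper below and the failure path are assumptions for illustration.
 */
static int ice_example_gnss_start(struct ice_pf *pf)
{
	/* hypothetical stand-in for the real GNSS bring-up */
	return 0;
}

static void ice_example_gnss_bringup(struct ice_pf *pf)
{
	if (!ice_is_feature_supported(pf, ICE_F_GNSS))
		return;

	/* on bring-up failure, withdraw the feature so later checks see it
	 * as unsupported
	 */
	if (ice_example_gnss_start(pf))
		ice_clear_feature_support(pf, ICE_F_GNSS);
}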

/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
{
	struct ice_vsi_ctx ctx = { 0 };

	ctx.info = vsi->info;
	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	fill(&ctx);

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}
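
/* Illustrative sketch (hypothetical): how the fill-callback pattern of
 * ice_vsi_update_security() is meant to be used, here together with the real
 * ice_vsi_ctx_set_antispoof() helper defined just below (and declared in
 * ice_lib.h).
 */
static int ice_example_enable_antispoof(struct ice_vsi *vsi)
{
	/* the callback only edits the ctx; ice_vsi_update_security() marks
	 * the security section valid and commits the change to FW
	 */
	return ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
}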

/**
 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_set_allow_override - allow destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}

/**
 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}
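
/* Illustrative sketch (hypothetical): toggling destination override around an
 * operation that needs it, using the two real helpers above together with
 * ice_vsi_update_security().
 */
static int ice_example_with_dest_override(struct ice_vsi *vsi)
{
	int err = ice_vsi_update_security(vsi, ice_vsi_ctx_set_allow_override);

	if (err)
		return err;

	/* ... perform the work that requires destination override ... */

	return ice_vsi_update_security(vsi, ice_vsi_ctx_clear_allow_override);
}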

/**
 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
 * @vsi: pointer to VSI structure
 * @set: set or unset the bit
 */
int
ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
{
	struct ice_vsi_ctx ctx = {
		.info = vsi->info,
	};

	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
	if (set)
		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
	else
		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}
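
/* Illustrative sketch (hypothetical): enabling local loopback on a VSI for a
 * self-test and restoring the previous state afterwards, using the real
 * ice_vsi_update_local_lb() above.
 */
static int ice_example_local_lb_test(struct ice_vsi *vsi)
{
	int err = ice_vsi_update_local_lb(vsi, true);

	if (err)
		return err;

	/* ... run traffic that must be looped back locally ... */

	return ice_vsi_update_local_lb(vsi, false);
}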