1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3 
4 #include "ixgbe.h"
5 #include "ixgbe_sriov.h"
6 
7 #ifdef CONFIG_IXGBE_DCB
8 /**
9  * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
10  * @adapter: board private structure to initialize
11  *
12  * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
13  * will also try to cache the proper offsets if RSS/FCoE are enabled along
14  * with VMDq.
15  *
16  **/
17 static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
18 {
19 #ifdef IXGBE_FCOE
20 	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
21 #endif /* IXGBE_FCOE */
22 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
23 	int i;
24 	u16 reg_idx, pool;
25 	u8 tcs = adapter->hw_tcs;
26 
27 	/* verify we have DCB queueing enabled before proceeding */
28 	if (tcs <= 1)
29 		return false;
30 
31 	/* verify we have VMDq enabled before proceeding */
32 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
33 		return false;
34 
35 	/* start at VMDq register offset for SR-IOV enabled setups */
36 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
37 	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
38 		/* If we are greater than indices move to next pool */
39 		if ((reg_idx & ~vmdq->mask) >= tcs) {
40 			pool++;
41 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
42 		}
43 		adapter->rx_ring[i]->reg_idx = reg_idx;
44 		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
45 	}
46 
47 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
48 	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
49 		/* If we are greater than indices move to next pool */
50 		if ((reg_idx & ~vmdq->mask) >= tcs)
51 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
52 		adapter->tx_ring[i]->reg_idx = reg_idx;
53 	}
54 
55 #ifdef IXGBE_FCOE
56 	/* nothing to do if FCoE is disabled */
57 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
58 		return true;
59 
60 	/* The work is already done if the FCoE ring is shared */
61 	if (fcoe->offset < tcs)
62 		return true;
63 
64 	/* The FCoE rings exist separately, we need to move their reg_idx */
65 	if (fcoe->indices) {
66 		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
67 		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
68 
69 		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
70 		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
71 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
72 			adapter->rx_ring[i]->reg_idx = reg_idx;
73 			adapter->rx_ring[i]->netdev = adapter->netdev;
74 			reg_idx++;
75 		}
76 
77 		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
78 		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
79 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
80 			adapter->tx_ring[i]->reg_idx = reg_idx;
81 			reg_idx++;
82 		}
83 	}
84 
85 #endif /* IXGBE_FCOE */
86 	return true;
87 }
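/*
 * Illustrative sketch only (these helpers are not part of the driver):
 * __ALIGN_MASK(1, ~vmdq->mask) is the pool stride, i.e. the number of
 * hardware queues per VMDq pool, and __ALIGN_MASK(reg_idx, ~vmdq->mask)
 * rounds a register index up to the next pool boundary.  The example
 * mask values in the comments are assumptions for illustration.
 */
static inline u16 ixgbe_sketch_queues_per_pool(u16 vmdq_mask)
{
	/* e.g. a mask of 0x7C yields 4, 0x78 yields 8, 0x7E yields 2 */
	return __ALIGN_MASK(1, ~vmdq_mask);
}

static inline u16 ixgbe_sketch_next_pool_start(u16 reg_idx, u16 vmdq_mask)
{
	/* with a 0x7C mask, reg_idx 5 rounds up to 8 and 9 rounds up to 12 */
	return __ALIGN_MASK(reg_idx, ~vmdq_mask);
}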
88 
89 /* ixgbe_get_first_reg_idx - Return first register index associated with ring */
90 static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
91 				    unsigned int *tx, unsigned int *rx)
92 {
93 	struct ixgbe_hw *hw = &adapter->hw;
94 	u8 num_tcs = adapter->hw_tcs;
95 
96 	*tx = 0;
97 	*rx = 0;
98 
99 	switch (hw->mac.type) {
100 	case ixgbe_mac_82598EB:
101 		/* TxQs/TC: 4	RxQs/TC: 8 */
102 		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
103 		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
104 		break;
105 	case ixgbe_mac_82599EB:
106 	case ixgbe_mac_X540:
107 	case ixgbe_mac_X550:
108 	case ixgbe_mac_X550EM_x:
109 	case ixgbe_mac_x550em_a:
110 		if (num_tcs > 4) {
111 			/*
112 			 * TCs    : TC0/1 TC2/3 TC4-7
113 			 * TxQs/TC:    32    16     8
114 			 * RxQs/TC:    16    16    16
115 			 */
116 			*rx = tc << 4;
117 			if (tc < 3)
118 				*tx = tc << 5;		/*   0,  32,  64 */
119 			else if (tc < 5)
120 				*tx = (tc + 2) << 4;	/*  80,  96 */
121 			else
122 				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
123 		} else {
124 			/*
125 			 * TCs    : TC0 TC1 TC2/3
126 			 * TxQs/TC:  64  32    16
127 			 * RxQs/TC:  32  32    32
128 			 */
129 			*rx = tc << 5;
130 			if (tc < 2)
131 				*tx = tc << 6;		/*  0,  64 */
132 			else
133 				*tx = (tc + 4) << 4;	/* 96, 112 */
134 		}
135 		break;
136 	default:
137 		break;
138 	}
139 }
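/*
 * Illustrative sketch only (not called anywhere in the driver): for
 * num_tcs > 4 on 82599/X540-class MACs the Tx bases computed above work
 * out to 0, 32, 64, 80, 96, 104, 112 and 120 for TC0..TC7, while every
 * Rx base is simply tc * 16.
 */
static inline bool ixgbe_sketch_8tc_tx_base_ok(u8 tc, unsigned int tx)
{
	static const unsigned int tx_base[8] = {
		0, 32, 64, 80, 96, 104, 112, 120
	};

	return tc < 8 && tx == tx_base[tc];
}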
140 
141 /**
142  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
143  * @adapter: board private structure to initialize
144  *
145  * Cache the descriptor ring offsets for DCB to the assigned rings.
146  *
147  **/
148 static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
149 {
150 	u8 num_tcs = adapter->hw_tcs;
151 	unsigned int tx_idx, rx_idx;
152 	int tc, offset, rss_i, i;
153 
154 	/* verify we have DCB queueing enabled before proceeding */
155 	if (num_tcs <= 1)
156 		return false;
157 
158 	rss_i = adapter->ring_feature[RING_F_RSS].indices;
159 
160 	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
161 		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
162 		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
163 			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
164 			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
165 			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
166 			adapter->tx_ring[offset + i]->dcb_tc = tc;
167 			adapter->rx_ring[offset + i]->dcb_tc = tc;
168 		}
169 	}
170 
171 	return true;
172 }
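/*
 * Illustrative sketch only (not used by the driver): with the DCB
 * mapping above, ring i belongs to TC (i / rss_i) and sits (i % rss_i)
 * entries past that TC's first register index.  For example, with
 * rss_i = 4 and 8 TCs on an 82599, Tx ring 5 is TC1 and lands on
 * register index 32 + 1 = 33.
 */
static inline void ixgbe_sketch_dcb_ring_to_reg(struct ixgbe_adapter *adapter,
						int i, int rss_i,
						unsigned int *tx_reg,
						unsigned int *rx_reg)
{
	ixgbe_get_first_reg_idx(adapter, i / rss_i, tx_reg, rx_reg);
	*tx_reg += i % rss_i;
	*rx_reg += i % rss_i;
}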
173 
174 #endif
175 /**
176  * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
177  * @adapter: board private structure to initialize
178  *
179  * SR-IOV doesn't use any descriptor rings but changes the default if
180  * no other mapping is used.
181  *
182  */
183 static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
184 {
185 #ifdef IXGBE_FCOE
186 	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
187 #endif /* IXGBE_FCOE */
188 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
189 	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
190 	u16 reg_idx, pool;
191 	int i;
192 
193 	/* only proceed if VMDq is enabled */
194 	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
195 		return false;
196 
197 	/* start at VMDq register offset for SR-IOV enabled setups */
198 	pool = 0;
199 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
200 	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
201 #ifdef IXGBE_FCOE
202 		/* Allow first FCoE queue to be mapped as RSS */
203 		if (fcoe->offset && (i > fcoe->offset))
204 			break;
205 #endif
206 		/* If we are greater than indices move to next pool */
207 		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
208 			pool++;
209 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
210 		}
211 		adapter->rx_ring[i]->reg_idx = reg_idx;
212 		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
213 	}
214 
215 #ifdef IXGBE_FCOE
216 	/* FCoE uses a linear block of queues so just assign them 1:1 */
217 	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
218 		adapter->rx_ring[i]->reg_idx = reg_idx;
219 		adapter->rx_ring[i]->netdev = adapter->netdev;
220 	}
221 
222 #endif
223 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
224 	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
225 #ifdef IXGBE_FCOE
226 		/* Allow first FCoE queue to be mapped as RSS */
227 		if (fcoe->offset && (i > fcoe->offset))
228 			break;
229 #endif
230 		/* If we are greater than indices move to next pool */
231 		if ((reg_idx & rss->mask) >= rss->indices)
232 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
233 		adapter->tx_ring[i]->reg_idx = reg_idx;
234 	}
235 
236 #ifdef IXGBE_FCOE
237 	/* FCoE uses a linear block of queues so just assign them 1:1 */
238 	for (; i < adapter->num_tx_queues; i++, reg_idx++)
239 		adapter->tx_ring[i]->reg_idx = reg_idx;
240 
241 #endif
242 
243 	return true;
244 }
245 
246 /**
247  * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
248  * @adapter: board private structure to initialize
249  *
250  * Cache the descriptor ring offsets for RSS to the assigned rings.
251  *
252  **/
253 static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
254 {
255 	int i, reg_idx;
256 
257 	for (i = 0; i < adapter->num_rx_queues; i++) {
258 		adapter->rx_ring[i]->reg_idx = i;
259 		adapter->rx_ring[i]->netdev = adapter->netdev;
260 	}
261 	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
262 		adapter->tx_ring[i]->reg_idx = reg_idx;
263 	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
264 		adapter->xdp_ring[i]->reg_idx = reg_idx;
265 
266 	return true;
267 }
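/*
 * Illustrative sketch only: in the plain RSS case above the mapping is
 * 1:1, and the XDP rings simply continue numbering after the regular Tx
 * rings, so xdp_ring[i]->reg_idx == num_tx_queues + i.
 */
static inline int ixgbe_sketch_xdp_reg_idx(struct ixgbe_adapter *adapter, int i)
{
	return adapter->num_tx_queues + i;
}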
268 
269 /**
270  * ixgbe_cache_ring_register - Descriptor ring to register mapping
271  * @adapter: board private structure to initialize
272  *
273  * Once we know the feature-set enabled for the device, we'll cache
274  * the register offset the descriptor ring is assigned to.
275  *
276  * Note, the order the various feature calls is important.  It must start with
277  * the "most" features enabled at the same time, then trickle down to the
278  * least amount of features turned on at once.
279  **/
280 static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
281 {
282 	/* start with default case */
283 	adapter->rx_ring[0]->reg_idx = 0;
284 	adapter->tx_ring[0]->reg_idx = 0;
285 
286 #ifdef CONFIG_IXGBE_DCB
287 	if (ixgbe_cache_ring_dcb_sriov(adapter))
288 		return;
289 
290 	if (ixgbe_cache_ring_dcb(adapter))
291 		return;
292 
293 #endif
294 	if (ixgbe_cache_ring_sriov(adapter))
295 		return;
296 
297 	ixgbe_cache_ring_rss(adapter);
298 }
299 
300 static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
301 {
302 	return adapter->xdp_prog ? nr_cpu_ids : 0;
303 }
304 
305 #define IXGBE_RSS_64Q_MASK	0x3F
306 #define IXGBE_RSS_16Q_MASK	0xF
307 #define IXGBE_RSS_8Q_MASK	0x7
308 #define IXGBE_RSS_4Q_MASK	0x3
309 #define IXGBE_RSS_2Q_MASK	0x1
310 #define IXGBE_RSS_DISABLED_MASK	0x0
311 
312 #ifdef CONFIG_IXGBE_DCB
313 /**
314  * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
315  * @adapter: board private structure to initialize
316  *
317  * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
318  * and VM pools where appropriate.  Also assign queues based on DCB
319  * priorities and map accordingly.
320  *
321  **/
322 static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
323 {
324 	int i;
325 	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
326 	u16 vmdq_m = 0;
327 #ifdef IXGBE_FCOE
328 	u16 fcoe_i = 0;
329 #endif
330 	u8 tcs = adapter->hw_tcs;
331 
332 	/* verify we have DCB queueing enabled before proceeding */
333 	if (tcs <= 1)
334 		return false;
335 
336 	/* verify we have VMDq enabled before proceeding */
337 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
338 		return false;
339 
340 	/* limit VMDq instances on the PF by number of Tx queues */
341 	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
342 
343 	/* Add starting offset to total pool count */
344 	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
345 
346 	/* 16 pools w/ 8 TC per pool */
347 	if (tcs > 4) {
348 		vmdq_i = min_t(u16, vmdq_i, 16);
349 		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
350 	/* 32 pools w/ 4 TC per pool */
351 	} else {
352 		vmdq_i = min_t(u16, vmdq_i, 32);
353 		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
354 	}
355 
356 #ifdef IXGBE_FCOE
357 	/* queues in the remaining pools are available for FCoE */
358 	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
359 
360 #endif
361 	/* remove the starting offset from the pool count */
362 	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
363 
364 	/* save features for later use */
365 	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
366 	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
367 
368 	/*
369 	 * We do not support DCB, VMDq, and RSS all simultaneously
370 	 * so we will disable RSS since it is the lowest priority
371 	 */
372 	adapter->ring_feature[RING_F_RSS].indices = 1;
373 	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
374 
375 	/* disable ATR as it is not supported when VMDq is enabled */
376 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
377 
378 	adapter->num_rx_pools = vmdq_i;
379 	adapter->num_rx_queues_per_pool = tcs;
380 
381 	adapter->num_tx_queues = vmdq_i * tcs;
382 	adapter->num_xdp_queues = 0;
383 	adapter->num_rx_queues = vmdq_i * tcs;
384 
385 #ifdef IXGBE_FCOE
386 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
387 		struct ixgbe_ring_feature *fcoe;
388 
389 		fcoe = &adapter->ring_feature[RING_F_FCOE];
390 
391 		/* limit ourselves based on feature limits */
392 		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
393 
394 		if (fcoe_i) {
395 			/* alloc queues for FCoE separately */
396 			fcoe->indices = fcoe_i;
397 			fcoe->offset = vmdq_i * tcs;
398 
399 			/* add queues to adapter */
400 			adapter->num_tx_queues += fcoe_i;
401 			adapter->num_rx_queues += fcoe_i;
402 		} else if (tcs > 1) {
403 			/* use queue belonging to FCoE TC */
404 			fcoe->indices = 1;
405 			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
406 		} else {
407 			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
408 
409 			fcoe->indices = 0;
410 			fcoe->offset = 0;
411 		}
412 	}
413 
414 #endif /* IXGBE_FCOE */
415 	/* configure TC to queue mapping */
416 	for (i = 0; i < tcs; i++)
417 		netdev_set_tc_queue(adapter->netdev, i, 1, i);
418 
419 	return true;
420 }
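/*
 * Illustrative sketch only (not used by the driver): 82599-class parts
 * expose 128 Rx/Tx queues, so 8 TCs carve the device into 128 / 8 = 16
 * pools and 4 TCs into 128 / 4 = 32 pools.  Each pool not claimed for
 * VMDq/SR-IOV above can host one dedicated FCoE ring.
 */
static inline u16 ixgbe_sketch_dcb_sriov_fcoe_pools(u8 tcs, u16 vmdq_i)
{
	u16 pools = (tcs > 4) ? 16 : 32;

	return pools - min_t(u16, vmdq_i, pools);
}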
421 
422 static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
423 {
424 	struct net_device *dev = adapter->netdev;
425 	struct ixgbe_ring_feature *f;
426 	int rss_i, rss_m, i;
427 	int tcs;
428 
429 	/* Map queue offset and counts onto allocated tx queues */
430 	tcs = adapter->hw_tcs;
431 
432 	/* verify we have DCB queueing enabled before proceeding */
433 	if (tcs <= 1)
434 		return false;
435 
436 	/* determine the upper limit for our current DCB mode */
437 	rss_i = dev->num_tx_queues / tcs;
438 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
439 		/* 8 TC w/ 4 queues per TC */
440 		rss_i = min_t(u16, rss_i, 4);
441 		rss_m = IXGBE_RSS_4Q_MASK;
442 	} else if (tcs > 4) {
443 		/* 8 TC w/ 8 queues per TC */
444 		rss_i = min_t(u16, rss_i, 8);
445 		rss_m = IXGBE_RSS_8Q_MASK;
446 	} else {
447 		/* 4 TC w/ 16 queues per TC */
448 		rss_i = min_t(u16, rss_i, 16);
449 		rss_m = IXGBE_RSS_16Q_MASK;
450 	}
451 
452 	/* set RSS mask and indices */
453 	f = &adapter->ring_feature[RING_F_RSS];
454 	rss_i = min_t(int, rss_i, f->limit);
455 	f->indices = rss_i;
456 	f->mask = rss_m;
457 
458 	/* disable ATR as it is not supported when multiple TCs are enabled */
459 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
460 
461 #ifdef IXGBE_FCOE
462 	/* FCoE enabled queues require special configuration indexed
463 	 * by feature specific indices and offset. Here we map FCoE
464 	 * indices onto the DCB queue pairs allowing FCoE to own
465 	 * configuration later.
466 	 */
467 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
468 		u8 tc = ixgbe_fcoe_get_tc(adapter);
469 
470 		f = &adapter->ring_feature[RING_F_FCOE];
471 		f->indices = min_t(u16, rss_i, f->limit);
472 		f->offset = rss_i * tc;
473 	}
474 
475 #endif /* IXGBE_FCOE */
476 	for (i = 0; i < tcs; i++)
477 		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
478 
479 	adapter->num_tx_queues = rss_i * tcs;
480 	adapter->num_xdp_queues = 0;
481 	adapter->num_rx_queues = rss_i * tcs;
482 
483 	return true;
484 }
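/*
 * Illustrative sketch only: netdev_set_tc_queue() above gives each TC a
 * contiguous block of rss_i queues, so with 4 TCs and rss_i = 16, TC2
 * covers queue indices 32..47.
 */
static inline void ixgbe_sketch_dcb_tc_range(int tc, int rss_i,
					     int *first, int *last)
{
	*first = rss_i * tc;
	*last = rss_i * tc + rss_i - 1;
}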
485 
486 #endif
487 /**
488  * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
489  * @adapter: board private structure to initialize
490  *
491  * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
492  * and VM pools where appropriate.  If RSS is available, then also try and
493  * enable RSS and map accordingly.
494  *
495  **/
496 static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
497 {
498 	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
499 	u16 vmdq_m = 0;
500 	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
501 	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
502 #ifdef IXGBE_FCOE
503 	u16 fcoe_i = 0;
504 #endif
505 
506 	/* only proceed if SR-IOV is enabled */
507 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
508 		return false;
509 
510 	/* limit l2fwd RSS based on total Tx queue limit */
511 	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
512 
513 	/* Add starting offset to total pool count */
514 	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
515 
516 	/* double check we are limited to maximum pools */
517 	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
518 
519 	/* 64 pool mode with 2 queues per pool */
520 	if (vmdq_i > 32) {
521 		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
522 		rss_m = IXGBE_RSS_2Q_MASK;
523 		rss_i = min_t(u16, rss_i, 2);
524 	/* 32 pool mode with up to 4 queues per pool */
525 	} else {
526 		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
527 		rss_m = IXGBE_RSS_4Q_MASK;
528 		/* We can support 4, 2, or 1 queues */
529 		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
530 	}
531 
532 #ifdef IXGBE_FCOE
533 	/* queues in the remaining pools are available for FCoE */
534 	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
535 
536 #endif
537 	/* remove the starting offset from the pool count */
538 	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
539 
540 	/* save features for later use */
541 	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
542 	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
543 
544 	/* limit RSS based on user input and save for later use */
545 	adapter->ring_feature[RING_F_RSS].indices = rss_i;
546 	adapter->ring_feature[RING_F_RSS].mask = rss_m;
547 
548 	adapter->num_rx_pools = vmdq_i;
549 	adapter->num_rx_queues_per_pool = rss_i;
550 
551 	adapter->num_rx_queues = vmdq_i * rss_i;
552 	adapter->num_tx_queues = vmdq_i * rss_i;
553 	adapter->num_xdp_queues = 0;
554 
555 	/* disable ATR as it is not supported when VMDq is enabled */
556 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
557 
558 #ifdef IXGBE_FCOE
559 	/*
560 	 * FCoE can use rings from adjacent buffers to allow RSS
561 	 * like behavior.  To account for this we need to add the
562 	 * FCoE indices to the total ring count.
563 	 */
564 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
565 		struct ixgbe_ring_feature *fcoe;
566 
567 		fcoe = &adapter->ring_feature[RING_F_FCOE];
568 
569 		/* limit ourselves based on feature limits */
570 		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
571 
572 		if (vmdq_i > 1 && fcoe_i) {
573 			/* alloc queues for FCoE separately */
574 			fcoe->indices = fcoe_i;
575 			fcoe->offset = vmdq_i * rss_i;
576 		} else {
577 			/* merge FCoE queues with RSS queues */
578 			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
579 
580 			/* limit indices to rss_i if MSI-X is disabled */
581 			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
582 				fcoe_i = rss_i;
583 
584 			/* attempt to reserve some queues for just FCoE */
585 			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
586 			fcoe->offset = fcoe_i - fcoe->indices;
587 
588 			fcoe_i -= rss_i;
589 		}
590 
591 		/* add queues to adapter */
592 		adapter->num_tx_queues += fcoe_i;
593 		adapter->num_rx_queues += fcoe_i;
594 	}
595 
596 #endif
597 	/* To support macvlan offload we have to use num_tc to
598 	 * restrict the queues that can be used by the device.
599 	 * By doing this we can avoid reporting a false number of
600 	 * queues.
601 	 */
602 	if (vmdq_i > 1)
603 		netdev_set_num_tc(adapter->netdev, 1);
604 
605 	/* populate TC0 for use by pool 0 */
606 	netdev_set_tc_queue(adapter->netdev, 0,
607 			    adapter->num_rx_queues_per_pool, 0);
608 
609 	return true;
610 }
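/*
 * Illustrative sketch only (ignores the earlier MAX_TX_QUEUES clamp):
 * requesting more than 32 pools above forces the 64-pool/2-queue
 * layout, otherwise the 32-pool/4-queue layout is used and RSS per pool
 * is clamped to 4, 2 or 1.  For example, 40 pools give 2 queues per
 * pool while 20 pools allow up to 4.
 */
static inline u16 ixgbe_sketch_sriov_rss_per_pool(u16 vmdq_i, u16 rss_limit)
{
	if (vmdq_i > 32)
		return min_t(u16, rss_limit, 2);

	return (rss_limit > 3) ? 4 : (rss_limit > 1) ? 2 : 1;
}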
611 
612 /**
613  * ixgbe_set_rss_queues - Allocate queues for RSS
614  * @adapter: board private structure to initialize
615  *
616  * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
617  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
618  *
619  **/
620 static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
621 {
622 	struct ixgbe_hw *hw = &adapter->hw;
623 	struct ixgbe_ring_feature *f;
624 	u16 rss_i;
625 
626 	/* set mask for 16 queue limit of RSS */
627 	f = &adapter->ring_feature[RING_F_RSS];
628 	rss_i = f->limit;
629 
630 	f->indices = rss_i;
631 
632 	if (hw->mac.type < ixgbe_mac_X550)
633 		f->mask = IXGBE_RSS_16Q_MASK;
634 	else
635 		f->mask = IXGBE_RSS_64Q_MASK;
636 
637 	/* disable ATR by default, it will be configured below */
638 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
639 
640 	/*
641 	 * Use Flow Director in addition to RSS to ensure the best
642 	 * distribution of flows across cores, even when an FDIR flow
643 	 * isn't matched.
644 	 */
645 	if (rss_i > 1 && adapter->atr_sample_rate) {
646 		f = &adapter->ring_feature[RING_F_FDIR];
647 
648 		rss_i = f->indices = f->limit;
649 
650 		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
651 			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
652 	}
653 
654 #ifdef IXGBE_FCOE
655 	/*
656 	 * FCoE can exist on the same rings as standard network traffic
657 	 * however it is preferred to avoid that if possible.  In order
658 	 * to get the best performance we allocate as many FCoE queues
659 	 * as we can and we place them at the end of the ring array to
660 	 * avoid sharing queues with standard RSS on systems with 24 or
661 	 * more CPUs.
662 	 */
663 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
664 		struct net_device *dev = adapter->netdev;
665 		u16 fcoe_i;
666 
667 		f = &adapter->ring_feature[RING_F_FCOE];
668 
669 		/* merge FCoE queues with RSS queues */
670 		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
671 		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
672 
673 		/* limit indices to rss_i if MSI-X is disabled */
674 		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
675 			fcoe_i = rss_i;
676 
677 		/* attempt to reserve some queues for just FCoE */
678 		f->indices = min_t(u16, fcoe_i, f->limit);
679 		f->offset = fcoe_i - f->indices;
680 		rss_i = max_t(u16, fcoe_i, rss_i);
681 	}
682 
683 #endif /* IXGBE_FCOE */
684 	adapter->num_rx_queues = rss_i;
685 	adapter->num_tx_queues = rss_i;
686 	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);
687 
688 	return true;
689 }
690 
691 /**
692  * ixgbe_set_num_queues - Allocate queues for device, feature dependent
693  * @adapter: board private structure to initialize
694  *
695  * This is the top level queue allocation routine.  The order here is very
696  * important, starting with the "most" number of features turned on at once,
697  * and ending with the smallest set of features.  This way large combinations
698  * can be allocated if they're turned on, and smaller combinations are the
699  * fallthrough conditions.
700  *
701  **/
702 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
703 {
704 	/* Start with base case */
705 	adapter->num_rx_queues = 1;
706 	adapter->num_tx_queues = 1;
707 	adapter->num_xdp_queues = 0;
708 	adapter->num_rx_pools = 1;
709 	adapter->num_rx_queues_per_pool = 1;
710 
711 #ifdef CONFIG_IXGBE_DCB
712 	if (ixgbe_set_dcb_sriov_queues(adapter))
713 		return;
714 
715 	if (ixgbe_set_dcb_queues(adapter))
716 		return;
717 
718 #endif
719 	if (ixgbe_set_sriov_queues(adapter))
720 		return;
721 
722 	ixgbe_set_rss_queues(adapter);
723 }
724 
725 /**
726  * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
727  * @adapter: board private structure
728  *
729  * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
730  * return a negative error code if unable to acquire MSI-X vectors for any
731  * reason.
732  */
733 static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
734 {
735 	struct ixgbe_hw *hw = &adapter->hw;
736 	int i, vectors, vector_threshold;
737 
738 	/* We start by asking for one vector per queue pair with XDP queues
739 	 * being stacked with TX queues.
740 	 */
741 	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
742 	vectors = max(vectors, adapter->num_xdp_queues);
743 
744 	/* It is easy to be greedy for MSI-X vectors. However, it really
745 	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
746 	 * be somewhat conservative and only ask for (roughly) the same number
747 	 * of vectors as there are CPUs.
748 	 */
749 	vectors = min_t(int, vectors, num_online_cpus());
750 
751 	/* Some vectors are necessary for non-queue interrupts */
752 	vectors += NON_Q_VECTORS;
753 
754 	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
755 	 * With features such as RSS and VMDq, we can easily surpass the
756 	 * number of Rx and Tx descriptor queues supported by our device.
757 	 * Thus, we cap the maximum in the rare cases where the CPU count also
758 	 * exceeds our vector limit
759 	 */
760 	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
761 
762 	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
763 	 * handler, and (2) an Other (Link Status Change, etc.) handler.
764 	 */
765 	vector_threshold = MIN_MSIX_COUNT;
766 
767 	adapter->msix_entries = kcalloc(vectors,
768 					sizeof(struct msix_entry),
769 					GFP_KERNEL);
770 	if (!adapter->msix_entries)
771 		return -ENOMEM;
772 
773 	for (i = 0; i < vectors; i++)
774 		adapter->msix_entries[i].entry = i;
775 
776 	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
777 					vector_threshold, vectors);
778 
779 	if (vectors < 0) {
780 		/* A negative count of allocated vectors indicates an error in
781 		 * acquiring within the specified range of MSI-X vectors
782 		 */
783 		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
784 			   vectors);
785 
786 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
787 		kfree(adapter->msix_entries);
788 		adapter->msix_entries = NULL;
789 
790 		return vectors;
791 	}
792 
793 	/* we successfully allocated some number of vectors within our
794 	 * requested range.
795 	 */
796 	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
797 
798 	/* Adjust for only the vectors we'll use, which is minimum
799 	 * of max_q_vectors, or the number of vectors we were allocated.
800 	 */
801 	vectors -= NON_Q_VECTORS;
802 	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
803 
804 	return 0;
805 }
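/*
 * Illustrative sketch only: the vector budget requested above condensed
 * into a single expression (the real function also allocates
 * msix_entries and calls pci_enable_msix_range()).  XDP rings share
 * vectors with the Tx rings, so only the largest queue count matters.
 */
static inline int ixgbe_sketch_msix_request(struct ixgbe_adapter *adapter)
{
	int vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);

	vectors = max(vectors, adapter->num_xdp_queues);
	vectors = min_t(int, vectors, num_online_cpus());
	vectors += NON_Q_VECTORS;

	return min_t(int, vectors, adapter->hw.mac.max_msix_vectors);
}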
806 
807 static void ixgbe_add_ring(struct ixgbe_ring *ring,
808 			   struct ixgbe_ring_container *head)
809 {
810 	ring->next = head->ring;
811 	head->ring = ring;
812 	head->count++;
813 	head->next_update = jiffies + 1;
814 }
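/*
 * Illustrative sketch only: ixgbe_add_ring() pushes onto a singly
 * linked list, so rings come back out in reverse insertion order, and
 * the driver's ixgbe_for_each_ring() iterator walks that list much like
 * the loop below.
 */
static inline unsigned int
ixgbe_sketch_count_rings(struct ixgbe_ring_container *head)
{
	struct ixgbe_ring *ring;
	unsigned int count = 0;

	for (ring = head->ring; ring; ring = ring->next)
		count++;

	return count;	/* should always match head->count */
}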
815 
816 /**
817  * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
818  * @adapter: board private structure to initialize
819  * @v_count: q_vectors allocated on adapter, used for ring interleaving
820  * @v_idx: index of vector in adapter struct
821  * @txr_count: total number of Tx rings to allocate
822  * @txr_idx: index of first Tx ring to allocate
823  * @xdp_count: total number of XDP rings to allocate
824  * @xdp_idx: index of first XDP ring to allocate
825  * @rxr_count: total number of Rx rings to allocate
826  * @rxr_idx: index of first Rx ring to allocate
827  *
828  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
829  **/
830 static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
831 				int v_count, int v_idx,
832 				int txr_count, int txr_idx,
833 				int xdp_count, int xdp_idx,
834 				int rxr_count, int rxr_idx)
835 {
836 	int node = dev_to_node(&adapter->pdev->dev);
837 	struct ixgbe_q_vector *q_vector;
838 	struct ixgbe_ring *ring;
839 	int cpu = -1;
840 	int ring_count;
841 	u8 tcs = adapter->hw_tcs;
842 
843 	ring_count = txr_count + rxr_count + xdp_count;
844 
845 	/* customize cpu for Flow Director mapping */
846 	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
847 		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
848 		if (rss_i > 1 && adapter->atr_sample_rate) {
849 			cpu = cpumask_local_spread(v_idx, node);
850 			node = cpu_to_node(cpu);
851 		}
852 	}
853 
854 	/* allocate q_vector and rings */
855 	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
856 				GFP_KERNEL, node);
857 	if (!q_vector)
858 		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
859 				   GFP_KERNEL);
860 	if (!q_vector)
861 		return -ENOMEM;
862 
863 	/* setup affinity mask and node */
864 	if (cpu != -1)
865 		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
866 	q_vector->numa_node = node;
867 
868 #ifdef CONFIG_IXGBE_DCA
869 	/* initialize CPU for DCA */
870 	q_vector->cpu = -1;
871 
872 #endif
873 	/* initialize NAPI */
874 	netif_napi_add(adapter->netdev, &q_vector->napi,
875 		       ixgbe_poll, 64);
876 
877 	/* tie q_vector and adapter together */
878 	adapter->q_vector[v_idx] = q_vector;
879 	q_vector->adapter = adapter;
880 	q_vector->v_idx = v_idx;
881 
882 	/* initialize work limits */
883 	q_vector->tx.work_limit = adapter->tx_work_limit;
884 
885 	/* Initialize setting for adaptive ITR */
886 	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
887 			   IXGBE_ITR_ADAPTIVE_LATENCY;
888 	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
889 			   IXGBE_ITR_ADAPTIVE_LATENCY;
890 
891 	/* initialize ITR */
892 	if (txr_count && !rxr_count) {
893 		/* tx only vector */
894 		if (adapter->tx_itr_setting == 1)
895 			q_vector->itr = IXGBE_12K_ITR;
896 		else
897 			q_vector->itr = adapter->tx_itr_setting;
898 	} else {
899 		/* rx or rx/tx vector */
900 		if (adapter->rx_itr_setting == 1)
901 			q_vector->itr = IXGBE_20K_ITR;
902 		else
903 			q_vector->itr = adapter->rx_itr_setting;
904 	}
905 
906 	/* initialize pointer to rings */
907 	ring = q_vector->ring;
908 
909 	while (txr_count) {
910 		/* assign generic ring traits */
911 		ring->dev = &adapter->pdev->dev;
912 		ring->netdev = adapter->netdev;
913 
914 		/* configure backlink on ring */
915 		ring->q_vector = q_vector;
916 
917 		/* update q_vector Tx values */
918 		ixgbe_add_ring(ring, &q_vector->tx);
919 
920 		/* apply Tx specific ring traits */
921 		ring->count = adapter->tx_ring_count;
922 		ring->queue_index = txr_idx;
923 
924 		/* assign ring to adapter */
925 		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
926 
927 		/* update count and index */
928 		txr_count--;
929 		txr_idx += v_count;
930 
931 		/* push pointer to next ring */
932 		ring++;
933 	}
934 
935 	while (xdp_count) {
936 		/* assign generic ring traits */
937 		ring->dev = &adapter->pdev->dev;
938 		ring->netdev = adapter->netdev;
939 
940 		/* configure backlink on ring */
941 		ring->q_vector = q_vector;
942 
943 		/* update q_vector Tx values */
944 		ixgbe_add_ring(ring, &q_vector->tx);
945 
946 		/* apply Tx specific ring traits */
947 		ring->count = adapter->tx_ring_count;
948 		ring->queue_index = xdp_idx;
949 		set_ring_xdp(ring);
950 
951 		/* assign ring to adapter */
952 		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
953 
954 		/* update count and index */
955 		xdp_count--;
956 		xdp_idx++;
957 
958 		/* push pointer to next ring */
959 		ring++;
960 	}
961 
962 	while (rxr_count) {
963 		/* assign generic ring traits */
964 		ring->dev = &adapter->pdev->dev;
965 		ring->netdev = adapter->netdev;
966 
967 		/* configure backlink on ring */
968 		ring->q_vector = q_vector;
969 
970 		/* update q_vector Rx values */
971 		ixgbe_add_ring(ring, &q_vector->rx);
972 
973 		/*
974 		 * 82599 errata, UDP frames with a 0 checksum
975 		 * can be marked as checksum errors.
976 		 */
977 		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
978 			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
979 
980 #ifdef IXGBE_FCOE
981 		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
982 			struct ixgbe_ring_feature *f;
983 			f = &adapter->ring_feature[RING_F_FCOE];
984 			if ((rxr_idx >= f->offset) &&
985 			    (rxr_idx < f->offset + f->indices))
986 				set_bit(__IXGBE_RX_FCOE, &ring->state);
987 		}
988 
989 #endif /* IXGBE_FCOE */
990 		/* apply Rx specific ring traits */
991 		ring->count = adapter->rx_ring_count;
992 		ring->queue_index = rxr_idx;
993 
994 		/* assign ring to adapter */
995 		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
996 
997 		/* update count and index */
998 		rxr_count--;
999 		rxr_idx += v_count;
1000 
1001 		/* push pointer to next ring */
1002 		ring++;
1003 	}
1004 
1005 	return 0;
1006 }
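/*
 * Illustrative sketch only: txr_idx/rxr_idx advance by v_count inside
 * the loops above, while ixgbe_alloc_q_vectors() advances them by just
 * one per vector, so the Tx/Rx rings owned by one q_vector end up
 * interleaved across the adapter: first ring index i, then i + v_count,
 * i + 2 * v_count, and so on.
 */
static inline int ixgbe_sketch_nth_ring_idx(int first_idx, int n, int v_count)
{
	return first_idx + n * v_count;
}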
1007 
1008 /**
1009  * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
1010  * @adapter: board private structure to initialize
1011  * @v_idx: Index of vector to be freed
1012  *
1013  * This function frees the memory allocated to the q_vector.  In addition if
1014  * NAPI is enabled it will delete any references to the NAPI struct prior
1015  * to freeing the q_vector.
1016  **/
1017 static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
1018 {
1019 	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
1020 	struct ixgbe_ring *ring;
1021 
1022 	ixgbe_for_each_ring(ring, q_vector->tx) {
1023 		if (ring_is_xdp(ring))
1024 			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
1025 		else
1026 			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
1027 	}
1028 
1029 	ixgbe_for_each_ring(ring, q_vector->rx)
1030 		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
1031 
1032 	adapter->q_vector[v_idx] = NULL;
1033 	__netif_napi_del(&q_vector->napi);
1034 
1035 	/*
1036 	 * After a call to __netif_napi_del() the napi may still be in use and
1037 	 * ixgbe_get_stats64() might access the rings on this vector, so
1038 	 * we must wait a grace period before freeing it.
1039 	 */
1040 	kfree_rcu(q_vector, rcu);
1041 }
1042 
1043 /**
1044  * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
1045  * @adapter: board private structure to initialize
1046  *
1047  * We allocate one q_vector per queue interrupt.  If allocation fails we
1048  * return -ENOMEM.
1049  **/
1050 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
1051 {
1052 	int q_vectors = adapter->num_q_vectors;
1053 	int rxr_remaining = adapter->num_rx_queues;
1054 	int txr_remaining = adapter->num_tx_queues;
1055 	int xdp_remaining = adapter->num_xdp_queues;
1056 	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
1057 	int err, i;
1058 
1059 	/* only one q_vector if MSI-X is disabled. */
1060 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1061 		q_vectors = 1;
1062 
1063 	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
1064 		for (; rxr_remaining; v_idx++) {
1065 			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
1066 						   0, 0, 0, 0, 1, rxr_idx);
1067 
1068 			if (err)
1069 				goto err_out;
1070 
1071 			/* update counts and index */
1072 			rxr_remaining--;
1073 			rxr_idx++;
1074 		}
1075 	}
1076 
1077 	for (; v_idx < q_vectors; v_idx++) {
1078 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1079 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1080 		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);
1081 
1082 		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
1083 					   tqpv, txr_idx,
1084 					   xqpv, xdp_idx,
1085 					   rqpv, rxr_idx);
1086 
1087 		if (err)
1088 			goto err_out;
1089 
1090 		/* update counts and index */
1091 		rxr_remaining -= rqpv;
1092 		txr_remaining -= tqpv;
1093 		xdp_remaining -= xqpv;
1094 		rxr_idx++;
1095 		txr_idx++;
1096 		xdp_idx += xqpv;
1097 	}
1098 
1099 	for (i = 0; i < adapter->num_rx_queues; i++) {
1100 		if (adapter->rx_ring[i])
1101 			adapter->rx_ring[i]->ring_idx = i;
1102 	}
1103 
1104 	for (i = 0; i < adapter->num_tx_queues; i++) {
1105 		if (adapter->tx_ring[i])
1106 			adapter->tx_ring[i]->ring_idx = i;
1107 	}
1108 
1109 	for (i = 0; i < adapter->num_xdp_queues; i++) {
1110 		if (adapter->xdp_ring[i])
1111 			adapter->xdp_ring[i]->ring_idx = i;
1112 	}
1113 
1114 	return 0;
1115 
1116 err_out:
1117 	adapter->num_tx_queues = 0;
1118 	adapter->num_xdp_queues = 0;
1119 	adapter->num_rx_queues = 0;
1120 	adapter->num_q_vectors = 0;
1121 
1122 	while (v_idx--)
1123 		ixgbe_free_q_vector(adapter, v_idx);
1124 
1125 	return -ENOMEM;
1126 }
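/*
 * Illustrative sketch only: the DIV_ROUND_UP() split above hands the
 * remaining rings out as evenly as possible; e.g. 10 rings spread over
 * 4 vectors come out as 3, 3, 2, 2.
 */
static inline int ixgbe_sketch_rings_for_vector(int remaining, int vectors_left)
{
	return DIV_ROUND_UP(remaining, vectors_left);
}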
1127 
1128 /**
1129  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
1130  * @adapter: board private structure to initialize
1131  *
1132  * This function frees the memory allocated to the q_vectors.  In addition if
1133  * NAPI is enabled it will delete any references to the NAPI struct prior
1134  * to freeing the q_vector.
1135  **/
1136 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
1137 {
1138 	int v_idx = adapter->num_q_vectors;
1139 
1140 	adapter->num_tx_queues = 0;
1141 	adapter->num_xdp_queues = 0;
1142 	adapter->num_rx_queues = 0;
1143 	adapter->num_q_vectors = 0;
1144 
1145 	while (v_idx--)
1146 		ixgbe_free_q_vector(adapter, v_idx);
1147 }
1148 
1149 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
1150 {
1151 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1152 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1153 		pci_disable_msix(adapter->pdev);
1154 		kfree(adapter->msix_entries);
1155 		adapter->msix_entries = NULL;
1156 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1157 		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
1158 		pci_disable_msi(adapter->pdev);
1159 	}
1160 }
1161 
1162 /**
1163  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
1164  * @adapter: board private structure to initialize
1165  *
1166  * Attempt to configure the interrupts using the best available
1167  * capabilities of the hardware and the kernel.
1168  **/
1169 static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
1170 {
1171 	int err;
1172 
1173 	/* We will try to get MSI-X interrupts first */
1174 	if (!ixgbe_acquire_msix_vectors(adapter))
1175 		return;
1176 
1177 	/* At this point, we do not have MSI-X capabilities. We need to
1178 	 * reconfigure or disable various features which require MSI-X
1179 	 * capability.
1180 	 */
1181 
1182 	/* Disable DCB unless we only have a single traffic class */
1183 	if (adapter->hw_tcs > 1) {
1184 		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
1185 		netdev_reset_tc(adapter->netdev);
1186 
1187 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1188 			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
1189 
1190 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1191 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
1192 		adapter->dcb_cfg.pfc_mode_enable = false;
1193 	}
1194 
1195 	adapter->hw_tcs = 0;
1196 	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
1197 	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
1198 
1199 	/* Disable SR-IOV support */
1200 	e_dev_warn("Disabling SR-IOV support\n");
1201 	ixgbe_disable_sriov(adapter);
1202 
1203 	/* Disable RSS */
1204 	e_dev_warn("Disabling RSS support\n");
1205 	adapter->ring_feature[RING_F_RSS].limit = 1;
1206 
1207 	/* recalculate number of queues now that many features have been
1208 	 * changed or disabled.
1209 	 */
1210 	ixgbe_set_num_queues(adapter);
1211 	adapter->num_q_vectors = 1;
1212 
1213 	err = pci_enable_msi(adapter->pdev);
1214 	if (err)
1215 		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
1216 			   err);
1217 	else
1218 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
1219 }
1220 
1221 /**
1222  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
1223  * @adapter: board private structure to initialize
1224  *
1225  * We determine which interrupt scheme to use based on...
1226  * - Kernel support (MSI, MSI-X)
1227  *   - which can be user-defined (via MODULE_PARAM)
1228  * - Hardware queue count (num_*_queues)
1229  *   - defined by miscellaneous hardware support/features (RSS, etc.)
1230  **/
1231 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
1232 {
1233 	int err;
1234 
1235 	/* Number of supported queues */
1236 	ixgbe_set_num_queues(adapter);
1237 
1238 	/* Set interrupt mode */
1239 	ixgbe_set_interrupt_capability(adapter);
1240 
1241 	err = ixgbe_alloc_q_vectors(adapter);
1242 	if (err) {
1243 		e_dev_err("Unable to allocate memory for queue vectors\n");
1244 		goto err_alloc_q_vectors;
1245 	}
1246 
1247 	ixgbe_cache_ring_register(adapter);
1248 
1249 	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
1250 		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
1251 		   adapter->num_rx_queues, adapter->num_tx_queues,
1252 		   adapter->num_xdp_queues);
1253 
1254 	set_bit(__IXGBE_DOWN, &adapter->state);
1255 
1256 	return 0;
1257 
1258 err_alloc_q_vectors:
1259 	ixgbe_reset_interrupt_capability(adapter);
1260 	return err;
1261 }
1262 
1263 /**
1264  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
1265  * @adapter: board private structure to clear interrupt scheme on
1266  *
1267  * We go through and clear interrupt specific resources and reset the structure
1268  * to pre-load conditions
1269  **/
1270 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
1271 {
1272 	adapter->num_tx_queues = 0;
1273 	adapter->num_xdp_queues = 0;
1274 	adapter->num_rx_queues = 0;
1275 
1276 	ixgbe_free_q_vectors(adapter);
1277 	ixgbe_reset_interrupt_capability(adapter);
1278 }
1279 
1280 void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
1281 		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
1282 {
1283 	struct ixgbe_adv_tx_context_desc *context_desc;
1284 	u16 i = tx_ring->next_to_use;
1285 
1286 	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
1287 
1288 	i++;
1289 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1290 
1291 	/* set bits to identify this as an advanced context descriptor */
1292 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1293 
1294 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
1295 	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
1296 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
1297 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
1298 }
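/*
 * Illustrative sketch only: the next_to_use update above treats the
 * descriptor ring as a circular buffer, wrapping back to 0 once the
 * index reaches tx_ring->count.
 */
static inline u16 ixgbe_sketch_next_desc_idx(u16 i, u16 count)
{
	return (++i < count) ? i : 0;
}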
1299 
1300