xref: /linux/drivers/net/ethernet/intel/ice/ice_base.c (revision 2da68a77)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_sriov.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to a VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset],
			  qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state. Returns -ETIMEDOUT if the requested state is
 * not reached after multiple retries; otherwise returns 0.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set the default ITR values associated with
 * this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
	q_vector->tx.type = ICE_TX_CONTAINER;
	q_vector->rx.type = ICE_RX_CONTAINER;

	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		tx_ring->q_vector = NULL;
	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		rx_ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	     GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	     GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	     GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

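/* Worked example (illustrative only, not driver code): with ICE_ITR_GRAN_US
 * equal to 2 (the 2 usec granularity this function programs), the write path
 * above reduces to packing the value 2 into each of the four ITR granularity
 * fields:
 *
 *	u32 regval = ((2 << GLINT_CTL_ITR_GRAN_200_S) & GLINT_CTL_ITR_GRAN_200_M) |
 *		     ((2 << GLINT_CTL_ITR_GRAN_100_S) & GLINT_CTL_ITR_GRAN_100_M) |
 *		     ((2 << GLINT_CTL_ITR_GRAN_50_S) & GLINT_CTL_ITR_GRAN_50_M) |
 *		     ((2 << GLINT_CTL_ITR_GRAN_25_S) & GLINT_CTL_ITR_GRAN_25_M);
 *	wr32(hw, GLINT_CTL, regval);
 *
 * The early return skips this write when all four fields already read back
 * as ICE_ITR_GRAN_US.
 */
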
/**
 * ice_calc_txq_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	if (ring->ch)
		return ring->q_index - ring->ch->base_q;

	/* The idea here is to subtract the queue offset of the TC that the
	 * ring belongs to from the ring's absolute queue index; the result
	 * is the queue's index within that TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}

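/* Worked example (hypothetical numbers): if TC 1 owns queues 8-15, then
 * tc_info[1].qoffset == 8, and a ring with absolute q_index 10 yields
 * handle 10 - 8 == 2, i.e. the third queue within that TC. A channel ring
 * follows the same idea, with ring->ch->base_q as the offset instead.
 */
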
/**
 * ice_eswitch_calc_txq_handle - calculate a unique Tx queue handle
 * @ring: pointer to the ring whose unique index is needed
 *
 * To work correctly with many netdevs, ring->q_index of Tx rings on a
 * switchdev VSI can repeat. Hardware ring setup requires a unique q_index,
 * so calculate one here by finding the index of this ring in vsi->tx_rings.
 *
 * Returns ICE_INVAL_Q_INDEX if the ring is not found. This should never
 * happen, because the VSI is taken from ring->vsi, so the ring must be
 * present in that VSI.
 */
static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	int i;

	ice_for_each_txq(vsi, i) {
		if (vsi->tx_rings[i] == ring)
			return i;
	}

	return ICE_INVAL_Q_INDEX;
}

/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 */
static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type.
	 * The VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is the VF number in the range 0-256
	 * for vmvf_type = VM, it is the VM number in the range 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		if (ring->ch)
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		else
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	if (ring->ch)
		tlan_ctx->src_vsi = ring->ch->vsi_num;
	else
		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Restrict Tx timestamps to the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

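/* Illustrative sketch (assumed values): for a VF VSI, the resulting context
 * carries the absolute VF ID. With a hypothetical VF base ID of 64 and a
 * relative VF ID of 3, the switch above effectively does:
 *
 *	tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
 *	tlan_ctx->vmvf_num = 64 + 3;	// hw->func_caps.vf_base_id + vf_id
 *
 * while a plain PF queue only sets vmvf_type and leaves vmvf_num at zero.
 */
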
/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

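/* Summary of the resulting headroom (ICE_SKB_PAD and XDP_PACKET_HEADROOM are
 * kernel/driver constants; XDP_PACKET_HEADROOM is 256 bytes in mainline):
 *
 *	build_skb rings:	ice_rx_offset() == ICE_SKB_PAD
 *	XDP, no build_skb:	ice_rx_offset() == XDP_PACKET_HEADROOM
 *	legacy Rx:		ice_rx_offset() == 0
 */
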
/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is the Rx queue number in the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);

	/* The L2TSEL flag defines the reported L2 Tags in the receive
	 * descriptor and it needs to remain 1 for non-DVM capable
	 * configurations to not break backward compatibility for VF drivers.
	 * Setting this field to 0 causes the single/outer VLAN tag to be
	 * stripped into the L2TAG2_2ND field in the Rx descriptor. Setting it
	 * to 1 allows the VLAN tag to be stripped into L2TAG1 of the Rx
	 * descriptor, which is where VFs will check for the tag.
	 */
	if (ice_is_dvm_ena(hw)) {
		if (vsi->type == ICE_VSI_VF &&
		    ice_vf_is_port_vlan_ena(vsi->vf))
			rlan_ctx.l2tsel = 1;
		else
			rlan_ctx.l2tsel = 0;
	} else {
		rlan_ctx.l2tsel = 1;
	}

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* For AF_XDP ZC, we disallow packets from spanning multiple buffers,
	 * thus letting us skip that handling in the fast path.
	 */
	if (ring->xsk_pool)
		chain_len = 1;
	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format,
	 * increasing context priority to pick up the profile ID; the default
	 * is 0x01; setting it to 0x03 ensures the profile is programmed even
	 * if the previous context is of the same priority.
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
					false);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	return 0;
}

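/* Worked example of the unit conversions above (assumed values): a ring with
 * a DMA base of 0x1f80000 and 2048 byte buffers would program
 *
 *	rlan_ctx.base = 0x1f80000 >> 7;			// in 128 byte units
 *	rlan_ctx.dbuf = 2048 >> ICE_RLAN_CTX_DBUF_S;	// 2048 / 128 == 16
 *
 * and, with the default chain_len of ICE_MAX_CHAINED_RX_BUFS, rxmax is
 * capped at min(vsi->max_frame, chain_len * 2048).
 */
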
/**
 * ice_vsi_cfg_rxq - Configure an Rx queue
 * @ring: the ring being configured
 *
 * Return 0 on success and a negative value on error.
 */
int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	int err;

	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			/* coverity[check_return] */
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index, ring->q_vector->napi.napi_id);

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index, ring->q_vector->napi.napi_id);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}

	err = ice_setup_rx_ctx(ring);
	if (err) {
		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
			ring->q_index, err);
		return err;
	}

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (!ok) {
			u16 pf_q = ring->vsi->rxq_map[ring->q_index];

			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		}

		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If that is not
 * successful, it tries the scatter approach.
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

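/* Usage sketch (loosely modeled on the driver's callers; the field values
 * here are assumptions for illustration): the caller owns the PF-wide bitmap
 * and mutex, and gets the assigned PF queue IDs back in vsi_map:
 *
 *	struct ice_qs_cfg tx_qs_cfg = {
 *		.qs_mutex	= &pf->avail_q_mutex,
 *		.pf_map		= pf->avail_txqs,
 *		.pf_map_size	= pf->max_pf_txqs,
 *		.q_count	= vsi->alloc_txq,
 *		.scatter_count	= ICE_MAX_SCATTER_TXQS,
 *		.vsi_map	= vsi->txq_map,
 *		.vsi_map_offset	= 0,
 *		.mapping_mode	= ICE_VSI_MAP_CONTIG,
 *	};
 *
 *	if (__ice_vsi_get_qs(&tx_qs_cfg))
 *		return -ENOMEM;
 */
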
/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}

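/* Usage sketch (assumed context): enabling all of a VSI's Rx rings without
 * waiting on each register write, then polling for completion in a second
 * pass, mirrors the split between this helper and ice_vsi_wait_one_rx_ring():
 *
 *	ice_for_each_rxq(vsi, i)
 *		ice_vsi_ctrl_one_rx_ring(vsi, true, i, false);
 *
 *	ice_for_each_rxq(vsi, i)
 *		if (ice_vsi_wait_one_rx_ring(vsi, true, i))
 *			return -ETIMEDOUT;
 */
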
/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT if the requested state is
 * not reached after multiple retries; otherwise returns 0.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign the remaining ring counts from the VSI's queue
	 * counts
	 */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.tx_ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.rx_ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.rx_ring;
			q_vector->rx.rx_ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

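/* Worked example of the distribution above (hypothetical counts): with
 * num_txq == 5 and num_q_vectors == 3, DIV_ROUND_UP(rem, vectors_left)
 * assigns 2, 2 and 1 Tx rings to vectors 0, 1 and 2:
 *
 *	v0: DIV_ROUND_UP(5, 3) == 2, q_base 0 -> Tx rings 0-1
 *	v1: DIV_ROUND_UP(3, 2) == 2, q_base 2 -> Tx rings 2-3
 *	v2: DIV_ROUND_UP(1, 1) == 1, q_base 4 -> Tx ring 4
 *
 * Rx rings are spread the same way using their own remainder counter.
 */
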
/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_cfg_txq - Configure a single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_channel *ch = ring->ch;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int status;
	u16 pf_q;
	u8 tc;

	/* Configure XPS */
	ice_cfg_xps_tx_ring(ring);

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the transmit
	 * comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add the unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
		ring->q_handle = ice_eswitch_calc_txq_handle(ring);

		if (ring->q_handle == ICE_INVAL_Q_INDEX)
			return -ENODEV;
	} else {
		ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
	}

	if (ch)
		status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	else
		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
			status);
		return status;
	}

	/* Add the Tx Queue TEID into the VSI Tx ring from the response.
	 * This will complete configuring and enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}

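/* Illustrative sketch (assumed indices): mapping a Tx queue to MSI-X vector 5
 * with the Tx ITR index builds the QINT_TQCTL value from the same masks used
 * above:
 *
 *	u32 val = QINT_TQCTL_CAUSE_ENA_M |
 *		  ((ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M) |
 *		  ((5 << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
 *
 * The same value is mirrored to the paired XDP Tx queue when XDP is enabled,
 * since both queues share the vector.
 */
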
/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Metadata of the Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	int status;
	u32 val;

	/* clear the cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule the NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* If the disable queue command was exercised during an
	 * active reset flow, -EBUSY is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == -EBUSY) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == -ENOENT) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
			status);
		return status;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's metadata
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps the Tx queue's information
 *
 * Set up a helper struct that contains all the fields needed to stop the Tx
 * queue.
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	struct ice_channel *ch = ring->ch;
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	if (ch) {
		txq_meta->vsi_idx = ch->ch_vsi->idx;
		txq_meta->tc = 0;
	} else {
		txq_meta->vsi_idx = vsi->idx;
		txq_meta->tc = tc;
	}
}
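
/* Usage sketch (modeled on how the driver stops Tx rings; the exact flow may
 * differ): the metadata is snapshotted first so the queue can still be
 * identified while the ring is being torn down:
 *
 *	struct ice_txq_meta txq_meta = { };
 *	int err;
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, ring, &txq_meta);
 */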
1019