/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/irq.h>
#include "en.h"
#include "en/txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en_accel/ktls_txrx.h"

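/* Return true when NAPI for this channel is still running on a CPU in the
 * channel's IRQ affinity mask, i.e. no affinity change has moved it away.
 */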
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
	int current_cpu = smp_processor_id();

	return cpumask_test_cpu(current_cpu, c->aff_mask);
}

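/* Feed the SQ's cumulative packet and byte counters, keyed by the CQ event
 * counter, into net_dim() so dynamic interrupt moderation (DIM) can retune
 * the TX CQ moderation profile. A no-op unless adaptive moderation is
 * enabled on this SQ.
 */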
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct dim_sample dim_sample = {};

	if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
		return;

	dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
	net_dim(&sq->dim, dim_sample);
}

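/* RX counterpart of mlx5e_handle_tx_dim(): sample the RQ counters so DIM
 * can retune the RX CQ moderation profile.
 */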
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
	struct mlx5e_rq_stats *stats = rq->stats;
	struct dim_sample dim_sample = {};

	if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
		return;

	dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
	net_dim(&rq->dim, dim_sample);
}

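/* Post a NOP WQE on the ICOSQ and ring the doorbell. The NOP's completion
 * raises an event on the channel's CQ, which reschedules NAPI; this is how
 * the driver kicks a channel without having real work to post.
 */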
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *nopwqe;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
		.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
		.num_wqebbs = 1,
	};

	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

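/* Drive the AF_XDP datapath for the channel: transmit pending descriptors
 * from the XSK TX ring and refill the XSK RQ, maintaining the need_wakeup
 * flags along the way. Returns true if more XSK work remains.
 */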
static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
{
	bool busy_xsk = false, xsk_rx_alloc_err;

	/* Handle the race between the application querying need_wakeup and the
	 * driver setting it:
	 * 1. Update need_wakeup both before and after the TX. If it goes to
	 * "yes", it can only happen with the first update.
	 * 2. If the application queried need_wakeup before we set it, the
	 * packets will be transmitted anyway, even w/o a wakeup.
	 * 3. Give a chance to clear need_wakeup after new packets were queued
	 * for TX.
	 */
	mlx5e_xsk_update_tx_wakeup(xsksq);
	busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
	mlx5e_xsk_update_tx_wakeup(xsksq);

	xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
					   mlx5e_post_rx_mpwqes,
					   mlx5e_post_rx_wqes,
					   xskrq);
	busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);

	return busy_xsk;
}

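/* Per-channel NAPI handler. Polls the TX, QoS, XDP, RX/XSK and ICOSQ
 * completion queues under RCU and reposts RX WQEs. If work remains and the
 * IRQ affinity is unchanged, it stays scheduled by returning the full
 * budget; otherwise it completes NAPI and re-arms all of the channel's CQs.
 */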
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_xdpsq *xsksq = &c->xsksq;
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_rq *xskrq = &c->xskrq;
	struct mlx5e_rq *rq = &c->rq;
	bool aff_change = false;
	bool busy_xsk = false;
	bool busy = false;
	int work_done = 0;
	u16 qos_sqs_size;
	bool xsk_open;
	int i;

	rcu_read_lock();

	qos_sqs = rcu_dereference(c->qos_sqs);

	xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	ch_stats->poll++;

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

	if (unlikely(qos_sqs)) {
		smp_rmb(); /* Pairs with mlx5e_qos_alloc_queues. */
		qos_sqs_size = READ_ONCE(c->qos_sqs_size);

		for (i = 0; i < qos_sqs_size; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq)
				busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
		}
	}

	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);

	if (c->xdp)
		busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);

	if (likely(budget)) { /* budget=0 means: don't poll rx rings */
		if (xsk_open)
			work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);

		if (likely(budget - work_done))
			work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);

		busy |= work_done == budget;
	}

	mlx5e_poll_ico_cq(&c->icosq.cq);
	if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
		/* Don't clear the flag if nothing was polled to prevent
		 * queueing more WQEs and overflowing the async ICOSQ.
		 */
		clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);

	/* Keep after async ICOSQ CQ poll */
	if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
		busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);

	busy |= INDIRECT_CALL_2(rq->post_wqes,
				mlx5e_post_rx_mpwqes,
				mlx5e_post_rx_wqes,
				rq);
	if (xsk_open) {
		busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
		busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
	}

	busy |= busy_xsk;

	if (busy) {
		if (likely(mlx5e_channel_no_affinity_change(c))) {
			work_done = budget;
			goto out;
		}
		ch_stats->aff_change++;
		aff_change = true;
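		/* Returning the full budget would keep NAPI polling on this
		 * CPU; report one unit less so napi_complete_done() can
		 * succeed and the channel can migrate to the new CPU.
		 */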
		if (budget && work_done == budget)
			work_done--;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

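	/* NAPI is complete: re-arm every CQ so the next completion raises an
	 * interrupt and schedules NAPI again.
	 */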
	for (i = 0; i < c->num_tc; i++) {
		mlx5e_handle_tx_dim(&c->sq[i]);
		mlx5e_cq_arm(&c->sq[i].cq);
	}
	if (unlikely(qos_sqs)) {
		for (i = 0; i < qos_sqs_size; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq) {
				mlx5e_handle_tx_dim(sq);
				mlx5e_cq_arm(&sq->cq);
			}
		}
	}

	mlx5e_handle_rx_dim(rq);

	mlx5e_cq_arm(&rq->cq);
	mlx5e_cq_arm(&c->icosq.cq);
	mlx5e_cq_arm(&c->async_icosq.cq);
	mlx5e_cq_arm(&c->xdpsq.cq);

	if (xsk_open) {
		mlx5e_handle_rx_dim(xskrq);
		mlx5e_cq_arm(&xsksq->cq);
		mlx5e_cq_arm(&xskrq->cq);
	}

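	/* If XSK work is still pending but NAPI just completed after an
	 * affinity change, force an interrupt with a NOP so polling resumes
	 * promptly on the new CPU.
	 */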
	if (unlikely(aff_change && busy_xsk)) {
		mlx5e_trigger_irq(&c->icosq);
		ch_stats->force_irq++;
	}

out:
	rcu_read_unlock();

	return work_done;
}

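/* CQ completion callback, invoked from EQ context: schedule the channel's
 * NAPI and account the event.
 */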
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	napi_schedule(cq->napi);
	cq->event_ctr++;
	cq->ch_stats->events++;
}

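/* CQ error callback: nothing is recovered here, just log the CQ number and
 * event type.
 */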
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
	struct net_device *netdev = cq->netdev;

	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
		   __func__, mcq->cqn, event);
}