// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include "nfp_app.h"
#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp:		NFP Net data path struct
 * @dma_addr:	Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:  TX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 * @is_xdp:   Is this an XDP TX ring?
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx,
		     bool is_xdp)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;
	tx_ring->is_xdp = is_xdp;
	u64_stats_init(&tx_ring->r_vec->tx_sync);

	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for an RX ring
 * @rx_ring:  RX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;
	u64_stats_init(&rx_ring->r_vec->rx_sync);

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}

/**
 * nfp_net_rx_ring_reset() - Reflect in SW the freelist state after disable
 * @rx_ring:	RX ring structure
 *
 * Assumes that the device is stopped, must be idempotent.
 */
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* wr_p == rd_p == 0 means the ring was never fed FL bufs.  RX rings
	 * are always kept at cnt - 1 FL bufs.
	 */
	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
		return;

	/* Move the empty entry to the end of the list */
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	if (rx_ring->r_vec->xsk_pool) {
		rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
		memset(&rx_ring->xsk_rxbufs[last_idx], 0,
		       sizeof(*rx_ring->xsk_rxbufs));
	} else {
		rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
		memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
	}

	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
}

/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries.  After the device is disabled nfp_net_rx_ring_reset() must be
 * called to restore required ring geometry.
 */
static void
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* A NULL frag can only happen when the initial filling of the
		 * ring fails to allocate enough buffers and we get called to
		 * free the ones already allocated.
		 */
		if (!rx_ring->rxbufs[i].frag)
			continue;

		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
		nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
		rx_ring->rxbufs[i].dma_addr = 0;
		rx_ring->rxbufs[i].frag = NULL;
	}
}

/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to allocate buffers for
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return 0;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
		if (!rxbufs[i].frag) {
			nfp_net_rx_ring_bufs_free(dp, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}

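/**
 * nfp_net_tx_rings_prepare() - Allocate TX rings and their buffers
 * @nn:	 NFP Net device structure
 * @dp:	 NFP Net data path struct
 *
 * TX rings past the stack TX rings are set up as XDP TX rings.
 *
 * Return: 0 on success, negative errno otherwise.
 */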
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
			       GFP_KERNEL);
	if (!dp->tx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_tx_rings; r++) {
		int bias = 0;

		if (r >= dp->num_stack_tx_rings)
			bias = dp->num_stack_tx_rings;

		nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
				     r, bias);

		if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
			goto err_free_prev;

		if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
err_free_ring:
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}
	kfree(dp->tx_rings);
	return -ENOMEM;
}

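/**
 * nfp_net_tx_rings_free() - Free TX ring buffers and the rings themselves
 * @dp:	 NFP Net data path struct
 */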
void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_tx_rings; r++) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}

	kfree(dp->tx_rings);
}

/**
 * nfp_net_rx_ring_free() - Free resources allocated to an RX ring
 * @rx_ring:  RX ring to free
 */
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	if (dp->netdev)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		kvfree(rx_ring->xsk_rxbufs);
	else
		kvfree(rx_ring->rxbufs);

	if (rx_ring->rxds)
		dma_free_coherent(dp->dev, rx_ring->size,
				  rx_ring->rxds, rx_ring->dma);

	rx_ring->cnt = 0;
	rx_ring->rxbufs = NULL;
	rx_ring->xsk_rxbufs = NULL;
	rx_ring->rxds = NULL;
	rx_ring->dma = 0;
	rx_ring->size = 0;
}

/**
 * nfp_net_rx_ring_alloc() - Allocate resources for an RX ring
 * @dp:	      NFP Net data path struct
 * @rx_ring:  RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
	enum xdp_mem_type mem_type;
	size_t rxbuf_sw_desc_sz;
	int err;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		mem_type = MEM_TYPE_XSK_BUFF_POOL;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
	} else {
		mem_type = MEM_TYPE_PAGE_ORDER0;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
	}

	if (dp->netdev) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
				       rx_ring->idx, rx_ring->r_vec->napi.napi_id);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
		if (err)
			goto err_alloc;
	}

	rx_ring->cnt = dp->rxd_cnt;
	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!rx_ring->rxds) {
		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    rx_ring->cnt);
		goto err_alloc;
	}

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					       GFP_KERNEL);
		if (!rx_ring->xsk_rxbufs)
			goto err_alloc;
	} else {
		rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					   GFP_KERNEL);
		if (!rx_ring->rxbufs)
			goto err_alloc;
	}

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);
	return -ENOMEM;
}

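/**
 * nfp_net_rx_rings_prepare() - Allocate RX rings and fill them with buffers
 * @nn:	 NFP Net device structure
 * @dp:	 NFP Net data path struct
 *
 * Return: 0 on success, negative errno otherwise.
 */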
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
			       GFP_KERNEL);
	if (!dp->rx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

		if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
			goto err_free_prev;

		if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}
	kfree(dp->rx_rings);
	return -ENOMEM;
}

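/**
 * nfp_net_rx_rings_free() - Free RX ring buffers and the rings themselves
 * @dp:	 NFP Net data path struct
 */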
void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}

	kfree(dp->rx_rings);
}

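/**
 * nfp_net_rx_ring_hw_cfg_write() - Write RX ring configuration to the device
 * @nn:	      NFP Net device structure
 * @rx_ring:  RX ring to configure
 * @idx:      Ring index
 */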
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
	/* Write the DMA address, size and MSI-X info to the device */
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}

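/**
 * nfp_net_tx_ring_hw_cfg_write() - Write TX ring configuration to the device
 * @nn:	      NFP Net device structure
 * @tx_ring:  TX ring to configure
 * @idx:      Ring index
 */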
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}

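/**
 * nfp_net_vec_clear_ring_data() - Clear RX/TX ring configuration on the device
 * @nn:	  NFP Net device structure
 * @idx:  Ring index to clear
 */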
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}

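/* The helpers below are thin wrappers which dispatch to the NFD3 datapath
 * implementation.
 */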
void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	nfp_nfd3_tx_ring_reset(dp, tx_ring);
}

void nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
				   struct nfp_net_rx_ring *rx_ring)
{
	nfp_nfd3_rx_ring_fill_freelist(dp, rx_ring);
}

int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	return nfp_nfd3_tx_ring_alloc(dp, tx_ring);
}

void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	nfp_nfd3_tx_ring_free(tx_ring);
}

int nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
			       struct nfp_net_tx_ring *tx_ring)
{
	return nfp_nfd3_tx_ring_bufs_alloc(dp, tx_ring);
}

void nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
			       struct nfp_net_tx_ring *tx_ring)
{
	nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
}

void
nfp_net_debugfs_print_tx_descs(struct seq_file *file,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p)
{
	nfp_nfd3_print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}

bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	return __nfp_nfd3_ctrl_tx(nn, skb);
}

bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	return nfp_nfd3_ctrl_tx(nn, skb);
}