1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qede NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
6
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/skbuff.h>
10 #include <linux/bpf_trace.h>
11 #include <net/udp_tunnel.h>
12 #include <linux/ip.h>
13 #include <net/ipv6.h>
14 #include <net/tcp.h>
15 #include <linux/if_ether.h>
16 #include <linux/if_vlan.h>
17 #include <net/ip6_checksum.h>
18 #include "qede_ptp.h"
19
20 #include <linux/qed/qed_if.h>
21 #include "qede.h"
22 /*********************************
23 * Content also used by slowpath *
24 *********************************/
25
26 int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
27 {
28 struct sw_rx_data *sw_rx_data;
29 struct eth_rx_bd *rx_bd;
30 dma_addr_t mapping;
31 struct page *data;
32
33 /* In case lazy-allocation is allowed, postpone allocation until the
34 * end of the NAPI run. We'd still need to make sure the Rx ring has
35 * sufficient buffers to guarantee an additional Rx interrupt.
36 */
37 if (allow_lazy && likely(rxq->filled_buffers > 12)) {
38 rxq->filled_buffers--;
39 return 0;
40 }
41
42 data = alloc_pages(GFP_ATOMIC, 0);
43 if (unlikely(!data))
44 return -ENOMEM;
45
46 /* Map the entire page as it would be used
47 * for multiple RX buffer segment size mapping.
48 */
49 mapping = dma_map_page(rxq->dev, data, 0,
50 PAGE_SIZE, rxq->data_direction);
51 if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
52 __free_page(data);
53 return -ENOMEM;
54 }
55
56 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
57 sw_rx_data->page_offset = 0;
58 sw_rx_data->data = data;
59 sw_rx_data->mapping = mapping;
60
61 /* Advance PROD and get BD pointer */
62 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
63 WARN_ON(!rx_bd);
64 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
65 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
66 rxq->rx_headroom);
67
68 rxq->sw_rx_prod++;
69 rxq->filled_buffers++;
70
71 return 0;
72 }
73
74 /* Unmap the data and free skb */
75 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
76 {
77 u16 idx = txq->sw_tx_cons;
78 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
79 struct eth_tx_1st_bd *first_bd;
80 struct eth_tx_bd *tx_data_bd;
81 int bds_consumed = 0;
82 int nbds;
83 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
84 int i, split_bd_len = 0;
85
86 if (unlikely(!skb)) {
87 DP_ERR(edev,
88 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
89 idx, txq->sw_tx_cons, txq->sw_tx_prod);
90 return -1;
91 }
92
93 *len = skb->len;
94
95 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
96
97 bds_consumed++;
98
99 nbds = first_bd->data.nbds;
100
101 if (data_split) {
102 struct eth_tx_bd *split = (struct eth_tx_bd *)
103 qed_chain_consume(&txq->tx_pbl);
104 split_bd_len = BD_UNMAP_LEN(split);
105 bds_consumed++;
106 }
107 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
108 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
109
110 /* Unmap the data of the skb frags */
111 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
112 tx_data_bd = (struct eth_tx_bd *)
113 qed_chain_consume(&txq->tx_pbl);
114 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
115 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
116 }
117
118 while (bds_consumed++ < nbds)
119 qed_chain_consume(&txq->tx_pbl);
120
121 /* Free skb */
122 dev_kfree_skb_any(skb);
123 txq->sw_tx_ring.skbs[idx].skb = NULL;
124 txq->sw_tx_ring.skbs[idx].flags = 0;
125
126 return 0;
127 }
128
129 /* Unmap the data and free skb when mapping failed during start_xmit */
130 static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
131 struct eth_tx_1st_bd *first_bd,
132 int nbd, bool data_split)
133 {
134 u16 idx = txq->sw_tx_prod;
135 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
136 struct eth_tx_bd *tx_data_bd;
137 int i, split_bd_len = 0;
138
139 /* Return prod to its position before this skb was handled */
140 qed_chain_set_prod(&txq->tx_pbl,
141 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
142
143 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
144
145 if (data_split) {
146 struct eth_tx_bd *split = (struct eth_tx_bd *)
147 qed_chain_produce(&txq->tx_pbl);
148 split_bd_len = BD_UNMAP_LEN(split);
149 nbd--;
150 }
151
152 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
153 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
154
155 /* Unmap the data of the skb frags */
156 for (i = 0; i < nbd; i++) {
157 tx_data_bd = (struct eth_tx_bd *)
158 qed_chain_produce(&txq->tx_pbl);
159 if (tx_data_bd->nbytes)
160 dma_unmap_page(txq->dev,
161 BD_UNMAP_ADDR(tx_data_bd),
162 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
163 }
164
165 /* Return again prod to its position before this skb was handled */
166 qed_chain_set_prod(&txq->tx_pbl,
167 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
168
169 /* Free skb */
170 dev_kfree_skb_any(skb);
171 txq->sw_tx_ring.skbs[idx].skb = NULL;
172 txq->sw_tx_ring.skbs[idx].flags = 0;
173 }
174
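/* Classify an skb for transmit offloads and return a bitmask of XMIT_*
 * flags (L4 checksum, LSO, tunnel encapsulation); *ipv6_ext is set when
 * the IPv6 next-header field indicates the extra parsing BDs are needed.
 */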
175 static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
176 {
177 u32 rc = XMIT_L4_CSUM;
178 __be16 l3_proto;
179
180 if (skb->ip_summed != CHECKSUM_PARTIAL)
181 return XMIT_PLAIN;
182
183 l3_proto = vlan_get_protocol(skb);
184 if (l3_proto == htons(ETH_P_IPV6) &&
185 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
186 *ipv6_ext = 1;
187
188 if (skb->encapsulation) {
189 rc |= XMIT_ENC;
190 if (skb_is_gso(skb)) {
191 unsigned short gso_type = skb_shinfo(skb)->gso_type;
192
193 if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
194 (gso_type & SKB_GSO_GRE_CSUM))
195 rc |= XMIT_ENC_GSO_L4_CSUM;
196
197 rc |= XMIT_LSO;
198 return rc;
199 }
200 }
201
202 if (skb_is_gso(skb))
203 rc |= XMIT_LSO;
204
205 return rc;
206 }
207
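/* Fill the 2nd/3rd Tx BDs with the L4 header offset, pseudo-checksum mode
 * and TCP header length required for IPv6 packets with extension headers,
 * which the device parser cannot handle on its own.
 */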
208 static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
209 struct eth_tx_2nd_bd *second_bd,
210 struct eth_tx_3rd_bd *third_bd)
211 {
212 u8 l4_proto;
213 u16 bd2_bits1 = 0, bd2_bits2 = 0;
214
215 bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
216
217 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
218 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
219 << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
220
221 bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
222 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
223
224 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
225 l4_proto = ipv6_hdr(skb)->nexthdr;
226 else
227 l4_proto = ip_hdr(skb)->protocol;
228
229 if (l4_proto == IPPROTO_UDP)
230 bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
231
232 if (third_bd)
233 third_bd->data.bitfields |=
234 cpu_to_le16(((tcp_hdrlen(skb) / 4) &
235 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
236 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
237
238 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
239 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
240 }
241
242 static int map_frag_to_bd(struct qede_tx_queue *txq,
243 skb_frag_t *frag, struct eth_tx_bd *bd)
244 {
245 dma_addr_t mapping;
246
247 /* Map skb non-linear frag data for DMA */
248 mapping = skb_frag_dma_map(txq->dev, frag, 0,
249 skb_frag_size(frag), DMA_TO_DEVICE);
250 if (unlikely(dma_mapping_error(txq->dev, mapping)))
251 return -ENOMEM;
252
253 /* Setup the data pointer of the frag data */
254 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
255
256 return 0;
257 }
258
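/* Return the header length up to and including the TCP header - inner
 * headers for encapsulated packets, outer headers otherwise.
 */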
259 static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
260 {
261 if (is_encap_pkt)
262 return (skb_inner_transport_header(skb) +
263 inner_tcp_hdrlen(skb) - skb->data);
264 else
265 return (skb_transport_header(skb) +
266 tcp_hdrlen(skb) - skb->data);
267 }
268
269 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
270 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
271 static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
272 {
273 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
274
275 if (xmit_type & XMIT_LSO) {
276 int hlen;
277
278 hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
279
280 /* linear payload would require its own BD */
281 if (skb_headlen(skb) > hlen)
282 allowed_frags--;
283 }
284
285 return (skb_shinfo(skb)->nr_frags > allowed_frags);
286 }
287 #endif
288
289 static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
290 {
291 /* wmb makes sure that the BDs data is updated before updating the
292 * producer, otherwise FW may read old data from the BDs.
293 */
294 wmb();
295 barrier();
296 writel(txq->tx_db.raw, txq->doorbell_addr);
297
298 /* Fence required to flush the write combined buffer, since another
299 * CPU may write to the same doorbell address and data may be lost
300 * due to relaxed order nature of write combined bar.
301 */
302 wmb();
303 }
304
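/* Post a single XDP buffer on the XDP Tx ring: one BD per frame, with the
 * mapping and page/frame remembered in the shadow ring for completion.
 * Returns -ENOMEM if the ring is full.
 */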
305 static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
306 u16 len, struct page *page, struct xdp_frame *xdpf)
307 {
308 struct eth_tx_1st_bd *bd;
309 struct sw_tx_xdp *xdp;
310 u16 val;
311
312 if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
313 txq->num_tx_buffers)) {
314 txq->stopped_cnt++;
315 return -ENOMEM;
316 }
317
318 bd = qed_chain_produce(&txq->tx_pbl);
319 bd->data.nbds = 1;
320 bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
321
322 val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
323 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
324
325 bd->data.bitfields = cpu_to_le16(val);
326
327 /* We can safely ignore the offset, as it's 0 for XDP */
328 BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
329
330 xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
331 xdp->mapping = dma;
332 xdp->page = page;
333 xdp->xdpf = xdpf;
334
335 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
336
337 return 0;
338 }
339
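/* .ndo_xdp_xmit - map and queue a batch of XDP frames on the XDP Tx queue
 * selected by the current CPU, ringing the doorbell only when
 * XDP_XMIT_FLUSH is requested. Returns the number of frames queued.
 */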
340 int qede_xdp_transmit(struct net_device *dev, int n_frames,
341 struct xdp_frame **frames, u32 flags)
342 {
343 struct qede_dev *edev = netdev_priv(dev);
344 struct device *dmadev = &edev->pdev->dev;
345 struct qede_tx_queue *xdp_tx;
346 struct xdp_frame *xdpf;
347 dma_addr_t mapping;
348 int i, nxmit = 0;
349 u16 xdp_prod;
350
351 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
352 return -EINVAL;
353
354 if (unlikely(!netif_running(dev)))
355 return -ENETDOWN;
356
357 i = smp_processor_id() % edev->total_xdp_queues;
358 xdp_tx = edev->fp_array[i].xdp_tx;
359
360 spin_lock(&xdp_tx->xdp_tx_lock);
361
362 for (i = 0; i < n_frames; i++) {
363 xdpf = frames[i];
364
365 mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
366 DMA_TO_DEVICE);
367 if (unlikely(dma_mapping_error(dmadev, mapping)))
368 break;
369
370 if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
371 NULL, xdpf)))
372 break;
373 nxmit++;
374 }
375
376 if (flags & XDP_XMIT_FLUSH) {
377 xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
378
379 xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
380 qede_update_tx_producer(xdp_tx);
381 }
382
383 spin_unlock(&xdp_tx->xdp_tx_lock);
384
385 return nxmit;
386 }
387
388 int qede_txq_has_work(struct qede_tx_queue *txq)
389 {
390 u16 hw_bd_cons;
391
392 /* Tell compiler that consumer and producer can change */
393 barrier();
394 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
395 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
396 return 0;
397
398 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
399 }
400
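/* Reclaim completed XDP Tx descriptors, unmapping and releasing either the
 * transmitted frame (from .ndo_xdp_xmit) or the locally recycled Rx page.
 */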
401 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
402 {
403 struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
404 struct device *dev = &edev->pdev->dev;
405 struct xdp_frame *xdpf;
406 u16 hw_bd_cons;
407
408 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
409 barrier();
410
411 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
412 xdp_info = xdp_arr + txq->sw_tx_cons;
413 xdpf = xdp_info->xdpf;
414
415 if (xdpf) {
416 dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
417 DMA_TO_DEVICE);
418 xdp_return_frame(xdpf);
419
420 xdp_info->xdpf = NULL;
421 } else {
422 dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
423 DMA_BIDIRECTIONAL);
424 __free_page(xdp_info->page);
425 }
426
427 qed_chain_consume(&txq->tx_pbl);
428 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
429 txq->xmit_pkts++;
430 }
431 }
432
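/* Reclaim completed Tx descriptors up to the HW consumer, report completed
 * work to the stack and re-wake the Tx queue if it was stopped and enough
 * ring space has been freed.
 */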
433 static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
434 {
435 unsigned int pkts_compl = 0, bytes_compl = 0;
436 struct netdev_queue *netdev_txq;
437 u16 hw_bd_cons;
438 int rc;
439
440 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
441
442 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
443 barrier();
444
445 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
446 int len = 0;
447
448 rc = qede_free_tx_pkt(edev, txq, &len);
449 if (rc) {
450 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
451 hw_bd_cons,
452 qed_chain_get_cons_idx(&txq->tx_pbl));
453 break;
454 }
455
456 bytes_compl += len;
457 pkts_compl++;
458 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
459 txq->xmit_pkts++;
460 }
461
462 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
463
464 /* Need to make the tx_bd_cons update visible to start_xmit()
465 * before checking for netif_tx_queue_stopped(). Without the
466 * memory barrier, there is a small possibility that
467 * start_xmit() will miss it and cause the queue to be stopped
468 * forever.
469 * On the other hand we need an rmb() here to ensure the proper
470 * ordering of bit testing in the following
471 * netif_tx_queue_stopped(txq) call.
472 */
473 smp_mb();
474
475 if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
476 /* Taking tx_lock is needed to prevent reenabling the queue
477 * while it's empty. This could have happened if rx_action() gets
478 * suspended in qede_tx_int() after the condition before
479 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
480 *
481 * stops the queue->sees fresh tx_bd_cons->releases the queue->
482 * sends some packets consuming the whole queue again->
483 * stops the queue
484 */
485
486 __netif_tx_lock(netdev_txq, smp_processor_id());
487
488 if ((netif_tx_queue_stopped(netdev_txq)) &&
489 (edev->state == QEDE_STATE_OPEN) &&
490 (qed_chain_get_elem_left(&txq->tx_pbl)
491 >= (MAX_SKB_FRAGS + 1))) {
492 netif_tx_wake_queue(netdev_txq);
493 DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
494 "Wake queue was called\n");
495 }
496
497 __netif_tx_unlock(netdev_txq);
498 }
499
500 return 0;
501 }
502
503 bool qede_has_rx_work(struct qede_rx_queue *rxq)
504 {
505 u16 hw_comp_cons, sw_comp_cons;
506
507 /* Tell compiler that status block fields can change */
508 barrier();
509
510 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
511 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
512
513 return hw_comp_cons != sw_comp_cons;
514 }
515
516 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
517 {
518 qed_chain_consume(&rxq->rx_bd_ring);
519 rxq->sw_rx_cons++;
520 }
521
522 /* This function reuses the buffer (from an offset) from
523 * consumer index to producer index in the bd ring
524 */
525 static inline void qede_reuse_page(struct qede_rx_queue *rxq,
526 struct sw_rx_data *curr_cons)
527 {
528 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
529 struct sw_rx_data *curr_prod;
530 dma_addr_t new_mapping;
531
532 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
533 *curr_prod = *curr_cons;
534
535 new_mapping = curr_prod->mapping + curr_prod->page_offset;
536
537 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
538 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
539 rxq->rx_headroom);
540
541 rxq->sw_rx_prod++;
542 curr_cons->data = NULL;
543 }
544
545 /* In case of allocation failures reuse buffers
546 * from consumer index to produce buffers for firmware
547 */
548 void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
549 {
550 struct sw_rx_data *curr_cons;
551
552 for (; count > 0; count--) {
553 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
554 qede_reuse_page(rxq, curr_cons);
555 qede_rx_bd_ring_consume(rxq);
556 }
557 }
558
559 static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
560 struct sw_rx_data *curr_cons)
561 {
562 /* Move to the next segment in the page */
563 curr_cons->page_offset += rxq->rx_buf_seg_size;
564
565 if (curr_cons->page_offset == PAGE_SIZE) {
566 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
567 /* Since we failed to allocate new buffer
568 * current buffer can be used again.
569 */
570 curr_cons->page_offset -= rxq->rx_buf_seg_size;
571
572 return -ENOMEM;
573 }
574
575 dma_unmap_page(rxq->dev, curr_cons->mapping,
576 PAGE_SIZE, rxq->data_direction);
577 } else {
578 /* Increment refcount of the page as we don't want
579 * network stack to take the ownership of the page
580 * which can be recycled multiple times by the driver.
581 */
582 page_ref_inc(curr_cons->data);
583 qede_reuse_page(rxq, curr_cons);
584 }
585
586 return 0;
587 }
588
589 void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
590 {
591 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
592 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
593 struct eth_rx_prod_data rx_prods = {0};
594
595 /* Update producers */
596 rx_prods.bd_prod = cpu_to_le16(bd_prod);
597 rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
598
599 /* Make sure that the BD and SGE data is updated before updating the
600 * producers since FW might read the BD/SGE right after the producer
601 * is updated.
602 */
603 wmb();
604
605 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
606 (u32 *)&rx_prods);
607 }
608
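/* Propagate the RSS hash reported in the CQE to the skb, mapping the HW
 * hash type onto PKT_HASH_TYPE_L3/L4.
 */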
609 static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
610 {
611 enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
612 enum rss_hash_type htype;
613 u32 hash = 0;
614
615 htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
616 if (htype) {
617 hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
618 (htype == RSS_HASH_TYPE_IPV6)) ?
619 PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
620 hash = le32_to_cpu(rss_hash);
621 }
622 skb_set_hash(skb, hash, hash_type);
623 }
624
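/* Translate the driver's QEDE_CSUM_* flags into skb checksum state,
 * including the inner-csum level for tunnelled packets.
 */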
625 static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
626 {
627 skb_checksum_none_assert(skb);
628
629 if (csum_flag & QEDE_CSUM_UNNECESSARY)
630 skb->ip_summed = CHECKSUM_UNNECESSARY;
631
632 if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
633 skb->csum_level = 1;
634 skb->encapsulation = 1;
635 }
636 }
637
638 static inline void qede_skb_receive(struct qede_dev *edev,
639 struct qede_fastpath *fp,
640 struct qede_rx_queue *rxq,
641 struct sk_buff *skb, u16 vlan_tag)
642 {
643 if (vlan_tag)
644 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
645
646 napi_gro_receive(&fp->napi, skb);
647 }
648
649 static void qede_set_gro_params(struct qede_dev *edev,
650 struct sk_buff *skb,
651 struct eth_fast_path_rx_tpa_start_cqe *cqe)
652 {
653 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
654
655 if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
656 PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
657 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
658 else
659 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
660
661 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
662 cqe->header_len;
663 }
664
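/* Attach the current Rx buffer as a page fragment to the aggregation skb
 * and reuse/replace the buffer; on failure the aggregation is marked as
 * errored and the buffer is recycled back to the ring.
 */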
665 static int qede_fill_frag_skb(struct qede_dev *edev,
666 struct qede_rx_queue *rxq,
667 u8 tpa_agg_index, u16 len_on_bd)
668 {
669 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
670 NUM_RX_BDS_MAX];
671 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
672 struct sk_buff *skb = tpa_info->skb;
673
674 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
675 goto out;
676
677 /* Add one frag and update the appropriate fields in the skb */
678 skb_fill_page_desc(skb, tpa_info->frag_id++,
679 current_bd->data,
680 current_bd->page_offset + rxq->rx_headroom,
681 len_on_bd);
682
683 if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
684 /* Incr page ref count to reuse on allocation failure
685 * so that it doesn't get freed while freeing SKB.
686 */
687 page_ref_inc(current_bd->data);
688 goto out;
689 }
690
691 qede_rx_bd_ring_consume(rxq);
692
693 skb->data_len += len_on_bd;
694 skb->truesize += rxq->rx_buf_seg_size;
695 skb->len += len_on_bd;
696
697 return 0;
698
699 out:
700 tpa_info->state = QEDE_AGG_STATE_ERROR;
701 qede_recycle_rx_bd_ring(rxq, 1);
702
703 return -ENOMEM;
704 }
705
706 static bool qede_tunn_exist(u16 flag)
707 {
708 return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
709 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
710 }
711
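/* Validate the HW checksum indications for a tunnelled packet: returns
 * QEDE_CSUM_ERROR on any checked IP/L4 checksum error, otherwise which
 * checksums may be reported as unnecessary.
 */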
712 static u8 qede_check_tunn_csum(u16 flag)
713 {
714 u16 csum_flag = 0;
715 u8 tcsum = 0;
716
717 if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
718 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
719 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
720 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
721
722 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
723 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
724 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
725 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
726 tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
727 }
728
729 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
730 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
731 PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
732 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
733
734 if (csum_flag & flag)
735 return QEDE_CSUM_ERROR;
736
737 return QEDE_CSUM_UNNECESSARY | tcsum;
738 }
739
740 static inline struct sk_buff *
741 qede_build_skb(struct qede_rx_queue *rxq,
742 struct sw_rx_data *bd, u16 len, u16 pad)
743 {
744 struct sk_buff *skb;
745 void *buf;
746
747 buf = page_address(bd->data) + bd->page_offset;
748 skb = build_skb(buf, rxq->rx_buf_seg_size);
749
750 skb_reserve(skb, pad);
751 skb_put(skb, len);
752
753 return skb;
754 }
755
756 static struct sk_buff *
757 qede_tpa_rx_build_skb(struct qede_dev *edev,
758 struct qede_rx_queue *rxq,
759 struct sw_rx_data *bd, u16 len, u16 pad,
760 bool alloc_skb)
761 {
762 struct sk_buff *skb;
763
764 skb = qede_build_skb(rxq, bd, len, pad);
765 bd->page_offset += rxq->rx_buf_seg_size;
766
767 if (bd->page_offset == PAGE_SIZE) {
768 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
769 DP_NOTICE(edev,
770 "Failed to allocate RX buffer for tpa start\n");
771 bd->page_offset -= rxq->rx_buf_seg_size;
772 page_ref_inc(bd->data);
773 dev_kfree_skb_any(skb);
774 return NULL;
775 }
776 } else {
777 page_ref_inc(bd->data);
778 qede_reuse_page(rxq, bd);
779 }
780
781 /* We've consumed the first BD and prepared an SKB */
782 qede_rx_bd_ring_consume(rxq);
783
784 return skb;
785 }
786
787 static struct sk_buff *
788 qede_rx_build_skb(struct qede_dev *edev,
789 struct qede_rx_queue *rxq,
790 struct sw_rx_data *bd, u16 len, u16 pad)
791 {
792 struct sk_buff *skb = NULL;
793
794 /* For smaller frames still need to allocate skb, memcpy
795 * data and benefit in reusing the page segment instead of
796 * un-mapping it.
797 */
798 if ((len + pad <= edev->rx_copybreak)) {
799 unsigned int offset = bd->page_offset + pad;
800
801 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
802 if (unlikely(!skb))
803 return NULL;
804
805 skb_reserve(skb, pad);
806 skb_put_data(skb, page_address(bd->data) + offset, len);
807 qede_reuse_page(rxq, bd);
808 goto out;
809 }
810
811 skb = qede_build_skb(rxq, bd, len, pad);
812
813 if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
814 /* Incr page ref count to reuse on allocation failure so
815 * that it doesn't get freed while freeing SKB [as its
816 * already mapped there].
817 */
818 page_ref_inc(bd->data);
819 dev_kfree_skb_any(skb);
820 return NULL;
821 }
822 out:
823 /* We've consumed the first BD and prepared an SKB */
824 qede_rx_bd_ring_consume(rxq);
825
826 return skb;
827 }
828
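/* Handle a TPA-start CQE: build the skb around the first buffer, latch the
 * VLAN tag, RSS hash and GRO parameters, and consume any additional length
 * entries carried by the CQE.
 */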
829 static void qede_tpa_start(struct qede_dev *edev,
830 struct qede_rx_queue *rxq,
831 struct eth_fast_path_rx_tpa_start_cqe *cqe)
832 {
833 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
834 struct sw_rx_data *sw_rx_data_cons;
835 u16 pad;
836
837 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
838 pad = cqe->placement_offset + rxq->rx_headroom;
839
840 tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
841 le16_to_cpu(cqe->len_on_first_bd),
842 pad, false);
843 tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
844 tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
845
846 if (unlikely(!tpa_info->skb)) {
847 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
848
849 /* Consume from ring but do not produce since
850 * this might be used by FW still, it will be re-used
851 * at TPA end.
852 */
853 tpa_info->tpa_start_fail = true;
854 qede_rx_bd_ring_consume(rxq);
855 tpa_info->state = QEDE_AGG_STATE_ERROR;
856 goto cons_buf;
857 }
858
859 tpa_info->frag_id = 0;
860 tpa_info->state = QEDE_AGG_STATE_START;
861
862 if ((le16_to_cpu(cqe->pars_flags.flags) >>
863 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
864 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
865 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
866 else
867 tpa_info->vlan_tag = 0;
868
869 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
870
871 /* This is needed in order to enable forwarding support */
872 qede_set_gro_params(edev, tpa_info->skb, cqe);
873
874 cons_buf: /* We still need to handle bd_len_list to consume buffers */
875 if (likely(cqe->bw_ext_bd_len_list[0]))
876 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
877 le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
878
879 if (unlikely(cqe->bw_ext_bd_len_list[1])) {
880 DP_ERR(edev,
881 "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
882 tpa_info->state = QEDE_AGG_STATE_ERROR;
883 }
884 }
885
886 #ifdef CONFIG_INET
887 static void qede_gro_ip_csum(struct sk_buff *skb)
888 {
889 const struct iphdr *iph = ip_hdr(skb);
890 struct tcphdr *th;
891
892 skb_set_transport_header(skb, sizeof(struct iphdr));
893 th = tcp_hdr(skb);
894
895 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
896 iph->saddr, iph->daddr, 0);
897
898 tcp_gro_complete(skb);
899 }
900
901 static void qede_gro_ipv6_csum(struct sk_buff *skb)
902 {
903 struct ipv6hdr *iph = ipv6_hdr(skb);
904 struct tcphdr *th;
905
906 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
907 th = tcp_hdr(skb);
908
909 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
910 &iph->saddr, &iph->daddr, 0);
911 tcp_gro_complete(skb);
912 }
913 #endif
914
915 static void qede_gro_receive(struct qede_dev *edev,
916 struct qede_fastpath *fp,
917 struct sk_buff *skb,
918 u16 vlan_tag)
919 {
920 /* FW can send a single MTU sized packet from gro flow
921 * due to aggregation timeout/last segment etc. which
922 * is not expected to be a gro packet. If a skb has zero
923 * frags then simply push it in the stack as non gso skb.
924 */
925 if (unlikely(!skb->data_len)) {
926 skb_shinfo(skb)->gso_type = 0;
927 skb_shinfo(skb)->gso_size = 0;
928 goto send_skb;
929 }
930
931 #ifdef CONFIG_INET
932 if (skb_shinfo(skb)->gso_size) {
933 skb_reset_network_header(skb);
934
935 switch (skb->protocol) {
936 case htons(ETH_P_IP):
937 qede_gro_ip_csum(skb);
938 break;
939 case htons(ETH_P_IPV6):
940 qede_gro_ipv6_csum(skb);
941 break;
942 default:
943 DP_ERR(edev,
944 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
945 ntohs(skb->protocol));
946 }
947 }
948 #endif
949
950 send_skb:
951 skb_record_rx_queue(skb, fp->rxq->rxq_id);
952 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
953 }
954
955 static inline void qede_tpa_cont(struct qede_dev *edev,
956 struct qede_rx_queue *rxq,
957 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
958 {
959 int i;
960
961 for (i = 0; cqe->len_list[i]; i++)
962 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
963 le16_to_cpu(cqe->len_list[i]));
964
965 if (unlikely(i > 1))
966 DP_ERR(edev,
967 "Strange - TPA cont with more than a single len_list entry\n");
968 }
969
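/* Handle a TPA-end CQE: add the remaining fragments, sanity-check the
 * aggregated length and BD count, and hand the completed skb to GRO.
 */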
970 static int qede_tpa_end(struct qede_dev *edev,
971 struct qede_fastpath *fp,
972 struct eth_fast_path_rx_tpa_end_cqe *cqe)
973 {
974 struct qede_rx_queue *rxq = fp->rxq;
975 struct qede_agg_info *tpa_info;
976 struct sk_buff *skb;
977 int i;
978
979 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
980 skb = tpa_info->skb;
981
982 if (tpa_info->buffer.page_offset == PAGE_SIZE)
983 dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
984 PAGE_SIZE, rxq->data_direction);
985
986 for (i = 0; cqe->len_list[i]; i++)
987 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
988 le16_to_cpu(cqe->len_list[i]));
989 if (unlikely(i > 1))
990 DP_ERR(edev,
991 "Strange - TPA emd with more than a single len_list entry\n");
992
993 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
994 goto err;
995
996 /* Sanity */
997 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
998 DP_ERR(edev,
999 "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
1000 cqe->num_of_bds, tpa_info->frag_id);
1001 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
1002 DP_ERR(edev,
1003 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
1004 le16_to_cpu(cqe->total_packet_len), skb->len);
1005
1006 /* Finalize the SKB */
1007 skb->protocol = eth_type_trans(skb, edev->ndev);
1008 skb->ip_summed = CHECKSUM_UNNECESSARY;
1009
1010 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
1011 * to skb_shinfo(skb)->gso_segs
1012 */
1013 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
1014
1015 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
1016
1017 tpa_info->state = QEDE_AGG_STATE_NONE;
1018
1019 return 1;
1020 err:
1021 tpa_info->state = QEDE_AGG_STATE_NONE;
1022
1023 if (tpa_info->tpa_start_fail) {
1024 qede_reuse_page(rxq, &tpa_info->buffer);
1025 tpa_info->tpa_start_fail = false;
1026 }
1027
1028 dev_kfree_skb_any(tpa_info->skb);
1029 tpa_info->skb = NULL;
1030 return 0;
1031 }
1032
1033 static u8 qede_check_notunn_csum(u16 flag)
1034 {
1035 u16 csum_flag = 0;
1036 u8 csum = 0;
1037
1038 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1039 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
1040 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1041 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
1042 csum = QEDE_CSUM_UNNECESSARY;
1043 }
1044
1045 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1046 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
1047
1048 if (csum_flag & flag)
1049 return QEDE_CSUM_ERROR;
1050
1051 return csum;
1052 }
1053
1054 static u8 qede_check_csum(u16 flag)
1055 {
1056 if (!qede_tunn_exist(flag))
1057 return qede_check_notunn_csum(flag);
1058 else
1059 return qede_check_tunn_csum(flag);
1060 }
1061
1062 static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
1063 u16 flag)
1064 {
1065 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
1066
1067 if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
1068 ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
1069 (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1070 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
1071 return true;
1072
1073 return false;
1074 }
1075
1076 /* Return true iff packet is to be passed to stack */
1077 static bool qede_rx_xdp(struct qede_dev *edev,
1078 struct qede_fastpath *fp,
1079 struct qede_rx_queue *rxq,
1080 struct bpf_prog *prog,
1081 struct sw_rx_data *bd,
1082 struct eth_fast_path_rx_reg_cqe *cqe,
1083 u16 *data_offset, u16 *len)
1084 {
1085 struct xdp_buff xdp;
1086 enum xdp_action act;
1087
1088 xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
1089 xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
1090 *len, false);
1091
1092 /* Queues always have a full reset currently, so for the time
1093 * being until there's atomic program replace just mark read
1094 * side for map helpers.
1095 */
1096 rcu_read_lock();
1097 act = bpf_prog_run_xdp(prog, &xdp);
1098 rcu_read_unlock();
1099
1100 /* Recalculate, as XDP might have changed the headers */
1101 *data_offset = xdp.data - xdp.data_hard_start;
1102 *len = xdp.data_end - xdp.data;
1103
1104 if (act == XDP_PASS)
1105 return true;
1106
1107 /* Count number of packets not to be passed to stack */
1108 rxq->xdp_no_pass++;
1109
1110 switch (act) {
1111 case XDP_TX:
1112 /* We need the replacement buffer before transmit. */
1113 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1114 qede_recycle_rx_bd_ring(rxq, 1);
1115
1116 trace_xdp_exception(edev->ndev, prog, act);
1117 break;
1118 }
1119
1120 /* Now if there's a transmission problem, we'd still have to
1121 * throw current buffer, as replacement was already allocated.
1122 */
1123 if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
1124 *data_offset, *len, bd->data,
1125 NULL))) {
1126 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
1127 rxq->data_direction);
1128 __free_page(bd->data);
1129
1130 trace_xdp_exception(edev->ndev, prog, act);
1131 } else {
1132 dma_sync_single_for_device(rxq->dev,
1133 bd->mapping + *data_offset,
1134 *len, rxq->data_direction);
1135 fp->xdp_xmit |= QEDE_XDP_TX;
1136 }
1137
1138 /* Regardless, we've consumed an Rx BD */
1139 qede_rx_bd_ring_consume(rxq);
1140 break;
1141 case XDP_REDIRECT:
1142 /* We need the replacement buffer before transmit. */
1143 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1144 qede_recycle_rx_bd_ring(rxq, 1);
1145
1146 trace_xdp_exception(edev->ndev, prog, act);
1147 break;
1148 }
1149
1150 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
1151 rxq->data_direction);
1152
1153 if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
1154 DP_NOTICE(edev, "Failed to redirect the packet\n");
1155 else
1156 fp->xdp_xmit |= QEDE_XDP_REDIRECT;
1157
1158 qede_rx_bd_ring_consume(rxq);
1159 break;
1160 default:
1161 bpf_warn_invalid_xdp_action(act);
1162 fallthrough;
1163 case XDP_ABORTED:
1164 trace_xdp_exception(edev->ndev, prog, act);
1165 fallthrough;
1166 case XDP_DROP:
1167 qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1168 }
1169
1170 return false;
1171 }
1172
1173 static int qede_rx_build_jumbo(struct qede_dev *edev,
1174 struct qede_rx_queue *rxq,
1175 struct sk_buff *skb,
1176 struct eth_fast_path_rx_reg_cqe *cqe,
1177 u16 first_bd_len)
1178 {
1179 u16 pkt_len = le16_to_cpu(cqe->pkt_len);
1180 struct sw_rx_data *bd;
1181 u16 bd_cons_idx;
1182 u8 num_frags;
1183
1184 pkt_len -= first_bd_len;
1185
1186 /* We've already used one BD for the SKB. Now take care of the rest */
1187 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
1188 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1189 pkt_len;
1190
1191 if (unlikely(!cur_size)) {
1192 DP_ERR(edev,
1193 "Still got %d BDs for mapping jumbo, but length became 0\n",
1194 num_frags);
1195 goto out;
1196 }
1197
1198 /* We need a replacement buffer for each BD */
1199 if (unlikely(qede_alloc_rx_buffer(rxq, true)))
1200 goto out;
1201
1202 /* Now that we've allocated the replacement buffer,
1203 * we can safely consume the next BD and map it to the SKB.
1204 */
1205 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1206 bd = &rxq->sw_rx_ring[bd_cons_idx];
1207 qede_rx_bd_ring_consume(rxq);
1208
1209 dma_unmap_page(rxq->dev, bd->mapping,
1210 PAGE_SIZE, DMA_FROM_DEVICE);
1211
1212 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data,
1213 rxq->rx_headroom, cur_size, PAGE_SIZE);
1214
1215 pkt_len -= cur_size;
1216 }
1217
1218 if (unlikely(pkt_len))
1219 DP_ERR(edev,
1220 "Mapped all BDs of jumbo, but still have %d bytes\n",
1221 pkt_len);
1222
1223 out:
1224 return num_frags;
1225 }
1226
1227 static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
1228 struct qede_fastpath *fp,
1229 struct qede_rx_queue *rxq,
1230 union eth_rx_cqe *cqe,
1231 enum eth_rx_cqe_type type)
1232 {
1233 switch (type) {
1234 case ETH_RX_CQE_TYPE_TPA_START:
1235 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1236 return 0;
1237 case ETH_RX_CQE_TYPE_TPA_CONT:
1238 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1239 return 0;
1240 case ETH_RX_CQE_TYPE_TPA_END:
1241 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
1242 default:
1243 return 0;
1244 }
1245 }
1246
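/* Process a single Rx completion: slowpath and TPA CQEs are dispatched to
 * their handlers; regular CQEs optionally run through XDP and are then
 * turned into an skb and passed up the stack. Returns the number of
 * packets delivered to the stack (0 or 1).
 */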
1247 static int qede_rx_process_cqe(struct qede_dev *edev,
1248 struct qede_fastpath *fp,
1249 struct qede_rx_queue *rxq)
1250 {
1251 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
1252 struct eth_fast_path_rx_reg_cqe *fp_cqe;
1253 u16 len, pad, bd_cons_idx, parse_flag;
1254 enum eth_rx_cqe_type cqe_type;
1255 union eth_rx_cqe *cqe;
1256 struct sw_rx_data *bd;
1257 struct sk_buff *skb;
1258 __le16 flags;
1259 u8 csum_flag;
1260
1261 /* Get the CQE from the completion ring */
1262 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1263 cqe_type = cqe->fast_path_regular.type;
1264
1265 /* Process an unlikely slowpath event */
1266 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1267 struct eth_slow_path_rx_cqe *sp_cqe;
1268
1269 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
1270 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
1271 return 0;
1272 }
1273
1274 /* Handle TPA cqes */
1275 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
1276 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1277
1278 /* Get the data from the SW ring; Consume it only after it's evident
1279 * we wouldn't recycle it.
1280 */
1281 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1282 bd = &rxq->sw_rx_ring[bd_cons_idx];
1283
1284 fp_cqe = &cqe->fast_path_regular;
1285 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1286 pad = fp_cqe->placement_offset + rxq->rx_headroom;
1287
1288 /* Run eBPF program if one is attached */
1289 if (xdp_prog)
1290 if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
1291 &pad, &len))
1292 return 0;
1293
1294 /* If this is an error packet then drop it */
1295 flags = cqe->fast_path_regular.pars_flags.flags;
1296 parse_flag = le16_to_cpu(flags);
1297
1298 csum_flag = qede_check_csum(parse_flag);
1299 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1300 if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
1301 rxq->rx_ip_frags++;
1302 else
1303 rxq->rx_hw_errors++;
1304 }
1305
1306 /* Basic validation passed; Need to prepare an SKB. This would also
1307 * guarantee to finally consume the first BD upon success.
1308 */
1309 skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
1310 if (!skb) {
1311 rxq->rx_alloc_errors++;
1312 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1313 return 0;
1314 }
1315
1316 /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed
1317 * by a single cqe.
1318 */
1319 if (fp_cqe->bd_num > 1) {
1320 u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
1321 fp_cqe, len);
1322
1323 if (unlikely(unmapped_frags > 0)) {
1324 qede_recycle_rx_bd_ring(rxq, unmapped_frags);
1325 dev_kfree_skb_any(skb);
1326 return 0;
1327 }
1328 }
1329
1330 /* The SKB contains all the data. Now prepare meta-magic */
1331 skb->protocol = eth_type_trans(skb, edev->ndev);
1332 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
1333 qede_set_skb_csum(skb, csum_flag);
1334 skb_record_rx_queue(skb, rxq->rxq_id);
1335 qede_ptp_record_rx_ts(edev, cqe, skb);
1336
1337 /* SKB is prepared - pass it to stack */
1338 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
1339
1340 return 1;
1341 }
1342
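/* Rx NAPI handler for a single queue: process up to @budget completions,
 * refill the Rx buffer ring and update the Rx producers.
 */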
1343 static int qede_rx_int(struct qede_fastpath *fp, int budget)
1344 {
1345 struct qede_rx_queue *rxq = fp->rxq;
1346 struct qede_dev *edev = fp->edev;
1347 int work_done = 0, rcv_pkts = 0;
1348 u16 hw_comp_cons, sw_comp_cons;
1349
1350 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1351 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1352
1353 /* Memory barrier to prevent the CPU from doing speculative reads of CQE
1354 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
1355 * read before it is written by FW, then FW writes CQE and SB, and then
1356 * the CPU reads the hw_comp_cons, it will use an old CQE.
1357 */
1358 rmb();
1359
1360 /* Loop to complete all indicated BDs */
1361 while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
1362 rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
1363 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1364 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1365 work_done++;
1366 }
1367
1368 rxq->rcv_pkts += rcv_pkts;
1369
1370 /* Allocate replacement buffers */
1371 while (rxq->num_rx_buffers - rxq->filled_buffers)
1372 if (qede_alloc_rx_buffer(rxq, false))
1373 break;
1374
1375 /* Update producers */
1376 qede_update_rx_prod(edev, rxq);
1377
1378 return work_done;
1379 }
1380
1381 static bool qede_poll_is_more_work(struct qede_fastpath *fp)
1382 {
1383 qed_sb_update_sb_idx(fp->sb_info);
1384
1385 /* *_has_*_work() reads the status block, thus we need to ensure that
1386 * status block indices have been actually read (qed_sb_update_sb_idx)
1387 * prior to this check (*_has_*_work) so that we won't write the
1388 * "newer" value of the status block to HW (if there was a DMA right
1389 * after qede_has_rx_work and if there is no rmb, the memory reading
1390 * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
1391 * In this case there will never be another interrupt until there is
1392 * another update of the status block, while there is still unhandled
1393 * work.
1394 */
1395 rmb();
1396
1397 if (likely(fp->type & QEDE_FASTPATH_RX))
1398 if (qede_has_rx_work(fp->rxq))
1399 return true;
1400
1401 if (fp->type & QEDE_FASTPATH_XDP)
1402 if (qede_txq_has_work(fp->xdp_tx))
1403 return true;
1404
1405 if (likely(fp->type & QEDE_FASTPATH_TX)) {
1406 int cos;
1407
1408 for_each_cos_in_txq(fp->edev, cos) {
1409 if (qede_txq_has_work(&fp->txq[cos]))
1410 return true;
1411 }
1412 }
1413
1414 return false;
1415 }
1416
1417 /*********************
1418 * NDO & API related *
1419 *********************/
1420 int qede_poll(struct napi_struct *napi, int budget)
1421 {
1422 struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1423 napi);
1424 struct qede_dev *edev = fp->edev;
1425 int rx_work_done = 0;
1426 u16 xdp_prod;
1427
1428 fp->xdp_xmit = 0;
1429
1430 if (likely(fp->type & QEDE_FASTPATH_TX)) {
1431 int cos;
1432
1433 for_each_cos_in_txq(fp->edev, cos) {
1434 if (qede_txq_has_work(&fp->txq[cos]))
1435 qede_tx_int(edev, &fp->txq[cos]);
1436 }
1437 }
1438
1439 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
1440 qede_xdp_tx_int(edev, fp->xdp_tx);
1441
1442 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1443 qede_has_rx_work(fp->rxq)) ?
1444 qede_rx_int(fp, budget) : 0;
1445 /* Handle case where we are called by netpoll with a budget of 0 */
1446 if (rx_work_done < budget || !budget) {
1447 if (!qede_poll_is_more_work(fp)) {
1448 napi_complete_done(napi, rx_work_done);
1449
1450 /* Update and reenable interrupts */
1451 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1452 } else {
1453 rx_work_done = budget;
1454 }
1455 }
1456
1457 if (fp->xdp_xmit & QEDE_XDP_TX) {
1458 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
1459
1460 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
1461 qede_update_tx_producer(fp->xdp_tx);
1462 }
1463
1464 if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
1465 xdp_do_flush_map();
1466
1467 return rx_work_done;
1468 }
1469
1470 irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1471 {
1472 struct qede_fastpath *fp = fp_cookie;
1473
1474 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1475
1476 napi_schedule_irqoff(&fp->napi);
1477 return IRQ_HANDLED;
1478 }
1479
1480 /* Main transmit function */
1481 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1482 {
1483 struct qede_dev *edev = netdev_priv(ndev);
1484 struct netdev_queue *netdev_txq;
1485 struct qede_tx_queue *txq;
1486 struct eth_tx_1st_bd *first_bd;
1487 struct eth_tx_2nd_bd *second_bd = NULL;
1488 struct eth_tx_3rd_bd *third_bd = NULL;
1489 struct eth_tx_bd *tx_data_bd = NULL;
1490 u16 txq_index, val = 0;
1491 u8 nbd = 0;
1492 dma_addr_t mapping;
1493 int rc, frag_idx = 0, ipv6_ext = 0;
1494 u8 xmit_type;
1495 u16 idx;
1496 u16 hlen;
1497 bool data_split = false;
1498
1499 /* Get tx-queue context and netdev index */
1500 txq_index = skb_get_queue_mapping(skb);
1501 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
1502 txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
1503 netdev_txq = netdev_get_tx_queue(ndev, txq_index);
1504
1505 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
1506
1507 xmit_type = qede_xmit_type(skb, &ipv6_ext);
1508
1509 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
1510 if (qede_pkt_req_lin(skb, xmit_type)) {
1511 if (skb_linearize(skb)) {
1512 txq->tx_mem_alloc_err++;
1513
1514 dev_kfree_skb_any(skb);
1515 return NETDEV_TX_OK;
1516 }
1517 }
1518 #endif
1519
1520 /* Fill the entry in the SW ring and the BDs in the FW ring */
1521 idx = txq->sw_tx_prod;
1522 txq->sw_tx_ring.skbs[idx].skb = skb;
1523 first_bd = (struct eth_tx_1st_bd *)
1524 qed_chain_produce(&txq->tx_pbl);
1525 memset(first_bd, 0, sizeof(*first_bd));
1526 first_bd->data.bd_flags.bitfields =
1527 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1528
1529 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1530 qede_ptp_tx_ts(edev, skb);
1531
1532 /* Map skb linear data for DMA and set in the first BD */
1533 mapping = dma_map_single(txq->dev, skb->data,
1534 skb_headlen(skb), DMA_TO_DEVICE);
1535 if (unlikely(dma_mapping_error(txq->dev, mapping))) {
1536 DP_NOTICE(edev, "SKB mapping failed\n");
1537 qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1538 qede_update_tx_producer(txq);
1539 return NETDEV_TX_OK;
1540 }
1541 nbd++;
1542 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
1543
1544 /* In case there is IPv6 with extension headers or LSO we need 2nd and
1545 * 3rd BDs.
1546 */
1547 if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
1548 second_bd = (struct eth_tx_2nd_bd *)
1549 qed_chain_produce(&txq->tx_pbl);
1550 memset(second_bd, 0, sizeof(*second_bd));
1551
1552 nbd++;
1553 third_bd = (struct eth_tx_3rd_bd *)
1554 qed_chain_produce(&txq->tx_pbl);
1555 memset(third_bd, 0, sizeof(*third_bd));
1556
1557 nbd++;
1558 /* We need to fill in additional data in second_bd... */
1559 tx_data_bd = (struct eth_tx_bd *)second_bd;
1560 }
1561
1562 if (skb_vlan_tag_present(skb)) {
1563 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
1564 first_bd->data.bd_flags.bitfields |=
1565 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1566 }
1567
1568 /* Fill the parsing flags & params according to the requested offload */
1569 if (xmit_type & XMIT_L4_CSUM) {
1570 /* We don't re-calculate IP checksum as it is already done by
1571 * the upper stack
1572 */
1573 first_bd->data.bd_flags.bitfields |=
1574 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1575
1576 if (xmit_type & XMIT_ENC) {
1577 first_bd->data.bd_flags.bitfields |=
1578 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1579
1580 val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1581 }
1582
1583 /* Legacy FW had flipped behavior in regard to this bit -
1584 * I.e., needed to set to prevent FW from touching encapsulated
1585 * packets when it didn't need to.
1586 */
1587 if (unlikely(txq->is_legacy))
1588 val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1589
1590 /* If the packet is IPv6 with extension header, indicate that
1591 * to FW and pass few params, since the device cracker doesn't
1592 * support parsing IPv6 with extension header/s.
1593 */
1594 if (unlikely(ipv6_ext))
1595 qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
1596 }
1597
1598 if (xmit_type & XMIT_LSO) {
1599 first_bd->data.bd_flags.bitfields |=
1600 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
1601 third_bd->data.lso_mss =
1602 cpu_to_le16(skb_shinfo(skb)->gso_size);
1603
1604 if (unlikely(xmit_type & XMIT_ENC)) {
1605 first_bd->data.bd_flags.bitfields |=
1606 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1607
1608 if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
1609 u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1610
1611 first_bd->data.bd_flags.bitfields |= 1 << tmp;
1612 }
1613 hlen = qede_get_skb_hlen(skb, true);
1614 } else {
1615 first_bd->data.bd_flags.bitfields |=
1616 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1617 hlen = qede_get_skb_hlen(skb, false);
1618 }
1619
1620 /* @@@TBD - if will not be removed need to check */
1621 third_bd->data.bitfields |=
1622 cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1623
1624 /* Make life easier for FW guys who can't deal with header and
1625 * data on same BD. If we need to split, use the second bd...
1626 */
1627 if (unlikely(skb_headlen(skb) > hlen)) {
1628 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1629 "TSO split header size is %d (%x:%x)\n",
1630 first_bd->nbytes, first_bd->addr.hi,
1631 first_bd->addr.lo);
1632
1633 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
1634 le32_to_cpu(first_bd->addr.lo)) +
1635 hlen;
1636
1637 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
1638 le16_to_cpu(first_bd->nbytes) -
1639 hlen);
1640
1641 /* this marks the BD as one that has no
1642 * individual mapping
1643 */
1644 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
1645
1646 first_bd->nbytes = cpu_to_le16(hlen);
1647
1648 tx_data_bd = (struct eth_tx_bd *)third_bd;
1649 data_split = true;
1650 }
1651 } else {
1652 val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
1653 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
1654 }
1655
1656 first_bd->data.bitfields = cpu_to_le16(val);
1657
1658 /* Handle fragmented skb */
1659 /* special handle for frags inside 2nd and 3rd bds.. */
1660 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
1661 rc = map_frag_to_bd(txq,
1662 &skb_shinfo(skb)->frags[frag_idx],
1663 tx_data_bd);
1664 if (rc) {
1665 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1666 qede_update_tx_producer(txq);
1667 return NETDEV_TX_OK;
1668 }
1669
1670 if (tx_data_bd == (struct eth_tx_bd *)second_bd)
1671 tx_data_bd = (struct eth_tx_bd *)third_bd;
1672 else
1673 tx_data_bd = NULL;
1674
1675 frag_idx++;
1676 }
1677
1678 /* map last frags into 4th, 5th .... */
1679 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
1680 tx_data_bd = (struct eth_tx_bd *)
1681 qed_chain_produce(&txq->tx_pbl);
1682
1683 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
1684
1685 rc = map_frag_to_bd(txq,
1686 &skb_shinfo(skb)->frags[frag_idx],
1687 tx_data_bd);
1688 if (rc) {
1689 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1690 qede_update_tx_producer(txq);
1691 return NETDEV_TX_OK;
1692 }
1693 }
1694
1695 /* update the first BD with the actual num BDs */
1696 first_bd->data.nbds = nbd;
1697
1698 netdev_tx_sent_queue(netdev_txq, skb->len);
1699
1700 skb_tx_timestamp(skb);
1701
1702 /* Advance packet producer only before sending the packet since mapping
1703 * of pages may fail.
1704 */
1705 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
1706
1707 /* 'next page' entries are counted in the producer value */
1708 txq->tx_db.data.bd_prod =
1709 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
1710
1711 if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
1712 qede_update_tx_producer(txq);
1713
1714 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
1715 < (MAX_SKB_FRAGS + 1))) {
1716 if (netdev_xmit_more())
1717 qede_update_tx_producer(txq);
1718
1719 netif_tx_stop_queue(netdev_txq);
1720 txq->stopped_cnt++;
1721 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1722 "Stop queue was called\n");
1723 /* paired memory barrier is in qede_tx_int(), we have to keep
1724 * ordering of set_bit() in netif_tx_stop_queue() and read of
1725 * fp->bd_tx_cons
1726 */
1727 smp_mb();
1728
1729 if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
1730 (MAX_SKB_FRAGS + 1)) &&
1731 (edev->state == QEDE_STATE_OPEN)) {
1732 netif_tx_wake_queue(netdev_txq);
1733 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1734 "Wake queue was called\n");
1735 }
1736 }
1737
1738 return NETDEV_TX_OK;
1739 }
1740
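/* .ndo_select_queue - spread traffic across all TSS queues and traffic
 * classes using the stack's default pick, constrained to the configured
 * Tx queue count.
 */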
1741 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
1742 struct net_device *sb_dev)
1743 {
1744 struct qede_dev *edev = netdev_priv(dev);
1745 int total_txq;
1746
1747 total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
1748
1749 return QEDE_TSS_COUNT(edev) ?
1750 netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
1751 }
1752
1753 /* 8B udp header + 8B base tunnel header + 32B option length */
1754 #define QEDE_MAX_TUN_HDR_LEN 48
1755
1756 netdev_features_t qede_features_check(struct sk_buff *skb,
1757 struct net_device *dev,
1758 netdev_features_t features)
1759 {
1760 if (skb->encapsulation) {
1761 u8 l4_proto = 0;
1762
1763 switch (vlan_get_protocol(skb)) {
1764 case htons(ETH_P_IP):
1765 l4_proto = ip_hdr(skb)->protocol;
1766 break;
1767 case htons(ETH_P_IPV6):
1768 l4_proto = ipv6_hdr(skb)->nexthdr;
1769 break;
1770 default:
1771 return features;
1772 }
1773
1774 /* Disable offloads for geneve tunnels, as HW can't parse
1775 * the geneve header which has option length greater than 32b
1776 * and disable offloads for the ports which are not offloaded.
1777 */
1778 if (l4_proto == IPPROTO_UDP) {
1779 struct qede_dev *edev = netdev_priv(dev);
1780 u16 hdrlen, vxln_port, gnv_port;
1781
1782 hdrlen = QEDE_MAX_TUN_HDR_LEN;
1783 vxln_port = edev->vxlan_dst_port;
1784 gnv_port = edev->geneve_dst_port;
1785
1786 if ((skb_inner_mac_header(skb) -
1787 skb_transport_header(skb)) > hdrlen ||
1788 (ntohs(udp_hdr(skb)->dest) != vxln_port &&
1789 ntohs(udp_hdr(skb)->dest) != gnv_port))
1790 return features & ~(NETIF_F_CSUM_MASK |
1791 NETIF_F_GSO_MASK);
1792 } else if (l4_proto == IPPROTO_IPIP) {
1793 /* IPIP tunnels are unknown to the device or at least unsupported natively,
1794 * offloads for them can't be done trivially, so disable them for such skb.
1795 */
1796 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1797 }
1798 }
1799
1800 return features;
1801 }
1802