// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
#include <net/xdp_sock_drv.h>

static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{
	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}

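/* Ring the doorbell of the Tx queue backing @xdp_qid so that any descriptors
 * posted since the last doorbell write are handed to the NIC.
 */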
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
{
	u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
	struct gve_tx_ring *tx = &priv->tx[tx_qid];

	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */

static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}

	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");

	vunmap(fifo->base);
}

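/* Returns 0 if a @bytes allocation fits below the end of the FIFO at the
 * current head, otherwise the number of bytes left between the head and the
 * end of the FIFO; callers allocate that remainder as padding so the data can
 * start at the beginning of the FIFO instead of wrapping.
 */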
static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
					  size_t bytes)
{
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (atomic_read(&fifo->available) <= bytes) ? false : true;
}

/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but concurrent
 * allocation and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* This check happens before we know how much padding is needed to
	 * align to a cacheline boundary for the payload, but that is fine
	 * because the FIFO head always starts aligned, and the FIFO's
	 * boundaries are aligned, so if there is space for the data, there is
	 * space for the padding to the next alignment.
	 */
	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;

	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* If the allocation did not fit in the tail fragment of the
		 * FIFO, also use the head fragment.
		 */
		nfrags++;
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[1].iov_offset = 0;	/* Start of fifo */
		iov[1].iov_len = overflow;

		fifo->head = overflow;
	}

	/* Re-align to a cacheline boundary */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}

/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}

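/* Zero out the iov bookkeeping for a completed buffer and return the number
 * of FIFO bytes (data plus alignment padding) that can be given back to the
 * Tx FIFO.
 */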
static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
{
	size_t space_freed = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
		space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
		info->iov[i].iov_len = 0;
		info->iov[i].iov_padding = 0;
	}
	return space_freed;
}

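/* Reclaim up to @to_do completed XDP Tx descriptors: return any xdp_frames,
 * release their FIFO space, report XSK completions to the pool and update the
 * ring's byte/packet counters. Returns the number of packets cleaned.
 */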
static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			      u32 to_do)
{
	struct gve_tx_buffer_state *info;
	u32 clean_end = tx->done + to_do;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	u32 xsk_complete = 0;
	u32 idx;

	for (; tx->done < clean_end; tx->done++) {
		idx = tx->done & tx->mask;
		info = &tx->info[idx];

		if (unlikely(!info->xdp.size))
			continue;

		bytes += info->xdp.size;
		pkts++;
		xsk_complete += info->xdp.is_xsk;

		info->xdp.size = 0;
		if (info->xdp_frame) {
			xdp_return_frame(info->xdp_frame);
			info->xdp_frame = NULL;
		}
		space_freed += gve_tx_clear_buffer_state(info);
	}

	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	if (xsk_complete > 0 && tx->xsk_pool)
		xsk_tx_completed(tx->xsk_pool, xsk_complete);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	return pkts;
}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);

void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
	struct gve_tx_ring *tx = &priv->tx[idx];

	if (!gve_tx_was_added_to_block(priv, idx))
		return;

	gve_remove_napi(priv, ntfy_idx);
	gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
	netdev_tx_reset_queue(tx->netdev_txq);
	gve_tx_remove_from_block(priv, idx);
}

static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
				 struct gve_tx_alloc_rings_cfg *cfg)
{
	struct device *hdev = &priv->pdev->dev;
	int idx = tx->q_num;
	size_t bytes;
	u32 qpl_id;
	u32 slots;

	slots = tx->mask + 1;
	dma_free_coherent(hdev, sizeof(*tx->q_resources),
			  tx->q_resources, tx->q_resources_bus);
	tx->q_resources = NULL;

	if (tx->tx_fifo.qpl) {
		if (tx->tx_fifo.base)
			gve_tx_fifo_release(priv, &tx->tx_fifo);

		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
		tx->tx_fifo.qpl = NULL;
	}

	bytes = sizeof(*tx->desc) * slots;
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;

	vfree(tx->info);
	tx->info = NULL;

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
	struct gve_tx_ring *tx = &priv->tx[idx];

	gve_tx_add_to_block(priv, idx);

	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}

static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
				 struct gve_tx_alloc_rings_cfg *cfg,
				 struct gve_tx_ring *tx,
				 int idx)
{
	struct device *hdev = &priv->pdev->dev;
	int qpl_page_cnt;
	u32 qpl_id = 0;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	spin_lock_init(&tx->clean_lock);
	spin_lock_init(&tx->xdp_lock);
	tx->q_num = idx;

	tx->mask = cfg->ring_size - 1;

	/* alloc metadata */
	tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * cfg->ring_size;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->raw_addressing = cfg->raw_addressing;
	tx->dev = hdev;
	if (!tx->raw_addressing) {
		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
		qpl_page_cnt = priv->tx_pages_per_qpl;

		tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
							    qpl_page_cnt);
		if (!tx->tx_fifo.qpl)
			goto abort_with_desc;

		/* map Tx FIFO */
		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
			goto abort_with_qpl;
	}

	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	return 0;

abort_with_fifo:
	if (!tx->raw_addressing)
		gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	if (!tx->raw_addressing) {
		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
		tx->tx_fifo.qpl = NULL;
	}
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}

int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{
	struct gve_tx_ring *tx = cfg->tx;
	int err = 0;
	int i, j;

	if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
		netif_err(priv, drv, priv->dev,
			  "Cannot alloc more than the max num of Tx rings\n");
		return -EINVAL;
	}

	if (cfg->start_idx == 0) {
		tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
			      GFP_KERNEL);
		if (!tx)
			return -ENOMEM;
	} else if (!tx) {
		netif_err(priv, drv, priv->dev,
			  "Cannot alloc tx rings from a nonzero start idx without tx array\n");
		return -EINVAL;
	}

	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
		err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			goto cleanup;
		}
	}

	cfg->tx = tx;
	return 0;

cleanup:
	for (j = 0; j < i; j++)
		gve_tx_free_ring_gqi(priv, &tx[j], cfg);
	if (cfg->start_idx == 0)
		kvfree(tx);
	return err;
}

void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{
	struct gve_tx_ring *tx = cfg->tx;
	int i;

	if (!tx)
		return;

	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
		gve_tx_free_ring_gqi(priv, &tx[i], cfg);

	if (cfg->start_idx == 0) {
		kvfree(tx);
		cfg->tx = NULL;
	}
}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 **/
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
	return tx->mask + 1 - (tx->req - tx->done);
}

static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{
	int pad_bytes, align_hdr_pad;
	int bytes;
	int hlen;

	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
						   hlen);
	/* We need to take into account the header alignment padding. */
	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
	bytes = align_hdr_pad + pad_bytes + skb->len;

	return bytes;
}

/* The maximum number of descriptors we could need is MAX_SKB_FRAGS + 4:
 * 1 for each skb frag
 * 1 for the skb linear portion
 * 1 for when tcp hdr needs to be in separate descriptor
 * 1 if the payload wraps to the beginning of the FIFO
 * 1 for metadata descriptor
 */
#define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
	if (info->skb) {
		dma_unmap_single(dev, dma_unmap_addr(info, dma),
				 dma_unmap_len(info, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	} else {
		dma_unmap_page(dev, dma_unmap_addr(info, dma),
			       dma_unmap_len(info, len),
			       DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	}
}

/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
	bool can_alloc = true;

	if (!tx->raw_addressing)
		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);

	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}

static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
			     struct sk_buff *skb)
{
	int bytes_required = 0;
	u32 nic_done;
	u32 to_do;
	int ret;

	if (!tx->raw_addressing)
		bytes_required = gve_skb_fifo_bytes_required(tx, skb);

	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;

	ret = -EBUSY;
	spin_lock(&tx->clean_lock);
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = nic_done - tx->done;

	/* Only try to clean if there is hope for TX */
	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
		if (to_do > 0) {
			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
			gve_clean_tx_done(priv, tx, to_do, false);
		}
		if (likely(gve_can_tx(tx, bytes_required)))
			ret = 0;
	}
	if (ret) {
		/* No space, so stop the queue */
		tx->stop_queue++;
		netif_tx_stop_queue(tx->netdev_txq);
	}
	spin_unlock(&tx->clean_lock);

	return ret;
}

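/* Fill the first (packet) descriptor: checksum/TSO type flags, the total
 * number of descriptors used by the packet, the full packet length, and the
 * length and address of the first segment.
 */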
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 u16 csum_offset, u8 ip_summed, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr, u16 pkt_len)
{
	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
	if (is_gso) {
		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else {
		pkt_desc->pkt.type_flags = GVE_TXD_STD;
		pkt_desc->pkt.l4_csum_offset = 0;
		pkt_desc->pkt.l4_hdr_offset = 0;
	}
	pkt_desc->pkt.desc_cnt = desc_cnt;
	pkt_desc->pkt.len = cpu_to_be16(pkt_len);
	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}

static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
				 struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));

	mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
	mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
				   GVE_MTD_PATH_HASH_L4;
	mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
	mtd_desc->mtd.reserved0 = 0;
	mtd_desc->mtd.reserved1 = 0;
}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 u16 l3_offset, u16 gso_size,
				 bool is_gso_v6, bool is_gso,
				 u16 len, u64 addr)
{
	seg_desc->seg.type_flags = GVE_TXD_SEG;
	if (is_gso) {
		if (is_gso_v6)
			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
		seg_desc->seg.l3_offset = l3_offset >> 1;
		seg_desc->seg.mss = cpu_to_be16(gso_size);
	}
	seg_desc->seg.seg_len = cpu_to_be16(len);
	seg_desc->seg.seg_addr = cpu_to_be64(addr);
}

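/* Sync every QPL page touched by the range [iov_offset, iov_offset + iov_len)
 * for device access before the NIC reads the freshly copied bytes.
 */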
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 first_page = iov_offset / PAGE_SIZE;
	u64 page;

	for (page = first_page; page <= last_page; page++)
		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
}

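/* Copy-mode transmit (QPL): copy the skb header and payload into the Tx FIFO
 * and write the packet descriptor, an optional metadata descriptor and one
 * segment descriptor per payload fragment. Returns the number of descriptors
 * used.
 */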
static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	int payload_iov = 2;
	int copy_offset;
	u32 next_idx;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want the tcp header alone in the first
	 * segment; otherwise we want the minimum required by the gVNIC spec.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	info->skb = skb;
	/* We don't want to split the header, so if necessary, pad to the end
	 * of the fifo and then put the header at the beginning of the fifo.
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
				       &info->iov[0]);
	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
					   &info->iov[payload_iov]);

	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
			     is_gso, l4_hdr_offset,
			     1 + mtd_desc_nr + payload_nfrags, hlen,
			     info->iov[hdr_nfrags - 1].iov_offset, skb->len);

	skb_copy_bits(skb, 0,
		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
		      hlen);
	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
				info->iov[hdr_nfrags - 1].iov_offset,
				info->iov[hdr_nfrags - 1].iov_len);
	copy_offset = hlen;

	if (mtd_desc_nr) {
		next_idx = (tx->req + 1) & tx->mask;
		gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
	}

	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
		seg_desc = &tx->desc[next_idx];

		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso,
				     info->iov[i].iov_len,
				     info->iov[i].iov_offset);

		skb_copy_bits(skb, copy_offset,
			      tx->tx_fifo.base + info->iov[i].iov_offset,
			      info->iov[i].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
					info->iov[i].iov_offset,
					info->iov[i].iov_len);
		copy_offset += info->iov[i].iov_len;
	}

	return 1 + mtd_desc_nr + payload_nfrags;
}

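/* Raw-addressing transmit: DMA-map the skb linear portion and page frags in
 * place and describe each mapping with its own descriptor. Returns the number
 * of descriptors used, or 0 if a mapping failed and the packet was dropped.
 */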
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
				  struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int hlen, num_descriptors, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	u64 addr;
	u32 len;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want only up to the tcp header in the
	 * first segment so it can be replicated efficiently on each segment;
	 * otherwise we want the linear portion of the skb (which will contain
	 * the checksum because skb->csum_start and skb->csum_offset are given
	 * relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
	len = skb_headlen(skb);

	info->skb = skb;

	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx->dev, addr))) {
		tx->dma_mapping_error++;
		goto drop;
	}
	dma_unmap_len_set(info, len, len);
	dma_unmap_addr_set(info, dma, addr);

	num_descriptors = 1 + shinfo->nr_frags;
	if (hlen < len)
		num_descriptors++;
	if (mtd_desc_nr)
		num_descriptors++;

	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
			     is_gso, l4_hdr_offset,
			     num_descriptors, hlen, addr, skb->len);

	if (mtd_desc_nr) {
		idx = (idx + 1) & tx->mask;
		mtd_desc = &tx->desc[idx];
		gve_tx_fill_mtd_desc(mtd_desc, skb);
	}

	if (hlen < len) {
		/* For gso the rest of the linear portion of the skb needs to
		 * be in its own descriptor.
		 */
		len -= hlen;
		addr += hlen;
		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso, len, addr);
	}

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr))) {
			tx->dma_mapping_error++;
			goto unmap_drop;
		}
		tx->info[idx].skb = NULL;
		dma_unmap_len_set(&tx->info[idx], len, len);
		dma_unmap_addr_set(&tx->info[idx], dma, addr);

		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso, len, addr);
	}

	return num_descriptors;

unmap_drop:
	i += num_descriptors - shinfo->nr_frags;
	while (i--) {
		/* Skip metadata descriptor, if set */
		if (i == 1 && mtd_desc_nr == 1)
			continue;
		idx--;
		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
	}
drop:
	tx->dropped_pkt++;
	return 0;
}

netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int nsegs;

	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
	     "skb queue index out of range");
	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */

		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		return NETDEV_TX_BUSY;
	}
	if (tx->raw_addressing)
		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
	else
		nsegs = gve_tx_add_skb_copy(priv, tx, skb);

	/* If the packet is getting sent, we need to update the skb */
	if (nsegs) {
		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
		skb_tx_timestamp(skb);
		tx->req += nsegs;
	} else {
		dev_kfree_skb_any(skb);
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	/* Give packets to NIC. Even if this packet failed to send, the
	 * doorbell might need to be rung because of xmit_more.
	 */
	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;
}

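/* Copy an XDP frame into the Tx FIFO and post descriptors for it: a packet
 * descriptor for the first data fragment and a segment descriptor for each
 * additional fragment. If the copy would wrap and the fragment at the end of
 * the FIFO would be smaller than the minimum a packet descriptor may cover,
 * that tail is skipped as padding so the frame starts at the beginning of the
 * FIFO. Returns the number of descriptors written.
 */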
static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
			   void *data, int len, void *frame_p, bool is_xsk)
{
	int pad, nfrags, ndescs, iovi, offset;
	struct gve_tx_buffer_state *info;
	u32 reqi = tx->req;

	pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
	if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES)
		pad = 0;
	info = &tx->info[reqi & tx->mask];
	info->xdp_frame = frame_p;
	info->xdp.size = len;
	info->xdp.is_xsk = is_xsk;

	nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
				   &info->iov[0]);
	iovi = pad > 0;
	ndescs = nfrags - iovi;
	offset = 0;

	while (iovi < nfrags) {
		if (!offset)
			gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
					     CHECKSUM_NONE, false, 0, ndescs,
					     info->iov[iovi].iov_len,
					     info->iov[iovi].iov_offset, len);
		else
			gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
					     0, 0, false, false,
					     info->iov[iovi].iov_len,
					     info->iov[iovi].iov_offset);

		memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
		       data + offset, info->iov[iovi].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev,
					tx->tx_fifo.qpl->page_buses,
					info->iov[iovi].iov_offset,
					info->iov[iovi].iov_len);
		offset += info->iov[iovi].iov_len;
		iovi++;
		reqi++;
	}

	return ndescs;
}

int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int i, err = 0, qid;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	qid = gve_xdp_tx_queue_id(priv,
				  smp_processor_id() % priv->num_xdp_queues);

	tx = &priv->tx[qid];

	spin_lock(&tx->xdp_lock);
	for (i = 0; i < n; i++) {
		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
				       frames[i]->len, frames[i]);
		if (err)
			break;
	}

	if (flags & XDP_XMIT_FLUSH)
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);

	spin_unlock(&tx->xdp_lock);

	u64_stats_update_begin(&tx->statss);
	tx->xdp_xmit += n;
	tx->xdp_xmit_errors += n - i;
	u64_stats_update_end(&tx->statss);

	return i ? i : err;
}

int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p)
{
	int nsegs;

	if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
		return -EBUSY;

	nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
	tx->req += nsegs;

	return 0;
}

#define GVE_TX_START_THRESH	4096

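/* Reclaim up to @to_do completed Tx descriptors: unmap DMA buffers (raw
 * addressing) or return FIFO space (QPL mode), free the skbs, update stats
 * and BQL accounting, and optionally wake the queue if it was stopped for
 * lack of space.
 */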
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	struct sk_buff *skb;
	u32 idx;
	int j;

	for (j = 0; j < to_do; j++) {
		idx = tx->done & tx->mask;
		netif_info(priv, tx_done, priv->dev,
			   "[%d] %s: idx=%d (req=%u done=%u)\n",
			   tx->q_num, __func__, idx, tx->req, tx->done);
		info = &tx->info[idx];
		skb = info->skb;

		/* Unmap the buffer */
		if (tx->raw_addressing)
			gve_tx_unmap_buf(tx->dev, info);
		tx->done++;
		/* Mark as free */
		if (skb) {
			info->skb = NULL;
			bytes += skb->len;
			pkts++;
			dev_consume_skb_any(skb);
			if (tx->raw_addressing)
				continue;
			space_freed += gve_tx_clear_buffer_state(info);
		}
	}

	if (!tx->raw_addressing)
		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

	/* start the queue if we've stopped it */
#ifndef CONFIG_BQL
	/* Make sure that the doorbells are synced */
	smp_mb();
#endif
	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
		tx->wake_queue++;
		netif_tx_wake_queue(tx->netdev_txq);
	}

	return pkts;
}

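/* Read the event counter the NIC writes for this Tx queue; it reports how
 * many descriptors the device has completed so far.
 */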
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);

	return be32_to_cpu(counter);
}

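/* Pull up to @budget descriptors from the XSK Tx ring, copy each into the Tx
 * FIFO via gve_tx_fill_xdp(), then ring the doorbell once for the whole
 * batch. Returns the number of descriptors sent.
 */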
static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
		      int budget)
{
	struct xdp_desc desc;
	int sent = 0, nsegs;
	void *data;

	spin_lock(&tx->xdp_lock);
	while (sent < budget) {
		if (!gve_can_tx(tx, GVE_TX_START_THRESH))
			goto out;

		if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
			tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
			goto out;
		}

		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
		nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
		tx->req += nsegs;
		sent++;
	}
out:
	if (sent > 0) {
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		xsk_tx_release(tx->xsk_pool);
	}
	spin_unlock(&tx->xdp_lock);
	return sent;
}

bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	bool repoll;
	u32 to_do;

	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_xdp_done(priv, tx, to_do);
	repoll = nic_done != tx->done;

	if (tx->xsk_pool) {
		int sent = gve_xsk_tx(priv, tx, budget);

		u64_stats_update_begin(&tx->statss);
		tx->xdp_xsk_sent += sent;
		u64_stats_update_end(&tx->statss);
		repoll |= (sent == budget);
		if (xsk_uses_need_wakeup(tx->xsk_pool))
			xsk_set_tx_need_wakeup(tx->xsk_pool);
	}

	/* If we still have work we want to repoll */
	return repoll;
}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	u32 to_do;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	/* The Tx path may also try to clean completed packets in order to
	 * xmit; taking the same spin_lock() avoids a cleaning conflict and
	 * yields better xmit/clean concurrency than netif's lock.
	 */
	spin_lock(&tx->clean_lock);
	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_tx_done(priv, tx, to_do, true);
	spin_unlock(&tx->clean_lock);
	/* If we still have work we want to repoll */
	return nic_done != tx->done;
}

bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
{
	u32 nic_done = gve_tx_load_event_counter(priv, tx);

	return nic_done != tx->done;
}