// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>

#include "lan966x_main.h"

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
					       struct lan966x_db *db)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return NULL;

	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return page;
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	int i, j;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct page *page;

	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
				    struct lan966x_rx_dcb *dcb,
				    u64 nextptr)
{
	struct lan966x_db *db;
	int i;

	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
		db = &dcb->db[i];
		db->status = FDMA_DCB_STATUS_INTR;
	}

	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}

static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = FDMA_DCB_MAX,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}
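
/* A sketch of the RX buffer geometry, as implied by the page pool
 * parameters above (inferred from this file, not from the hardware
 * documentation): every DB is backed by one page_pool page of
 * PAGE_SIZE << rx->page_order bytes, laid out as
 *
 *   [ XDP_PACKET_HEADROOM | IFH | frame data ... | skb_shared_info ]
 *                         ^
 *                         db->dataptr (DMA address)
 *
 * The headroom gives XDP programs room to grow the frame, and reserving
 * space for struct skb_shared_info at the tail (subtracted from max_len)
 * lets lan966x_fdma_rx_get_frame() hand the page to build_skb() without
 * copying.
 */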

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	struct page *page;
	int i, j;
	int size;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);

	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
	if (!rx->dcbs)
		return -ENOMEM;

	rx->last_entry = rx->dcbs;
	rx->db_index = 0;
	rx->dcb_index = 0;

	/* Now for each dcb allocate the dbs */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];
		dcb->info = 0;

		/* For each db allocate a page and map it to the DB dataptr. */
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			page = lan966x_fdma_rx_alloc_page(rx, db);
			if (!page)
				return -ENOMEM;

			db->status = 0;
			rx->page[i][j] = page;
		}

		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
	}

	return 0;
}

static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	rx->dcb_index++;
	rx->dcb_index &= FDMA_DCB_MAX - 1;
}

static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 size;

	/* Now it is possible to do the cleanup of the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}

static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP(rx->channel_id));
	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP1(rx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(rx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(rx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(rx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}
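
/* Summary of the channel lifecycle implemented above, kept here as an
 * informal reference sketch of the register sequence:
 *
 *   1. write the address of the first DCB to FDMA_DCB_LLP/LLP1(ch)
 *   2. configure the channel through FDMA_CH_CFG(ch)
 *   3. clear the port STOP bit and set the channel bit in FDMA_INTR_DB_ENA
 *   4. set the channel bit in FDMA_CH_ACTIVATE
 *
 * Once a channel is active it is not re-activated for new work; DCBs that
 * are appended to the list are announced with FDMA_CH_RELOAD instead (see
 * lan966x_fdma_rx_reload() above and lan966x_fdma_tx_reload() below).
 */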

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
				    struct lan966x_tx_dcb *dcb)
{
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = 0;
}

static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;
	struct lan966x_db *db;
	int size;
	int i, j;

	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
	if (!tx->dcbs)
		goto out;

	/* Now for each dcb allocate the db */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &tx->dcbs[i];

		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			db->dataptr = 0;
			db->status = 0;
		}

		lan966x_fdma_tx_add_dcb(tx, dcb);
	}

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP(tx->channel_id));
	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP1(tx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(tx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(tx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(tx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}
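
/* Each hardware TX DCB in tx->dcbs[] has a software shadow in
 * tx->dcbs_buf[] that remembers what was mapped into it: the skb or
 * xdp_frame, the DMA mapping, and whether a PTP timestamp is still
 * pending. The 'used' flag marks a DCB as owned by hardware until
 * lan966x_fdma_tx_clear_buf() sees FDMA_DCB_STATUS_DONE, and
 * 'last_in_use' is never handed out again right away because
 * lan966x_fdma_tx_start() still chains new DCBs through its nextptr.
 */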

static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct lan966x_db *db;
	unsigned long flags;
	bool clear = false;
	int i;

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &tx->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else {
			xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
		}

		clear = true;
	}

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct lan966x_db *db;

	/* Check if there is any data */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_port *port;
	struct lan966x_db *db;
	struct page *page;

	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}
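
/* lan966x_fdma_rx_check_frame() returns one of the FDMA_* verdicts, which
 * lan966x_fdma_napi_poll() (below) uses to decide who owns the page next:
 *
 *   FDMA_PASS:  build an skb from the page and pass it up the stack
 *   FDMA_TX:    the page was handed to the XDP TX path; don't touch it
 *   FDMA_DROP:  recycle the page straight back into the page pool
 *   FDMA_ERROR: recycle the page and stop processing this ring pass
 */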

static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	u64 timestamp;

	/* Get the received frame and unmap it */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];

	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int dcb_reload = rx->dcb_index;
	struct lan966x_rx_dcb *old_dcb;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	int counter = 0;
	u64 src_port;
	u64 nextptr;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all received skb */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != rx->dcb_index) {
		db = &rx->dcbs[dcb_reload].db[rx->db_index];
		page = lan966x_fdma_rx_alloc_page(rx, db);
		if (unlikely(!page))
			break;
		rx->page[dcb_reload][rx->db_index] = page;

		old_dcb = &rx->dcbs[dcb_reload];
		dcb_reload++;
		dcb_reload &= FDMA_DCB_MAX - 1;

		nextptr = rx->dma + ((unsigned long)old_dcb -
				     (unsigned long)rx->dcbs);
		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
		lan966x_fdma_rx_reload(rx);
	}

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	int i;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
				      int next_to_use, int len,
				      dma_addr_t dma_addr)
{
	struct lan966x_tx_dcb *next_dcb;
	struct lan966x_db *next_db;

	next_dcb = &tx->dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(len);
}
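
/* For illustration: a 64-byte frame queued through the function above ends
 * up in a single DB whose status word is
 *
 *   FDMA_DCB_STATUS_SOF | FDMA_DCB_STATUS_EOF | FDMA_DCB_STATUS_INTR |
 *   FDMA_DCB_STATUS_BLOCKO(0) | FDMA_DCB_STATUS_BLOCKL(64)
 *
 * i.e. the frame both starts and ends in this DB (no scatter), sits at
 * offset 0 in the mapped buffer, is 64 bytes long, and raises a DB
 * interrupt on completion.
 */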

static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;

	if (likely(lan966x->tx.activated)) {
		/* Chain the current dcb to the next one */
		dcb = &tx->dcbs[tx->last_in_use];
		dcb->nextptr = tx->dma + (next_to_use *
					  sizeof(struct lan966x_tx_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* This is the first frame, so just activate the channel */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Remember this dcb as the last one in use */
	tx->last_in_use = next_to_use;
}

int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
			   struct xdp_frame *xdpf,
			   struct page *page)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	dma_addr_t dma_addr;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Generate new IFH */
	ifh = page_address(page) + XDP_PACKET_HEADROOM;
	memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
	lan966x_ifh_set_bypass(ifh, 1);
	lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

	dma_addr = page_pool_get_dma_addr(page);
	dma_sync_single_for_device(lan966x->dev, dma_addr + XDP_PACKET_HEADROOM,
				   xdpf->len + IFH_LEN_BYTES,
				   DMA_TO_DEVICE);

	/* Setup next dcb */
	lan966x_fdma_tx_setup_dcb(tx, next_to_use, xdpf->len + IFH_LEN_BYTES,
				  dma_addr + XDP_PACKET_HEADROOM);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = false;
	next_dcb_buf->data.xdpf = xdpf;
	next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, 4); /* Make room for the FCS */

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}
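
	/* At this point skb->data points at [ IFH | frame | 4 bytes of FCS
	 * room ] and the whole skb->len has been mapped for DMA above.
	 */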

	/* Setup next dcb */
	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Store these for later to free them */
	rx_dma = lan966x->rx.dma;
	rx_dcbs = lan966x->rx.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.dma = rx_dma;
	lan966x->rx.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}
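
/* The worst-case RX buffer must cover the largest MAC MAXLEN plus
 * everything the driver and XDP add around the frame: the injected IFH,
 * room for two VLAN tags, the XDP headroom in front and the
 * skb_shared_info reservation at the tail. This sum is what
 * lan966x_fdma_get_max_frame() returns and what the reload paths below
 * compare against rx.max_mtu before deciding to rebuild the ring.
 */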

static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Enable back the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		lan966x_fdma_rx_free(&lan966x->rx);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x_fdma_rx_free(&lan966x->rx);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}