// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"

static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0),
		  priv->addr + FWPBFC(priv->gwca.index));
}
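/* Note: with the programming above, FWPBFCSDC(GWCA_INDEX, i) holds port
 * i's RX queue index and FWPBFC(i) forwards that port only to the GWCA,
 * while the GWCA itself may forward to every port.  The precise steering
 * semantics are defined by the MFWD register map in the hardware manual.
 */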
/* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}
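/* Each queue is a ring of ring_size descriptors: "cur" is the next entry
 * the driver will touch and "dirty" is the oldest entry not yet reclaimed,
 * so the helpers below only ever advance an index modulo ring_size (e.g.
 * with ring_size = 8, cur = 6 advanced by num = 3 wraps to index 1).
 */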
static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
{
	int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, int ring_size)
{
	int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
	if (!gq->skbs)
		return -ENOMEM;

	if (!dir_tx) {
		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}
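/* Descriptor pointers are 40 bits wide: dptrl holds the low 32 bits
 * (little-endian) and dptrh only the next 8, which is also why probe()
 * first tries a 40-bit DMA mask before falling back to 32 bits.
 */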
static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       int start_index, int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}
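/* Every ring is allocated with ring_size + 1 entries: the extra, final
 * descriptor is a DT_LINKFIX entry pointing back at the ring base, so the
 * hardware wraps around the ring by itself.  The per-queue entry in
 * gwca.linkfix_table gives the hardware its initial descriptor pointer.
 */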
static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	return 0;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}
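/* Note the alloc/init split: rswitch_txdmac_alloc() above (and
 * rswitch_rxdmac_alloc() below) only reserve a queue and its memory,
 * while the *dmac_init() helpers write the rings out to the hardware and
 * are called from rswitch_gwca_hw_init() while the GWCA is still in
 * CONFIG mode.
 */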
static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}
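/* rswitch_rx() consumes completed RX descriptors for one port, capped at
 * min(ring_size, *quota) frames per pass.  It decrements *quota by the
 * number of frames handled and returns true when it stopped because the
 * budget ran out, i.e. when more work may be pending.
 */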
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		dma_rmb();
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}

static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}

static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	unsigned long flags;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}
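/* Interrupt discipline: the hard-IRQ path below masks a queue's data
 * interrupts before scheduling NAPI, and rswitch_poll() above re-enables
 * them only after napi_complete_done(), so a queue's interrupts stay off
 * for the whole polling cycle.
 */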
static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
	}
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct timespec64 ts;
	u32 tag, port;
	int num;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
			if (!(ts_info->port == port && ts_info->tag == tag))
				continue;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
			break;
		}

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}
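/* Like the GWCA, each ETHA agent is driven through an operating-mode
 * state machine: a mode request is written to EAMC and EAMS is polled
 * until the agent reaches DISABLE, CONFIG or OPERATION.
 */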
/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >> 8) & 0xFF;
	mac[1] = (mrmac0 >> 0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >> 8) & 0xFF;
	mac[5] = (mrmac1 >> 0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
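/* Clause 45 MDIO access is a two-phase sequence on this MAC: an address
 * frame is written to MPSM first (completion signalled by MMIS1_PAACS),
 * then a separate read or write frame transfers the data (MMIS1_PRACS or
 * MMIS1_PWACS).  rswitch_etha_set_access() below implements both phases.
 */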
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port))
				port = NULL;
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}

static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	u32 max_speed;
	int err;

	if (!rdev->np_port)
		return 0;	/* ignored */

	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
	if (err)
		return err;

	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
	if (!err) {
		rdev->etha->speed = max_speed;
		return 0;
	}

	/* if no "max-speed" property, let's use default speed */
	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;

		if (!rdev->priv->etha_no_runtime_change &&
		    phydev->speed != rdev->etha->speed) {
			rdev->etha->speed = phydev->speed;

			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
			phy_set_speed(rdev->serdes, rdev->etha->speed);
		}
	}
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	if (!rdev->priv->etha_no_runtime_change)
		return;

	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}
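/* On SoCs where the ETHA speed cannot be changed at runtime
 * (etha_no_runtime_change), rswitch_phy_remove_link_mode() above strips
 * every link mode except the one matching the configured speed, so the
 * PHY can never negotiate a rate the MAC cannot follow.
 */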
static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		if (rdev->priv->etha_no_runtime_change)
			rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}
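/* The timestamp FIFO interrupt (GWTSDIE/GWTSDID) is shared by all ports:
 * rswitch_open() enables it only when the first port opens and
 * rswitch_stop() disables it once the last open port goes down, tracked
 * via the opened_ports bitmap.
 */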
static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	unsigned long flags;

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);

	return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	unsigned long flags;

	netif_tx_stop_all_queues(ndev);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);

	return 0;
}

static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	netdev_tx_t ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	gq->skbs[gq->cur] = skb;
	desc = &gq->tx_ring[gq->cur];
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(skb->len);

	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct rswitch_gwca_ts_info *ts_info;

		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
		if (!ts_info) {
			/* A netdev_tx_t must not leak -ENOMEM: drop the
			 * frame and report it as handled instead.
			 */
			dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			gq->skbs[gq->cur] = NULL;
			return ret;
		}

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

		ts_info->skb = skb_get(skb);
		ts_info->port = rdev->port;
		ts_info->tag = rdev->ts_tag;
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

		skb_tx_timestamp(skb);
	}

	dma_wmb();

	desc->desc.die_dt = DT_FSINGLE | DIE;
	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, 1);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;
}
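/* TX timestamp flow: rswitch_start_xmit() above tags a timestamped frame
 * with a per-port sequence number (ts_tag) and keeps an skb reference on
 * gwca.ts_info_list; when the hardware delivers a timestamp descriptor
 * with the matching (port, tag) pair, rswitch_ts() completes the
 * timestamp and drops the reference.
 */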
static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}
static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;
}
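/* rswitch_device_alloc() below creates one "tsn%d" net_device per ETHA
 * port.  Ports without an available DT node are flagged rdev->disabled
 * and skipped by the rswitch_for_each_enabled_port() iterators, although
 * their net_device and queues are still allocated.
 */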
static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* Sentinel */ }
};
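/* probe() below maps the "secure_base" register region (which also
 * contains the gPTP block at RCAR_GEN4_GPTP_OFFSET_S4) and matches the
 * SoC table above to flag R-Car S4 ES1.0 parts, on which the driver
 * never changes the ETHA speed at runtime (etha_no_runtime_change).
 */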
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->lock);

	attr = soc_device_match(rswitch_soc_no_speed_change);
	if (attr)
		priv->etha_no_runtime_change = true;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

static void rswitch_deinit(struct rswitch_private *priv)
{
	int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		struct rswitch_device *rdev = priv->rdev[i];

		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(rdev);
		unregister_netdev(rdev->ndev);
		rswitch_device_free(priv, i);
	}

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

static int renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");