// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
 * and in others the FA2/FA+ hardware is used with this PDC driver.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The PDC driver defers
 * its interrupt handling to a tasklet so that response messages are handled
 * outside of interrupt context.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#define PDC_SUCCESS  0

#define RING_ENTRY_SIZE   sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES  512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN  15

#define PDC_RING_SIZE    (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER  13
#define RING_ALIGN        BIT(RING_ALIGN_ORDER)

#define RX_BUF_ALIGN_ORDER  5
#define RX_BUF_ALIGN        BIT(RX_BUF_ALIGN_ORDER)

/* descriptor bumping macros */
#define XXD(x, max_mask)              ((x) & (max_mask))
#define TXD(x, max_mask)              XXD((x), (max_mask))
#define RXD(x, max_mask)              XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)          TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)          TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)          RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)          RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)    TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)    RXD((t) - (h), (max_mask))

/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN  8
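/*
 * Illustrative examples of the ring index arithmetic above (values assume
 * the default PDC_RING_ENTRIES of 512, hence a mask of 511):
 *   NEXTTXD(511, 511) == 0           wraps to the start of the ring
 *   NTXDACTIVE(510, 3, 511) == 5     5 descriptors in flight across the wrap
 * Because ring sizes are powers of 2, masking with (size - 1) replaces a
 * modulo operation.
 */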
/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET  0

/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_RCVINT_0         (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
#define PDC_INTMASK          (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT  1
#define PDC_LAZY_TIMEOUT     10000
#define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
#define PDC_INTMASK_OFFSET   0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)
#define FA_RCVLAZY0_OFFSET   0x100

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before frame
 */
#define PDC_SPU2_RESP_HDR_LEN  17
#define PDC_CKSUM_CTRL         BIT(27)
#define PDC_CKSUM_CTRL_OFFSET  0x400

#define PDC_SPUM_RESP_HDR_LEN  32

/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL		0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE		0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL		0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE		0x1

#define CRYPTO_D64_RS0_CD_MASK   ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)

/* descriptor flags */
#define D64_CTRL1_EOT   BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC   BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF   BIT(30)	/* end of frame */
#define D64_CTRL1_SOF   BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW       0x00800000
#define RX_STATUS_LEN            0x0000FFFF

#define PDC_TXREGS_OFFSET  0x200
#define PDC_RXREGS_OFFSET  0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX 16384

enum pdc_hw {
	FA_HW,		/* FA2/FA+ hardware (i.e. Northstar Plus) */
	PDC_HW		/* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};

struct pdc_dma_map {
	void *ctx;	/* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;	/* misc control bits */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
};

/* dma registers per channel(xmt or rcv) */
struct dma64_regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* descriptor ring base address low 32-bits */
	u32 addrhigh;	/* descriptor ring base address bits 63:32 */
	u32 status0;	/* last rx descriptor written by hw */
	u32 status1;	/* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)  pad ## line
#define _XSTR(line)     _PADLINE(line)
#define PAD             _XSTR(__LINE__)
#endif  /* PAD */
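/*
 * Illustrative expansion of the PAD macro above (the line number is an
 * example only): if "u32 PAD[2];" appears on source line 193, it expands to
 * "u32 pad193[2];", so each use of PAD yields a uniquely named filler field.
 */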
/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;	/* dma tx */
	u32          PAD[2];
	struct dma64_regs dmarcv;	/* dma rx */
	u32          PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32  devcontrol;               /* 0x000 */
	u32  devstatus;                /* 0x004 */
	u32  PAD;
	u32  biststatus;               /* 0x00c */
	u32  PAD[4];
	u32  intstatus;                /* 0x020 */
	u32  intmask;                  /* 0x024 */
	u32  gptimer;                  /* 0x028 */

	u32  PAD;
	u32  intrcvlazy_0;             /* 0x030 (Only in PDC, not FA2) */
	u32  intrcvlazy_1;             /* 0x034 (Only in PDC, not FA2) */
	u32  intrcvlazy_2;             /* 0x038 (Only in PDC, not FA2) */
	u32  intrcvlazy_3;             /* 0x03c (Only in PDC, not FA2) */

	u32  PAD[48];
	u32  fa_intrecvlazy;           /* 0x100 (Only in FA2, not PDC) */
	u32  flowctlthresh;            /* 0x104 */
	u32  wrrthresh;                /* 0x108 */
	u32  gmac_idle_cnt_thresh;     /* 0x10c */

	u32  PAD[4];
	u32  ifioaccessaddr;           /* 0x120 */
	u32  ifioaccessbyte;           /* 0x124 */
	u32  ifioaccessdata;           /* 0x128 */

	u32  PAD[21];
	u32  phyaccess;                /* 0x180 */
	u32  PAD;
	u32  phycontrol;               /* 0x188 */
	u32  txqctl;                   /* 0x18c */
	u32  rxqctl;                   /* 0x190 */
	u32  gpioselect;               /* 0x194 */
	u32  gpio_output_en;           /* 0x198 */
	u32  PAD;                      /* 0x19c */
	u32  txq_rxq_mem_ctl;          /* 0x1a0 */
	u32  memory_ecc_status;        /* 0x1a4 */
	u32  serdes_ctl;               /* 0x1a8 */
	u32  serdes_status0;           /* 0x1ac */
	u32  serdes_status1;           /* 0x1b0 */
	u32  PAD[11];                  /* 0x1b4-1dc */
	u32  clk_ctl_st;               /* 0x1e0 */
	u32  hw_war;                   /* 0x1e4 (Only in PDC, not FA2) */
	u32  pwrctl;                   /* 0x1e8 */
	u32  PAD[5];

#define PDC_NUM_DMA_RINGS   4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];  /* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};
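/*
 * Layout check (informational): sizeof(struct dma64) is 16 u32s = 0x40
 * bytes, so dmaregs[0].dmaxmt sits at 0x200 and dmaregs[0].dmarcv at 0x220,
 * matching PDC_TXREGS_OFFSET and PDC_RXREGS_OFFSET above.
 */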
/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t  dmabase;	/* DMA address of start of ring */
	void	   *vbase;	/* base kernel virtual address of ring */
	u32	    size;	/* ring allocation size in bytes */
};

/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
 *           index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32  rxin_numd;
	void *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};

/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;    /* start of PDC registers */

	struct dma64_regs *txregs_64; /* dma tx engine registers */
	struct dma64_regs *rxregs_64; /* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd   *txd_64;  /* tx descriptor ring */
	struct dma64dd   *rxd_64;  /* rx descriptor ring */

	/* descriptor ring sizes */
	u32      ntxd;       /* # tx descriptors */
	u32      nrxd;       /* # rx descriptors */
	u32      nrxpost;    /* # rx buffers to keep posted */
	u32      ntxpost;    /* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32  txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32  tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32  txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32      txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32  rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32  rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32  last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32  rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	struct dentry *debugfs_stats;  /* debug FS stats file for this PDC */

	/* counters */
	u32  pdc_requests;     /* number of request messages submitted */
	u32  pdc_replies;      /* number of reply messages received */
	u32  last_tx_not_done; /* too few tx descriptors to indicate done */
	u32  tx_ring_full;     /* unable to accept msg because tx ring full */
	u32  rx_ring_full;     /* unable to accept msg because rx ring full */
	u32  txnobuf;          /* unable to create tx descriptor */
	u32  rxnobuf;          /* unable to create rx descriptor */
	u32  rx_oflow;         /* count of rx overflows */

	/* hardware type - FA2 or PDC/MDE */
	enum pdc_hw hw_type;
};
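/*
 * Bookkeeping example (illustrative values): if a request whose source
 * scatterlist maps to 3 descriptors is posted while txout == 10, then
 * tx_msg_start becomes 10, txin_numd[10] is set to 3, and txout advances to
 * 13. When the response arrives, pdc_receive_one() reclaims descriptors
 * 10..12 in one step using txin_numd[txin].
 */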
/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC requests....................%u\n",
			       pdcs->pdc_requests);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC responses...................%u\n",
			       pdcs->pdc_replies);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx not done.....................%u\n",
			       pdcs->last_tx_not_done);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx ring full....................%u\n",
			       pdcs->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx ring full....................%u\n",
			       pdcs->rx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx desc write fail. Ring full...%u\n",
			       pdcs->txnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx desc write fail. Ring full...%u\n",
			       pdcs->rxnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Receive overflow................%u\n",
			       pdcs->rx_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Num frags in rx ring............%u\n",
			       NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					  pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};
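/*
 * With debugfs mounted in the usual place, the stats for PDC 0 are then
 * readable at /sys/kernel/debug/<KBUILD_MODNAME>/pdc0_stats (path shown for
 * illustration; the directory name comes from KBUILD_MODNAME below).
 */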
/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
						  debugfs_dir, pdcs,
						  &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}
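/*
 * Flag usage example (illustrative): for a 3-descriptor tx frame posted at
 * indexes 509..511 of a 512-entry ring, descriptor 509 carries SOF,
 * descriptor 510 carries no frame flags, and descriptor 511 carries
 * EOF | IOC, plus EOT because it is the last slot in the descriptor table.
 */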
/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:      PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return:  PDC_SUCCESS if one or more receive descriptors was processed
 *          -EAGAIN indicates that no response message is available
 *          -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;    /* virtual addr of start of resp message DMA header */
	u32 frags_rdy;   /* number of fragments ready to read */
	u32 rx_idx;      /* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);

	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}
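/*
 * Readiness check example (illustrative): with rxin == 5, last_rx_curr == 9
 * and a mask of 511, frags_rdy == 4. A message whose rxin_numd is 3 is
 * complete and will be processed; one whose rxin_numd is 5 is still in
 * flight and pdc_receive_one() returns -EAGAIN.
 */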
/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:  PDC state
 *
 * Called from the rx tasklet.
 * Return: 0 always
 */
static int
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32(&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);

	return 0;
}

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:      PDC state for the SPU that will process this request
 * @sg:        Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}
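/*
 * Splitting example (illustrative): a single 40000-byte scatterlist entry
 * exceeds PDC_DMA_BUF_MAX (16384), so it is written as three descriptors of
 * 16384, 16384 and 7232 bytes.
 */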
/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:    PDC state for SPU handling request
 * @dst_sg:  scatterlist providing rx buffers for response to be returned to
 *	     mailbox client
 * @ctx:     Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return:  PDC_SUCCESS if successful
 *          < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}
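/*
 * Note on the ptr writes above: the hardware ptr registers take byte
 * offsets, and each descriptor is 16 bytes (RING_ENTRY_SIZE), so a
 * descriptor index is converted with "index << 4". The extra rx descriptor
 * posted by pdc_rx_list_init() is also why pdc_send_data() checks for
 * rx_desc_req + 1 descriptors of rx ring space.
 */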
/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:   PDC state for the SPU whose receive ring gets the buffers
 * @sg:     Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:      Interrupt number that has fired
 * @data:     device struct for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here. Interrupts are
 * then disabled until the rx tasklet has drained the ring; the tasklet
 * reenables them when it finishes.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Defer processing to the rx tasklet */
	tasklet_schedule(&pdcs->rx_tasklet);
	return IRQ_HANDLED;
}

/**
 * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @data: PDC state structure
 */
static void pdc_tasklet_cb(unsigned long data)
{
	struct pdc_state *pdcs = (struct pdc_state *)data;

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}
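/*
 * Mask value check (informational): with PDC_RINGSET == 0, PDC_RCVINT_0 is
 * bit 16, so the PDC_INTMASK written above and in pdc_interrupts_init() is
 * 0x00010000, enabling only the ring 0 receive-done interrupt.
 */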
/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:    PDC instance state
 * @ringset: index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring      %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring  %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}
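/*
 * Sizing check (informational): PDC_RING_SIZE is 512 entries * 16 bytes =
 * 8192 bytes, and rings are allocated 8 KB aligned (RING_ALIGN == BIT(13)),
 * so a ring never crosses its own alignment boundary and
 * CRYPTO_D64_RS0_CD_MASK (8191) extracts the byte offset within the ring.
 */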
/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:    Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:  PDC state
 * @tx_cnt: The number of descriptors required in the tx ring
 * @rx_cnt: The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}

/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}
	return ret;
}
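/*
 * Descriptor count example (illustrative): pdc_desc_count() deliberately
 * overcounts; an entry of exactly 16384 bytes yields 16384/16384 + 1 = 2
 * even though one descriptor would do. The estimate is only used to decide
 * whether the rings have room, so overcounting is safe.
 */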
/**
 * pdc_send_data() - mailbox send_data function
 * @chan:	The mailbox channel on which the data is sent. The channel
 *              corresponds to a DMA ringset.
 * @data:	The mailbox message to be sent. The message must be a
 *              brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *	   -ENOTSUPP if the mailbox message is a type this driver does not
 *			support
 *	   < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor writes to happen without
	 * another thread getting in. The channel spin lock in the mailbox
	 * framework ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}
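/*
 * Illustrative mailbox client usage (hypothetical client code, not part of
 * this driver): a client obtains a channel and submits a request roughly as
 * follows.
 *
 *	struct brcm_message msg = {
 *		.type = BRCM_MESSAGE_SPU,
 *		.spu.src = src_sg,	// request scatterlist
 *		.spu.dst = dst_sg,	// response buffers
 *		.ctx = my_request,	// returned in the rx callback
 *	};
 *	chan = mbox_request_channel(&client, 0);
 *	mbox_send_message(chan, &msg);
 *
 * The response arrives via the client's rx_callback with msg.ctx restored.
 */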
/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static
void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure:                   %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs    %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 *
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata
 * header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return:  PDC_SUCCESS
 *          -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}
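/*
 * Pool sizing example (illustrative): on SPU-M with brcm,use-bcm-hdr set,
 * pdc_resp_hdr_len is 32 (rx status) + 8 (BCM header) = 40 bytes, so each
 * pool buffer is a 40-byte, 32-byte-aligned allocation.
 */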
/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
 * register the IRQ handler. Deferred handling of interrupts happens outside of
 * interrupt context in the rx tasklet.
 * @pdcs:   PDC state
 *
 * Set the interrupt mask for transmit and receive done.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return:  PDC_SUCCESS
 *          <0 if the IRQ request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	if (pdcs->hw_type == FA_HW)
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  FA_RCVLAZY0_OFFSET);
	else
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs:  PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. Rather than using a
 * transmit done interrupt, the PDC driver polls for transmit done via the
 * last_tx_done op to determine when a mailbox message has successfully been
 * transmitted.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = devm_mbox_controller_register(dev, mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/* Device tree API */
static const int pdc_hw = PDC_HW;
static const int fa_hw = FA_HW;

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
	{.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
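/*
 * Illustrative device tree node (values are examples only; addresses and
 * interrupt numbers vary by SoC):
 *
 *	pdc0: iproc-pdc0@612c0000 {
 *		compatible = "brcm,iproc-pdc-mbox";
 *		reg = <0 0x612c0000 0 0x445>;
 *		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 *		#mbox-cells = <1>;
 *		brcm,rx-status-len = <32>;
 *	};
 */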
/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmitted and received frames should be preceded by an
 * 8-byte BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *match;
	const int *hw_type;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	pdcs->hw_type = PDC_HW;

	match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
	if (match != NULL) {
		hw_type = match->data;
		pdcs->hw_type = *hw_type;
	}

	return 0;
}

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:   PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}
	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pdc_regs) {
		err = -ENODEV;
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
		goto cleanup_ring_pool;
	}

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdcs->debugfs_stats = NULL;
	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static int pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
	return 0;
}

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = of_match_ptr(pdc_mbox_of_match),
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");