/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Common functions for DMA access on PA Semi PWRficient
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/of.h>

#include <asm/pasemi_dma.h>

#define MAX_TXCH 64
#define MAX_RXCH 64

static struct pasdma_status *dma_status;

static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

static int base_hw_irq;

static int num_txch, num_rxch;

static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */

static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);

/* pasemi_read_iob_reg - read IOB register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
	return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);

/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
	out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);

/* pasemi_read_mac_reg - read MAC register
 * @intf: MAC interface
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
	return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);

/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
	out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);

/* pasemi_read_dma_reg - read DMA register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
	return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);

/* pasemi_write_dma_reg - write DMA register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
	out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);
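
/*
 * Example (illustrative sketch only, not part of this driver): the accessors
 * above are typically used for read-modify-write sequences on the shared DMA
 * registers, for instance when setting bits in PAS_DMA_COM_CFG:
 *
 *	u32 cfg;
 *
 *	cfg = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
 *	pasemi_write_dma_reg(PAS_DMA_COM_CFG, cfg | 0x18000000);
 *
 * The 0x18000000 value mirrors the resource-allocation setup done in
 * pasemi_dma_init() below; any other bit mask follows the same pattern.
 */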
static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
	int bit;
	int start, limit;

	switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
	case TXCHAN_EVT0:
		start = 0;
		limit = 10;
		break;
	case TXCHAN_EVT1:
		start = 10;
		limit = MAX_TXCH;
		break;
	default:
		start = 0;
		limit = MAX_TXCH;
		break;
	}
retry:
	bit = find_next_bit(txch_free, MAX_TXCH, start);
	if (bit >= limit)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, txch_free))
		goto retry;

	return bit;
}

static void pasemi_free_tx_chan(int chan)
{
	BUG_ON(test_bit(chan, txch_free));
	set_bit(chan, txch_free);
}

static int pasemi_alloc_rx_chan(void)
{
	int bit;
retry:
	bit = find_first_bit(rxch_free, MAX_RXCH);
	if (bit >= MAX_RXCH)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, rxch_free))
		goto retry;

	return bit;
}

static void pasemi_free_rx_chan(int chan)
{
	BUG_ON(test_bit(chan, rxch_free));
	set_bit(chan, rxch_free);
}

/* pasemi_dma_alloc_chan - Allocate a DMA channel
 * @type: Type of channel to allocate
 * @total_size: Total size of structure to allocate (to allow for more
 *		room behind the structure to be used by the client)
 * @offset: Offset in bytes from start of the total structure to the beginning
 *	    of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
 *	    not the first member of the client structure.
 *
 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
 * type argument specifies whether it's a RX or TX channel, and in the case
 * of TX channels, which group it needs to belong to (if any).
 *
 * Returns a pointer to the total structure allocated on success, NULL
 * on failure.
 */
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
			    int total_size, int offset)
{
	void *buf;
	struct pasemi_dmachan *chan;
	int chno;

	BUG_ON(total_size < sizeof(struct pasemi_dmachan));

	buf = kzalloc(total_size, GFP_KERNEL);

	if (!buf)
		return NULL;
	chan = buf + offset;

	chan->priv = buf;

	switch (type & (TXCHAN|RXCHAN)) {
	case RXCHAN:
		chno = pasemi_alloc_rx_chan();
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL,
					       base_hw_irq + num_txch + chno);
		chan->status = &dma_status->rx_sta[chno];
		break;
	case TXCHAN:
		chno = pasemi_alloc_tx_chan(type);
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
		chan->status = &dma_status->tx_sta[chno];
		break;
	}

	chan->chan_type = type;

	return chan;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);

/* pasemi_dma_free_chan - Free a previously allocated channel
 * @chan: Channel to free
 *
 * Frees a previously allocated channel. It will also deallocate any
 * descriptor ring associated with the channel, if allocated.
 */
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
	if (chan->ring_virt)
		pasemi_dma_free_ring(chan);

	switch (chan->chan_type & (RXCHAN|TXCHAN)) {
	case RXCHAN:
		pasemi_free_rx_chan(chan->chno);
		break;
	case TXCHAN:
		pasemi_free_tx_chan(chan->chno);
		break;
	}

	kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);
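
/*
 * Example (illustrative sketch, not part of this driver): a client such as a
 * MAC driver typically embeds struct pasemi_dmachan at the start of its own
 * per-channel structure and lets pasemi_dma_alloc_chan() allocate the whole
 * thing. "struct my_txchan" is a hypothetical client structure:
 *
 *	struct my_txchan {
 *		struct pasemi_dmachan chan;	// first member, so offset is 0
 *		void *client_state;
 *	};
 *
 *	struct my_txchan *txc;
 *
 *	txc = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct my_txchan), 0);
 *	if (!txc)
 *		return -ENOMEM;
 *	// ... use txc->chan with the ring/start/stop helpers below ...
 *	pasemi_dma_free_chan(&txc->chan);
 *
 * The offset argument is only needed when struct pasemi_dmachan is not the
 * first member of the client structure.
 */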
/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
 * @chan: Channel for which to allocate
 * @ring_size: Ring size in 64-bit (8-byte) words
 *
 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
 * on failure. The passed-in struct pasemi_dmachan is updated with the
 * virtual and DMA addresses of the ring.
 */
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
	BUG_ON(chan->ring_virt);

	chan->ring_size = ring_size;

	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
					     ring_size * sizeof(u64),
					     &chan->ring_dma, GFP_KERNEL);

	if (!chan->ring_virt)
		return -ENOMEM;

	memset(chan->ring_virt, 0, ring_size * sizeof(u64));

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);

/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
 * @chan: Channel for which to free the descriptor ring
 *
 * Frees a previously allocated descriptor ring for a channel.
 */
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
	BUG_ON(!chan->ring_virt);

	dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
			  chan->ring_virt, chan->ring_dma);
	chan->ring_virt = NULL;
	chan->ring_size = 0;
	chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);

/* pasemi_dma_start_chan - Start a DMA channel
 * @chan: Channel to start
 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
 *
 * Enables (starts) a DMA channel with optional additional arguments.
 */
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
	if (chan->chan_type == RXCHAN)
		pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
	else
		pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);

/* pasemi_dma_stop_chan - Stop a DMA channel
 * @chan: Channel to stop
 *
 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
 * CMDSTA register and waiting on the ACT (active) bit to clear, then
 * finally disabling the whole channel.
 *
 * This function only waits a short while for the channel to stop; if it
 * does not stop in time, failure is returned.
 *
 * Returns 1 on success, 0 on failure.
 */
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
	int reg, retries;
	u32 sta;

	if (chan->chan_type == RXCHAN) {
		reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	} else {
		reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	}

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);
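
/*
 * Example (illustrative sketch, not part of this driver): a typical channel
 * bring-up/tear-down sequence combines the ring and start/stop helpers above.
 * "chan" is a channel obtained from pasemi_dma_alloc_chan(), and the ring
 * size of 64 descriptor words is just an example value:
 *
 *	if (pasemi_dma_alloc_ring(chan, 64))
 *		goto err;
 *	pasemi_dma_start_chan(chan, 0);		// no extra CMDSTA bits
 *	// ... queue descriptors, run traffic ...
 *	if (!pasemi_dma_stop_chan(chan))
 *		printk(KERN_ERR "channel %d did not stop\n", chan->chno);
 *	pasemi_dma_free_ring(chan);
 */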
/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
			   dma_addr_t *handle)
{
	return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);

/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
			 dma_addr_t *handle)
{
	dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);
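
/*
 * Example (illustrative sketch, not part of this driver): buffer allocation
 * follows the usual coherent-DMA pattern; the returned virtual address is
 * used by the CPU and the handle is what gets written into descriptors.
 * The 4096-byte size is just an example value:
 *
 *	dma_addr_t buf_dma;
 *	void *buf;
 *
 *	buf = pasemi_dma_alloc_buf(chan, 4096, &buf_dma);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... point a descriptor at buf_dma, access the data through buf ...
 *	pasemi_dma_free_buf(chan, 4096, &buf_dma);
 */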
static void *map_onedev(struct pci_dev *p, int index)
{
	struct device_node *dn;
	void __iomem *ret;

	dn = pci_device_to_OF_node(p);
	if (!dn)
		goto fallback;

	ret = of_iomap(dn, index);
	if (!ret)
		goto fallback;

	return ret;
fallback:
	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
	static DEFINE_SPINLOCK(init_lock);
	struct pci_dev *iob_pdev;
	struct pci_dev *pdev;
	struct resource res;
	struct device_node *dn;
	int i, intf, err = 0;
	unsigned long timeout;
	u32 tmp;

	if (!machine_is(pasemi))
		return -ENODEV;

	spin_lock(&init_lock);

	/* Make sure we haven't already initialized */
	if (dma_pdev)
		goto out;

	iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!iob_pdev) {
		BUG();
		printk(KERN_WARNING "Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out;
	}
	iob_regs = map_onedev(iob_pdev, 0);

	dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!dma_pdev) {
		BUG();
		printk(KERN_WARNING "Can't find DMA controller\n");
		err = -ENODEV;
		goto out;
	}
	dma_regs = map_onedev(dma_pdev, 0);
	base_hw_irq = virq_to_hw(dma_pdev->irq);

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
	num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
	num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

	intf = 0;
	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	dn = pci_device_to_OF_node(iob_pdev);
	if (dn)
		err = of_address_to_resource(dn, 1, &res);
	if (!dn || err) {
		/* Fallback for old firmware */
		res.start = 0xfd800000;
		res.end = res.start + 0x1000;
	}
	dma_status = __ioremap(res.start, res.end-res.start, 0);
	pci_dev_put(iob_pdev);

	for (i = 0; i < MAX_TXCH; i++)
		__set_bit(i, txch_free);

	for (i = 0; i < MAX_RXCH; i++)
		__set_bit(i, rxch_free);

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warning("Warning: Could not disable RX section\n");
			break;
		}
	}

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warning("Warning: Could not disable TX section\n");
			break;
		}
	}

	/* setup resource allocations for the different DMA sections */
	tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
	pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

	/* enable tx section */
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	/* enable rx section */
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
		"(%d tx, %d rx channels)\n", num_txch, num_rxch);

out:
	spin_unlock(&init_lock);
	return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
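
/*
 * Example (illustrative sketch, not part of this driver): client drivers
 * (e.g. the pasemi_mac ethernet driver) call pasemi_dma_init() from their
 * probe path before using any other library function. Repeated calls are
 * safe, since the function only performs the initialization once:
 *
 *	err = pasemi_dma_init();
 *	if (err)
 *		return err;
 *	// pasemi_dma_alloc_chan() and friends may be used from here on
 */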