/*	$OpenBSD: bcm2711_pcie.c,v 1.11 2022/04/06 18:59:28 naddy Exp $	*/
/*
 * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the PCIe host bridge found on the Broadcom BCM2711
 * (Raspberry Pi 4), attached via the flattened device tree as a
 * "brcm,bcm2711-pcie" node.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/extent.h>
#include <sys/malloc.h>

#include <machine/intr.h>
#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/ppbreg.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

/*
 * Bridge register offsets within the mapped register block.
 * WIN0 is the single outbound CPU-to-PCIe memory window that
 * bcmpcie_attach() reprograms from the FDT "ranges" property.
 */
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO	0x400c
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI	0x4010
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT	0x4070
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI	0x4080
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI	0x4084
/* Indirect config space access for buses behind the root port. */
#define PCIE_EXT_CFG_DATA			0x8000
#define PCIE_EXT_CFG_INDEX			0x9000
#define PCIE_RGR1_SW_INIT_1			0x9210
#define PCIE_RGR1_SW_INIT_1_PERST_MASK		(1 << 0)
#define PCIE_RGR1_SW_INIT_1_INIT_MASK		(1 << 1)

/* Shorthand for 32-bit reads/writes of the bridge registers. */
#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

/*
 * One decoded entry of the FDT "ranges" or "dma-ranges" property:
 * a PCI bus address window and the CPU physical address it maps to.
 */
struct bcmpcie_range {
	uint32_t		flags;		/* phys.hi cell; bits 25:24 select the space */
	uint64_t		pci_base;	/* PCI bus address of the window */
	uint64_t		phys_base;	/* CPU physical address of the window */
	uint64_t		size;		/* window size in bytes */
};

struct bcmpcie_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;		/* parent tag for bridge registers */
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;	/* parent DMA tag */

	int			sc_node;	/* FDT node of this bridge */
	int			sc_acells;	/* child "#address-cells" */
	int			sc_scells;	/* child "#size-cells" */
	int			sc_pacells;	/* parent address cells */
	int			sc_pscells;	/* parent size cells */
	struct bcmpcie_range	*sc_ranges;	/* decoded "ranges" */
	int			sc_nranges;
	struct bcmpcie_range	*sc_dmaranges;	/* decoded "dma-ranges"; NULL if absent */
	int			sc_ndmaranges;

	/* Copies of the parent bus_space with our translating map hooks. */
	struct bus_space	sc_bus_iot;
	struct bus_space	sc_bus_memt;

	/* Copy of the parent DMA tag with our address-rewriting hooks. */
	struct machine_bus_dma_tag sc_dma;

	struct machine_pci_chipset sc_pc;	/* chipset ops handed to pci(4) */
	int			sc_bus;		/* root bus number (0) */
};

int bcmpcie_match(struct device *, void *, void *);
void bcmpcie_attach(struct device *, struct device *, void *);

const struct cfattach bcmpcie_ca = {
	sizeof (struct bcmpcie_softc), bcmpcie_match, bcmpcie_attach
};

struct cfdriver bcmpcie_cd = {
	NULL, "bcmpcie", DV_DULL
};

int
bcmpcie_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	/* Match the BCM2711 PCIe host bridge device tree node. */
	return OF_is_compatible(faa->fa_node, "brcm,bcm2711-pcie");
}

void	bcmpcie_attach_hook(struct device *, struct device *,
	    struct pcibus_attach_args *);
int	bcmpcie_bus_maxdevs(void *, int);
pcitag_t bcmpcie_make_tag(void *, int, int, int);
void	bcmpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
int	bcmpcie_conf_size(void *, pcitag_t);
pcireg_t bcmpcie_conf_read(void *, pcitag_t, int);
void	bcmpcie_conf_write(void *, pcitag_t, int, pcireg_t);
int	bcmpcie_probe_device_hook(void *, struct pci_attach_args *);

int	bcmpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
const char *bcmpcie_intr_string(void *, pci_intr_handle_t);
void	*bcmpcie_intr_establish(void *, pci_intr_handle_t, int,
	    struct cpu_info *, int (*)(void *), void *, char *);
void	bcmpcie_intr_disestablish(void *, void *);

int	bcmpcie_bs_iomap(bus_space_tag_t, bus_addr_t,
	    bus_size_t, int,
	    bus_space_handle_t *);
int	bcmpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
int	bcmpcie_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);
int	bcmpcie_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);

/*
 * Attach the host bridge: map its registers, decode the FDT "ranges"
 * and "dma-ranges" translations, reprogram the outbound memory window,
 * and attach a pci(4) bus with our chipset, bus_space and bus_dma hooks.
 */
void
bcmpcie_attach(struct device *parent, struct device *self, void *aux)
{
	struct bcmpcie_softc *sc = (struct bcmpcie_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct pcibus_attach_args pba;
	uint32_t *ranges;
	int i, j, nranges, rangeslen;
	uint32_t reg;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}

	/* If the bridge was left in reset, firmware didn't bring it up. */
	reg = HREAD4(sc, PCIE_RGR1_SW_INIT_1);
	if (reg & PCIE_RGR1_SW_INIT_1_INIT_MASK) {
		printf(": disabled\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_dmat = faa->fa_dmat;

	/* Child cell counts; fall back to the parent's if not given. */
	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	/*
	 * Memory and IO space translations.  Each "ranges" entry is
	 * sc_acells + sc_pacells + sc_scells cells: a child PCI address
	 * (whose first cell is the phys.hi flags word), a parent CPU
	 * address and a size.
	 */
	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	    (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	    sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges,
	    sizeof(struct bcmpcie_range), M_DEVBUF, M_WAITOK);
	sc->sc_nranges = nranges;

	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		/*
		 * The flags cell consumed above is part of the child
		 * address, so the address proper is sc_acells - 1 cells.
		 */
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);

	/* DMA translations; "dma-ranges" is optional and decoded alike. */
	rangeslen = OF_getproplen(sc->sc_node, "dma-ranges");
	if (rangeslen > 0) {
		if ((rangeslen % sizeof(uint32_t)) ||
		    (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
		    sc->sc_pacells + sc->sc_scells)) {
			printf(": invalid dma-ranges property\n");
			free(sc->sc_ranges, M_DEVBUF,
			    sc->sc_nranges * sizeof(struct bcmpcie_range));
			return;
		}

		ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "dma-ranges", ranges,
		    rangeslen);

		nranges = (rangeslen / sizeof(uint32_t)) /
		    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
		sc->sc_dmaranges = mallocarray(nranges,
		    sizeof(struct bcmpcie_range), M_DEVBUF, M_WAITOK);
		sc->sc_ndmaranges = nranges;

		for (i = 0, j = 0; i < sc->sc_ndmaranges; i++) {
			sc->sc_dmaranges[i].flags = ranges[j++];
			sc->sc_dmaranges[i].pci_base = ranges[j++];
			if (sc->sc_acells - 1 == 2) {
				sc->sc_dmaranges[i].pci_base <<= 32;
				sc->sc_dmaranges[i].pci_base |= ranges[j++];
			}
			sc->sc_dmaranges[i].phys_base = ranges[j++];
			if (sc->sc_pacells == 2) {
				sc->sc_dmaranges[i].phys_base <<= 32;
				sc->sc_dmaranges[i].phys_base |= ranges[j++];
			}
			sc->sc_dmaranges[i].size = ranges[j++];
			if (sc->sc_scells == 2) {
				sc->sc_dmaranges[i].size <<= 32;
				sc->sc_dmaranges[i].size |= ranges[j++];
			}
		}

		free(ranges, M_TEMP, rangeslen);
	}

	/*
	 * Reprogram the outbound window to match the configuration in
	 * the device tree.  This is necessary since the EDK2-based
	 * UEFI firmware reprograms the window.  0x02000000 in the
	 * flags cell selects 32-bit memory space; the base and limit
	 * are packed into one register using the PPB mask/shift.
	 */
	for (i = 0; i < sc->sc_nranges; i++) {
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000) {
			uint64_t cpu_base = sc->sc_ranges[i].phys_base;
			uint64_t cpu_limit = sc->sc_ranges[i].phys_base +
			    sc->sc_ranges[i].size - 1;

			HWRITE4(sc, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO,
			    sc->sc_ranges[i].pci_base);
			HWRITE4(sc, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI,
			    sc->sc_ranges[i].pci_base >> 32);
			HWRITE4(sc, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT,
			    (cpu_base & PPB_MEM_MASK) >> PPB_MEM_SHIFT |
			    (cpu_limit & PPB_MEM_MASK));
			HWRITE4(sc, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI,
			    cpu_base >> 32);
			HWRITE4(sc, PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI,
			    cpu_limit >> 32);
		}
	}

	printf("\n");

	/* Clone the parent bus_space tags, overriding only the map hook. */
	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = bcmpcie_bs_iomap;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = bcmpcie_bs_memmap;

	/* Clone the parent DMA tag, overriding the load hooks. */
	memcpy(&sc->sc_dma, sc->sc_dmat, sizeof(sc->sc_dma));
	sc->sc_dma._dmamap_load_buffer = bcmpcie_dmamap_load_buffer;
	sc->sc_dma._dmamap_load_raw = bcmpcie_dmamap_load_raw;
	sc->sc_dma._cookie = sc;

	/* Chipset callbacks used by the generic PCI code. */
	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = bcmpcie_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = bcmpcie_bus_maxdevs;
	sc->sc_pc.pc_make_tag = bcmpcie_make_tag;
	sc->sc_pc.pc_decompose_tag = bcmpcie_decompose_tag;
	sc->sc_pc.pc_conf_size = bcmpcie_conf_size;
	sc->sc_pc.pc_conf_read = bcmpcie_conf_read;
	sc->sc_pc.pc_conf_write = bcmpcie_conf_write;
	sc->sc_pc.pc_probe_device_hook = bcmpcie_probe_device_hook;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = bcmpcie_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = bcmpcie_intr_string;
	sc->sc_pc.pc_intr_establish = bcmpcie_intr_establish;
	sc->sc_pc.pc_intr_disestablish = bcmpcie_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = &sc->sc_dma;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = 0;

	config_found(self, &pba, NULL);
}

void
bcmpcie_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}

int
bcmpcie_bus_maxdevs(void *v, int bus)
{
	struct bcmpcie_softc *sc = v;

	/* Only one device on the root bus and the bus just below it. */
	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
		return 1;
	return 32;
}

pcitag_t
bcmpcie_make_tag(void *v, int bus, int device, int function)
{
	/* Return ECAM address.
	 */
	return ((bus << 20) | (device << 15) | (function << 12));
}

void
bcmpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
{
	/* Inverse of bcmpcie_make_tag(); any output pointer may be NULL. */
	if (bp != NULL)
		*bp = (tag >> 20) & 0xff;
	if (dp != NULL)
		*dp = (tag >> 15) & 0x1f;
	if (fp != NULL)
		*fp = (tag >> 12) & 0x7;
}

int
bcmpcie_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}

pcireg_t
bcmpcie_conf_read(void *v, pcitag_t tag, int reg)
{
	struct bcmpcie_softc *sc = v;
	int bus, dev, fn;

	bcmpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == 0) {
		/*
		 * The root complex's own config space is mapped flat at
		 * the start of the register block; only device 0 exists.
		 */
		KASSERT(dev == 0);
		return HREAD4(sc, tag | reg);
	}

	/* Downstream buses go through the indirect index/data window. */
	HWRITE4(sc, PCIE_EXT_CFG_INDEX, tag);
	return HREAD4(sc, PCIE_EXT_CFG_DATA + reg);
}

void
bcmpcie_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
{
	struct bcmpcie_softc *sc = v;
	int bus, dev, fn;

	/* Mirror image of bcmpcie_conf_read(). */
	bcmpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == 0) {
		KASSERT(dev == 0);
		HWRITE4(sc, tag | reg, data);
		return;
	}

	HWRITE4(sc, PCIE_EXT_CFG_INDEX, tag);
	HWRITE4(sc, PCIE_EXT_CFG_DATA + reg, data);
}

int
bcmpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
{
	return 0;
}

int
bcmpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
{
	int pin = pa->pa_rawintrpin;

	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
		return -1;

	if (pa->pa_tag == 0)
		return -1;

	/* Record what bcmpcie_intr_establish() needs for an INTx pin. */
	ihp->ih_pc = pa->pa_pc;
	ihp->ih_tag = pa->pa_intrtag;
	ihp->ih_intrpin = pa->pa_intrpin;
	ihp->ih_type = PCI_INTX;

	return 0;
}

const char *
bcmpcie_intr_string(void *v, pci_intr_handle_t ih)
{
	switch (ih.ih_type) {
	case PCI_MSI:
		return "msi";
	case PCI_MSIX:
		return "msix";
	}

	return "intx";
}

void *
bcmpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct bcmpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg[4];

	/* Only legacy INTx is routed through the FDT interrupt-map. */
	KASSERT(ih.ih_type == PCI_INTX);
	bcmpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

	/*
	 * Build the interrupt-map lookup key: a PCI unit address
	 * (phys.hi layout: bus/device/function) plus the interrupt pin.
	 */
	reg[0] = bus << 16 | dev << 11 | fn << 8;
	reg[1] = reg[2] = 0;
	reg[3] = ih.ih_intrpin;

	return fdt_intr_establish_imap_cpu(sc->sc_node, reg, sizeof(reg),
	    level, ci, func, arg, name);
}

void
bcmpcie_intr_disestablish(void *v, void *cookie)
{
}

int
bcmpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct bcmpcie_softc *sc = t->bus_private;
	int i;

	/*
	 * Translate a PCI I/O space address (flags 0x01000000) through
	 * the "ranges" windows and map it via the parent bus_space.
	 */
	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}

int
bcmpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct bcmpcie_softc *sc = t->bus_private;
	int i;

	/* Same as bcmpcie_bs_iomap() but for memory space (0x02000000). */
	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}

int
bcmpcie_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	struct bcmpcie_softc *sc = t->_cookie;
	int seg, firstseg = *segp;
	int error;

	/* Let the parent tag do the actual load... */
	error = sc->sc_dmat->_dmamap_load_buffer(sc->sc_dmat, map, buf, buflen,
	    p, flags, lastaddrp, segp, first);
	if (error)
		return error;

	/* ...then rewrite CPU physical addresses to PCI bus addresses. */
	if (sc->sc_dmaranges == NULL)
		return 0;

	/* For each segment. */
	for (seg = firstseg; seg <= *segp; seg++) {
		uint64_t addr = map->dm_segs[seg].ds_addr;
		uint64_t size = map->dm_segs[seg].ds_len;
		int i;

		/* For each range. */
		for (i = 0; i < sc->sc_ndmaranges; i++) {
			uint64_t pci_start = sc->sc_dmaranges[i].pci_base;
			uint64_t phys_start = sc->sc_dmaranges[i].phys_base;
			uint64_t phys_end = phys_start +
			    sc->sc_dmaranges[i].size;

			if (addr >= phys_start && addr + size <= phys_end) {
				map->dm_segs[seg].ds_addr -= phys_start;
				map->dm_segs[seg].ds_addr += pci_start;
				break;
			}
		}

		/* Segment outside every DMA window: not reachable by PCI. */
		if (i == sc->sc_ndmaranges)
			return EINVAL;
	}

	return 0;
}

int
bcmpcie_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct bcmpcie_softc *sc = t->_cookie;
	int seg, error;

	/* Same translation scheme as bcmpcie_dmamap_load_buffer(). */
	error = sc->sc_dmat->_dmamap_load_raw(sc->sc_dmat, map,
	    segs, nsegs, size, flags);
	if (error)
		return error;

	if (sc->sc_dmaranges == NULL)
		return 0;

	/* For each segment. */
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		uint64_t addr = map->dm_segs[seg].ds_addr;
		uint64_t size = map->dm_segs[seg].ds_len;
		int i;

		/* For each range. */
		for (i = 0; i < sc->sc_ndmaranges; i++) {
			uint64_t pci_start = sc->sc_dmaranges[i].pci_base;
			uint64_t phys_start = sc->sc_dmaranges[i].phys_base;
			uint64_t phys_end = phys_start +
			    sc->sc_dmaranges[i].size;

			if (addr >= phys_start && addr + size <= phys_end) {
				map->dm_segs[seg].ds_addr -= phys_start;
				map->dm_segs[seg].ds_addr += pci_start;
				break;
			}
		}

		/* Segment outside every DMA window: not reachable by PCI. */
		if (i == sc->sc_ndmaranges)
			return EINVAL;
	}

	return 0;
}