/*	$OpenBSD: xhci.c,v 1.116 2020/06/30 10:21:59 gerhard Exp $ */

/*
 * Copyright (c) 2014-2015 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/endian.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/xhcireg.h>
#include <dev/usb/xhcivar.h>

struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL
};

#ifdef XHCI_DEBUG
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))

struct pool *xhcixfer;

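/*
 * Per-pipe (endpoint) state.  Each pipe owns one transfer ring;
 * pending_xfers[] maps the ring index of a TD's last TRB back to its
 * xfer so that the event handler can find it, and free_trbs tracks
 * how many TRBs may still be queued on the ring.
 */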
struct xhci_pipe {
	struct usbd_pipe	pipe;

	uint8_t			dci;
	uint8_t			slot;	/* Device slot ID */
	struct xhci_ring	ring;

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];
	struct usbd_xfer	*aborted_xfer;
	int			 halted;
	size_t			 free_trbs;
	int			 skip;
};

int	xhci_reset(struct xhci_softc *);
int	xhci_intr1(struct xhci_softc *);
void	xhci_event_dequeue(struct xhci_softc *);
void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
int	xhci_event_xfer_generic(struct xhci_softc *, struct usbd_xfer *,
	    struct xhci_pipe *, uint32_t, int, uint8_t, uint8_t, uint8_t);
int	xhci_event_xfer_isoc(struct usbd_xfer *, struct xhci_pipe *,
	    uint32_t, int);
void	xhci_event_command(struct xhci_softc *, uint64_t);
void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
int	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
int	xhci_scratchpad_alloc(struct xhci_softc *, int);
void	xhci_scratchpad_free(struct xhci_softc *);
int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
void	xhci_softdev_free(struct xhci_softc *, uint8_t);
int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
	    size_t);
void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);

struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
	    uint8_t *, int);
void	xhci_xfer_done(struct usbd_xfer *xfer);
/* xHCI command helpers. */
int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
int	xhci_command_abort(struct xhci_softc *);

void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
int	xhci_cmd_set_address(struct xhci_softc *, uint8_t, uint64_t, uint32_t);
int	xhci_cmd_evaluate_ctx(struct xhci_softc *, uint8_t, uint64_t);
#ifdef XHCI_DEBUG
int	xhci_cmd_noop(struct xhci_softc *);
#endif

/* XXX should be part of the Bus interface. */
void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
void	xhci_pipe_close(struct usbd_pipe *);
void	xhci_noop(struct usbd_xfer *);

void	xhci_timeout(void *);
void	xhci_timeout_task(void *);

/* USBD Bus Interface. */
usbd_status	xhci_pipe_open(struct usbd_pipe *);
int		xhci_setaddr(struct usbd_device *, int);
void		xhci_softintr(void *);
void		xhci_poll(struct usbd_bus *);
struct usbd_xfer *xhci_allocx(struct usbd_bus *);
void		xhci_freex(struct usbd_bus *, struct usbd_xfer *);

usbd_status	xhci_root_ctrl_transfer(struct usbd_xfer *);
usbd_status	xhci_root_ctrl_start(struct usbd_xfer *);

usbd_status	xhci_root_intr_transfer(struct usbd_xfer *);
usbd_status	xhci_root_intr_start(struct usbd_xfer *);
void		xhci_root_intr_abort(struct usbd_xfer *);
void		xhci_root_intr_done(struct usbd_xfer *);

usbd_status	xhci_device_ctrl_transfer(struct usbd_xfer *);
usbd_status	xhci_device_ctrl_start(struct usbd_xfer *);
void		xhci_device_ctrl_abort(struct usbd_xfer *);

usbd_status	xhci_device_generic_transfer(struct usbd_xfer *);
usbd_status	xhci_device_generic_start(struct usbd_xfer *);
void		xhci_device_generic_abort(struct usbd_xfer *);
void		xhci_device_generic_done(struct usbd_xfer *);

usbd_status	xhci_device_isoc_transfer(struct usbd_xfer *);
usbd_status	xhci_device_isoc_start(struct usbd_xfer *);

#define XHCI_INTR_ENDPT 1

struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

struct usbd_pipe_methods xhci_device_intr_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

struct usbd_pipe_methods xhci_device_isoc_methods = {
	.transfer = xhci_device_isoc_transfer,
	.start = xhci_device_isoc_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

#ifdef XHCI_DEBUG
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
#endif

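/*
 * Helpers to allocate and free a physically contiguous, coherent
 * chunk of DMA'able memory, returning both its kernel virtual and
 * its bus address.  All the rings, contexts and tables below are
 * allocated through them.
 */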
int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
	    void **, bus_size_t, bus_size_t, bus_size_t);
void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);

int
usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary)
{
	int error;

	dma->tag = bus->dmatag;
	dma->size = size;

	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
	    BUS_DMA_NOWAIT, &dma->map);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0)
		goto destroy;

	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		goto free;

	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto unmap;

	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return (0);

unmap:
	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
free:
	bus_dmamem_free(dma->tag, &dma->seg, 1);
destroy:
	bus_dmamap_destroy(dma->tag, dma->map);
	return (error);
}

void
usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
{
	if (dma->map != NULL) {
		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(bus->dmatag, dma->map);
		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
		bus_dmamap_destroy(bus->dmatag, dma->map);
		dma->map = NULL;
	}
}

int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
	printf(", xHCI %x.%x\n", sc->sc_version >> 8, sc->sc_version & 0xff);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
		    0, "xhcixfer", NULL);
	}

	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/* Setup Device Context Base Address Array. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Setup command ring. */
	rw_init(&sc->sc_cmd_lock, "xhcicmd");
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
	    XHCI_CMDS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
	    XHCI_EVTS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
	    XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	return (0);
}

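/*
 * Program the controller with the addresses set up by xhci_init():
 * DCBAA, command ring, event ring segment table and dequeue pointer,
 * then enable interrupts and start it.
 */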
void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address. */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}

int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}

int
xhci_activate(struct device *self, int act)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		sc->sc_bus.use_polling++;

		xhci_reset(sc);
		xhci_ring_reset(sc, &sc->sc_cmd_ring);
		xhci_ring_reset(sc, &sc->sc_evt_ring);

		/* Renesas controllers, at least, need more time to resume. */
		usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);

		xhci_config(sc);

		sc->sc_bus.use_polling--;
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
		xhci_reset(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

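/*
 * Halt and reset the controller.  The controller must be halted
 * before HCRST is set, and the reset is only finished once both
 * HCRST and CNR (Controller Not Ready) have been cleared.
 */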
int
xhci_reset(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr)
		printf("%s: halt timeout\n", DEVNAME(sc));

	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
		if (!hcr)
			break;
	}

	if (hcr) {
		printf("%s: reset timeout\n", DEVNAME(sc));
		return (EIO);
	}

	return (0);
}

int
xhci_intr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc == NULL || sc->sc_bus.dying)
		return (0);

	/* If we get an interrupt while polling, then just ignore it. */
	if (sc->sc_bus.use_polling) {
		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
		return (0);
	}

	return (xhci_intr1(sc));
}

int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	if (intrs == 0xffffffff) {
		sc->sc_bus.dying = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		return (1);
	}

	/* Acknowledge interrupts */
	XOWRITE4(sc, XHCI_USBSTS, intrs);
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	usb_schedsoftintr(&sc->sc_bus);

	return (1);
}

void
xhci_poll(struct usbd_bus *bus)
{
	struct xhci_softc *sc = (struct xhci_softc *)bus;

	if (XOREAD4(sc, XHCI_USBSTS))
		xhci_intr1(sc);
}

void
xhci_softintr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_bus.dying)
		return;

	sc->sc_bus.intr_context++;
	xhci_event_dequeue(sc);
	sc->sc_bus.intr_context--;
}

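/*
 * Process all pending events and pass the new dequeue pointer back
 * to the controller.  The EHB (busy) bit is write-1-to-clear, so
 * OR'ing it into the ERDP write also acknowledges the interrupt.
 */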
void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		case XHCI_EVT_HOST_CTRL:
			/* TODO */
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}
	}

	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}

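/*
 * Complete all transfers pending on a pipe in the skip state; the
 * controller does not generate transfer events for TDs it skipped
 * after reporting a missed service interval.
 */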
void
xhci_skip_all(struct xhci_pipe *xp)
{
	struct usbd_xfer *xfer, *last;

	if (xp->skip) {
		/*
		 * Find the last transfer to skip; this is necessary
		 * because xhci_xfer_done() posts new transfers which
		 * we don't want to skip.
		 */
		last = SIMPLEQ_FIRST(&xp->pipe.queue);
		if (last == NULL)
			goto done;
		while ((xfer = SIMPLEQ_NEXT(last, next)) != NULL)
			last = xfer;

		do {
			xfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (xfer == NULL)
				goto done;
			DPRINTF(("%s: skipping %p\n", __func__, xfer));
			xfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(xfer);
		} while (xfer != last);
done:
		xp->skip = 0;
	}
}

void
xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
    uint32_t flags)
{
	struct xhci_pipe *xp;
	struct usbd_xfer *xfer;
	uint8_t dci, slot, code, xfertype;
	uint32_t remain;
	int trb_idx;

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);
	if (slot > sc->sc_noslot) {
		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
		return;
	}

	xp = sc->sc_sdevs[slot].pipes[dci - 1];
	if (xp == NULL) {
		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
		return;
	}

	code = XHCI_TRB_GET_CODE(status);
	remain = XHCI_TRB_REMAIN(status);

	switch (code) {
	case XHCI_CODE_RING_UNDERRUN:
		DPRINTF(("%s: slot %u underrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_RING_OVERRUN:
		DPRINTF(("%s: slot %u overrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_MISSED_SRV:
		DPRINTF(("%s: slot %u missed srv with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xp->skip = 1;
		return;
	default:
		break;
	}

	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, xp->ring.ntrb - 1);
		return;
	}

	xfer = xp->pending_xfers[trb_idx];
	if (xfer == NULL) {
		DPRINTF(("%s: NULL xfer pointer\n", DEVNAME(sc)));
		return;
	}

	if (remain > xfer->length)
		remain = xfer->length;

	xfertype = UE_GET_XFERTYPE(xfer->pipe->endpoint->edesc->bmAttributes);

	switch (xfertype) {
	case UE_BULK:
	case UE_INTERRUPT:
	case UE_CONTROL:
		if (xhci_event_xfer_generic(sc, xfer, xp, remain, trb_idx,
		    code, slot, dci))
			return;
		break;
	case UE_ISOCHRONOUS:
		if (xhci_event_xfer_isoc(xfer, xp, remain, trb_idx))
			return;
		break;
	default:
		panic("xhci_event_xfer: unknown xfer type %u", xfertype);
	}

	xhci_xfer_done(xfer);
}

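/*
 * Compute the number of bytes described by the TRBs of a TD.  Only
 * (ntrb - 1) TRBs of a ring are usable, the last one being the link
 * TRB, hence the modulo when walking back to the TD's first TRB.
 */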
uint32_t
xhci_xfer_length_generic(struct xhci_xfer *xx, struct xhci_pipe *xp,
    int trb_idx)
{
	int trb0_idx;
	uint32_t len = 0, type;

	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	while (1) {
		type = letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK;
		if (type == XHCI_TRB_TYPE_NORMAL || type == XHCI_TRB_TYPE_DATA)
			len += XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		if (trb0_idx == trb_idx)
			break;
		if (++trb0_idx == xp->ring.ntrb)
			trb0_idx = 0;
	}
	return len;
}

int
xhci_event_xfer_generic(struct xhci_softc *sc, struct usbd_xfer *xfer,
    struct xhci_pipe *xp, uint32_t remain, int trb_idx,
    uint8_t code, uint8_t slot, uint8_t dci)
{
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		if (xfer->actlen == 0) {
			if (remain)
				xfer->actlen =
				    xhci_xfer_length_generic(xx, xp, trb_idx) -
				    remain;
			else
				xfer->actlen = xfer->length;
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		/*
		 * Use values from the transfer TRB instead of the status TRB.
		 */
		if (xfer->actlen == 0)
			xfer->actlen =
			    xhci_xfer_length_generic(xx, xp, trb_idx) - remain;
		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		if (xx->index != trb_idx) {
			DPRINTF(("%s: short xfer %p for %u\n",
			    DEVNAME(sc), xfer, xx->index));
			return (1);
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
		/* Prevent any timeout from kicking in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return (1);
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
			return (1);
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	return (0);
}

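/*
 * Per-frame accounting for isochronous transfers.  Returns 1 if the
 * event concerned an intermediate TRB and the TD is not complete yet.
 */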
int
xhci_event_xfer_isoc(struct usbd_xfer *xfer, struct xhci_pipe *xp,
    uint32_t remain, int trb_idx)
{
	struct usbd_xfer *skipxfer;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int trb0_idx, frame_idx = 0;

	KASSERT(xx->index >= 0);
	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	/* Find the corresponding frame index for this TRB. */
	while (trb0_idx != trb_idx) {
		if ((letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK) == XHCI_TRB_TYPE_ISOCH)
			frame_idx++;
		if (trb0_idx++ == (xp->ring.ntrb - 1))
			trb0_idx = 0;
	}

	/*
	 * If we queued two TRBs for a frame and this is the second TRB,
	 * check if the first TRB needs accounting since it might not have
	 * raised an interrupt if all of its data was received.
	 */
	if ((letoh32(xp->ring.trbs[trb_idx].trb_flags) & XHCI_TRB_TYPE_MASK) ==
	    XHCI_TRB_TYPE_NORMAL) {
		frame_idx--;
		if (trb_idx == 0)
			trb0_idx = xp->ring.ntrb - 2;
		else
			trb0_idx = trb_idx - 1;
		if (xfer->frlengths[frame_idx] == 0) {
			xfer->frlengths[frame_idx] = XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		}
	}

	xfer->frlengths[frame_idx] +=
	    XHCI_TRB_LEN(letoh32(xp->ring.trbs[trb_idx].trb_status)) - remain;
	xfer->actlen += xfer->frlengths[frame_idx];

	if (xx->index != trb_idx)
		return (1);

	if (xp->skip) {
		while (1) {
			skipxfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (skipxfer == xfer || skipxfer == NULL)
				break;
			DPRINTF(("%s: skipping %p\n", __func__, skipxfer));
			skipxfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(skipxfer);
		}
		xp->skip = 0;
	}

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	xfer->status = USBD_NORMAL_COMPLETION;

	return (0);
}

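/*
 * Handle a command completion event.  Synchronous commands wake up
 * their submitter in xhci_command_submit(), while the asynchronous
 * Reset Endpoint and Set TR Dequeue commands drive the endpoint
 * recovery machinery of xhci_event_xfer_generic().
 */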
void
xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
{
	struct xhci_trb *trb;
	struct xhci_pipe *xp;
	uint32_t flags;
	uint8_t dci, slot;
	int trb_idx, status;

	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
		return;
	}

	trb = &sc->sc_cmd_ring.trbs[trb_idx];

	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	flags = letoh32(trb->trb_flags);

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);

	switch (flags & XHCI_TRB_TYPE_MASK) {
	case XHCI_CMD_RESET_EP:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Update the dequeue pointer past the last TRB. */
		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
		    DEQPTR(xp->ring) | xp->ring.toggle);
		break;
	case XHCI_CMD_SET_TR_DEQ:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		status = xp->halted;
		xp->halted = 0;
		if (xp->aborted_xfer != NULL) {
			xp->aborted_xfer->status = status;
			xhci_xfer_done(xp->aborted_xfer);
			wakeup(xp);
		}
		break;
	case XHCI_CMD_CONFIG_EP:
	case XHCI_CMD_STOP_EP:
	case XHCI_CMD_DISABLE_SLOT:
	case XHCI_CMD_ENABLE_SLOT:
	case XHCI_CMD_ADDRESS_DEVICE:
	case XHCI_CMD_EVAL_CTX:
	case XHCI_CMD_NOOP:
		/*
		 * All these commands are synchronous.
		 *
		 * If TRBs differ, this could be a delayed result after we
		 * gave up waiting for the expected TRB due to timeout.
		 */
		if (sc->sc_cmd_trb == trb) {
			sc->sc_cmd_trb = NULL;
			wakeup(&sc->sc_cmd_trb);
		}
		break;
	default:
		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
	}
}

void
xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
{
	struct usbd_xfer *xfer = sc->sc_intrxfer;
	uint32_t port = XHCI_TRB_PORTID(paddr);
	uint8_t *p;

	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
		return;
	}

	if (xfer == NULL)
		return;

	p = KERNADDR(&xfer->dmabuf, 0);
	memset(p, 0, xfer->length);

	p[port/8] |= 1 << (port%8);
	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
}

void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xx->index = -1;
	xx->ntrb = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}

/*
 * Calculate the Device Context Index (DCI) for endpoints as stated
 * in section 4.5.1 of xHCI specification r1.1, e.g. the default
 * control endpoint (0x00) maps to DCI 1, endpoint 0x81 (IN 1) to
 * DCI 3 and endpoint 0x02 (OUT 2) to DCI 4.
 */
static inline uint8_t
xhci_ed2dci(usb_endpoint_descriptor_t *ed)
{
	uint8_t dir;

	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);

	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
		dir = 1;
	else
		dir = 0;

	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
}

usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	int error;

	KASSERT(xp->slot == 0);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/*
		 * Get a slot and init the device's contexts.
		 *
		 * Since the control endpoint, represented as the default
		 * pipe, is always opened first we are dealing with a
		 * new device.  Put a new slot in the ENABLED state.
		 */
		error = xhci_cmd_slot_control(sc, &slot, 1);
		if (error || slot == 0 || slot > sc->sc_noslot)
			return (USBD_INVAL);

		if (xhci_softdev_alloc(sc, slot)) {
			xhci_cmd_slot_control(sc, &slot, 0);
			return (USBD_NOMEM);
		}

		break;
	case UE_ISOCHRONOUS:
		pipe->methods = &xhci_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->methods = &xhci_device_bulk_methods;
		break;
	case UE_INTERRUPT:
		pipe->methods = &xhci_device_intr_methods;
		break;
	default:
		return (USBD_INVAL);
	}

	/*
	 * Our USBD Bus Interface is pipe-oriented but for most of the
	 * operations we need to access a device context, so keep track
	 * of the slot ID in every pipe.
	 */
	if (slot == 0)
		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;

	xp->slot = slot;
	xp->dci = xhci_ed2dci(ed);

	if (xhci_pipe_init(sc, pipe)) {
		xhci_cmd_slot_control(sc, &slot, 0);
		return (USBD_IOERROR);
	}

	return (USBD_NORMAL_COMPLETION);
}

/*
 * Set the maximum Endpoint Service Interface Time (ESIT) payload and
 * the average TRB buffer length for an endpoint.
 */
static inline uint32_t
xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);

	switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
	case UE_CONTROL:
		mep = 0;
		atl = 8;
		break;
	case UE_INTERRUPT:
	case UE_ISOCHRONOUS:
		if (pipe->device->speed == USB_SPEED_SUPER) {
			/* XXX Read the companion descriptor */
		}

		mep = (UE_GET_TRANS(mps) + 1) * UE_GET_SIZE(mps);
		atl = mep;
		break;
	case UE_BULK:
	default:
		mep = 0;
		atl = 0;
	}

	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
}

static inline uint32_t
xhci_linear_interval(usb_endpoint_descriptor_t *ed)
{
	uint32_t ival = min(max(1, ed->bInterval), 255);

	return (fls(ival) - 1);
}

static inline uint32_t
xhci_exponential_interval(usb_endpoint_descriptor_t *ed)
{
	uint32_t ival = min(max(1, ed->bInterval), 16);

	return (ival - 1);
}

/*
 * Return interval for endpoint expressed in 2^(ival) * 125us.
 *
 * See section 6.2.3.6 of xHCI r1.1 Specification for more details.
 */
uint32_t
xhci_pipe_interval(struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t speed = pipe->device->speed;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint32_t ival;

	if (xfertype == UE_CONTROL || xfertype == UE_BULK) {
		/* Control and Bulk endpoints never NAK. */
		ival = 0;
	} else {
		switch (speed) {
		case USB_SPEED_FULL:
			if (xfertype == UE_ISOCHRONOUS) {
				/* Convert 1-2^(15)ms into 3-18 */
				ival = xhci_exponential_interval(ed) + 3;
				break;
			}
			/* FALLTHROUGH */
		case USB_SPEED_LOW:
			/* Convert 1-255ms into 3-10 */
			ival = xhci_linear_interval(ed) + 3;
			break;
		case USB_SPEED_HIGH:
		case USB_SPEED_SUPER:
		default:
			/* Convert 1-2^(15) * 125us into 0-15 */
			ival = xhci_exponential_interval(ed);
			break;
		}
	}

	KASSERT(ival <= 15);
	return (XHCI_EPCTX_SET_IVAL(ival));
}

uint32_t
xhci_pipe_maxburst(struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint32_t maxb = 0;

	switch (pipe->device->speed) {
	case USB_SPEED_HIGH:
		if (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)
			maxb = UE_GET_TRANS(mps);
		break;
	case USB_SPEED_SUPER:
		/* XXX Read the companion descriptor */
	default:
		break;
	}

	return (maxb);
}

static inline uint32_t
xhci_last_valid_dci(struct xhci_pipe **pipes, struct xhci_pipe *ignore)
{
	struct xhci_pipe *lxp;
	int i;

	/* Find the last valid Endpoint Context. */
	for (i = 30; i >= 0; i--) {
		lxp = pipes[i];
		if (lxp != NULL && lxp != ignore)
			return XHCI_SCTX_DCI(lxp->dci);
	}

	return 0;
}

int
xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t speed, cerr = 0;
	uint32_t route = 0, rhport = 0;
	struct usbd_device *hub;

	/*
	 * Calculate the Route String.  Assume that there is no hub with
	 * more than 15 ports and that they all have a depth < 6.  See
	 * section 8.9 of USB 3.1 Specification for more details.
	 */
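	/*
	 * For example, a device on port 2 of a hub attached to a root
	 * hub port gets the Route String 0x2; the port number of a
	 * deeper hub would fill the next nibble.
	 */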
	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
		uint32_t port = hub->powersrc->portno;
		uint32_t depth = hub->myhub->depth;

		route |= port << (4 * (depth - 1));
	}

	/* Get Root Hub port */
	rhport = hub->powersrc->portno;

	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		speed = XHCI_SPEED_LOW;
		break;
	case USB_SPEED_FULL:
		speed = XHCI_SPEED_FULL;
		break;
	case USB_SPEED_HIGH:
		speed = XHCI_SPEED_HIGH;
		break;
	case USB_SPEED_SUPER:
		speed = XHCI_SPEED_SUPER;
		break;
	default:
		return (USBD_INVAL);
	}

	/* Setup the endpoint context */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;

	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |
	    XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |
	    XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DEQPTR(xp->ring) | xp->ring.toggle
	);

	/* Unmask the new endpoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(
	    xhci_last_valid_dci(sdev->pipes, NULL) | XHCI_SCTX_SPEED(speed) |
	    XHCI_SCTX_ROUTE(route)
	);
	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

/* XXX */
#define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
	/*
	 * If we are opening the interrupt pipe of a hub, update its
	 * context before putting it in the CONFIGURED state.
	 */
	if (pipe->device->hub != NULL) {
		int nports = pipe->device->hub->nports;

		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));

		if (UHUB_IS_MTT(pipe->device))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
		);
	}

	/*
	 * If this is a Low or Full Speed device below an external High
	 * Speed hub, it needs some TT love.
	 */
	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
		struct usbd_device *hshub = pipe->device->myhsport->parent;
		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;

		if (UHUB_IS_MTT(hshub))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_HUB_SID(slot) |
		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
		);
	}
#undef UHUB_IS_MTT

	/* Unmask the slot context */
	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

#ifdef XHCI_DEBUG
	struct usbd_device *dev = pipe->device;
	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
	    pipe->endpoint->edesc->bEndpointAddress);
#endif

	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
		return (ENOMEM);

	xp->free_trbs = xp->ring.ntrb;
	xp->halted = 0;

	sdev->pipes[xp->dci - 1] = xp;

	error = xhci_context_setup(sc, pipe);
	if (error)
		return (error);

	if (xp->dci == 1) {
		/*
		 * If we are opening the default pipe, the Slot should
		 * be in the ENABLED state.  Issue an "Address Device"
		 * with BSR=1 to put the device in the DEFAULT state.
		 * We cannot jump directly to the ADDRESSED state with
		 * BSR=0 because some Low/Full speed devices won't accept
		 * a SET_ADDRESS command before we've read their device
		 * descriptor.
		 */
		error = xhci_cmd_set_address(sc, xp->slot,
		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
	} else {
		error = xhci_cmd_configure_ep(sc, xp->slot,
		    sdev->ictx_dma.paddr);
	}

	if (error) {
		xhci_ring_free(sc, &xp->ring);
		return (EIO);
	}

	return (0);
}

void
xhci_pipe_close(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];

	/* Root Hub */
	if (pipe->device->depth == 0)
		return;

	/* Mask the endpoint */
	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
	sdev->input_ctx->add_flags = 0;

	/* Update last valid Endpoint Context */
	sdev->slot_ctx->info_lo &= htole32(~XHCI_SCTX_DCI(31));
	sdev->slot_ctx->info_lo |= htole32(xhci_last_valid_dci(sdev->pipes, xp));

	/* Clear the Endpoint Context */
	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));

	xhci_ring_free(sc, &xp->ring);
	sdev->pipes[xp->dci - 1] = NULL;

	/*
	 * If we are closing the default pipe, the device is probably
	 * gone, so put its slot in the DISABLED state.
	 */
	if (xp->dci == 1) {
		xhci_cmd_slot_control(sc, &xp->slot, 0);
		xhci_softdev_free(sc, xp->slot);
	}
}

/*
 * Transition a device from the DEFAULT to the ADDRESSED Slot state;
 * this hook is needed for Low/Full speed devices.
 *
 * See section 4.5.3 of USB 3.1 Specification for more details.
 */
int
xhci_setaddr(struct usbd_device *dev, int addr)
{
	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

	/* Root Hub */
	if (dev->depth == 0)
		return (0);

	KASSERT(xp->dci == 1);

	error = xhci_context_setup(sc, dev->default_pipe);
	if (error)
		return (error);

	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);

#ifdef XHCI_DEBUG
	if (error == 0) {
		struct xhci_sctx *sctx;
		uint8_t addr;

		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);

		/* Get output slot context. */
		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
		error = (addr == 0);

		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
	}
#endif

	return (error);
}

struct usbd_xfer *
xhci_allocx(struct usbd_bus *bus)
{
	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
}

void
xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	pool_put(xhcixfer, xfer);
}

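/*
 * Allocate the scratchpad memory that the controller keeps for its
 * own use.  Entry 0 of the DCBAA points to a table of pointers, each
 * of which points to one scratchpad page.
 */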
int
xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
{
	uint64_t *pte;
	int error, i;

	/* Allocate the required entry for the table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
	    sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Allocate pages. XXX does not need to be contiguous. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
		return (ENOMEM);
	}

	for (i = 0; i < npage; i++) {
		pte[i] = htole64(
		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
		);
	}

	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
	    npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Entry 0 points to the table of scratchpad pointers. */
	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_spad.npage = npage;

	return (0);
}

void
xhci_scratchpad_free(struct xhci_softc *sc)
{
	sc->sc_dcbaa.segs[0] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
}

int
xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
    size_t alignment)
{
	size_t size;
	int error;

	size = ntrb * sizeof(struct xhci_trb);

	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
	if (error)
		return (error);

	ring->ntrb = ntrb;

	xhci_ring_reset(sc, ring);

	return (0);
}

void
xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
{
	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
}

void
xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
{
	size_t size;

	size = ring->ntrb * sizeof(struct xhci_trb);

	memset(ring->trbs, 0, size);

	ring->index = 0;
	ring->toggle = XHCI_TRB_CYCLE;

	/*
	 * Since all our rings use only one segment, at least for
	 * the moment, link their tail to their head.
	 */
	if (ring != &sc->sc_evt_ring) {
		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];

		trb->trb_paddr = htole64(ring->dma.paddr);
		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG |
		    XHCI_TRB_CYCLE);
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREWRITE);
	} else
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

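/*
 * Rings use the cycle bit protocol: a TRB may only be consumed once
 * its cycle bit matches the ring's toggle value, which is flipped
 * every time the ring wraps around.
 */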
struct xhci_trb*
xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	/* Make sure this TRB can be consumed. */
	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
		return (NULL);

	ring->index++;

	if (ring->index == ring->ntrb) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}

struct xhci_trb*
xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *lnk, *trb;

	KASSERT(ring->index < ring->ntrb);

	/* Setup the link TRB after the previous TRB is done. */
	if (ring->index == 0) {
		lnk = &ring->trbs[ring->ntrb - 1];
		trb = &ring->trbs[ring->ntrb - 2];

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		lnk->trb_flags &= htole32(~XHCI_TRB_CHAIN);
		if (letoh32(trb->trb_flags) & XHCI_TRB_CHAIN)
			lnk->trb_flags |= htole32(XHCI_TRB_CHAIN);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
	}

	trb = &ring->trbs[ring->index++];
	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	/* Toggle cycle state of the link TRB and skip it. */
	if (ring->index == (ring->ntrb - 1)) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}

struct xhci_trb *
xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
    uint8_t *togglep, int last)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	KASSERT(xp->free_trbs >= 1);
	xp->free_trbs--;
	*togglep = xp->ring.toggle;

	switch (last) {
	case -1:	/* This will be a zero-length TD. */
		xp->pending_xfers[xp->ring.index] = NULL;
		break;
	case 0:		/* This will be in a chain. */
		xp->pending_xfers[xp->ring.index] = xfer;
		xx->index = -2;
		xx->ntrb += 1;
		break;
	case 1:		/* This will terminate a chain. */
		xp->pending_xfers[xp->ring.index] = xfer;
		xx->index = xp->ring.index;
		xx->ntrb += 1;
		break;
	}

	return (xhci_ring_produce(sc, &xp->ring));
}

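/*
 * Submit a command TRB.  With a non-zero timeout the caller must hold
 * sc_cmd_lock and this routine sleeps until the completion event
 * arrives; with a timeout of 0 the command is submitted asynchronously.
 */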
int
xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
{
	struct xhci_trb *trb;
	int s, error = 0;

	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);

	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);

	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
	if (trb == NULL)
		return (EAGAIN);
	trb->trb_paddr = trb0->trb_paddr;
	trb->trb_status = trb0->trb_status;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	trb->trb_flags = trb0->trb_flags;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	if (timeout == 0) {
		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
		return (0);
	}

	rw_assert_wrlock(&sc->sc_cmd_lock);

	s = splusb();
	sc->sc_cmd_trb = trb;
	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
	error = tsleep_nsec(&sc->sc_cmd_trb, PZERO, "xhcicmd", timeout);
	if (error) {
#ifdef XHCI_DEBUG
		printf("%s: tsleep() = %d\n", __func__, error);
		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
		xhci_dump_trb(trb);
#endif
		KASSERT(sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL);
		/*
		 * Just because the timeout expired does not mean the TRB
		 * is no longer active!  We could still get an interrupt
		 * for this TRB later on and then wonder what to do with
		 * it.  We'd rather abort it.
		 */
		xhci_command_abort(sc);
		sc->sc_cmd_trb = NULL;
		splx(s);
		return (error);
	}
	splx(s);

	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));

	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
		return (0);

#ifdef XHCI_DEBUG
	printf("%s: event error code=%d, result=%d \n", DEVNAME(sc),
	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
	xhci_dump_trb(trb0);
#endif
	return (EIO);
}

int
xhci_command_abort(struct xhci_softc *sc)
{
	uint32_t reg;
	int i;

	reg = XOREAD4(sc, XHCI_CRCR_LO);
	if ((reg & XHCI_CRCR_LO_CRR) == 0)
		return (0);

	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	for (i = 0; i < 2500; i++) {
		DELAY(100);
		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
		if (!reg)
			break;
	}

	if (reg) {
		printf("%s: command ring abort timeout\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}

int
xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

int
xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

void
xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
	);

	xhci_command_submit(sc, &trb, 0);
}

void
xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
    uint64_t addr)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
	);

	xhci_command_submit(sc, &trb, 0);
}

int
xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	if (enable)
		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
	else
		trb.trb_flags = htole32(
		    XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
		);

int
xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

int
xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

void
xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
	);

	xhci_command_submit(sc, &trb, 0);
}

void
xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
   uint64_t addr)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
	);

	xhci_command_submit(sc, &trb, 0);
}

int
xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	if (enable)
		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
	else
		trb.trb_flags = htole32(
		    XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
		);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	if (error != 0)
		return (EIO);

	if (enable)
		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));

	return (0);
}

int
xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
    uint32_t bsr)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

int
xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_EVAL_CTX
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

#ifdef XHCI_DEBUG
int
xhci_cmd_noop(struct xhci_softc *sc)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(XHCI_CMD_NOOP);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}
#endif
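/*
 * The synchronous helpers above take sc_cmd_lock and sleep in
 * xhci_command_submit() until the completion event arrives, while the
 * *_async variants pass a timeout of 0 and return immediately; their
 * completion events are handled in xhci_event_command().
 */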
int
xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
	int i, error;
	uint8_t *kva;

	/*
	 * Set up the input context.  Even with a 64-byte context size
	 * it fits into the smallest supported page size, so use that.
	 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	sdev->input_ctx = (struct xhci_inctx *)kva;
	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
	for (i = 0; i < 31; i++)
		sdev->ep_ctx[i] =
		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);

	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
	    slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));

	/* Set up the output context. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
		return (ENOMEM);
	}

	memset(&sdev->pipes, 0, sizeof(sdev->pipes));

	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
	    slot, (long long)sdev->octx_dma.paddr));

	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

void
xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];

	sc->sc_dcbaa.segs[slot] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);

	memset(sdev, 0, sizeof(struct xhci_soft_dev));
}
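/*
 * Input context layout, for reference (sc_ctxsize is 32 bytes by
 * default, or 64 when the controller requests it):
 *
 *	kva + 0 * ctxsize		input control context
 *	kva + 1 * ctxsize		slot context
 *	kva + (i + 2) * ctxsize		endpoint context for DCI (i + 1)
 *
 * which is why ep_ctx[] above is indexed with (i + 2).
 */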
/* Root hub descriptors. */
usb_device_descriptor_t xhci_devd = {
	USB_DEVICE_DESCRIPTOR_SIZE,
	UDESC_DEVICE,		/* type */
	{0x00, 0x03},		/* USB version */
	UDCLASS_HUB,		/* class */
	UDSUBCLASS_HUB,		/* subclass */
	UDPROTO_HSHUBSTT,	/* protocol */
	9,			/* max packet */
	{0},{0},{0x00,0x01},	/* device id */
	1,2,0,			/* string indexes */
	1			/* # of configurations */
};

const usb_config_descriptor_t xhci_confd = {
	USB_CONFIG_DESCRIPTOR_SIZE,
	UDESC_CONFIG,
	{USB_CONFIG_DESCRIPTOR_SIZE +
	 USB_INTERFACE_DESCRIPTOR_SIZE +
	 USB_ENDPOINT_DESCRIPTOR_SIZE},
	1,
	1,
	0,
	UC_BUS_POWERED | UC_SELF_POWERED,
	0			/* max power */
};

const usb_interface_descriptor_t xhci_ifcd = {
	USB_INTERFACE_DESCRIPTOR_SIZE,
	UDESC_INTERFACE,
	0,
	0,
	1,
	UICLASS_HUB,
	UISUBCLASS_HUB,
	UIPROTO_HSHUBSTT,
	0
};

const usb_endpoint_descriptor_t xhci_endpd = {
	USB_ENDPOINT_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT,
	UE_DIR_IN | XHCI_INTR_ENDPT,
	UE_INTERRUPT,
	{2, 0},			/* max 15 ports */
	255
};

const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT_SS_COMP,
	0,
	0,
	{0, 0}
};

const usb_hub_descriptor_t xhci_hubd = {
	USB_HUB_DESCRIPTOR_SIZE,
	UDESC_SS_HUB,
	0,
	{0,0},
	0,
	0,
	{0},
};

void
xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	int error;

	splsoftassert(IPL_SOFTUSB);

	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));

	/* XXX The stack should not call abort() in this case. */
	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
		xfer->status = status;
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);
		usb_transfer_complete(xfer);
		return;
	}

	/* Transfer is already done. */
	if (xfer->status != USBD_IN_PROGRESS) {
		DPRINTF(("%s: already done\n", __func__));
		return;
	}

	/* Prevent any timeout from kicking in. */
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);

	/* Indicate that we are aborting this transfer. */
	xp->halted = status;
	xp->aborted_xfer = xfer;

	/* Stop the endpoint and wait until the hardware says so. */
	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) {
		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));
		/* Assume the device is gone. */
		xp->halted = 0;
		xp->aborted_xfer = NULL;
		xfer->status = status;
		usb_transfer_complete(xfer);
		return;
	}

	/*
	 * The transfer was already completed when we stopped the
	 * endpoint, no need to move the dequeue pointer past its
	 * TRBs.
	 */
	if (xp->aborted_xfer == NULL) {
		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
		xp->halted = 0;
		return;
	}

	/*
	 * At this stage the endpoint has been stopped, so update its
	 * dequeue pointer past the last TRB of the transfer.
	 *
	 * Note: This assumes that only one transfer per endpoint has
	 * pending TRBs on the ring.
	 */
	xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
	    DEQPTR(xp->ring) | xp->ring.toggle);
	error = tsleep_nsec(xp, PZERO, "xhciab", XHCI_CMD_TIMEOUT);
	if (error)
		printf("%s: timeout aborting transfer\n", DEVNAME(sc));
}
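/*
 * Note on the Set TR Dequeue Pointer call above: TRB rings are at
 * least 16-byte aligned, so the low bit of the new dequeue pointer is
 * repurposed as the Dequeue Cycle State, which is why the ring's
 * current toggle value is OR'ed into the address.
 */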
void
xhci_timeout(void *addr)
{
	struct usbd_xfer *xfer = addr;
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;

	if (sc->sc_bus.dying) {
		xhci_timeout_task(addr);
		return;
	}

	usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
	    USB_TASK_TYPE_ABORT);
	usb_add_task(xfer->device, &xfer->abort_task);
}

void
xhci_timeout_task(void *addr)
{
	struct usbd_xfer *xfer = addr;
	int s;

	s = splusb();
	xhci_abort_xfer(xfer, USBD_TIMEOUT);
	splx(s);
}
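/*
 * xhci_timeout() fires in timeout (interrupt) context where sleeping
 * is not allowed, but aborting a transfer sleeps waiting for the Stop
 * Endpoint command; hence the detour through the USB abort task above.
 */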
usbd_status
xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}

usbd_status
xhci_root_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	usb_port_status_t ps;
	usb_device_request_t *req;
	void *buf = NULL;
	usb_hub_descriptor_t hubd;
	usbd_status err;
	int s, len, value, index;
	int l, totlen = 0;
	int port, i;
	uint32_t v;

	KASSERT(xfer->rqflags & URQ_REQUEST);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	req = &xfer->request;

	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
	    req->bmRequestType, req->bRequest));

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

	if (len != 0)
		buf = KERNADDR(&xfer->dmabuf, 0);

#define C(x,y) ((x) | ((y) << 8))
	switch(C(req->bRequest, req->bmRequestType)) {
	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
		/*
		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
		 * for the integrated root hub.
		 */
		break;
	case C(UR_GET_CONFIG, UT_READ_DEVICE):
		if (len > 0) {
			*(uint8_t *)buf = sc->sc_conf;
			totlen = 1;
		}
		break;
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
		switch(value >> 8) {
		case UDESC_DEVICE:
			if ((value & 0xff) != 0) {
				err = USBD_IOERROR;
				goto ret;
			}
			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
			USETW(xhci_devd.idVendor, sc->sc_id_vendor);
			memcpy(buf, &xhci_devd, l);
			break;
		/*
		 * We can't really operate at another speed, but the spec says
		 * we need this descriptor.
		 */
		case UDESC_OTHER_SPEED_CONFIGURATION:
		case UDESC_CONFIG:
			if ((value & 0xff) != 0) {
				err = USBD_IOERROR;
				goto ret;
			}
			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
			memcpy(buf, &xhci_confd, l);
			((usb_config_descriptor_t *)buf)->bDescriptorType =
			    value >> 8;
			buf = (char *)buf + l;
			len -= l;
			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
			totlen += l;
			memcpy(buf, &xhci_ifcd, l);
			buf = (char *)buf + l;
			len -= l;
			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
			totlen += l;
			memcpy(buf, &xhci_endpd, l);
			break;
		case UDESC_STRING:
			if (len == 0)
				break;
			*(u_int8_t *)buf = 0;
			totlen = 1;
			switch (value & 0xff) {
			case 0: /* Language table */
				totlen = usbd_str(buf, len, "\001");
				break;
			case 1: /* Vendor */
				totlen = usbd_str(buf, len, sc->sc_vendor);
				break;
			case 2: /* Product */
				totlen = usbd_str(buf, len, "xHCI root hub");
				break;
			}
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
		if (len > 0) {
			*(uint8_t *)buf = 0;
			totlen = 1;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_DEVICE):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
			totlen = 2;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus, 0);
			totlen = 2;
		}
		break;
	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
		if (value >= USB_MAX_DEVICES) {
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
		if (value != 0 && value != 1) {
			err = USBD_IOERROR;
			goto ret;
		}
		sc->sc_conf = value;
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
		err = USBD_IOERROR;
		goto ret;
	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
		break;
	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
		break;
	/* Hub requests */
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
		    "port=%d feature=%d\n", index, value));
		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		port = XHCI_PORTSC(index);
		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
		switch (value) {
		case UHF_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			/* TODO */
			break;
		case UHF_PORT_POWER:
			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
			break;
		case UHF_PORT_INDICATOR:
			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
			break;
		case UHF_C_PORT_CONNECTION:
			XOWRITE4(sc, port, v | XHCI_PS_CSC);
			break;
		case UHF_C_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PEC);
			break;
		case UHF_C_PORT_SUSPEND:
		case UHF_C_PORT_LINK_STATE:
			XOWRITE4(sc, port, v | XHCI_PS_PLC);
			break;
		case UHF_C_PORT_OVER_CURRENT:
			XOWRITE4(sc, port, v | XHCI_PS_OCC);
			break;
		case UHF_C_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_C_BH_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_WRC);
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
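	/*
	 * Note: the XHCI_PS_*C change bits handled just above are RW1C
	 * (write-1-to-clear), so OR-ing a bit into the PORTSC write is
	 * what clears the pending change, while masking with
	 * ~XHCI_PS_CLEAR keeps the read-back value from clearing other
	 * change bits by accident.
	 */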
	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
		if (len == 0)
			break;
		if ((value & 0xff) != 0) {
			err = USBD_IOERROR;
			goto ret;
		}
		v = XREAD4(sc, XHCI_HCCPARAMS);
		hubd = xhci_hubd;
		hubd.bNbrPorts = sc->sc_noport;
		USETW(hubd.wHubCharacteristics,
		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
		for (i = 1; i <= sc->sc_noport; i++) {
			v = XOREAD4(sc, XHCI_PORTSC(i));
			if (v & XHCI_PS_DR)
				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
		}
		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
		l = min(len, hubd.bDescLength);
		totlen = l;
		memcpy(buf, &hubd, l);
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
		if (len != 16) {
			err = USBD_IOERROR;
			goto ret;
		}
		memset(buf, 0, len);
		totlen = len;
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
		    index));
		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		if (len != 4) {
			err = USBD_IOERROR;
			goto ret;
		}
		v = XOREAD4(sc, XHCI_PORTSC(index));
		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
		i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v));
		switch (XHCI_PS_SPEED(v)) {
		case XHCI_SPEED_FULL:
			i |= UPS_FULL_SPEED;
			break;
		case XHCI_SPEED_LOW:
			i |= UPS_LOW_SPEED;
			break;
		case XHCI_SPEED_HIGH:
			i |= UPS_HIGH_SPEED;
			break;
		case XHCI_SPEED_SUPER:
		default:
			break;
		}
		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PR)	i |= UPS_RESET;
		if (v & XHCI_PS_PP) {
			if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL &&
			    XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH)
				i |= UPS_PORT_POWER;
			else
				i |= UPS_PORT_POWER_SS;
		}
		USETW(ps.wPortStatus, i);
		i = 0;
		if (v & XHCI_PS_CSC)	i |= UPS_C_CONNECT_STATUS;
		if (v & XHCI_PS_PEC)	i |= UPS_C_PORT_ENABLED;
		if (v & XHCI_PS_OCC)	i |= UPS_C_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
		USETW(ps.wPortChange, i);
		l = min(len, sizeof ps);
		memcpy(buf, &ps, l);
		totlen = l;
		break;
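	/*
	 * Note: no speed bit is set above for super-speed ports; on a
	 * USB 3.x hub wPortStatus carries no explicit super-speed flag,
	 * and the stack is expected to infer super speed from the
	 * absence of the low/full/high-speed bits.
	 */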
	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
		err = USBD_IOERROR;
		goto ret;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
		i = index >> 8;
		index &= 0x00ff;

		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		port = XHCI_PORTSC(index);
		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;

		switch (value) {
		case UHF_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
				err = USBD_IOERROR;
				goto ret;
			}
			XOWRITE4(sc, port, v |
			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
			break;
		case UHF_PORT_RESET:
			DPRINTFN(6, ("reset port %d\n", index));
			XOWRITE4(sc, port, v | XHCI_PS_PR);
			break;
		case UHF_PORT_POWER:
			DPRINTFN(3, ("set port power %d\n", index));
			XOWRITE4(sc, port, v | XHCI_PS_PP);
			break;
		case UHF_PORT_INDICATOR:
			DPRINTFN(3, ("set port indicator %d\n", index));

			v &= ~XHCI_PS_SET_PIC(3);
			v |= XHCI_PS_SET_PIC(1);

			XOWRITE4(sc, port, v);
			break;
		case UHF_C_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_C_BH_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_WRC);
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
		break;
	default:
		err = USBD_IOERROR;
		goto ret;
	}
	xfer->actlen = totlen;
	err = USBD_NORMAL_COMPLETION;
ret:
	xfer->status = err;
	s = splusb();
	usb_transfer_complete(xfer);
	splx(s);
	return (err);
}

void
xhci_noop(struct usbd_xfer *xfer)
{
}

usbd_status
xhci_root_intr_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}

usbd_status
xhci_root_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	sc->sc_intrxfer = xfer;

	return (USBD_IN_PROGRESS);
}

void
xhci_root_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	int s;

	sc->sc_intrxfer = NULL;

	xfer->status = USBD_CANCELLED;
	s = splusb();
	usb_transfer_complete(xfer);
	splx(s);
}

void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
}

/*
 * Number of packets remaining in the TD after the corresponding TRB.
 *
 * Section 4.11.2.4 of xHCI specification r1.1.
 */
static inline uint32_t
xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len)
{
	uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);

	if (len == 0)
		return XHCI_TRB_TDREM(0);

	npkt = howmany(remain - len, UE_GET_SIZE(mps));
	if (npkt > 31)
		npkt = 31;

	return XHCI_TRB_TDREM(npkt);
}
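/*
 * Worked example (hypothetical numbers): for an 8192-byte TD on an
 * endpoint with a 512-byte max packet size, the TRB covering the first
 * 4096 bytes is built with remain = 8192 and len = 4096, so
 * npkt = howmany(4096, 512) = 8 packets are left after that TRB.  The
 * final TRB of a TD is always called with remain == len and thus
 * reports 0.
 */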
/*
 * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC).
 *
 * Section 4.11.2.3 of xHCI specification r1.1.
 */
static inline uint32_t
xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc)
{
	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
	uint32_t maxb, tdpc, residue, tbc;

	/* Transfer Descriptor Packet Count, section 4.14.1. */
	tdpc = howmany(len, UE_GET_SIZE(mps));
	if (tdpc == 0)
		tdpc = 1;

	/* Transfer Burst Count */
	maxb = xhci_pipe_maxburst(xfer->pipe);
	tbc = howmany(tdpc, maxb + 1) - 1;

	/* Transfer Last Burst Packet Count */
	if (xfer->device->speed == USB_SPEED_SUPER) {
		residue = tdpc % (maxb + 1);
		if (residue == 0)
			*tlbpc = maxb;
		else
			*tlbpc = residue - 1;
	} else {
		*tlbpc = tdpc - 1;
	}

	return (tbc);
}
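/*
 * Worked example (hypothetical numbers): a super-speed isochronous
 * endpoint with a max burst of 3 (maxb) sending a 10-packet TD (tdpc)
 * needs howmany(10, 4) = 3 bursts, encoded as tbc = 2; the last burst
 * carries 10 % 4 = 2 packets, encoded as tlbpc = 1.
 */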
usbd_status
xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}

usbd_status
xhci_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t flags, len = UGETW(xfer->request.wLength);
	uint8_t toggle;
	int s;

	KASSERT(xfer->rqflags & URQ_REQUEST);

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	if (xp->free_trbs < 3)
		return (USBD_NOMEM);

	if (len != 0)
		usb_syncmem(&xfer->dmabuf, 0, len,
		    usbd_xfer_isread(xfer) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/* We'll toggle the setup TRB once we're finished with the stages. */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, 0);

	flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | (toggle ^ 1);
	if (len != 0) {
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_TRT_IN;
		else
			flags |= XHCI_TRB_TRT_OUT;
	}

	memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr));
	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
	trb0->trb_flags = htole32(flags);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Data TRB */
	if (len != 0) {
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);

		flags = XHCI_TRB_TYPE_DATA | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP;

		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, len, len)
		);
		trb->trb_flags = htole32(flags);

		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);
	}

	/* Status TRB */
	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);

	flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle;
	if (len == 0 || !usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_DIR_IN;

	trb->trb_paddr = 0;
	trb->trb_status = htole32(XHCI_TRB_INTR(0));
	trb->trb_flags = htole32(flags);

	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Setup TRB */
	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;
	if (xfer->timeout && !sc->sc_bus.use_polling) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
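/*
 * Note on the control TD built above: the setup stage carries the
 * 8-byte request inline in the TRB (XHCI_TRB_IDT), the optional data
 * stage follows the request's direction, and the status stage always
 * runs in the opposite direction of the data stage (IN when there is
 * no data stage at all).
 */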
void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	xhci_abort_xfer(xfer, USBD_CANCELLED);
}

usbd_status
xhci_device_generic_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}

usbd_status
xhci_device_generic_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t len, remain, flags;
	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
	uint8_t toggle;
	int s, i, ntrb, zerotd = 0;

	KASSERT(!(xfer->rqflags & URQ_REQUEST));

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* How many TRBs do we need for this transfer? */
	ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE);

	/* If the buffer crosses a 64k boundary, we need one more. */
	len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
	if (len < xfer->length)
		ntrb = howmany(xfer->length - len, XHCI_TRB_MAXSIZE) + 1;
	else
		len = xfer->length;

	/* If we need to append a zero length packet, we need one more. */
	if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) &&
	    (xfer->length % UE_GET_SIZE(mps) == 0))
		zerotd = 1;

	if (xp->free_trbs < (ntrb + zerotd))
		return (USBD_NOMEM);

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/* We'll toggle the first TRB once we're finished with the chain. */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
	flags = XHCI_TRB_TYPE_NORMAL | (toggle ^ 1);
	if (usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_ISP;
	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
	trb0->trb_status = htole32(
	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
	    xhci_xfer_tdsize(xfer, xfer->length, len)
	);
	trb0->trb_flags = htole32(flags);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	remain = xfer->length - len;
	paddr += len;

	/* Chain more TRBs if needed. */
	for (i = ntrb - 1; i > 0; i--) {
		len = min(remain, XHCI_TRB_MAXSIZE);

		/* Next (or Last) TRB. */
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
		flags = XHCI_TRB_TYPE_NORMAL | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_ISP;
		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

		trb->trb_paddr = htole64(paddr);
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, remain, len)
		);
		trb->trb_flags = htole32(flags);

		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);

		remain -= len;
		paddr += len;
	}

	/* Do we need to issue a zero length transfer? */
	if (zerotd == 1) {
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, -1);
		trb->trb_paddr = 0;
		trb->trb_status = 0;
		trb->trb_flags = htole32(XHCI_TRB_TYPE_NORMAL | XHCI_TRB_IOC | toggle);
		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);
	}

	/* First TRB. */
	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;
	if (xfer->timeout && !sc->sc_bus.use_polling) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}

void
xhci_device_generic_done(struct usbd_xfer *xfer)
{
	/* Only happens with interrupt transfers. */
	if (xfer->pipe->repeat) {
		xfer->actlen = 0;
		xhci_device_generic_start(xfer);
	}
}

void
xhci_device_generic_abort(struct usbd_xfer *xfer)
{
	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);

	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
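/*
 * Worked example for the TRB split in xhci_device_generic_start()
 * above (hypothetical numbers): a 96 KiB transfer whose buffer starts
 * at paddr 0x1f000 cannot be described by two 64 KiB TRBs, because the
 * first one would cross a 64 KiB boundary.  The first TRB instead
 * covers 0x10000 - 0xf000 = 4 KiB up to the boundary, and the
 * remaining 92 KiB take two more TRBs, i.e. ntrb = 3.
 */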
usbd_status
xhci_device_isoc_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err && err != USBD_IN_PROGRESS)
		return (err);

	return (xhci_device_isoc_start(xfer));
}

usbd_status
xhci_device_isoc_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	struct xhci_trb *trb0, *trb;
	uint32_t len, remain, flags;
	uint64_t paddr;
	uint32_t tbc, tlbpc;
	int s, i, j, ntrb = xfer->nframes;
	uint8_t toggle;

	KASSERT(!(xfer->rqflags & URQ_REQUEST));

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* Why would you do that anyway? */
	if (sc->sc_bus.use_polling)
		return (USBD_INVAL);

	/*
	 * To allow continuous transfers, xhci_device_isoc_transfer()
	 * starts all transfers immediately.  However, usbd_start_next()
	 * will still call this function when another xfer completes,
	 * so check whether this one is already in progress.
	 */
	if (xx->ntrb > 0)
		return (USBD_IN_PROGRESS);

	paddr = DMAADDR(&xfer->dmabuf, 0);

	/* How many TRBs do we need for all the frames? */
	for (i = 0, ntrb = 0; i < xfer->nframes; i++) {
		/* How many TRBs do we need for this frame? */
		ntrb += howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);

		/* If the buffer crosses a 64k boundary, we need one more. */
		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
		if (len < xfer->frlengths[i])
			ntrb++;

		paddr += xfer->frlengths[i];
	}

	if (xp->free_trbs < ntrb)
		return (USBD_NOMEM);

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	paddr = DMAADDR(&xfer->dmabuf, 0);

	for (i = 0, trb0 = NULL; i < xfer->nframes; i++) {
		/* How many TRBs do we need for this frame? */
		ntrb = howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);

		/* If the buffer crosses a 64k boundary, we need one more. */
		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
		if (len < xfer->frlengths[i])
			ntrb++;
		else
			len = xfer->frlengths[i];

		KASSERT(ntrb < 3);

		/*
		 * We'll commit the first TRB once we're finished with the
		 * chain.
		 */
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));

		DPRINTFN(4, ("%s:%d: ring %p trb0_idx %lu ntrb %d paddr %llx "
		    "len %u\n", __func__, __LINE__,
		    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, paddr,
		    len));

		/* Record the first TRB so we can toggle later. */
		if (trb0 == NULL) {
			trb0 = trb;
			toggle ^= 1;
		}

		flags = XHCI_TRB_TYPE_ISOCH | XHCI_TRB_SIA | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_ISP;
		flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

		tbc = xhci_xfer_tbc(xfer, xfer->frlengths[i], &tlbpc);
		flags |= XHCI_TRB_ISOC_TBC(tbc) | XHCI_TRB_ISOC_TLBPC(tlbpc);

		trb->trb_paddr = htole64(paddr);
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, xfer->frlengths[i], len)
		);
		trb->trb_flags = htole32(flags);

		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);

		remain = xfer->frlengths[i] - len;
		paddr += len;

		/* Chain more TRBs if needed. */
		for (j = ntrb - 1; j > 0; j--) {
			len = min(remain, XHCI_TRB_MAXSIZE);

			/* Next (or Last) TRB. */
			trb = xhci_xfer_get_trb(sc, xfer, &toggle, (j == 1));
			flags = XHCI_TRB_TYPE_NORMAL | toggle;
			if (usbd_xfer_isread(xfer))
				flags |= XHCI_TRB_ISP;
			flags |= (j == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
			DPRINTFN(3, ("%s:%d: ring %p trb0_idx %lu ntrb %d "
			    "paddr %llx len %u\n", __func__, __LINE__,
			    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb,
			    paddr, len));

			trb->trb_paddr = htole64(paddr);
			trb->trb_status = htole32(
			    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
			    xhci_xfer_tdsize(xfer, remain, len)
			);
			trb->trb_flags = htole32(flags);

			bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
			    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
			    BUS_DMASYNC_PREWRITE);

			remain -= len;
			paddr += len;
		}

		xfer->frlengths[i] = 0;
	}
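	/*
	 * Note: XHCI_TRB_SIA ("Start Isoch ASAP") set above lets the
	 * controller schedule each TD in the next available interval
	 * instead of at a caller-supplied frame ID.
	 */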
	/* First TRB. */
	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;

	if (xfer->timeout) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}