/* $OpenBSD: xhci.c,v 1.119 2020/07/31 19:27:57 mglocker Exp $ */

/*
 * Copyright (c) 2014-2015 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/endian.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/xhcireg.h>
#include <dev/usb/xhcivar.h>

struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL
};

#ifdef XHCI_DEBUG
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

/* Byte offset of a TRB within its ring's TRB array. */
#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
/* Physical (DMA) address of the ring's current dequeue/enqueue index. */
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))

/* Pool shared by all xhci(4) instances for per-transfer soft state. */
struct pool *xhcixfer;

/*
 * Software state of an endpoint ("pipe"): one transfer ring per open
 * endpoint, plus bookkeeping to map completed TRBs back to their xfers.
 */
struct xhci_pipe {
	struct usbd_pipe	pipe;		/* generic USB pipe, must be first */

	uint8_t			dci;		/* Device Context Index of this endpoint */
	uint8_t			slot;		/* Device slot ID */
	struct xhci_ring	ring;		/* transfer ring for this endpoint */

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];	/* indexed by TRB index */
	struct usbd_xfer	*aborted_xfer;	/* xfer waiting for endpoint reset to finish */
	int			 halted;	/* usbd_status to report once un-halted */
	size_t			 free_trbs;	/* TRBs still available on the ring */
	int			 skip;		/* complete all queued xfers (ring over/underrun) */
#define TRB_PROCESSED_NO	0
#define TRB_PROCESSED_YES 	1
#define TRB_PROCESSED_SHORT	2
	uint8_t			 trb_processed[XHCI_MAX_XFER];	/* per-TRB completion state (isoc) */
};

int	xhci_reset(struct xhci_softc *);
int	xhci_intr1(struct xhci_softc *);
void	xhci_event_dequeue(struct xhci_softc *);
void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
int	xhci_event_xfer_generic(struct xhci_softc *, struct usbd_xfer *,
	    struct xhci_pipe *, uint32_t, int, uint8_t, uint8_t, uint8_t);
int	xhci_event_xfer_isoc(struct usbd_xfer *, struct xhci_pipe *,
	    uint32_t, int, uint8_t);
void	xhci_event_command(struct xhci_softc *, uint64_t);
void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
int	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
int	xhci_scratchpad_alloc(struct xhci_softc *, int);
void	xhci_scratchpad_free(struct xhci_softc *);
int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
void	xhci_softdev_free(struct xhci_softc *, uint8_t);
int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
	    size_t);
void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);

struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
	    uint8_t *, int);
void	xhci_xfer_done(struct usbd_xfer *xfer);
/* xHCI command helpers.
*/
int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
int	xhci_command_abort(struct xhci_softc *);

void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
int	xhci_cmd_set_address(struct xhci_softc *, uint8_t, uint64_t, uint32_t);
int	xhci_cmd_evaluate_ctx(struct xhci_softc *, uint8_t, uint64_t);
#ifdef XHCI_DEBUG
int	xhci_cmd_noop(struct xhci_softc *);
#endif

/* XXX should be part of the Bus interface. */
void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
void	xhci_pipe_close(struct usbd_pipe *);
void	xhci_noop(struct usbd_xfer *);

void	xhci_timeout(void *);
void	xhci_timeout_task(void *);

/* USBD Bus Interface.
*/
usbd_status	  xhci_pipe_open(struct usbd_pipe *);
int		  xhci_setaddr(struct usbd_device *, int);
void		  xhci_softintr(void *);
void		  xhci_poll(struct usbd_bus *);
struct usbd_xfer *xhci_allocx(struct usbd_bus *);
void		  xhci_freex(struct usbd_bus *, struct usbd_xfer *);

usbd_status	  xhci_root_ctrl_transfer(struct usbd_xfer *);
usbd_status	  xhci_root_ctrl_start(struct usbd_xfer *);

usbd_status	  xhci_root_intr_transfer(struct usbd_xfer *);
usbd_status	  xhci_root_intr_start(struct usbd_xfer *);
void		  xhci_root_intr_abort(struct usbd_xfer *);
void		  xhci_root_intr_done(struct usbd_xfer *);

usbd_status	  xhci_device_ctrl_transfer(struct usbd_xfer *);
usbd_status	  xhci_device_ctrl_start(struct usbd_xfer *);
void		  xhci_device_ctrl_abort(struct usbd_xfer *);

usbd_status	  xhci_device_generic_transfer(struct usbd_xfer *);
usbd_status	  xhci_device_generic_start(struct usbd_xfer *);
void		  xhci_device_generic_abort(struct usbd_xfer *);
void		  xhci_device_generic_done(struct usbd_xfer *);

usbd_status	  xhci_device_isoc_transfer(struct usbd_xfer *);
usbd_status	  xhci_device_isoc_start(struct usbd_xfer *);

#define XHCI_INTR_ENDPT 1

struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

struct usbd_pipe_methods xhci_device_intr_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

struct usbd_pipe_methods xhci_device_isoc_methods = {
	.transfer = xhci_device_isoc_transfer,
	.start = xhci_device_isoc_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

#ifdef XHCI_DEBUG
/* Dump one TRB (little-endian on the ring) for debugging. */
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
#endif

int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
	    void **, bus_size_t, bus_size_t, bus_size_t);
void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);

/*
 * Allocate a physically contiguous, zeroed DMA buffer of `size' bytes
 * with the given alignment/boundary constraints, map it into KVA and
 * load it.  On success dma->paddr/dma->vaddr are valid and *kvap (if
 * not NULL) receives the KVA.  All intermediate resources are released
 * on failure (goto-based unwind).
 */
int
usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
{
	int error;

	dma->tag = bus->dmatag;
	dma->size = size;

	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
	    BUS_DMA_NOWAIT, &dma->map);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0)
		goto destroy;

	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		goto free;

	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto unmap;

	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return (0);

unmap:
	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
free:
	bus_dmamem_free(dma->tag, &dma->seg, 1);
destroy:
	bus_dmamap_destroy(dma->tag, dma->map);
	return (error);
}

/* Release everything usbd_dma_contig_alloc() set up; idempotent. */
void
usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
{
	if (dma->map != NULL) {
		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(bus->dmatag, dma->map);
		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
		bus_dmamap_destroy(bus->dmatag, dma->map);
		dma->map = NULL;	/* guards against double free */
	}
}

/*
 * Controller-independent initialization: read capability registers,
 * reset the HC and allocate the DCBAA, command ring, event ring, ERST
 * and scratchpad pages.  Returns 0 or an errno; on error everything
 * allocated so far is freed.
 */
int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	/* Locate the operational, doorbell and runtime register sets. */
	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
	printf(", xHCI %x.%x\n", sc->sc_version >> 8, sc->sc_version & 0xff);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	/* First xhci(4) instance initializes the shared xfer pool. */
	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
		    0, "xhcixfer", NULL);
	}

	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/* Setup Device Context Base Address Array. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Setup command ring. */
	rw_init(&sc->sc_cmd_lock, "xhcicmd");
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
	    XHCI_CMDS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
	    XHCI_EVTS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
	    XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}


	return (0);
}

/*
 * Program the controller registers with the addresses allocated by
 * xhci_init() (DCBAA, command ring, ERST, event ring dequeue pointer),
 * enable interrupts and start the controller.  Also called on resume.
 */
void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address. */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}

/*
 * Detach: tear down child devices, halt and reset the controller,
 * clear all hardware pointers programmed in xhci_config() and free the
 * resources allocated in xhci_init().
 */
int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}

/*
 * Power-management hook: on resume, reset the HC, reset both software
 * rings and reprogram the registers; on powerdown, halt/reset the HC
 * after the children have been suspended.
 */
int
xhci_activate(struct device *self, int act)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		sc->sc_bus.use_polling++;

		xhci_reset(sc);
		xhci_ring_reset(sc, &sc->sc_cmd_ring);
		xhci_ring_reset(sc, &sc->sc_evt_ring);

		/* Renesas controllers, at least, need more time to resume. */
		usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);

		xhci_config(sc);

		sc->sc_bus.use_polling--;
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
		xhci_reset(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

/*
 * Halt the controller, then issue a Host Controller Reset and wait
 * (up to 100ms each) for the halt and for HCRST/CNR to clear.
 * Returns EIO on reset timeout.
 */
int
xhci_reset(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr)
		printf("%s: halt timeout\n", DEVNAME(sc));

	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		/* Done when both HCRST and Controller Not Ready clear. */
		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
		if (!hcr)
			break;
	}

	if (hcr) {
		printf("%s: reset timeout\n", DEVNAME(sc));
		return (EIO);
	}

	return (0);
}


/*
 * Hardware interrupt entry point.  Returns 1 if the interrupt was for
 * us, 0 otherwise (shared-interrupt convention).
 */
int
xhci_intr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc == NULL || sc->sc_bus.dying)
		return (0);

	/* If we get an interrupt while polling, then just ignore it. */
	if (sc->sc_bus.use_polling) {
		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
		return (0);
	}

	return (xhci_intr1(sc));
}

/*
 * Interrupt body, shared with the polling path: check and acknowledge
 * USBSTS/IMAN and schedule the soft interrupt that drains the event
 * ring.  Marks the bus dying on 0xffffffff reads (hardware gone) or a
 * Host System Error.
 */
int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	if (intrs == 0xffffffff) {
		sc->sc_bus.dying = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		return (1);
	}

	/* Acknowledge interrupts */
	XOWRITE4(sc, XHCI_USBSTS, intrs);
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	usb_schedsoftintr(&sc->sc_bus);

	return (1);
}

/* Polled-mode replacement for the hardware interrupt. */
void
xhci_poll(struct usbd_bus *bus)
{
	struct xhci_softc *sc = (struct xhci_softc *)bus;

	if (XOREAD4(sc, XHCI_USBSTS))
		xhci_intr1(sc);
}

/* Soft interrupt: drain the event ring at IPL_SOFTUSB. */
void
xhci_softintr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_bus.dying)
		return;

	sc->sc_bus.intr_context++;
	xhci_event_dequeue(sc);
	sc->sc_bus.intr_context--;
}

/*
 * Consume all pending event TRBs, dispatch them by type, then write
 * the new dequeue pointer back to the hardware (ERDP, with the busy
 * bit to clear it).
 */
void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			/* Saved so the synchronous submitter can inspect it. */
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		case XHCI_EVT_HOST_CTRL:
			/* TODO */
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}

	}

	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}

/*
 * Complete every xfer currently queued on the pipe (used after ring
 * over/underrun, when the HC stops giving per-TD events).
 */
void
xhci_skip_all(struct xhci_pipe *xp)
{
	struct usbd_xfer *xfer, *last;

	if (xp->skip) {
		/*
		 * Find the last transfer to skip, this is necessary
		 * as xhci_xfer_done() posts new transfers which we
		 * don't want to skip
		 */
		last = SIMPLEQ_FIRST(&xp->pipe.queue);
		if (last == NULL)
			goto done;
		while ((xfer = SIMPLEQ_NEXT(last, next)) != NULL)
			last = xfer;

		do {
			xfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (xfer == NULL)
				goto done;
			DPRINTF(("%s: skipping %p\n", __func__, xfer));
			xfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(xfer);
		} while (xfer != last);
	done:
		xp->skip = 0;
	}
}

/*
 * Handle a Transfer Event TRB: locate the pipe (slot/dci from the
 * event flags) and the pending xfer (from the TRB's position on the
 * transfer ring), then hand off to the generic or isochronous
 * completion handler.
 */
void
xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
    uint32_t flags)
{
	struct xhci_pipe *xp;
	struct usbd_xfer *xfer;
	uint8_t dci, slot, code, xfertype;
	uint32_t remain;
	int trb_idx;

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);
	if (slot > sc->sc_noslot) {
		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
		return;
	}

	/* NOTE(review): dci == 0 would index pipes[-1] here — later xhci.c
	 * revisions validate dci before this lookup; confirm upstream fix. */
	xp = sc->sc_sdevs[slot].pipes[dci - 1];
	if (xp == NULL) {
		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
		return;
	}

	code = XHCI_TRB_GET_CODE(status);
	remain = XHCI_TRB_REMAIN(status);

	switch (code) {
	case XHCI_CODE_RING_UNDERRUN:
		DPRINTF(("%s: slot %u underrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_RING_OVERRUN:
		DPRINTF(("%s: slot %u overrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_MISSED_SRV:
		DPRINTF(("%s: slot %u missed srv with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xp->skip = 1;
		return;
	default:
		break;
	}

	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, xp->ring.ntrb - 1);
		return;
	}

	xfer = xp->pending_xfers[trb_idx];
	if (xfer == NULL) {
		DPRINTF(("%s: NULL xfer pointer\n", DEVNAME(sc)));
		return;
	}

	if (remain > xfer->length)
		remain = xfer->length;

	xfertype = UE_GET_XFERTYPE(xfer->pipe->endpoint->edesc->bmAttributes);

	switch (xfertype) {
	case UE_BULK:
	case UE_INTERRUPT:
	case UE_CONTROL:
		if (xhci_event_xfer_generic(sc, xfer, xp, remain, trb_idx,
		    code, slot, dci))
			return;
		break;
	case UE_ISOCHRONOUS:
		if (xhci_event_xfer_isoc(xfer, xp, remain, trb_idx, code))
			return;
		break;
	default:
		panic("xhci_event_xfer: unknown xfer type %u", xfertype);
	}

	xhci_xfer_done(xfer);
}

/*
 * Sum the TRB lengths of a TD by walking the ring from the TD's first
 * TRB (derived from the xfer's last index and TRB count) up to
 * `trb_idx'.  Only Normal and Data stage TRBs carry payload.
 */
uint32_t
xhci_xfer_length_generic(struct xhci_xfer *xx, struct xhci_pipe *xp,
    int trb_idx)
{
	int trb0_idx;
	uint32_t len = 0, type;

	/* Modulo (ntrb - 1) because the last ring entry is the Link TRB. */
	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	while (1) {
		type = letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK;
		if (type == XHCI_TRB_TYPE_NORMAL || type == XHCI_TRB_TYPE_DATA)
			len += XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		if (trb0_idx == trb_idx)
			break;
		if (++trb0_idx == xp->ring.ntrb)
			trb0_idx = 0;
	}
	return len;
}

/*
 * Completion handling for control/bulk/interrupt transfers.  Returns
 * non-zero if the xfer must NOT be completed yet (mid-TD short event,
 * stall recovery in progress, stopped by an abort).
 */
int
xhci_event_xfer_generic(struct xhci_softc *sc, struct usbd_xfer *xfer,
    struct xhci_pipe *xp, uint32_t remain, int trb_idx,
    uint8_t code, uint8_t slot, uint8_t dci)
{
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		if (xfer->actlen == 0) {
			if (remain)
				xfer->actlen =
				    xhci_xfer_length_generic(xx, xp, trb_idx) -
				    remain;
			else
				xfer->actlen = xfer->length;
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		/*
		 * Use values from the transfer TRB instead of the status TRB.
		 */
		if (xfer->actlen == 0)
			xfer->actlen =
			    xhci_xfer_length_generic(xx, xp, trb_idx) - remain;
		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		if (xx->index != trb_idx) {
			DPRINTF(("%s: short xfer %p for %u\n",
			    DEVNAME(sc), xfer, xx->index));
			return (1);
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
		/* Prevent any timeout to kick in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return (1);
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
			return (1);
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	return (0);
}

/*
 * Completion handling for isochronous transfers: account the received
 * length into the per-frame frlengths[] (a frame may span two TRBs),
 * and only complete the xfer on its last TRB.  Returns non-zero when
 * completion must be deferred.
 */
int
xhci_event_xfer_isoc(struct usbd_xfer *xfer, struct xhci_pipe *xp,
    uint32_t remain, int trb_idx, uint8_t code)
{
	struct usbd_xfer *skipxfer;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int trb0_idx, frame_idx = 0, skip_trb = 0;

	KASSERT(xx->index >= 0);

	/* Remember how this TRB completed for the two-TRB-frame case below. */
	switch (code) {
	case XHCI_CODE_SHORT_XFER:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_SHORT;
		break;
	default:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_YES;
		break;
	}

	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	/* Find the according frame index for this TRB. */
	while (trb0_idx != trb_idx) {
		if ((letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK) == XHCI_TRB_TYPE_ISOCH)
			frame_idx++;
		if (trb0_idx++ == (xp->ring.ntrb - 1))
			trb0_idx = 0;
	}

	/*
	 * If we queued two TRBs for a frame and this is the second TRB,
	 * check if the first TRB needs accounting since it might not have
	 * raised an interrupt in case of full data received.
	 */
	if ((letoh32(xp->ring.trbs[trb_idx].trb_flags) & XHCI_TRB_TYPE_MASK) ==
	    XHCI_TRB_TYPE_NORMAL) {
		frame_idx--;
		if (trb_idx == 0)
			trb0_idx = xp->ring.ntrb - 2;
		else
			trb0_idx = trb_idx - 1;
		if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_NO) {
			xfer->frlengths[frame_idx] = XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		} else if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_SHORT) {
			skip_trb = 1;
		}
	}

	if (!skip_trb) {
		xfer->frlengths[frame_idx] +=
		    XHCI_TRB_LEN(letoh32(xp->ring.trbs[trb_idx].trb_status)) -
		    remain;
		xfer->actlen += xfer->frlengths[frame_idx];
	}

	if (xx->index != trb_idx)
		return (1);

	/* Flush xfers queued before a Missed Service Error. */
	if (xp->skip) {
		while (1) {
			skipxfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (skipxfer == xfer || skipxfer == NULL)
				break;
			DPRINTF(("%s: skipping %p\n", __func__, skipxfer));
			skipxfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(skipxfer);
		}
		xp->skip = 0;
	}

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	xfer->status = USBD_NORMAL_COMPLETION;

	return (0);
}

/*
 * Handle a Command Completion Event TRB.  Asynchronous commands
 * (Reset Endpoint / Set TR Dequeue) continue their state machine
 * here; synchronous commands wake up the submitter sleeping on
 * sc_cmd_trb.
 */
void
xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
{
	struct xhci_trb *trb;
	struct xhci_pipe *xp;
	uint32_t flags;
	uint8_t dci, slot;
	int trb_idx, status;

	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
		return;
	}

	trb = &sc->sc_cmd_ring.trbs[trb_idx];

	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	flags = letoh32(trb->trb_flags);

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);

	switch (flags & XHCI_TRB_TYPE_MASK) {
	case XHCI_CMD_RESET_EP:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Update the dequeue pointer past the last TRB. */
		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
		    DEQPTR(xp->ring) | xp->ring.toggle);
		break;
	case XHCI_CMD_SET_TR_DEQ:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Endpoint recovered: report the saved status and wake
		 * anyone waiting in the abort path. */
		status = xp->halted;
		xp->halted = 0;
		if (xp->aborted_xfer != NULL) {
			xp->aborted_xfer->status = status;
			xhci_xfer_done(xp->aborted_xfer);
			wakeup(xp);
		}
		break;
	case XHCI_CMD_CONFIG_EP:
	case XHCI_CMD_STOP_EP:
	case XHCI_CMD_DISABLE_SLOT:
	case XHCI_CMD_ENABLE_SLOT:
	case XHCI_CMD_ADDRESS_DEVICE:
	case XHCI_CMD_EVAL_CTX:
	case XHCI_CMD_NOOP:
		/*
		 * All these commands are synchronous.
		 *
		 * If TRBs differ, this could be a delayed result after we
		 * gave up waiting for the expected TRB due to timeout.
		 */
		if (sc->sc_cmd_trb == trb) {
			sc->sc_cmd_trb = NULL;
			wakeup(&sc->sc_cmd_trb);
		}
		break;
	default:
		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
	}
}

/*
 * Handle a Port Status Change Event: set the port's bit in the root
 * hub interrupt pipe's bitmap and complete the pending root intr xfer.
 */
void
xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
{
	struct usbd_xfer *xfer = sc->sc_intrxfer;
	uint32_t port = XHCI_TRB_PORTID(paddr);
	uint8_t *p;

	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
		return;
	}

	if (xfer == NULL)
		return;

	p = KERNADDR(&xfer->dmabuf, 0);
	memset(p, 0, xfer->length);

	/* NOTE(review): port is not range-checked against xfer->length
	 * before indexing p[port/8]; confirm the buffer always covers
	 * sc_noport bits. */
	p[port/8] |= 1 << (port%8);
	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
}

/*
 * Final completion of an xfer: release its TRBs (walking backwards
 * from the last index), clear per-TRB bookkeeping, cancel its timeout
 * and abort task and notify the stack.  Must run at IPL_SOFTUSB.
 */
void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xx->index = -1;
	xx->ntrb = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}

/*
 * Calculate the Device Context Index (DCI) for endpoints as stated
 * in section 4.5.1 of xHCI specification r1.1.
 */
static inline uint8_t
xhci_ed2dci(usb_endpoint_descriptor_t *ed)
{
	uint8_t dir;

	/* Control endpoints use a single bidirectional context: 2n + 1. */
	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);

	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
		dir = 1;
	else
		dir = 0;

	/* OUT endpoints map to 2n, IN endpoints to 2n + 1. */
	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
}

/*
 * Open a pipe: select the transfer methods for its endpoint type
 * and, for a default (control) pipe, enable a new device slot.
 */
usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	int error;

	KASSERT(xp->slot == 0);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub: emulated in software, no device context needed. */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/*
		 * Get a slot and init the device's contexts.
		 *
		 * Since the control endpoint, represented as the default
		 * pipe, is always opened first we are dealing with a
		 * new device. Put a new slot in the ENABLED state.
		 */
		error = xhci_cmd_slot_control(sc, &slot, 1);
		if (error || slot == 0 || slot > sc->sc_noslot)
			return (USBD_INVAL);

		if (xhci_softdev_alloc(sc, slot)) {
			xhci_cmd_slot_control(sc, &slot, 0);
			return (USBD_NOMEM);
		}

		break;
	case UE_ISOCHRONOUS:
		pipe->methods = &xhci_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->methods = &xhci_device_bulk_methods;
		break;
	case UE_INTERRUPT:
		pipe->methods = &xhci_device_intr_methods;
		break;
	default:
		return (USBD_INVAL);
	}

	/*
	 * Our USBD Bus Interface is pipe-oriented but for most of the
	 * operations we need to access a device context, so keep track
	 * of the slot ID in every pipe.
	 */
	if (slot == 0)
		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;

	xp->slot = slot;
	xp->dci = xhci_ed2dci(ed);

	/*
	 * NOTE(review): on failure the slot is disabled even when it was
	 * inherited from the default pipe (non-control endpoints) --
	 * presumably the device is considered unusable then; verify.
	 */
	if (xhci_pipe_init(sc, pipe)) {
		xhci_cmd_slot_control(sc, &slot, 0);
		return (USBD_IOERROR);
	}

	return (USBD_NORMAL_COMPLETION);
}

/*
 * Set the maximum Endpoint Service Interface Time (ESIT) payload and
 * the average TRB buffer length for an endpoint.
 */
static inline uint32_t
xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);

	switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
	case UE_CONTROL:
		mep = 0;
		atl = 8;	/* Size of a SETUP packet. */
		break;
	case UE_INTERRUPT:
	case UE_ISOCHRONOUS:
		if (pipe->device->speed == USB_SPEED_SUPER) {
			/* XXX Read the companion descriptor */
		}

		/* Max ESIT payload: packet size times (burst count + 1). */
		mep = (UE_GET_TRANS(mps) + 1) * UE_GET_SIZE(mps);
		atl = mep;
		break;
	case UE_BULK:
	default:
		mep = 0;
		atl = 0;
	}

	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
}

/* Convert a 1-255 frame count into a log2-based interval exponent. */
static inline uint32_t
xhci_linear_interval(usb_endpoint_descriptor_t *ed)
{
	uint32_t ival = min(max(1, ed->bInterval), 255);

	return (fls(ival) - 1);
}

/* Clamp an exponent-style bInterval (1-16) and make it 0-based. */
static inline uint32_t
xhci_exponential_interval(usb_endpoint_descriptor_t *ed)
{
	uint32_t ival = min(max(1, ed->bInterval), 16);

	return (ival - 1);
}
/*
 * Return interval for endpoint expressed in 2^(ival) * 125us.
 *
 * See section 6.2.3.6 of xHCI r1.1 Specification for more details.
 */
uint32_t
xhci_pipe_interval(struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t speed = pipe->device->speed;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint32_t ival;

	if (xfertype == UE_CONTROL || xfertype == UE_BULK) {
		/* Control and Bulk endpoints never NAK. */
		ival = 0;
	} else {
		switch (speed) {
		case USB_SPEED_FULL:
			if (xfertype == UE_ISOCHRONOUS) {
				/* Convert 1-2^(15)ms into 3-18 */
				ival = xhci_exponential_interval(ed) + 3;
				break;
			}
			/* FALLTHROUGH */
		case USB_SPEED_LOW:
			/* Convert 1-255ms into 3-10 */
			ival = xhci_linear_interval(ed) + 3;
			break;
		case USB_SPEED_HIGH:
		case USB_SPEED_SUPER:
		default:
			/* Convert 1-2^(15) * 125us into 0-15 */
			ival = xhci_exponential_interval(ed);
			break;
		}
	}

	KASSERT(ival <= 15);
	return (XHCI_EPCTX_SET_IVAL(ival));
}

/* Return the endpoint's burst count (extra packets per opportunity). */
uint32_t
xhci_pipe_maxburst(struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint32_t maxb = 0;

	switch (pipe->device->speed) {
	case USB_SPEED_HIGH:
		if (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)
			maxb = UE_GET_TRANS(mps);
		break;
	case USB_SPEED_SUPER:
		/* XXX Read the companion descriptor */
	default:
		break;
	}

	return (maxb);
}

/*
 * Return the highest DCI with a valid Endpoint Context, skipping
 * ``ignore'' (used when a pipe is being closed), for the Context
 * Entries field of the slot context.
 */
static inline uint32_t
xhci_last_valid_dci(struct xhci_pipe **pipes, struct xhci_pipe *ignore)
{
	struct xhci_pipe *lxp;
	int i;

	/* Find the last valid Endpoint Context. */
	for (i = 30; i >= 0; i--) {
		lxp = pipes[i];
		if (lxp != NULL && lxp != ignore)
			return XHCI_SCTX_DCI(lxp->dci);
	}

	return 0;
}

/*
 * Fill the input context (slot + endpoint) for a pipe before issuing
 * an "Address Device" or "Configure Endpoint" command.
 */
int
xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t speed, cerr = 0;
	uint32_t route = 0, rhport = 0;
	struct usbd_device *hub;

	/*
	 * Calculate the Route String. Assume that there is no hub with
	 * more than 15 ports and that they all have a depth < 6. See
	 * section 8.9 of USB 3.1 Specification for more details.
	 */
	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
		uint32_t port = hub->powersrc->portno;
		uint32_t depth = hub->myhub->depth;

		route |= port << (4 * (depth - 1));
	}

	/* Get Root Hub port */
	rhport = hub->powersrc->portno;

	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		speed = XHCI_SPEED_LOW;
		break;
	case USB_SPEED_FULL:
		speed = XHCI_SPEED_FULL;
		break;
	case USB_SPEED_HIGH:
		speed = XHCI_SPEED_HIGH;
		break;
	case USB_SPEED_SUPER:
		speed = XHCI_SPEED_SUPER;
		break;
	default:
		/*
		 * NOTE(review): usbd_status returned from an otherwise
		 * errno-style int function -- callers only test != 0.
		 */
		return (USBD_INVAL);
	}

	/* Setup the endpoint context */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;

	/* IN endpoints (and control) use EP type values 4-7. */
	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |
	    XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |
	    XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DEQPTR(xp->ring) | xp->ring.toggle
	);

	/* Unmask the new endpoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(
	    xhci_last_valid_dci(sdev->pipes, NULL) | XHCI_SCTX_SPEED(speed) |
	    XHCI_SCTX_ROUTE(route)
	);
	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

/* XXX */
#define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
	/*
	 * If we are opening the interrupt pipe of a hub, update its
	 * context before putting it in the CONFIGURED state.
	 */
	if (pipe->device->hub != NULL) {
		int nports = pipe->device->hub->nports;

		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));

		if (UHUB_IS_MTT(pipe->device))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
		);
	}

	/*
	 * If this is a Low or Full Speed device below an external High
	 * Speed hub, it needs some TT love.
	 */
	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
		struct usbd_device *hshub = pipe->device->myhsport->parent;
		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;

		if (UHUB_IS_MTT(hshub))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_HUB_SID(slot) |
		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
		);
	}
#undef UHUB_IS_MTT

	/* Unmask the slot context */
	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));

	/* Make the input context visible to the controller. */
	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Allocate a transfer ring for a pipe and issue the command that
 * makes the endpoint (or, for the default pipe, the slot) usable.
 */
int
xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

#ifdef XHCI_DEBUG
	struct usbd_device *dev = pipe->device;
	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
	    pipe->endpoint->edesc->bEndpointAddress);
#endif

	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
		return (ENOMEM);

	xp->free_trbs = xp->ring.ntrb;
	xp->halted = 0;

	sdev->pipes[xp->dci - 1] = xp;

	error = xhci_context_setup(sc, pipe);
	if (error)
		return (error);

	if (xp->dci == 1) {
		/*
		 * If we are opening the default pipe, the Slot should
		 * be in the ENABLED state. Issue an "Address Device"
		 * with BSR=1 to put the device in the DEFAULT state.
		 * We cannot jump directly to the ADDRESSED state with
		 * BSR=0 because some Low/Full speed devices won't accept
		 * a SET_ADDRESS command before we've read their device
		 * descriptor.
		 */
		error = xhci_cmd_set_address(sc, xp->slot,
		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
	} else {
		error = xhci_cmd_configure_ep(sc, xp->slot,
		    sdev->ictx_dma.paddr);
	}

	if (error) {
		xhci_ring_free(sc, &xp->ring);
		return (EIO);
	}

	return (0);
}

/*
 * Close a pipe: drop its endpoint from the device context and free
 * its transfer ring.  Closing the default pipe disables the slot.
 */
void
xhci_pipe_close(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];

	/* Root Hub: nothing was allocated for its pipes. */
	if (pipe->device->depth == 0)
		return;

	/* Mask the endpoint */
	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
	sdev->input_ctx->add_flags = 0;

	/* Update last valid Endpoint Context */
	sdev->slot_ctx->info_lo &= htole32(~XHCI_SCTX_DCI(31));
	sdev->slot_ctx->info_lo |= htole32(xhci_last_valid_dci(sdev->pipes, xp));

	/* Clear the Endpoint Context */
	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));

	xhci_ring_free(sc, &xp->ring);
	sdev->pipes[xp->dci - 1] = NULL;

	/*
	 * If we are closing the default pipe, the device is probably
	 * gone, so put its slot in the DISABLED state.
	 */
	if (xp->dci == 1) {
		xhci_cmd_slot_control(sc, &xp->slot, 0);
		xhci_softdev_free(sc, xp->slot);
	}
}

/*
 * Transition a device from DEFAULT to ADDRESSED Slot state, this hook
 * is needed for Low/Full speed devices.
 *
 * See section 4.5.3 of USB 3.1 Specification for more details.
 */
int
xhci_setaddr(struct usbd_device *dev, int addr)
{
	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

	/* Root Hub: has no slot to address. */
	if (dev->depth == 0)
		return (0);

	KASSERT(xp->dci == 1);

	error = xhci_context_setup(sc, dev->default_pipe);
	if (error)
		return (error);

	/* BSR=0 this time: the controller issues the SET_ADDRESS. */
	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);

#ifdef XHCI_DEBUG
	if (error == 0) {
		struct xhci_sctx *sctx;
		uint8_t addr;

		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);

		/* Get output slot context. */
		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
		error = (addr == 0);

		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
	}
#endif

	return (error);
}

/* Allocate a zeroed transfer descriptor from the xfer pool. */
struct usbd_xfer *
xhci_allocx(struct usbd_bus *bus)
{
	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
}

void
xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	pool_put(xhcixfer, xfer);
}

/*
 * Allocate the scratchpad buffers the controller requires; the
 * scratchpad pointer table goes into DCBAA entry 0.
 */
int
xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
{
	uint64_t *pte;
	int error, i;

	/* Allocate the required entry for the table.
 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
	    sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Allocate pages. XXX does not need to be contiguous. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
		return (ENOMEM);
	}

	/* Fill the table with the physical address of every page. */
	for (i = 0; i < npage; i++) {
		pte[i] = htole64(
		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
		);
	}

	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
	    npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Entry 0 points to the table of scratchpad pointers. */
	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_spad.npage = npage;

	return (0);
}

/* Release the scratchpad buffers and clear DCBAA entry 0. */
void
xhci_scratchpad_free(struct xhci_softc *sc)
{
	sc->sc_dcbaa.segs[0] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
}

/* Allocate the DMA memory backing a ring and reset its state. */
int
xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
    size_t alignment)
{
	size_t size;
	int error;

	size = ntrb * sizeof(struct xhci_trb);

	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
	if (error)
		return (error);

	ring->ntrb = ntrb;

	xhci_ring_reset(sc, ring);

	return (0);
}

void
xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
{
	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
}

/* Zero a ring and restore its initial index and cycle state. */
void
xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
{
	size_t size;

	size = ring->ntrb * sizeof(struct xhci_trb);

	memset(ring->trbs, 0, size);

	ring->index = 0;
	ring->toggle = XHCI_TRB_CYCLE;

	/*
	 * Since all our rings use only one segment, at least for
	 * the moment, link their tail to their head.
	 */
	if (ring != &sc->sc_evt_ring) {
		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];

		trb->trb_paddr = htole64(ring->dma.paddr);
		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG |
		    XHCI_TRB_CYCLE);
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREWRITE);
	} else
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Return the TRB at the dequeue index if its cycle bit matches our
 * toggle (i.e. the controller has produced it), advancing the index;
 * NULL otherwise.
 */
struct xhci_trb*
xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	/* Make sure this TRB can be consumed. */
	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
		return (NULL);

	ring->index++;

	/* Wrap around and flip our view of the cycle bit. */
	if (ring->index == ring->ntrb) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}

/*
 * Return the TRB at the enqueue index, advancing the index and
 * maintaining the link TRB at the end of the (single) segment.
 */
struct xhci_trb*
xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *lnk, *trb;

	KASSERT(ring->index < ring->ntrb);

	/* Setup the link TRB after the previous TRB is done.
 */
	if (ring->index == 0) {
		lnk = &ring->trbs[ring->ntrb - 1];
		trb = &ring->trbs[ring->ntrb - 2];

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/* Propagate the chain bit of the previous TRB. */
		lnk->trb_flags &= htole32(~XHCI_TRB_CHAIN);
		if (letoh32(trb->trb_flags) & XHCI_TRB_CHAIN)
			lnk->trb_flags |= htole32(XHCI_TRB_CHAIN);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

		/* Flip the cycle bit last: this hands the link TRB over. */
		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
	}

	trb = &ring->trbs[ring->index++];
	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	/* Toggle cycle state of the link TRB and skip it. */
	if (ring->index == (ring->ntrb - 1)) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}

/*
 * Reserve the next TRB on a pipe's ring for a transfer and record
 * the bookkeeping needed to complete it from the event handler.
 * ``last'' describes the TRB's position within its TD (see cases).
 */
struct xhci_trb *
xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
    uint8_t *togglep, int last)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	KASSERT(xp->free_trbs >= 1);
	xp->free_trbs--;
	*togglep = xp->ring.toggle;

	switch (last) {
	case -1:	/* This will be a zero-length TD. */
		xp->pending_xfers[xp->ring.index] = NULL;
		break;
	case 0:	/* This will be in a chain. */
		xp->pending_xfers[xp->ring.index] = xfer;
		xx->index = -2;
		xx->ntrb += 1;
		break;
	case 1:	/* This will terminate a chain. */
		xp->pending_xfers[xp->ring.index] = xfer;
		xx->index = xp->ring.index;
		xx->ntrb += 1;
		break;
	}

	xp->trb_processed[xp->ring.index] = TRB_PROCESSED_NO;

	return (xhci_ring_produce(sc, &xp->ring));
}

/*
 * Queue a command TRB on the command ring and ring doorbell 0.  With
 * a non-zero timeout the call is synchronous: it sleeps for the
 * completion event (caller must hold sc_cmd_lock).
 */
int
xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
{
	struct xhci_trb *trb;
	int s, error = 0;

	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);

	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);

	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
	if (trb == NULL)
		return (EAGAIN);
	trb->trb_paddr = trb0->trb_paddr;
	trb->trb_status = trb0->trb_status;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Write the flags (containing the cycle bit) last. */
	trb->trb_flags = trb0->trb_flags;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Asynchronous submission: ring the doorbell and return. */
	if (timeout == 0) {
		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
		return (0);
	}

	rw_assert_wrlock(&sc->sc_cmd_lock);

	s = splusb();
	sc->sc_cmd_trb = trb;
	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
	error = tsleep_nsec(&sc->sc_cmd_trb, PZERO, "xhcicmd", timeout);
	if (error) {
#ifdef XHCI_DEBUG
		printf("%s: tsleep() = %d\n", __func__, error);
		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
		xhci_dump_trb(trb);
#endif
		KASSERT(sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL);
		/*
		 * Just because the timeout expired this does not mean that the
		 * TRB isn't active anymore! We could get an interrupt from
		 * this TRB later on and then wonder what to do with it.
		 * We'd rather abort it.
		 */
		xhci_command_abort(sc);
		sc->sc_cmd_trb = NULL;
		splx(s);
		return (error);
	}
	splx(s);

	/* The interrupt handler copied the completion event here. */
	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));

	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
		return (0);

#ifdef XHCI_DEBUG
	printf("%s: event error code=%d, result=%d \n", DEVNAME(sc),
	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
	xhci_dump_trb(trb0);
#endif
	return (EIO);
}

/*
 * Abort the command currently processed by the controller by setting
 * the Command Abort bit and polling until Command Ring Running clears.
 */
int
xhci_command_abort(struct xhci_softc *sc)
{
	uint32_t reg;
	int i;

	reg = XOREAD4(sc, XHCI_CRCR_LO);
	if ((reg & XHCI_CRCR_LO_CRR) == 0)
		return (0);

	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	/* Poll for up to 250ms. */
	for (i = 0; i < 2500; i++) {
		DELAY(100);
		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
		if (!reg)
			break;
	}

	if (reg) {
		printf("%s: command ring abort timeout\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}

/* Issue a synchronous "Configure Endpoint" command. */
int
xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

/* Issue a synchronous "Stop Endpoint" command. */
int
xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

/* Issue an asynchronous "Reset Endpoint" command (no completion wait). */
void
xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
	);

	xhci_command_submit(sc, &trb, 0);
}

/* Issue an asynchronous "Set TR Dequeue Pointer" command. */
void
xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
    uint64_t addr)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
	);

	xhci_command_submit(sc, &trb, 0);
}

/*
 * Enable (returning the new slot ID in *slotp) or disable a device
 * slot, synchronously.
 */
int
xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	if (enable)
		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
	else
		trb.trb_flags = htole32(
			XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
		);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	if (error != 0)
		return (EIO);

	if (enable)
		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));

	return (0);
}

/* Issue a synchronous "Address Device" command, optionally with BSR. */
int
xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
    uint32_t bsr)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

/* Issue a synchronous "Evaluate Context" command. */
int
xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_EVAL_CTX
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

#ifdef XHCI_DEBUG
/* Issue a synchronous "No Op" command to exercise the command ring. */
int
xhci_cmd_noop(struct xhci_softc *sc)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(XHCI_CMD_NOOP);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}
#endif

/*
 * Allocate the input and output device contexts for a newly enabled
 * slot and hook the output context into the DCBAA.
 */
int
xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
	int i, error;
	uint8_t *kva;

	/*
	 * Setup input context. Even with 64 byte context size, it
	 * fits into the smallest supported page size, so use that.
 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Input control context, then slot context, then 31 EP contexts. */
	sdev->input_ctx = (struct xhci_inctx *)kva;
	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
	for (i = 0; i < 31; i++)
		sdev->ep_ctx[i] =
		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);

	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
	    slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));

	/* Setup output context */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
		return (ENOMEM);
	}

	memset(&sdev->pipes, 0, sizeof(sdev->pipes));

	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
	    slot, (long long)sdev->octx_dma.paddr));

	/* Point the slot's DCBAA entry at the output context. */
	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/* Release a slot's device contexts and clear its DCBAA entry. */
void
xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];

	sc->sc_dcbaa.segs[slot] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);

	memset(sdev, 0, sizeof(struct xhci_soft_dev));
}

/* Root hub descriptors.
 */
usb_device_descriptor_t xhci_devd = {
	USB_DEVICE_DESCRIPTOR_SIZE,
	UDESC_DEVICE,		/* type */
	{0x00, 0x03},		/* USB version */
	UDCLASS_HUB,		/* class */
	UDSUBCLASS_HUB,		/* subclass */
	UDPROTO_HSHUBSTT,	/* protocol */
	9,			/* max packet */
	{0},{0},{0x00,0x01},	/* device id */
	1,2,0,			/* string indexes */
	1			/* # of configurations */
};

const usb_config_descriptor_t xhci_confd = {
	USB_CONFIG_DESCRIPTOR_SIZE,
	UDESC_CONFIG,
	{USB_CONFIG_DESCRIPTOR_SIZE +
	 USB_INTERFACE_DESCRIPTOR_SIZE +
	 USB_ENDPOINT_DESCRIPTOR_SIZE},
	1,
	1,
	0,
	UC_BUS_POWERED | UC_SELF_POWERED,
	0			/* max power */
};

const usb_interface_descriptor_t xhci_ifcd = {
	USB_INTERFACE_DESCRIPTOR_SIZE,
	UDESC_INTERFACE,
	0,
	0,
	1,
	UICLASS_HUB,
	UISUBCLASS_HUB,
	UIPROTO_HSHUBSTT,
	0
};

const usb_endpoint_descriptor_t xhci_endpd = {
	USB_ENDPOINT_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT,
	UE_DIR_IN | XHCI_INTR_ENDPT,
	UE_INTERRUPT,
	{2, 0},			/* max 15 ports */
	255
};

const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT_SS_COMP,
	0,
	0,
	{0, 0}
};

const usb_hub_descriptor_t xhci_hubd = {
	USB_HUB_DESCRIPTOR_SIZE,
	UDESC_SS_HUB,
	0,
	{0,0},
	0,
	0,
	{0},
};

/*
 * Abort an in-flight transfer: stop the endpoint, then move its
 * dequeue pointer past the transfer's TRBs.  Runs at softusb level.
 */
void
xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	int error;

	splsoftassert(IPL_SOFTUSB);

	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));

	/* XXX The stack should not call abort() in this case. */
	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
		xfer->status = status;
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);
		usb_transfer_complete(xfer);
		return;
	}

	/* Transfer is already done. */
	if (xfer->status != USBD_IN_PROGRESS) {
		DPRINTF(("%s: already done \n", __func__));
		return;
	}

	/* Prevent any timeout to kick in. */
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);

	/* Indicate that we are aborting this transfer. */
	xp->halted = status;
	xp->aborted_xfer = xfer;

	/* Stop the endpoint and wait until the hardware says so. */
	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) {
		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));
		/* Assume the device is gone. */
		xp->halted = 0;
		xp->aborted_xfer = NULL;
		xfer->status = status;
		usb_transfer_complete(xfer);
		return;
	}

	/*
	 * The transfer was already completed when we stopped the
	 * endpoint, no need to move the dequeue pointer past its
	 * TRBs.
	 */
	if (xp->aborted_xfer == NULL) {
		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
		xp->halted = 0;
		return;
	}

	/*
	 * At this stage the endpoint has been stopped, so update its
	 * dequeue pointer past the last TRB of the transfer.
	 *
	 * Note: This assumes that only one transfer per endpoint has
	 * pending TRBs on the ring.
2315 */ 2316 xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci, 2317 DEQPTR(xp->ring) | xp->ring.toggle); 2318 error = tsleep_nsec(xp, PZERO, "xhciab", XHCI_CMD_TIMEOUT); 2319 if (error) 2320 printf("%s: timeout aborting transfer\n", DEVNAME(sc)); 2321 } 2322 2323 void 2324 xhci_timeout(void *addr) 2325 { 2326 struct usbd_xfer *xfer = addr; 2327 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2328 2329 if (sc->sc_bus.dying) { 2330 xhci_timeout_task(addr); 2331 return; 2332 } 2333 2334 usb_init_task(&xfer->abort_task, xhci_timeout_task, addr, 2335 USB_TASK_TYPE_ABORT); 2336 usb_add_task(xfer->device, &xfer->abort_task); 2337 } 2338 2339 void 2340 xhci_timeout_task(void *addr) 2341 { 2342 struct usbd_xfer *xfer = addr; 2343 int s; 2344 2345 s = splusb(); 2346 xhci_abort_xfer(xfer, USBD_TIMEOUT); 2347 splx(s); 2348 } 2349 2350 usbd_status 2351 xhci_root_ctrl_transfer(struct usbd_xfer *xfer) 2352 { 2353 usbd_status err; 2354 2355 err = usb_insert_transfer(xfer); 2356 if (err) 2357 return (err); 2358 2359 return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue))); 2360 } 2361 2362 usbd_status 2363 xhci_root_ctrl_start(struct usbd_xfer *xfer) 2364 { 2365 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2366 usb_port_status_t ps; 2367 usb_device_request_t *req; 2368 void *buf = NULL; 2369 usb_hub_descriptor_t hubd; 2370 usbd_status err; 2371 int s, len, value, index; 2372 int l, totlen = 0; 2373 int port, i; 2374 uint32_t v; 2375 2376 KASSERT(xfer->rqflags & URQ_REQUEST); 2377 2378 if (sc->sc_bus.dying) 2379 return (USBD_IOERROR); 2380 2381 req = &xfer->request; 2382 2383 DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__, 2384 req->bmRequestType, req->bRequest)); 2385 2386 len = UGETW(req->wLength); 2387 value = UGETW(req->wValue); 2388 index = UGETW(req->wIndex); 2389 2390 if (len != 0) 2391 buf = KERNADDR(&xfer->dmabuf, 0); 2392 2393 #define C(x,y) ((x) | ((y) << 8)) 2394 switch(C(req->bRequest, req->bmRequestType)) { 2395 case 
C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE): 2396 case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE): 2397 case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT): 2398 /* 2399 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops 2400 * for the integrated root hub. 2401 */ 2402 break; 2403 case C(UR_GET_CONFIG, UT_READ_DEVICE): 2404 if (len > 0) { 2405 *(uint8_t *)buf = sc->sc_conf; 2406 totlen = 1; 2407 } 2408 break; 2409 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 2410 DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value)); 2411 switch(value >> 8) { 2412 case UDESC_DEVICE: 2413 if ((value & 0xff) != 0) { 2414 err = USBD_IOERROR; 2415 goto ret; 2416 } 2417 totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE); 2418 USETW(xhci_devd.idVendor, sc->sc_id_vendor); 2419 memcpy(buf, &xhci_devd, l); 2420 break; 2421 /* 2422 * We can't really operate at another speed, but the spec says 2423 * we need this descriptor. 2424 */ 2425 case UDESC_OTHER_SPEED_CONFIGURATION: 2426 case UDESC_CONFIG: 2427 if ((value & 0xff) != 0) { 2428 err = USBD_IOERROR; 2429 goto ret; 2430 } 2431 totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE); 2432 memcpy(buf, &xhci_confd, l); 2433 ((usb_config_descriptor_t *)buf)->bDescriptorType = 2434 value >> 8; 2435 buf = (char *)buf + l; 2436 len -= l; 2437 l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE); 2438 totlen += l; 2439 memcpy(buf, &xhci_ifcd, l); 2440 buf = (char *)buf + l; 2441 len -= l; 2442 l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE); 2443 totlen += l; 2444 memcpy(buf, &xhci_endpd, l); 2445 break; 2446 case UDESC_STRING: 2447 if (len == 0) 2448 break; 2449 *(u_int8_t *)buf = 0; 2450 totlen = 1; 2451 switch (value & 0xff) { 2452 case 0: /* Language table */ 2453 totlen = usbd_str(buf, len, "\001"); 2454 break; 2455 case 1: /* Vendor */ 2456 totlen = usbd_str(buf, len, sc->sc_vendor); 2457 break; 2458 case 2: /* Product */ 2459 totlen = usbd_str(buf, len, "xHCI root hub"); 2460 break; 2461 } 2462 break; 2463 default: 2464 err = USBD_IOERROR; 2465 goto ret; 2466 } 2467 
break; 2468 case C(UR_GET_INTERFACE, UT_READ_INTERFACE): 2469 if (len > 0) { 2470 *(uint8_t *)buf = 0; 2471 totlen = 1; 2472 } 2473 break; 2474 case C(UR_GET_STATUS, UT_READ_DEVICE): 2475 if (len > 1) { 2476 USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED); 2477 totlen = 2; 2478 } 2479 break; 2480 case C(UR_GET_STATUS, UT_READ_INTERFACE): 2481 case C(UR_GET_STATUS, UT_READ_ENDPOINT): 2482 if (len > 1) { 2483 USETW(((usb_status_t *)buf)->wStatus, 0); 2484 totlen = 2; 2485 } 2486 break; 2487 case C(UR_SET_ADDRESS, UT_WRITE_DEVICE): 2488 if (value >= USB_MAX_DEVICES) { 2489 err = USBD_IOERROR; 2490 goto ret; 2491 } 2492 break; 2493 case C(UR_SET_CONFIG, UT_WRITE_DEVICE): 2494 if (value != 0 && value != 1) { 2495 err = USBD_IOERROR; 2496 goto ret; 2497 } 2498 sc->sc_conf = value; 2499 break; 2500 case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE): 2501 break; 2502 case C(UR_SET_FEATURE, UT_WRITE_DEVICE): 2503 case C(UR_SET_FEATURE, UT_WRITE_INTERFACE): 2504 case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT): 2505 err = USBD_IOERROR; 2506 goto ret; 2507 case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE): 2508 break; 2509 case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT): 2510 break; 2511 /* Hub requests */ 2512 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): 2513 break; 2514 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): 2515 DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE " 2516 "port=%d feature=%d\n", index, value)); 2517 if (index < 1 || index > sc->sc_noport) { 2518 err = USBD_IOERROR; 2519 goto ret; 2520 } 2521 port = XHCI_PORTSC(index); 2522 v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR; 2523 switch (value) { 2524 case UHF_PORT_ENABLE: 2525 XOWRITE4(sc, port, v | XHCI_PS_PED); 2526 break; 2527 case UHF_PORT_SUSPEND: 2528 /* TODO */ 2529 break; 2530 case UHF_PORT_POWER: 2531 XOWRITE4(sc, port, v & ~XHCI_PS_PP); 2532 break; 2533 case UHF_PORT_INDICATOR: 2534 XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3)); 2535 break; 2536 case UHF_C_PORT_CONNECTION: 2537 XOWRITE4(sc, port, v | 
XHCI_PS_CSC); 2538 break; 2539 case UHF_C_PORT_ENABLE: 2540 XOWRITE4(sc, port, v | XHCI_PS_PEC); 2541 break; 2542 case UHF_C_PORT_SUSPEND: 2543 case UHF_C_PORT_LINK_STATE: 2544 XOWRITE4(sc, port, v | XHCI_PS_PLC); 2545 break; 2546 case UHF_C_PORT_OVER_CURRENT: 2547 XOWRITE4(sc, port, v | XHCI_PS_OCC); 2548 break; 2549 case UHF_C_PORT_RESET: 2550 XOWRITE4(sc, port, v | XHCI_PS_PRC); 2551 break; 2552 case UHF_C_BH_PORT_RESET: 2553 XOWRITE4(sc, port, v | XHCI_PS_WRC); 2554 break; 2555 default: 2556 err = USBD_IOERROR; 2557 goto ret; 2558 } 2559 break; 2560 2561 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): 2562 if (len == 0) 2563 break; 2564 if ((value & 0xff) != 0) { 2565 err = USBD_IOERROR; 2566 goto ret; 2567 } 2568 v = XREAD4(sc, XHCI_HCCPARAMS); 2569 hubd = xhci_hubd; 2570 hubd.bNbrPorts = sc->sc_noport; 2571 USETW(hubd.wHubCharacteristics, 2572 (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) | 2573 (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0)); 2574 hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */ 2575 for (i = 1; i <= sc->sc_noport; i++) { 2576 v = XOREAD4(sc, XHCI_PORTSC(i)); 2577 if (v & XHCI_PS_DR) 2578 hubd.DeviceRemovable[i / 8] |= 1U << (i % 8); 2579 } 2580 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i; 2581 l = min(len, hubd.bDescLength); 2582 totlen = l; 2583 memcpy(buf, &hubd, l); 2584 break; 2585 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): 2586 if (len != 16) { 2587 err = USBD_IOERROR; 2588 goto ret; 2589 } 2590 memset(buf, 0, len); 2591 totlen = len; 2592 break; 2593 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): 2594 DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n", 2595 index)); 2596 if (index < 1 || index > sc->sc_noport) { 2597 err = USBD_IOERROR; 2598 goto ret; 2599 } 2600 if (len != 4) { 2601 err = USBD_IOERROR; 2602 goto ret; 2603 } 2604 v = XOREAD4(sc, XHCI_PORTSC(index)); 2605 DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v)); 2606 i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v)); 2607 switch (XHCI_PS_SPEED(v)) { 2608 case 
XHCI_SPEED_FULL: 2609 i |= UPS_FULL_SPEED; 2610 break; 2611 case XHCI_SPEED_LOW: 2612 i |= UPS_LOW_SPEED; 2613 break; 2614 case XHCI_SPEED_HIGH: 2615 i |= UPS_HIGH_SPEED; 2616 break; 2617 case XHCI_SPEED_SUPER: 2618 default: 2619 break; 2620 } 2621 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS; 2622 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED; 2623 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR; 2624 if (v & XHCI_PS_PR) i |= UPS_RESET; 2625 if (v & XHCI_PS_PP) { 2626 if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL && 2627 XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH) 2628 i |= UPS_PORT_POWER; 2629 else 2630 i |= UPS_PORT_POWER_SS; 2631 } 2632 USETW(ps.wPortStatus, i); 2633 i = 0; 2634 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS; 2635 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED; 2636 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR; 2637 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET; 2638 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET; 2639 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE; 2640 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR; 2641 USETW(ps.wPortChange, i); 2642 l = min(len, sizeof ps); 2643 memcpy(buf, &ps, l); 2644 totlen = l; 2645 break; 2646 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): 2647 err = USBD_IOERROR; 2648 goto ret; 2649 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): 2650 break; 2651 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): 2652 2653 i = index >> 8; 2654 index &= 0x00ff; 2655 2656 if (index < 1 || index > sc->sc_noport) { 2657 err = USBD_IOERROR; 2658 goto ret; 2659 } 2660 port = XHCI_PORTSC(index); 2661 v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR; 2662 2663 switch (value) { 2664 case UHF_PORT_ENABLE: 2665 XOWRITE4(sc, port, v | XHCI_PS_PED); 2666 break; 2667 case UHF_PORT_SUSPEND: 2668 DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i)); 2669 if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) { 2670 err = USBD_IOERROR; 2671 goto ret; 2672 } 2673 XOWRITE4(sc, port, v | 2674 XHCI_PS_SET_PLS(i ? 
2 /* LPM */ : 3) | XHCI_PS_LWS); 2675 break; 2676 case UHF_PORT_RESET: 2677 DPRINTFN(6, ("reset port %d\n", index)); 2678 XOWRITE4(sc, port, v | XHCI_PS_PR); 2679 break; 2680 case UHF_PORT_POWER: 2681 DPRINTFN(3, ("set port power %d\n", index)); 2682 XOWRITE4(sc, port, v | XHCI_PS_PP); 2683 break; 2684 case UHF_PORT_INDICATOR: 2685 DPRINTFN(3, ("set port indicator %d\n", index)); 2686 2687 v &= ~XHCI_PS_SET_PIC(3); 2688 v |= XHCI_PS_SET_PIC(1); 2689 2690 XOWRITE4(sc, port, v); 2691 break; 2692 case UHF_C_PORT_RESET: 2693 XOWRITE4(sc, port, v | XHCI_PS_PRC); 2694 break; 2695 case UHF_C_BH_PORT_RESET: 2696 XOWRITE4(sc, port, v | XHCI_PS_WRC); 2697 break; 2698 default: 2699 err = USBD_IOERROR; 2700 goto ret; 2701 } 2702 break; 2703 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): 2704 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): 2705 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): 2706 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): 2707 break; 2708 default: 2709 err = USBD_IOERROR; 2710 goto ret; 2711 } 2712 xfer->actlen = totlen; 2713 err = USBD_NORMAL_COMPLETION; 2714 ret: 2715 xfer->status = err; 2716 s = splusb(); 2717 usb_transfer_complete(xfer); 2718 splx(s); 2719 return (err); 2720 } 2721 2722 2723 void 2724 xhci_noop(struct usbd_xfer *xfer) 2725 { 2726 } 2727 2728 2729 usbd_status 2730 xhci_root_intr_transfer(struct usbd_xfer *xfer) 2731 { 2732 usbd_status err; 2733 2734 err = usb_insert_transfer(xfer); 2735 if (err) 2736 return (err); 2737 2738 return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue))); 2739 } 2740 2741 usbd_status 2742 xhci_root_intr_start(struct usbd_xfer *xfer) 2743 { 2744 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2745 2746 if (sc->sc_bus.dying) 2747 return (USBD_IOERROR); 2748 2749 sc->sc_intrxfer = xfer; 2750 2751 return (USBD_IN_PROGRESS); 2752 } 2753 2754 void 2755 xhci_root_intr_abort(struct usbd_xfer *xfer) 2756 { 2757 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2758 int s; 2759 2760 
sc->sc_intrxfer = NULL; 2761 2762 xfer->status = USBD_CANCELLED; 2763 s = splusb(); 2764 usb_transfer_complete(xfer); 2765 splx(s); 2766 } 2767 2768 void 2769 xhci_root_intr_done(struct usbd_xfer *xfer) 2770 { 2771 } 2772 2773 /* 2774 * Number of packets remaining in the TD after the corresponding TRB. 2775 * 2776 * Section 4.11.2.4 of xHCI specification r1.1. 2777 */ 2778 static inline uint32_t 2779 xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len) 2780 { 2781 uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize); 2782 2783 if (len == 0) 2784 return XHCI_TRB_TDREM(0); 2785 2786 npkt = howmany(remain - len, UE_GET_SIZE(mps)); 2787 if (npkt > 31) 2788 npkt = 31; 2789 2790 return XHCI_TRB_TDREM(npkt); 2791 } 2792 2793 /* 2794 * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC). 2795 * 2796 * Section 4.11.2.3 of xHCI specification r1.1. 2797 */ 2798 static inline uint32_t 2799 xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc) 2800 { 2801 uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize); 2802 uint32_t maxb, tdpc, residue, tbc; 2803 2804 /* Transfer Descriptor Packet Count, section 4.14.1. 
*/ 2805 tdpc = howmany(len, UE_GET_SIZE(mps)); 2806 if (tdpc == 0) 2807 tdpc = 1; 2808 2809 /* Transfer Burst Count */ 2810 maxb = xhci_pipe_maxburst(xfer->pipe); 2811 tbc = howmany(tdpc, maxb + 1) - 1; 2812 2813 /* Transfer Last Burst Packet Count */ 2814 if (xfer->device->speed == USB_SPEED_SUPER) { 2815 residue = tdpc % (maxb + 1); 2816 if (residue == 0) 2817 *tlbpc = maxb; 2818 else 2819 *tlbpc = residue - 1; 2820 } else { 2821 *tlbpc = tdpc - 1; 2822 } 2823 2824 return (tbc); 2825 } 2826 2827 usbd_status 2828 xhci_device_ctrl_transfer(struct usbd_xfer *xfer) 2829 { 2830 usbd_status err; 2831 2832 err = usb_insert_transfer(xfer); 2833 if (err) 2834 return (err); 2835 2836 return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue))); 2837 } 2838 2839 usbd_status 2840 xhci_device_ctrl_start(struct usbd_xfer *xfer) 2841 { 2842 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2843 struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; 2844 struct xhci_trb *trb0, *trb; 2845 uint32_t flags, len = UGETW(xfer->request.wLength); 2846 uint8_t toggle; 2847 int s; 2848 2849 KASSERT(xfer->rqflags & URQ_REQUEST); 2850 2851 if (sc->sc_bus.dying || xp->halted) 2852 return (USBD_IOERROR); 2853 2854 if (xp->free_trbs < 3) 2855 return (USBD_NOMEM); 2856 2857 if (len != 0) 2858 usb_syncmem(&xfer->dmabuf, 0, len, 2859 usbd_xfer_isread(xfer) ? 2860 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2861 2862 /* We'll toggle the setup TRB once we're finished with the stages. 
*/ 2863 trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, 0); 2864 2865 flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | (toggle ^ 1); 2866 if (len != 0) { 2867 if (usbd_xfer_isread(xfer)) 2868 flags |= XHCI_TRB_TRT_IN; 2869 else 2870 flags |= XHCI_TRB_TRT_OUT; 2871 } 2872 2873 memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr)); 2874 trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8)); 2875 trb0->trb_flags = htole32(flags); 2876 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2877 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 2878 BUS_DMASYNC_PREWRITE); 2879 2880 /* Data TRB */ 2881 if (len != 0) { 2882 trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0); 2883 2884 flags = XHCI_TRB_TYPE_DATA | toggle; 2885 if (usbd_xfer_isread(xfer)) 2886 flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP; 2887 2888 trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0)); 2889 trb->trb_status = htole32( 2890 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 2891 xhci_xfer_tdsize(xfer, len, len) 2892 ); 2893 trb->trb_flags = htole32(flags); 2894 2895 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2896 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 2897 BUS_DMASYNC_PREWRITE); 2898 } 2899 2900 /* Status TRB */ 2901 trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1); 2902 2903 flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle; 2904 if (len == 0 || !usbd_xfer_isread(xfer)) 2905 flags |= XHCI_TRB_DIR_IN; 2906 2907 trb->trb_paddr = 0; 2908 trb->trb_status = htole32(XHCI_TRB_INTR(0)); 2909 trb->trb_flags = htole32(flags); 2910 2911 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2912 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 2913 BUS_DMASYNC_PREWRITE); 2914 2915 /* Setup TRB */ 2916 trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE); 2917 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2918 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 2919 BUS_DMASYNC_PREWRITE); 2920 2921 s = splusb(); 2922 XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci); 2923 2924 xfer->status = USBD_IN_PROGRESS; 
2925 if (xfer->timeout && !sc->sc_bus.use_polling) { 2926 timeout_del(&xfer->timeout_handle); 2927 timeout_set(&xfer->timeout_handle, xhci_timeout, xfer); 2928 timeout_add_msec(&xfer->timeout_handle, xfer->timeout); 2929 } 2930 splx(s); 2931 2932 return (USBD_IN_PROGRESS); 2933 } 2934 2935 void 2936 xhci_device_ctrl_abort(struct usbd_xfer *xfer) 2937 { 2938 xhci_abort_xfer(xfer, USBD_CANCELLED); 2939 } 2940 2941 usbd_status 2942 xhci_device_generic_transfer(struct usbd_xfer *xfer) 2943 { 2944 usbd_status err; 2945 2946 err = usb_insert_transfer(xfer); 2947 if (err) 2948 return (err); 2949 2950 return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue))); 2951 } 2952 2953 usbd_status 2954 xhci_device_generic_start(struct usbd_xfer *xfer) 2955 { 2956 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2957 struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; 2958 struct xhci_trb *trb0, *trb; 2959 uint32_t len, remain, flags; 2960 uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize); 2961 uint64_t paddr = DMAADDR(&xfer->dmabuf, 0); 2962 uint8_t toggle; 2963 int s, i, ntrb, zerotd = 0; 2964 2965 KASSERT(!(xfer->rqflags & URQ_REQUEST)); 2966 2967 if (sc->sc_bus.dying || xp->halted) 2968 return (USBD_IOERROR); 2969 2970 /* How many TRBs do we need for this transfer? */ 2971 ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE); 2972 2973 /* If the buffer crosses a 64k boundary, we need one more. */ 2974 len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1)); 2975 if (len < xfer->length) 2976 ntrb = howmany(xfer->length - len, XHCI_TRB_MAXSIZE) + 1; 2977 else 2978 len = xfer->length; 2979 2980 /* If we need to append a zero length packet, we need one more. 
*/ 2981 if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) && 2982 (xfer->length % UE_GET_SIZE(mps) == 0)) 2983 zerotd = 1; 2984 2985 if (xp->free_trbs < (ntrb + zerotd)) 2986 return (USBD_NOMEM); 2987 2988 usb_syncmem(&xfer->dmabuf, 0, xfer->length, 2989 usbd_xfer_isread(xfer) ? 2990 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2991 2992 /* We'll toggle the first TRB once we're finished with the chain. */ 2993 trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1)); 2994 flags = XHCI_TRB_TYPE_NORMAL | (toggle ^ 1); 2995 if (usbd_xfer_isread(xfer)) 2996 flags |= XHCI_TRB_ISP; 2997 flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN; 2998 2999 trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0)); 3000 trb0->trb_status = htole32( 3001 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 3002 xhci_xfer_tdsize(xfer, xfer->length, len) 3003 ); 3004 trb0->trb_flags = htole32(flags); 3005 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3006 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 3007 BUS_DMASYNC_PREWRITE); 3008 3009 remain = xfer->length - len; 3010 paddr += len; 3011 3012 /* Chain more TRBs if needed. */ 3013 for (i = ntrb - 1; i > 0; i--) { 3014 len = min(remain, XHCI_TRB_MAXSIZE); 3015 3016 /* Next (or Last) TRB. */ 3017 trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1)); 3018 flags = XHCI_TRB_TYPE_NORMAL | toggle; 3019 if (usbd_xfer_isread(xfer)) 3020 flags |= XHCI_TRB_ISP; 3021 flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN; 3022 3023 trb->trb_paddr = htole64(paddr); 3024 trb->trb_status = htole32( 3025 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 3026 xhci_xfer_tdsize(xfer, remain, len) 3027 ); 3028 trb->trb_flags = htole32(flags); 3029 3030 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3031 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 3032 BUS_DMASYNC_PREWRITE); 3033 3034 remain -= len; 3035 paddr += len; 3036 } 3037 3038 /* Do we need to issue a zero length transfer? 
*/ 3039 if (zerotd == 1) { 3040 trb = xhci_xfer_get_trb(sc, xfer, &toggle, -1); 3041 trb->trb_paddr = 0; 3042 trb->trb_status = 0; 3043 trb->trb_flags = htole32(XHCI_TRB_TYPE_NORMAL | XHCI_TRB_IOC | toggle); 3044 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3045 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 3046 BUS_DMASYNC_PREWRITE); 3047 } 3048 3049 /* First TRB. */ 3050 trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE); 3051 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3052 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 3053 BUS_DMASYNC_PREWRITE); 3054 3055 s = splusb(); 3056 XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci); 3057 3058 xfer->status = USBD_IN_PROGRESS; 3059 if (xfer->timeout && !sc->sc_bus.use_polling) { 3060 timeout_del(&xfer->timeout_handle); 3061 timeout_set(&xfer->timeout_handle, xhci_timeout, xfer); 3062 timeout_add_msec(&xfer->timeout_handle, xfer->timeout); 3063 } 3064 splx(s); 3065 3066 return (USBD_IN_PROGRESS); 3067 } 3068 3069 void 3070 xhci_device_generic_done(struct usbd_xfer *xfer) 3071 { 3072 /* Only happens with interrupt transfers. 
*/ 3073 if (xfer->pipe->repeat) { 3074 xfer->actlen = 0; 3075 xhci_device_generic_start(xfer); 3076 } 3077 } 3078 3079 void 3080 xhci_device_generic_abort(struct usbd_xfer *xfer) 3081 { 3082 KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer); 3083 3084 xhci_abort_xfer(xfer, USBD_CANCELLED); 3085 } 3086 3087 usbd_status 3088 xhci_device_isoc_transfer(struct usbd_xfer *xfer) 3089 { 3090 usbd_status err; 3091 3092 err = usb_insert_transfer(xfer); 3093 if (err && err != USBD_IN_PROGRESS) 3094 return (err); 3095 3096 return (xhci_device_isoc_start(xfer)); 3097 } 3098 3099 usbd_status 3100 xhci_device_isoc_start(struct usbd_xfer *xfer) 3101 { 3102 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 3103 struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; 3104 struct xhci_xfer *xx = (struct xhci_xfer *)xfer; 3105 struct xhci_trb *trb0, *trb; 3106 uint32_t len, remain, flags; 3107 uint64_t paddr; 3108 uint32_t tbc, tlbpc; 3109 int s, i, j, ntrb = xfer->nframes; 3110 uint8_t toggle; 3111 3112 KASSERT(!(xfer->rqflags & URQ_REQUEST)); 3113 3114 /* 3115 * To allow continuous transfers, above we start all transfers 3116 * immediately. However, we're still going to get usbd_start_next call 3117 * this when another xfer completes. So, check if this is already 3118 * in progress or not 3119 */ 3120 if (xx->ntrb > 0) 3121 return (USBD_IN_PROGRESS); 3122 3123 if (sc->sc_bus.dying || xp->halted) 3124 return (USBD_IOERROR); 3125 3126 /* Why would you do that anyway? */ 3127 if (sc->sc_bus.use_polling) 3128 return (USBD_INVAL); 3129 3130 paddr = DMAADDR(&xfer->dmabuf, 0); 3131 3132 /* How many TRBs do for all Transfers? */ 3133 for (i = 0, ntrb = 0; i < xfer->nframes; i++) { 3134 /* How many TRBs do we need for this transfer? */ 3135 ntrb += howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE); 3136 3137 /* If the buffer crosses a 64k boundary, we need one more. 
*/ 3138 len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1)); 3139 if (len < xfer->frlengths[i]) 3140 ntrb++; 3141 3142 paddr += xfer->frlengths[i]; 3143 } 3144 3145 if (xp->free_trbs < ntrb) 3146 return (USBD_NOMEM); 3147 3148 usb_syncmem(&xfer->dmabuf, 0, xfer->length, 3149 usbd_xfer_isread(xfer) ? 3150 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 3151 3152 paddr = DMAADDR(&xfer->dmabuf, 0); 3153 3154 for (i = 0, trb0 = NULL; i < xfer->nframes; i++) { 3155 /* How many TRBs do we need for this transfer? */ 3156 ntrb = howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE); 3157 3158 /* If the buffer crosses a 64k boundary, we need one more. */ 3159 len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1)); 3160 if (len < xfer->frlengths[i]) 3161 ntrb++; 3162 else 3163 len = xfer->frlengths[i]; 3164 3165 KASSERT(ntrb < 3); 3166 3167 /* 3168 * We'll commit the first TRB once we're finished with the 3169 * chain. 3170 */ 3171 trb = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1)); 3172 3173 DPRINTFN(4, ("%s:%d: ring %p trb0_idx %lu ntrb %d paddr %llx " 3174 "len %u\n", __func__, __LINE__, 3175 &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, paddr, 3176 len)); 3177 3178 /* Record the first TRB so we can toggle later. */ 3179 if (trb0 == NULL) { 3180 trb0 = trb; 3181 toggle ^= 1; 3182 } 3183 3184 flags = XHCI_TRB_TYPE_ISOCH | XHCI_TRB_SIA | toggle; 3185 if (usbd_xfer_isread(xfer)) 3186 flags |= XHCI_TRB_ISP; 3187 flags |= (ntrb == 1) ? 
XHCI_TRB_IOC : XHCI_TRB_CHAIN; 3188 3189 tbc = xhci_xfer_tbc(xfer, xfer->frlengths[i], &tlbpc); 3190 flags |= XHCI_TRB_ISOC_TBC(tbc) | XHCI_TRB_ISOC_TLBPC(tlbpc); 3191 3192 trb->trb_paddr = htole64(paddr); 3193 trb->trb_status = htole32( 3194 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 3195 xhci_xfer_tdsize(xfer, xfer->frlengths[i], len) 3196 ); 3197 trb->trb_flags = htole32(flags); 3198 3199 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3200 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 3201 BUS_DMASYNC_PREWRITE); 3202 3203 remain = xfer->frlengths[i] - len; 3204 paddr += len; 3205 3206 /* Chain more TRBs if needed. */ 3207 for (j = ntrb - 1; j > 0; j--) { 3208 len = min(remain, XHCI_TRB_MAXSIZE); 3209 3210 /* Next (or Last) TRB. */ 3211 trb = xhci_xfer_get_trb(sc, xfer, &toggle, (j == 1)); 3212 flags = XHCI_TRB_TYPE_NORMAL | toggle; 3213 if (usbd_xfer_isread(xfer)) 3214 flags |= XHCI_TRB_ISP; 3215 flags |= (j == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN; 3216 DPRINTFN(3, ("%s:%d: ring %p trb0_idx %lu ntrb %d " 3217 "paddr %llx len %u\n", __func__, __LINE__, 3218 &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, 3219 paddr, len)); 3220 3221 trb->trb_paddr = htole64(paddr); 3222 trb->trb_status = htole32( 3223 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 3224 xhci_xfer_tdsize(xfer, remain, len) 3225 ); 3226 trb->trb_flags = htole32(flags); 3227 3228 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3229 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 3230 BUS_DMASYNC_PREWRITE); 3231 3232 remain -= len; 3233 paddr += len; 3234 } 3235 3236 xfer->frlengths[i] = 0; 3237 } 3238 3239 /* First TRB. 
*/ 3240 trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE); 3241 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3242 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 3243 BUS_DMASYNC_PREWRITE); 3244 3245 s = splusb(); 3246 XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci); 3247 3248 xfer->status = USBD_IN_PROGRESS; 3249 3250 if (xfer->timeout) { 3251 timeout_del(&xfer->timeout_handle); 3252 timeout_set(&xfer->timeout_handle, xhci_timeout, xfer); 3253 timeout_add_msec(&xfer->timeout_handle, xfer->timeout); 3254 } 3255 splx(s); 3256 3257 return (USBD_IN_PROGRESS); 3258 } 3259