xref: /openbsd/sys/dev/usb/xhci.c (revision 98dddc57)
1*98dddc57Skettenis /* $OpenBSD: xhci.c,v 1.135 2024/10/08 19:42:31 kettenis Exp $ */
26cb98821Smpi 
36cb98821Smpi /*
4f584fc70Smpi  * Copyright (c) 2014-2015 Martin Pieuchot
56cb98821Smpi  *
66cb98821Smpi  * Permission to use, copy, modify, and distribute this software for any
76cb98821Smpi  * purpose with or without fee is hereby granted, provided that the above
86cb98821Smpi  * copyright notice and this permission notice appear in all copies.
96cb98821Smpi  *
106cb98821Smpi  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
116cb98821Smpi  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
126cb98821Smpi  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
136cb98821Smpi  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
146cb98821Smpi  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
156cb98821Smpi  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
166cb98821Smpi  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
176cb98821Smpi  */
186cb98821Smpi 
196cb98821Smpi #include <sys/param.h>
206cb98821Smpi #include <sys/systm.h>
216cb98821Smpi #include <sys/malloc.h>
226cb98821Smpi #include <sys/device.h>
23feb20a89Smpi #include <sys/queue.h>
246cb98821Smpi #include <sys/timeout.h>
25feb20a89Smpi #include <sys/pool.h>
269b18ffb8Sguenther #include <sys/endian.h>
27eeefa845Smpi #include <sys/rwlock.h>
286cb98821Smpi 
296cb98821Smpi #include <machine/bus.h>
306cb98821Smpi 
316cb98821Smpi #include <dev/usb/usb.h>
326cb98821Smpi #include <dev/usb/usbdi.h>
336cb98821Smpi #include <dev/usb/usbdivar.h>
346cb98821Smpi #include <dev/usb/usb_mem.h>
356cb98821Smpi 
366cb98821Smpi #include <dev/usb/xhcireg.h>
376cb98821Smpi #include <dev/usb/xhcivar.h>
386cb98821Smpi 
396cb98821Smpi struct cfdriver xhci_cd = {
4027b5a9d5Sderaadt 	NULL, "xhci", DV_DULL, CD_SKIPHIBERNATE
416cb98821Smpi };
426cb98821Smpi 
436cb98821Smpi #ifdef XHCI_DEBUG
446cb98821Smpi #define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
456cb98821Smpi #define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
466cb98821Smpi int xhcidebug = 3;
476cb98821Smpi #else
486cb98821Smpi #define DPRINTF(x)
496cb98821Smpi #define DPRINTFN(n,x)
506cb98821Smpi #endif
516cb98821Smpi 
526cb98821Smpi #define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)
536cb98821Smpi 
54d1df9c46Smpi #define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
55b067e289Smpi #define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))
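/*
 * TRBOFF() gives the byte offset of a TRB within its ring buffer;
 * DEQPTR() gives the DMA address corresponding to a ring's current index.
 */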
566cb98821Smpi 
57feb20a89Smpi struct pool *xhcixfer;
58feb20a89Smpi 
596cb98821Smpi struct xhci_pipe {
606cb98821Smpi 	struct usbd_pipe	pipe;
616cb98821Smpi 
626cb98821Smpi 	uint8_t			dci;
636cb98821Smpi 	uint8_t			slot;	/* Device slot ID */
646cb98821Smpi 	struct xhci_ring	ring;
656cb98821Smpi 
666cb98821Smpi 	/*
676cb98821Smpi 	 * XXX used to pass the xfer pointer back to the
686cb98821Smpi 	 * interrupt routine, better way?
696cb98821Smpi 	 */
70b067e289Smpi 	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];
71f584fc70Smpi 	struct usbd_xfer	*aborted_xfer;
726cb98821Smpi 	int			 halted;
736cb98821Smpi 	size_t			 free_trbs;
74a72c25aaSratchov 	int			 skip;
75ef552c2eSderaadt #define TRB_PROCESSED_NO	0
76ef552c2eSderaadt #define TRB_PROCESSED_YES 	1
77ef552c2eSderaadt #define TRB_PROCESSED_SHORT	2
781032f1e6Smglocker 	uint8_t			 trb_processed[XHCI_MAX_XFER];
796cb98821Smpi };
806cb98821Smpi 
816cb98821Smpi int	xhci_reset(struct xhci_softc *);
8243e70c96Skettenis void	xhci_suspend(struct xhci_softc *);
836cb98821Smpi int	xhci_intr1(struct xhci_softc *);
846cb98821Smpi void	xhci_event_dequeue(struct xhci_softc *);
856cb98821Smpi void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
86679fbd8fSmglocker int	xhci_event_xfer_generic(struct xhci_softc *, struct usbd_xfer *,
87679fbd8fSmglocker 	    struct xhci_pipe *, uint32_t, int, uint8_t, uint8_t, uint8_t);
88c8e58a4aSmglocker int	xhci_event_xfer_isoc(struct usbd_xfer *, struct xhci_pipe *,
891032f1e6Smglocker 	    uint32_t, int, uint8_t);
904d2cc942Smpi void	xhci_event_command(struct xhci_softc *, uint64_t);
916cb98821Smpi void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
922fa48b76Smpi int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
9338ff87f6Sstsp int	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
946cb98821Smpi int	xhci_scratchpad_alloc(struct xhci_softc *, int);
956cb98821Smpi void	xhci_scratchpad_free(struct xhci_softc *);
966cb98821Smpi int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
976cb98821Smpi void	xhci_softdev_free(struct xhci_softc *, uint8_t);
98b067e289Smpi int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
99b067e289Smpi 	    size_t);
1006cb98821Smpi void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
1016cb98821Smpi void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
102d1df9c46Smpi struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
103861c1bbcSpatrick struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);
1046cb98821Smpi 
1056cb98821Smpi struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
1066cb98821Smpi 	    uint8_t *, int);
1076cb98821Smpi void	xhci_xfer_done(struct usbd_xfer *xfer);
1086cb98821Smpi /* xHCI command helpers. */
1096cb98821Smpi int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
1106cb98821Smpi int	xhci_command_abort(struct xhci_softc *);
1116cb98821Smpi 
112fcda7eabSmpi void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
1134d2cc942Smpi void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
114e5bba15cSmpi int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
115e5bba15cSmpi int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
1166cb98821Smpi int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
117ffe08da5Smpi int	xhci_cmd_set_address(struct xhci_softc *, uint8_t,  uint64_t, uint32_t);
1186cb98821Smpi #ifdef XHCI_DEBUG
1196cb98821Smpi int	xhci_cmd_noop(struct xhci_softc *);
1206cb98821Smpi #endif
1216cb98821Smpi 
1226cb98821Smpi /* XXX should be part of the Bus interface. */
1236cb98821Smpi void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
1246cb98821Smpi void	xhci_pipe_close(struct usbd_pipe *);
1256cb98821Smpi void	xhci_noop(struct usbd_xfer *);
1266cb98821Smpi 
1276cb98821Smpi void 	xhci_timeout(void *);
1281be52566Smpi void	xhci_timeout_task(void *);
1296cb98821Smpi 
1306cb98821Smpi /* USBD Bus Interface. */
1316cb98821Smpi usbd_status	  xhci_pipe_open(struct usbd_pipe *);
132ffe08da5Smpi int		  xhci_setaddr(struct usbd_device *, int);
1336cb98821Smpi void		  xhci_softintr(void *);
1346cb98821Smpi void		  xhci_poll(struct usbd_bus *);
1356cb98821Smpi struct usbd_xfer *xhci_allocx(struct usbd_bus *);
1366cb98821Smpi void		  xhci_freex(struct usbd_bus *, struct usbd_xfer *);
1376cb98821Smpi 
1386cb98821Smpi usbd_status	  xhci_root_ctrl_transfer(struct usbd_xfer *);
1396cb98821Smpi usbd_status	  xhci_root_ctrl_start(struct usbd_xfer *);
1406cb98821Smpi 
1416cb98821Smpi usbd_status	  xhci_root_intr_transfer(struct usbd_xfer *);
1426cb98821Smpi usbd_status	  xhci_root_intr_start(struct usbd_xfer *);
1436cb98821Smpi void		  xhci_root_intr_abort(struct usbd_xfer *);
1446cb98821Smpi void		  xhci_root_intr_done(struct usbd_xfer *);
1456cb98821Smpi 
1466cb98821Smpi usbd_status	  xhci_device_ctrl_transfer(struct usbd_xfer *);
1476cb98821Smpi usbd_status	  xhci_device_ctrl_start(struct usbd_xfer *);
1486cb98821Smpi void		  xhci_device_ctrl_abort(struct usbd_xfer *);
1496cb98821Smpi 
1506cb98821Smpi usbd_status	  xhci_device_generic_transfer(struct usbd_xfer *);
1516cb98821Smpi usbd_status	  xhci_device_generic_start(struct usbd_xfer *);
1526cb98821Smpi void		  xhci_device_generic_abort(struct usbd_xfer *);
1536cb98821Smpi void		  xhci_device_generic_done(struct usbd_xfer *);
1546cb98821Smpi 
15538ff87f6Sstsp usbd_status	  xhci_device_isoc_transfer(struct usbd_xfer *);
15638ff87f6Sstsp usbd_status	  xhci_device_isoc_start(struct usbd_xfer *);
15738ff87f6Sstsp 
1586cb98821Smpi #define XHCI_INTR_ENDPT 1
1596cb98821Smpi 
1608f1d17e8Snaddy const struct usbd_bus_methods xhci_bus_methods = {
1616cb98821Smpi 	.open_pipe = xhci_pipe_open,
162ffe08da5Smpi 	.dev_setaddr = xhci_setaddr,
1636cb98821Smpi 	.soft_intr = xhci_softintr,
1646cb98821Smpi 	.do_poll = xhci_poll,
1656cb98821Smpi 	.allocx = xhci_allocx,
1666cb98821Smpi 	.freex = xhci_freex,
1676cb98821Smpi };
1686cb98821Smpi 
1698f1d17e8Snaddy const struct usbd_pipe_methods xhci_root_ctrl_methods = {
1706cb98821Smpi 	.transfer = xhci_root_ctrl_transfer,
1716cb98821Smpi 	.start = xhci_root_ctrl_start,
1726cb98821Smpi 	.abort = xhci_noop,
1736cb98821Smpi 	.close = xhci_pipe_close,
1746cb98821Smpi 	.done = xhci_noop,
1756cb98821Smpi };
1766cb98821Smpi 
1778f1d17e8Snaddy const struct usbd_pipe_methods xhci_root_intr_methods = {
1786cb98821Smpi 	.transfer = xhci_root_intr_transfer,
1796cb98821Smpi 	.start = xhci_root_intr_start,
1806cb98821Smpi 	.abort = xhci_root_intr_abort,
1816cb98821Smpi 	.close = xhci_pipe_close,
1826cb98821Smpi 	.done = xhci_root_intr_done,
1836cb98821Smpi };
1846cb98821Smpi 
1858f1d17e8Snaddy const struct usbd_pipe_methods xhci_device_ctrl_methods = {
1866cb98821Smpi 	.transfer = xhci_device_ctrl_transfer,
1876cb98821Smpi 	.start = xhci_device_ctrl_start,
1886cb98821Smpi 	.abort = xhci_device_ctrl_abort,
1896cb98821Smpi 	.close = xhci_pipe_close,
1906cb98821Smpi 	.done = xhci_noop,
1916cb98821Smpi };
1926cb98821Smpi 
1938f1d17e8Snaddy const struct usbd_pipe_methods xhci_device_intr_methods = {
19438ff87f6Sstsp 	.transfer = xhci_device_generic_transfer,
19538ff87f6Sstsp 	.start = xhci_device_generic_start,
19638ff87f6Sstsp 	.abort = xhci_device_generic_abort,
19738ff87f6Sstsp 	.close = xhci_pipe_close,
19838ff87f6Sstsp 	.done = xhci_device_generic_done,
1996cb98821Smpi };
2006cb98821Smpi 
2018f1d17e8Snaddy const struct usbd_pipe_methods xhci_device_bulk_methods = {
2026cb98821Smpi 	.transfer = xhci_device_generic_transfer,
2036cb98821Smpi 	.start = xhci_device_generic_start,
2046cb98821Smpi 	.abort = xhci_device_generic_abort,
2056cb98821Smpi 	.close = xhci_pipe_close,
2066cb98821Smpi 	.done = xhci_device_generic_done,
2076cb98821Smpi };
2086cb98821Smpi 
2098f1d17e8Snaddy const struct usbd_pipe_methods xhci_device_isoc_methods = {
21038ff87f6Sstsp 	.transfer = xhci_device_isoc_transfer,
21138ff87f6Sstsp 	.start = xhci_device_isoc_start,
2126cb98821Smpi 	.abort = xhci_device_generic_abort,
2136cb98821Smpi 	.close = xhci_pipe_close,
21438ff87f6Sstsp 	.done = xhci_noop,
2156cb98821Smpi };
2166cb98821Smpi 
2174d2cc942Smpi #ifdef XHCI_DEBUG
2186cb98821Smpi static void
2196cb98821Smpi xhci_dump_trb(struct xhci_trb *trb)
2206cb98821Smpi {
2216469c75eSmpi 	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
2226469c75eSmpi 	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
2236469c75eSmpi 	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
2246cb98821Smpi }
2256cb98821Smpi #endif
2266cb98821Smpi 
227b067e289Smpi int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
228b067e289Smpi 	    void **, bus_size_t, bus_size_t, bus_size_t);
229b067e289Smpi void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);
230b067e289Smpi 
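/*
 * Allocate a physically contiguous, bus_dma(9)-backed buffer: create a
 * map, allocate and map a single segment, load it and hand back the
 * kernel virtual address (if requested) and the bus address in "dma".
 */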
231b067e289Smpi int
232b067e289Smpi usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
233b067e289Smpi     void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
234b067e289Smpi {
235b067e289Smpi 	int error;
236b067e289Smpi 
237b067e289Smpi 	dma->tag = bus->dmatag;
238b067e289Smpi 	dma->size = size;
239b067e289Smpi 
240b067e289Smpi 	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
241*98dddc57Skettenis 	    BUS_DMA_NOWAIT | bus->dmaflags, &dma->map);
242b067e289Smpi 	if (error != 0)
243114e9c63Sderaadt 		return (error);
244b067e289Smpi 
245b067e289Smpi 	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
246*98dddc57Skettenis 	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO | bus->dmaflags);
247b067e289Smpi 	if (error != 0)
2486d7eae8cSkettenis 		goto destroy;
249b067e289Smpi 
250b067e289Smpi 	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
251b067e289Smpi 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
252b067e289Smpi 	if (error != 0)
2536d7eae8cSkettenis 		goto free;
254b067e289Smpi 
255b067e289Smpi 	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
256b067e289Smpi 	    BUS_DMA_NOWAIT);
257b067e289Smpi 	if (error != 0)
2586d7eae8cSkettenis 		goto unmap;
259b067e289Smpi 
260ebf82e03Smpi 	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
261ebf82e03Smpi 	    BUS_DMASYNC_PREWRITE);
262b067e289Smpi 
263b067e289Smpi 	dma->paddr = dma->map->dm_segs[0].ds_addr;
264b067e289Smpi 	if (kvap != NULL)
265b067e289Smpi 		*kvap = dma->vaddr;
266b067e289Smpi 
267b067e289Smpi 	return (0);
268b067e289Smpi 
2696d7eae8cSkettenis unmap:
2706d7eae8cSkettenis 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
2716d7eae8cSkettenis free:
2726d7eae8cSkettenis 	bus_dmamem_free(dma->tag, &dma->seg, 1);
2736d7eae8cSkettenis destroy:
2746d7eae8cSkettenis 	bus_dmamap_destroy(dma->tag, dma->map);
275b067e289Smpi 	return (error);
276b067e289Smpi }
277b067e289Smpi 
278b067e289Smpi void
279b067e289Smpi usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
280b067e289Smpi {
281b067e289Smpi 	if (dma->map != NULL) {
282b067e289Smpi 		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
283b067e289Smpi 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
284b067e289Smpi 		bus_dmamap_unload(bus->dmatag, dma->map);
285b067e289Smpi 		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
286b067e289Smpi 		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
287b067e289Smpi 		bus_dmamap_destroy(bus->dmatag, dma->map);
288b067e289Smpi 		dma->map = NULL;
289b067e289Smpi 	}
290b067e289Smpi }
291b067e289Smpi 
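/*
 * Locate the operational, runtime and doorbell register sets, reset the
 * controller and allocate the global data structures: the Device Context
 * Base Address Array, the command ring, one event ring with its segment
 * table (ERST) and, if the controller asks for them, the scratchpad pages.
 */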
2926cb98821Smpi int
2936cb98821Smpi xhci_init(struct xhci_softc *sc)
2946cb98821Smpi {
2956cb98821Smpi 	uint32_t hcr;
2966cb98821Smpi 	int npage, error;
2976cb98821Smpi 
2986cb98821Smpi 	sc->sc_bus.usbrev = USBREV_3_0;
2996cb98821Smpi 	sc->sc_bus.methods = &xhci_bus_methods;
3006cb98821Smpi 	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);
3016cb98821Smpi 
3026cb98821Smpi 	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
3036cb98821Smpi 	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
3046cb98821Smpi 	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);
3056cb98821Smpi 
30685a43b1eSmpi 	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
3077d27638aSmpi 	printf(", xHCI %x.%x\n", sc->sc_version >> 8, sc->sc_version & 0xff);
30885a43b1eSmpi 
3096cb98821Smpi #ifdef XHCI_DEBUG
31032dd3e6eSmpi 	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
31132dd3e6eSmpi 	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
31232dd3e6eSmpi 	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
3136cb98821Smpi #endif
3146cb98821Smpi 
3156cb98821Smpi 	error = xhci_reset(sc);
3166cb98821Smpi 	if (error)
3176cb98821Smpi 		return (error);
3186cb98821Smpi 
319feb20a89Smpi 	if (xhcixfer == NULL) {
3208985a220Smglocker 		xhcixfer = malloc(sizeof(struct pool), M_USBHC, M_NOWAIT);
321feb20a89Smpi 		if (xhcixfer == NULL) {
322feb20a89Smpi 			printf("%s: unable to allocate pool descriptor\n",
323feb20a89Smpi 			    DEVNAME(sc));
324feb20a89Smpi 			return (ENOMEM);
325feb20a89Smpi 		}
3261378bae2Sdlg 		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
3271378bae2Sdlg 		    0, "xhcixfer", NULL);
328feb20a89Smpi 	}
329feb20a89Smpi 
3306cb98821Smpi 	hcr = XREAD4(sc, XHCI_HCCPARAMS);
3316cb98821Smpi 	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
332*98dddc57Skettenis 	sc->sc_bus.dmaflags |= XHCI_HCC_AC64(hcr) ? BUS_DMA_64BIT : 0;
3336cb98821Smpi 	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));
3346cb98821Smpi 
3356cb98821Smpi #ifdef XHCI_DEBUG
3366cb98821Smpi 	hcr = XOREAD4(sc, XHCI_PAGESIZE);
3376cb98821Smpi 	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
3386cb98821Smpi #endif
3396cb98821Smpi 	/* Use 4K for the moment since it's easier. */
3406cb98821Smpi 	sc->sc_pagesize = 4096;
3416cb98821Smpi 
3426cb98821Smpi 	/* Get port and device slot numbers. */
3436cb98821Smpi 	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
3446cb98821Smpi 	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
3456cb98821Smpi 	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
3466cb98821Smpi 	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
3476cb98821Smpi 	    sc->sc_noslot));
3486cb98821Smpi 
349b067e289Smpi 	/* Setup Device Context Base Address Array. */
350b067e289Smpi 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
351b067e289Smpi 	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
352b067e289Smpi 	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
3536cb98821Smpi 	if (error)
3546cb98821Smpi 		return (ENOMEM);
3556cb98821Smpi 
3566cb98821Smpi 	/* Setup command ring. */
357eeefa845Smpi 	rw_init(&sc->sc_cmd_lock, "xhcicmd");
358b067e289Smpi 	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
359b067e289Smpi 	    XHCI_CMDS_RING_ALIGN);
3606cb98821Smpi 	if (error) {
3616cb98821Smpi 		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
362b067e289Smpi 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
3636cb98821Smpi 		return (error);
3646cb98821Smpi 	}
3656cb98821Smpi 
3666cb98821Smpi 	/* Setup one event ring and its segment table (ERST). */
367b067e289Smpi 	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
368b067e289Smpi 	    XHCI_EVTS_RING_ALIGN);
3696cb98821Smpi 	if (error) {
3706cb98821Smpi 		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
3716cb98821Smpi 		xhci_ring_free(sc, &sc->sc_cmd_ring);
372b067e289Smpi 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
3736cb98821Smpi 		return (error);
3746cb98821Smpi 	}
3756cb98821Smpi 
3766cb98821Smpi 	/* Allocate the required entry for the segment table. */
377b067e289Smpi 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
378b067e289Smpi 	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
379b067e289Smpi 	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
3806cb98821Smpi 	if (error) {
3816cb98821Smpi 		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
3826cb98821Smpi 		xhci_ring_free(sc, &sc->sc_evt_ring);
3836cb98821Smpi 		xhci_ring_free(sc, &sc->sc_cmd_ring);
384b067e289Smpi 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
3856cb98821Smpi 		return (ENOMEM);
3866cb98821Smpi 	}
3876cb98821Smpi 
3886cb98821Smpi 	/* Set our ring address and size in its corresponding segment. */
389b067e289Smpi 	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
390b067e289Smpi 	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
3916cb98821Smpi 	sc->sc_erst.segs[0].er_rsvd = 0;
392b067e289Smpi 	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
393ebf82e03Smpi 	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3946cb98821Smpi 
3956cb98821Smpi 	/* Get the number of scratch pages and configure them if necessary. */
3966cb98821Smpi 	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
3976cb98821Smpi 	npage = XHCI_HCS2_SPB_MAX(hcr);
398438cc1d7Smpi 	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
399438cc1d7Smpi 	   XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));
4006cb98821Smpi 
4016cb98821Smpi 	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
4026cb98821Smpi 		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
403b067e289Smpi 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
4046cb98821Smpi 		xhci_ring_free(sc, &sc->sc_evt_ring);
4056cb98821Smpi 		xhci_ring_free(sc, &sc->sc_cmd_ring);
406b067e289Smpi 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
4076cb98821Smpi 		return (ENOMEM);
4086cb98821Smpi 	}
4096cb98821Smpi 
4108b235456Smpi 
4118b235456Smpi 	return (0);
4128b235456Smpi }
4138b235456Smpi 
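/*
 * Program the controller with what xhci_init() allocated: the number of
 * device slots, the DCBAA pointer, the command ring, the event ring
 * segment table and dequeue pointer.  Then enable interrupts and start
 * the controller.
 */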
4148b235456Smpi void
4158b235456Smpi xhci_config(struct xhci_softc *sc)
4168b235456Smpi {
4178b235456Smpi 	uint64_t paddr;
4188b235456Smpi 	uint32_t hcr;
419868dd50cSkettenis 	int i;
4208b235456Smpi 
4218b235456Smpi 	/* Make sure to program a number of device slots we can handle. */
4228b235456Smpi 	if (sc->sc_noslot > USB_MAX_DEVICES)
4238b235456Smpi 		sc->sc_noslot = USB_MAX_DEVICES;
4248b235456Smpi 	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
4258b235456Smpi 	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);
4268b235456Smpi 
4276cb98821Smpi 	/* Set the device context base array address. */
428b067e289Smpi 	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
4296cb98821Smpi 	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
4306cb98821Smpi 	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));
4316cb98821Smpi 
43232dd3e6eSmpi 	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
4336cb98821Smpi 	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));
4346cb98821Smpi 
4356cb98821Smpi 	/* Set the command ring address. */
436b067e289Smpi 	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
4376cb98821Smpi 	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
4386cb98821Smpi 	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));
4396cb98821Smpi 
44032dd3e6eSmpi 	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
4416cb98821Smpi 	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));
4426cb98821Smpi 
4436cb98821Smpi 	/* Set the ERST count number to 1, since we use only one event ring. */
4446cb98821Smpi 	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));
4456cb98821Smpi 
4466cb98821Smpi 	/* Set the segment table address. */
447b067e289Smpi 	paddr = (uint64_t)sc->sc_erst.dma.paddr;
4486cb98821Smpi 	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
4496cb98821Smpi 	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));
4506cb98821Smpi 
45132dd3e6eSmpi 	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
4526cb98821Smpi 	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));
4536cb98821Smpi 
4546cb98821Smpi 	/* Set the ring dequeue address. */
455b067e289Smpi 	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
4566cb98821Smpi 	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
4576cb98821Smpi 	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
4586cb98821Smpi 
45932dd3e6eSmpi 	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
4606cb98821Smpi 	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));
4616cb98821Smpi 
462868dd50cSkettenis 	/*
463868dd50cSkettenis 	 * If we successfully saved the state during suspend, restore
464868dd50cSkettenis 	 * it here.  Otherwise some Intel controllers don't function
465868dd50cSkettenis 	 * correctly after resume.
466868dd50cSkettenis 	 */
467868dd50cSkettenis 	if (sc->sc_saved_state) {
468868dd50cSkettenis 		XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_CRS); /* Restore state */
469868dd50cSkettenis 		hcr = XOREAD4(sc, XHCI_USBSTS);
470868dd50cSkettenis 		for (i = 0; i < 100; i++) {
471868dd50cSkettenis 			usb_delay_ms(&sc->sc_bus, 1);
472868dd50cSkettenis 			hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_RSS;
473868dd50cSkettenis 			if (!hcr)
474868dd50cSkettenis 				break;
475868dd50cSkettenis 		}
476868dd50cSkettenis 
477868dd50cSkettenis 		if (hcr)
478868dd50cSkettenis 			printf("%s: restore state timeout\n", DEVNAME(sc));
479868dd50cSkettenis 
480868dd50cSkettenis 		sc->sc_saved_state = 0;
481868dd50cSkettenis 	}
482868dd50cSkettenis 
4836cb98821Smpi 	/* Enable interrupts. */
4846cb98821Smpi 	hcr = XRREAD4(sc, XHCI_IMAN(0));
4856cb98821Smpi 	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);
4866cb98821Smpi 
4876cb98821Smpi 	/* Set default interrupt moderation. */
4886cb98821Smpi 	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);
4896cb98821Smpi 
4906cb98821Smpi 	/* Allow event interrupt and start the controller. */
4916cb98821Smpi 	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
4926cb98821Smpi 
49332dd3e6eSmpi 	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
49432dd3e6eSmpi 	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
4956cb98821Smpi }
4966cb98821Smpi 
4976cb98821Smpi int
4988b235456Smpi xhci_detach(struct device *self, int flags)
4996cb98821Smpi {
5008b235456Smpi 	struct xhci_softc *sc = (struct xhci_softc *)self;
5016cb98821Smpi 	int rv;
5026cb98821Smpi 
5038b235456Smpi 	rv = config_detach_children(self, flags);
5046cb98821Smpi 	if (rv != 0) {
5056cb98821Smpi 		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
5066cb98821Smpi 		return (rv);
5076cb98821Smpi 	}
5086cb98821Smpi 
5096cb98821Smpi 	/* Since the hardware might already be gone, ignore the errors. */
5106cb98821Smpi 	xhci_command_abort(sc);
5116cb98821Smpi 
5126cb98821Smpi 	xhci_reset(sc);
5136cb98821Smpi 
5146cb98821Smpi 	/* Disable interrupts. */
5156cb98821Smpi 	XRWRITE4(sc, XHCI_IMOD(0), 0);
5166cb98821Smpi 	XRWRITE4(sc, XHCI_IMAN(0), 0);
5176cb98821Smpi 
5186cb98821Smpi 	/* Clear the event ring address. */
5196cb98821Smpi 	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
5206cb98821Smpi 	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);
5216cb98821Smpi 
5226cb98821Smpi 	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
5236cb98821Smpi 	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);
5246cb98821Smpi 
5256cb98821Smpi 	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);
5266cb98821Smpi 
5276cb98821Smpi 	/* Clear the command ring address. */
5286cb98821Smpi 	XOWRITE4(sc, XHCI_CRCR_LO, 0);
5296cb98821Smpi 	XOWRITE4(sc, XHCI_CRCR_HI, 0);
5306cb98821Smpi 
5316cb98821Smpi 	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
5326cb98821Smpi 	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);
5336cb98821Smpi 
5346cb98821Smpi 	if (sc->sc_spad.npage > 0)
5356cb98821Smpi 		xhci_scratchpad_free(sc);
5366cb98821Smpi 
537b067e289Smpi 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
5386cb98821Smpi 	xhci_ring_free(sc, &sc->sc_evt_ring);
5396cb98821Smpi 	xhci_ring_free(sc, &sc->sc_cmd_ring);
540b067e289Smpi 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
5416cb98821Smpi 
5426cb98821Smpi 	return (0);
5436cb98821Smpi }
5446cb98821Smpi 
5456cb98821Smpi int
5466cb98821Smpi xhci_activate(struct device *self, int act)
5476cb98821Smpi {
5486cb98821Smpi 	struct xhci_softc *sc = (struct xhci_softc *)self;
5496cb98821Smpi 	int rv = 0;
5506cb98821Smpi 
5516cb98821Smpi 	switch (act) {
5528b235456Smpi 	case DVACT_RESUME:
553e89fd846Smpi 		sc->sc_bus.use_polling++;
554ade86d6eSkettenis 		xhci_reinit(sc);
555e89fd846Smpi 		sc->sc_bus.use_polling--;
5568b235456Smpi 		rv = config_activate_children(self, act);
5576cb98821Smpi 		break;
5586cb98821Smpi 	case DVACT_POWERDOWN:
5596cb98821Smpi 		rv = config_activate_children(self, act);
56043e70c96Skettenis 		xhci_suspend(sc);
5616cb98821Smpi 		break;
5626cb98821Smpi 	default:
5636cb98821Smpi 		rv = config_activate_children(self, act);
5646cb98821Smpi 		break;
5656cb98821Smpi 	}
5666cb98821Smpi 
5676cb98821Smpi 	return (rv);
5686cb98821Smpi }
5696cb98821Smpi 
5706cb98821Smpi int
5716cb98821Smpi xhci_reset(struct xhci_softc *sc)
5726cb98821Smpi {
5736cb98821Smpi 	uint32_t hcr;
5746cb98821Smpi 	int i;
5756cb98821Smpi 
5766cb98821Smpi 	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
5776cb98821Smpi 	for (i = 0; i < 100; i++) {
5786cb98821Smpi 		usb_delay_ms(&sc->sc_bus, 1);
5796cb98821Smpi 		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
5806cb98821Smpi 		if (hcr)
5816cb98821Smpi 			break;
5826cb98821Smpi 	}
5836cb98821Smpi 
5846cb98821Smpi 	if (!hcr)
5856cb98821Smpi 		printf("%s: halt timeout\n", DEVNAME(sc));
5866cb98821Smpi 
5876cb98821Smpi 	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
5886cb98821Smpi 	for (i = 0; i < 100; i++) {
5896cb98821Smpi 		usb_delay_ms(&sc->sc_bus, 1);
5909e16ec65Smikeb 		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
5919e16ec65Smikeb 		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
5926cb98821Smpi 		if (!hcr)
5936cb98821Smpi 			break;
5946cb98821Smpi 	}
5956cb98821Smpi 
5966cb98821Smpi 	if (hcr) {
5976cb98821Smpi 		printf("%s: reset timeout\n", DEVNAME(sc));
5986cb98821Smpi 		return (EIO);
5996cb98821Smpi 	}
6006cb98821Smpi 
6016cb98821Smpi 	return (0);
6026cb98821Smpi }
6036cb98821Smpi 
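/*
 * Halt the controller and, unless the XHCI_NOCSS quirk is set, save the
 * controller state so xhci_config() can restore it on resume, then clear
 * the interrupt and ring registers.
 */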
604ade86d6eSkettenis void
60543e70c96Skettenis xhci_suspend(struct xhci_softc *sc)
60643e70c96Skettenis {
60743e70c96Skettenis 	uint32_t hcr;
60843e70c96Skettenis 	int i;
60943e70c96Skettenis 
61043e70c96Skettenis 	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
61143e70c96Skettenis 	for (i = 0; i < 100; i++) {
61243e70c96Skettenis 		usb_delay_ms(&sc->sc_bus, 1);
61343e70c96Skettenis 		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
61443e70c96Skettenis 		if (hcr)
61543e70c96Skettenis 			break;
61643e70c96Skettenis 	}
61743e70c96Skettenis 
61843e70c96Skettenis 	if (!hcr) {
61943e70c96Skettenis 		printf("%s: halt timeout\n", DEVNAME(sc));
62043e70c96Skettenis 		xhci_reset(sc);
62143e70c96Skettenis 		return;
62243e70c96Skettenis 	}
62343e70c96Skettenis 
62443e70c96Skettenis 	/*
62543e70c96Skettenis 	 * Some Intel controllers will not power down completely
62643e70c96Skettenis 	 * unless they have seen a save state command.  This in turn
62743e70c96Skettenis 	 * will prevent the SoC from reaching its lowest idle state.
62843e70c96Skettenis 	 * So save the state here.
62943e70c96Skettenis 	 */
630581cc054Sjsg 	if ((sc->sc_flags & XHCI_NOCSS) == 0) {
63143e70c96Skettenis 		XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_CSS); /* Save state */
63243e70c96Skettenis 		hcr = XOREAD4(sc, XHCI_USBSTS);
63343e70c96Skettenis 		for (i = 0; i < 100; i++) {
63443e70c96Skettenis 			usb_delay_ms(&sc->sc_bus, 1);
63543e70c96Skettenis 			hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_SSS;
63643e70c96Skettenis 			if (!hcr)
63743e70c96Skettenis 				break;
63843e70c96Skettenis 		}
63943e70c96Skettenis 
64043e70c96Skettenis 		if (hcr) {
64143e70c96Skettenis 			printf("%s: save state timeout\n", DEVNAME(sc));
64243e70c96Skettenis 			xhci_reset(sc);
64343e70c96Skettenis 			return;
64443e70c96Skettenis 		}
64543e70c96Skettenis 
646868dd50cSkettenis 		sc->sc_saved_state = 1;
647581cc054Sjsg 	}
648868dd50cSkettenis 
64943e70c96Skettenis 	/* Disable interrupts. */
65043e70c96Skettenis 	XRWRITE4(sc, XHCI_IMOD(0), 0);
65143e70c96Skettenis 	XRWRITE4(sc, XHCI_IMAN(0), 0);
65243e70c96Skettenis 
65343e70c96Skettenis 	/* Clear the event ring address. */
65443e70c96Skettenis 	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
65543e70c96Skettenis 	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);
65643e70c96Skettenis 
65743e70c96Skettenis 	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
65843e70c96Skettenis 	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);
65943e70c96Skettenis 
66043e70c96Skettenis 	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);
66143e70c96Skettenis 
66243e70c96Skettenis 	/* Clear the command ring address. */
66343e70c96Skettenis 	XOWRITE4(sc, XHCI_CRCR_LO, 0);
66443e70c96Skettenis 	XOWRITE4(sc, XHCI_CRCR_HI, 0);
66543e70c96Skettenis 
66643e70c96Skettenis 	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
66743e70c96Skettenis 	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);
66843e70c96Skettenis }
66943e70c96Skettenis 
67043e70c96Skettenis void
671ade86d6eSkettenis xhci_reinit(struct xhci_softc *sc)
672ade86d6eSkettenis {
673ade86d6eSkettenis 	xhci_reset(sc);
674ade86d6eSkettenis 	xhci_ring_reset(sc, &sc->sc_cmd_ring);
675ade86d6eSkettenis 	xhci_ring_reset(sc, &sc->sc_evt_ring);
676ade86d6eSkettenis 
677ade86d6eSkettenis 	/* Renesas controllers, at least, need more time to resume. */
678ade86d6eSkettenis 	usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
679ade86d6eSkettenis 
680ade86d6eSkettenis 	xhci_config(sc);
681ade86d6eSkettenis }
6826cb98821Smpi 
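/* Hardware interrupt handler; ignored while the bus is polled or dead. */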
6836cb98821Smpi int
6846cb98821Smpi xhci_intr(void *v)
6856cb98821Smpi {
6866cb98821Smpi 	struct xhci_softc *sc = v;
6876cb98821Smpi 
68894e125fcSkettenis 	if (sc->sc_dead)
6896cb98821Smpi 		return (0);
6906cb98821Smpi 
6916cb98821Smpi 	/* If we get an interrupt while polling, then just ignore it. */
6926cb98821Smpi 	if (sc->sc_bus.use_polling) {
6936cb98821Smpi 		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
6946cb98821Smpi 		return (0);
6956cb98821Smpi 	}
6966cb98821Smpi 
6976cb98821Smpi 	return (xhci_intr1(sc));
6986cb98821Smpi }
6996cb98821Smpi 
7006cb98821Smpi int
7016cb98821Smpi xhci_intr1(struct xhci_softc *sc)
7026cb98821Smpi {
7036cb98821Smpi 	uint32_t intrs;
7046cb98821Smpi 
7056cb98821Smpi 	intrs = XOREAD4(sc, XHCI_USBSTS);
7066cb98821Smpi 	if (intrs == 0xffffffff) {
7076cb98821Smpi 		sc->sc_bus.dying = 1;
70894e125fcSkettenis 		sc->sc_dead = 1;
7096cb98821Smpi 		return (0);
7106cb98821Smpi 	}
7116cb98821Smpi 
7126cb98821Smpi 	if ((intrs & XHCI_STS_EINT) == 0)
7136cb98821Smpi 		return (0);
7146cb98821Smpi 
7156cb98821Smpi 	sc->sc_bus.no_intrs++;
7166cb98821Smpi 
7176cb98821Smpi 	if (intrs & XHCI_STS_HSE) {
7186cb98821Smpi 		printf("%s: host system error\n", DEVNAME(sc));
7196cb98821Smpi 		sc->sc_bus.dying = 1;
720709576bbSkettenis 		XOWRITE4(sc, XHCI_USBSTS, intrs);
7216cb98821Smpi 		return (1);
7226cb98821Smpi 	}
7236cb98821Smpi 
7247dc23369Spatrick 	/* Acknowledge interrupts */
7257dc23369Spatrick 	XOWRITE4(sc, XHCI_USBSTS, intrs);
7266cb98821Smpi 	intrs = XRREAD4(sc, XHCI_IMAN(0));
7276cb98821Smpi 	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);
7286cb98821Smpi 
7297dc23369Spatrick 	usb_schedsoftintr(&sc->sc_bus);
7307dc23369Spatrick 
7316cb98821Smpi 	return (1);
7326cb98821Smpi }
7336cb98821Smpi 
7346cb98821Smpi void
7356cb98821Smpi xhci_poll(struct usbd_bus *bus)
7366cb98821Smpi {
7376cb98821Smpi 	struct xhci_softc *sc = (struct xhci_softc *)bus;
7386cb98821Smpi 
7396cb98821Smpi 	if (XOREAD4(sc, XHCI_USBSTS))
7406cb98821Smpi 		xhci_intr1(sc);
7416cb98821Smpi }
7426cb98821Smpi 
7436cb98821Smpi void
7446cb98821Smpi xhci_softintr(void *v)
7456cb98821Smpi {
7466cb98821Smpi 	struct xhci_softc *sc = v;
7476cb98821Smpi 
7487672c07eSclaudio 	if (sc->sc_bus.dying)
7496cb98821Smpi 		return;
7506cb98821Smpi 
7516cb98821Smpi 	sc->sc_bus.intr_context++;
7526cb98821Smpi 	xhci_event_dequeue(sc);
7536cb98821Smpi 	sc->sc_bus.intr_context--;
7546cb98821Smpi }
7556cb98821Smpi 
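/*
 * Consume all pending TRBs on the event ring, dispatch them to the
 * transfer, command and port-change handlers, then write the updated
 * dequeue pointer back to ERDP.
 */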
7566cb98821Smpi void
7576cb98821Smpi xhci_event_dequeue(struct xhci_softc *sc)
7586cb98821Smpi {
7596cb98821Smpi 	struct xhci_trb *trb;
7606cb98821Smpi 	uint64_t paddr;
7616cb98821Smpi 	uint32_t status, flags;
7626cb98821Smpi 
763d1df9c46Smpi 	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
7646cb98821Smpi 		paddr = letoh64(trb->trb_paddr);
7656cb98821Smpi 		status = letoh32(trb->trb_status);
7666cb98821Smpi 		flags = letoh32(trb->trb_flags);
7676cb98821Smpi 
7686cb98821Smpi 		switch (flags & XHCI_TRB_TYPE_MASK) {
7696cb98821Smpi 		case XHCI_EVT_XFER:
7706cb98821Smpi 			xhci_event_xfer(sc, paddr, status, flags);
7716cb98821Smpi 			break;
7726cb98821Smpi 		case XHCI_EVT_CMD_COMPLETE:
7736cb98821Smpi 			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
7744d2cc942Smpi 			xhci_event_command(sc, paddr);
7756cb98821Smpi 			break;
7766cb98821Smpi 		case XHCI_EVT_PORT_CHANGE:
7776cb98821Smpi 			xhci_event_port_change(sc, paddr, status);
7786cb98821Smpi 			break;
77938ff87f6Sstsp 		case XHCI_EVT_HOST_CTRL:
78038ff87f6Sstsp 			/* TODO */
78138ff87f6Sstsp 			break;
7826cb98821Smpi 		default:
7834d2cc942Smpi #ifdef XHCI_DEBUG
7846cb98821Smpi 			printf("event (%d): ", XHCI_TRB_TYPE(flags));
7856cb98821Smpi 			xhci_dump_trb(trb);
7866cb98821Smpi #endif
7876cb98821Smpi 			break;
7886cb98821Smpi 		}
7896cb98821Smpi 
7906cb98821Smpi 	}
7916cb98821Smpi 
792fcda7eabSmpi 	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
7936cb98821Smpi 	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
7946cb98821Smpi 	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
7956cb98821Smpi }
7966cb98821Smpi 
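/*
 * Complete every transfer currently queued on a pipe; used when the
 * controller reports a ring underrun or overrun for that pipe.
 */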
7976cb98821Smpi void
798a72c25aaSratchov xhci_skip_all(struct xhci_pipe *xp)
799a72c25aaSratchov {
800a72c25aaSratchov 	struct usbd_xfer *xfer, *last;
801a72c25aaSratchov 
802a72c25aaSratchov 	if (xp->skip) {
803a72c25aaSratchov 		/*
804a72c25aaSratchov 		 * Find the last transfer to skip; this is necessary
805a72c25aaSratchov 		 * because xhci_xfer_done() posts new transfers which we
806a72c25aaSratchov 		 * don't want to skip.
807a72c25aaSratchov 		 */
808a72c25aaSratchov 		last = SIMPLEQ_FIRST(&xp->pipe.queue);
809a72c25aaSratchov 		if (last == NULL)
810a72c25aaSratchov 			goto done;
811a72c25aaSratchov 		while ((xfer = SIMPLEQ_NEXT(last, next)) != NULL)
812a72c25aaSratchov 			last = xfer;
813a72c25aaSratchov 
814a72c25aaSratchov 		do {
815a72c25aaSratchov 			xfer = SIMPLEQ_FIRST(&xp->pipe.queue);
816a72c25aaSratchov 			if (xfer == NULL)
817a72c25aaSratchov 				goto done;
818a72c25aaSratchov 			DPRINTF(("%s: skipping %p\n", __func__, xfer));
819a72c25aaSratchov 			xfer->status = USBD_NORMAL_COMPLETION;
820a72c25aaSratchov 			xhci_xfer_done(xfer);
821a72c25aaSratchov 		} while (xfer != last);
822a72c25aaSratchov 	done:
823a72c25aaSratchov 		xp->skip = 0;
824a72c25aaSratchov 	}
825a72c25aaSratchov }
826a72c25aaSratchov 
827a72c25aaSratchov void
8286cb98821Smpi xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
8296cb98821Smpi     uint32_t flags)
8306cb98821Smpi {
8316cb98821Smpi 	struct xhci_pipe *xp;
8326cb98821Smpi 	struct usbd_xfer *xfer;
8330c2735d8Smglocker 	uint8_t dci, slot, code, xfertype;
8342793cdfdSmpi 	uint32_t remain;
835c8e58a4aSmglocker 	int trb_idx;
8366cb98821Smpi 
8376cb98821Smpi 	slot = XHCI_TRB_GET_SLOT(flags);
8386cb98821Smpi 	dci = XHCI_TRB_GET_EP(flags);
83985319fe6Smpi 	if (slot > sc->sc_noslot) {
84085319fe6Smpi 		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
84185319fe6Smpi 		return;
84285319fe6Smpi 	}
8436cb98821Smpi 
8446cb98821Smpi 	xp = sc->sc_sdevs[slot].pipes[dci - 1];
845c76d46d4Smpi 	if (xp == NULL) {
846c76d46d4Smpi 		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
8474083ee39Smpi 		return;
848c76d46d4Smpi 	}
8496cb98821Smpi 
8506cb98821Smpi 	code = XHCI_TRB_GET_CODE(status);
8516cb98821Smpi 	remain = XHCI_TRB_REMAIN(status);
8526cb98821Smpi 
85338ff87f6Sstsp 	switch (code) {
85438ff87f6Sstsp 	case XHCI_CODE_RING_UNDERRUN:
85509f3c3b6Spatrick 		DPRINTF(("%s: slot %u underrun with %zu TRB\n", DEVNAME(sc),
85638ff87f6Sstsp 		    slot, xp->ring.ntrb - xp->free_trbs));
857a72c25aaSratchov 		xhci_skip_all(xp);
85838ff87f6Sstsp 		return;
85938ff87f6Sstsp 	case XHCI_CODE_RING_OVERRUN:
86009f3c3b6Spatrick 		DPRINTF(("%s: slot %u overrun with %zu TRB\n", DEVNAME(sc),
86138ff87f6Sstsp 		    slot, xp->ring.ntrb - xp->free_trbs));
862a72c25aaSratchov 		xhci_skip_all(xp);
863a72c25aaSratchov 		return;
864a72c25aaSratchov 	case XHCI_CODE_MISSED_SRV:
865a72c25aaSratchov 		DPRINTF(("%s: slot %u missed srv with %zu TRB\n", DEVNAME(sc),
866a72c25aaSratchov 		    slot, xp->ring.ntrb - xp->free_trbs));
867a72c25aaSratchov 		xp->skip = 1;
86838ff87f6Sstsp 		return;
86938ff87f6Sstsp 	default:
87038ff87f6Sstsp 		break;
871e56e3c13Smglocker 	}
87238ff87f6Sstsp 
873b067e289Smpi 	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
8746cb98821Smpi 	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
87569a6e10cSmpi 		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
8766cb98821Smpi 		    trb_idx, xp->ring.ntrb - 1);
8776cb98821Smpi 		return;
8786cb98821Smpi 	}
8796cb98821Smpi 
8806cb98821Smpi 	xfer = xp->pending_xfers[trb_idx];
8816cb98821Smpi 	if (xfer == NULL) {
8823386cc01Skrw 		DPRINTF(("%s: NULL xfer pointer\n", DEVNAME(sc)));
8836cb98821Smpi 		return;
8846cb98821Smpi 	}
8856cb98821Smpi 
8865a065a24Smpi 	if (remain > xfer->length)
8875a065a24Smpi 		remain = xfer->length;
8885a065a24Smpi 
889679fbd8fSmglocker 	xfertype = UE_GET_XFERTYPE(xfer->pipe->endpoint->edesc->bmAttributes);
890679fbd8fSmglocker 
891679fbd8fSmglocker 	switch (xfertype) {
892679fbd8fSmglocker 	case UE_BULK:
893679fbd8fSmglocker 	case UE_INTERRUPT:
894679fbd8fSmglocker 	case UE_CONTROL:
895679fbd8fSmglocker 		if (xhci_event_xfer_generic(sc, xfer, xp, remain, trb_idx,
896679fbd8fSmglocker 		    code, slot, dci))
897679fbd8fSmglocker 			return;
898679fbd8fSmglocker 		break;
899679fbd8fSmglocker 	case UE_ISOCHRONOUS:
9001032f1e6Smglocker 		if (xhci_event_xfer_isoc(xfer, xp, remain, trb_idx, code))
901679fbd8fSmglocker 			return;
902679fbd8fSmglocker 		break;
903679fbd8fSmglocker 	default:
904679fbd8fSmglocker 		panic("xhci_event_xfer: unknown xfer type %u", xfertype);
905679fbd8fSmglocker 	}
906679fbd8fSmglocker 
907679fbd8fSmglocker 	xhci_xfer_done(xfer);
908679fbd8fSmglocker }
909679fbd8fSmglocker 
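/*
 * Walk the TRBs of a transfer descriptor up to and including trb_idx,
 * summing the lengths programmed into its Normal and Data TRBs.
 */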
910d2d18bfeSpatrick uint32_t
911d2d18bfeSpatrick xhci_xfer_length_generic(struct xhci_xfer *xx, struct xhci_pipe *xp,
912d2d18bfeSpatrick     int trb_idx)
913d2d18bfeSpatrick {
914d2d18bfeSpatrick 	int	 trb0_idx;
915d2d18bfeSpatrick 	uint32_t len = 0, type;
916d2d18bfeSpatrick 
917d2d18bfeSpatrick 	trb0_idx =
918d2d18bfeSpatrick 	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);
919d2d18bfeSpatrick 
920d2d18bfeSpatrick 	while (1) {
921df3c00e8Svisa 		type = letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
922df3c00e8Svisa 		    XHCI_TRB_TYPE_MASK;
923d2d18bfeSpatrick 		if (type == XHCI_TRB_TYPE_NORMAL || type == XHCI_TRB_TYPE_DATA)
924df3c00e8Svisa 			len += XHCI_TRB_LEN(letoh32(
925d2d18bfeSpatrick 			    xp->ring.trbs[trb0_idx].trb_status));
926d2d18bfeSpatrick 		if (trb0_idx == trb_idx)
927d2d18bfeSpatrick 			break;
928d2d18bfeSpatrick 		if (++trb0_idx == xp->ring.ntrb)
929d2d18bfeSpatrick 			trb0_idx = 0;
930d2d18bfeSpatrick 	}
931d2d18bfeSpatrick 	return len;
932d2d18bfeSpatrick }
933d2d18bfeSpatrick 
934679fbd8fSmglocker int
935679fbd8fSmglocker xhci_event_xfer_generic(struct xhci_softc *sc, struct usbd_xfer *xfer,
936679fbd8fSmglocker     struct xhci_pipe *xp, uint32_t remain, int trb_idx,
937679fbd8fSmglocker     uint8_t code, uint8_t slot, uint8_t dci)
938679fbd8fSmglocker {
939679fbd8fSmglocker 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
940679fbd8fSmglocker 
9416cb98821Smpi 	switch (code) {
9426cb98821Smpi 	case XHCI_CODE_SUCCESS:
943d2d18bfeSpatrick 		if (xfer->actlen == 0) {
944d2d18bfeSpatrick 			if (remain)
945d2d18bfeSpatrick 				xfer->actlen =
946d2d18bfeSpatrick 				    xhci_xfer_length_generic(xx, xp, trb_idx) -
947d2d18bfeSpatrick 				    remain;
948d2d18bfeSpatrick 			else
949d2d18bfeSpatrick 				xfer->actlen = xfer->length;
950d2d18bfeSpatrick 		}
9515343ff5aSpatrick 		if (xfer->actlen)
9525343ff5aSpatrick 			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
9535343ff5aSpatrick 			    usbd_xfer_isread(xfer) ?
9545343ff5aSpatrick 			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
955679fbd8fSmglocker 		xfer->status = USBD_NORMAL_COMPLETION;
956679fbd8fSmglocker 		break;
9576cb98821Smpi 	case XHCI_CODE_SHORT_XFER:
958d2d18bfeSpatrick 		/*
959d2d18bfeSpatrick 		 * Use values from the transfer TRB instead of the status TRB.
960d2d18bfeSpatrick 		 */
961a2e73b2fSpatrick 		if (xfer->actlen == 0)
962a2e73b2fSpatrick 			xfer->actlen =
963a2e73b2fSpatrick 			    xhci_xfer_length_generic(xx, xp, trb_idx) - remain;
964679fbd8fSmglocker 		/*
965679fbd8fSmglocker 		 * If this is not the last TRB of a transfer, we should
966679fbd8fSmglocker 		 * theoretically clear the IOC at the end of the chain
967679fbd8fSmglocker 		 * but the HC might have already processed it before we
968679fbd8fSmglocker 		 * had a chance to schedule the softinterrupt.
969679fbd8fSmglocker 		 */
970679fbd8fSmglocker 		if (xx->index != trb_idx) {
971679fbd8fSmglocker 			DPRINTF(("%s: short xfer %p for %u\n",
972679fbd8fSmglocker 			    DEVNAME(sc), xfer, xx->index));
973679fbd8fSmglocker 			return (1);
974679fbd8fSmglocker 		}
9755343ff5aSpatrick 		if (xfer->actlen)
9765343ff5aSpatrick 			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
9775343ff5aSpatrick 			    usbd_xfer_isread(xfer) ?
9785343ff5aSpatrick 			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
979679fbd8fSmglocker 		xfer->status = USBD_NORMAL_COMPLETION;
9806cb98821Smpi 		break;
9812635132fSmpi 	case XHCI_CODE_TXERR:
9822635132fSmpi 	case XHCI_CODE_SPLITERR:
98338ff87f6Sstsp 		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
9842635132fSmpi 		xfer->status = USBD_IOERROR;
985679fbd8fSmglocker 		break;
9866cb98821Smpi 	case XHCI_CODE_STALL:
9874d2cc942Smpi 	case XHCI_CODE_BABBLE:
98838ff87f6Sstsp 		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
989f584fc70Smpi 		/* Prevent any timeout to kick in. */
990f584fc70Smpi 		timeout_del(&xfer->timeout_handle);
9911be52566Smpi 		usb_rem_task(xfer->device, &xfer->abort_task);
992f584fc70Smpi 
993f584fc70Smpi 		/* We need to report this condition for umass(4). */
994f584fc70Smpi 		if (code == XHCI_CODE_STALL)
995ce775a50Smpi 			xp->halted = USBD_STALLED;
996f584fc70Smpi 		else
997ce775a50Smpi 			xp->halted = USBD_IOERROR;
9984d2cc942Smpi 		/*
9994d2cc942Smpi 		 * Since the stack might try to start a new transfer as
10004d2cc942Smpi 		 * soon as a pending one finishes, make sure the endpoint
10014d2cc942Smpi 		 * is fully reset before calling usb_transfer_complete().
10024d2cc942Smpi 		 */
1003f584fc70Smpi 		xp->aborted_xfer = xfer;
1004fcda7eabSmpi 		xhci_cmd_reset_ep_async(sc, slot, dci);
1005679fbd8fSmglocker 		return (1);
10061be52566Smpi 	case XHCI_CODE_XFER_STOPPED:
10071be52566Smpi 	case XHCI_CODE_XFER_STOPINV:
10081be52566Smpi 		/* Endpoint stopped while processing a TD. */
10091be52566Smpi 		if (xfer == xp->aborted_xfer) {
10101be52566Smpi 			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
1011679fbd8fSmglocker 		    	return (1);
10121be52566Smpi 		}
10131be52566Smpi 
10141be52566Smpi 		/* FALLTHROUGH */
10156cb98821Smpi 	default:
10166cb98821Smpi 		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
10176cb98821Smpi 		xfer->status = USBD_IOERROR;
10186cb98821Smpi 		xp->halted = 1;
10194d2cc942Smpi 		break;
10206cb98821Smpi 	}
10216cb98821Smpi 
10220c2735d8Smglocker 	return (0);
10230c2735d8Smglocker }
10240c2735d8Smglocker 
10250c2735d8Smglocker int
1026c8e58a4aSmglocker xhci_event_xfer_isoc(struct usbd_xfer *xfer, struct xhci_pipe *xp,
10271032f1e6Smglocker     uint32_t remain, int trb_idx, uint8_t code)
1028c8e58a4aSmglocker {
1029a72c25aaSratchov 	struct usbd_xfer *skipxfer;
1030c8e58a4aSmglocker 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
10311032f1e6Smglocker 	int trb0_idx, frame_idx = 0, skip_trb = 0;
1032c8e58a4aSmglocker 
1033c8e58a4aSmglocker 	KASSERT(xx->index >= 0);
10341032f1e6Smglocker 
10351032f1e6Smglocker 	switch (code) {
10361032f1e6Smglocker 	case XHCI_CODE_SHORT_XFER:
10371032f1e6Smglocker 		xp->trb_processed[trb_idx] = TRB_PROCESSED_SHORT;
10381032f1e6Smglocker 		break;
10391032f1e6Smglocker 	default:
10401032f1e6Smglocker 		xp->trb_processed[trb_idx] = TRB_PROCESSED_YES;
10411032f1e6Smglocker 		break;
10421032f1e6Smglocker 	}
10431032f1e6Smglocker 
1044c8e58a4aSmglocker 	trb0_idx =
1045c8e58a4aSmglocker 	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);
1046c8e58a4aSmglocker 
1047c8e58a4aSmglocker 	/* Find the corresponding frame index for this TRB. */
1048c8e58a4aSmglocker 	while (trb0_idx != trb_idx) {
1049df3c00e8Svisa 		if ((letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
1050df3c00e8Svisa 		    XHCI_TRB_TYPE_MASK) == XHCI_TRB_TYPE_ISOCH)
1051c8e58a4aSmglocker 			frame_idx++;
1052c8e58a4aSmglocker 		if (trb0_idx++ == (xp->ring.ntrb - 1))
1053c8e58a4aSmglocker 			trb0_idx = 0;
1054c8e58a4aSmglocker 	}
1055c8e58a4aSmglocker 
1056c8e58a4aSmglocker 	/*
1057c8e58a4aSmglocker 	 * If we queued two TRBs for a frame and this is the second TRB,
1058c8e58a4aSmglocker 	 * check if the first TRB needs accounting since it might not have
1059c8e58a4aSmglocker 	 * raised an interrupt in case of full data received.
1060c8e58a4aSmglocker 	 */
1061df3c00e8Svisa 	if ((letoh32(xp->ring.trbs[trb_idx].trb_flags) & XHCI_TRB_TYPE_MASK) ==
1062c8e58a4aSmglocker 	    XHCI_TRB_TYPE_NORMAL) {
1063c8e58a4aSmglocker 		frame_idx--;
1064c8e58a4aSmglocker 		if (trb_idx == 0)
1065c8e58a4aSmglocker 			trb0_idx = xp->ring.ntrb - 2;
1066c8e58a4aSmglocker 		else
1067c8e58a4aSmglocker 			trb0_idx = trb_idx - 1;
10681032f1e6Smglocker 		if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_NO) {
1069df3c00e8Svisa 			xfer->frlengths[frame_idx] = XHCI_TRB_LEN(letoh32(
1070df3c00e8Svisa 			    xp->ring.trbs[trb0_idx].trb_status));
10711032f1e6Smglocker 		} else if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_SHORT) {
10721032f1e6Smglocker 			skip_trb = 1;
1073c8e58a4aSmglocker 		}
1074c8e58a4aSmglocker 	}
1075c8e58a4aSmglocker 
10761032f1e6Smglocker 	if (!skip_trb) {
1077c8e58a4aSmglocker 		xfer->frlengths[frame_idx] +=
10781032f1e6Smglocker 		    XHCI_TRB_LEN(letoh32(xp->ring.trbs[trb_idx].trb_status)) -
10791032f1e6Smglocker 		    remain;
1080c8e58a4aSmglocker 		xfer->actlen += xfer->frlengths[frame_idx];
10811032f1e6Smglocker 	}
1082c8e58a4aSmglocker 
1083c8e58a4aSmglocker 	if (xx->index != trb_idx)
1084c8e58a4aSmglocker 		return (1);
1085c8e58a4aSmglocker 
1086a72c25aaSratchov 	if (xp->skip) {
1087a72c25aaSratchov 		while (1) {
1088a72c25aaSratchov 			skipxfer = SIMPLEQ_FIRST(&xp->pipe.queue);
1089e7faaa25Sstsp 			if (skipxfer == xfer || skipxfer == NULL)
1090a72c25aaSratchov 				break;
1091a72c25aaSratchov 			DPRINTF(("%s: skipping %p\n", __func__, skipxfer));
1092a72c25aaSratchov 			skipxfer->status = USBD_NORMAL_COMPLETION;
1093a72c25aaSratchov 			xhci_xfer_done(skipxfer);
1094a72c25aaSratchov 		}
1095a72c25aaSratchov 		xp->skip = 0;
1096a72c25aaSratchov 	}
1097a72c25aaSratchov 
10985343ff5aSpatrick 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
10995343ff5aSpatrick 	    usbd_xfer_isread(xfer) ?
11005343ff5aSpatrick 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
11010c2735d8Smglocker 	xfer->status = USBD_NORMAL_COMPLETION;
11020c2735d8Smglocker 
1103c8e58a4aSmglocker 	return (0);
1104c8e58a4aSmglocker }
1105c8e58a4aSmglocker 
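/*
 * Handle a Command Completion event: recover the original command TRB
 * from the DMA address carried by the event and finish the corresponding
 * operation.
 */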
11066cb98821Smpi void
11074d2cc942Smpi xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
11084d2cc942Smpi {
1109f10741cdSmpi 	struct xhci_trb *trb;
11104d2cc942Smpi 	struct xhci_pipe *xp;
1111d2068140Smpi 	uint32_t flags;
11124d2cc942Smpi 	uint8_t dci, slot;
1113ce775a50Smpi 	int trb_idx, status;
11144d2cc942Smpi 
1115b067e289Smpi 	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
1116f10741cdSmpi 	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
111769a6e10cSmpi 		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
1118f10741cdSmpi 		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
1119f10741cdSmpi 		return;
1120f10741cdSmpi 	}
11214d2cc942Smpi 
1122f10741cdSmpi 	trb = &sc->sc_cmd_ring.trbs[trb_idx];
1123f10741cdSmpi 
1124ebf82e03Smpi 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
1125ebf82e03Smpi 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
1126ebf82e03Smpi 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1127ebf82e03Smpi 
1128f10741cdSmpi 	flags = letoh32(trb->trb_flags);
1129d2068140Smpi 
11304d2cc942Smpi 	slot = XHCI_TRB_GET_SLOT(flags);
11314d2cc942Smpi 	dci = XHCI_TRB_GET_EP(flags);
11324d2cc942Smpi 
11334d2cc942Smpi 	switch (flags & XHCI_TRB_TYPE_MASK) {
11344d2cc942Smpi 	case XHCI_CMD_RESET_EP:
11354083ee39Smpi 		xp = sc->sc_sdevs[slot].pipes[dci - 1];
11364083ee39Smpi 		if (xp == NULL)
11374083ee39Smpi 			break;
11384083ee39Smpi 
1139fcda7eabSmpi 		/* Update the dequeue pointer past the last TRB. */
11404d2cc942Smpi 		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
1141fcda7eabSmpi 		    DEQPTR(xp->ring) | xp->ring.toggle);
11424d2cc942Smpi 		break;
11434d2cc942Smpi 	case XHCI_CMD_SET_TR_DEQ:
11444083ee39Smpi 		xp = sc->sc_sdevs[slot].pipes[dci - 1];
11454083ee39Smpi 		if (xp == NULL)
11464083ee39Smpi 			break;
11474083ee39Smpi 
1148ce775a50Smpi 		status = xp->halted;
11494d2cc942Smpi 		xp->halted = 0;
1150f584fc70Smpi 		if (xp->aborted_xfer != NULL) {
1151ce775a50Smpi 			xp->aborted_xfer->status = status;
1152f584fc70Smpi 			xhci_xfer_done(xp->aborted_xfer);
11531be52566Smpi 			wakeup(xp);
11544d2cc942Smpi 		}
11554d2cc942Smpi 		break;
11562fad3b86Smpi 	case XHCI_CMD_CONFIG_EP:
11572fad3b86Smpi 	case XHCI_CMD_STOP_EP:
11582fad3b86Smpi 	case XHCI_CMD_DISABLE_SLOT:
11592fad3b86Smpi 	case XHCI_CMD_ENABLE_SLOT:
11602fad3b86Smpi 	case XHCI_CMD_ADDRESS_DEVICE:
11612fad3b86Smpi 	case XHCI_CMD_EVAL_CTX:
11622fad3b86Smpi 	case XHCI_CMD_NOOP:
1163f748d231Sgerhard 		/*
1164f748d231Sgerhard 		 * All these commands are synchronous.
1165f748d231Sgerhard 		 *
1166f748d231Sgerhard 		 * If TRBs differ, this could be a delayed result after we
1167f748d231Sgerhard 		 * gave up waiting for the expected TRB due to timeout.
1168f748d231Sgerhard 		 */
1169f748d231Sgerhard 		if (sc->sc_cmd_trb == trb) {
1170f10741cdSmpi 			sc->sc_cmd_trb = NULL;
11714d2cc942Smpi 			wakeup(&sc->sc_cmd_trb);
1172f748d231Sgerhard 		}
11734d2cc942Smpi 		break;
11742fad3b86Smpi 	default:
11752fad3b86Smpi 		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
11764d2cc942Smpi 	}
11774d2cc942Smpi }
11784d2cc942Smpi 
11794d2cc942Smpi void
11806cb98821Smpi xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
11816cb98821Smpi {
11826cb98821Smpi 	struct usbd_xfer *xfer = sc->sc_intrxfer;
11836cb98821Smpi 	uint32_t port = XHCI_TRB_PORTID(paddr);
11846cb98821Smpi 	uint8_t *p;
11856cb98821Smpi 
11866cb98821Smpi 	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
118785319fe6Smpi 		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
11886cb98821Smpi 		return;
11896cb98821Smpi 	}
11906cb98821Smpi 
11916cb98821Smpi 	if (xfer == NULL)
11926cb98821Smpi 		return;
11936cb98821Smpi 
11946cb98821Smpi 	p = KERNADDR(&xfer->dmabuf, 0);
11956cb98821Smpi 	memset(p, 0, xfer->length);
11966cb98821Smpi 
11976cb98821Smpi 	p[port/8] |= 1 << (port%8);
11986cb98821Smpi 	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));
11996cb98821Smpi 
12006cb98821Smpi 	xfer->actlen = xfer->length;
12016cb98821Smpi 	xfer->status = USBD_NORMAL_COMPLETION;
12026cb98821Smpi 
12036cb98821Smpi 	usb_transfer_complete(xfer);
12046cb98821Smpi }
12056cb98821Smpi 
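/*
 * Reclaim the TRBs used by a completed transfer, cancel its timeout and
 * abort task and call usb_transfer_complete().
 */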
12066cb98821Smpi void
12076cb98821Smpi xhci_xfer_done(struct usbd_xfer *xfer)
12086cb98821Smpi {
12096cb98821Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
12106cb98821Smpi 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
12116cb98821Smpi 	int ntrb, i;
12126cb98821Smpi 
1213f584fc70Smpi 	splsoftassert(IPL_SOFTUSB);
1214f584fc70Smpi 
12156cb98821Smpi #ifdef XHCI_DEBUG
121699c58b9fSmpi 	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
1217f584fc70Smpi 		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
121899c58b9fSmpi 		    xfer, xx->index, xx->ntrb);
12196cb98821Smpi 	}
12206cb98821Smpi #endif
12216cb98821Smpi 
1222f584fc70Smpi 	if (xp->aborted_xfer == xfer)
1223f584fc70Smpi 		xp->aborted_xfer = NULL;
1224f584fc70Smpi 
12256cb98821Smpi 	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
12266cb98821Smpi 		xp->pending_xfers[i] = NULL;
12276cb98821Smpi 		if (i == 0)
12286cb98821Smpi 			i = (xp->ring.ntrb - 1);
12296cb98821Smpi 	}
12306cb98821Smpi 	xp->free_trbs += xx->ntrb;
123103b1240eSmglocker 	xp->free_trbs += xx->zerotd;
12326cb98821Smpi 	xx->index = -1;
12336cb98821Smpi 	xx->ntrb = 0;
123403b1240eSmglocker 	xx->zerotd = 0;
1235abb5f851Smpi 
1236abb5f851Smpi 	timeout_del(&xfer->timeout_handle);
12371be52566Smpi 	usb_rem_task(xfer->device, &xfer->abort_task);
1238abb5f851Smpi 	usb_transfer_complete(xfer);
12396cb98821Smpi }
12406cb98821Smpi 
12412fa48b76Smpi /*
12422fa48b76Smpi  * Calculate the Device Context Index (DCI) for endpoints as stated
12432fa48b76Smpi  * in section 4.5.1 of the xHCI specification r1.1.
12442fa48b76Smpi  */
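/*
 * For example, the default control endpoint maps to DCI 1, endpoint 1
 * OUT to DCI 2 and endpoint 1 IN to DCI 3.
 */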
12456cb98821Smpi static inline uint8_t
12466cb98821Smpi xhci_ed2dci(usb_endpoint_descriptor_t *ed)
12476cb98821Smpi {
12486cb98821Smpi 	uint8_t dir;
12496cb98821Smpi 
12506cb98821Smpi 	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
12516cb98821Smpi 		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);
12526cb98821Smpi 
12536cb98821Smpi 	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
12546cb98821Smpi 		dir = 1;
12556cb98821Smpi 	else
12566cb98821Smpi 		dir = 0;
12576cb98821Smpi 
12586cb98821Smpi 	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
12596cb98821Smpi }
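
/*
 * Worked example (values are illustrative, not taken from this driver):
 * a bulk IN endpoint with bEndpointAddress 0x83 has endpoint number 3
 * and direction IN, so xhci_ed2dci() returns 3 * 2 + 1 = 7; the matching
 * OUT endpoint 0x03 maps to 3 * 2 = 6, and the default control endpoint
 * (address 0) always maps to DCI 1.
 */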
12606cb98821Smpi 
12616cb98821Smpi usbd_status
12626cb98821Smpi xhci_pipe_open(struct usbd_pipe *pipe)
12636cb98821Smpi {
12646cb98821Smpi 	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
12656cb98821Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
12666cb98821Smpi 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
12676cb98821Smpi 	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
12686cb98821Smpi 	int error;
12696cb98821Smpi 
12706cb98821Smpi 	KASSERT(xp->slot == 0);
12716cb98821Smpi 
12726cb98821Smpi 	if (sc->sc_bus.dying)
12736cb98821Smpi 		return (USBD_IOERROR);
12746cb98821Smpi 
12756cb98821Smpi 	/* Root Hub */
12766cb98821Smpi 	if (pipe->device->depth == 0) {
12776cb98821Smpi 		switch (ed->bEndpointAddress) {
12786cb98821Smpi 		case USB_CONTROL_ENDPOINT:
12796cb98821Smpi 			pipe->methods = &xhci_root_ctrl_methods;
12806cb98821Smpi 			break;
12816cb98821Smpi 		case UE_DIR_IN | XHCI_INTR_ENDPT:
12826cb98821Smpi 			pipe->methods = &xhci_root_intr_methods;
12836cb98821Smpi 			break;
12846cb98821Smpi 		default:
12856cb98821Smpi 			pipe->methods = NULL;
12866cb98821Smpi 			return (USBD_INVAL);
12876cb98821Smpi 		}
12886cb98821Smpi 		return (USBD_NORMAL_COMPLETION);
12896cb98821Smpi 	}
12906cb98821Smpi 
12916cb98821Smpi #if 0
12926cb98821Smpi 	/* Issue a noop to check if the command ring is correctly configured. */
12936cb98821Smpi 	xhci_cmd_noop(sc);
12946cb98821Smpi #endif
12956cb98821Smpi 
12966cb98821Smpi 	switch (xfertype) {
12976cb98821Smpi 	case UE_CONTROL:
12986cb98821Smpi 		pipe->methods = &xhci_device_ctrl_methods;
12996cb98821Smpi 
13002fa48b76Smpi 		/*
13012fa48b76Smpi 		 * Get a slot and init the device's contexts.
13022fa48b76Smpi 		 *
13034b1a56afSjsg 		 * Since the control endpoint, represented as the default
13042fa48b76Smpi 		 * pipe, is always opened first, we are dealing with a
13052fa48b76Smpi 		 * new device.  Put a new slot in the ENABLED state.
13062fa48b76Smpi 		 *
13072fa48b76Smpi 		 */
13086cb98821Smpi 		error = xhci_cmd_slot_control(sc, &slot, 1);
13096cb98821Smpi 		if (error || slot == 0 || slot > sc->sc_noslot)
13106cb98821Smpi 			return (USBD_INVAL);
13116cb98821Smpi 
13122fa48b76Smpi 		if (xhci_softdev_alloc(sc, slot)) {
13132fa48b76Smpi 			xhci_cmd_slot_control(sc, &slot, 0);
13146cb98821Smpi 			return (USBD_NOMEM);
13150e5ce33bSmpi 		}
13160e5ce33bSmpi 
13176cb98821Smpi 		break;
13186cb98821Smpi 	case UE_ISOCHRONOUS:
13196cb98821Smpi 		pipe->methods = &xhci_device_isoc_methods;
13206cb98821Smpi 		break;
13216cb98821Smpi 	case UE_BULK:
13226cb98821Smpi 		pipe->methods = &xhci_device_bulk_methods;
13236cb98821Smpi 		break;
13246cb98821Smpi 	case UE_INTERRUPT:
132538ff87f6Sstsp 		pipe->methods = &xhci_device_intr_methods;
13266cb98821Smpi 		break;
13276cb98821Smpi 	default:
13286cb98821Smpi 		return (USBD_INVAL);
13296cb98821Smpi 	}
13306cb98821Smpi 
13312fa48b76Smpi 	/*
13322fa48b76Smpi 	 * Our USBD Bus Interface is pipe-oriented but for most of the
1333fa36d6acSmpi 	 * operations we need to access a device context, so keep track
13342fa48b76Smpi 	 * of the slot ID in every pipe.
13352fa48b76Smpi 	 */
13362fa48b76Smpi 	if (slot == 0)
13372fa48b76Smpi 		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;
13382fa48b76Smpi 
13392fa48b76Smpi 	xp->slot = slot;
13406cb98821Smpi 	xp->dci = xhci_ed2dci(ed);
13416cb98821Smpi 
13422fa48b76Smpi 	if (xhci_pipe_init(sc, pipe)) {
13432fa48b76Smpi 		xhci_cmd_slot_control(sc, &slot, 0);
13446cb98821Smpi 		return (USBD_IOERROR);
13452fa48b76Smpi 	}
13466cb98821Smpi 
13476cb98821Smpi 	return (USBD_NORMAL_COMPLETION);
13486cb98821Smpi }
13496cb98821Smpi 
13503d65fce9Smpi /*
13513d65fce9Smpi  * Set the maximum Endpoint Service Interface Time (ESIT) payload and
13523d65fce9Smpi  * the average TRB buffer length for an endpoint.
13533d65fce9Smpi  */
13546cb98821Smpi static inline uint32_t
13553d65fce9Smpi xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
13566cb98821Smpi {
13573d65fce9Smpi 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
13583d65fce9Smpi 	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);
13593d65fce9Smpi 
1360f74c7769Sjasper 	switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
13616cb98821Smpi 	case UE_CONTROL:
13623d65fce9Smpi 		mep = 0;
13633d65fce9Smpi 		atl = 8;
13643d65fce9Smpi 		break;
13656cb98821Smpi 	case UE_INTERRUPT:
13666cb98821Smpi 	case UE_ISOCHRONOUS:
13673d65fce9Smpi 		if (pipe->device->speed == USB_SPEED_SUPER) {
13683d65fce9Smpi 			/*  XXX Read the companion descriptor */
13696cb98821Smpi 		}
13706cb98821Smpi 
13719d91e500Spatrick 		mep = (UE_GET_TRANS(mps) + 1) * UE_GET_SIZE(mps);
13729d91e500Spatrick 		atl = mep;
13733d65fce9Smpi 		break;
13743d65fce9Smpi 	case UE_BULK:
13753d65fce9Smpi 	default:
13763d65fce9Smpi 		mep = 0;
13773d65fce9Smpi 		atl = 0;
13783d65fce9Smpi 	}
13796cb98821Smpi 
13803d65fce9Smpi 	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
13816cb98821Smpi }
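
/*
 * Worked example (illustrative endpoint, not taken from this driver):
 * a high-speed interrupt endpoint with wMaxPacketSize 0x1400, i.e.
 * 1024 bytes plus 2 additional transactions per microframe, gives
 * mep = atl = (2 + 1) * 1024 = 3072, so both the Max ESIT Payload and
 * the Average TRB Length fields of the TX info word are set to 3072.
 */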
13826cb98821Smpi 
138386ca6da3Smpi static inline uint32_t
138486ca6da3Smpi xhci_linear_interval(usb_endpoint_descriptor_t *ed)
138538ff87f6Sstsp {
138686ca6da3Smpi 	uint32_t ival = min(max(1, ed->bInterval), 255);
138738ff87f6Sstsp 
138886ca6da3Smpi 	return (fls(ival) - 1);
138938ff87f6Sstsp }
139038ff87f6Sstsp 
139186ca6da3Smpi static inline uint32_t
139286ca6da3Smpi xhci_exponential_interval(usb_endpoint_descriptor_t *ed)
139386ca6da3Smpi {
139486ca6da3Smpi 	uint32_t ival = min(max(1, ed->bInterval), 16);
139586ca6da3Smpi 
139686ca6da3Smpi 	return (ival - 1);
139786ca6da3Smpi }
139886ca6da3Smpi /*
139986ca6da3Smpi  * Return interval for endpoint expressed in 2^(ival) * 125us.
140086ca6da3Smpi  *
140186ca6da3Smpi  * See section 6.2.3.6 of xHCI r1.1 Specification for more details.
140286ca6da3Smpi  */
140386ca6da3Smpi uint32_t
140486ca6da3Smpi xhci_pipe_interval(struct usbd_pipe *pipe)
140586ca6da3Smpi {
140686ca6da3Smpi 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
140786ca6da3Smpi 	uint8_t speed = pipe->device->speed;
140886ca6da3Smpi 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
140986ca6da3Smpi 	uint32_t ival;
141086ca6da3Smpi 
141186ca6da3Smpi 	if (xfertype == UE_CONTROL || xfertype == UE_BULK) {
141286ca6da3Smpi 		/* Control and Bulk endpoints never NAK. */
141386ca6da3Smpi 		ival = 0;
141486ca6da3Smpi 	} else {
141586ca6da3Smpi 		switch (speed) {
141686ca6da3Smpi 		case USB_SPEED_FULL:
141786ca6da3Smpi 			if (xfertype == UE_ISOCHRONOUS) {
141886ca6da3Smpi 				/* Convert 1-2^(15)ms into 3-18 */
141986ca6da3Smpi 				ival = xhci_exponential_interval(ed) + 3;
142086ca6da3Smpi 				break;
142186ca6da3Smpi 			}
142286ca6da3Smpi 			/* FALLTHROUGH */
142386ca6da3Smpi 		case USB_SPEED_LOW:
142486ca6da3Smpi 			/* Convert 1-255ms into 3-10 */
142586ca6da3Smpi 			ival = xhci_linear_interval(ed) + 3;
142686ca6da3Smpi 			break;
142786ca6da3Smpi 		case USB_SPEED_HIGH:
142886ca6da3Smpi 		case USB_SPEED_SUPER:
142986ca6da3Smpi 		default:
143086ca6da3Smpi 			/* Convert 1-2^(15) * 125us into 0-15 */
143186ca6da3Smpi 			ival = xhci_exponential_interval(ed);
143286ca6da3Smpi 			break;
143386ca6da3Smpi 		}
143486ca6da3Smpi 	}
143586ca6da3Smpi 
1436d87e98b4Smpi 	KASSERT(ival <= 15);
143786ca6da3Smpi 	return (XHCI_EPCTX_SET_IVAL(ival));
143838ff87f6Sstsp }
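
/*
 * Worked examples (illustrative): a full-speed interrupt endpoint with
 * bInterval 32 (ms) takes the linear path, fls(32) - 1 = 5, plus 3
 * gives ival = 8, i.e. 2^8 * 125us = 32ms; a high-speed endpoint with
 * bInterval 4 takes the exponential path, 4 - 1 = 3, i.e.
 * 2^3 * 125us = 1ms.
 */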
143938ff87f6Sstsp 
1440438cc1d7Smpi uint32_t
1441438cc1d7Smpi xhci_pipe_maxburst(struct usbd_pipe *pipe)
1442438cc1d7Smpi {
1443438cc1d7Smpi 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1444438cc1d7Smpi 	uint32_t mps = UGETW(ed->wMaxPacketSize);
1445438cc1d7Smpi 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1446438cc1d7Smpi 	uint32_t maxb = 0;
1447438cc1d7Smpi 
1448438cc1d7Smpi 	switch (pipe->device->speed) {
1449438cc1d7Smpi 	case USB_SPEED_HIGH:
1450438cc1d7Smpi 		if (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)
1451438cc1d7Smpi 			maxb = UE_GET_TRANS(mps);
1452438cc1d7Smpi 		break;
1453438cc1d7Smpi 	case USB_SPEED_SUPER:
1454438cc1d7Smpi 		/*  XXX Read the companion descriptor */
1455438cc1d7Smpi 	default:
1456438cc1d7Smpi 		break;
1457438cc1d7Smpi 	}
1458438cc1d7Smpi 
1459438cc1d7Smpi 	return (maxb);
1460438cc1d7Smpi }
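
/*
 * Worked example (illustrative): a high-speed isochronous endpoint with
 * wMaxPacketSize 0x1400 advertises 2 additional transaction
 * opportunities per microframe, so UE_GET_TRANS() yields a Max Burst
 * Size of 2; all other speed/type combinations currently return 0 here
 * until the SuperSpeed companion descriptor is parsed (see XXX above).
 */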
1461438cc1d7Smpi 
146224aa8717Skrw static inline uint32_t
146324aa8717Skrw xhci_last_valid_dci(struct xhci_pipe **pipes, struct xhci_pipe *ignore)
146424aa8717Skrw {
146524aa8717Skrw 	struct xhci_pipe *lxp;
146624aa8717Skrw 	int i;
146724aa8717Skrw 
146824aa8717Skrw 	/* Find the last valid Endpoint Context. */
146924aa8717Skrw 	for (i = 30; i >= 0; i--) {
147024aa8717Skrw 		lxp = pipes[i];
147124aa8717Skrw 		if (lxp != NULL && lxp != ignore)
147224aa8717Skrw 			return XHCI_SCTX_DCI(lxp->dci);
147324aa8717Skrw 	}
147424aa8717Skrw 
147524aa8717Skrw 	return 0;
147624aa8717Skrw }
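
/*
 * Worked example (illustrative): with pipes open at DCI 1 and DCI 3,
 * xhci_last_valid_dci(sdev->pipes, NULL) returns XHCI_SCTX_DCI(3), the
 * Context Entries value for the slot context; passing the DCI 3 pipe
 * as "ignore" while closing it drops the result back to XHCI_SCTX_DCI(1).
 */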
147724aa8717Skrw 
147838ff87f6Sstsp int
1479ffe08da5Smpi xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
14806cb98821Smpi {
14816cb98821Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
14826cb98821Smpi 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
14836cb98821Smpi 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
148486ca6da3Smpi 	uint32_t mps = UGETW(ed->wMaxPacketSize);
14856cb98821Smpi 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1486438cc1d7Smpi 	uint8_t speed, cerr = 0;
1487d3068fdbSmpi 	uint32_t route = 0, rhport = 0;
14882fa48b76Smpi 	struct usbd_device *hub;
14896cb98821Smpi 
14902fa48b76Smpi 	/*
14912fa48b76Smpi 	 * Calculate the Route String.  Assume that there is no hub with
14922fa48b76Smpi 	 * more than 15 ports and that they all have a depth < 6.  See
14932fa48b76Smpi 	 * section 8.9 of USB 3.1 Specification for more details.
14942fa48b76Smpi 	 */
14952fa48b76Smpi 	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
14962fa48b76Smpi 		uint32_t port = hub->powersrc->portno;
14972fa48b76Smpi 		uint32_t depth = hub->myhub->depth;
14982fa48b76Smpi 
14992fa48b76Smpi 		route |= port << (4 * (depth - 1));
15002fa48b76Smpi 	}
15012fa48b76Smpi 
15022fa48b76Smpi 	/* Get Root Hub port */
15032fa48b76Smpi 	rhport = hub->powersrc->portno;
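	/*
	 * Worked example (illustrative topology): for a device on port 3 of
	 * an external hub plugged into root hub port 2, the loop above runs
	 * once with depth 1, so route = 0x3 and rhport = 2; a second hub
	 * tier would shift its port number into the next nibble.
	 */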
15042fa48b76Smpi 
15056cb98821Smpi 	switch (pipe->device->speed) {
15066cb98821Smpi 	case USB_SPEED_LOW:
15076cb98821Smpi 		speed = XHCI_SPEED_LOW;
15086cb98821Smpi 		break;
15096cb98821Smpi 	case USB_SPEED_FULL:
15106cb98821Smpi 		speed = XHCI_SPEED_FULL;
15116cb98821Smpi 		break;
15126cb98821Smpi 	case USB_SPEED_HIGH:
15136cb98821Smpi 		speed = XHCI_SPEED_HIGH;
15146cb98821Smpi 		break;
15156cb98821Smpi 	case USB_SPEED_SUPER:
15166cb98821Smpi 		speed = XHCI_SPEED_SUPER;
15176cb98821Smpi 		break;
15186cb98821Smpi 	default:
151938ff87f6Sstsp 		return (USBD_INVAL);
15206cb98821Smpi 	}
15216cb98821Smpi 
15226cb98821Smpi 	/* Setup the endpoint context */
15236cb98821Smpi 	if (xfertype != UE_ISOCHRONOUS)
15246cb98821Smpi 		cerr = 3;
15256cb98821Smpi 
15266cb98821Smpi 	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
15276cb98821Smpi 		xfertype |= 0x4;
15286cb98821Smpi 
152986ca6da3Smpi 	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe));
15306cb98821Smpi 	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
1531438cc1d7Smpi 	    XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |
1532438cc1d7Smpi 	    XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |
153386ca6da3Smpi 	    XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)
15346cb98821Smpi 	);
15353d65fce9Smpi 	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
15366cb98821Smpi 	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
1537fcda7eabSmpi 	    DEQPTR(xp->ring) | xp->ring.toggle
15386cb98821Smpi 	);
15396cb98821Smpi 
15404b1a56afSjsg 	/* Unmask the new endpoint */
15416cb98821Smpi 	sdev->input_ctx->drop_flags = 0;
15426cb98821Smpi 	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
15436cb98821Smpi 
15446cb98821Smpi 	/* Setup the slot context */
15454f1fc9aeSmpi 	sdev->slot_ctx->info_lo = htole32(
154624aa8717Skrw 	    xhci_last_valid_dci(sdev->pipes, NULL) | XHCI_SCTX_SPEED(speed) |
15470e5ce33bSmpi 	    XHCI_SCTX_ROUTE(route)
15484f1fc9aeSmpi 	);
15492fa48b76Smpi 	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
15506cb98821Smpi 	sdev->slot_ctx->tt = 0;
15516cb98821Smpi 	sdev->slot_ctx->state = 0;
15526cb98821Smpi 
15539b37ecc5Smpi /* XXX */
15549b37ecc5Smpi #define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
15552fa48b76Smpi 	/*
15562fa48b76Smpi 	 * If we are opening the interrupt pipe of a hub, update its
15572fa48b76Smpi 	 * context before putting it in the CONFIGURED state.
15582fa48b76Smpi 	 */
15594f1fc9aeSmpi 	if (pipe->device->hub != NULL) {
15604f1fc9aeSmpi 		int nports = pipe->device->hub->nports;
15614f1fc9aeSmpi 
15624f1fc9aeSmpi 		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
15634f1fc9aeSmpi 		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));
15649b37ecc5Smpi 
15659b37ecc5Smpi 		if (UHUB_IS_MTT(pipe->device))
15669b37ecc5Smpi 			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));
15679b37ecc5Smpi 
15689b37ecc5Smpi 		sdev->slot_ctx->tt |= htole32(
15699b37ecc5Smpi 		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
15709b37ecc5Smpi 		);
15716cb98821Smpi 	}
15726cb98821Smpi 
15739b37ecc5Smpi 	/*
15749b37ecc5Smpi 	 * If this is a Low or Full Speed device below an external High
15759b37ecc5Smpi 	 * Speed hub, it needs some TT love.
15769b37ecc5Smpi 	 */
15779b37ecc5Smpi 	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
15789b37ecc5Smpi 		struct usbd_device *hshub = pipe->device->myhsport->parent;
15799b37ecc5Smpi 		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;
15809b37ecc5Smpi 
15819b37ecc5Smpi 		if (UHUB_IS_MTT(hshub))
15829b37ecc5Smpi 			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));
15839b37ecc5Smpi 
15849b37ecc5Smpi 		sdev->slot_ctx->tt |= htole32(
15859b37ecc5Smpi 		    XHCI_SCTX_TT_HUB_SID(slot) |
15869b37ecc5Smpi 		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
15879b37ecc5Smpi 		);
15889b37ecc5Smpi 	}
15899b37ecc5Smpi #undef UHUB_IS_MTT
15909b37ecc5Smpi 
159116a9d1e5Smpi 	/* Unmask the slot context */
159216a9d1e5Smpi 	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));
159316a9d1e5Smpi 
1594b067e289Smpi 	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
1595ebf82e03Smpi 	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
159638ff87f6Sstsp 
159738ff87f6Sstsp 	return (0);
1598ffe08da5Smpi }
1599ffe08da5Smpi 
1600ffe08da5Smpi int
1601ffe08da5Smpi xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
1602ffe08da5Smpi {
1603ffe08da5Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
1604ffe08da5Smpi 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1605ffe08da5Smpi 	int error;
1606ffe08da5Smpi 
1607d31b8b3dSmpi #ifdef XHCI_DEBUG
1608d31b8b3dSmpi 	struct usbd_device *dev = pipe->device;
1609d31b8b3dSmpi 	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
1610d31b8b3dSmpi 	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
1611d31b8b3dSmpi 	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
1612d31b8b3dSmpi 	    pipe->endpoint->edesc->bEndpointAddress);
1613d31b8b3dSmpi #endif
1614ffe08da5Smpi 
1615b067e289Smpi 	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
1616ffe08da5Smpi 		return (ENOMEM);
1617ffe08da5Smpi 
1618ffe08da5Smpi 	xp->free_trbs = xp->ring.ntrb;
1619ffe08da5Smpi 	xp->halted = 0;
1620ffe08da5Smpi 
1621ffe08da5Smpi 	sdev->pipes[xp->dci - 1] = xp;
1622ffe08da5Smpi 
162338ff87f6Sstsp 	error = xhci_context_setup(sc, pipe);
162438ff87f6Sstsp 	if (error)
162538ff87f6Sstsp 		return (error);
16266cb98821Smpi 
1627cf73556aSmpi 	if (xp->dci == 1) {
16282fa48b76Smpi 		/*
16292fa48b76Smpi 		 * If we are opening the default pipe, the Slot should
16302fa48b76Smpi 		 * be in the ENABLED state.  Issue an "Address Device"
1631ffe08da5Smpi 		 * with BSR=1 to put the device in the DEFAULT state.
1632ffe08da5Smpi 		 * We cannot jump directly to the ADDRESSED state with
1633fa36d6acSmpi 		 * BSR=0 because some Low/Full speed devices won't accept
1634ffe08da5Smpi 		 * a SET_ADDRESS command before we've read their device
1635ffe08da5Smpi 		 * descriptor.
16362fa48b76Smpi 		 */
1637ffe08da5Smpi 		error = xhci_cmd_set_address(sc, xp->slot,
1638b067e289Smpi 		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
1639cf73556aSmpi 	} else {
1640e5bba15cSmpi 		error = xhci_cmd_configure_ep(sc, xp->slot,
1641b067e289Smpi 		    sdev->ictx_dma.paddr);
1642cf73556aSmpi 	}
1643716b95b6Smpi 
16446cb98821Smpi 	if (error) {
16456cb98821Smpi 		xhci_ring_free(sc, &xp->ring);
16466cb98821Smpi 		return (EIO);
16476cb98821Smpi 	}
16486cb98821Smpi 
16496cb98821Smpi 	return (0);
16506cb98821Smpi }
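
/*
 * Slot state flow implied above (sketch, following section 4.5.3 of the
 * xHCI r1.1 specification): Enable Slot leaves the slot ENABLED,
 * Address Device with BSR=1 (DCI 1, here) moves it to DEFAULT,
 * Address Device with BSR=0 (later, in xhci_setaddr()) moves it to
 * ADDRESSED, and Configure Endpoint (all other DCIs) to CONFIGURED.
 */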
16516cb98821Smpi 
16526cb98821Smpi void
16536cb98821Smpi xhci_pipe_close(struct usbd_pipe *pipe)
16546cb98821Smpi {
16556cb98821Smpi 	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
165624aa8717Skrw 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
16576cb98821Smpi 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
16586cb98821Smpi 
16596cb98821Smpi 	/* Root Hub */
16606cb98821Smpi 	if (pipe->device->depth == 0)
16616cb98821Smpi 		return;
16626cb98821Smpi 
16636cb98821Smpi 	/* Mask the endpoint */
1664e5bba15cSmpi 	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
1665e5bba15cSmpi 	sdev->input_ctx->add_flags = 0;
16666cb98821Smpi 
1667e5bba15cSmpi 	/* Update last valid Endpoint Context */
166824aa8717Skrw 	sdev->slot_ctx->info_lo &= htole32(~XHCI_SCTX_DCI(31));
166924aa8717Skrw 	sdev->slot_ctx->info_lo |= htole32(xhci_last_valid_dci(sdev->pipes, xp));
1670e5bba15cSmpi 
1671e5bba15cSmpi 	/* Clear the Endpoint Context */
1672aa87025fSmpi 	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));
16736cb98821Smpi 
1674b067e289Smpi 	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
1675ebf82e03Smpi 	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
16766cb98821Smpi 
1677b067e289Smpi 	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
1678e5bba15cSmpi 		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));
1679e5bba15cSmpi 
16806cb98821Smpi 	xhci_ring_free(sc, &xp->ring);
1681e5bba15cSmpi 	sdev->pipes[xp->dci - 1] = NULL;
1682e5bba15cSmpi 
16832fa48b76Smpi 	/*
16842fa48b76Smpi 	 * If we are closing the default pipe, the device is probably
16852fa48b76Smpi 	 * gone, so put its slot in the DISABLED state.
16862fa48b76Smpi 	 */
16872fa48b76Smpi 	if (xp->dci == 1) {
1688e5bba15cSmpi 		xhci_cmd_slot_control(sc, &xp->slot, 0);
1689e5bba15cSmpi 		xhci_softdev_free(sc, xp->slot);
1690e5bba15cSmpi 	}
16916cb98821Smpi }
16926cb98821Smpi 
1693ffe08da5Smpi /*
1694ffe08da5Smpi  * Transition a device from DEFAULT to ADDRESSED Slot state; this hook
1695ffe08da5Smpi  * is needed for Low/Full speed devices.
1696ffe08da5Smpi  *
1697ffe08da5Smpi  * See section 4.5.3 of USB 3.1 Specification for more details.
1698ffe08da5Smpi  */
1699ffe08da5Smpi int
1700ffe08da5Smpi xhci_setaddr(struct usbd_device *dev, int addr)
1701ffe08da5Smpi {
1702ffe08da5Smpi 	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
1703ffe08da5Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
1704ffe08da5Smpi 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1705ffe08da5Smpi 	int error;
1706ffe08da5Smpi 
1707ffe08da5Smpi 	/* Root Hub */
1708ffe08da5Smpi 	if (dev->depth == 0)
1709ffe08da5Smpi 		return (0);
1710ffe08da5Smpi 
1711ffe08da5Smpi 	KASSERT(xp->dci == 1);
1712ffe08da5Smpi 
171338ff87f6Sstsp 	error = xhci_context_setup(sc, dev->default_pipe);
171438ff87f6Sstsp 	if (error)
171538ff87f6Sstsp 		return (error);
1716ffe08da5Smpi 
1717b067e289Smpi 	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);
1718ffe08da5Smpi 
1719ffe08da5Smpi #ifdef XHCI_DEBUG
1720ffe08da5Smpi 	if (error == 0) {
1721ffe08da5Smpi 		struct xhci_sctx *sctx;
1722ffe08da5Smpi 		uint8_t addr;
1723ffe08da5Smpi 
1724b067e289Smpi 		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
1725b067e289Smpi 		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);
1726ffe08da5Smpi 
1727ffe08da5Smpi 		/* Get output slot context. */
1728b067e289Smpi 		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
1729ffe08da5Smpi 		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
1730ffe08da5Smpi 		error = (addr == 0);
1731ffe08da5Smpi 
1732ffe08da5Smpi 		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
1733ffe08da5Smpi 	}
1734ffe08da5Smpi #endif
1735ffe08da5Smpi 
1736ffe08da5Smpi 	return (error);
1737ffe08da5Smpi }
1738ffe08da5Smpi 
17396cb98821Smpi struct usbd_xfer *
17406cb98821Smpi xhci_allocx(struct usbd_bus *bus)
17416cb98821Smpi {
174279234da1Smpi 	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
17436cb98821Smpi }
17446cb98821Smpi 
17456cb98821Smpi void
17466cb98821Smpi xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
17476cb98821Smpi {
174879234da1Smpi 	pool_put(xhcixfer, xfer);
17496cb98821Smpi }
17506cb98821Smpi 
17516cb98821Smpi int
17526cb98821Smpi xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
17536cb98821Smpi {
17546cb98821Smpi 	uint64_t *pte;
17556cb98821Smpi 	int error, i;
17566cb98821Smpi 
17576cb98821Smpi 	/* Allocate the required entry for the table. */
1758b067e289Smpi 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
1759b067e289Smpi 	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
1760b067e289Smpi 	    sc->sc_pagesize);
17616cb98821Smpi 	if (error)
17626cb98821Smpi 		return (ENOMEM);
17636cb98821Smpi 
1764b067e289Smpi 	/* Allocate pages. XXX does not need to be contiguous. */
1765b067e289Smpi 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
1766b067e289Smpi 	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
17676cb98821Smpi 	if (error) {
1768b067e289Smpi 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
17696cb98821Smpi 		return (ENOMEM);
17706cb98821Smpi 	}
17716cb98821Smpi 
17726cb98821Smpi 	for (i = 0; i < npage; i++) {
17736cb98821Smpi 		pte[i] = htole64(
1774b067e289Smpi 		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
17756cb98821Smpi 		);
17766cb98821Smpi 	}
1777b067e289Smpi 
1778b067e289Smpi 	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
1779ebf82e03Smpi 	    npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |
1780ebf82e03Smpi 	    BUS_DMASYNC_PREWRITE);
17816cb98821Smpi 
17826cb98821Smpi 	/*  Entry 0 points to the table of scratchpad pointers. */
1783b067e289Smpi 	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
1784b067e289Smpi 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
1785ebf82e03Smpi 	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
17866cb98821Smpi 
17876cb98821Smpi 	sc->sc_spad.npage = npage;
17886cb98821Smpi 
17896cb98821Smpi 	return (0);
17906cb98821Smpi }
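
/*
 * Resulting DCBAA layout (sketch of what this function and
 * xhci_softdev_alloc() set up): entry 0 holds the physical address of
 * the scratchpad pointer table, whose npage entries each point at one
 * sc_pagesize buffer owned by the controller, while entries
 * 1..sc_noslot are filled with each slot's output device context.
 */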
17916cb98821Smpi 
17926cb98821Smpi void
17936cb98821Smpi xhci_scratchpad_free(struct xhci_softc *sc)
17946cb98821Smpi {
17956cb98821Smpi 	sc->sc_dcbaa.segs[0] = 0;
1796b067e289Smpi 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
1797ebf82e03Smpi 	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
17986cb98821Smpi 
1799b067e289Smpi 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
1800b067e289Smpi 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
18016cb98821Smpi }
18026cb98821Smpi 
18036cb98821Smpi int
1804b067e289Smpi xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
1805b067e289Smpi     size_t alignment)
18066cb98821Smpi {
18076cb98821Smpi 	size_t size;
1808b067e289Smpi 	int error;
18096cb98821Smpi 
18106cb98821Smpi 	size = ntrb * sizeof(struct xhci_trb);
18116cb98821Smpi 
1812b067e289Smpi 	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
1813b067e289Smpi 	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
1814b067e289Smpi 	if (error)
1815b067e289Smpi 		return (error);
18166cb98821Smpi 
18176cb98821Smpi 	ring->ntrb = ntrb;
18186cb98821Smpi 
18194d2cc942Smpi 	xhci_ring_reset(sc, ring);
18206cb98821Smpi 
18216cb98821Smpi 	return (0);
18226cb98821Smpi }
18236cb98821Smpi 
18246cb98821Smpi void
18256cb98821Smpi xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
18266cb98821Smpi {
1827b067e289Smpi 	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
18286cb98821Smpi }
18296cb98821Smpi 
18306cb98821Smpi void
18316cb98821Smpi xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
18326cb98821Smpi {
18336cb98821Smpi 	size_t size;
18346cb98821Smpi 
18354d2cc942Smpi 	size = ring->ntrb * sizeof(struct xhci_trb);
18364d2cc942Smpi 
18376cb98821Smpi 	memset(ring->trbs, 0, size);
18386cb98821Smpi 
18396cb98821Smpi 	ring->index = 0;
18406cb98821Smpi 	ring->toggle = XHCI_TRB_CYCLE;
18416cb98821Smpi 
18424d2cc942Smpi 	/*
18434d2cc942Smpi 	 * Since all our rings use only one segment, at least for
18444d2cc942Smpi 	 * the moment, link their tail to their head.
18454d2cc942Smpi 	 */
18464d2cc942Smpi 	if (ring != &sc->sc_evt_ring) {
18474d2cc942Smpi 		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];
18484d2cc942Smpi 
1849b067e289Smpi 		trb->trb_paddr = htole64(ring->dma.paddr);
1850b4bf4808Spatrick 		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG |
1851b4bf4808Spatrick 		    XHCI_TRB_CYCLE);
1852b067e289Smpi 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
1853b067e289Smpi 		    BUS_DMASYNC_PREWRITE);
1854ebf82e03Smpi 	} else
1855ebf82e03Smpi 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
1856ebf82e03Smpi 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
18576cb98821Smpi }
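
/*
 * Resulting single-segment ring layout (sketch): slots 0..ntrb-2 hold
 * normal TRBs while the last slot is a link TRB pointing back at the
 * ring's base, with XHCI_TRB_LINKSEG set so the consumer toggles its
 * cycle state every time it wraps; the event ring is the exception and
 * carries no link TRB.
 */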
18586cb98821Smpi 
18596cb98821Smpi struct xhci_trb*
1860d1df9c46Smpi xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
18616cb98821Smpi {
1862d1df9c46Smpi 	struct xhci_trb *trb = &ring->trbs[ring->index];
18636cb98821Smpi 
1864d1df9c46Smpi 	KASSERT(ring->index < ring->ntrb);
18656cb98821Smpi 
1866d1df9c46Smpi 	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
1867d1df9c46Smpi 	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);
18686cb98821Smpi 
18696cb98821Smpi 	/* Make sure this TRB can be consumed. */
1870d1df9c46Smpi 	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
18716cb98821Smpi 		return (NULL);
18726cb98821Smpi 
1873d1df9c46Smpi 	ring->index++;
18746cb98821Smpi 
1875d1df9c46Smpi 	if (ring->index == ring->ntrb) {
1876d1df9c46Smpi 		ring->index = 0;
1877d1df9c46Smpi 		ring->toggle ^= 1;
1878d1df9c46Smpi 	}
1879d1df9c46Smpi 
1880d1df9c46Smpi 	return (trb);
1881d1df9c46Smpi }
1882d1df9c46Smpi 
1883d1df9c46Smpi struct xhci_trb*
1884861c1bbcSpatrick xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
1885d1df9c46Smpi {
1886b4bf4808Spatrick 	struct xhci_trb *lnk, *trb;
1887d1df9c46Smpi 
1888d1df9c46Smpi 	KASSERT(ring->index < ring->ntrb);
1889d1df9c46Smpi 
1890b4bf4808Spatrick 	/* Setup the link TRB after the previous TRB is done. */
1891b4bf4808Spatrick 	if (ring->index == 0) {
1892b4bf4808Spatrick 		lnk = &ring->trbs[ring->ntrb - 1];
1893b4bf4808Spatrick 		trb = &ring->trbs[ring->ntrb - 2];
1894d1df9c46Smpi 
1895d1df9c46Smpi 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1896ebf82e03Smpi 		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
1897ebf82e03Smpi 		    BUS_DMASYNC_POSTWRITE);
1898d1df9c46Smpi 
1899ca163b3eSpatrick 		lnk->trb_flags &= htole32(~XHCI_TRB_CHAIN);
1900b4bf4808Spatrick 		if (letoh32(trb->trb_flags) & XHCI_TRB_CHAIN)
1901ca163b3eSpatrick 			lnk->trb_flags |= htole32(XHCI_TRB_CHAIN);
1902d1df9c46Smpi 
1903d1df9c46Smpi 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1904d1df9c46Smpi 		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
19056cb98821Smpi 
1906861c1bbcSpatrick 		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);
1907861c1bbcSpatrick 
1908861c1bbcSpatrick 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1909861c1bbcSpatrick 		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
1910b4bf4808Spatrick 	}
1911861c1bbcSpatrick 
1912b4bf4808Spatrick 	trb = &ring->trbs[ring->index++];
1913b4bf4808Spatrick 	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
1914b4bf4808Spatrick 	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
1915b4bf4808Spatrick 	    BUS_DMASYNC_POSTWRITE);
1916b4bf4808Spatrick 
1917b4bf4808Spatrick 	/* Toggle cycle state of the link TRB and skip it. */
1918b4bf4808Spatrick 	if (ring->index == (ring->ntrb - 1)) {
19196cb98821Smpi 		ring->index = 0;
19204d2cc942Smpi 		ring->toggle ^= 1;
19216cb98821Smpi 	}
19226cb98821Smpi 
19236cb98821Smpi 	return (trb);
19246cb98821Smpi }
19256cb98821Smpi 
19266cb98821Smpi struct xhci_trb *
19276cb98821Smpi xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
19286cb98821Smpi     uint8_t *togglep, int last)
19296cb98821Smpi {
19306cb98821Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
19316cb98821Smpi 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
19326cb98821Smpi 
19336cb98821Smpi 	KASSERT(xp->free_trbs >= 1);
19346cb98821Smpi 	xp->free_trbs--;
19356cb98821Smpi 	*togglep = xp->ring.toggle;
19363386cc01Skrw 
19373386cc01Skrw 	switch (last) {
19383386cc01Skrw 	case -1:	/* This will be a zero-length TD. */
19393386cc01Skrw 		xp->pending_xfers[xp->ring.index] = NULL;
194003b1240eSmglocker 		xx->zerotd += 1;
19413386cc01Skrw 		break;
19423386cc01Skrw 	case 0:		/* This will be in a chain. */
19433386cc01Skrw 		xp->pending_xfers[xp->ring.index] = xfer;
19443386cc01Skrw 		xx->index = -2;
19453386cc01Skrw 		xx->ntrb += 1;
19463386cc01Skrw 		break;
19473386cc01Skrw 	case 1:		/* This will terminate a chain. */
19483386cc01Skrw 		xp->pending_xfers[xp->ring.index] = xfer;
19493386cc01Skrw 		xx->index = xp->ring.index;
19503386cc01Skrw 		xx->ntrb += 1;
19513386cc01Skrw 		break;
19523386cc01Skrw 	}
19533386cc01Skrw 
19541032f1e6Smglocker 	xp->trb_processed[xp->ring.index] = TRB_PROCESSED_NO;
19551032f1e6Smglocker 
1956861c1bbcSpatrick 	return (xhci_ring_produce(sc, &xp->ring));
19576cb98821Smpi }
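
/*
 * Illustrative call pattern (an assumption about the callers, not a
 * contract): a transfer split across two data TRBs plus a zero-length
 * TD would call xhci_xfer_get_trb() with last = 0 for the first TRB,
 * last = 1 for the TRB closing the chain (recording its ring index in
 * xx->index), and last = -1 for the zero-length TD, accounted
 * separately through xx->zerotd.
 */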
19586cb98821Smpi 
19596cb98821Smpi int
19606cb98821Smpi xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
19616cb98821Smpi {
19626cb98821Smpi 	struct xhci_trb *trb;
1963ffa66e84Smpi 	int s, error = 0;
19646cb98821Smpi 
1965f10741cdSmpi 	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);
19666cb98821Smpi 
19676cb98821Smpi 	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);
19686cb98821Smpi 
1969861c1bbcSpatrick 	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
1970ffa66e84Smpi 	if (trb == NULL)
1971ffa66e84Smpi 		return (EAGAIN);
19720a259b09Svisa 	trb->trb_paddr = trb0->trb_paddr;
19730a259b09Svisa 	trb->trb_status = trb0->trb_status;
1974b067e289Smpi 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
1975d1df9c46Smpi 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
1976b067e289Smpi 	    BUS_DMASYNC_PREWRITE);
1977b067e289Smpi 
19780a259b09Svisa 	trb->trb_flags = trb0->trb_flags;
19790a259b09Svisa 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
19800a259b09Svisa 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
19810a259b09Svisa 	    BUS_DMASYNC_PREWRITE);
19826cb98821Smpi 
1983ffa66e84Smpi 	if (timeout == 0) {
19846cb98821Smpi 		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
19854d2cc942Smpi 		return (0);
1986ffa66e84Smpi 	}
19874d2cc942Smpi 
1988eeefa845Smpi 	rw_assert_wrlock(&sc->sc_cmd_lock);
19894d2cc942Smpi 
1990ffa66e84Smpi 	s = splusb();
1991ffa66e84Smpi 	sc->sc_cmd_trb = trb;
1992ffa66e84Smpi 	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
199329a2b068Smpi 	error = tsleep_nsec(&sc->sc_cmd_trb, PZERO, "xhcicmd", timeout);
19946cb98821Smpi 	if (error) {
19954d2cc942Smpi #ifdef XHCI_DEBUG
1996d2068140Smpi 		printf("%s: tsleep() = %d\n", __func__, error);
19976cb98821Smpi 		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
19986cb98821Smpi 		xhci_dump_trb(trb);
19996cb98821Smpi #endif
2000f748d231Sgerhard 		KASSERT(sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL);
2001f748d231Sgerhard 		/*
2002f748d231Sgerhard 		 * Just because the timeout expired does not mean that the
2003f748d231Sgerhard 		 * TRB is no longer active.  We could still get an interrupt
2004f748d231Sgerhard 		 * from this TRB later on and then wonder what to do with it.
2005f748d231Sgerhard 		 * We'd rather abort it.
2006f748d231Sgerhard 		 */
2007f748d231Sgerhard 		xhci_command_abort(sc);
2008d2068140Smpi 		sc->sc_cmd_trb = NULL;
2009ffa66e84Smpi 		splx(s);
2010d2068140Smpi 		return (error);
20116cb98821Smpi 	}
2012ffa66e84Smpi 	splx(s);
20136cb98821Smpi 
20146cb98821Smpi 	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));
20156cb98821Smpi 
2016f584fc70Smpi 	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
2017f584fc70Smpi 		return (0);
20186cb98821Smpi 
20194d2cc942Smpi #ifdef XHCI_DEBUG
2020f584fc70Smpi 	printf("%s: event error code=%d, result=%d  \n", DEVNAME(sc),
2021f584fc70Smpi 	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
2022f584fc70Smpi 	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
20236cb98821Smpi 	xhci_dump_trb(trb0);
20246cb98821Smpi #endif
2025f584fc70Smpi 	return (EIO);
20266cb98821Smpi }
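
/*
 * Typical synchronous use, a minimal sketch mirroring the xhci_cmd_*()
 * helpers below (the No-Op command is just an example):
 *
 *	struct xhci_trb trb = { .trb_paddr = 0, .trb_status = 0,
 *	    .trb_flags = htole32(XHCI_CMD_NOOP) };
 *	rw_enter_write(&sc->sc_cmd_lock);
 *	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
 *	rw_exit_write(&sc->sc_cmd_lock);
 *
 * A timeout of 0 only queues the TRB and rings doorbell 0 without
 * sleeping; in the synchronous case the completion code is read back
 * from trb.trb_status.
 */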
20276cb98821Smpi 
20286cb98821Smpi int
20296cb98821Smpi xhci_command_abort(struct xhci_softc *sc)
20306cb98821Smpi {
20316cb98821Smpi 	uint32_t reg;
20326cb98821Smpi 	int i;
20336cb98821Smpi 
20346cb98821Smpi 	reg = XOREAD4(sc, XHCI_CRCR_LO);
20356cb98821Smpi 	if ((reg & XHCI_CRCR_LO_CRR) == 0)
20366cb98821Smpi 		return (0);
20376cb98821Smpi 
20386cb98821Smpi 	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
20396cb98821Smpi 	XOWRITE4(sc, XHCI_CRCR_HI, 0);
20406cb98821Smpi 
2041f748d231Sgerhard 	for (i = 0; i < 2500; i++) {
2042f748d231Sgerhard 		DELAY(100);
20436cb98821Smpi 		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
20446cb98821Smpi 		if (!reg)
20456cb98821Smpi 			break;
20466cb98821Smpi 	}
20476cb98821Smpi 
20486cb98821Smpi 	if (reg) {
20496cb98821Smpi 		printf("%s: command ring abort timeout\n", DEVNAME(sc));
20506cb98821Smpi 		return (1);
20516cb98821Smpi 	}
20526cb98821Smpi 
20536cb98821Smpi 	return (0);
20546cb98821Smpi }
20556cb98821Smpi 
20566cb98821Smpi int
2057e5bba15cSmpi xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
20586cb98821Smpi {
20596cb98821Smpi 	struct xhci_trb trb;
2060eeefa845Smpi 	int error;
20616cb98821Smpi 
2062d31b8b3dSmpi 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
20636cb98821Smpi 
20646cb98821Smpi 	trb.trb_paddr = htole64(addr);
20656cb98821Smpi 	trb.trb_status = 0;
20666cb98821Smpi 	trb.trb_flags = htole32(
20676cb98821Smpi 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
20686cb98821Smpi 	);
20696cb98821Smpi 
2070eeefa845Smpi 	rw_enter_write(&sc->sc_cmd_lock);
2071eeefa845Smpi 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2072eeefa845Smpi 	rw_exit_write(&sc->sc_cmd_lock);
2073eeefa845Smpi 	return (error);
20746cb98821Smpi }
20756cb98821Smpi 
20766cb98821Smpi int
2077e5bba15cSmpi xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
20786cb98821Smpi {
20796cb98821Smpi 	struct xhci_trb trb;
2080eeefa845Smpi 	int error;
20816cb98821Smpi 
2082d31b8b3dSmpi 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
20836cb98821Smpi 
20846cb98821Smpi 	trb.trb_paddr = 0;
20856cb98821Smpi 	trb.trb_status = 0;
20866cb98821Smpi 	trb.trb_flags = htole32(
20876cb98821Smpi 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
20886cb98821Smpi 	);
20896cb98821Smpi 
2090eeefa845Smpi 	rw_enter_write(&sc->sc_cmd_lock);
2091eeefa845Smpi 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2092eeefa845Smpi 	rw_exit_write(&sc->sc_cmd_lock);
2093eeefa845Smpi 	return (error);
20946cb98821Smpi }
20956cb98821Smpi 
20964d2cc942Smpi void
2097fcda7eabSmpi xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
20986cb98821Smpi {
20996cb98821Smpi 	struct xhci_trb trb;
21006cb98821Smpi 
2101d31b8b3dSmpi 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
21026cb98821Smpi 
21036cb98821Smpi 	trb.trb_paddr = 0;
21046cb98821Smpi 	trb.trb_status = 0;
21056cb98821Smpi 	trb.trb_flags = htole32(
21066cb98821Smpi 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
21076cb98821Smpi 	);
21086cb98821Smpi 
21094d2cc942Smpi 	xhci_command_submit(sc, &trb, 0);
21106cb98821Smpi }
21116cb98821Smpi 
21124d2cc942Smpi void
21134d2cc942Smpi xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
21146cb98821Smpi    uint64_t addr)
21156cb98821Smpi {
21166cb98821Smpi 	struct xhci_trb trb;
21176cb98821Smpi 
2118d31b8b3dSmpi 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
21196cb98821Smpi 
21206cb98821Smpi 	trb.trb_paddr = htole64(addr);
21216cb98821Smpi 	trb.trb_status = 0;
21226cb98821Smpi 	trb.trb_flags = htole32(
21236cb98821Smpi 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
21246cb98821Smpi 	);
21256cb98821Smpi 
21264d2cc942Smpi 	xhci_command_submit(sc, &trb, 0);
21276cb98821Smpi }
21286cb98821Smpi 
21296cb98821Smpi int
21306cb98821Smpi xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
21316cb98821Smpi {
21326cb98821Smpi 	struct xhci_trb trb;
2133eeefa845Smpi 	int error;
21346cb98821Smpi 
21356cb98821Smpi 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
21366cb98821Smpi 
21376cb98821Smpi 	trb.trb_paddr = 0;
21386cb98821Smpi 	trb.trb_status = 0;
2139e5bba15cSmpi 	if (enable)
2140e5bba15cSmpi 		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
2141e5bba15cSmpi 	else
21426cb98821Smpi 		trb.trb_flags = htole32(
2143e5bba15cSmpi 			XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
21446cb98821Smpi 		);
21456cb98821Smpi 
2146eeefa845Smpi 	rw_enter_write(&sc->sc_cmd_lock);
2147eeefa845Smpi 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2148eeefa845Smpi 	rw_exit_write(&sc->sc_cmd_lock);
2149eeefa845Smpi 	if (error != 0)
21506cb98821Smpi 		return (EIO);
21516cb98821Smpi 
2152e5bba15cSmpi 	if (enable)
21536cb98821Smpi 		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));
21546cb98821Smpi 
21556cb98821Smpi 	return (0);
21566cb98821Smpi }
21576cb98821Smpi 
21586cb98821Smpi int
2159ffe08da5Smpi xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
2160ffe08da5Smpi     uint32_t bsr)
21616cb98821Smpi {
21626cb98821Smpi 	struct xhci_trb trb;
2163eeefa845Smpi 	int error;
21646cb98821Smpi 
2165ffe08da5Smpi 	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));
21666cb98821Smpi 
21676cb98821Smpi 	trb.trb_paddr = htole64(addr);
21686cb98821Smpi 	trb.trb_status = 0;
21696cb98821Smpi 	trb.trb_flags = htole32(
2170ffe08da5Smpi 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
21716cb98821Smpi 	);
21726cb98821Smpi 
2173eeefa845Smpi 	rw_enter_write(&sc->sc_cmd_lock);
2174eeefa845Smpi 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2175eeefa845Smpi 	rw_exit_write(&sc->sc_cmd_lock);
2176eeefa845Smpi 	return (error);
21776cb98821Smpi }
21786cb98821Smpi 
21796cb98821Smpi #ifdef XHCI_DEBUG
21806cb98821Smpi int
21816cb98821Smpi xhci_cmd_noop(struct xhci_softc *sc)
21826cb98821Smpi {
21836cb98821Smpi 	struct xhci_trb trb;
2184eeefa845Smpi 	int error;
21856cb98821Smpi 
21866cb98821Smpi 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
21876cb98821Smpi 
21886cb98821Smpi 	trb.trb_paddr = 0;
21896cb98821Smpi 	trb.trb_status = 0;
21906cb98821Smpi 	trb.trb_flags = htole32(XHCI_CMD_NOOP);
21916cb98821Smpi 
2192eeefa845Smpi 	rw_enter_write(&sc->sc_cmd_lock);
2193eeefa845Smpi 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2194eeefa845Smpi 	rw_exit_write(&sc->sc_cmd_lock);
2195eeefa845Smpi 	return (error);
21966cb98821Smpi }
21976cb98821Smpi #endif
21986cb98821Smpi 
21996cb98821Smpi int
22006cb98821Smpi xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
22016cb98821Smpi {
22026cb98821Smpi 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
22036cb98821Smpi 	int i, error;
2204b067e289Smpi 	uint8_t *kva;
22056cb98821Smpi 
22066cb98821Smpi 	/*
22076cb98821Smpi 	 * Setup input context.  Even with 64 byte context size, it
22086cb98821Smpi 	 * fits into the smallest supported page size, so use that.
22096cb98821Smpi 	 */
2210b067e289Smpi 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
2211b067e289Smpi 	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
22126cb98821Smpi 	if (error)
22136cb98821Smpi 		return (ENOMEM);
22146cb98821Smpi 
2215b067e289Smpi 	sdev->input_ctx = (struct xhci_inctx *)kva;
2216b067e289Smpi 	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
22176cb98821Smpi 	for (i = 0; i < 31; i++)
22186cb98821Smpi 		sdev->ep_ctx[i] =
2219b067e289Smpi 		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);
22206cb98821Smpi 
22216cb98821Smpi 	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
22226cb98821Smpi 	 slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));
22236cb98821Smpi 
22246cb98821Smpi 	/* Setup output context */
2225b067e289Smpi 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
2226b067e289Smpi 	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
22276cb98821Smpi 	if (error) {
2228b067e289Smpi 		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
22296cb98821Smpi 		return (ENOMEM);
22306cb98821Smpi 	}
22316cb98821Smpi 
22326cb98821Smpi 	memset(&sdev->pipes, 0, sizeof(sdev->pipes));
22336cb98821Smpi 
22346cb98821Smpi 	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
2235b067e289Smpi 	    slot, (long long)sdev->octx_dma.paddr));
22366cb98821Smpi 
2237b067e289Smpi 	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
2238b067e289Smpi 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
2239ebf82e03Smpi 	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
2240ebf82e03Smpi 	    BUS_DMASYNC_PREWRITE);
22416cb98821Smpi 
22426cb98821Smpi 	return (0);
22436cb98821Smpi }
22446cb98821Smpi 
22456cb98821Smpi void
22466cb98821Smpi xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
22476cb98821Smpi {
22486cb98821Smpi 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
22496cb98821Smpi 
22506cb98821Smpi 	sc->sc_dcbaa.segs[slot] = 0;
2251b067e289Smpi 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
2252ebf82e03Smpi 	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
2253ebf82e03Smpi 	    BUS_DMASYNC_PREWRITE);
22546cb98821Smpi 
2255b067e289Smpi 	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
2256b067e289Smpi 	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
22576cb98821Smpi 
22586cb98821Smpi 	memset(sdev, 0, sizeof(struct xhci_soft_dev));
22596cb98821Smpi }
22606cb98821Smpi 
22616cb98821Smpi /* Root hub descriptors. */
22628f1d17e8Snaddy const usb_device_descriptor_t xhci_devd = {
22636cb98821Smpi 	USB_DEVICE_DESCRIPTOR_SIZE,
22646cb98821Smpi 	UDESC_DEVICE,		/* type */
22656cb98821Smpi 	{0x00, 0x03},		/* USB version */
22666cb98821Smpi 	UDCLASS_HUB,		/* class */
22676cb98821Smpi 	UDSUBCLASS_HUB,		/* subclass */
22686cb98821Smpi 	UDPROTO_HSHUBSTT,	/* protocol */
22696cb98821Smpi 	9,			/* max packet */
22706cb98821Smpi 	{0},{0},{0x00,0x01},	/* device id */
22716cb98821Smpi 	1,2,0,			/* string indexes */
22726cb98821Smpi 	1			/* # of configurations */
22736cb98821Smpi };
22746cb98821Smpi 
22756cb98821Smpi const usb_config_descriptor_t xhci_confd = {
22766cb98821Smpi 	USB_CONFIG_DESCRIPTOR_SIZE,
22776cb98821Smpi 	UDESC_CONFIG,
22786cb98821Smpi 	{USB_CONFIG_DESCRIPTOR_SIZE +
22796cb98821Smpi 	 USB_INTERFACE_DESCRIPTOR_SIZE +
22806cb98821Smpi 	 USB_ENDPOINT_DESCRIPTOR_SIZE},
22816cb98821Smpi 	1,
22826cb98821Smpi 	1,
22836cb98821Smpi 	0,
22843a3b7daeSmpi 	UC_BUS_POWERED | UC_SELF_POWERED,
22856cb98821Smpi 	0                      /* max power */
22866cb98821Smpi };
22876cb98821Smpi 
22886cb98821Smpi const usb_interface_descriptor_t xhci_ifcd = {
22896cb98821Smpi 	USB_INTERFACE_DESCRIPTOR_SIZE,
22906cb98821Smpi 	UDESC_INTERFACE,
22916cb98821Smpi 	0,
22926cb98821Smpi 	0,
22936cb98821Smpi 	1,
22946cb98821Smpi 	UICLASS_HUB,
22956cb98821Smpi 	UISUBCLASS_HUB,
229685319fe6Smpi 	UIPROTO_HSHUBSTT,
22976cb98821Smpi 	0
22986cb98821Smpi };
22996cb98821Smpi 
23006cb98821Smpi const usb_endpoint_descriptor_t xhci_endpd = {
23016cb98821Smpi 	USB_ENDPOINT_DESCRIPTOR_SIZE,
23026cb98821Smpi 	UDESC_ENDPOINT,
23036cb98821Smpi 	UE_DIR_IN | XHCI_INTR_ENDPT,
23046cb98821Smpi 	UE_INTERRUPT,
23056cb98821Smpi 	{2, 0},                 /* max 15 ports */
23066cb98821Smpi 	255
23076cb98821Smpi };
23086cb98821Smpi 
23096cb98821Smpi const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
23106cb98821Smpi 	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
23116cb98821Smpi 	UDESC_ENDPOINT_SS_COMP,
23126cb98821Smpi 	0,
23136cb98821Smpi 	0,
231485319fe6Smpi 	{0, 0}
23156cb98821Smpi };
23166cb98821Smpi 
23176cb98821Smpi const usb_hub_descriptor_t xhci_hubd = {
23186cb98821Smpi 	USB_HUB_DESCRIPTOR_SIZE,
23196cb98821Smpi 	UDESC_SS_HUB,
23206cb98821Smpi 	0,
23216cb98821Smpi 	{0,0},
23226cb98821Smpi 	0,
23236cb98821Smpi 	0,
23246cb98821Smpi 	{0},
23256cb98821Smpi };
23266cb98821Smpi 
23276cb98821Smpi void
23286cb98821Smpi xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
23296cb98821Smpi {
23301be52566Smpi 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
23311be52566Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
23321be52566Smpi 	int error;
23331be52566Smpi 
233499c58b9fSmpi 	splsoftassert(IPL_SOFTUSB);
23356cb98821Smpi 
2336f584fc70Smpi 	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
233799c58b9fSmpi 	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
233899c58b9fSmpi 	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));
2339abb5f851Smpi 
23401be52566Smpi 	/* XXX The stack should not call abort() in this case. */
23417ff307aeSmpi 	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
23426cb98821Smpi 		xfer->status = status;
23437ff307aeSmpi 		timeout_del(&xfer->timeout_handle);
23447ff307aeSmpi 		usb_rem_task(xfer->device, &xfer->abort_task);
23451be52566Smpi 		usb_transfer_complete(xfer);
23461be52566Smpi 		return;
23471be52566Smpi 	}
23481be52566Smpi 
23491be52566Smpi 	/* Transfer is already done. */
23501be52566Smpi 	if (xfer->status != USBD_IN_PROGRESS) {
23511be52566Smpi 		DPRINTF(("%s: already done \n", __func__));
23521be52566Smpi 		return;
23531be52566Smpi 	}
23541be52566Smpi 
23551be52566Smpi 	/* Prevent any timeout to kick in. */
23561be52566Smpi 	/* Prevent any timeout from kicking in. */
23571be52566Smpi 	usb_rem_task(xfer->device, &xfer->abort_task);
23581be52566Smpi 
23591be52566Smpi 	/* Indicate that we are aborting this transfer. */
2360ce775a50Smpi 	xp->halted = status;
23611be52566Smpi 	xp->aborted_xfer = xfer;
23621be52566Smpi 
23631be52566Smpi 	/* Stop the endpoint and wait until the hardware says so. */
2364c3b0c434Smpi 	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) {
23651be52566Smpi 		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));
2366c3b0c434Smpi 		/* Assume the device is gone. */
23675619e852Smpi 		xp->halted = 0;
23685619e852Smpi 		xp->aborted_xfer = NULL;
2369c3b0c434Smpi 		xfer->status = status;
2370c3b0c434Smpi 		usb_transfer_complete(xfer);
2371c3b0c434Smpi 		return;
2372c3b0c434Smpi 	}
23731be52566Smpi 
23741be52566Smpi 	/*
23751be52566Smpi 	 * The transfer was already completed when we stopped the
23761be52566Smpi 	 * endpoint, no need to move the dequeue pointer past its
23771be52566Smpi 	 * TRBs.
23781be52566Smpi 	 */
23791be52566Smpi 	if (xp->aborted_xfer == NULL) {
23801be52566Smpi 		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
23811be52566Smpi 		xp->halted = 0;
23821be52566Smpi 		return;
23831be52566Smpi 	}
23841be52566Smpi 
23851be52566Smpi 	/*
23861be52566Smpi 	 * At this stage the endpoint has been stopped, so update its
23871be52566Smpi 	 * dequeue pointer past the last TRB of the transfer.
23881be52566Smpi 	 *
2389fa36d6acSmpi 	 * Note: This assumes that only one transfer per endpoint has
23901be52566Smpi 	 *	 pending TRBs on the ring.
23911be52566Smpi 	 */
23921be52566Smpi 	xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
23931be52566Smpi 	    DEQPTR(xp->ring) | xp->ring.toggle);
239429a2b068Smpi 	error = tsleep_nsec(xp, PZERO, "xhciab", XHCI_CMD_TIMEOUT);
23951be52566Smpi 	if (error)
23961be52566Smpi 		printf("%s: timeout aborting transfer\n", DEVNAME(sc));
23976cb98821Smpi }
23986cb98821Smpi 
23996cb98821Smpi void
24006cb98821Smpi xhci_timeout(void *addr)
24016cb98821Smpi {
24026cb98821Smpi 	struct usbd_xfer *xfer = addr;
24031be52566Smpi 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
24041be52566Smpi 
24051be52566Smpi 	if (sc->sc_bus.dying) {
24061be52566Smpi 		xhci_timeout_task(addr);
24071be52566Smpi 		return;
24081be52566Smpi 	}
24091be52566Smpi 
24101be52566Smpi 	usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
24111be52566Smpi 	    USB_TASK_TYPE_ABORT);
24121be52566Smpi 	usb_add_task(xfer->device, &xfer->abort_task);
24131be52566Smpi }
24141be52566Smpi 
24151be52566Smpi void
24161be52566Smpi xhci_timeout_task(void *addr)
24171be52566Smpi {
24181be52566Smpi 	struct usbd_xfer *xfer = addr;
241999c58b9fSmpi 	int s;
24206cb98821Smpi 
242199c58b9fSmpi 	s = splusb();
24226cb98821Smpi 	xhci_abort_xfer(xfer, USBD_TIMEOUT);
242399c58b9fSmpi 	splx(s);
24246cb98821Smpi }
24256cb98821Smpi 
24266cb98821Smpi usbd_status
24276cb98821Smpi xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
24286cb98821Smpi {
24296cb98821Smpi 	usbd_status err;
24306cb98821Smpi 
24316cb98821Smpi 	err = usb_insert_transfer(xfer);
24326cb98821Smpi 	if (err)
24336cb98821Smpi 		return (err);
24346cb98821Smpi 
24356cb98821Smpi 	return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
24366cb98821Smpi }
24376cb98821Smpi 
24386cb98821Smpi usbd_status
24396cb98821Smpi xhci_root_ctrl_start(struct usbd_xfer *xfer)
24406cb98821Smpi {
24416cb98821Smpi 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
24426cb98821Smpi 	usb_port_status_t ps;
24436cb98821Smpi 	usb_device_request_t *req;
24446cb98821Smpi 	void *buf = NULL;
24458f1d17e8Snaddy 	usb_device_descriptor_t devd;
24466cb98821Smpi 	usb_hub_descriptor_t hubd;
24476cb98821Smpi 	usbd_status err;
24486cb98821Smpi 	int s, len, value, index;
24496cb98821Smpi 	int l, totlen = 0;
24506cb98821Smpi 	int port, i;
24516cb98821Smpi 	uint32_t v;
24526cb98821Smpi 
24536cb98821Smpi 	KASSERT(xfer->rqflags & URQ_REQUEST);
24546cb98821Smpi 
24556cb98821Smpi 	if (sc->sc_bus.dying)
24566cb98821Smpi 		return (USBD_IOERROR);
24576cb98821Smpi 
24586cb98821Smpi 	req = &xfer->request;
24596cb98821Smpi 
24606cb98821Smpi 	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
24616cb98821Smpi 	    req->bmRequestType, req->bRequest));
24626cb98821Smpi 
24636cb98821Smpi 	len = UGETW(req->wLength);
24646cb98821Smpi 	value = UGETW(req->wValue);
24656cb98821Smpi 	index = UGETW(req->wIndex);
24666cb98821Smpi 
24676cb98821Smpi 	if (len != 0)
24686cb98821Smpi 		buf = KERNADDR(&xfer->dmabuf, 0);
24696cb98821Smpi 
24706cb98821Smpi #define C(x,y) ((x) | ((y) << 8))
24716cb98821Smpi 	switch(C(req->bRequest, req->bmRequestType)) {
24726cb98821Smpi 	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
24736cb98821Smpi 	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
24746cb98821Smpi 	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
24756cb98821Smpi 		/*
24766cb98821Smpi 		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
24776cb98821Smpi 		 * for the integrated root hub.
24786cb98821Smpi 		 */
24796cb98821Smpi 		break;
24806cb98821Smpi 	case C(UR_GET_CONFIG, UT_READ_DEVICE):
24816cb98821Smpi 		if (len > 0) {
24826cb98821Smpi 			*(uint8_t *)buf = sc->sc_conf;
24836cb98821Smpi 			totlen = 1;
24846cb98821Smpi 		}
24856cb98821Smpi 		break;
24866cb98821Smpi 	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
24876cb98821Smpi 		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
24886cb98821Smpi 		switch(value >> 8) {
24896cb98821Smpi 		case UDESC_DEVICE:
24906cb98821Smpi 			if ((value & 0xff) != 0) {
24916cb98821Smpi 				err = USBD_IOERROR;
24926cb98821Smpi 				goto ret;
24936cb98821Smpi 			}
24948f1d17e8Snaddy 			devd = xhci_devd;
24958f1d17e8Snaddy 			USETW(devd.idVendor, sc->sc_id_vendor);
24966cb98821Smpi 			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
24978f1d17e8Snaddy 			memcpy(buf, &devd, l);
24986cb98821Smpi 			break;
24996cb98821Smpi 		/*
25006cb98821Smpi 		 * We can't really operate at another speed, but the spec says
25016cb98821Smpi 		 * we need this descriptor.
25026cb98821Smpi 		 */
25036cb98821Smpi 		case UDESC_OTHER_SPEED_CONFIGURATION:
25046cb98821Smpi 		case UDESC_CONFIG:
25056cb98821Smpi 			if ((value & 0xff) != 0) {
25066cb98821Smpi 				err = USBD_IOERROR;
25076cb98821Smpi 				goto ret;
25086cb98821Smpi 			}
25096cb98821Smpi 			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
25106cb98821Smpi 			memcpy(buf, &xhci_confd, l);
25116cb98821Smpi 			((usb_config_descriptor_t *)buf)->bDescriptorType =
25126cb98821Smpi 			    value >> 8;
25136cb98821Smpi 			buf = (char *)buf + l;
25146cb98821Smpi 			len -= l;
25156cb98821Smpi 			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
25166cb98821Smpi 			totlen += l;
25176cb98821Smpi 			memcpy(buf, &xhci_ifcd, l);
25186cb98821Smpi 			buf = (char *)buf + l;
25196cb98821Smpi 			len -= l;
25206cb98821Smpi 			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
25216cb98821Smpi 			totlen += l;
25226cb98821Smpi 			memcpy(buf, &xhci_endpd, l);
25236cb98821Smpi 			break;
25246cb98821Smpi 		case UDESC_STRING:
25256cb98821Smpi 			if (len == 0)
25266cb98821Smpi 				break;
25276cb98821Smpi 			*(u_int8_t *)buf = 0;
25286cb98821Smpi 			totlen = 1;
25296cb98821Smpi 			switch (value & 0xff) {
25306cb98821Smpi 			case 0: /* Language table */
25316cb98821Smpi 				totlen = usbd_str(buf, len, "\001");
25326cb98821Smpi 				break;
25336cb98821Smpi 			case 1: /* Vendor */
25346cb98821Smpi 				totlen = usbd_str(buf, len, sc->sc_vendor);
25356cb98821Smpi 				break;
25366cb98821Smpi 			case 2: /* Product */
2537f501d196Smpi 				totlen = usbd_str(buf, len, "xHCI root hub");
25386cb98821Smpi 				break;
25396cb98821Smpi 			}
25406cb98821Smpi 			break;
25416cb98821Smpi 		default:
25426cb98821Smpi 			err = USBD_IOERROR;
25436cb98821Smpi 			goto ret;
25446cb98821Smpi 		}
25456cb98821Smpi 		break;
25466cb98821Smpi 	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
25476cb98821Smpi 		if (len > 0) {
25486cb98821Smpi 			*(uint8_t *)buf = 0;
25496cb98821Smpi 			totlen = 1;
25506cb98821Smpi 		}
25516cb98821Smpi 		break;
25526cb98821Smpi 	case C(UR_GET_STATUS, UT_READ_DEVICE):
25536cb98821Smpi 		if (len > 1) {
25546cb98821Smpi 			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
25556cb98821Smpi 			totlen = 2;
25566cb98821Smpi 		}
25576cb98821Smpi 		break;
25586cb98821Smpi 	case C(UR_GET_STATUS, UT_READ_INTERFACE):
25596cb98821Smpi 	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
25606cb98821Smpi 		if (len > 1) {
25616cb98821Smpi 			USETW(((usb_status_t *)buf)->wStatus, 0);
25626cb98821Smpi 			totlen = 2;
25636cb98821Smpi 		}
25646cb98821Smpi 		break;
25656cb98821Smpi 	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
25666cb98821Smpi 		if (value >= USB_MAX_DEVICES) {
25676cb98821Smpi 			err = USBD_IOERROR;
25686cb98821Smpi 			goto ret;
25696cb98821Smpi 		}
25706cb98821Smpi 		break;
25716cb98821Smpi 	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
25726cb98821Smpi 		if (value != 0 && value != 1) {
25736cb98821Smpi 			err = USBD_IOERROR;
25746cb98821Smpi 			goto ret;
25756cb98821Smpi 		}
25766cb98821Smpi 		sc->sc_conf = value;
25776cb98821Smpi 		break;
25786cb98821Smpi 	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
25796cb98821Smpi 		break;
25806cb98821Smpi 	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
25816cb98821Smpi 	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
25826cb98821Smpi 	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
25836cb98821Smpi 		err = USBD_IOERROR;
25846cb98821Smpi 		goto ret;
25856cb98821Smpi 	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
25866cb98821Smpi 		break;
25876cb98821Smpi 	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
25886cb98821Smpi 		break;
25896cb98821Smpi 	/* Hub requests */
25906cb98821Smpi 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
25916cb98821Smpi 		break;
25926cb98821Smpi 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
25936cb98821Smpi 		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
25946cb98821Smpi 		    "port=%d feature=%d\n", index, value));
25956cb98821Smpi 		if (index < 1 || index > sc->sc_noport) {
25966cb98821Smpi 			err = USBD_IOERROR;
25976cb98821Smpi 			goto ret;
25986cb98821Smpi 		}
25996cb98821Smpi 		port = XHCI_PORTSC(index);
26006cb98821Smpi 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
26016cb98821Smpi 		switch (value) {
26026cb98821Smpi 		case UHF_PORT_ENABLE:
26036cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_PED);
26046cb98821Smpi 			break;
26056cb98821Smpi 		case UHF_PORT_SUSPEND:
26066cb98821Smpi 			/* TODO */
26076cb98821Smpi 			break;
26086cb98821Smpi 		case UHF_PORT_POWER:
26096cb98821Smpi 			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
26106cb98821Smpi 			break;
26116cb98821Smpi 		case UHF_PORT_INDICATOR:
26126cb98821Smpi 			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
26136cb98821Smpi 			break;
26146cb98821Smpi 		case UHF_C_PORT_CONNECTION:
26156cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_CSC);
26166cb98821Smpi 			break;
26176cb98821Smpi 		case UHF_C_PORT_ENABLE:
26186cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_PEC);
26196cb98821Smpi 			break;
26206cb98821Smpi 		case UHF_C_PORT_SUSPEND:
2621a0a34e67Smpi 		case UHF_C_PORT_LINK_STATE:
26226cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_PLC);
26236cb98821Smpi 			break;
26246cb98821Smpi 		case UHF_C_PORT_OVER_CURRENT:
26256cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_OCC);
26266cb98821Smpi 			break;
26276cb98821Smpi 		case UHF_C_PORT_RESET:
26286cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
26296cb98821Smpi 			break;
26300d8ef6c1Skettenis 		case UHF_C_BH_PORT_RESET:
26310d8ef6c1Skettenis 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
26320d8ef6c1Skettenis 			break;
26336cb98821Smpi 		default:
26346cb98821Smpi 			err = USBD_IOERROR;
26356cb98821Smpi 			goto ret;
26366cb98821Smpi 		}
26376cb98821Smpi 		break;
26386cb98821Smpi 
26396cb98821Smpi 	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
26406cb98821Smpi 		if (len == 0)
26416cb98821Smpi 			break;
26426cb98821Smpi 		if ((value & 0xff) != 0) {
26436cb98821Smpi 			err = USBD_IOERROR;
26446cb98821Smpi 			goto ret;
26456cb98821Smpi 		}
26466cb98821Smpi 		v = XREAD4(sc, XHCI_HCCPARAMS);
26476cb98821Smpi 		hubd = xhci_hubd;
26486cb98821Smpi 		hubd.bNbrPorts = sc->sc_noport;
26496cb98821Smpi 		USETW(hubd.wHubCharacteristics,
26506cb98821Smpi 		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
26516cb98821Smpi 		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
26526cb98821Smpi 		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
26536cb98821Smpi 		for (i = 1; i <= sc->sc_noport; i++) {
26546cb98821Smpi 			v = XOREAD4(sc, XHCI_PORTSC(i));
26556cb98821Smpi 			if (v & XHCI_PS_DR)
26566cb98821Smpi 				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
26576cb98821Smpi 		}
26586cb98821Smpi 		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
26596cb98821Smpi 		l = min(len, hubd.bDescLength);
26606cb98821Smpi 		totlen = l;
26616cb98821Smpi 		memcpy(buf, &hubd, l);
26626cb98821Smpi 		break;
26636cb98821Smpi 	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
26646cb98821Smpi 		if (len != 16) {
26656cb98821Smpi 			err = USBD_IOERROR;
26666cb98821Smpi 			goto ret;
26676cb98821Smpi 		}
26686cb98821Smpi 		memset(buf, 0, len);
26696cb98821Smpi 		totlen = len;
26706cb98821Smpi 		break;
26716cb98821Smpi 	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
26726cb98821Smpi 		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
26736cb98821Smpi 		    index));
26746cb98821Smpi 		if (index < 1 || index > sc->sc_noport) {
26756cb98821Smpi 			err = USBD_IOERROR;
26766cb98821Smpi 			goto ret;
26776cb98821Smpi 		}
26786cb98821Smpi 		if (len != 4) {
26796cb98821Smpi 			err = USBD_IOERROR;
26806cb98821Smpi 			goto ret;
26816cb98821Smpi 		}
26826cb98821Smpi 		v = XOREAD4(sc, XHCI_PORTSC(index));
26836cb98821Smpi 		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
2684fb72199cSmpi 		i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v));
26856cb98821Smpi 		switch (XHCI_PS_SPEED(v)) {
26866cb98821Smpi 		case XHCI_SPEED_FULL:
2687fb72199cSmpi 			i |= UPS_FULL_SPEED;
26886cb98821Smpi 			break;
26896cb98821Smpi 		case XHCI_SPEED_LOW:
2690fb72199cSmpi 			i |= UPS_LOW_SPEED;
26916cb98821Smpi 			break;
26926cb98821Smpi 		case XHCI_SPEED_HIGH:
2693fb72199cSmpi 			i |= UPS_HIGH_SPEED;
26946cb98821Smpi 			break;
26956cb98821Smpi 		case XHCI_SPEED_SUPER:
26966cb98821Smpi 		default:
26976cb98821Smpi 			break;
26986cb98821Smpi 		}
26996cb98821Smpi 		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
27006cb98821Smpi 		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
27016cb98821Smpi 		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
27026cb98821Smpi 		if (v & XHCI_PS_PR)	i |= UPS_RESET;
2703fb72199cSmpi 		if (v & XHCI_PS_PP)	{
2704fb72199cSmpi 			if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL &&
2705fb72199cSmpi 			    XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH)
2706fb72199cSmpi 				i |= UPS_PORT_POWER;
2707fb72199cSmpi 			else
2708fb72199cSmpi 				i |= UPS_PORT_POWER_SS;
2709fb72199cSmpi 		}
27106cb98821Smpi 		USETW(ps.wPortStatus, i);
27116cb98821Smpi 		i = 0;
27126cb98821Smpi 		if (v & XHCI_PS_CSC)    i |= UPS_C_CONNECT_STATUS;
27136cb98821Smpi 		if (v & XHCI_PS_PEC)    i |= UPS_C_PORT_ENABLED;
27146cb98821Smpi 		if (v & XHCI_PS_OCC)    i |= UPS_C_OVERCURRENT_INDICATOR;
27156cb98821Smpi 		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
27160d8ef6c1Skettenis 		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
2717fb72199cSmpi 		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
2718fb72199cSmpi 		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
27196cb98821Smpi 		USETW(ps.wPortChange, i);
27206cb98821Smpi 		l = min(len, sizeof ps);
27216cb98821Smpi 		memcpy(buf, &ps, l);
27226cb98821Smpi 		totlen = l;
27236cb98821Smpi 		break;
27246cb98821Smpi 	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
27256cb98821Smpi 		err = USBD_IOERROR;
27266cb98821Smpi 		goto ret;
27276cb98821Smpi 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
27286cb98821Smpi 		break;
27296cb98821Smpi 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
27306cb98821Smpi 
27316cb98821Smpi 		i = index >> 8;
27326cb98821Smpi 		index &= 0x00ff;
27336cb98821Smpi 
27346cb98821Smpi 		if (index < 1 || index > sc->sc_noport) {
27356cb98821Smpi 			err = USBD_IOERROR;
27366cb98821Smpi 			goto ret;
27376cb98821Smpi 		}
27386cb98821Smpi 		port = XHCI_PORTSC(index);
27396cb98821Smpi 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
27406cb98821Smpi 
27416cb98821Smpi 		switch (value) {
27426cb98821Smpi 		case UHF_PORT_ENABLE:
27436cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_PED);
27446cb98821Smpi 			break;
27456cb98821Smpi 		case UHF_PORT_SUSPEND:
27466cb98821Smpi 			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
27476cb98821Smpi 			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
27486cb98821Smpi 				err = USBD_IOERROR;
27496cb98821Smpi 				goto ret;
27506cb98821Smpi 			}
27516cb98821Smpi 			XOWRITE4(sc, port, v |
27526cb98821Smpi 			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
27536cb98821Smpi 			break;
27546cb98821Smpi 		case UHF_PORT_RESET:
27556cb98821Smpi 			DPRINTFN(6, ("reset port %d\n", index));
27566cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_PR);
27576cb98821Smpi 			break;
27586cb98821Smpi 		case UHF_PORT_POWER:
27596cb98821Smpi 			DPRINTFN(3, ("set port power %d\n", index));
27606cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_PP);
27616cb98821Smpi 			break;
27626cb98821Smpi 		case UHF_PORT_INDICATOR:
27636cb98821Smpi 			DPRINTFN(3, ("set port indicator %d\n", index));
27646cb98821Smpi 
27656cb98821Smpi 			v &= ~XHCI_PS_SET_PIC(3);
27666cb98821Smpi 			v |= XHCI_PS_SET_PIC(1);
27676cb98821Smpi 
27686cb98821Smpi 			XOWRITE4(sc, port, v);
27696cb98821Smpi 			break;
27706cb98821Smpi 		case UHF_C_PORT_RESET:
27716cb98821Smpi 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
27726cb98821Smpi 			break;
27730d8ef6c1Skettenis 		case UHF_C_BH_PORT_RESET:
27740d8ef6c1Skettenis 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
27750d8ef6c1Skettenis 			break;
27766cb98821Smpi 		default:
27776cb98821Smpi 			err = USBD_IOERROR;
27786cb98821Smpi 			goto ret;
27796cb98821Smpi 		}
27806cb98821Smpi 		break;
27816cb98821Smpi 	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
27826cb98821Smpi 	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
27836cb98821Smpi 	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
27846cb98821Smpi 	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
27856cb98821Smpi 		break;
27866cb98821Smpi 	default:
27876cb98821Smpi 		err = USBD_IOERROR;
27886cb98821Smpi 		goto ret;
27896cb98821Smpi 	}
27906cb98821Smpi 	xfer->actlen = totlen;
27916cb98821Smpi 	err = USBD_NORMAL_COMPLETION;
27926cb98821Smpi ret:
27936cb98821Smpi 	xfer->status = err;
27946cb98821Smpi 	s = splusb();
27956cb98821Smpi 	usb_transfer_complete(xfer);
27966cb98821Smpi 	splx(s);
2797628113a4Smpi 	return (err);
27986cb98821Smpi }
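/*
 * Informal summary: the switch above emulates the root hub entirely in
 * software.  Standard requests are answered from static descriptors and
 * hub-class port requests are translated into PORTSC accesses; for
 * example, SET_FEATURE(UHF_PORT_RESET) on port N boils down to
 *
 *	v = XOREAD4(sc, XHCI_PORTSC(N)) & ~XHCI_PS_CLEAR;
 *	XOWRITE4(sc, XHCI_PORTSC(N), v | XHCI_PS_PR);
 *
 * where masking XHCI_PS_CLEAR avoids acknowledging the write-1-to-clear
 * change bits by accident.
 */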
27996cb98821Smpi 
28006cb98821Smpi 
28016cb98821Smpi void
28026cb98821Smpi xhci_noop(struct usbd_xfer *xfer)
28036cb98821Smpi {
28046cb98821Smpi }
28056cb98821Smpi 
28066cb98821Smpi 
28076cb98821Smpi usbd_status
28086cb98821Smpi xhci_root_intr_transfer(struct usbd_xfer *xfer)
28096cb98821Smpi {
28106cb98821Smpi 	usbd_status err;
28116cb98821Smpi 
28126cb98821Smpi 	err = usb_insert_transfer(xfer);
28136cb98821Smpi 	if (err)
28146cb98821Smpi 		return (err);
28156cb98821Smpi 
28166cb98821Smpi 	return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
28176cb98821Smpi }
28186cb98821Smpi 
28196cb98821Smpi usbd_status
28206cb98821Smpi xhci_root_intr_start(struct usbd_xfer *xfer)
28216cb98821Smpi {
28226cb98821Smpi 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
28236cb98821Smpi 
28246cb98821Smpi 	if (sc->sc_bus.dying)
28256cb98821Smpi 		return (USBD_IOERROR);
28266cb98821Smpi 
28276cb98821Smpi 	sc->sc_intrxfer = xfer;
28286cb98821Smpi 
28296cb98821Smpi 	return (USBD_IN_PROGRESS);
28306cb98821Smpi }
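/*
 * Note: root hub interrupt transfers never reach the hardware.  The
 * xfer is only recorded in sc_intrxfer here and is expected to be
 * completed from the port status change handling elsewhere in this
 * driver when the controller reports a change.
 */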
28316cb98821Smpi 
28326cb98821Smpi void
28336cb98821Smpi xhci_root_intr_abort(struct usbd_xfer *xfer)
28346cb98821Smpi {
283509eaccf5Smpi 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
28366cb98821Smpi 	int s;
28376cb98821Smpi 
283809eaccf5Smpi 	sc->sc_intrxfer = NULL;
28396cb98821Smpi 
284009eaccf5Smpi 	xfer->status = USBD_CANCELLED;
28416cb98821Smpi 	s = splusb();
28426cb98821Smpi 	usb_transfer_complete(xfer);
28436cb98821Smpi 	splx(s);
28446cb98821Smpi }
28456cb98821Smpi 
28466cb98821Smpi void
28476cb98821Smpi xhci_root_intr_done(struct usbd_xfer *xfer)
28486cb98821Smpi {
28496cb98821Smpi }
28506cb98821Smpi 
2851438cc1d7Smpi /*
2852438cc1d7Smpi  * Number of packets remaining in the TD after the corresponding TRB.
2853438cc1d7Smpi  *
2854438cc1d7Smpi  * Section 4.11.2.4 of xHCI specification r1.1.
2855438cc1d7Smpi  */
28566c7284e6Smpi static inline uint32_t
28576c7284e6Smpi xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len)
28586c7284e6Smpi {
28596c7284e6Smpi 	uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
28606c7284e6Smpi 
28616c7284e6Smpi 	if (len == 0)
28626c7284e6Smpi 		return XHCI_TRB_TDREM(0);
28636c7284e6Smpi 
28641f708f7aSmpi 	npkt = howmany(remain - len, UE_GET_SIZE(mps));
28656c7284e6Smpi 	if (npkt > 31)
28666c7284e6Smpi 		npkt = 31;
28676c7284e6Smpi 
28686c7284e6Smpi 	return XHCI_TRB_TDREM(npkt);
28696c7284e6Smpi }
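/*
 * Worked example (illustrative, assumed values): with a 512-byte
 * wMaxPacketSize, 3000 bytes remaining in the TD and a TRB carrying
 * 1024 bytes, 1976 bytes are left after this TRB, so the TD Size field
 * is howmany(1976, 512) = 4.  On the final TRB remain == len and the
 * field is 0; values above 31 are clamped to 31.
 */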
28706c7284e6Smpi 
2871438cc1d7Smpi /*
2872438cc1d7Smpi  * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC).
2873438cc1d7Smpi  *
2874438cc1d7Smpi  * Section 4.11.2.3  of xHCI specification r1.1.
2875438cc1d7Smpi  */
2876438cc1d7Smpi static inline uint32_t
2877438cc1d7Smpi xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc)
2878438cc1d7Smpi {
2879438cc1d7Smpi 	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2880438cc1d7Smpi 	uint32_t maxb, tdpc, residue, tbc;
2881438cc1d7Smpi 
2882438cc1d7Smpi 	/* Transfer Descriptor Packet Count, section 4.14.1. */
2883438cc1d7Smpi 	tdpc = howmany(len, UE_GET_SIZE(mps));
2884438cc1d7Smpi 	if (tdpc == 0)
2885438cc1d7Smpi 		tdpc = 1;
2886438cc1d7Smpi 
2887438cc1d7Smpi 	/* Transfer Burst Count */
2888438cc1d7Smpi 	maxb = xhci_pipe_maxburst(xfer->pipe);
2889438cc1d7Smpi 	tbc = howmany(tdpc, maxb + 1) - 1;
2890438cc1d7Smpi 
2891438cc1d7Smpi 	/* Transfer Last Burst Packet Count */
2892438cc1d7Smpi 	if (xfer->device->speed == USB_SPEED_SUPER) {
2893438cc1d7Smpi 		residue = tdpc % (maxb + 1);
2894438cc1d7Smpi 		if (residue == 0)
2895438cc1d7Smpi 			*tlbpc = maxb;
2896438cc1d7Smpi 		else
2897438cc1d7Smpi 			*tlbpc = residue - 1;
2898438cc1d7Smpi 	} else {
2899438cc1d7Smpi 		*tlbpc = tdpc - 1;
2900438cc1d7Smpi 	}
2901438cc1d7Smpi 
2902438cc1d7Smpi 	return (tbc);
2903438cc1d7Smpi }
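/*
 * Worked example (illustrative, assumed values): a SuperSpeed
 * isochronous endpoint with a 1024-byte wMaxPacketSize and a max burst
 * of 2 (three packets per burst) sending a 5120-byte frame has
 * tdpc = 5, so TBC = howmany(5, 3) - 1 = 1 (two bursts) and, since
 * 5 % 3 = 2, TLBPC = 2 - 1 = 1 (two packets in the last burst).
 */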
2904438cc1d7Smpi 
29056cb98821Smpi usbd_status
29066cb98821Smpi xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
29076cb98821Smpi {
29086cb98821Smpi 	usbd_status err;
29096cb98821Smpi 
29106cb98821Smpi 	err = usb_insert_transfer(xfer);
29116cb98821Smpi 	if (err)
29126cb98821Smpi 		return (err);
29136cb98821Smpi 
29146cb98821Smpi 	return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
29156cb98821Smpi }
29166cb98821Smpi 
29176cb98821Smpi usbd_status
29186cb98821Smpi xhci_device_ctrl_start(struct usbd_xfer *xfer)
29196cb98821Smpi {
29206cb98821Smpi 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
29216cb98821Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
29226cb98821Smpi 	struct xhci_trb *trb0, *trb;
2923906892b8Smpi 	uint32_t flags, len = UGETW(xfer->request.wLength);
2924861c1bbcSpatrick 	uint8_t toggle;
292516a9d1e5Smpi 	int s;
29266cb98821Smpi 
29276cb98821Smpi 	KASSERT(xfer->rqflags & URQ_REQUEST);
29286cb98821Smpi 
29294d2cc942Smpi 	if (sc->sc_bus.dying || xp->halted)
29306cb98821Smpi 		return (USBD_IOERROR);
29316cb98821Smpi 
29326cb98821Smpi 	if (xp->free_trbs < 3)
29336cb98821Smpi 		return (USBD_NOMEM);
29346cb98821Smpi 
29355343ff5aSpatrick 	if (len != 0)
29365343ff5aSpatrick 		usb_syncmem(&xfer->dmabuf, 0, len,
29375343ff5aSpatrick 		    usbd_xfer_isread(xfer) ?
29385343ff5aSpatrick 		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
29395343ff5aSpatrick 
2940861c1bbcSpatrick 	/* We'll toggle the setup TRB once we're finished with the stages. */
2941861c1bbcSpatrick 	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, 0);
2942861c1bbcSpatrick 
2943861c1bbcSpatrick 	flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | (toggle ^ 1);
2944861c1bbcSpatrick 	if (len != 0) {
2945861c1bbcSpatrick 		if (usbd_xfer_isread(xfer))
2946861c1bbcSpatrick 			flags |= XHCI_TRB_TRT_IN;
2947861c1bbcSpatrick 		else
2948861c1bbcSpatrick 			flags |= XHCI_TRB_TRT_OUT;
2949861c1bbcSpatrick 	}
2950861c1bbcSpatrick 
2951861c1bbcSpatrick 	memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr));
2952861c1bbcSpatrick 	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
2953861c1bbcSpatrick 	trb0->trb_flags = htole32(flags);
2954861c1bbcSpatrick 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2955861c1bbcSpatrick 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
2956861c1bbcSpatrick 	    BUS_DMASYNC_PREWRITE);
29576cb98821Smpi 
29586cb98821Smpi 	/* Data TRB */
29596cb98821Smpi 	if (len != 0) {
29606cb98821Smpi 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);
2961906892b8Smpi 
2962906892b8Smpi 		flags = XHCI_TRB_TYPE_DATA | toggle;
2963906892b8Smpi 		if (usbd_xfer_isread(xfer))
2964906892b8Smpi 			flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP;
2965906892b8Smpi 
29666cb98821Smpi 		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
29676cb98821Smpi 		trb->trb_status = htole32(
29686c7284e6Smpi 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
29696c7284e6Smpi 		    xhci_xfer_tdsize(xfer, len, len)
29706cb98821Smpi 		);
2971906892b8Smpi 		trb->trb_flags = htole32(flags);
29726cb98821Smpi 
2973ebf82e03Smpi 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2974ebf82e03Smpi 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
2975ebf82e03Smpi 		    BUS_DMASYNC_PREWRITE);
29766cb98821Smpi 	}
29776cb98821Smpi 
29786cb98821Smpi 	/* Status TRB */
29796cb98821Smpi 	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);
2980906892b8Smpi 
2981906892b8Smpi 	flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle;
2982906892b8Smpi 	if (len == 0 || !usbd_xfer_isread(xfer))
2983906892b8Smpi 		flags |= XHCI_TRB_DIR_IN;
2984906892b8Smpi 
29856cb98821Smpi 	trb->trb_paddr = 0;
29866cb98821Smpi 	trb->trb_status = htole32(XHCI_TRB_INTR(0));
2987906892b8Smpi 	trb->trb_flags = htole32(flags);
29886cb98821Smpi 
2989ebf82e03Smpi 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2990ebf82e03Smpi 	    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
2991ebf82e03Smpi 	    BUS_DMASYNC_PREWRITE);
2992ebf82e03Smpi 
29936cb98821Smpi 	/* Setup TRB */
2994861c1bbcSpatrick 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
2995b067e289Smpi 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2996ebf82e03Smpi 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
2997b067e289Smpi 	    BUS_DMASYNC_PREWRITE);
299816a9d1e5Smpi 
299916a9d1e5Smpi 	s = splusb();
30006cb98821Smpi 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
30016cb98821Smpi 
30026cb98821Smpi 	xfer->status = USBD_IN_PROGRESS;
3003cdd09757Smpi 	if (xfer->timeout && !sc->sc_bus.use_polling) {
30046cb98821Smpi 		timeout_del(&xfer->timeout_handle);
30056cb98821Smpi 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
30066cb98821Smpi 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
30076cb98821Smpi 	}
300816a9d1e5Smpi 	splx(s);
30096cb98821Smpi 
30106cb98821Smpi 	return (USBD_IN_PROGRESS);
30116cb98821Smpi }
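/*
 * Informal sketch of the control TD built above:
 *
 *	Setup TRB  - the 8-byte request, passed as immediate data (IDT)
 *	Data TRB   - only when wLength != 0, in the request's direction
 *	Status TRB - IN unless the data stage was a read, IOC set
 *
 * The Setup TRB's cycle bit is flipped last so the controller never
 * sees a partially written TD while it walks the ring.
 */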
30126cb98821Smpi 
30136cb98821Smpi void
30146cb98821Smpi xhci_device_ctrl_abort(struct usbd_xfer *xfer)
30156cb98821Smpi {
30166cb98821Smpi 	xhci_abort_xfer(xfer, USBD_CANCELLED);
30176cb98821Smpi }
30186cb98821Smpi 
30196cb98821Smpi usbd_status
30206cb98821Smpi xhci_device_generic_transfer(struct usbd_xfer *xfer)
30216cb98821Smpi {
30226cb98821Smpi 	usbd_status err;
30236cb98821Smpi 
30246cb98821Smpi 	err = usb_insert_transfer(xfer);
30256cb98821Smpi 	if (err)
30266cb98821Smpi 		return (err);
30276cb98821Smpi 
30286cb98821Smpi 	return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
30296cb98821Smpi }
30306cb98821Smpi 
30316cb98821Smpi usbd_status
30326cb98821Smpi xhci_device_generic_start(struct usbd_xfer *xfer)
30336cb98821Smpi {
30346cb98821Smpi 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
30356cb98821Smpi 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
3036906892b8Smpi 	struct xhci_trb *trb0, *trb;
3037906892b8Smpi 	uint32_t len, remain, flags;
3038861c1bbcSpatrick 	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
30396c7284e6Smpi 	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
3040861c1bbcSpatrick 	uint8_t toggle;
30413386cc01Skrw 	int s, i, ntrb, zerotd = 0;
30426cb98821Smpi 
30436cb98821Smpi 	KASSERT(!(xfer->rqflags & URQ_REQUEST));
30446cb98821Smpi 
30454d2cc942Smpi 	if (sc->sc_bus.dying || xp->halted)
30466cb98821Smpi 		return (USBD_IOERROR);
30476cb98821Smpi 
3048906892b8Smpi 	/* How many TRBs do we need for this transfer? */
30491f708f7aSmpi 	ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE);
30506c7284e6Smpi 
30516c7284e6Smpi 	/* If the buffer crosses a 64k boundary, we need one more. */
3052861c1bbcSpatrick 	len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
3053861c1bbcSpatrick 	if (len < xfer->length)
30545ee1ceacSmglocker 		ntrb = howmany(xfer->length - len, XHCI_TRB_MAXSIZE) + 1;
30556c7284e6Smpi 	else
3056861c1bbcSpatrick 		len = xfer->length;
3057906892b8Smpi 
3058906892b8Smpi 	/* If we need to append a zero length packet, we need one more. */
3059906892b8Smpi 	if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) &&
30601f708f7aSmpi 	    (xfer->length % UE_GET_SIZE(mps) == 0))
30613386cc01Skrw 		zerotd = 1;
3062906892b8Smpi 
30633386cc01Skrw 	if (xp->free_trbs < (ntrb + zerotd))
30646cb98821Smpi 		return (USBD_NOMEM);
30656cb98821Smpi 
30665343ff5aSpatrick 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
30675343ff5aSpatrick 	    usbd_xfer_isread(xfer) ?
30685343ff5aSpatrick 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
30695343ff5aSpatrick 
3070861c1bbcSpatrick 	/* We'll toggle the first TRB once we're finished with the chain. */
3071861c1bbcSpatrick 	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
3072861c1bbcSpatrick 	flags = XHCI_TRB_TYPE_NORMAL | (toggle ^ 1);
3073861c1bbcSpatrick 	if (usbd_xfer_isread(xfer))
3074861c1bbcSpatrick 		flags |= XHCI_TRB_ISP;
3075861c1bbcSpatrick 	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
30761aa48deaSmpi 
3077861c1bbcSpatrick 	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
3078861c1bbcSpatrick 	trb0->trb_status = htole32(
3079861c1bbcSpatrick 	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3080861c1bbcSpatrick 	    xhci_xfer_tdsize(xfer, xfer->length, len)
3081861c1bbcSpatrick 	);
3082861c1bbcSpatrick 	trb0->trb_flags = htole32(flags);
3083861c1bbcSpatrick 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3084861c1bbcSpatrick 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3085861c1bbcSpatrick 	    BUS_DMASYNC_PREWRITE);
3086861c1bbcSpatrick 
3087861c1bbcSpatrick 	remain = xfer->length - len;
3088861c1bbcSpatrick 	paddr += len;
3089906892b8Smpi 
3090906892b8Smpi 	/* Chain more TRBs if needed. */
3091906892b8Smpi 	for (i = ntrb - 1; i > 0; i--) {
3092c69c0b43Smpi 		len = min(remain, XHCI_TRB_MAXSIZE);
3093c69c0b43Smpi 
3094906892b8Smpi 		/* Next (or Last) TRB. */
3095906892b8Smpi 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
3096906892b8Smpi 		flags = XHCI_TRB_TYPE_NORMAL | toggle;
30971aa48deaSmpi 		if (usbd_xfer_isread(xfer))
3098906892b8Smpi 			flags |= XHCI_TRB_ISP;
3099906892b8Smpi 		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3100906892b8Smpi 
3101906892b8Smpi 		trb->trb_paddr = htole64(paddr);
3102906892b8Smpi 		trb->trb_status = htole32(
31036c7284e6Smpi 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
31046c7284e6Smpi 		    xhci_xfer_tdsize(xfer, remain, len)
3105906892b8Smpi 		);
3106906892b8Smpi 		trb->trb_flags = htole32(flags);
3107906892b8Smpi 
3108ebf82e03Smpi 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3109ebf82e03Smpi 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3110ebf82e03Smpi 		    BUS_DMASYNC_PREWRITE);
3111ebf82e03Smpi 
3112906892b8Smpi 		remain -= len;
3113906892b8Smpi 		paddr += len;
3114906892b8Smpi 	}
3115906892b8Smpi 
31163386cc01Skrw 	/* Do we need to issue a zero length transfer? */
31173386cc01Skrw 	if (zerotd == 1) {
31183386cc01Skrw 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, -1);
31193386cc01Skrw 		trb->trb_paddr = 0;
31203386cc01Skrw 		trb->trb_status = 0;
31213386cc01Skrw 		trb->trb_flags = htole32(XHCI_TRB_TYPE_NORMAL | XHCI_TRB_IOC | toggle);
31223386cc01Skrw 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
31233386cc01Skrw 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
31243386cc01Skrw 		    BUS_DMASYNC_PREWRITE);
31253386cc01Skrw 	}
31263386cc01Skrw 
3127906892b8Smpi 	/* First TRB. */
3128861c1bbcSpatrick 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
3129b067e289Smpi 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3130ebf82e03Smpi 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3131b067e289Smpi 	    BUS_DMASYNC_PREWRITE);
313216a9d1e5Smpi 
313316a9d1e5Smpi 	s = splusb();
31346cb98821Smpi 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
31356cb98821Smpi 
31366cb98821Smpi 	xfer->status = USBD_IN_PROGRESS;
3137cdd09757Smpi 	if (xfer->timeout && !sc->sc_bus.use_polling) {
31386cb98821Smpi 		timeout_del(&xfer->timeout_handle);
31396cb98821Smpi 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
31406cb98821Smpi 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
31416cb98821Smpi 	}
314216a9d1e5Smpi 	splx(s);
31436cb98821Smpi 
31446cb98821Smpi 	return (USBD_IN_PROGRESS);
31456cb98821Smpi }
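/*
 * Worked example (illustrative, assumed values): a 40000-byte bulk
 * transfer whose buffer starts 0x100 bytes below a 64k boundary needs
 * howmany(40000, XHCI_TRB_MAXSIZE) = 1 TRB plus one more for the
 * boundary crossing, i.e. a 0x100-byte first TRB chained to a
 * 39744-byte one.  If USBD_FORCE_SHORT_XFER is set and the length is a
 * multiple of wMaxPacketSize, an extra zero-length TRB terminates the
 * transfer.
 */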
31466cb98821Smpi 
31476cb98821Smpi void
31486cb98821Smpi xhci_device_generic_done(struct usbd_xfer *xfer)
31496cb98821Smpi {
31506cb98821Smpi 	/* Only happens with interrupt transfers. */
31510e5ce33bSmpi 	if (xfer->pipe->repeat) {
31520e5ce33bSmpi 		xfer->actlen = 0;
31530e5ce33bSmpi 		xhci_device_generic_start(xfer);
31540e5ce33bSmpi 	}
31556cb98821Smpi }
31566cb98821Smpi 
31576cb98821Smpi void
31586cb98821Smpi xhci_device_generic_abort(struct usbd_xfer *xfer)
31596cb98821Smpi {
31606cb98821Smpi 	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);
31616cb98821Smpi 
31626cb98821Smpi 	xhci_abort_xfer(xfer, USBD_CANCELLED);
31636cb98821Smpi }
316438ff87f6Sstsp 
316538ff87f6Sstsp usbd_status
316638ff87f6Sstsp xhci_device_isoc_transfer(struct usbd_xfer *xfer)
316738ff87f6Sstsp {
316838ff87f6Sstsp 	usbd_status err;
316938ff87f6Sstsp 
317038ff87f6Sstsp 	err = usb_insert_transfer(xfer);
317138ff87f6Sstsp 	if (err && err != USBD_IN_PROGRESS)
317238ff87f6Sstsp 		return (err);
317338ff87f6Sstsp 
317438ff87f6Sstsp 	return (xhci_device_isoc_start(xfer));
317538ff87f6Sstsp }
317638ff87f6Sstsp 
317738ff87f6Sstsp usbd_status
317838ff87f6Sstsp xhci_device_isoc_start(struct usbd_xfer *xfer)
317938ff87f6Sstsp {
318038ff87f6Sstsp 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
318138ff87f6Sstsp 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
318238ff87f6Sstsp 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
318338ff87f6Sstsp 	struct xhci_trb *trb0, *trb;
318438ff87f6Sstsp 	uint32_t len, remain, flags;
31857e304809Spatrick 	uint64_t paddr;
31867e304809Spatrick 	uint32_t tbc, tlbpc;
31877e304809Spatrick 	int s, i, j, ntrb = xfer->nframes;
3188861c1bbcSpatrick 	uint8_t toggle;
318938ff87f6Sstsp 
319038ff87f6Sstsp 	KASSERT(!(xfer->rqflags & URQ_REQUEST));
319138ff87f6Sstsp 
319238ff87f6Sstsp 	/*
319338ff87f6Sstsp 	 * To allow continuous transfers, xhci_device_isoc_transfer() starts
319438ff87f6Sstsp 	 * every xfer immediately.  However, usbd_start_next() will still
319538ff87f6Sstsp 	 * call this function when another xfer completes, so check whether
319638ff87f6Sstsp 	 * this one is already in progress before building its TRBs again.
319738ff87f6Sstsp 	 */
319838ff87f6Sstsp 	if (xx->ntrb > 0)
319938ff87f6Sstsp 		return (USBD_IN_PROGRESS);
320038ff87f6Sstsp 
320118f4917eSmglocker 	if (sc->sc_bus.dying || xp->halted)
320218f4917eSmglocker 		return (USBD_IOERROR);
320318f4917eSmglocker 
320418f4917eSmglocker 	/* Isochronous transfers are not supported in polling mode. */
320518f4917eSmglocker 	if (sc->sc_bus.use_polling)
320618f4917eSmglocker 		return (USBD_INVAL);
320718f4917eSmglocker 
32087e304809Spatrick 	paddr = DMAADDR(&xfer->dmabuf, 0);
32097e304809Spatrick 
32107e304809Spatrick 	/* How many TRBs do we need for all transfers? */
32117e304809Spatrick 	for (i = 0, ntrb = 0; i < xfer->nframes; i++) {
32127e304809Spatrick 		/* How many TRBs do we need for this transfer? */
32137e304809Spatrick 		ntrb += howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
32147e304809Spatrick 
32157e304809Spatrick 		/* If the buffer crosses a 64k boundary, we need one more. */
32167e304809Spatrick 		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
32177e304809Spatrick 		if (len < xfer->frlengths[i])
32187e304809Spatrick 			ntrb++;
32197e304809Spatrick 
32207e304809Spatrick 		paddr += xfer->frlengths[i];
32217e304809Spatrick 	}
32227e304809Spatrick 
3223c69c0b43Smpi 	if (xp->free_trbs < ntrb)
322438ff87f6Sstsp 		return (USBD_NOMEM);
322538ff87f6Sstsp 
32265343ff5aSpatrick 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
32275343ff5aSpatrick 	    usbd_xfer_isread(xfer) ?
32285343ff5aSpatrick 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
32295343ff5aSpatrick 
32307e304809Spatrick 	paddr = DMAADDR(&xfer->dmabuf, 0);
323138ff87f6Sstsp 
32327e304809Spatrick 	for (i = 0, trb0 = NULL; i < xfer->nframes; i++) {
32337e304809Spatrick 		/* How many TRBs do we need for this transfer? */
32347e304809Spatrick 		ntrb = howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
3235861c1bbcSpatrick 
32367e304809Spatrick 		/* If the buffer crosses a 64k boundary, we need one more. */
32377e304809Spatrick 		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
32387e304809Spatrick 		if (len < xfer->frlengths[i])
32397e304809Spatrick 			ntrb++;
32407e304809Spatrick 		else
32417e304809Spatrick 			len = xfer->frlengths[i];
32427e304809Spatrick 
32437e304809Spatrick 		KASSERT(ntrb < 3);
32447e304809Spatrick 
32457e304809Spatrick 		/*
32467e304809Spatrick 		 * We'll commit the first TRB once we're finished with the
32477e304809Spatrick 		 * chain.
32487e304809Spatrick 		 */
32497e304809Spatrick 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
32507e304809Spatrick 
3251e0df9922Sratchov 		DPRINTFN(4, ("%s:%d: ring %p trb0_idx %lu ntrb %d paddr %llx "
32527e304809Spatrick 		    "len %u\n", __func__, __LINE__,
32537e304809Spatrick 		    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, paddr,
32547e304809Spatrick 		    len));
32557e304809Spatrick 
32567e304809Spatrick 		/* Record the first TRB so we can toggle later. */
32577e304809Spatrick 		if (trb0 == NULL) {
32587e304809Spatrick 			trb0 = trb;
32597e304809Spatrick 			toggle ^= 1;
32607e304809Spatrick 		}
32617e304809Spatrick 
32627e304809Spatrick 		flags = XHCI_TRB_TYPE_ISOCH | XHCI_TRB_SIA | toggle;
3263861c1bbcSpatrick 		if (usbd_xfer_isread(xfer))
3264861c1bbcSpatrick 			flags |= XHCI_TRB_ISP;
3265861c1bbcSpatrick 		flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3266861c1bbcSpatrick 
32677e304809Spatrick 		tbc = xhci_xfer_tbc(xfer, xfer->frlengths[i], &tlbpc);
3268861c1bbcSpatrick 		flags |= XHCI_TRB_ISOC_TBC(tbc) | XHCI_TRB_ISOC_TLBPC(tlbpc);
3269861c1bbcSpatrick 
32707e304809Spatrick 		trb->trb_paddr = htole64(paddr);
32717e304809Spatrick 		trb->trb_status = htole32(
32727e304809Spatrick 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
32737e304809Spatrick 		    xhci_xfer_tdsize(xfer, xfer->frlengths[i], len)
3274861c1bbcSpatrick 		);
32757e304809Spatrick 		trb->trb_flags = htole32(flags);
32767e304809Spatrick 
3277861c1bbcSpatrick 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
32787e304809Spatrick 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3279861c1bbcSpatrick 		    BUS_DMASYNC_PREWRITE);
328038ff87f6Sstsp 
32817e304809Spatrick 		remain = xfer->frlengths[i] - len;
32827e304809Spatrick 		paddr += len;
328338ff87f6Sstsp 
328438ff87f6Sstsp 		/* Chain more TRBs if needed. */
32857e304809Spatrick 		for (j = ntrb - 1; j > 0; j--) {
32867e304809Spatrick 			len = min(remain, XHCI_TRB_MAXSIZE);
328738ff87f6Sstsp 
328838ff87f6Sstsp 			/* Next (or Last) TRB. */
32897e304809Spatrick 			trb = xhci_xfer_get_trb(sc, xfer, &toggle, (j == 1));
329038ff87f6Sstsp 			flags = XHCI_TRB_TYPE_NORMAL | toggle;
329138ff87f6Sstsp 			if (usbd_xfer_isread(xfer))
329238ff87f6Sstsp 				flags |= XHCI_TRB_ISP;
32937e304809Spatrick 			flags |= (j == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3294e0df9922Sratchov 			DPRINTFN(3, ("%s:%d: ring %p trb0_idx %lu ntrb %d "
32957e304809Spatrick 			    "paddr %llx len %u\n", __func__, __LINE__,
32967e304809Spatrick 			    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb,
32977e304809Spatrick 			    paddr, len));
329838ff87f6Sstsp 
329938ff87f6Sstsp 			trb->trb_paddr = htole64(paddr);
330038ff87f6Sstsp 			trb->trb_status = htole32(
330138ff87f6Sstsp 			    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
330238ff87f6Sstsp 			    xhci_xfer_tdsize(xfer, remain, len)
330338ff87f6Sstsp 			);
330438ff87f6Sstsp 			trb->trb_flags = htole32(flags);
330538ff87f6Sstsp 
3306c69c0b43Smpi 			bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3307c69c0b43Smpi 			    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3308c69c0b43Smpi 			    BUS_DMASYNC_PREWRITE);
3309c69c0b43Smpi 
331038ff87f6Sstsp 			remain -= len;
3311c69c0b43Smpi 			paddr += len;
331238ff87f6Sstsp 		}
331338ff87f6Sstsp 
33147e304809Spatrick 		xfer->frlengths[i] = 0;
33157e304809Spatrick 	}
33167e304809Spatrick 
331738ff87f6Sstsp 	/* First TRB. */
3318861c1bbcSpatrick 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
331938ff87f6Sstsp 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3320c69c0b43Smpi 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
332138ff87f6Sstsp 	    BUS_DMASYNC_PREWRITE);
332238ff87f6Sstsp 
332338ff87f6Sstsp 	s = splusb();
332438ff87f6Sstsp 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
332538ff87f6Sstsp 
332638ff87f6Sstsp 	xfer->status = USBD_IN_PROGRESS;
332738ff87f6Sstsp 
332838ff87f6Sstsp 	if (xfer->timeout) {
332938ff87f6Sstsp 		timeout_del(&xfer->timeout_handle);
333038ff87f6Sstsp 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
333138ff87f6Sstsp 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
333238ff87f6Sstsp 	}
333338ff87f6Sstsp 	splx(s);
333438ff87f6Sstsp 
333538ff87f6Sstsp 	return (USBD_IN_PROGRESS);
333638ff87f6Sstsp }
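/*
 * Worked example (illustrative, assumed values): an isochronous IN
 * transfer with two 3072-byte frames builds one Isoch TRB per frame;
 * the KASSERT above only allows a second, chained Normal TRB when a
 * frame crosses a 64k boundary.  Each frame's first TRB carries
 * XHCI_TRB_SIA, so the controller starts it in the next available
 * interval rather than at a specific frame ID.
 */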