xref: /openbsd/sys/dev/usb/xhci.c (revision d415bd75)
1 /* $OpenBSD: xhci.c,v 1.130 2023/07/20 09:43:00 claudio Exp $ */
2 
3 /*
4  * Copyright (c) 2014-2015 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/queue.h>
25 #include <sys/timeout.h>
26 #include <sys/pool.h>
27 #include <sys/endian.h>
28 #include <sys/rwlock.h>
29 
30 #include <machine/bus.h>
31 
32 #include <dev/usb/usb.h>
33 #include <dev/usb/usbdi.h>
34 #include <dev/usb/usbdivar.h>
35 #include <dev/usb/usb_mem.h>
36 
37 #include <dev/usb/xhcireg.h>
38 #include <dev/usb/xhcivar.h>
39 
/*
 * Autoconf(9) glue for the xhci driver.  CD_SKIPHIBERNATE presumably
 * keeps the controller from being touched during hibernate unpack —
 * NOTE(review): confirm semantics against <sys/device.h>.
 */
struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL, CD_SKIPHIBERNATE
};
43 
/* Debug printf wrappers: compiled out entirely unless XHCI_DEBUG is set. */
#ifdef XHCI_DEBUG
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;	/* runtime-tunable verbosity threshold */
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

/* Byte offset of a TRB within its ring's buffer (for bus_dmamap_sync). */
#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
/* Physical address of a ring's current dequeue/enqueue index. */
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))

/* Pool for struct xhci_xfer, shared by all xhci instances (see xhci_init). */
struct pool *xhcixfer;
59 
/*
 * Per-pipe (endpoint) software state.  Embeds the generic usbd_pipe as
 * its first member so the USB stack's pipe pointer can be cast to it.
 */
struct xhci_pipe {
	struct usbd_pipe	pipe;

	uint8_t			dci;	/* Device Context Index of the endpoint */
	uint8_t			slot;	/* Device slot ID */
	struct xhci_ring	ring;	/* transfer ring for this endpoint */

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];
	struct usbd_xfer	*aborted_xfer;	/* xfer waiting on EP reset */
	int			 halted;	/* usbd_status to report (e.g. USBD_STALLED) */
	size_t			 free_trbs;	/* TRBs still available on the ring */
	int			 skip;		/* complete queued xfers after a missed service */
/* Per-TRB completion bookkeeping, used by the isochronous path. */
#define TRB_PROCESSED_NO	0
#define TRB_PROCESSED_YES 	1
#define TRB_PROCESSED_SHORT	2
	uint8_t			 trb_processed[XHCI_MAX_XFER];
};
81 
82 int	xhci_reset(struct xhci_softc *);
83 int	xhci_intr1(struct xhci_softc *);
84 void	xhci_event_dequeue(struct xhci_softc *);
85 void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
86 int	xhci_event_xfer_generic(struct xhci_softc *, struct usbd_xfer *,
87 	    struct xhci_pipe *, uint32_t, int, uint8_t, uint8_t, uint8_t);
88 int	xhci_event_xfer_isoc(struct usbd_xfer *, struct xhci_pipe *,
89 	    uint32_t, int, uint8_t);
90 void	xhci_event_command(struct xhci_softc *, uint64_t);
91 void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
92 int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
93 int	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
94 int	xhci_scratchpad_alloc(struct xhci_softc *, int);
95 void	xhci_scratchpad_free(struct xhci_softc *);
96 int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
97 void	xhci_softdev_free(struct xhci_softc *, uint8_t);
98 int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
99 	    size_t);
100 void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
101 void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
102 struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
103 struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);
104 
105 struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
106 	    uint8_t *, int);
107 void	xhci_xfer_done(struct usbd_xfer *xfer);
108 /* xHCI command helpers. */
109 int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
110 int	xhci_command_abort(struct xhci_softc *);
111 
112 void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
113 void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
114 int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
115 int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
116 int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
117 int	xhci_cmd_set_address(struct xhci_softc *, uint8_t,  uint64_t, uint32_t);
118 #ifdef XHCI_DEBUG
119 int	xhci_cmd_noop(struct xhci_softc *);
120 #endif
121 
122 /* XXX should be part of the Bus interface. */
123 void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
124 void	xhci_pipe_close(struct usbd_pipe *);
125 void	xhci_noop(struct usbd_xfer *);
126 
127 void 	xhci_timeout(void *);
128 void	xhci_timeout_task(void *);
129 
130 /* USBD Bus Interface. */
131 usbd_status	  xhci_pipe_open(struct usbd_pipe *);
132 int		  xhci_setaddr(struct usbd_device *, int);
133 void		  xhci_softintr(void *);
134 void		  xhci_poll(struct usbd_bus *);
135 struct usbd_xfer *xhci_allocx(struct usbd_bus *);
136 void		  xhci_freex(struct usbd_bus *, struct usbd_xfer *);
137 
138 usbd_status	  xhci_root_ctrl_transfer(struct usbd_xfer *);
139 usbd_status	  xhci_root_ctrl_start(struct usbd_xfer *);
140 
141 usbd_status	  xhci_root_intr_transfer(struct usbd_xfer *);
142 usbd_status	  xhci_root_intr_start(struct usbd_xfer *);
143 void		  xhci_root_intr_abort(struct usbd_xfer *);
144 void		  xhci_root_intr_done(struct usbd_xfer *);
145 
146 usbd_status	  xhci_device_ctrl_transfer(struct usbd_xfer *);
147 usbd_status	  xhci_device_ctrl_start(struct usbd_xfer *);
148 void		  xhci_device_ctrl_abort(struct usbd_xfer *);
149 
150 usbd_status	  xhci_device_generic_transfer(struct usbd_xfer *);
151 usbd_status	  xhci_device_generic_start(struct usbd_xfer *);
152 void		  xhci_device_generic_abort(struct usbd_xfer *);
153 void		  xhci_device_generic_done(struct usbd_xfer *);
154 
155 usbd_status	  xhci_device_isoc_transfer(struct usbd_xfer *);
156 usbd_status	  xhci_device_isoc_start(struct usbd_xfer *);
157 
#define XHCI_INTR_ENDPT 1

/* Bus-level entry points handed to the generic USB stack. */
const struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

/* Root hub control endpoint (requests emulated in software). */
const struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

/* Root hub interrupt endpoint (port status change reporting). */
const struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

/* Device default (control) pipes. */
const struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

/* Interrupt and bulk pipes share the generic transfer path. */
const struct usbd_pipe_methods xhci_device_intr_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

const struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

/* Isochronous pipes: dedicated start/transfer, generic abort. */
const struct usbd_pipe_methods xhci_device_isoc_methods = {
	.transfer = xhci_device_isoc_transfer,
	.start = xhci_device_isoc_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};
216 
#ifdef XHCI_DEBUG
/*
 * Dump one TRB (address, status and decoded flag bits) for debugging.
 * %b is the kernel printf bitmask formatter driven by
 * XHCI_TRB_FLAGS_BITMASK.
 */
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
#endif
226 
227 int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
228 	    void **, bus_size_t, bus_size_t, bus_size_t);
229 void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);
230 
/*
 * Allocate a single physically contiguous DMA buffer of `size' bytes with
 * the given alignment and boundary constraints, map it into KVA and load
 * it into a DMA map.  On success dma->paddr/vaddr are valid, the mapping
 * is pre-synced for device access, and *kvap (if not NULL) receives the
 * kernel virtual address.  Returns 0 or a bus_dma error; on failure all
 * partially acquired resources are released via the goto unwind chain.
 */
int
usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
{
	int error;

	dma->tag = bus->dmatag;
	dma->size = size;

	/* One segment only: the memory must be physically contiguous. */
	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
	    BUS_DMA_NOWAIT, &dma->map);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0)
		goto destroy;

	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		goto free;

	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto unmap;

	/* Make the zeroed buffer visible to the device before first use. */
	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return (0);

unmap:
	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
free:
	bus_dmamem_free(dma->tag, &dma->seg, 1);
destroy:
	bus_dmamap_destroy(dma->tag, dma->map);
	return (error);
}
277 
278 void
279 usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
280 {
281 	if (dma->map != NULL) {
282 		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
283 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
284 		bus_dmamap_unload(bus->dmatag, dma->map);
285 		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
286 		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
287 		bus_dmamap_destroy(bus->dmatag, dma->map);
288 		dma->map = NULL;
289 	}
290 }
291 
/*
 * One-time controller initialization: locate the register windows, reset
 * the HC, and allocate the DCBAA, command ring, event ring, ERST and
 * scratchpad pages.  Interrupts are not enabled here — that happens in
 * xhci_config().  Returns 0 or an errno; on error everything allocated
 * so far is freed (unwind is open-coded per failure site).
 */
int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	/* The operational, doorbell and runtime windows are at
	 * capability-relative offsets read from the hardware. */
	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
	printf(", xHCI %x.%x\n", sc->sc_version >> 8, sc->sc_version & 0xff);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	/* The xfer pool is global; only the first controller creates it. */
	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_USBHC, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
		    0, "xhcixfer", NULL);
	}

	/* Context structures are 32 or 64 bytes depending on HCCPARAMS. */
	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/* Setup Device Context Base Address Array. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Setup command ring. */
	rw_init(&sc->sc_cmd_lock, "xhcicmd");
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
	    XHCI_CMDS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
	    XHCI_EVTS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
	   XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}


	return (0);
}
412 
/*
 * Program the controller with the structures allocated by xhci_init()
 * (DCBAA, command ring, ERST, event ring dequeue pointer), enable the
 * interrupter and finally start the HC.  Called at attach and again on
 * resume via xhci_reinit().
 */
void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address, with the initial cycle state bit. */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}
473 
/*
 * Detach the controller: detach children first, then quiesce the HC and
 * zero out the registers programmed by xhci_config() before freeing the
 * DMA structures (reverse order of xhci_init()'s allocations).
 */
int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}
521 
522 int
523 xhci_activate(struct device *self, int act)
524 {
525 	struct xhci_softc *sc = (struct xhci_softc *)self;
526 	int rv = 0;
527 
528 	switch (act) {
529 	case DVACT_RESUME:
530 		sc->sc_bus.use_polling++;
531 		xhci_reinit(sc);
532 		sc->sc_bus.use_polling--;
533 		rv = config_activate_children(self, act);
534 		break;
535 	case DVACT_POWERDOWN:
536 		rv = config_activate_children(self, act);
537 		xhci_reset(sc);
538 		break;
539 	default:
540 		rv = config_activate_children(self, act);
541 		break;
542 	}
543 
544 	return (rv);
545 }
546 
547 int
548 xhci_reset(struct xhci_softc *sc)
549 {
550 	uint32_t hcr;
551 	int i;
552 
553 	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
554 	for (i = 0; i < 100; i++) {
555 		usb_delay_ms(&sc->sc_bus, 1);
556 		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
557 		if (hcr)
558 			break;
559 	}
560 
561 	if (!hcr)
562 		printf("%s: halt timeout\n", DEVNAME(sc));
563 
564 	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
565 	for (i = 0; i < 100; i++) {
566 		usb_delay_ms(&sc->sc_bus, 1);
567 		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
568 		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
569 		if (!hcr)
570 			break;
571 	}
572 
573 	if (hcr) {
574 		printf("%s: reset timeout\n", DEVNAME(sc));
575 		return (EIO);
576 	}
577 
578 	return (0);
579 }
580 
/*
 * Bring the controller back to a working state after resume: reset the
 * hardware, rewind the software command/event rings to match, then
 * reprogram everything via xhci_config().
 */
void
xhci_reinit(struct xhci_softc *sc)
{
	xhci_reset(sc);
	xhci_ring_reset(sc, &sc->sc_cmd_ring);
	xhci_ring_reset(sc, &sc->sc_evt_ring);

	/* Renesas controllers, at least, need more time to resume. */
	usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);

	xhci_config(sc);
}
593 
/*
 * Hardware interrupt entry point.  Returns non-zero iff the interrupt
 * was ours (see xhci_intr1()); dead controllers and interrupts arriving
 * while the bus is polled synchronously are ignored.
 */
int
xhci_intr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_dead)
		return (0);

	/* If we get an interrupt while polling, then just ignore it. */
	if (sc->sc_bus.use_polling) {
		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
		return (0);
	}

	return (xhci_intr1(sc));
}
610 
/*
 * Interrupt body, shared by xhci_intr() and xhci_poll().  Checks USBSTS,
 * detects a vanished controller (all-ones read), acknowledges the
 * interrupt in both USBSTS and IMAN, and schedules the soft interrupt
 * that will actually drain the event ring.  Returns 1 if handled.
 */
int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	/* 0xffffffff means the device was unplugged/powered off. */
	if (intrs == 0xffffffff) {
		sc->sc_bus.dying = 1;
		sc->sc_dead = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		XOWRITE4(sc, XHCI_USBSTS, intrs);
		return (1);
	}

	/* Acknowledge interrupts: USBSTS first, then the interrupter's
	 * pending bit in IMAN. */
	XOWRITE4(sc, XHCI_USBSTS, intrs);
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	usb_schedsoftintr(&sc->sc_bus);

	return (1);
}
644 
/*
 * Synchronous polling entry point used when interrupts are unavailable
 * (e.g. during autoconf or resume); runs the interrupt body if any
 * USBSTS bit is set.
 */
void
xhci_poll(struct usbd_bus *bus)
{
	struct xhci_softc *sc = (struct xhci_softc *)bus;

	if (XOREAD4(sc, XHCI_USBSTS))
		xhci_intr1(sc);
}
653 
/*
 * Soft interrupt handler: drains the event ring scheduled by
 * xhci_intr1().  intr_context is bumped so completion callbacks know
 * they run in (soft) interrupt context.
 */
void
xhci_softintr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_bus.dying)
		return;

	sc->sc_bus.intr_context++;
	xhci_event_dequeue(sc);
	sc->sc_bus.intr_context--;
}
666 
/*
 * Consume and dispatch every pending TRB on the event ring, then write
 * the new dequeue pointer back to ERDP (with the busy bit) so the
 * controller knows how far we have processed.
 */
void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			/* Keep a copy for the synchronous command waiter
			 * before the ring slot gets recycled. */
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		case XHCI_EVT_HOST_CTRL:
			/* TODO */
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}

	}

	/* Tell the hardware where we stopped: update the dequeue pointer. */
	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}
707 
708 void
709 xhci_skip_all(struct xhci_pipe *xp)
710 {
711 	struct usbd_xfer *xfer, *last;
712 
713 	if (xp->skip) {
714 		/*
715 		 * Find the last transfer to skip, this is necessary
716 		 * as xhci_xfer_done() posts new transfers which we
717 		 * don't want to skip
718 		 */
719 		last = SIMPLEQ_FIRST(&xp->pipe.queue);
720 		if (last == NULL)
721 			goto done;
722 		while ((xfer = SIMPLEQ_NEXT(last, next)) != NULL)
723 			last = xfer;
724 
725 		do {
726 			xfer = SIMPLEQ_FIRST(&xp->pipe.queue);
727 			if (xfer == NULL)
728 				goto done;
729 			DPRINTF(("%s: skipping %p\n", __func__, xfer));
730 			xfer->status = USBD_NORMAL_COMPLETION;
731 			xhci_xfer_done(xfer);
732 		} while (xfer != last);
733 	done:
734 		xp->skip = 0;
735 	}
736 }
737 
738 void
739 xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
740     uint32_t flags)
741 {
742 	struct xhci_pipe *xp;
743 	struct usbd_xfer *xfer;
744 	uint8_t dci, slot, code, xfertype;
745 	uint32_t remain;
746 	int trb_idx;
747 
748 	slot = XHCI_TRB_GET_SLOT(flags);
749 	dci = XHCI_TRB_GET_EP(flags);
750 	if (slot > sc->sc_noslot) {
751 		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
752 		return;
753 	}
754 
755 	xp = sc->sc_sdevs[slot].pipes[dci - 1];
756 	if (xp == NULL) {
757 		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
758 		return;
759 	}
760 
761 	code = XHCI_TRB_GET_CODE(status);
762 	remain = XHCI_TRB_REMAIN(status);
763 
764 	switch (code) {
765 	case XHCI_CODE_RING_UNDERRUN:
766 		DPRINTF(("%s: slot %u underrun with %zu TRB\n", DEVNAME(sc),
767 		    slot, xp->ring.ntrb - xp->free_trbs));
768 		xhci_skip_all(xp);
769 		return;
770 	case XHCI_CODE_RING_OVERRUN:
771 		DPRINTF(("%s: slot %u overrun with %zu TRB\n", DEVNAME(sc),
772 		    slot, xp->ring.ntrb - xp->free_trbs));
773 		xhci_skip_all(xp);
774 		return;
775 	case XHCI_CODE_MISSED_SRV:
776 		DPRINTF(("%s: slot %u missed srv with %zu TRB\n", DEVNAME(sc),
777 		    slot, xp->ring.ntrb - xp->free_trbs));
778 		xp->skip = 1;
779 		return;
780 	default:
781 		break;
782 	}
783 
784 	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
785 	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
786 		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
787 		    trb_idx, xp->ring.ntrb - 1);
788 		return;
789 	}
790 
791 	xfer = xp->pending_xfers[trb_idx];
792 	if (xfer == NULL) {
793 		DPRINTF(("%s: NULL xfer pointer\n", DEVNAME(sc)));
794 		return;
795 	}
796 
797 	if (remain > xfer->length)
798 		remain = xfer->length;
799 
800 	xfertype = UE_GET_XFERTYPE(xfer->pipe->endpoint->edesc->bmAttributes);
801 
802 	switch (xfertype) {
803 	case UE_BULK:
804 	case UE_INTERRUPT:
805 	case UE_CONTROL:
806 		if (xhci_event_xfer_generic(sc, xfer, xp, remain, trb_idx,
807 		    code, slot, dci))
808 			return;
809 		break;
810 	case UE_ISOCHRONOUS:
811 		if (xhci_event_xfer_isoc(xfer, xp, remain, trb_idx, code))
812 			return;
813 		break;
814 	default:
815 		panic("xhci_event_xfer: unknown xfer type %u", xfertype);
816 	}
817 
818 	xhci_xfer_done(xfer);
819 }
820 
821 uint32_t
822 xhci_xfer_length_generic(struct xhci_xfer *xx, struct xhci_pipe *xp,
823     int trb_idx)
824 {
825 	int	 trb0_idx;
826 	uint32_t len = 0, type;
827 
828 	trb0_idx =
829 	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);
830 
831 	while (1) {
832 		type = letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
833 		    XHCI_TRB_TYPE_MASK;
834 		if (type == XHCI_TRB_TYPE_NORMAL || type == XHCI_TRB_TYPE_DATA)
835 			len += XHCI_TRB_LEN(letoh32(
836 			    xp->ring.trbs[trb0_idx].trb_status));
837 		if (trb0_idx == trb_idx)
838 			break;
839 		if (++trb0_idx == xp->ring.ntrb)
840 			trb0_idx = 0;
841 	}
842 	return len;
843 }
844 
/*
 * Completion handling for bulk, interrupt and control transfers.
 * Translates the xHCI completion code into an usbd_status and fills in
 * xfer->actlen.  Returns 1 when the transfer must NOT be completed yet
 * (mid-TD short event, or an endpoint reset / dequeue update is still
 * in flight), 0 when the caller should call xhci_xfer_done().
 */
int
xhci_event_xfer_generic(struct xhci_softc *sc, struct usbd_xfer *xfer,
    struct xhci_pipe *xp, uint32_t remain, int trb_idx,
    uint8_t code, uint8_t slot, uint8_t dci)
{
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		if (xfer->actlen == 0) {
			if (remain)
				/* Sum the TD's TRB lengths minus the residue. */
				xfer->actlen =
				    xhci_xfer_length_generic(xx, xp, trb_idx) -
				    remain;
			else
				xfer->actlen = xfer->length;
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		/*
		 * Use values from the transfer TRB instead of the status TRB.
		 */
		if (xfer->actlen == 0)
			xfer->actlen =
			    xhci_xfer_length_generic(xx, xp, trb_idx) - remain;
		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		if (xx->index != trb_idx) {
			DPRINTF(("%s: short xfer %p for %u\n",
			    DEVNAME(sc), xfer, xx->index));
			return (1);
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
		/* Prevent any timeout to kick in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return (1);
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
			return (1);
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	return (0);
}
935 
/*
 * Completion handling for isochronous transfers.  Reconstructs which
 * frame of the xfer this TRB belongs to by walking the ring from the
 * TD's first TRB, accounts the per-frame length (frlengths) and total
 * actlen, and completes the transfer only once the TD's last TRB has
 * been seen.  Returns 1 while more TRBs of the same TD are expected,
 * 0 when the caller should call xhci_xfer_done().
 */
int
xhci_event_xfer_isoc(struct usbd_xfer *xfer, struct xhci_pipe *xp,
    uint32_t remain, int trb_idx, uint8_t code)
{
	struct usbd_xfer *skipxfer;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int trb0_idx, frame_idx = 0, skip_trb = 0;

	KASSERT(xx->index >= 0);

	/* Record how this TRB completed for the second-TRB check below. */
	switch (code) {
	case XHCI_CODE_SHORT_XFER:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_SHORT;
		break;
	default:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_YES;
		break;
	}

	/* Index of the TD's first TRB (ring wrap excludes the link TRB). */
	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	/* Find the according frame index for this TRB. */
	while (trb0_idx != trb_idx) {
		if ((letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK) == XHCI_TRB_TYPE_ISOCH)
			frame_idx++;
		if (trb0_idx++ == (xp->ring.ntrb - 1))
			trb0_idx = 0;
	}

	/*
	 * If we queued two TRBs for a frame and this is the second TRB,
	 * check if the first TRB needs accounting since it might not have
	 * raised an interrupt in case of full data received.
	 */
	if ((letoh32(xp->ring.trbs[trb_idx].trb_flags) & XHCI_TRB_TYPE_MASK) ==
	    XHCI_TRB_TYPE_NORMAL) {
		frame_idx--;
		if (trb_idx == 0)
			trb0_idx = xp->ring.ntrb - 2;
		else
			trb0_idx = trb_idx - 1;
		if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_NO) {
			/* First TRB completed silently: count its length. */
			xfer->frlengths[frame_idx] = XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		} else if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_SHORT) {
			/* First TRB was short: its event already accounted
			 * the frame, don't count this TRB again. */
			skip_trb = 1;
		}
	}

	if (!skip_trb) {
		xfer->frlengths[frame_idx] +=
		    XHCI_TRB_LEN(letoh32(xp->ring.trbs[trb_idx].trb_status)) -
		    remain;
		xfer->actlen += xfer->frlengths[frame_idx];
	}

	/* Not the TD's last TRB yet: keep the transfer pending. */
	if (xx->index != trb_idx)
		return (1);

	/* A missed service was flagged: complete everything queued before
	 * this transfer (see xp->skip). */
	if (xp->skip) {
		while (1) {
			skipxfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (skipxfer == xfer || skipxfer == NULL)
				break;
			DPRINTF(("%s: skipping %p\n", __func__, skipxfer));
			skipxfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(skipxfer);
		}
		xp->skip = 0;
	}

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	xfer->status = USBD_NORMAL_COMPLETION;

	return (0);
}
1016 
1017 void
1018 xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
1019 {
1020 	struct xhci_trb *trb;
1021 	struct xhci_pipe *xp;
1022 	uint32_t flags;
1023 	uint8_t dci, slot;
1024 	int trb_idx, status;
1025 
1026 	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
1027 	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
1028 		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
1029 		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
1030 		return;
1031 	}
1032 
1033 	trb = &sc->sc_cmd_ring.trbs[trb_idx];
1034 
1035 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
1036 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
1037 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1038 
1039 	flags = letoh32(trb->trb_flags);
1040 
1041 	slot = XHCI_TRB_GET_SLOT(flags);
1042 	dci = XHCI_TRB_GET_EP(flags);
1043 
1044 	switch (flags & XHCI_TRB_TYPE_MASK) {
1045 	case XHCI_CMD_RESET_EP:
1046 		xp = sc->sc_sdevs[slot].pipes[dci - 1];
1047 		if (xp == NULL)
1048 			break;
1049 
1050 		/* Update the dequeue pointer past the last TRB. */
1051 		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
1052 		    DEQPTR(xp->ring) | xp->ring.toggle);
1053 		break;
1054 	case XHCI_CMD_SET_TR_DEQ:
1055 		xp = sc->sc_sdevs[slot].pipes[dci - 1];
1056 		if (xp == NULL)
1057 			break;
1058 
1059 		status = xp->halted;
1060 		xp->halted = 0;
1061 		if (xp->aborted_xfer != NULL) {
1062 			xp->aborted_xfer->status = status;
1063 			xhci_xfer_done(xp->aborted_xfer);
1064 			wakeup(xp);
1065 		}
1066 		break;
1067 	case XHCI_CMD_CONFIG_EP:
1068 	case XHCI_CMD_STOP_EP:
1069 	case XHCI_CMD_DISABLE_SLOT:
1070 	case XHCI_CMD_ENABLE_SLOT:
1071 	case XHCI_CMD_ADDRESS_DEVICE:
1072 	case XHCI_CMD_EVAL_CTX:
1073 	case XHCI_CMD_NOOP:
1074 		/*
1075 		 * All these commands are synchronous.
1076 		 *
1077 		 * If TRBs differ, this could be a delayed result after we
1078 		 * gave up waiting for the expected TRB due to timeout.
1079 		 */
1080 		if (sc->sc_cmd_trb == trb) {
1081 			sc->sc_cmd_trb = NULL;
1082 			wakeup(&sc->sc_cmd_trb);
1083 		}
1084 		break;
1085 	default:
1086 		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
1087 	}
1088 }
1089 
/*
 * Handle a Port Status Change Event: mark the changed root hub port in
 * the pending root hub interrupt transfer's bitmap and complete it.
 */
void
xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
{
	struct usbd_xfer *xfer = sc->sc_intrxfer;
	uint32_t port = XHCI_TRB_PORTID(paddr);
	uint8_t *p;

	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
		return;
	}

	/* No root hub interrupt transfer pending, nobody to notify. */
	if (xfer == NULL)
		return;

	/* Build the hub status-change bitmap: one bit per port number. */
	p = KERNADDR(&xfer->dmabuf, 0);
	memset(p, 0, xfer->length);

	p[port/8] |= 1 << (port%8);
	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
}
1116 
/*
 * Release the ring bookkeeping held by a finished transfer and hand it
 * back to the USB stack.  Must run at IPL_SOFTUSB.
 */
void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	/* This transfer can no longer be the one waiting on an abort. */
	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

	/*
	 * Walk backward from the transfer's last TRB and unlink every
	 * TRB it occupied, wrapping around the ring when reaching 0.
	 */
	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	/* Zero-length TDs consumed a TRB but are counted separately. */
	xp->free_trbs += xx->ntrb;
	xp->free_trbs += xx->zerotd;
	xx->index = -1;
	xx->ntrb = 0;
	xx->zerotd = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}
1151 
1152 /*
1153  * Calculate the Device Context Index (DCI) for endpoints as stated
1154  * in section 4.5.1 of xHCI specification r1.1.
1155  */
1156 static inline uint8_t
1157 xhci_ed2dci(usb_endpoint_descriptor_t *ed)
1158 {
1159 	uint8_t dir;
1160 
1161 	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
1162 		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);
1163 
1164 	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
1165 		dir = 1;
1166 	else
1167 		dir = 0;
1168 
1169 	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
1170 }
1171 
/*
 * USBD bus method: open a pipe.  Root hub pipes only get their method
 * table set.  Opening a device's default (control) pipe enables a new
 * slot; every other pipe reuses the slot of the default pipe.
 */
usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	int error;

	KASSERT(xp->slot == 0);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/*
		 * Get a slot and init the device's contexts.
		 *
		 * Since the control endpoint, represented as the default
		 * pipe, is always opened first we are dealing with a
		 * new device.  Put a new slot in the ENABLED state.
		 *
		 */
		error = xhci_cmd_slot_control(sc, &slot, 1);
		if (error || slot == 0 || slot > sc->sc_noslot)
			return (USBD_INVAL);

		if (xhci_softdev_alloc(sc, slot)) {
			xhci_cmd_slot_control(sc, &slot, 0);
			return (USBD_NOMEM);
		}

		break;
	case UE_ISOCHRONOUS:
		pipe->methods = &xhci_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->methods = &xhci_device_bulk_methods;
		break;
	case UE_INTERRUPT:
		pipe->methods = &xhci_device_intr_methods;
		break;
	default:
		return (USBD_INVAL);
	}

	/*
	 * Our USBD Bus Interface is pipe-oriented but for most of the
	 * operations we need to access a device context, so keep track
	 * of the slot ID in every pipe.
	 */
	if (slot == 0)
		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;

	xp->slot = slot;
	xp->dci = xhci_ed2dci(ed);

	if (xhci_pipe_init(sc, pipe)) {
		xhci_cmd_slot_control(sc, &slot, 0);
		return (USBD_IOERROR);
	}

	return (USBD_NORMAL_COMPLETION);
}
1260 
1261 /*
1262  * Set the maximum Endpoint Service Interface Time (ESIT) payload and
1263  * the average TRB buffer length for an endpoint.
1264  */
1265 static inline uint32_t
1266 xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
1267 {
1268 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1269 	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);
1270 
1271 	switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
1272 	case UE_CONTROL:
1273 		mep = 0;
1274 		atl = 8;
1275 		break;
1276 	case UE_INTERRUPT:
1277 	case UE_ISOCHRONOUS:
1278 		if (pipe->device->speed == USB_SPEED_SUPER) {
1279 			/*  XXX Read the companion descriptor */
1280 		}
1281 
1282 		mep = (UE_GET_TRANS(mps) + 1) * UE_GET_SIZE(mps);
1283 		atl = mep;
1284 		break;
1285 	case UE_BULK:
1286 	default:
1287 		mep = 0;
1288 		atl = 0;
1289 	}
1290 
1291 	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
1292 }
1293 
1294 static inline uint32_t
1295 xhci_linear_interval(usb_endpoint_descriptor_t *ed)
1296 {
1297 	uint32_t ival = min(max(1, ed->bInterval), 255);
1298 
1299 	return (fls(ival) - 1);
1300 }
1301 
1302 static inline uint32_t
1303 xhci_exponential_interval(usb_endpoint_descriptor_t *ed)
1304 {
1305 	uint32_t ival = min(max(1, ed->bInterval), 16);
1306 
1307 	return (ival - 1);
1308 }
1309 /*
1310  * Return interval for endpoint expressed in 2^(ival) * 125us.
1311  *
1312  * See section 6.2.3.6 of xHCI r1.1 Specification for more details.
1313  */
1314 uint32_t
1315 xhci_pipe_interval(struct usbd_pipe *pipe)
1316 {
1317 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1318 	uint8_t speed = pipe->device->speed;
1319 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1320 	uint32_t ival;
1321 
1322 	if (xfertype == UE_CONTROL || xfertype == UE_BULK) {
1323 		/* Control and Bulk endpoints never NAKs. */
1324 		ival = 0;
1325 	} else {
1326 		switch (speed) {
1327 		case USB_SPEED_FULL:
1328 			if (xfertype == UE_ISOCHRONOUS) {
1329 				/* Convert 1-2^(15)ms into 3-18 */
1330 				ival = xhci_exponential_interval(ed) + 3;
1331 				break;
1332 			}
1333 			/* FALLTHROUGH */
1334 		case USB_SPEED_LOW:
1335 			/* Convert 1-255ms into 3-10 */
1336 			ival = xhci_linear_interval(ed) + 3;
1337 			break;
1338 		case USB_SPEED_HIGH:
1339 		case USB_SPEED_SUPER:
1340 		default:
1341 			/* Convert 1-2^(15) * 125us into 0-15 */
1342 			ival = xhci_exponential_interval(ed);
1343 			break;
1344 		}
1345 	}
1346 
1347 	KASSERT(ival <= 15);
1348 	return (XHCI_EPCTX_SET_IVAL(ival));
1349 }
1350 
1351 uint32_t
1352 xhci_pipe_maxburst(struct usbd_pipe *pipe)
1353 {
1354 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1355 	uint32_t mps = UGETW(ed->wMaxPacketSize);
1356 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1357 	uint32_t maxb = 0;
1358 
1359 	switch (pipe->device->speed) {
1360 	case USB_SPEED_HIGH:
1361 		if (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)
1362 			maxb = UE_GET_TRANS(mps);
1363 		break;
1364 	case USB_SPEED_SUPER:
1365 		/*  XXX Read the companion descriptor */
1366 	default:
1367 		break;
1368 	}
1369 
1370 	return (maxb);
1371 }
1372 
1373 static inline uint32_t
1374 xhci_last_valid_dci(struct xhci_pipe **pipes, struct xhci_pipe *ignore)
1375 {
1376 	struct xhci_pipe *lxp;
1377 	int i;
1378 
1379 	/* Find the last valid Endpoint Context. */
1380 	for (i = 30; i >= 0; i--) {
1381 		lxp = pipes[i];
1382 		if (lxp != NULL && lxp != ignore)
1383 			return XHCI_SCTX_DCI(lxp->dci);
1384 	}
1385 
1386 	return 0;
1387 }
1388 
/*
 * Fill a device's input context (slot context + one endpoint context)
 * in preparation for an Address Device or Configure Endpoint command.
 * Returns 0 on success, USBD_INVAL for an unknown device speed.
 */
int
xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t speed, cerr = 0;
	uint32_t route = 0, rhport = 0;
	struct usbd_device *hub;

	/*
	 * Calculate the Route String.  Assume that there is no hub with
	 * more than 15 ports and that they all have a depth < 6.  See
	 * section 8.9 of USB 3.1 Specification for more details.
	 */
	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
		uint32_t port = hub->powersrc->portno;
		uint32_t depth = hub->myhub->depth;

		route |= port << (4 * (depth - 1));
	}

	/* Get Root Hub port */
	rhport = hub->powersrc->portno;

	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		speed = XHCI_SPEED_LOW;
		break;
	case USB_SPEED_FULL:
		speed = XHCI_SPEED_FULL;
		break;
	case USB_SPEED_HIGH:
		speed = XHCI_SPEED_HIGH;
		break;
	case USB_SPEED_SUPER:
		speed = XHCI_SPEED_SUPER;
		break;
	default:
		return (USBD_INVAL);
	}

	/* Setup the endpoint context */
	/* Allow 3 retries for everything but isochronous endpoints. */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;

	/* xHCI EP type = USB transfer type + 4 for IN/control (6.2.3). */
	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |
	    XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |
	    XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DEQPTR(xp->ring) | xp->ring.toggle
	);

	/* Unmask the new endpoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(
	    xhci_last_valid_dci(sdev->pipes, NULL) | XHCI_SCTX_SPEED(speed) |
	    XHCI_SCTX_ROUTE(route)
	);
	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

/* XXX */
#define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
	/*
	 * If we are opening the interrupt pipe of a hub, update its
	 * context before putting it in the CONFIGURED state.
	 */
	if (pipe->device->hub != NULL) {
		int nports = pipe->device->hub->nports;

		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));

		if (UHUB_IS_MTT(pipe->device))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
		);
	}

	/*
	 * If this is a Low or Full Speed device below an external High
	 * Speed hub, it needs some TT love.
	 */
	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
		struct usbd_device *hshub = pipe->device->myhsport->parent;
		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;

		if (UHUB_IS_MTT(hshub))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_HUB_SID(slot) |
		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
		);
	}
#undef UHUB_IS_MTT

	/* Unmask the slot context */
	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));

	/* Flush the input context to memory before the command reads it. */
	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1510 
/*
 * Allocate the transfer ring for a pipe, hook it into the soft device
 * state and inform the controller: "Address Device" (BSR=1) for the
 * default pipe, "Configure Endpoint" for every other endpoint.
 */
int
xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

#ifdef XHCI_DEBUG
	struct usbd_device *dev = pipe->device;
	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
	    pipe->endpoint->edesc->bEndpointAddress);
#endif

	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
		return (ENOMEM);

	xp->free_trbs = xp->ring.ntrb;
	xp->halted = 0;

	sdev->pipes[xp->dci - 1] = xp;

	error = xhci_context_setup(sc, pipe);
	if (error)
		return (error);

	if (xp->dci == 1) {
		/*
		 * If we are opening the default pipe, the Slot should
		 * be in the ENABLED state.  Issue an "Address Device"
		 * with BSR=1 to put the device in the DEFAULT state.
		 * We cannot jump directly to the ADDRESSED state with
		 * BSR=0 because some Low/Full speed devices won't accept
		 * a SET_ADDRESS command before we've read their device
		 * descriptor.
		 */
		error = xhci_cmd_set_address(sc, xp->slot,
		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
	} else {
		error = xhci_cmd_configure_ep(sc, xp->slot,
		    sdev->ictx_dma.paddr);
	}

	if (error) {
		xhci_ring_free(sc, &xp->ring);
		return (EIO);
	}

	return (0);
}
1562 
/*
 * USBD bus method: close a pipe.  Drops the endpoint from the device's
 * context via "Configure Endpoint" and frees the transfer ring; closing
 * the default pipe also disables and frees the slot.
 */
void
xhci_pipe_close(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];

	/* Root Hub */
	if (pipe->device->depth == 0)
		return;

	/* Mask the endpoint */
	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
	sdev->input_ctx->add_flags = 0;

	/* Update last valid Endpoint Context */
	sdev->slot_ctx->info_lo &= htole32(~XHCI_SCTX_DCI(31));
	sdev->slot_ctx->info_lo |= htole32(xhci_last_valid_dci(sdev->pipes, xp));

	/* Clear the Endpoint Context */
	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));

	/* Flush the input context before the controller reads it. */
	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));

	xhci_ring_free(sc, &xp->ring);
	sdev->pipes[xp->dci - 1] = NULL;

	/*
	 * If we are closing the default pipe, the device is probably
	 * gone, so put its slot in the DISABLED state.
	 */
	if (xp->dci == 1) {
		xhci_cmd_slot_control(sc, &xp->slot, 0);
		xhci_softdev_free(sc, xp->slot);
	}
}
1603 
1604 /*
1605  * Transition a device from DEFAULT to ADDRESSED Slot state, this hook
1606  * is needed for Low/Full speed devices.
1607  *
1608  * See section 4.5.3 of USB 3.1 Specification for more details.
1609  */
1610 int
1611 xhci_setaddr(struct usbd_device *dev, int addr)
1612 {
1613 	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
1614 	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
1615 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1616 	int error;
1617 
1618 	/* Root Hub */
1619 	if (dev->depth == 0)
1620 		return (0);
1621 
1622 	KASSERT(xp->dci == 1);
1623 
1624 	error = xhci_context_setup(sc, dev->default_pipe);
1625 	if (error)
1626 		return (error);
1627 
1628 	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);
1629 
1630 #ifdef XHCI_DEBUG
1631 	if (error == 0) {
1632 		struct xhci_sctx *sctx;
1633 		uint8_t addr;
1634 
1635 		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
1636 		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);
1637 
1638 		/* Get output slot context. */
1639 		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
1640 		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
1641 		error = (addr == 0);
1642 
1643 		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
1644 	}
1645 #endif
1646 
1647 	return (error);
1648 }
1649 
/* USBD bus method: allocate a zeroed transfer from the shared pool. */
struct usbd_xfer *
xhci_allocx(struct usbd_bus *bus)
{
	/* PR_NOWAIT: may return NULL under memory pressure. */
	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
}
1655 
/* USBD bus method: return a transfer to the shared pool. */
void
xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	pool_put(xhcixfer, xfer);
}
1661 
/*
 * Allocate the scratchpad buffer array and its pages for the controller
 * (xHCI r1.1 section 4.20) and install it in slot 0 of the DCBAA.
 * Returns 0 on success, ENOMEM on allocation failure.
 */
int
xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
{
	uint64_t *pte;
	int error, i;

	/* Allocate the required entry for the table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
	    sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Allocate pages. XXX does not need to be contiguous. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
		return (ENOMEM);
	}

	/* Point each table entry at its scratchpad page. */
	for (i = 0; i < npage; i++) {
		pte[i] = htole64(
		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
		);
	}

	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
	    npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*  Entry 0 points to the table of scratchpad pointers. */
	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_spad.npage = npage;

	return (0);
}
1702 
/* Detach the scratchpad from the DCBAA and free its DMA memory. */
void
xhci_scratchpad_free(struct xhci_softc *sc)
{
	/* Clear DCBAA entry 0 before releasing the backing pages. */
	sc->sc_dcbaa.segs[0] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
}
1713 
1714 int
1715 xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
1716     size_t alignment)
1717 {
1718 	size_t size;
1719 	int error;
1720 
1721 	size = ntrb * sizeof(struct xhci_trb);
1722 
1723 	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
1724 	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
1725 	if (error)
1726 		return (error);
1727 
1728 	ring->ntrb = ntrb;
1729 
1730 	xhci_ring_reset(sc, ring);
1731 
1732 	return (0);
1733 }
1734 
/* Release the DMA memory backing a ring's TRB array. */
void
xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
{
	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
}
1740 
/*
 * Reset a ring to its initial state: all TRBs zeroed, dequeue index 0,
 * cycle bit set, and (for non-event rings) a link TRB closing the
 * single segment back onto itself.
 */
void
xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
{
	size_t size;

	size = ring->ntrb * sizeof(struct xhci_trb);

	memset(ring->trbs, 0, size);

	ring->index = 0;
	ring->toggle = XHCI_TRB_CYCLE;

	/*
	 * Since all our rings use only one segment, at least for
	 * the moment, link their tail to their head.
	 */
	if (ring != &sc->sc_evt_ring) {
		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];

		trb->trb_paddr = htole64(ring->dma.paddr);
		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG |
		    XHCI_TRB_CYCLE);
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREWRITE);
	} else
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1769 
/*
 * Dequeue the next TRB written by the controller, or return NULL if
 * its cycle bit does not match the ring's current toggle (nothing new
 * to consume).  Flips the toggle when wrapping past the last TRB.
 */
struct xhci_trb*
xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	/* Pull the controller's writes in before inspecting the TRB. */
	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	/* Make sure this TRB can be consumed. */
	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
		return (NULL);

	ring->index++;

	if (ring->index == ring->ntrb) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1793 
/*
 * Return the next TRB slot to fill on a producer ring.  When wrapping,
 * the link TRB at the tail is updated (chain bit propagated, cycle bit
 * handed over) so the controller follows it back to the head.
 */
struct xhci_trb*
xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *lnk, *trb;

	KASSERT(ring->index < ring->ntrb);

	/* Setup the link TRB after the previous TRB is done. */
	if (ring->index == 0) {
		lnk = &ring->trbs[ring->ntrb - 1];
		trb = &ring->trbs[ring->ntrb - 2];

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/* Propagate the chain bit of the last real TRB to the link. */
		lnk->trb_flags &= htole32(~XHCI_TRB_CHAIN);
		if (letoh32(trb->trb_flags) & XHCI_TRB_CHAIN)
			lnk->trb_flags |= htole32(XHCI_TRB_CHAIN);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

		/* Toggle last: this hands the link TRB to the controller. */
		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
	}

	trb = &ring->trbs[ring->index++];
	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	/* Toggle cycle state of the link TRB and skip it. */
	if (ring->index == (ring->ntrb - 1)) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1836 
1837 struct xhci_trb *
1838 xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
1839     uint8_t *togglep, int last)
1840 {
1841 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
1842 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
1843 
1844 	KASSERT(xp->free_trbs >= 1);
1845 	xp->free_trbs--;
1846 	*togglep = xp->ring.toggle;
1847 
1848 	switch (last) {
1849 	case -1:	/* This will be a zero-length TD. */
1850 		xp->pending_xfers[xp->ring.index] = NULL;
1851 		xx->zerotd += 1;
1852 		break;
1853 	case 0:		/* This will be in a chain. */
1854 		xp->pending_xfers[xp->ring.index] = xfer;
1855 		xx->index = -2;
1856 		xx->ntrb += 1;
1857 		break;
1858 	case 1:		/* This will terminate a chain. */
1859 		xp->pending_xfers[xp->ring.index] = xfer;
1860 		xx->index = xp->ring.index;
1861 		xx->ntrb += 1;
1862 		break;
1863 	}
1864 
1865 	xp->trb_processed[xp->ring.index] = TRB_PROCESSED_NO;
1866 
1867 	return (xhci_ring_produce(sc, &xp->ring));
1868 }
1869 
/*
 * Enqueue a command TRB on the command ring and ring doorbell 0.  With
 * timeout == 0 the command is asynchronous and the function returns
 * immediately; otherwise it sleeps for the completion event, copies the
 * result TRB back into *trb0 and returns 0 on success or an errno.
 */
int
xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
{
	struct xhci_trb *trb;
	int s, error = 0;

	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);

	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);

	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
	if (trb == NULL)
		return (EAGAIN);
	/* Write address/status first, flags last (flags carry the cycle bit). */
	trb->trb_paddr = trb0->trb_paddr;
	trb->trb_status = trb0->trb_status;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	trb->trb_flags = trb0->trb_flags;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Asynchronous submission: ring the doorbell and return. */
	if (timeout == 0) {
		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
		return (0);
	}

	rw_assert_wrlock(&sc->sc_cmd_lock);

	s = splusb();
	sc->sc_cmd_trb = trb;
	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
	error = tsleep_nsec(&sc->sc_cmd_trb, PZERO, "xhcicmd", timeout);
	if (error) {
#ifdef XHCI_DEBUG
		printf("%s: tsleep() = %d\n", __func__, error);
		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
		xhci_dump_trb(trb);
#endif
		KASSERT(sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL);
		/*
		 * Just because the timeout expired this does not mean that the
		 * TRB isn't active anymore! We could get an interrupt from
		 * this TRB later on and then wonder what to do with it.
		 * We'd rather abort it.
		 */
		xhci_command_abort(sc);
		sc->sc_cmd_trb = NULL;
		splx(s);
		return (error);
	}
	splx(s);

	/* The interrupt handler stored the completion event here. */
	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));

	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
		return (0);

#ifdef XHCI_DEBUG
	printf("%s: event error code=%d, result=%d  \n", DEVNAME(sc),
	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
	xhci_dump_trb(trb0);
#endif
	return (EIO);
}
1938 
/*
 * Abort the currently running command by setting the Command Abort bit
 * in CRCR and polling (up to 2500 * 100us = 250ms) for the Command Ring
 * Running bit to clear.  Returns 0 on success, 1 on timeout.
 */
int
xhci_command_abort(struct xhci_softc *sc)
{
	uint32_t reg;
	int i;

	reg = XOREAD4(sc, XHCI_CRCR_LO);
	/* Nothing to do if the command ring is not running. */
	if ((reg & XHCI_CRCR_LO_CRR) == 0)
		return (0);

	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	for (i = 0; i < 2500; i++) {
		DELAY(100);
		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
		if (!reg)
			break;
	}

	if (reg) {
		printf("%s: command ring abort timeout\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}
1966 
1967 int
1968 xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1969 {
1970 	struct xhci_trb trb;
1971 	int error;
1972 
1973 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
1974 
1975 	trb.trb_paddr = htole64(addr);
1976 	trb.trb_status = 0;
1977 	trb.trb_flags = htole32(
1978 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
1979 	);
1980 
1981 	rw_enter_write(&sc->sc_cmd_lock);
1982 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
1983 	rw_exit_write(&sc->sc_cmd_lock);
1984 	return (error);
1985 }
1986 
1987 int
1988 xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1989 {
1990 	struct xhci_trb trb;
1991 	int error;
1992 
1993 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1994 
1995 	trb.trb_paddr = 0;
1996 	trb.trb_status = 0;
1997 	trb.trb_flags = htole32(
1998 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
1999 	);
2000 
2001 	rw_enter_write(&sc->sc_cmd_lock);
2002 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2003 	rw_exit_write(&sc->sc_cmd_lock);
2004 	return (error);
2005 }
2006 
2007 void
2008 xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
2009 {
2010 	struct xhci_trb trb;
2011 
2012 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
2013 
2014 	trb.trb_paddr = 0;
2015 	trb.trb_status = 0;
2016 	trb.trb_flags = htole32(
2017 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
2018 	);
2019 
2020 	xhci_command_submit(sc, &trb, 0);
2021 }
2022 
2023 void
2024 xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
2025    uint64_t addr)
2026 {
2027 	struct xhci_trb trb;
2028 
2029 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
2030 
2031 	trb.trb_paddr = htole64(addr);
2032 	trb.trb_status = 0;
2033 	trb.trb_flags = htole32(
2034 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
2035 	);
2036 
2037 	xhci_command_submit(sc, &trb, 0);
2038 }
2039 
2040 int
2041 xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
2042 {
2043 	struct xhci_trb trb;
2044 	int error;
2045 
2046 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
2047 
2048 	trb.trb_paddr = 0;
2049 	trb.trb_status = 0;
2050 	if (enable)
2051 		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
2052 	else
2053 		trb.trb_flags = htole32(
2054 			XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
2055 		);
2056 
2057 	rw_enter_write(&sc->sc_cmd_lock);
2058 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2059 	rw_exit_write(&sc->sc_cmd_lock);
2060 	if (error != 0)
2061 		return (EIO);
2062 
2063 	if (enable)
2064 		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));
2065 
2066 	return (0);
2067 }
2068 
2069 int
2070 xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
2071     uint32_t bsr)
2072 {
2073 	struct xhci_trb trb;
2074 	int error;
2075 
2076 	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));
2077 
2078 	trb.trb_paddr = htole64(addr);
2079 	trb.trb_status = 0;
2080 	trb.trb_flags = htole32(
2081 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
2082 	);
2083 
2084 	rw_enter_write(&sc->sc_cmd_lock);
2085 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2086 	rw_exit_write(&sc->sc_cmd_lock);
2087 	return (error);
2088 }
2089 
2090 #ifdef XHCI_DEBUG
/*
 * Submit a No Op command and wait for its completion.  Used only to
 * exercise the command ring when debugging.
 */
int
xhci_cmd_noop(struct xhci_softc *sc)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Build a No Op command TRB. */
	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(XHCI_CMD_NOOP);

	/* Serialize synchronous commands and wait for completion. */
	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}
2108 #endif
2109 
/*
 * Allocate the per-slot software state: the input and output device
 * contexts, and publish the output context in the DCBAA.  Returns 0 on
 * success or ENOMEM if any DMA allocation fails.
 */
int
xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
	int i, error;
	uint8_t *kva;

	/*
	 * Setup input context.  Even with 64 byte context size, it
	 * fits into the smallest supported page size, so use that.
	 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Input context layout: control context, slot context, 31 EP ctxs. */
	sdev->input_ctx = (struct xhci_inctx *)kva;
	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
	for (i = 0; i < 31; i++)
		sdev->ep_ctx[i] =
		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);

	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
	 slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));

	/* Setup output context */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
	if (error) {
		/* Undo the input context allocation on failure. */
		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
		return (ENOMEM);
	}

	memset(&sdev->pipes, 0, sizeof(sdev->pipes));

	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
	    slot, (long long)sdev->octx_dma.paddr));

	/* Hook the output context into the Device Context Base Address Array. */
	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
2155 
/*
 * Release the per-slot software state: detach the output context from
 * the DCBAA, then free both device context DMA buffers.
 */
void
xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];

	/* Clear the DCBAA entry before freeing the context it points to. */
	sc->sc_dcbaa.segs[slot] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);

	memset(sdev, 0, sizeof(struct xhci_soft_dev));
}
2171 
/* Root hub descriptors. */

/* Device descriptor presented for the integrated root hub. */
const usb_device_descriptor_t xhci_devd = {
	USB_DEVICE_DESCRIPTOR_SIZE,
	UDESC_DEVICE,		/* type */
	{0x00, 0x03},		/* USB version */
	UDCLASS_HUB,		/* class */
	UDSUBCLASS_HUB,		/* subclass */
	UDPROTO_HSHUBSTT,	/* protocol */
	9,			/* max packet */
	{0},{0},{0x00,0x01},	/* device id */
	1,2,0,			/* string indexes */
	1			/* # of configurations */
};
2185 
/* Configuration descriptor for the root hub: one interface, one EP. */
const usb_config_descriptor_t xhci_confd = {
	USB_CONFIG_DESCRIPTOR_SIZE,
	UDESC_CONFIG,
	{USB_CONFIG_DESCRIPTOR_SIZE +
	 USB_INTERFACE_DESCRIPTOR_SIZE +
	 USB_ENDPOINT_DESCRIPTOR_SIZE},
	1,
	1,
	0,
	UC_BUS_POWERED | UC_SELF_POWERED,
	0                      /* max power */
};
2198 
/* Interface descriptor for the root hub (hub class). */
const usb_interface_descriptor_t xhci_ifcd = {
	USB_INTERFACE_DESCRIPTOR_SIZE,
	UDESC_INTERFACE,
	0,
	0,
	1,
	UICLASS_HUB,
	UISUBCLASS_HUB,
	UIPROTO_HSHUBSTT,
	0
};
2210 
/* Status change (interrupt IN) endpoint descriptor for the root hub. */
const usb_endpoint_descriptor_t xhci_endpd = {
	USB_ENDPOINT_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT,
	UE_DIR_IN | XHCI_INTR_ENDPT,
	UE_INTERRUPT,
	{2, 0},                 /* max 15 ports */
	255
};
2219 
/* SuperSpeed endpoint companion descriptor for the root hub. */
const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT_SS_COMP,
	0,
	0,
	{0, 0}
};
2227 
/*
 * Hub descriptor template; the port count and hub characteristics are
 * filled in from the controller's registers in xhci_root_ctrl_start().
 */
const usb_hub_descriptor_t xhci_hubd = {
	USB_HUB_DESCRIPTOR_SIZE,
	UDESC_SS_HUB,
	0,
	{0,0},
	0,
	0,
	{0},
};
2237 
/*
 * Abort an in-flight transfer: stop the endpoint, and if the hardware
 * had not already completed the transfer, move the endpoint's dequeue
 * pointer past the transfer's TRBs and sleep until that command is
 * done.  Must be called at softusb IPL.
 */
void
xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	int error;

	splsoftassert(IPL_SOFTUSB);

	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));

	/* XXX The stack should not call abort() in this case. */
	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
		xfer->status = status;
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);
		usb_transfer_complete(xfer);
		return;
	}

	/* Transfer is already done. */
	if (xfer->status != USBD_IN_PROGRESS) {
		DPRINTF(("%s: already done \n", __func__));
		return;
	}

	/* Prevent any timeout to kick in. */
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);

	/* Indicate that we are aborting this transfer. */
	xp->halted = status;
	xp->aborted_xfer = xfer;

	/* Stop the endpoint and wait until the hardware says so. */
	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) {
		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));
		/* Assume the device is gone. */
		xp->halted = 0;
		xp->aborted_xfer = NULL;
		xfer->status = status;
		usb_transfer_complete(xfer);
		return;
	}

	/*
	 * The transfer was already completed when we stopped the
	 * endpoint, no need to move the dequeue pointer past its
	 * TRBs.
	 */
	if (xp->aborted_xfer == NULL) {
		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
		xp->halted = 0;
		return;
	}

	/*
	 * At this stage the endpoint has been stopped, so update its
	 * dequeue pointer past the last TRB of the transfer.
	 *
	 * Note: This assumes that only one transfer per endpoint has
	 *	 pending TRBs on the ring.
	 */
	xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
	    DEQPTR(xp->ring) | xp->ring.toggle);
	/* Woken up on &xp when the Set TR Dequeue command completes. */
	error = tsleep_nsec(xp, PZERO, "xhciab", XHCI_CMD_TIMEOUT);
	if (error)
		printf("%s: timeout aborting transfer\n", DEVNAME(sc));
}
2309 
2310 void
2311 xhci_timeout(void *addr)
2312 {
2313 	struct usbd_xfer *xfer = addr;
2314 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2315 
2316 	if (sc->sc_bus.dying) {
2317 		xhci_timeout_task(addr);
2318 		return;
2319 	}
2320 
2321 	usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
2322 	    USB_TASK_TYPE_ABORT);
2323 	usb_add_task(xfer->device, &xfer->abort_task);
2324 }
2325 
/*
 * Task scheduled by xhci_timeout(): abort the timed-out transfer at
 * softusb IPL.
 */
void
xhci_timeout_task(void *addr)
{
	struct usbd_xfer *xfer = addr;
	int s;

	s = splusb();
	xhci_abort_xfer(xfer, USBD_TIMEOUT);
	splx(s);
}
2336 
2337 usbd_status
2338 xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
2339 {
2340 	usbd_status err;
2341 
2342 	err = usb_insert_transfer(xfer);
2343 	if (err)
2344 		return (err);
2345 
2346 	return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2347 }
2348 
/*
 * Emulated control endpoint of the root hub.  Standard device requests
 * are answered from the static descriptors above; hub class requests
 * are translated into reads and writes of the controller's PORTSC and
 * capability registers.  The transfer always completes synchronously.
 */
usbd_status
xhci_root_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	usb_port_status_t ps;
	usb_device_request_t *req;
	void *buf = NULL;
	usb_device_descriptor_t devd;
	usb_hub_descriptor_t hubd;
	usbd_status err;
	int s, len, value, index;
	int l, totlen = 0;
	int port, i;
	uint32_t v;

	KASSERT(xfer->rqflags & URQ_REQUEST);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	req = &xfer->request;

	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
	    req->bmRequestType, req->bRequest));

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

	if (len != 0)
		buf = KERNADDR(&xfer->dmabuf, 0);

	/* Combine bRequest and bmRequestType into a single switch value. */
#define C(x,y) ((x) | ((y) << 8))
	switch(C(req->bRequest, req->bmRequestType)) {
	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
		/*
		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
		 * for the integrated root hub.
		 */
		break;
	case C(UR_GET_CONFIG, UT_READ_DEVICE):
		if (len > 0) {
			*(uint8_t *)buf = sc->sc_conf;
			totlen = 1;
		}
		break;
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
		switch(value >> 8) {
		case UDESC_DEVICE:
			if ((value & 0xff) != 0) {
				err = USBD_IOERROR;
				goto ret;
			}
			devd = xhci_devd;
			USETW(devd.idVendor, sc->sc_id_vendor);
			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
			memcpy(buf, &devd, l);
			break;
		/*
		 * We can't really operate at another speed, but the spec says
		 * we need this descriptor.
		 */
		case UDESC_OTHER_SPEED_CONFIGURATION:
		case UDESC_CONFIG:
			if ((value & 0xff) != 0) {
				err = USBD_IOERROR;
				goto ret;
			}
			/* Config, interface and endpoint descriptors in one go. */
			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
			memcpy(buf, &xhci_confd, l);
			((usb_config_descriptor_t *)buf)->bDescriptorType =
			    value >> 8;
			buf = (char *)buf + l;
			len -= l;
			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
			totlen += l;
			memcpy(buf, &xhci_ifcd, l);
			buf = (char *)buf + l;
			len -= l;
			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
			totlen += l;
			memcpy(buf, &xhci_endpd, l);
			break;
		case UDESC_STRING:
			if (len == 0)
				break;
			*(u_int8_t *)buf = 0;
			totlen = 1;
			switch (value & 0xff) {
			case 0: /* Language table */
				totlen = usbd_str(buf, len, "\001");
				break;
			case 1: /* Vendor */
				totlen = usbd_str(buf, len, sc->sc_vendor);
				break;
			case 2: /* Product */
				totlen = usbd_str(buf, len, "xHCI root hub");
				break;
			}
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
		if (len > 0) {
			*(uint8_t *)buf = 0;
			totlen = 1;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_DEVICE):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
			totlen = 2;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus, 0);
			totlen = 2;
		}
		break;
	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
		if (value >= USB_MAX_DEVICES) {
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
		if (value != 0 && value != 1) {
			err = USBD_IOERROR;
			goto ret;
		}
		sc->sc_conf = value;
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
		err = USBD_IOERROR;
		goto ret;
	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
		break;
	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
		break;
	/* Hub requests */
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
		    "port=%d feature=%d\n", index, value));
		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		/* Mask out write-one-to-clear bits before writing back. */
		port = XHCI_PORTSC(index);
		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
		switch (value) {
		case UHF_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			/* TODO */
			break;
		case UHF_PORT_POWER:
			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
			break;
		case UHF_PORT_INDICATOR:
			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
			break;
		case UHF_C_PORT_CONNECTION:
			XOWRITE4(sc, port, v | XHCI_PS_CSC);
			break;
		case UHF_C_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PEC);
			break;
		case UHF_C_PORT_SUSPEND:
		case UHF_C_PORT_LINK_STATE:
			XOWRITE4(sc, port, v | XHCI_PS_PLC);
			break;
		case UHF_C_PORT_OVER_CURRENT:
			XOWRITE4(sc, port, v | XHCI_PS_OCC);
			break;
		case UHF_C_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_C_BH_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_WRC);
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;

	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
		if (len == 0)
			break;
		if ((value & 0xff) != 0) {
			err = USBD_IOERROR;
			goto ret;
		}
		/* Build the hub descriptor from the capability registers. */
		v = XREAD4(sc, XHCI_HCCPARAMS);
		hubd = xhci_hubd;
		hubd.bNbrPorts = sc->sc_noport;
		USETW(hubd.wHubCharacteristics,
		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
		for (i = 1; i <= sc->sc_noport; i++) {
			v = XOREAD4(sc, XHCI_PORTSC(i));
			if (v & XHCI_PS_DR)
				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
		}
		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
		l = min(len, hubd.bDescLength);
		totlen = l;
		memcpy(buf, &hubd, l);
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
		if (len != 16) {
			err = USBD_IOERROR;
			goto ret;
		}
		memset(buf, 0, len);
		totlen = len;
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
		    index));
		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		if (len != 4) {
			err = USBD_IOERROR;
			goto ret;
		}
		/* Translate PORTSC bits into USB port status/change bits. */
		v = XOREAD4(sc, XHCI_PORTSC(index));
		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
		i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v));
		switch (XHCI_PS_SPEED(v)) {
		case XHCI_SPEED_FULL:
			i |= UPS_FULL_SPEED;
			break;
		case XHCI_SPEED_LOW:
			i |= UPS_LOW_SPEED;
			break;
		case XHCI_SPEED_HIGH:
			i |= UPS_HIGH_SPEED;
			break;
		case XHCI_SPEED_SUPER:
		default:
			break;
		}
		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PR)	i |= UPS_RESET;
		if (v & XHCI_PS_PP)	{
			if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL &&
			    XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH)
				i |= UPS_PORT_POWER;
			else
				i |= UPS_PORT_POWER_SS;
		}
		USETW(ps.wPortStatus, i);
		i = 0;
		if (v & XHCI_PS_CSC)    i |= UPS_C_CONNECT_STATUS;
		if (v & XHCI_PS_PEC)    i |= UPS_C_PORT_ENABLED;
		if (v & XHCI_PS_OCC)    i |= UPS_C_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
		USETW(ps.wPortChange, i);
		l = min(len, sizeof ps);
		memcpy(buf, &ps, l);
		totlen = l;
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
		err = USBD_IOERROR;
		goto ret;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):

		/* High byte of wIndex selects LPM for UHF_PORT_SUSPEND. */
		i = index >> 8;
		index &= 0x00ff;

		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		port = XHCI_PORTSC(index);
		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;

		switch (value) {
		case UHF_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
				err = USBD_IOERROR;
				goto ret;
			}
			XOWRITE4(sc, port, v |
			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
			break;
		case UHF_PORT_RESET:
			DPRINTFN(6, ("reset port %d\n", index));
			XOWRITE4(sc, port, v | XHCI_PS_PR);
			break;
		case UHF_PORT_POWER:
			DPRINTFN(3, ("set port power %d\n", index));
			XOWRITE4(sc, port, v | XHCI_PS_PP);
			break;
		case UHF_PORT_INDICATOR:
			DPRINTFN(3, ("set port indicator %d\n", index));

			v &= ~XHCI_PS_SET_PIC(3);
			v |= XHCI_PS_SET_PIC(1);

			XOWRITE4(sc, port, v);
			break;
		case UHF_C_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_C_BH_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_WRC);
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
		break;
	default:
		err = USBD_IOERROR;
		goto ret;
	}
	xfer->actlen = totlen;
	err = USBD_NORMAL_COMPLETION;
ret:
	xfer->status = err;
	s = splusb();
	usb_transfer_complete(xfer);
	splx(s);
	return (err);
}
2710 
2711 
/* Pipe method stub for operations that require no work. */
void
xhci_noop(struct usbd_xfer *xfer)
{
}
2716 
2717 
2718 usbd_status
2719 xhci_root_intr_transfer(struct usbd_xfer *xfer)
2720 {
2721 	usbd_status err;
2722 
2723 	err = usb_insert_transfer(xfer);
2724 	if (err)
2725 		return (err);
2726 
2727 	return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2728 }
2729 
usbd_status
xhci_root_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Keep a reference to the pending root hub interrupt transfer. */
	sc->sc_intrxfer = xfer;

	return (USBD_IN_PROGRESS);
}
2742 
/*
 * Abort the pending root hub interrupt transfer.  There is no hardware
 * to stop; just forget the xfer and complete it as cancelled.
 */
void
xhci_root_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	int s;

	sc->sc_intrxfer = NULL;

	xfer->status = USBD_CANCELLED;
	s = splusb();
	usb_transfer_complete(xfer);
	splx(s);
}
2756 
/* Nothing to do when a root hub interrupt transfer completes. */
void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
}
2761 
2762 /*
2763  * Number of packets remaining in the TD after the corresponding TRB.
2764  *
2765  * Section 4.11.2.4 of xHCI specification r1.1.
2766  */
2767 static inline uint32_t
2768 xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len)
2769 {
2770 	uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2771 
2772 	if (len == 0)
2773 		return XHCI_TRB_TDREM(0);
2774 
2775 	npkt = howmany(remain - len, UE_GET_SIZE(mps));
2776 	if (npkt > 31)
2777 		npkt = 31;
2778 
2779 	return XHCI_TRB_TDREM(npkt);
2780 }
2781 
2782 /*
2783  * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC).
2784  *
2785  * Section 4.11.2.3  of xHCI specification r1.1.
2786  */
2787 static inline uint32_t
2788 xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc)
2789 {
2790 	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2791 	uint32_t maxb, tdpc, residue, tbc;
2792 
2793 	/* Transfer Descriptor Packet Count, section 4.14.1. */
2794 	tdpc = howmany(len, UE_GET_SIZE(mps));
2795 	if (tdpc == 0)
2796 		tdpc = 1;
2797 
2798 	/* Transfer Burst Count */
2799 	maxb = xhci_pipe_maxburst(xfer->pipe);
2800 	tbc = howmany(tdpc, maxb + 1) - 1;
2801 
2802 	/* Transfer Last Burst Packet Count */
2803 	if (xfer->device->speed == USB_SPEED_SUPER) {
2804 		residue = tdpc % (maxb + 1);
2805 		if (residue == 0)
2806 			*tlbpc = maxb;
2807 		else
2808 			*tlbpc = residue - 1;
2809 	} else {
2810 		*tlbpc = tdpc - 1;
2811 	}
2812 
2813 	return (tbc);
2814 }
2815 
2816 usbd_status
2817 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2818 {
2819 	usbd_status err;
2820 
2821 	err = usb_insert_transfer(xfer);
2822 	if (err)
2823 		return (err);
2824 
2825 	return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2826 }
2827 
/*
 * Queue a control transfer: a Setup TRB, an optional Data TRB and a
 * Status TRB, then ring the slot's doorbell.  The Setup TRB's cycle
 * bit is flipped last so the controller never sees a partial TD.
 */
usbd_status
xhci_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t flags, len = UGETW(xfer->request.wLength);
	uint8_t toggle;
	int s;

	KASSERT(xfer->rqflags & URQ_REQUEST);

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* Worst case: setup + data + status TRB. */
	if (xp->free_trbs < 3)
		return (USBD_NOMEM);

	if (len != 0)
		usb_syncmem(&xfer->dmabuf, 0, len,
		    usbd_xfer_isread(xfer) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/* We'll toggle the setup TRB once we're finished with the stages. */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, 0);

	/* Written with the inverted cycle bit; fixed up below. */
	flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | (toggle ^ 1);
	if (len != 0) {
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_TRT_IN;
		else
			flags |= XHCI_TRB_TRT_OUT;
	}

	/* The 8-byte SETUP packet is carried inline in the TRB (IDT). */
	memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr));
	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
	trb0->trb_flags = htole32(flags);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Data TRB */
	if (len != 0) {
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);

		flags = XHCI_TRB_TYPE_DATA | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP;

		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, len, len)
		);
		trb->trb_flags = htole32(flags);

		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);
	}

	/* Status TRB */
	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);

	/* The status stage runs in the opposite direction of the data. */
	flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle;
	if (len == 0 || !usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_DIR_IN;

	trb->trb_paddr = 0;
	trb->trb_status = htole32(XHCI_TRB_INTR(0));
	trb->trb_flags = htole32(flags);

	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	/* Setup TRB */
	/* Flip the cycle bit last to hand the whole TD to the controller. */
	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;
	if (xfer->timeout && !sc->sc_bus.use_polling) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
2923 
/* Abort a pending or running control transfer as cancelled. */
void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2929 
2930 usbd_status
2931 xhci_device_generic_transfer(struct usbd_xfer *xfer)
2932 {
2933 	usbd_status err;
2934 
2935 	err = usb_insert_transfer(xfer);
2936 	if (err)
2937 		return (err);
2938 
2939 	return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2940 }
2941 
/*
 * Queue a bulk/interrupt transfer as a chain of Normal TRBs, splitting
 * on 64k boundaries and optionally appending a zero-length TD.  The
 * first TRB's cycle bit is flipped last so the controller never sees a
 * partially written chain.
 */
usbd_status
xhci_device_generic_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t len, remain, flags;
	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
	uint8_t toggle;
	int s, i, ntrb, zerotd = 0;

	KASSERT(!(xfer->rqflags & URQ_REQUEST));

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* How many TRBs do we need for this transfer? */
	ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE);

	/* If the buffer crosses a 64k boundary, we need one more. */
	len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
	if (len < xfer->length)
		ntrb = howmany(xfer->length - len, XHCI_TRB_MAXSIZE) + 1;
	else
		len = xfer->length;

	/* If we need to append a zero length packet, we need one more. */
	if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) &&
	    (xfer->length % UE_GET_SIZE(mps) == 0))
		zerotd = 1;

	if (xp->free_trbs < (ntrb + zerotd))
		return (USBD_NOMEM);

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/* We'll toggle the first TRB once we're finished with the chain. */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
	/* Written with the inverted cycle bit; fixed up below. */
	flags = XHCI_TRB_TYPE_NORMAL | (toggle ^ 1);
	if (usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_ISP;
	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
	trb0->trb_status = htole32(
	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
	    xhci_xfer_tdsize(xfer, xfer->length, len)
	);
	trb0->trb_flags = htole32(flags);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	remain = xfer->length - len;
	paddr += len;

	/* Chain more TRBs if needed. */
	for (i = ntrb - 1; i > 0; i--) {
		len = min(remain, XHCI_TRB_MAXSIZE);

		/* Next (or Last) TRB. */
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
		flags = XHCI_TRB_TYPE_NORMAL | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_ISP;
		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

		trb->trb_paddr = htole64(paddr);
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, remain, len)
		);
		trb->trb_flags = htole32(flags);

		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);

		remain -= len;
		paddr += len;
	}

	/* Do we need to issue a zero length transfer? */
	if (zerotd == 1) {
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, -1);
		trb->trb_paddr = 0;
		trb->trb_status = 0;
		trb->trb_flags = htole32(XHCI_TRB_TYPE_NORMAL | XHCI_TRB_IOC | toggle);
		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
		    BUS_DMASYNC_PREWRITE);
	}

	/* First TRB. */
	/* Flip the cycle bit last to hand the whole chain to the controller. */
	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;
	if (xfer->timeout && !sc->sc_bus.use_polling) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
3057 
/* Completion hook: re-arm repeating (interrupt) transfers. */
void
xhci_device_generic_done(struct usbd_xfer *xfer)
{
	/* Only happens with interrupt transfers. */
	if (xfer->pipe->repeat) {
		xfer->actlen = 0;
		xhci_device_generic_start(xfer);
	}
}
3067 
/* Abort a pending or running bulk/interrupt transfer as cancelled. */
void
xhci_device_generic_abort(struct usbd_xfer *xfer)
{
	/* A repeating (interrupt) pipe may only abort its own intrxfer. */
	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);

	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
3075 
3076 usbd_status
3077 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
3078 {
3079 	usbd_status err;
3080 
3081 	err = usb_insert_transfer(xfer);
3082 	if (err && err != USBD_IN_PROGRESS)
3083 		return (err);
3084 
3085 	return (xhci_device_isoc_start(xfer));
3086 }
3087 
3088 usbd_status
3089 xhci_device_isoc_start(struct usbd_xfer *xfer)
3090 {
	/*
	 * Queue every isochronous frame of ``xfer'' as a chain of TRBs
	 * on the endpoint's transfer ring and ring the slot doorbell.
	 * Completion is asynchronous; on success the transfer is left
	 * in the USBD_IN_PROGRESS state.
	 */
3091 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
3092 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
3093 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
3094 	struct xhci_trb *trb0, *trb;
3095 	uint32_t len, remain, flags;
3096 	uint64_t paddr;
3097 	uint32_t tbc, tlbpc;
	/* Note: ``ntrb'' is recomputed from the frame lengths below. */
3098 	int s, i, j, ntrb = xfer->nframes;
3099 	uint8_t toggle;
3100 
	/* Isochronous transfers never carry a control request. */
3101 	KASSERT(!(xfer->rqflags & URQ_REQUEST));
3102 
3103 	/*
3104 	 * To allow continuous transfers, above we start all transfers
3105 	 * immediately. However, we're still going to get usbd_start_next call
3106 	 * this when another xfer completes. So, check if this is already
3107 	 * in progress or not.
3108 	 */
3109 	if (xx->ntrb > 0)
3110 		return (USBD_IN_PROGRESS);
3111 
3112 	if (sc->sc_bus.dying || xp->halted)
3113 		return (USBD_IOERROR);
3114 
3115 	/* Why would you do that anyway? */
3116 	if (sc->sc_bus.use_polling)
3117 		return (USBD_INVAL);
3118 
3119 	paddr = DMAADDR(&xfer->dmabuf, 0);
3120 
	/*
	 * First pass: count the TRBs needed for the whole transfer so we
	 * can bail out early if the ring doesn't have enough free slots.
	 */
3121 	/* How many TRBs do we need for all transfers? */
3122 	for (i = 0, ntrb = 0; i < xfer->nframes; i++) {
3123 		/* How many TRBs do we need for this transfer? */
3124 		ntrb += howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
3125 
3126 		/* If the buffer crosses a 64k boundary, we need one more. */
3127 		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
3128 		if (len < xfer->frlengths[i])
3129 			ntrb++;
3130 
3131 		paddr += xfer->frlengths[i];
3132 	}
3133 
3134 	if (xp->free_trbs < ntrb)
3135 		return (USBD_NOMEM);
3136 
	/* Flush the data buffer before the controller accesses it. */
3137 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
3138 	    usbd_xfer_isread(xfer) ?
3139 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
3140 
3141 	paddr = DMAADDR(&xfer->dmabuf, 0);
3142 
	/* Second pass: build one TRB chain (TD) per isochronous frame. */
3143 	for (i = 0, trb0 = NULL; i < xfer->nframes; i++) {
3144 		/* How many TRBs do we need for this transfer? */
3145 		ntrb = howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
3146 
3147 		/* If the buffer crosses a 64k boundary, we need one more. */
3148 		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
3149 		if (len < xfer->frlengths[i])
3150 			ntrb++;
3151 		else
3152 			len = xfer->frlengths[i];
3153 
		/*
		 * A single frame needs at most two TRBs: its length fits
		 * in one XHCI_TRB_MAXSIZE TRB plus one 64k-boundary split.
		 */
3154 		KASSERT(ntrb < 3);
3155 
3156 		/*
3157 		 * We'll commit the first TRB once we're finished with the
3158 		 * chain.
3159 		 */
3160 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
3161 
3162 		DPRINTFN(4, ("%s:%d: ring %p trb0_idx %lu ntrb %d paddr %llx "
3163 		    "len %u\n", __func__, __LINE__,
3164 		    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, paddr,
3165 		    len));
3166 
3167 		/* Record the first TRB so we can toggle later. */
3168 		if (trb0 == NULL) {
3169 			trb0 = trb;
			/*
			 * Invert the cycle bit for the very first TRB of the
			 * whole transfer so the controller ignores the chain
			 * until it is flipped back below, publishing all the
			 * queued TRBs at once.
			 */
3170 			toggle ^= 1;
3171 		}
3172 
		/* SIA: let the controller start the isoc TD ASAP. */
3173 		flags = XHCI_TRB_TYPE_ISOCH | XHCI_TRB_SIA | toggle;
3174 		if (usbd_xfer_isread(xfer))
3175 			flags |= XHCI_TRB_ISP;
		/* Last TRB of the TD interrupts; earlier ones chain onward. */
3176 		flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3177 
		/* Burst accounting fields (TBC/TLBPC) required for isoc TRBs. */
3178 		tbc = xhci_xfer_tbc(xfer, xfer->frlengths[i], &tlbpc);
3179 		flags |= XHCI_TRB_ISOC_TBC(tbc) | XHCI_TRB_ISOC_TLBPC(tlbpc);
3180 
3181 		trb->trb_paddr = htole64(paddr);
3182 		trb->trb_status = htole32(
3183 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3184 		    xhci_xfer_tdsize(xfer, xfer->frlengths[i], len)
3185 		);
3186 		trb->trb_flags = htole32(flags);
3187 
		/* Make the TRB contents visible to the controller. */
3188 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3189 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3190 		    BUS_DMASYNC_PREWRITE);
3191 
3192 		remain = xfer->frlengths[i] - len;
3193 		paddr += len;
3194 
3195 		/* Chain more TRBs if needed. */
3196 		for (j = ntrb - 1; j > 0; j--) {
3197 			len = min(remain, XHCI_TRB_MAXSIZE);
3198 
3199 			/* Next (or Last) TRB. */
3200 			trb = xhci_xfer_get_trb(sc, xfer, &toggle, (j == 1));
3201 			flags = XHCI_TRB_TYPE_NORMAL | toggle;
3202 			if (usbd_xfer_isread(xfer))
3203 				flags |= XHCI_TRB_ISP;
3204 			flags |= (j == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3205 			DPRINTFN(3, ("%s:%d: ring %p trb0_idx %lu ntrb %d "
3206 			    "paddr %llx len %u\n", __func__, __LINE__,
3207 			    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb,
3208 			    paddr, len));
3209 
3210 			trb->trb_paddr = htole64(paddr);
3211 			trb->trb_status = htole32(
3212 			    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3213 			    xhci_xfer_tdsize(xfer, remain, len)
3214 			);
3215 			trb->trb_flags = htole32(flags);
3216 
3217 			bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3218 			    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3219 			    BUS_DMASYNC_PREWRITE);
3220 
3221 			remain -= len;
3222 			paddr += len;
3223 		}
3224 
		/*
		 * NOTE(review): frame lengths are zeroed here; presumably the
		 * actual transferred counts are filled back in by the event
		 * (completion) path — confirm against xhci_event_xfer().
		 */
3225 		xfer->frlengths[i] = 0;
3226 	}
3227 
	/*
	 * First TRB: now that the whole chain is built and synced, flip the
	 * deferred cycle bit to hand the TD(s) over to the controller.
	 */
3228 	/* First TRB. */
3229 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
3230 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3231 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3232 	    BUS_DMASYNC_PREWRITE);
3233 
3234 	s = splusb();
	/* Ring the doorbell: tell the xHC this endpoint has work queued. */
3235 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
3236 
3237 	xfer->status = USBD_IN_PROGRESS;
3238 
	/* (Re)arm the software timeout, if the caller requested one. */
3239 	if (xfer->timeout) {
3240 		timeout_del(&xfer->timeout_handle);
3241 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
3242 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
3243 	}
3244 	splx(s);
3245 
3246 	return (USBD_IN_PROGRESS);
3247 }
3248