/* $OpenBSD: xhci.c,v 1.135 2024/10/08 19:42:31 kettenis Exp $ */

/*
 * Copyright (c) 2014-2015 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/endian.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/xhcireg.h>
#include <dev/usb/xhcivar.h>

struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL, CD_SKIPHIBERNATE
};

#ifdef XHCI_DEBUG
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))
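/*
 * TRBOFF() gives the byte offset of a TRB within its ring, as used for
 * partial bus_dmamap_sync(9) calls; DEQPTR() gives the bus address of
 * the TRB at the ring's current index.  For example, with index 2,
 * DEQPTR(r) is r.dma.paddr + 2 * sizeof(struct xhci_trb).
 */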

struct pool *xhcixfer;

struct xhci_pipe {
	struct usbd_pipe	pipe;

	uint8_t			dci;
	uint8_t			slot;	/* Device slot ID */
	struct xhci_ring	ring;

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];
	struct usbd_xfer	*aborted_xfer;
	int			 halted;
	size_t			 free_trbs;
	int			 skip;
#define TRB_PROCESSED_NO	0
#define TRB_PROCESSED_YES	1
#define TRB_PROCESSED_SHORT	2
	uint8_t			 trb_processed[XHCI_MAX_XFER];
};

int	xhci_reset(struct xhci_softc *);
void	xhci_suspend(struct xhci_softc *);
int	xhci_intr1(struct xhci_softc *);
void	xhci_event_dequeue(struct xhci_softc *);
void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
int	xhci_event_xfer_generic(struct xhci_softc *, struct usbd_xfer *,
	    struct xhci_pipe *, uint32_t, int, uint8_t, uint8_t, uint8_t);
int	xhci_event_xfer_isoc(struct usbd_xfer *, struct xhci_pipe *,
	    uint32_t, int, uint8_t);
void	xhci_event_command(struct xhci_softc *, uint64_t);
void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
int	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
int	xhci_scratchpad_alloc(struct xhci_softc *, int);
void	xhci_scratchpad_free(struct xhci_softc *);
int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
void	xhci_softdev_free(struct xhci_softc *, uint8_t);
int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
	    size_t);
void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);

struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
	    uint8_t *, int);
void	xhci_xfer_done(struct usbd_xfer *xfer);
/* xHCI command helpers. */
int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
int	xhci_command_abort(struct xhci_softc *);

void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
int	xhci_cmd_set_address(struct xhci_softc *, uint8_t, uint64_t, uint32_t);
#ifdef XHCI_DEBUG
int	xhci_cmd_noop(struct xhci_softc *);
#endif

/* XXX should be part of the Bus interface. */
void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
void	xhci_pipe_close(struct usbd_pipe *);
void	xhci_noop(struct usbd_xfer *);

void	xhci_timeout(void *);
void	xhci_timeout_task(void *);

/* USBD Bus Interface. */
usbd_status	  xhci_pipe_open(struct usbd_pipe *);
int		  xhci_setaddr(struct usbd_device *, int);
void		  xhci_softintr(void *);
void		  xhci_poll(struct usbd_bus *);
struct usbd_xfer *xhci_allocx(struct usbd_bus *);
void		  xhci_freex(struct usbd_bus *, struct usbd_xfer *);

usbd_status	  xhci_root_ctrl_transfer(struct usbd_xfer *);
usbd_status	  xhci_root_ctrl_start(struct usbd_xfer *);

usbd_status	  xhci_root_intr_transfer(struct usbd_xfer *);
usbd_status	  xhci_root_intr_start(struct usbd_xfer *);
void		  xhci_root_intr_abort(struct usbd_xfer *);
void		  xhci_root_intr_done(struct usbd_xfer *);

usbd_status	  xhci_device_ctrl_transfer(struct usbd_xfer *);
usbd_status	  xhci_device_ctrl_start(struct usbd_xfer *);
void		  xhci_device_ctrl_abort(struct usbd_xfer *);

usbd_status	  xhci_device_generic_transfer(struct usbd_xfer *);
usbd_status	  xhci_device_generic_start(struct usbd_xfer *);
void		  xhci_device_generic_abort(struct usbd_xfer *);
void		  xhci_device_generic_done(struct usbd_xfer *);

usbd_status	  xhci_device_isoc_transfer(struct usbd_xfer *);
usbd_status	  xhci_device_isoc_start(struct usbd_xfer *);

#define XHCI_INTR_ENDPT 1

const struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

const struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

const struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

const struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

const struct usbd_pipe_methods xhci_device_intr_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

const struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

const struct usbd_pipe_methods xhci_device_isoc_methods = {
	.transfer = xhci_device_isoc_transfer,
	.start = xhci_device_isoc_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

#ifdef XHCI_DEBUG
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
#endif

int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
	    void **, bus_size_t, bus_size_t, bus_size_t);
void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);

int
usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
{
	int error;

	dma->tag = bus->dmatag;
	dma->size = size;

	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
	    BUS_DMA_NOWAIT | bus->dmaflags, &dma->map);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO | bus->dmaflags);
	if (error != 0)
		goto destroy;

	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		goto free;

	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto unmap;

	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return (0);

unmap:
	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
free:
	bus_dmamem_free(dma->tag, &dma->seg, 1);
destroy:
	bus_dmamap_destroy(dma->tag, dma->map);
	return (error);
}

void
usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
{
	if (dma->map != NULL) {
		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(bus->dmatag, dma->map);
		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
		bus_dmamap_destroy(bus->dmatag, dma->map);
		dma->map = NULL;
	}
}

int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
	printf(", xHCI %x.%x\n", sc->sc_version >> 8, sc->sc_version & 0xff);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_USBHC, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
		    0, "xhcixfer", NULL);
	}

	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	sc->sc_bus.dmaflags |= XHCI_HCC_AC64(hcr) ? BUS_DMA_64BIT : 0;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/* Setup Device Context Base Address Array. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Setup command ring. */
	rw_init(&sc->sc_cmd_lock, "xhcicmd");
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
	    XHCI_CMDS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
	    XHCI_EVTS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
	   XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	return (0);
}

void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;
	int i;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address. */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/*
	 * If we successfully saved the state during suspend, restore
	 * it here.  Otherwise some Intel controllers don't function
	 * correctly after resume.
	 */
	if (sc->sc_saved_state) {
		XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_CRS); /* Restore state */
		hcr = XOREAD4(sc, XHCI_USBSTS);
		for (i = 0; i < 100; i++) {
			usb_delay_ms(&sc->sc_bus, 1);
			hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_RSS;
			if (!hcr)
				break;
		}

		if (hcr)
			printf("%s: restore state timeout\n", DEVNAME(sc));

		sc->sc_saved_state = 0;
	}

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}

int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}

int
xhci_activate(struct device *self, int act)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		sc->sc_bus.use_polling++;
		xhci_reinit(sc);
		sc->sc_bus.use_polling--;
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
		xhci_suspend(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

int
xhci_reset(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr)
		printf("%s: halt timeout\n", DEVNAME(sc));

	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
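	/* Wait for both HCRST and Controller Not Ready (CNR) to clear. */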
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
		if (!hcr)
			break;
	}

	if (hcr) {
		printf("%s: reset timeout\n", DEVNAME(sc));
		return (EIO);
	}

	return (0);
}

void
xhci_suspend(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr) {
		printf("%s: halt timeout\n", DEVNAME(sc));
		xhci_reset(sc);
		return;
	}

	/*
	 * Some Intel controllers will not power down completely
	 * unless they have seen a save state command.  This in turn
	 * will prevent the SoC from reaching its lowest idle state.
	 * So save the state here.
	 */
	if ((sc->sc_flags & XHCI_NOCSS) == 0) {
		XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_CSS); /* Save state */
		hcr = XOREAD4(sc, XHCI_USBSTS);
		for (i = 0; i < 100; i++) {
			usb_delay_ms(&sc->sc_bus, 1);
			hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_SSS;
			if (!hcr)
				break;
		}

		if (hcr) {
			printf("%s: save state timeout\n", DEVNAME(sc));
			xhci_reset(sc);
			return;
		}

		sc->sc_saved_state = 1;
	}

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);
}

void
xhci_reinit(struct xhci_softc *sc)
{
	xhci_reset(sc);
	xhci_ring_reset(sc, &sc->sc_cmd_ring);
	xhci_ring_reset(sc, &sc->sc_evt_ring);

	/* Renesas controllers, at least, need more time to resume. */
	usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);

	xhci_config(sc);
}

int
xhci_intr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_dead)
		return (0);

	/* If we get an interrupt while polling, then just ignore it. */
	if (sc->sc_bus.use_polling) {
		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
		return (0);
	}

	return (xhci_intr1(sc));
}

int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
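	/* A register read of all ones means the controller is gone. */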
	if (intrs == 0xffffffff) {
		sc->sc_bus.dying = 1;
		sc->sc_dead = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		XOWRITE4(sc, XHCI_USBSTS, intrs);
		return (1);
	}

	/* Acknowledge interrupts */
	XOWRITE4(sc, XHCI_USBSTS, intrs);
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	usb_schedsoftintr(&sc->sc_bus);

	return (1);
}

void
xhci_poll(struct usbd_bus *bus)
{
	struct xhci_softc *sc = (struct xhci_softc *)bus;

	if (XOREAD4(sc, XHCI_USBSTS))
		xhci_intr1(sc);
}

void
xhci_softintr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_bus.dying)
		return;

	sc->sc_bus.intr_context++;
	xhci_event_dequeue(sc);
	sc->sc_bus.intr_context--;
}

void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		case XHCI_EVT_HOST_CTRL:
			/* TODO */
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}

	}

	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}

void
xhci_skip_all(struct xhci_pipe *xp)
{
	struct usbd_xfer *xfer, *last;

	if (xp->skip) {
		/*
		 * Find the last transfer to skip; this is necessary
		 * because xhci_xfer_done() posts new transfers which
		 * we don't want to skip.
		 */
		last = SIMPLEQ_FIRST(&xp->pipe.queue);
		if (last == NULL)
			goto done;
		while ((xfer = SIMPLEQ_NEXT(last, next)) != NULL)
			last = xfer;

		do {
			xfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (xfer == NULL)
				goto done;
			DPRINTF(("%s: skipping %p\n", __func__, xfer));
			xfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(xfer);
		} while (xfer != last);
	done:
		xp->skip = 0;
	}
}

void
xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
    uint32_t flags)
{
	struct xhci_pipe *xp;
	struct usbd_xfer *xfer;
	uint8_t dci, slot, code, xfertype;
	uint32_t remain;
	int trb_idx;

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);
	if (slot > sc->sc_noslot) {
		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
		return;
	}

	xp = sc->sc_sdevs[slot].pipes[dci - 1];
	if (xp == NULL) {
		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
		return;
	}

	code = XHCI_TRB_GET_CODE(status);
	remain = XHCI_TRB_REMAIN(status);

	switch (code) {
	case XHCI_CODE_RING_UNDERRUN:
		DPRINTF(("%s: slot %u underrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_RING_OVERRUN:
		DPRINTF(("%s: slot %u overrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_MISSED_SRV:
		DPRINTF(("%s: slot %u missed srv with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xp->skip = 1;
		return;
	default:
		break;
	}

	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, xp->ring.ntrb - 1);
		return;
	}

	xfer = xp->pending_xfers[trb_idx];
	if (xfer == NULL) {
		DPRINTF(("%s: NULL xfer pointer\n", DEVNAME(sc)));
		return;
	}

	if (remain > xfer->length)
		remain = xfer->length;

	xfertype = UE_GET_XFERTYPE(xfer->pipe->endpoint->edesc->bmAttributes);

	switch (xfertype) {
	case UE_BULK:
	case UE_INTERRUPT:
	case UE_CONTROL:
		if (xhci_event_xfer_generic(sc, xfer, xp, remain, trb_idx,
		    code, slot, dci))
			return;
		break;
	case UE_ISOCHRONOUS:
		if (xhci_event_xfer_isoc(xfer, xp, remain, trb_idx, code))
			return;
		break;
	default:
		panic("xhci_event_xfer: unknown xfer type %u", xfertype);
	}

	xhci_xfer_done(xfer);
}

uint32_t
xhci_xfer_length_generic(struct xhci_xfer *xx, struct xhci_pipe *xp,
    int trb_idx)
{
	int	 trb0_idx;
	uint32_t len = 0, type;

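	/*
	 * Index of the first TRB of this transfer.  The last slot of the
	 * ring holds the link TRB, so data TRBs effectively wrap modulo
	 * (ntrb - 1); adding ntrb keeps the difference positive.
	 */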
	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	while (1) {
		type = letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK;
		if (type == XHCI_TRB_TYPE_NORMAL || type == XHCI_TRB_TYPE_DATA)
			len += XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		if (trb0_idx == trb_idx)
			break;
		if (++trb0_idx == xp->ring.ntrb)
			trb0_idx = 0;
	}
	return len;
}

int
xhci_event_xfer_generic(struct xhci_softc *sc, struct usbd_xfer *xfer,
    struct xhci_pipe *xp, uint32_t remain, int trb_idx,
    uint8_t code, uint8_t slot, uint8_t dci)
{
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		if (xfer->actlen == 0) {
			if (remain)
				xfer->actlen =
				    xhci_xfer_length_generic(xx, xp, trb_idx) -
				    remain;
			else
				xfer->actlen = xfer->length;
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		/*
		 * Use values from the transfer TRB instead of the status TRB.
		 */
		if (xfer->actlen == 0)
			xfer->actlen =
			    xhci_xfer_length_generic(xx, xp, trb_idx) - remain;
		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		if (xx->index != trb_idx) {
			DPRINTF(("%s: short xfer %p for %u\n",
			    DEVNAME(sc), xfer, xx->index));
			return (1);
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
		/* Prevent any timeout from kicking in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return (1);
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
			return (1);
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	return (0);
}

int
xhci_event_xfer_isoc(struct usbd_xfer *xfer, struct xhci_pipe *xp,
    uint32_t remain, int trb_idx, uint8_t code)
{
	struct usbd_xfer *skipxfer;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int trb0_idx, frame_idx = 0, skip_trb = 0;

	KASSERT(xx->index >= 0);

	switch (code) {
	case XHCI_CODE_SHORT_XFER:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_SHORT;
		break;
	default:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_YES;
		break;
	}

	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	/* Find the corresponding frame index for this TRB. */
	while (trb0_idx != trb_idx) {
		if ((letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK) == XHCI_TRB_TYPE_ISOCH)
			frame_idx++;
		if (trb0_idx++ == (xp->ring.ntrb - 1))
			trb0_idx = 0;
	}

	/*
	 * If we queued two TRBs for a frame and this is the second TRB,
	 * check if the first TRB needs accounting since it might not
	 * have raised an interrupt if all of its data was received.
	 */
	if ((letoh32(xp->ring.trbs[trb_idx].trb_flags) & XHCI_TRB_TYPE_MASK) ==
	    XHCI_TRB_TYPE_NORMAL) {
		frame_idx--;
		if (trb_idx == 0)
			trb0_idx = xp->ring.ntrb - 2;
		else
			trb0_idx = trb_idx - 1;
		if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_NO) {
			xfer->frlengths[frame_idx] = XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		} else if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_SHORT) {
			skip_trb = 1;
		}
	}

	if (!skip_trb) {
		xfer->frlengths[frame_idx] +=
		    XHCI_TRB_LEN(letoh32(xp->ring.trbs[trb_idx].trb_status)) -
		    remain;
		xfer->actlen += xfer->frlengths[frame_idx];
	}

	if (xx->index != trb_idx)
		return (1);

	if (xp->skip) {
		while (1) {
			skipxfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (skipxfer == xfer || skipxfer == NULL)
				break;
			DPRINTF(("%s: skipping %p\n", __func__, skipxfer));
			skipxfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(skipxfer);
		}
		xp->skip = 0;
	}

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	xfer->status = USBD_NORMAL_COMPLETION;

	return (0);
}

void
xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
{
	struct xhci_trb *trb;
	struct xhci_pipe *xp;
	uint32_t flags;
	uint8_t dci, slot;
	int trb_idx, status;

	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
		return;
	}

	trb = &sc->sc_cmd_ring.trbs[trb_idx];

	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	flags = letoh32(trb->trb_flags);

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);

	switch (flags & XHCI_TRB_TYPE_MASK) {
	case XHCI_CMD_RESET_EP:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Update the dequeue pointer past the last TRB. */
		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
		    DEQPTR(xp->ring) | xp->ring.toggle);
		break;
	case XHCI_CMD_SET_TR_DEQ:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		status = xp->halted;
		xp->halted = 0;
		if (xp->aborted_xfer != NULL) {
			xp->aborted_xfer->status = status;
			xhci_xfer_done(xp->aborted_xfer);
			wakeup(xp);
		}
		break;
	case XHCI_CMD_CONFIG_EP:
	case XHCI_CMD_STOP_EP:
	case XHCI_CMD_DISABLE_SLOT:
	case XHCI_CMD_ENABLE_SLOT:
	case XHCI_CMD_ADDRESS_DEVICE:
	case XHCI_CMD_EVAL_CTX:
	case XHCI_CMD_NOOP:
		/*
		 * All these commands are synchronous.
		 *
		 * If TRBs differ, this could be a delayed result after we
		 * gave up waiting for the expected TRB due to timeout.
		 */
		if (sc->sc_cmd_trb == trb) {
			sc->sc_cmd_trb = NULL;
			wakeup(&sc->sc_cmd_trb);
		}
		break;
	default:
		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
	}
}

void
xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
{
	struct usbd_xfer *xfer = sc->sc_intrxfer;
	uint32_t port = XHCI_TRB_PORTID(paddr);
	uint8_t *p;

	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
		return;
	}

	if (xfer == NULL)
		return;

	p = KERNADDR(&xfer->dmabuf, 0);
	memset(p, 0, xfer->length);

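	/* Set the changed bit for this port in the hub status bitmap. */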
	p[port/8] |= 1 << (port%8);
	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
}

void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

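	/* Release the TRB slots of this transfer, walking backwards. */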
	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xp->free_trbs += xx->zerotd;
	xx->index = -1;
	xx->ntrb = 0;
	xx->zerotd = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}

/*
 * Calculate the Device Context Index (DCI) for endpoints as stated
 * in section 4.5.1 of xHCI specification r1.1.
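 *
 * For example, the default control endpoint (0x00) maps to DCI 1,
 * endpoint 0x81 (IN 1) to DCI 3 and endpoint 0x02 (OUT 2) to DCI 4.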
 */
static inline uint8_t
xhci_ed2dci(usb_endpoint_descriptor_t *ed)
{
	uint8_t dir;

	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);

	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
		dir = 1;
	else
		dir = 0;

	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
}

usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	int error;

	KASSERT(xp->slot == 0);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/*
		 * Get a slot and init the device's contexts.
		 *
		 * Since the control endpoint, represented as the default
		 * pipe, is always opened first, we are dealing with a
		 * new device.  Put a new slot in the ENABLED state.
		 */
		error = xhci_cmd_slot_control(sc, &slot, 1);
		if (error || slot == 0 || slot > sc->sc_noslot)
			return (USBD_INVAL);

		if (xhci_softdev_alloc(sc, slot)) {
			xhci_cmd_slot_control(sc, &slot, 0);
			return (USBD_NOMEM);
		}

		break;
	case UE_ISOCHRONOUS:
		pipe->methods = &xhci_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->methods = &xhci_device_bulk_methods;
		break;
	case UE_INTERRUPT:
		pipe->methods = &xhci_device_intr_methods;
		break;
	default:
		return (USBD_INVAL);
	}

	/*
	 * Our USBD Bus Interface is pipe-oriented but for most of the
	 * operations we need to access a device context, so keep track
	 * of the slot ID in every pipe.
	 */
	if (slot == 0)
		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;

	xp->slot = slot;
	xp->dci = xhci_ed2dci(ed);

	if (xhci_pipe_init(sc, pipe)) {
		xhci_cmd_slot_control(sc, &slot, 0);
		return (USBD_IOERROR);
	}

	return (USBD_NORMAL_COMPLETION);
}

/*
 * Set the maximum Endpoint Service Interface Time (ESIT) payload and
 * the average TRB buffer length for an endpoint.
 */
static inline uint32_t
xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);

	switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
	case UE_CONTROL:
		mep = 0;
		atl = 8;
		break;
	case UE_INTERRUPT:
	case UE_ISOCHRONOUS:
		if (pipe->device->speed == USB_SPEED_SUPER) {
			/*  XXX Read the companion descriptor */
		}

		mep = (UE_GET_TRANS(mps) + 1) * UE_GET_SIZE(mps);
		atl = mep;
		break;
	case UE_BULK:
	default:
		mep = 0;
		atl = 0;
	}

	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
}

static inline uint32_t
xhci_linear_interval(usb_endpoint_descriptor_t *ed)
{
	uint32_t ival = min(max(1, ed->bInterval), 255);

	return (fls(ival) - 1);
}

static inline uint32_t
xhci_exponential_interval(usb_endpoint_descriptor_t *ed)
{
	uint32_t ival = min(max(1, ed->bInterval), 16);

	return (ival - 1);
}
/*
 * Return interval for endpoint expressed in 2^(ival) * 125us.
 *
 * See section 6.2.3.6 of xHCI r1.1 Specification for more details.
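 *
 * For example, a full-speed interrupt endpoint with bInterval 8 (8ms)
 * yields xhci_linear_interval() == 3 and thus ival 6: 2^6 * 125us = 8ms.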
 */
uint32_t
xhci_pipe_interval(struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t speed = pipe->device->speed;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint32_t ival;

	if (xfertype == UE_CONTROL || xfertype == UE_BULK) {
		/* Control and Bulk endpoints never NAK. */
		ival = 0;
	} else {
		switch (speed) {
		case USB_SPEED_FULL:
			if (xfertype == UE_ISOCHRONOUS) {
				/* Convert 1-2^(15)ms into 3-18 */
				ival = xhci_exponential_interval(ed) + 3;
				break;
			}
			/* FALLTHROUGH */
		case USB_SPEED_LOW:
			/* Convert 1-255ms into 3-10 */
			ival = xhci_linear_interval(ed) + 3;
			break;
		case USB_SPEED_HIGH:
		case USB_SPEED_SUPER:
		default:
			/* Convert 1-2^(15) * 125us into 0-15 */
			ival = xhci_exponential_interval(ed);
			break;
		}
	}

	KASSERT(ival <= 15);
	return (XHCI_EPCTX_SET_IVAL(ival));
}

uint32_t
xhci_pipe_maxburst(struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint32_t maxb = 0;

	switch (pipe->device->speed) {
	case USB_SPEED_HIGH:
		if (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)
			maxb = UE_GET_TRANS(mps);
		break;
	case USB_SPEED_SUPER:
		/*  XXX Read the companion descriptor */
	default:
		break;
	}

	return (maxb);
}

static inline uint32_t
xhci_last_valid_dci(struct xhci_pipe **pipes, struct xhci_pipe *ignore)
{
	struct xhci_pipe *lxp;
	int i;

	/* Find the last valid Endpoint Context. */
	for (i = 30; i >= 0; i--) {
		lxp = pipes[i];
		if (lxp != NULL && lxp != ignore)
			return XHCI_SCTX_DCI(lxp->dci);
	}

	return 0;
}

int
xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t speed, cerr = 0;
	uint32_t route = 0, rhport = 0;
	struct usbd_device *hub;

	/*
	 * Calculate the Route String.  Assume that there is no hub with
	 * more than 15 ports and that they all have a depth < 6.  See
	 * section 8.9 of USB 3.1 Specification for more details.
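	 *
	 * For example, a device on port 3 of a hub attached to root
	 * hub port 5 gets Route String 0x3 and Root Hub port number 5.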
	 */
	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
		uint32_t port = hub->powersrc->portno;
		uint32_t depth = hub->myhub->depth;

		route |= port << (4 * (depth - 1));
	}

	/* Get Root Hub port */
	rhport = hub->powersrc->portno;

	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		speed = XHCI_SPEED_LOW;
		break;
	case USB_SPEED_FULL:
		speed = XHCI_SPEED_FULL;
		break;
	case USB_SPEED_HIGH:
		speed = XHCI_SPEED_HIGH;
		break;
	case USB_SPEED_SUPER:
		speed = XHCI_SPEED_SUPER;
		break;
	default:
		return (USBD_INVAL);
	}

	/* Setup the endpoint context */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;

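	/*
	 * The endpoint-context type field encodes IN endpoints as the
	 * transfer type plus 4; control endpoints (UE_CONTROL == 0) are
	 * always type 4.  See section 6.2.3 of xHCI r1.1 Specification.
	 */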
	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |
	    XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |
	    XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DEQPTR(xp->ring) | xp->ring.toggle
	);

	/* Unmask the new endpoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(
	    xhci_last_valid_dci(sdev->pipes, NULL) | XHCI_SCTX_SPEED(speed) |
	    XHCI_SCTX_ROUTE(route)
	);
	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

/* XXX */
#define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
	/*
	 * If we are opening the interrupt pipe of a hub, update its
	 * context before putting it in the CONFIGURED state.
	 */
	if (pipe->device->hub != NULL) {
		int nports = pipe->device->hub->nports;

		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));

		if (UHUB_IS_MTT(pipe->device))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
		);
	}

	/*
	 * If this is a Low or Full Speed device below an external High
	 * Speed hub, it needs some TT love.
	 */
	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
		struct usbd_device *hshub = pipe->device->myhsport->parent;
		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;

		if (UHUB_IS_MTT(hshub))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_HUB_SID(slot) |
		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
		);
	}
#undef UHUB_IS_MTT

	/* Unmask the slot context */
	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

#ifdef XHCI_DEBUG
	struct usbd_device *dev = pipe->device;
	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
	    pipe->endpoint->edesc->bEndpointAddress);
#endif

	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
		return (ENOMEM);

	xp->free_trbs = xp->ring.ntrb;
	xp->halted = 0;

	sdev->pipes[xp->dci - 1] = xp;

	error = xhci_context_setup(sc, pipe);
	if (error)
		return (error);

	if (xp->dci == 1) {
		/*
		 * If we are opening the default pipe, the Slot should
		 * be in the ENABLED state.  Issue an "Address Device"
		 * with BSR=1 to put the device in the DEFAULT state.
		 * We cannot jump directly to the ADDRESSED state with
		 * BSR=0 because some Low/Full speed devices won't accept
		 * a SET_ADDRESS command before we've read their device
		 * descriptor.
		 */
		error = xhci_cmd_set_address(sc, xp->slot,
		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
	} else {
		error = xhci_cmd_configure_ep(sc, xp->slot,
		    sdev->ictx_dma.paddr);
	}

	if (error) {
		xhci_ring_free(sc, &xp->ring);
		return (EIO);
	}

	return (0);
}

void
xhci_pipe_close(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];

	/* Root Hub */
	if (pipe->device->depth == 0)
		return;

	/* Mask the endpoint */
	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
	sdev->input_ctx->add_flags = 0;

	/* Update last valid Endpoint Context */
	sdev->slot_ctx->info_lo &= htole32(~XHCI_SCTX_DCI(31));
	sdev->slot_ctx->info_lo |= htole32(xhci_last_valid_dci(sdev->pipes, xp));

	/* Clear the Endpoint Context */
	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));

	xhci_ring_free(sc, &xp->ring);
	sdev->pipes[xp->dci - 1] = NULL;

	/*
	 * If we are closing the default pipe, the device is probably
	 * gone, so put its slot in the DISABLED state.
	 */
	if (xp->dci == 1) {
		xhci_cmd_slot_control(sc, &xp->slot, 0);
		xhci_softdev_free(sc, xp->slot);
	}
}

/*
 * Transition a device from DEFAULT to ADDRESSED Slot state; this hook
1695  * is needed for Low/Full speed devices.
1696  *
1697  * See section 4.5.3 of USB 3.1 Specification for more details.
1698  */
1699 int
xhci_setaddr(struct usbd_device * dev,int addr)1700 xhci_setaddr(struct usbd_device *dev, int addr)
1701 {
1702 	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
1703 	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
1704 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1705 	int error;
1706 
1707 	/* Root Hub */
1708 	if (dev->depth == 0)
1709 		return (0);
1710 
1711 	KASSERT(xp->dci == 1);
1712 
1713 	error = xhci_context_setup(sc, dev->default_pipe);
1714 	if (error)
1715 		return (error);
1716 
1717 	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);
1718 
1719 #ifdef XHCI_DEBUG
1720 	if (error == 0) {
1721 		struct xhci_sctx *sctx;
1722 		uint8_t addr;
1723 
1724 		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
1725 		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);
1726 
1727 		/* Get output slot context. */
1728 		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
1729 		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
1730 		error = (addr == 0);
1731 
1732 		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
1733 	}
1734 #endif
1735 
1736 	return (error);
1737 }
1738 
1739 struct usbd_xfer *
xhci_allocx(struct usbd_bus * bus)1740 xhci_allocx(struct usbd_bus *bus)
1741 {
1742 	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
1743 }
1744 
1745 void
xhci_freex(struct usbd_bus * bus,struct usbd_xfer * xfer)1746 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
1747 {
1748 	pool_put(xhcixfer, xfer);
1749 }
1750 
1751 int
xhci_scratchpad_alloc(struct xhci_softc * sc,int npage)1752 xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
1753 {
1754 	uint64_t *pte;
1755 	int error, i;
1756 
1757 	/* Allocate the required entry for the table. */
1758 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
1759 	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
1760 	    sc->sc_pagesize);
1761 	if (error)
1762 		return (ENOMEM);
1763 
1764 	/* Allocate pages. XXX does not need to be contiguous. */
1765 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
1766 	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
1767 	if (error) {
1768 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
1769 		return (ENOMEM);
1770 	}
1771 
1772 	for (i = 0; i < npage; i++) {
1773 		pte[i] = htole64(
1774 		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
1775 		);
1776 	}
1777 
1778 	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
1779 	    npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |
1780 	    BUS_DMASYNC_PREWRITE);
1781 
1782 	/* DCBAA entry 0 points to the table of scratchpad pointers. */
1783 	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
1784 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
1785 	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1786 
1787 	sc->sc_spad.npage = npage;
1788 
1789 	return (0);
1790 }
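
/*
 * Illustrative layout only (not driver code): the controller finds its
 * scratchpad through DCBAA entry 0, which points to an array of npage
 * 64-bit page pointers, each referring to one page of sc_pagesize bytes:
 *
 *	sc_dcbaa.segs[0] --> table_dma: [pte[0] ... pte[npage-1]]
 *	pte[i] = pages_dma.paddr + i * sc_pagesize
 *
 * The pages themselves are for the controller's private use; the driver
 * never reads or writes them after setup.
 */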
1791 
1792 void
1793 xhci_scratchpad_free(struct xhci_softc *sc)
1794 {
1795 	sc->sc_dcbaa.segs[0] = 0;
1796 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
1797 	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1798 
1799 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
1800 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
1801 }
1802 
1803 int
1804 xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
1805     size_t alignment)
1806 {
1807 	size_t size;
1808 	int error;
1809 
1810 	size = ntrb * sizeof(struct xhci_trb);
1811 
1812 	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
1813 	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
1814 	if (error)
1815 		return (error);
1816 
1817 	ring->ntrb = ntrb;
1818 
1819 	xhci_ring_reset(sc, ring);
1820 
1821 	return (0);
1822 }
1823 
1824 void
1825 xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
1826 {
1827 	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
1828 }
1829 
1830 void
1831 xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
1832 {
1833 	size_t size;
1834 
1835 	size = ring->ntrb * sizeof(struct xhci_trb);
1836 
1837 	memset(ring->trbs, 0, size);
1838 
1839 	ring->index = 0;
1840 	ring->toggle = XHCI_TRB_CYCLE;
1841 
1842 	/*
1843 	 * Since all our rings use only one segment, at least for
1844 	 * the moment, link their tail to their head.
1845 	 */
1846 	if (ring != &sc->sc_evt_ring) {
1847 		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];
1848 
1849 		trb->trb_paddr = htole64(ring->dma.paddr);
1850 		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG |
1851 		    XHCI_TRB_CYCLE);
1852 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
1853 		    BUS_DMASYNC_PREWRITE);
1854 	} else
1855 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
1856 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1857 }
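
/*
 * Illustrative sketch (not driver code): a freshly reset transfer or
 * command ring with ntrb entries looks like this, the last TRB being a
 * link TRB pointing back to the start of the single segment:
 *
 *	index:   0      1      ...    ntrb-2   ntrb-1
 *	        [TRB]  [TRB]   ...    [TRB]    [LINK -> trbs[0]]
 *
 * The event ring is the exception: the controller produces events on it
 * and handles wrap-around itself, so no link TRB is installed there.
 */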
1858 
1859 struct xhci_trb*
1860 xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
1861 {
1862 	struct xhci_trb *trb = &ring->trbs[ring->index];
1863 
1864 	KASSERT(ring->index < ring->ntrb);
1865 
1866 	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
1867 	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);
1868 
1869 	/* Make sure this TRB can be consumed. */
1870 	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
1871 		return (NULL);
1872 
1873 	ring->index++;
1874 
1875 	if (ring->index == ring->ntrb) {
1876 		ring->index = 0;
1877 		ring->toggle ^= 1;
1878 	}
1879 
1880 	return (trb);
1881 }
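
/*
 * Worked example of the cycle-bit handshake above, assuming a 4-TRB
 * event ring: after reset, ring->toggle == 1 and all TRBs are zeroed
 * (cycle == 0), so nothing is consumable.  The controller writes its
 * first lap of events with cycle == 1; once the consumer wraps past
 * index 3 it toggles to expect cycle == 0, matching the controller's
 * next lap.  A TRB whose cycle bit differs from ring->toggle therefore
 * still belongs to the producer and must not be consumed.
 */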
1882 
1883 struct xhci_trb*
1884 xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
1885 {
1886 	struct xhci_trb *lnk, *trb;
1887 
1888 	KASSERT(ring->index < ring->ntrb);
1889 
1890 	/* Setup the link TRB after the previous TRB is done. */
1891 	if (ring->index == 0) {
1892 		lnk = &ring->trbs[ring->ntrb - 1];
1893 		trb = &ring->trbs[ring->ntrb - 2];
1894 
1895 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1896 		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
1897 		    BUS_DMASYNC_POSTWRITE);
1898 
1899 		lnk->trb_flags &= htole32(~XHCI_TRB_CHAIN);
1900 		if (letoh32(trb->trb_flags) & XHCI_TRB_CHAIN)
1901 			lnk->trb_flags |= htole32(XHCI_TRB_CHAIN);
1902 
1903 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1904 		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
1905 
1906 		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);
1907 
1908 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1909 		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
1910 	}
1911 
1912 	trb = &ring->trbs[ring->index++];
1913 	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
1914 	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
1915 	    BUS_DMASYNC_POSTWRITE);
1916 
1917 	/* Toggle cycle state of the link TRB and skip it. */
1918 	if (ring->index == (ring->ntrb - 1)) {
1919 		ring->index = 0;
1920 		ring->toggle ^= 1;
1921 	}
1922 
1923 	return (trb);
1924 }
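
/*
 * Worked example for the producer side, assuming ntrb == 256: indexes
 * 0..254 hold transfer TRBs and index 255 holds the link TRB installed
 * by xhci_ring_reset().  The link TRB is never handed to a caller;
 * once index reaches ntrb - 1 the producer wraps to 0 and flips
 * ring->toggle.  On the first enqueue of the new lap (the index == 0
 * case above), the link TRB's CHAIN bit is patched to match the TRB
 * before it and its cycle bit is flipped, handing it over to the
 * controller.
 */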
1925 
1926 struct xhci_trb *
1927 xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
1928     uint8_t *togglep, int last)
1929 {
1930 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
1931 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
1932 
1933 	KASSERT(xp->free_trbs >= 1);
1934 	xp->free_trbs--;
1935 	*togglep = xp->ring.toggle;
1936 
1937 	switch (last) {
1938 	case -1:	/* This will be a zero-length TD. */
1939 		xp->pending_xfers[xp->ring.index] = NULL;
1940 		xx->zerotd += 1;
1941 		break;
1942 	case 0:		/* This will be in a chain. */
1943 		xp->pending_xfers[xp->ring.index] = xfer;
1944 		xx->index = -2;
1945 		xx->ntrb += 1;
1946 		break;
1947 	case 1:		/* This will terminate a chain. */
1948 		xp->pending_xfers[xp->ring.index] = xfer;
1949 		xx->index = xp->ring.index;
1950 		xx->ntrb += 1;
1951 		break;
1952 	}
1953 
1954 	xp->trb_processed[xp->ring.index] = TRB_PROCESSED_NO;
1955 
1956 	return (xhci_ring_produce(sc, &xp->ring));
1957 }
1958 
1959 int
1960 xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
1961 {
1962 	struct xhci_trb *trb;
1963 	int s, error = 0;
1964 
1965 	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);
1966 
1967 	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);
1968 
1969 	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
1970 	if (trb == NULL)
1971 		return (EAGAIN);
1972 	trb->trb_paddr = trb0->trb_paddr;
1973 	trb->trb_status = trb0->trb_status;
1974 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
1975 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
1976 	    BUS_DMASYNC_PREWRITE);
1977 
1978 	trb->trb_flags = trb0->trb_flags;
1979 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
1980 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
1981 	    BUS_DMASYNC_PREWRITE);
1982 
1983 	if (timeout == 0) {
1984 		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
1985 		return (0);
1986 	}
1987 
1988 	rw_assert_wrlock(&sc->sc_cmd_lock);
1989 
1990 	s = splusb();
1991 	sc->sc_cmd_trb = trb;
1992 	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
1993 	error = tsleep_nsec(&sc->sc_cmd_trb, PZERO, "xhcicmd", timeout);
1994 	if (error) {
1995 #ifdef XHCI_DEBUG
1996 		printf("%s: tsleep() = %d\n", __func__, error);
1997 		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
1998 		xhci_dump_trb(trb);
1999 #endif
2000 		KASSERT(sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL);
2001 		/*
2002 		 * Just because the timeout expired does not mean that the
2003 		 * TRB isn't active anymore! We could get an interrupt from
2004 		 * this TRB later on and then wonder what to do with it.
2005 		 * We'd rather abort it.
2006 		 */
2007 		xhci_command_abort(sc);
2008 		sc->sc_cmd_trb = NULL;
2009 		splx(s);
2010 		return (error);
2011 	}
2012 	splx(s);
2013 
2014 	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));
2015 
2016 	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
2017 		return (0);
2018 
2019 #ifdef XHCI_DEBUG
2020 	printf("%s: event error code=%d, result=%d  \n", DEVNAME(sc),
2021 	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
2022 	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
2023 	xhci_dump_trb(trb0);
2024 #endif
2025 	return (EIO);
2026 }
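
/*
 * A minimal sketch of the synchronous usage pattern implemented by the
 * xhci_cmd_* helpers below; the "trb" fields here are placeholders, the
 * real values depend on the command being issued:
 *
 *	struct xhci_trb trb = { .trb_paddr = 0, .trb_status = 0,
 *	    .trb_flags = htole32(XHCI_CMD_NOOP) };
 *
 *	rw_enter_write(&sc->sc_cmd_lock);
 *	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
 *	rw_exit_write(&sc->sc_cmd_lock);
 *
 * On success the completion event is copied back into "trb".  Passing a
 * timeout of 0 queues the command asynchronously and returns at once.
 */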
2027 
2028 int
2029 xhci_command_abort(struct xhci_softc *sc)
2030 {
2031 	uint32_t reg;
2032 	int i;
2033 
2034 	reg = XOREAD4(sc, XHCI_CRCR_LO);
2035 	if ((reg & XHCI_CRCR_LO_CRR) == 0)
2036 		return (0);
2037 
2038 	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
2039 	XOWRITE4(sc, XHCI_CRCR_HI, 0);
2040 
2041 	for (i = 0; i < 2500; i++) {
2042 		DELAY(100);
2043 		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
2044 		if (!reg)
2045 			break;
2046 	}
2047 
2048 	if (reg) {
2049 		printf("%s: command ring abort timeout\n", DEVNAME(sc));
2050 		return (1);
2051 	}
2052 
2053 	return (0);
2054 }
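
/*
 * Note: the polling loop above gives the controller up to
 * 2500 * 100us = 250ms to acknowledge the abort before giving up.
 */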
2055 
2056 int
2057 xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
2058 {
2059 	struct xhci_trb trb;
2060 	int error;
2061 
2062 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
2063 
2064 	trb.trb_paddr = htole64(addr);
2065 	trb.trb_status = 0;
2066 	trb.trb_flags = htole32(
2067 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
2068 	);
2069 
2070 	rw_enter_write(&sc->sc_cmd_lock);
2071 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2072 	rw_exit_write(&sc->sc_cmd_lock);
2073 	return (error);
2074 }
2075 
2076 int
2077 xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
2078 {
2079 	struct xhci_trb trb;
2080 	int error;
2081 
2082 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
2083 
2084 	trb.trb_paddr = 0;
2085 	trb.trb_status = 0;
2086 	trb.trb_flags = htole32(
2087 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
2088 	);
2089 
2090 	rw_enter_write(&sc->sc_cmd_lock);
2091 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2092 	rw_exit_write(&sc->sc_cmd_lock);
2093 	return (error);
2094 }
2095 
2096 void
2097 xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
2098 {
2099 	struct xhci_trb trb;
2100 
2101 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
2102 
2103 	trb.trb_paddr = 0;
2104 	trb.trb_status = 0;
2105 	trb.trb_flags = htole32(
2106 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
2107 	);
2108 
2109 	xhci_command_submit(sc, &trb, 0);
2110 }
2111 
2112 void
2113 xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
2114    uint64_t addr)
2115 {
2116 	struct xhci_trb trb;
2117 
2118 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
2119 
2120 	trb.trb_paddr = htole64(addr);
2121 	trb.trb_status = 0;
2122 	trb.trb_flags = htole32(
2123 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
2124 	);
2125 
2126 	xhci_command_submit(sc, &trb, 0);
2127 }
2128 
2129 int
2130 xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
2131 {
2132 	struct xhci_trb trb;
2133 	int error;
2134 
2135 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
2136 
2137 	trb.trb_paddr = 0;
2138 	trb.trb_status = 0;
2139 	if (enable)
2140 		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
2141 	else
2142 		trb.trb_flags = htole32(
2143 			XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
2144 		);
2145 
2146 	rw_enter_write(&sc->sc_cmd_lock);
2147 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2148 	rw_exit_write(&sc->sc_cmd_lock);
2149 	if (error != 0)
2150 		return (EIO);
2151 
2152 	if (enable)
2153 		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));
2154 
2155 	return (0);
2156 }
2157 
2158 int
2159 xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
2160     uint32_t bsr)
2161 {
2162 	struct xhci_trb trb;
2163 	int error;
2164 
2165 	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));
2166 
2167 	trb.trb_paddr = htole64(addr);
2168 	trb.trb_status = 0;
2169 	trb.trb_flags = htole32(
2170 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
2171 	);
2172 
2173 	rw_enter_write(&sc->sc_cmd_lock);
2174 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2175 	rw_exit_write(&sc->sc_cmd_lock);
2176 	return (error);
2177 }
2178 
2179 #ifdef XHCI_DEBUG
2180 int
2181 xhci_cmd_noop(struct xhci_softc *sc)
2182 {
2183 	struct xhci_trb trb;
2184 	int error;
2185 
2186 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
2187 
2188 	trb.trb_paddr = 0;
2189 	trb.trb_status = 0;
2190 	trb.trb_flags = htole32(XHCI_CMD_NOOP);
2191 
2192 	rw_enter_write(&sc->sc_cmd_lock);
2193 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2194 	rw_exit_write(&sc->sc_cmd_lock);
2195 	return (error);
2196 }
2197 #endif
2198 
2199 int
2200 xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
2201 {
2202 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
2203 	int i, error;
2204 	uint8_t *kva;
2205 
2206 	/*
2207 	 * Setup input context.  Even with 64 byte context size, it
2208 	 * fits into the smallest supported page size, so use that.
2209 	 */
2210 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
2211 	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
2212 	if (error)
2213 		return (ENOMEM);
2214 
2215 	sdev->input_ctx = (struct xhci_inctx *)kva;
2216 	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
2217 	for (i = 0; i < 31; i++)
2218 		sdev->ep_ctx[i] =
2219 		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);
2220 
2221 	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
2222 	 slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));
2223 
2224 	/* Setup output context */
2225 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
2226 	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
2227 	if (error) {
2228 		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
2229 		return (ENOMEM);
2230 	}
2231 
2232 	memset(&sdev->pipes, 0, sizeof(sdev->pipes));
2233 
2234 	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
2235 	    slot, (long long)sdev->octx_dma.paddr));
2236 
2237 	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
2238 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
2239 	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
2240 	    BUS_DMASYNC_PREWRITE);
2241 
2242 	return (0);
2243 }
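
/*
 * Illustrative layout only (not driver code) of the input context page
 * allocated above, where ctxsize is 32 or 64 bytes depending on the
 * controller (sc_ctxsize):
 *
 *	kva + 0 * ctxsize:       input control context (input_ctx)
 *	kva + 1 * ctxsize:       slot context          (slot_ctx)
 *	kva + (i + 2) * ctxsize: endpoint context for DCI i + 1 (ep_ctx[i])
 *
 * The output context page is laid out the same way minus the input
 * control context, and it is the one the DCBAA slot entry points at.
 */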
2244 
2245 void
2246 xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
2247 {
2248 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
2249 
2250 	sc->sc_dcbaa.segs[slot] = 0;
2251 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
2252 	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
2253 	    BUS_DMASYNC_PREWRITE);
2254 
2255 	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
2256 	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
2257 
2258 	memset(sdev, 0, sizeof(struct xhci_soft_dev));
2259 }
2260 
2261 /* Root hub descriptors. */
2262 const usb_device_descriptor_t xhci_devd = {
2263 	USB_DEVICE_DESCRIPTOR_SIZE,
2264 	UDESC_DEVICE,		/* type */
2265 	{0x00, 0x03},		/* USB version */
2266 	UDCLASS_HUB,		/* class */
2267 	UDSUBCLASS_HUB,		/* subclass */
2268 	UDPROTO_HSHUBSTT,	/* protocol */
2269 	9,			/* max packet */
2270 	{0},{0},{0x00,0x01},	/* device id */
2271 	1,2,0,			/* string indexes */
2272 	1			/* # of configurations */
2273 };
2274 
2275 const usb_config_descriptor_t xhci_confd = {
2276 	USB_CONFIG_DESCRIPTOR_SIZE,
2277 	UDESC_CONFIG,
2278 	{USB_CONFIG_DESCRIPTOR_SIZE +
2279 	 USB_INTERFACE_DESCRIPTOR_SIZE +
2280 	 USB_ENDPOINT_DESCRIPTOR_SIZE},
2281 	1,
2282 	1,
2283 	0,
2284 	UC_BUS_POWERED | UC_SELF_POWERED,
2285 	0                      /* max power */
2286 };
2287 
2288 const usb_interface_descriptor_t xhci_ifcd = {
2289 	USB_INTERFACE_DESCRIPTOR_SIZE,
2290 	UDESC_INTERFACE,
2291 	0,
2292 	0,
2293 	1,
2294 	UICLASS_HUB,
2295 	UISUBCLASS_HUB,
2296 	UIPROTO_HSHUBSTT,
2297 	0
2298 };
2299 
2300 const usb_endpoint_descriptor_t xhci_endpd = {
2301 	USB_ENDPOINT_DESCRIPTOR_SIZE,
2302 	UDESC_ENDPOINT,
2303 	UE_DIR_IN | XHCI_INTR_ENDPT,
2304 	UE_INTERRUPT,
2305 	{2, 0},                 /* max 15 ports */
2306 	255
2307 };
2308 
2309 const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
2310 	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
2311 	UDESC_ENDPOINT_SS_COMP,
2312 	0,
2313 	0,
2314 	{0, 0}
2315 };
2316 
2317 const usb_hub_descriptor_t xhci_hubd = {
2318 	USB_HUB_DESCRIPTOR_SIZE,
2319 	UDESC_SS_HUB,
2320 	0,
2321 	{0,0},
2322 	0,
2323 	0,
2324 	{0},
2325 };
2326 
2327 void
2328 xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
2329 {
2330 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2331 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
2332 	int error;
2333 
2334 	splsoftassert(IPL_SOFTUSB);
2335 
2336 	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
2337 	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
2338 	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));
2339 
2340 	/* XXX The stack should not call abort() in this case. */
2341 	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
2342 		xfer->status = status;
2343 		timeout_del(&xfer->timeout_handle);
2344 		usb_rem_task(xfer->device, &xfer->abort_task);
2345 		usb_transfer_complete(xfer);
2346 		return;
2347 	}
2348 
2349 	/* Transfer is already done. */
2350 	if (xfer->status != USBD_IN_PROGRESS) {
2351 		DPRINTF(("%s: already done \n", __func__));
2352 		return;
2353 	}
2354 
2355 	/* Prevent any timeout from kicking in. */
2356 	timeout_del(&xfer->timeout_handle);
2357 	usb_rem_task(xfer->device, &xfer->abort_task);
2358 
2359 	/* Indicate that we are aborting this transfer. */
2360 	xp->halted = status;
2361 	xp->aborted_xfer = xfer;
2362 
2363 	/* Stop the endpoint and wait until the hardware says so. */
2364 	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) {
2365 		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));
2366 		/* Assume the device is gone. */
2367 		xp->halted = 0;
2368 		xp->aborted_xfer = NULL;
2369 		xfer->status = status;
2370 		usb_transfer_complete(xfer);
2371 		return;
2372 	}
2373 
2374 	/*
2375 	 * The transfer was already completed when we stopped the
2376 	 * endpoint, no need to move the dequeue pointer past its
2377 	 * TRBs.
2378 	 */
2379 	if (xp->aborted_xfer == NULL) {
2380 		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
2381 		xp->halted = 0;
2382 		return;
2383 	}
2384 
2385 	/*
2386 	 * At this stage the endpoint has been stopped, so update its
2387 	 * dequeue pointer past the last TRB of the transfer.
2388 	 *
2389 	 * Note: This assumes that only one transfer per endpoint has
2390 	 *	 pending TRBs on the ring.
2391 	 */
2392 	xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
2393 	    DEQPTR(xp->ring) | xp->ring.toggle);
2394 	error = tsleep_nsec(xp, PZERO, "xhciab", XHCI_CMD_TIMEOUT);
2395 	if (error)
2396 		printf("%s: timeout aborting transfer\n", DEVNAME(sc));
2397 }
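
/*
 * A short summary of the abort path above, for reference: stop the
 * endpoint first; if the transfer still owns TRBs on the ring, issue a
 * Set TR Dequeue Pointer command to skip past them.  DEQPTR() yields a
 * 16-byte aligned address, so OR'ing in the ring's toggle sets the
 * Dequeue Cycle State bit in bit 0.  The event handler is expected to
 * wake us up once that command completes.
 */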
2398 
2399 void
2400 xhci_timeout(void *addr)
2401 {
2402 	struct usbd_xfer *xfer = addr;
2403 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2404 
2405 	if (sc->sc_bus.dying) {
2406 		xhci_timeout_task(addr);
2407 		return;
2408 	}
2409 
2410 	usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
2411 	    USB_TASK_TYPE_ABORT);
2412 	usb_add_task(xfer->device, &xfer->abort_task);
2413 }
2414 
2415 void
2416 xhci_timeout_task(void *addr)
2417 {
2418 	struct usbd_xfer *xfer = addr;
2419 	int s;
2420 
2421 	s = splusb();
2422 	xhci_abort_xfer(xfer, USBD_TIMEOUT);
2423 	splx(s);
2424 }
2425 
2426 usbd_status
2427 xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
2428 {
2429 	usbd_status err;
2430 
2431 	err = usb_insert_transfer(xfer);
2432 	if (err)
2433 		return (err);
2434 
2435 	return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2436 }
2437 
2438 usbd_status
2439 xhci_root_ctrl_start(struct usbd_xfer *xfer)
2440 {
2441 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2442 	usb_port_status_t ps;
2443 	usb_device_request_t *req;
2444 	void *buf = NULL;
2445 	usb_device_descriptor_t devd;
2446 	usb_hub_descriptor_t hubd;
2447 	usbd_status err;
2448 	int s, len, value, index;
2449 	int l, totlen = 0;
2450 	int port, i;
2451 	uint32_t v;
2452 
2453 	KASSERT(xfer->rqflags & URQ_REQUEST);
2454 
2455 	if (sc->sc_bus.dying)
2456 		return (USBD_IOERROR);
2457 
2458 	req = &xfer->request;
2459 
2460 	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
2461 	    req->bmRequestType, req->bRequest));
2462 
2463 	len = UGETW(req->wLength);
2464 	value = UGETW(req->wValue);
2465 	index = UGETW(req->wIndex);
2466 
2467 	if (len != 0)
2468 		buf = KERNADDR(&xfer->dmabuf, 0);
2469 
2470 #define C(x,y) ((x) | ((y) << 8))
2471 	switch(C(req->bRequest, req->bmRequestType)) {
2472 	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
2473 	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
2474 	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
2475 		/*
2476 		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
2477 		 * for the integrated root hub.
2478 		 */
2479 		break;
2480 	case C(UR_GET_CONFIG, UT_READ_DEVICE):
2481 		if (len > 0) {
2482 			*(uint8_t *)buf = sc->sc_conf;
2483 			totlen = 1;
2484 		}
2485 		break;
2486 	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
2487 		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
2488 		switch(value >> 8) {
2489 		case UDESC_DEVICE:
2490 			if ((value & 0xff) != 0) {
2491 				err = USBD_IOERROR;
2492 				goto ret;
2493 			}
2494 			devd = xhci_devd;
2495 			USETW(devd.idVendor, sc->sc_id_vendor);
2496 			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
2497 			memcpy(buf, &devd, l);
2498 			break;
2499 		/*
2500 		 * We can't really operate at another speed, but the spec says
2501 		 * we need this descriptor.
2502 		 */
2503 		case UDESC_OTHER_SPEED_CONFIGURATION:
2504 		case UDESC_CONFIG:
2505 			if ((value & 0xff) != 0) {
2506 				err = USBD_IOERROR;
2507 				goto ret;
2508 			}
2509 			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
2510 			memcpy(buf, &xhci_confd, l);
2511 			((usb_config_descriptor_t *)buf)->bDescriptorType =
2512 			    value >> 8;
2513 			buf = (char *)buf + l;
2514 			len -= l;
2515 			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
2516 			totlen += l;
2517 			memcpy(buf, &xhci_ifcd, l);
2518 			buf = (char *)buf + l;
2519 			len -= l;
2520 			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
2521 			totlen += l;
2522 			memcpy(buf, &xhci_endpd, l);
2523 			break;
2524 		case UDESC_STRING:
2525 			if (len == 0)
2526 				break;
2527 			*(u_int8_t *)buf = 0;
2528 			totlen = 1;
2529 			switch (value & 0xff) {
2530 			case 0: /* Language table */
2531 				totlen = usbd_str(buf, len, "\001");
2532 				break;
2533 			case 1: /* Vendor */
2534 				totlen = usbd_str(buf, len, sc->sc_vendor);
2535 				break;
2536 			case 2: /* Product */
2537 				totlen = usbd_str(buf, len, "xHCI root hub");
2538 				break;
2539 			}
2540 			break;
2541 		default:
2542 			err = USBD_IOERROR;
2543 			goto ret;
2544 		}
2545 		break;
2546 	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
2547 		if (len > 0) {
2548 			*(uint8_t *)buf = 0;
2549 			totlen = 1;
2550 		}
2551 		break;
2552 	case C(UR_GET_STATUS, UT_READ_DEVICE):
2553 		if (len > 1) {
2554 			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
2555 			totlen = 2;
2556 		}
2557 		break;
2558 	case C(UR_GET_STATUS, UT_READ_INTERFACE):
2559 	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
2560 		if (len > 1) {
2561 			USETW(((usb_status_t *)buf)->wStatus, 0);
2562 			totlen = 2;
2563 		}
2564 		break;
2565 	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
2566 		if (value >= USB_MAX_DEVICES) {
2567 			err = USBD_IOERROR;
2568 			goto ret;
2569 		}
2570 		break;
2571 	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
2572 		if (value != 0 && value != 1) {
2573 			err = USBD_IOERROR;
2574 			goto ret;
2575 		}
2576 		sc->sc_conf = value;
2577 		break;
2578 	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
2579 		break;
2580 	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
2581 	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
2582 	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
2583 		err = USBD_IOERROR;
2584 		goto ret;
2585 	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
2586 		break;
2587 	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
2588 		break;
2589 	/* Hub requests */
2590 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
2591 		break;
2592 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
2593 		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
2594 		    "port=%d feature=%d\n", index, value));
2595 		if (index < 1 || index > sc->sc_noport) {
2596 			err = USBD_IOERROR;
2597 			goto ret;
2598 		}
2599 		port = XHCI_PORTSC(index);
2600 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
2601 		switch (value) {
2602 		case UHF_PORT_ENABLE:
2603 			XOWRITE4(sc, port, v | XHCI_PS_PED);
2604 			break;
2605 		case UHF_PORT_SUSPEND:
2606 			/* TODO */
2607 			break;
2608 		case UHF_PORT_POWER:
2609 			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
2610 			break;
2611 		case UHF_PORT_INDICATOR:
2612 			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
2613 			break;
2614 		case UHF_C_PORT_CONNECTION:
2615 			XOWRITE4(sc, port, v | XHCI_PS_CSC);
2616 			break;
2617 		case UHF_C_PORT_ENABLE:
2618 			XOWRITE4(sc, port, v | XHCI_PS_PEC);
2619 			break;
2620 		case UHF_C_PORT_SUSPEND:
2621 		case UHF_C_PORT_LINK_STATE:
2622 			XOWRITE4(sc, port, v | XHCI_PS_PLC);
2623 			break;
2624 		case UHF_C_PORT_OVER_CURRENT:
2625 			XOWRITE4(sc, port, v | XHCI_PS_OCC);
2626 			break;
2627 		case UHF_C_PORT_RESET:
2628 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
2629 			break;
2630 		case UHF_C_BH_PORT_RESET:
2631 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
2632 			break;
2633 		default:
2634 			err = USBD_IOERROR;
2635 			goto ret;
2636 		}
2637 		break;
2638 
2639 	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
2640 		if (len == 0)
2641 			break;
2642 		if ((value & 0xff) != 0) {
2643 			err = USBD_IOERROR;
2644 			goto ret;
2645 		}
2646 		v = XREAD4(sc, XHCI_HCCPARAMS);
2647 		hubd = xhci_hubd;
2648 		hubd.bNbrPorts = sc->sc_noport;
2649 		USETW(hubd.wHubCharacteristics,
2650 		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
2651 		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
2652 		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
2653 		for (i = 1; i <= sc->sc_noport; i++) {
2654 			v = XOREAD4(sc, XHCI_PORTSC(i));
2655 			if (v & XHCI_PS_DR)
2656 				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
2657 		}
2658 		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
2659 		l = min(len, hubd.bDescLength);
2660 		totlen = l;
2661 		memcpy(buf, &hubd, l);
2662 		break;
2663 	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
2664 		if (len != 16) {
2665 			err = USBD_IOERROR;
2666 			goto ret;
2667 		}
2668 		memset(buf, 0, len);
2669 		totlen = len;
2670 		break;
2671 	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
2672 		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
2673 		    index));
2674 		if (index < 1 || index > sc->sc_noport) {
2675 			err = USBD_IOERROR;
2676 			goto ret;
2677 		}
2678 		if (len != 4) {
2679 			err = USBD_IOERROR;
2680 			goto ret;
2681 		}
2682 		v = XOREAD4(sc, XHCI_PORTSC(index));
2683 		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
2684 		i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v));
2685 		switch (XHCI_PS_SPEED(v)) {
2686 		case XHCI_SPEED_FULL:
2687 			i |= UPS_FULL_SPEED;
2688 			break;
2689 		case XHCI_SPEED_LOW:
2690 			i |= UPS_LOW_SPEED;
2691 			break;
2692 		case XHCI_SPEED_HIGH:
2693 			i |= UPS_HIGH_SPEED;
2694 			break;
2695 		case XHCI_SPEED_SUPER:
2696 		default:
2697 			break;
2698 		}
2699 		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
2700 		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
2701 		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
2702 		if (v & XHCI_PS_PR)	i |= UPS_RESET;
2703 		if (v & XHCI_PS_PP)	{
2704 			if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL &&
2705 			    XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH)
2706 				i |= UPS_PORT_POWER;
2707 			else
2708 				i |= UPS_PORT_POWER_SS;
2709 		}
2710 		USETW(ps.wPortStatus, i);
2711 		i = 0;
2712 		if (v & XHCI_PS_CSC)    i |= UPS_C_CONNECT_STATUS;
2713 		if (v & XHCI_PS_PEC)    i |= UPS_C_PORT_ENABLED;
2714 		if (v & XHCI_PS_OCC)    i |= UPS_C_OVERCURRENT_INDICATOR;
2715 		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
2716 		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
2717 		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
2718 		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
2719 		USETW(ps.wPortChange, i);
2720 		l = min(len, sizeof ps);
2721 		memcpy(buf, &ps, l);
2722 		totlen = l;
2723 		break;
2724 	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
2725 		err = USBD_IOERROR;
2726 		goto ret;
2727 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
2728 		break;
2729 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
2730 
2731 		i = index >> 8;
2732 		index &= 0x00ff;
2733 
2734 		if (index < 1 || index > sc->sc_noport) {
2735 			err = USBD_IOERROR;
2736 			goto ret;
2737 		}
2738 		port = XHCI_PORTSC(index);
2739 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
2740 
2741 		switch (value) {
2742 		case UHF_PORT_ENABLE:
2743 			XOWRITE4(sc, port, v | XHCI_PS_PED);
2744 			break;
2745 		case UHF_PORT_SUSPEND:
2746 			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
2747 			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
2748 				err = USBD_IOERROR;
2749 				goto ret;
2750 			}
2751 			XOWRITE4(sc, port, v |
2752 			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
2753 			break;
2754 		case UHF_PORT_RESET:
2755 			DPRINTFN(6, ("reset port %d\n", index));
2756 			XOWRITE4(sc, port, v | XHCI_PS_PR);
2757 			break;
2758 		case UHF_PORT_POWER:
2759 			DPRINTFN(3, ("set port power %d\n", index));
2760 			XOWRITE4(sc, port, v | XHCI_PS_PP);
2761 			break;
2762 		case UHF_PORT_INDICATOR:
2763 			DPRINTFN(3, ("set port indicator %d\n", index));
2764 
2765 			v &= ~XHCI_PS_SET_PIC(3);
2766 			v |= XHCI_PS_SET_PIC(1);
2767 
2768 			XOWRITE4(sc, port, v);
2769 			break;
2770 		case UHF_C_PORT_RESET:
2771 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
2772 			break;
2773 		case UHF_C_BH_PORT_RESET:
2774 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
2775 			break;
2776 		default:
2777 			err = USBD_IOERROR;
2778 			goto ret;
2779 		}
2780 		break;
2781 	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
2782 	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
2783 	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
2784 	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
2785 		break;
2786 	default:
2787 		err = USBD_IOERROR;
2788 		goto ret;
2789 	}
2790 	xfer->actlen = totlen;
2791 	err = USBD_NORMAL_COMPLETION;
2792 ret:
2793 	xfer->status = err;
2794 	s = splusb();
2795 	usb_transfer_complete(xfer);
2796 	splx(s);
2797 	return (err);
2798 }
2799 
2800 
2801 void
2802 xhci_noop(struct usbd_xfer *xfer)
2803 {
2804 }
2805 
2806 
2807 usbd_status
2808 xhci_root_intr_transfer(struct usbd_xfer *xfer)
2809 {
2810 	usbd_status err;
2811 
2812 	err = usb_insert_transfer(xfer);
2813 	if (err)
2814 		return (err);
2815 
2816 	return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2817 }
2818 
2819 usbd_status
2820 xhci_root_intr_start(struct usbd_xfer *xfer)
2821 {
2822 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2823 
2824 	if (sc->sc_bus.dying)
2825 		return (USBD_IOERROR);
2826 
2827 	sc->sc_intrxfer = xfer;
2828 
2829 	return (USBD_IN_PROGRESS);
2830 }
2831 
2832 void
2833 xhci_root_intr_abort(struct usbd_xfer *xfer)
2834 {
2835 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2836 	int s;
2837 
2838 	sc->sc_intrxfer = NULL;
2839 
2840 	xfer->status = USBD_CANCELLED;
2841 	s = splusb();
2842 	usb_transfer_complete(xfer);
2843 	splx(s);
2844 }
2845 
2846 void
2847 xhci_root_intr_done(struct usbd_xfer *xfer)
2848 {
2849 }
2850 
2851 /*
2852  * Number of packets remaining in the TD after the corresponding TRB.
2853  *
2854  * Section 4.11.2.4 of xHCI specification r1.1.
2855  */
2856 static inline uint32_t
2857 xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len)
2858 {
2859 	uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2860 
2861 	if (len == 0)
2862 		return XHCI_TRB_TDREM(0);
2863 
2864 	npkt = howmany(remain - len, UE_GET_SIZE(mps));
2865 	if (npkt > 31)
2866 		npkt = 31;
2867 
2868 	return XHCI_TRB_TDREM(npkt);
2869 }
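
/*
 * Worked example: a 3072-byte transfer with a 512-byte wMaxPacketSize,
 * split into 1024-byte TRBs.  The first TRB has remain = 3072 and
 * len = 1024, so npkt = howmany(2048, 512) = 4 packets still to go; the
 * last TRB has remain = len = 1024 and yields XHCI_TRB_TDREM(0).
 */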
2870 
2871 /*
2872  * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC).
2873  *
2874  * Section 4.11.2.3  of xHCI specification r1.1.
2875  */
2876 static inline uint32_t
2877 xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc)
2878 {
2879 	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2880 	uint32_t maxb, tdpc, residue, tbc;
2881 
2882 	/* Transfer Descriptor Packet Count, section 4.14.1. */
2883 	tdpc = howmany(len, UE_GET_SIZE(mps));
2884 	if (tdpc == 0)
2885 		tdpc = 1;
2886 
2887 	/* Transfer Burst Count */
2888 	maxb = xhci_pipe_maxburst(xfer->pipe);
2889 	tbc = howmany(tdpc, maxb + 1) - 1;
2890 
2891 	/* Transfer Last Burst Packet Count */
2892 	if (xfer->device->speed == USB_SPEED_SUPER) {
2893 		residue = tdpc % (maxb + 1);
2894 		if (residue == 0)
2895 			*tlbpc = maxb;
2896 		else
2897 			*tlbpc = residue - 1;
2898 	} else {
2899 		*tlbpc = tdpc - 1;
2900 	}
2901 
2902 	return (tbc);
2903 }
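
/*
 * Worked example: a SuperSpeed isochronous endpoint with a 1024-byte
 * wMaxPacketSize and maxb = 2 (three packets per burst) transferring
 * 7168 bytes gives tdpc = 7 packets, so tbc = howmany(7, 3) - 1 = 2
 * bursts beyond the first, and residue = 7 % 3 = 1 yields tlbpc = 0
 * (a single packet in the final burst).
 */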
2904 
2905 usbd_status
2906 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2907 {
2908 	usbd_status err;
2909 
2910 	err = usb_insert_transfer(xfer);
2911 	if (err)
2912 		return (err);
2913 
2914 	return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2915 }
2916 
2917 usbd_status
2918 xhci_device_ctrl_start(struct usbd_xfer *xfer)
2919 {
2920 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2921 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
2922 	struct xhci_trb *trb0, *trb;
2923 	uint32_t flags, len = UGETW(xfer->request.wLength);
2924 	uint8_t toggle;
2925 	int s;
2926 
2927 	KASSERT(xfer->rqflags & URQ_REQUEST);
2928 
2929 	if (sc->sc_bus.dying || xp->halted)
2930 		return (USBD_IOERROR);
2931 
2932 	if (xp->free_trbs < 3)
2933 		return (USBD_NOMEM);
2934 
2935 	if (len != 0)
2936 		usb_syncmem(&xfer->dmabuf, 0, len,
2937 		    usbd_xfer_isread(xfer) ?
2938 		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2939 
2940 	/* We'll toggle the setup TRB once we're finished with the stages. */
2941 	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, 0);
2942 
2943 	flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | (toggle ^ 1);
2944 	if (len != 0) {
2945 		if (usbd_xfer_isread(xfer))
2946 			flags |= XHCI_TRB_TRT_IN;
2947 		else
2948 			flags |= XHCI_TRB_TRT_OUT;
2949 	}
2950 
2951 	memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr));
2952 	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
2953 	trb0->trb_flags = htole32(flags);
2954 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2955 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
2956 	    BUS_DMASYNC_PREWRITE);
2957 
2958 	/* Data TRB */
2959 	if (len != 0) {
2960 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);
2961 
2962 		flags = XHCI_TRB_TYPE_DATA | toggle;
2963 		if (usbd_xfer_isread(xfer))
2964 			flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP;
2965 
2966 		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
2967 		trb->trb_status = htole32(
2968 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
2969 		    xhci_xfer_tdsize(xfer, len, len)
2970 		);
2971 		trb->trb_flags = htole32(flags);
2972 
2973 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2974 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
2975 		    BUS_DMASYNC_PREWRITE);
2976 	}
2977 
2978 	/* Status TRB */
2979 	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);
2980 
2981 	flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle;
2982 	if (len == 0 || !usbd_xfer_isread(xfer))
2983 		flags |= XHCI_TRB_DIR_IN;
2984 
2985 	trb->trb_paddr = 0;
2986 	trb->trb_status = htole32(XHCI_TRB_INTR(0));
2987 	trb->trb_flags = htole32(flags);
2988 
2989 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2990 	    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
2991 	    BUS_DMASYNC_PREWRITE);
2992 
2993 	/* Setup TRB */
2994 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
2995 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2996 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
2997 	    BUS_DMASYNC_PREWRITE);
2998 
2999 	s = splusb();
3000 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
3001 
3002 	xfer->status = USBD_IN_PROGRESS;
3003 	if (xfer->timeout && !sc->sc_bus.use_polling) {
3004 		timeout_del(&xfer->timeout_handle);
3005 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
3006 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
3007 	}
3008 	splx(s);
3009 
3010 	return (USBD_IN_PROGRESS);
3011 }
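
/*
 * Summary of the TD built above (a standard three-stage control
 * transfer): a setup TRB carrying the 8-byte request as immediate data
 * (XHCI_TRB_IDT), an optional data TRB when wLength != 0, and a status
 * TRB whose direction opposes the data stage (IN for writes and for
 * zero-length requests).  The setup TRB's cycle bit is flipped last so
 * the controller cannot race ahead on a half-built TD.
 */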
3012 
3013 void
3014 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
3015 {
3016 	xhci_abort_xfer(xfer, USBD_CANCELLED);
3017 }
3018 
3019 usbd_status
3020 xhci_device_generic_transfer(struct usbd_xfer *xfer)
3021 {
3022 	usbd_status err;
3023 
3024 	err = usb_insert_transfer(xfer);
3025 	if (err)
3026 		return (err);
3027 
3028 	return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
3029 }
3030 
3031 usbd_status
3032 xhci_device_generic_start(struct usbd_xfer *xfer)
3033 {
3034 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
3035 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
3036 	struct xhci_trb *trb0, *trb;
3037 	uint32_t len, remain, flags;
3038 	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
3039 	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
3040 	uint8_t toggle;
3041 	int s, i, ntrb, zerotd = 0;
3042 
3043 	KASSERT(!(xfer->rqflags & URQ_REQUEST));
3044 
3045 	if (sc->sc_bus.dying || xp->halted)
3046 		return (USBD_IOERROR);
3047 
3048 	/* How many TRBs do we need for this transfer? */
3049 	ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE);
3050 
3051 	/* If the buffer crosses a 64k boundary, we need one more. */
3052 	len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
3053 	if (len < xfer->length)
3054 		ntrb = howmany(xfer->length - len, XHCI_TRB_MAXSIZE) + 1;
3055 	else
3056 		len = xfer->length;
3057 
3058 	/* If we need to append a zero length packet, we need one more. */
3059 	if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) &&
3060 	    (xfer->length % UE_GET_SIZE(mps) == 0))
3061 		zerotd = 1;
3062 
3063 	if (xp->free_trbs < (ntrb + zerotd))
3064 		return (USBD_NOMEM);
3065 
3066 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
3067 	    usbd_xfer_isread(xfer) ?
3068 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
3069 
3070 	/* We'll toggle the first TRB once we're finished with the chain. */
3071 	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
3072 	flags = XHCI_TRB_TYPE_NORMAL | (toggle ^ 1);
3073 	if (usbd_xfer_isread(xfer))
3074 		flags |= XHCI_TRB_ISP;
3075 	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3076 
3077 	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
3078 	trb0->trb_status = htole32(
3079 	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3080 	    xhci_xfer_tdsize(xfer, xfer->length, len)
3081 	);
3082 	trb0->trb_flags = htole32(flags);
3083 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3084 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3085 	    BUS_DMASYNC_PREWRITE);
3086 
3087 	remain = xfer->length - len;
3088 	paddr += len;
3089 
3090 	/* Chain more TRBs if needed. */
3091 	for (i = ntrb - 1; i > 0; i--) {
3092 		len = min(remain, XHCI_TRB_MAXSIZE);
3093 
3094 		/* Next (or Last) TRB. */
3095 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
3096 		flags = XHCI_TRB_TYPE_NORMAL | toggle;
3097 		if (usbd_xfer_isread(xfer))
3098 			flags |= XHCI_TRB_ISP;
3099 		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3100 
3101 		trb->trb_paddr = htole64(paddr);
3102 		trb->trb_status = htole32(
3103 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3104 		    xhci_xfer_tdsize(xfer, remain, len)
3105 		);
3106 		trb->trb_flags = htole32(flags);
3107 
3108 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3109 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3110 		    BUS_DMASYNC_PREWRITE);
3111 
3112 		remain -= len;
3113 		paddr += len;
3114 	}
3115 
3116 	/* Do we need to issue a zero length transfer? */
3117 	if (zerotd == 1) {
3118 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, -1);
3119 		trb->trb_paddr = 0;
3120 		trb->trb_status = 0;
3121 		trb->trb_flags = htole32(XHCI_TRB_TYPE_NORMAL | XHCI_TRB_IOC | toggle);
3122 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3123 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3124 		    BUS_DMASYNC_PREWRITE);
3125 	}
3126 
3127 	/* First TRB. */
3128 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
3129 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3130 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3131 	    BUS_DMASYNC_PREWRITE);
3132 
3133 	s = splusb();
3134 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
3135 
3136 	xfer->status = USBD_IN_PROGRESS;
3137 	if (xfer->timeout && !sc->sc_bus.use_polling) {
3138 		timeout_del(&xfer->timeout_handle);
3139 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
3140 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
3141 	}
3142 	splx(s);
3143 
3144 	return (USBD_IN_PROGRESS);
3145 }
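
/*
 * Worked example of the TRB count above: a 100000-byte transfer whose
 * buffer starts 32768 bytes into a 64k region has a first chunk of
 * len = 65536 - 32768 = 32768 bytes, leaving 67232 bytes, so
 * ntrb = howmany(67232, 65536) + 1 = 3 TRBs.  If the length were also a
 * multiple of wMaxPacketSize and USBD_FORCE_SHORT_XFER were set, one
 * extra zero-length TD would be queued on top of that.
 */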
3146 
3147 void
3148 xhci_device_generic_done(struct usbd_xfer *xfer)
3149 {
3150 	/* Only happens with interrupt transfers. */
3151 	if (xfer->pipe->repeat) {
3152 		xfer->actlen = 0;
3153 		xhci_device_generic_start(xfer);
3154 	}
3155 }
3156 
3157 void
3158 xhci_device_generic_abort(struct usbd_xfer *xfer)
3159 {
3160 	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);
3161 
3162 	xhci_abort_xfer(xfer, USBD_CANCELLED);
3163 }
3164 
3165 usbd_status
3166 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
3167 {
3168 	usbd_status err;
3169 
3170 	err = usb_insert_transfer(xfer);
3171 	if (err && err != USBD_IN_PROGRESS)
3172 		return (err);
3173 
3174 	return (xhci_device_isoc_start(xfer));
3175 }
3176 
3177 usbd_status
3178 xhci_device_isoc_start(struct usbd_xfer *xfer)
3179 {
3180 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
3181 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
3182 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
3183 	struct xhci_trb *trb0, *trb;
3184 	uint32_t len, remain, flags;
3185 	uint64_t paddr;
3186 	uint32_t tbc, tlbpc;
3187 	int s, i, j, ntrb = xfer->nframes;
3188 	uint8_t toggle;
3189 
3190 	KASSERT(!(xfer->rqflags & URQ_REQUEST));
3191 
3192 	/*
3193 	 * To allow continuous transfers, xhci_device_isoc_transfer() above
3194 	 * starts all transfers immediately.  However, usbd_start_next() will
3195 	 * still call this function when another xfer completes, so check
3196 	 * whether this one is already in progress.
3197 	 */
3198 	if (xx->ntrb > 0)
3199 		return (USBD_IN_PROGRESS);
3200 
3201 	if (sc->sc_bus.dying || xp->halted)
3202 		return (USBD_IOERROR);
3203 
3204 	/* Why would you do that anyway? */
3205 	if (sc->sc_bus.use_polling)
3206 		return (USBD_INVAL);
3207 
3208 	paddr = DMAADDR(&xfer->dmabuf, 0);
3209 
3210 	/* How many TRBs do we need for all transfers? */
3211 	for (i = 0, ntrb = 0; i < xfer->nframes; i++) {
3212 		/* How many TRBs do we need for this transfer? */
3213 		ntrb += howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
3214 
3215 		/* If the buffer crosses a 64k boundary, we need one more. */
3216 		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
3217 		if (len < xfer->frlengths[i])
3218 			ntrb++;
3219 
3220 		paddr += xfer->frlengths[i];
3221 	}
3222 
3223 	if (xp->free_trbs < ntrb)
3224 		return (USBD_NOMEM);
3225 
3226 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
3227 	    usbd_xfer_isread(xfer) ?
3228 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
3229 
3230 	paddr = DMAADDR(&xfer->dmabuf, 0);
3231 
3232 	for (i = 0, trb0 = NULL; i < xfer->nframes; i++) {
3233 		/* How many TRBs do we need for this transfer? */
3234 		ntrb = howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
3235 
3236 		/* If the buffer crosses a 64k boundary, we need one more. */
3237 		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
3238 		if (len < xfer->frlengths[i])
3239 			ntrb++;
3240 		else
3241 			len = xfer->frlengths[i];
3242 
3243 		KASSERT(ntrb < 3);
3244 
3245 		/*
3246 		 * We'll commit the first TRB once we're finished with the
3247 		 * chain.
3248 		 */
3249 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
3250 
3251 		DPRINTFN(4, ("%s:%d: ring %p trb0_idx %lu ntrb %d paddr %llx "
3252 		    "len %u\n", __func__, __LINE__,
3253 		    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, paddr,
3254 		    len));
3255 
3256 		/* Record the first TRB so we can toggle later. */
3257 		if (trb0 == NULL) {
3258 			trb0 = trb;
3259 			toggle ^= 1;
3260 		}
3261 
3262 		flags = XHCI_TRB_TYPE_ISOCH | XHCI_TRB_SIA | toggle;
3263 		if (usbd_xfer_isread(xfer))
3264 			flags |= XHCI_TRB_ISP;
3265 		flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3266 
3267 		tbc = xhci_xfer_tbc(xfer, xfer->frlengths[i], &tlbpc);
3268 		flags |= XHCI_TRB_ISOC_TBC(tbc) | XHCI_TRB_ISOC_TLBPC(tlbpc);
3269 
3270 		trb->trb_paddr = htole64(paddr);
3271 		trb->trb_status = htole32(
3272 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3273 		    xhci_xfer_tdsize(xfer, xfer->frlengths[i], len)
3274 		);
3275 		trb->trb_flags = htole32(flags);
3276 
3277 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3278 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3279 		    BUS_DMASYNC_PREWRITE);
3280 
3281 		remain = xfer->frlengths[i] - len;
3282 		paddr += len;
3283 
3284 		/* Chain more TRBs if needed. */
3285 		for (j = ntrb - 1; j > 0; j--) {
3286 			len = min(remain, XHCI_TRB_MAXSIZE);
3287 
3288 			/* Next (or Last) TRB. */
3289 			trb = xhci_xfer_get_trb(sc, xfer, &toggle, (j == 1));
3290 			flags = XHCI_TRB_TYPE_NORMAL | toggle;
3291 			if (usbd_xfer_isread(xfer))
3292 				flags |= XHCI_TRB_ISP;
3293 			flags |= (j == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3294 			DPRINTFN(3, ("%s:%d: ring %p trb0_idx %lu ntrb %d "
3295 			    "paddr %llx len %u\n", __func__, __LINE__,
3296 			    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb,
3297 			    paddr, len));
3298 
3299 			trb->trb_paddr = htole64(paddr);
3300 			trb->trb_status = htole32(
3301 			    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3302 			    xhci_xfer_tdsize(xfer, remain, len)
3303 			);
3304 			trb->trb_flags = htole32(flags);
3305 
3306 			bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3307 			    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3308 			    BUS_DMASYNC_PREWRITE);
3309 
3310 			remain -= len;
3311 			paddr += len;
3312 		}
3313 
3314 		xfer->frlengths[i] = 0;
3315 	}
3316 
3317 	/* First TRB. */
3318 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
3319 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3320 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3321 	    BUS_DMASYNC_PREWRITE);
3322 
3323 	s = splusb();
3324 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
3325 
3326 	xfer->status = USBD_IN_PROGRESS;
3327 
3328 	if (xfer->timeout) {
3329 		timeout_del(&xfer->timeout_handle);
3330 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
3331 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
3332 	}
3333 	splx(s);
3334 
3335 	return (USBD_IN_PROGRESS);
3336 }
3337