1 /* $NetBSD: xhci.c,v 1.180 2023/07/20 11:59:04 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * USB rev 2.0 and rev 3.1 specification
31 * http://www.usb.org/developers/docs/
32 * xHCI rev 1.1 specification
33 * http://www.intel.com/technology/usb/spec.htm
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.180 2023/07/20 11:59:04 riastradh Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_usb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/kmem.h>
47 #include <sys/device.h>
48 #include <sys/select.h>
49 #include <sys/proc.h>
50 #include <sys/queue.h>
51 #include <sys/mutex.h>
52 #include <sys/condvar.h>
53 #include <sys/bus.h>
54 #include <sys/cpu.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/endian.h>
58
59 #include <dev/usb/usb.h>
60 #include <dev/usb/usbdi.h>
61 #include <dev/usb/usbdivar.h>
62 #include <dev/usb/usbdi_util.h>
63 #include <dev/usb/usbhist.h>
64 #include <dev/usb/usb_mem.h>
65 #include <dev/usb/usb_quirks.h>
66
67 #include <dev/usb/xhcireg.h>
68 #include <dev/usb/xhcivar.h>
69 #include <dev/usb/usbroothub.h>
70
71
72 #ifdef USB_DEBUG
73 #ifndef XHCI_DEBUG
74 #define xhcidebug 0
75 #else /* !XHCI_DEBUG */
76 #define HEXDUMP(a, b, c) \
77 do { \
78 if (xhcidebug > 0) \
79 hexdump(printf, a, b, c); \
80 } while (/*CONSTCOND*/0)
81 static int xhcidebug = 0;
82
83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
84 {
85 int err;
86 const struct sysctlnode *rnode;
87 const struct sysctlnode *cnode;
88
89 err = sysctl_createv(clog, 0, NULL, &rnode,
90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
91 SYSCTL_DESCR("xhci global controls"),
92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
93
94 if (err)
95 goto fail;
96
97 /* control debugging printfs */
98 err = sysctl_createv(clog, 0, &rnode, &cnode,
99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
100 "debug", SYSCTL_DESCR("Enable debugging output"),
101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
102 if (err)
103 goto fail;
104
105 return;
106 fail:
107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
108 }
109
110 #endif /* !XHCI_DEBUG */
111 #endif /* USB_DEBUG */
112
113 #ifndef HEXDUMP
114 #define HEXDUMP(a, b, c)
115 #endif
116
117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
119 #define XHCIHIST_FUNC() USBHIST_FUNC()
120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug)
121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)
123
124 #define XHCI_DCI_SLOT 0
125 #define XHCI_DCI_EP_CONTROL 1
126
127 #define XHCI_ICI_INPUT_CONTROL 0
128
129 struct xhci_pipe {
130 struct usbd_pipe xp_pipe;
131 struct usb_task xp_async_task;
132 int16_t xp_isoc_next; /* next frame */
133 uint8_t xp_maxb; /* max burst */
134 uint8_t xp_mult;
135 };
136
137 #define XHCI_COMMAND_RING_TRBS 256
138 #define XHCI_EVENT_RING_TRBS 256
139 #define XHCI_EVENT_RING_SEGMENTS 1
140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT
141
142 static usbd_status xhci_open(struct usbd_pipe *);
143 static void xhci_close_pipe(struct usbd_pipe *);
144 static int xhci_intr1(struct xhci_softc * const);
145 static void xhci_softintr(void *);
146 static void xhci_poll(struct usbd_bus *);
147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
149 static void xhci_abortx(struct usbd_xfer *);
150 static bool xhci_dying(struct usbd_bus *);
151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
153 struct usbd_port *);
154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
155 void *, int);
156
157 static void xhci_pipe_restart(struct usbd_pipe *);
158 static void xhci_pipe_restart_async_task(void *);
159 static void xhci_pipe_restart_async(struct usbd_pipe *);
160
161 static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
162 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
163 static void xhci_reset_endpoint(struct usbd_pipe *);
164 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
165 struct xhci_slot *, u_int, uint32_t);
166 static usbd_status xhci_stop_endpoint(struct usbd_pipe *);
167
168 static void xhci_host_dequeue(struct xhci_ring * const);
169 static void xhci_set_dequeue(struct usbd_pipe *);
170
171 static usbd_status xhci_do_command(struct xhci_softc * const,
172 struct xhci_soft_trb * const, int);
173 static usbd_status xhci_do_command_locked(struct xhci_softc * const,
174 struct xhci_soft_trb * const, int);
175 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
176 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
177 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
178 static usbd_status xhci_enable_slot(struct xhci_softc * const,
179 uint8_t * const);
180 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
181 static usbd_status xhci_address_device(struct xhci_softc * const,
182 uint64_t, uint8_t, bool);
183 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
184 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
185 struct xhci_slot * const, u_int);
186 static usbd_status xhci_ring_init(struct xhci_softc * const,
187 struct xhci_ring **, size_t, size_t);
188 static void xhci_ring_free(struct xhci_softc * const,
189 struct xhci_ring ** const);
190
191 static void xhci_setup_ctx(struct usbd_pipe *);
192 static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
193 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
194 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
195 static uint32_t xhci_bival2ival(uint32_t, uint32_t, uint32_t);
196
197 static void xhci_noop(struct usbd_pipe *);
198
199 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
200 static usbd_status xhci_root_intr_start(struct usbd_xfer *);
201 static void xhci_root_intr_abort(struct usbd_xfer *);
202 static void xhci_root_intr_close(struct usbd_pipe *);
203 static void xhci_root_intr_done(struct usbd_xfer *);
204
205 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
206 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
207 static void xhci_device_ctrl_abort(struct usbd_xfer *);
208 static void xhci_device_ctrl_close(struct usbd_pipe *);
209 static void xhci_device_ctrl_done(struct usbd_xfer *);
210
211 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
212 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
213 static void xhci_device_isoc_abort(struct usbd_xfer *);
214 static void xhci_device_isoc_close(struct usbd_pipe *);
215 static void xhci_device_isoc_done(struct usbd_xfer *);
216
217 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
218 static usbd_status xhci_device_intr_start(struct usbd_xfer *);
219 static void xhci_device_intr_abort(struct usbd_xfer *);
220 static void xhci_device_intr_close(struct usbd_pipe *);
221 static void xhci_device_intr_done(struct usbd_xfer *);
222
223 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
224 static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
225 static void xhci_device_bulk_abort(struct usbd_xfer *);
226 static void xhci_device_bulk_close(struct usbd_pipe *);
227 static void xhci_device_bulk_done(struct usbd_xfer *);
228
229 static const struct usbd_bus_methods xhci_bus_methods = {
230 .ubm_open = xhci_open,
231 .ubm_softint = xhci_softintr,
232 .ubm_dopoll = xhci_poll,
233 .ubm_allocx = xhci_allocx,
234 .ubm_freex = xhci_freex,
235 .ubm_abortx = xhci_abortx,
236 .ubm_dying = xhci_dying,
237 .ubm_getlock = xhci_get_lock,
238 .ubm_newdev = xhci_new_device,
239 .ubm_rhctrl = xhci_roothub_ctrl,
240 };
241
242 static const struct usbd_pipe_methods xhci_root_intr_methods = {
243 .upm_transfer = xhci_root_intr_transfer,
244 .upm_start = xhci_root_intr_start,
245 .upm_abort = xhci_root_intr_abort,
246 .upm_close = xhci_root_intr_close,
247 .upm_cleartoggle = xhci_noop,
248 .upm_done = xhci_root_intr_done,
249 };
250
251
252 static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
253 .upm_transfer = xhci_device_ctrl_transfer,
254 .upm_start = xhci_device_ctrl_start,
255 .upm_abort = xhci_device_ctrl_abort,
256 .upm_close = xhci_device_ctrl_close,
257 .upm_cleartoggle = xhci_noop,
258 .upm_done = xhci_device_ctrl_done,
259 };
260
261 static const struct usbd_pipe_methods xhci_device_isoc_methods = {
262 .upm_transfer = xhci_device_isoc_transfer,
263 .upm_abort = xhci_device_isoc_abort,
264 .upm_close = xhci_device_isoc_close,
265 .upm_cleartoggle = xhci_noop,
266 .upm_done = xhci_device_isoc_done,
267 };
268
269 static const struct usbd_pipe_methods xhci_device_bulk_methods = {
270 .upm_transfer = xhci_device_bulk_transfer,
271 .upm_start = xhci_device_bulk_start,
272 .upm_abort = xhci_device_bulk_abort,
273 .upm_close = xhci_device_bulk_close,
274 .upm_cleartoggle = xhci_noop,
275 .upm_done = xhci_device_bulk_done,
276 };
277
278 static const struct usbd_pipe_methods xhci_device_intr_methods = {
279 .upm_transfer = xhci_device_intr_transfer,
280 .upm_start = xhci_device_intr_start,
281 .upm_abort = xhci_device_intr_abort,
282 .upm_close = xhci_device_intr_close,
283 .upm_cleartoggle = xhci_noop,
284 .upm_done = xhci_device_intr_done,
285 };
286
287 static inline uint32_t
288 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
289 {
290 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
291 }
292
293 static inline uint32_t
294 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
295 {
296 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
297 }
298
299 static inline uint32_t
300 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
301 {
302 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
303 }
304
305 static inline void
306 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
307 uint32_t value)
308 {
309 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
310 }
311
312 #if 0 /* unused */
313 static inline void
314 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
315 uint32_t value)
316 {
317 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
318 }
319 #endif /* unused */
320
321 static inline uint32_t
322 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
323 {
324 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
325 }
326
327 static inline uint32_t
328 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
329 {
330 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
331 }
332
333 static inline void
334 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
335 uint32_t value)
336 {
337 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
338 }
339
340 static inline uint64_t
341 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
342 {
343 uint64_t value;
344
345 #ifdef XHCI_USE_BUS_SPACE_8
346 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
347 #else
348 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
349 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
350 offset + 4) << 32;
351 #endif
352
353 return value;
354 }
355
356 static inline void
357 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
358 uint64_t value)
359 {
360 #ifdef XHCI_USE_BUS_SPACE_8
361 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
362 #else
363 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
364 (value >> 0) & 0xffffffff);
365 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
366 (value >> 32) & 0xffffffff);
367 #endif
368 }
369
370 static inline uint32_t
371 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
372 {
373 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
374 }
375
376 static inline void
377 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
378 uint32_t value)
379 {
380 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
381 }
382
383 static inline uint64_t
384 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
385 {
386 uint64_t value;
387
388 #ifdef XHCI_USE_BUS_SPACE_8
389 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
390 #else
391 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
392 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
393 offset + 4) << 32;
394 #endif
395
396 return value;
397 }
398
399 static inline void
400 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
401 uint64_t value)
402 {
403 #ifdef XHCI_USE_BUS_SPACE_8
404 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
405 #else
406 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
407 (value >> 0) & 0xffffffff);
408 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
409 (value >> 32) & 0xffffffff);
410 #endif
411 }
412
413 #if 0 /* unused */
414 static inline uint32_t
415 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
416 {
417 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
418 }
419 #endif /* unused */
420
421 static inline void
422 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
423 uint32_t value)
424 {
425 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
426 }
427
428 /* --- */
429
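/*
 * Return the endpoint type encoding used by the xHCI Endpoint Context
 * EP Type field (xHCI 1.1 sec 6.2.3): Isoch/Bulk/Interrupt OUT map to
 * 1/2/3, the corresponding IN types to 5/6/7, and Control to 4.
 */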
430 static inline uint8_t
431 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
432 {
433 u_int eptype = 0;
434
435 switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
436 case UE_CONTROL:
437 eptype = 0x0;
438 break;
439 case UE_ISOCHRONOUS:
440 eptype = 0x1;
441 break;
442 case UE_BULK:
443 eptype = 0x2;
444 break;
445 case UE_INTERRUPT:
446 eptype = 0x3;
447 break;
448 }
449
450 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
451 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
452 return eptype | 0x4;
453 else
454 return eptype;
455 }
456
457 static u_int
458 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
459 {
460 /* xHCI 1.0 section 4.5.1 */
461 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
462 u_int in = 0;
463
464 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
465 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
466 in = 1;
467
468 return epaddr * 2 + in;
469 }
470
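/*
 * Convert a Device Context Index into an Input Context Index.  The input
 * context places the Input Control Context at index 0, so the slot and
 * endpoint contexts are shifted up by one (xHCI 1.1 sec 6.2.5).
 */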
471 static inline u_int
472 xhci_dci_to_ici(const u_int i)
473 {
474 return i + 1;
475 }
476
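/*
 * Accessors for the per-slot device and input contexts: the *_dcv/_icv
 * variants return the kernel virtual address and the *_icp variant the
 * DMA address of the context at the given (device or input) context
 * index, each context being sc_ctxsz bytes (32 or 64 per HCCPARAMS.CSZ).
 */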
477 static inline void *
478 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
479 const u_int dci)
480 {
481 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
482 }
483
484 #if 0 /* unused */
485 static inline bus_addr_t
486 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
487 const u_int dci)
488 {
489 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
490 }
491 #endif /* unused */
492
493 static inline void *
494 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
495 const u_int ici)
496 {
497 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
498 }
499
500 static inline bus_addr_t
501 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
502 const u_int ici)
503 {
504 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
505 }
506
507 static inline struct xhci_trb *
508 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
509 {
510 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
511 }
512
513 static inline bus_addr_t
514 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
515 {
516 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
517 }
518
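/*
 * Stage a TRB in the xfer's TRB array in host byte order; by contrast,
 * xhci_trb_put() below stores a TRB in the little-endian layout that the
 * controller reads from the ring.
 */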
519 static inline void
520 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
521 uint64_t parameter, uint32_t status, uint32_t control)
522 {
523 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
524 xx->xx_trb[idx].trb_0 = parameter;
525 xx->xx_trb[idx].trb_2 = status;
526 xx->xx_trb[idx].trb_3 = control;
527 }
528
529 static inline void
530 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
531 uint32_t control)
532 {
533 trb->trb_0 = htole64(parameter);
534 trb->trb_2 = htole32(status);
535 trb->trb_3 = htole32(control);
536 }
537
538 static int
539 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
540 {
541 /* base address of TRBs */
542 bus_addr_t trbp = xhci_ring_trbp(xr, 0);
543
544 /* trb_0 range sanity check */
545 if (trb_0 == 0 || trb_0 < trbp ||
546 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
547 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
548 return 1;
549 }
550 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
551 return 0;
552 }
553
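/*
 * Fetch the current endpoint state (Running, Stopped, Halted, ...) from
 * the slot's device context in host memory, syncing the DMA area first.
 */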
554 static unsigned int
555 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
556 u_int dci)
557 {
558 uint32_t *cp;
559
560 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
561 cp = xhci_slot_get_dcv(sc, xs, dci);
562 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
563 }
564
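/*
 * Return the bus number (0 for the USB 3 roothub, 1 for the USB 2
 * roothub) that a controller port was assigned by xhci_id_protocols().
 */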
565 static inline unsigned int
566 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
567 {
568 const unsigned int port = ctlrport - 1;
569 const uint8_t bit = __BIT(port % NBBY);
570
571 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
572 }
573
574 /*
575 * Return the roothub port for a controller port. Both are 1..n.
576 */
577 static inline unsigned int
578 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
579 {
580
581 return sc->sc_ctlrportmap[ctrlport - 1];
582 }
583
584 /*
585 * Return the controller port for a bus roothub port. Both are 1..n.
586 */
587 static inline unsigned int
588 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
589 unsigned int rhport)
590 {
591
592 return sc->sc_rhportmap[bn][rhport - 1];
593 }
594
595 /* --- */
596
597 void
598 xhci_childdet(device_t self, device_t child)
599 {
600 struct xhci_softc * const sc = device_private(self);
601
602 mutex_enter(&sc->sc_intr_lock);
603 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
604 if (child == sc->sc_child2)
605 sc->sc_child2 = NULL;
606 else if (child == sc->sc_child)
607 sc->sc_child = NULL;
608 mutex_exit(&sc->sc_intr_lock);
609 }
610
611 int
612 xhci_detach(struct xhci_softc *sc, int flags)
613 {
614 int rv = 0;
615
616 if (sc->sc_child2 != NULL) {
617 rv = config_detach(sc->sc_child2, flags);
618 if (rv != 0)
619 return rv;
620 KASSERT(sc->sc_child2 == NULL);
621 }
622
623 if (sc->sc_child != NULL) {
624 rv = config_detach(sc->sc_child, flags);
625 if (rv != 0)
626 return rv;
627 KASSERT(sc->sc_child == NULL);
628 }
629
630 /* XXX unconfigure/free slots */
631
632 /* verify: */
633 xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
634 xhci_op_write_4(sc, XHCI_USBCMD, 0);
635 /* do we need to wait for stop? */
636
637 xhci_op_write_8(sc, XHCI_CRCR, 0);
638 xhci_ring_free(sc, &sc->sc_cr);
639 cv_destroy(&sc->sc_command_cv);
640 cv_destroy(&sc->sc_cmdbusy_cv);
641
642 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
643 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
644 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
645 xhci_ring_free(sc, &sc->sc_er);
646
647 usb_freemem(&sc->sc_eventst_dma);
648
649 xhci_op_write_8(sc, XHCI_DCBAAP, 0);
650 usb_freemem(&sc->sc_dcbaa_dma);
651
652 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);
653
654 kmem_free(sc->sc_ctlrportbus,
655 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
656 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));
657
658 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
659 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
660 }
661
662 mutex_destroy(&sc->sc_rhlock);
663 mutex_destroy(&sc->sc_lock);
664 mutex_destroy(&sc->sc_intr_lock);
665
666 pool_cache_destroy(sc->sc_xferpool);
667
668 return rv;
669 }
670
671 int
672 xhci_activate(device_t self, enum devact act)
673 {
674 struct xhci_softc * const sc = device_private(self);
675
676 switch (act) {
677 case DVACT_DEACTIVATE:
678 sc->sc_dying = true;
679 return 0;
680 default:
681 return EOPNOTSUPP;
682 }
683 }
684
685 bool
686 xhci_suspend(device_t self, const pmf_qual_t *qual)
687 {
688 struct xhci_softc * const sc = device_private(self);
689 size_t i, j, bn, dci;
690 int port;
691 uint32_t v;
692 usbd_status err;
693 bool ok = false;
694
695 XHCIHIST_FUNC(); XHCIHIST_CALLED();
696
697 /*
698 * Block issuance of new commands, and wait for all pending
699 * commands to complete.
700 */
701 mutex_enter(&sc->sc_lock);
702 KASSERT(sc->sc_suspender == NULL);
703 sc->sc_suspender = curlwp;
704 while (sc->sc_command_addr != 0)
705 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
706 mutex_exit(&sc->sc_lock);
707
708 /*
709 * Block roothub xfers which might touch portsc registers until
710 * we're done suspending.
711 */
712 mutex_enter(&sc->sc_rhlock);
713
714 /*
715 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
716 * xHCI Power Management, p. 342
717 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
718 */
719
720 /*
721 * `1. Stop all USB activity by issuing Stop Endpoint Commands
722 * for Busy endpoints in the Running state. If the Force
723 * Save Context Capability (FSC = ``0'') is not supported,
724 * then Stop Endpoint Commands shall be issued for all idle
725 * endpoints in the Running state as well. The Stop
726 * Endpoint Command causes the xHC to update the respective
727 * Endpoint or Stream Contexts in system memory, e.g. the
728 * TR Dequeue Pointer, DCS, etc. fields. Refer to
729 * Implementation Note "0".'
730 */
731 for (i = 0; i < sc->sc_maxslots; i++) {
732 struct xhci_slot *xs = &sc->sc_slots[i];
733
734 /* Skip if the slot is not in use. */
735 if (xs->xs_idx == 0)
736 continue;
737
738 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
739 /* Skip if the endpoint is not Running. */
740 /* XXX What about Busy? */
741 if (xhci_get_epstate(sc, xs, dci) !=
742 XHCI_EPSTATE_RUNNING)
743 continue;
744
745 /* Stop endpoint. */
746 mutex_enter(&sc->sc_lock);
747 err = xhci_stop_endpoint_cmd(sc, xs, dci,
748 XHCI_TRB_3_SUSP_EP_BIT);
749 mutex_exit(&sc->sc_lock);
750 if (err) {
751 device_printf(self, "failed to stop endpoint"
752 " slot %zu dci %zu err %d\n",
753 i, dci, err);
754 goto out;
755 }
756 }
757 }
758
759 /*
760 * Next, suspend all the ports:
761 *
762 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
763 * Suspend-Resume, pp. 276-283
764 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
765 */
766 for (bn = 0; bn < 2; bn++) {
767 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
768 /* 4.15.1: Port Suspend. */
769 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
770
771 /*
772 * `System software places individual ports
773 * into suspend mode by writing a ``3'' into
774 * the appropriate PORTSC register Port Link
775 * State (PLS) field (refer to Section 5.4.8).
776 * Software should only set the PLS field to
777 * ``3'' when the port is in the Enabled
778 * state.'
779 *
780 * `Software should not attempt to suspend a
781 * port unless the port reports that it is in
782 * the enabled (PED = ``1''; PLS < ``3'')
783 * state (refer to Section 5.4.8 for more
784 * information about PED and PLS).'
785 */
786 v = xhci_op_read_4(sc, port);
787 if (((v & XHCI_PS_PED) == 0) ||
788 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
789 continue;
790 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
791 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
792 xhci_op_write_4(sc, port, v);
793
794 /*
795 * `When the PLS field is written with U3
796 * (``3''), the status of the PLS bit will not
797 * change to the target U state U3 until the
798 * suspend signaling has completed to the
799 * attached device (which may be as long as
800 * 10ms.).'
801 *
802 * `Software is required to wait for U3
803 * transitions to complete before it puts the
804 * xHC into a low power state, and before
805 * resuming the port.'
806 *
807 * XXX Take advantage of the technique to
808 * reduce polling on host controllers that
809 * support the U3C capability.
810 */
811 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
812 v = xhci_op_read_4(sc, port);
813 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
814 break;
815 usb_delay_ms(&sc->sc_bus, 1);
816 }
817 if (j == XHCI_WAIT_PLS_U3) {
818 device_printf(self,
819 "suspend timeout on bus %zu port %zu\n",
820 bn, i);
821 goto out;
822 }
823 }
824 }
825
826 /*
827 * `2. Ensure that the Command Ring is in the Stopped state
828 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
829 * empty), and all Command Completion Events associated
830 * with them have been received.'
831 *
832 * XXX
833 */
834
835 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
836 xhci_op_write_4(sc, XHCI_USBCMD,
837 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);
838
839 /*
840 * `4. Read the Operational Runtime, and VTIO registers in the
841 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
842 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
843 * state.'
844 *
845 * (We don't use VTIO here (XXX for now?).)
846 */
847 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
848 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
849 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
850 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
851 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
852 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
853 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
854 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
855 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));
856
857 /*
858 * `5. Set the Controller Save State (CSS) flag in the USBCMD
859 * register (5.4.1)...'
860 */
861 xhci_op_write_4(sc, XHCI_USBCMD,
862 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);
863
864 /*
865 * `...and wait for the Save State Status (SSS) flag in the
866 * USBSTS register (5.4.2) to transition to ``0''.'
867 */
868 for (i = 0; i < XHCI_WAIT_SSS; i++) {
869 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
870 break;
871 usb_delay_ms(&sc->sc_bus, 1);
872 }
873 if (i >= XHCI_WAIT_SSS) {
874 device_printf(self, "suspend timeout, USBSTS.SSS\n");
875 /*
876 * Just optimistically go on and check SRE anyway --
877 * what's the worst that could happen?
878 */
879 }
880
881 /*
882 * `Note: After a Save or Restore operation completes, the
883 * Save/Restore Error (SRE) flag in the USBSTS register should
884 * be checked to ensure that the operation completed
885 * successfully.'
886 */
887 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
888 device_printf(self, "suspend error, USBSTS.SRE\n");
889 goto out;
890 }
891
892 /* Success! */
893 ok = true;
894
895 out: mutex_exit(&sc->sc_rhlock);
896 if (!ok) {
897 /*
898 * If suspend failed, stop holding up command issuance
899 * and make it fail instead.
900 */
901 mutex_enter(&sc->sc_lock);
902 KASSERT(sc->sc_suspender == curlwp);
903 sc->sc_suspender = NULL;
904 sc->sc_suspendresume_failed = true;
905 cv_broadcast(&sc->sc_cmdbusy_cv);
906 mutex_exit(&sc->sc_lock);
907 }
908 return ok;
909 }
910
911 bool
912 xhci_resume(device_t self, const pmf_qual_t *qual)
913 {
914 struct xhci_softc * const sc = device_private(self);
915 size_t i, j, bn, dci;
916 int port;
917 uint32_t v;
918 bool ok = false;
919
920 XHCIHIST_FUNC(); XHCIHIST_CALLED();
921
922 /*
923 * If resume had previously failed, just try again. Can't make
924 * things worse, probably.
925 */
926 mutex_enter(&sc->sc_lock);
927 if (sc->sc_suspendresume_failed) {
928 KASSERT(sc->sc_suspender == NULL);
929 sc->sc_suspender = curlwp;
930 sc->sc_suspendresume_failed = false;
931 }
932 KASSERT(sc->sc_suspender);
933 mutex_exit(&sc->sc_lock);
934
935 /*
936 * Block roothub xfers which might touch portsc registers until
937 * we're done resuming.
938 */
939 mutex_enter(&sc->sc_rhlock);
940
941 /*
942 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
943 * xHCI Power Management, p. 343
944 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
945 */
946
947 /*
948 * `4. Restore the Operational Runtime, and VTIO registers with
949 * their previously saved state in the following order:
950 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
951 * IMOD, and VTIO.'
952 *
953 * (We don't use VTIO here (for now?).)
954 */
955 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
956 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
957 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
958 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
959 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
960 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
961 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
962 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
963 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);
964
965 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */
966
967 /*
968 * `5. Set the Controller Restore State (CRS) flag in the
969 * USBCMD register (5.4.1) to ``1''...'
970 */
971 xhci_op_write_4(sc, XHCI_USBCMD,
972 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);
973
974 /*
975 * `...and wait for the Restore State Status (RSS) in the
976 * USBSTS register (5.4.2) to transition to ``0''.'
977 */
978 for (i = 0; i < XHCI_WAIT_RSS; i++) {
979 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
980 break;
981 usb_delay_ms(&sc->sc_bus, 1);
982 }
983 if (i >= XHCI_WAIT_RSS) {
984 device_printf(self, "resume timeout, USBSTS.RSS\n");
985 goto out;
986 }
987
988 /*
989 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
990 * are consistent with the RCS values to be written to the
991 * CRCR.'
992 *
993 * XXX Hope just zeroing it is good enough!
994 */
995 xhci_host_dequeue(sc->sc_cr);
996
997 /*
998 * `7. Write the CRCR with the address and RCS value of the
999 * reinitialized Command Ring. Note that this write will
1000 * cause the Command Ring to restart at the address
1001 * specified by the CRCR.'
1002 */
1003 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1004 sc->sc_cr->xr_cs);
1005
1006 /*
1007 * `8. Enable the controller by setting Run/Stop (R/S) =
1008 * ``1''.'
1009 */
1010 xhci_op_write_4(sc, XHCI_USBCMD,
1011 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);
1012
1013 /*
1014 * `9. Software shall walk the USB topology and initialize each
1015 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
1016 * external hub ports attached to USB devices.'
1017 *
1018 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
1019 * `Port Resume', 4.15.2.2 `Host Initiated'.
1020 *
1021 * XXX We should maybe batch up initiating the state
1022 * transitions, and then wait for them to complete all at once.
1023 */
1024 for (bn = 0; bn < 2; bn++) {
1025 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
1026 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));
1027
1028 /* `When a port is in the U3 state: ...' */
1029 v = xhci_op_read_4(sc, port);
1030 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
1031 continue;
1032
1033 /*
1034 * `For a USB2 protocol port, software shall
1035 * write a ``15'' (Resume) to the PLS field to
1036 * initiate resume signaling. The port shall
1037 * transition to the Resume substate and the
1038 * xHC shall transmit the resume signaling
1039 * within 1ms (T_URSM). Software shall ensure
1040 * that resume is signaled for at least 20ms
1041 * (T_DRSMDN). Software shall start timing
1042 * T_DRSMDN from the write of ``15'' (Resume)
1043 * to PLS.'
1044 */
1045 if (bn == 1) {
1046 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
1047 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1048 v |= XHCI_PS_LWS;
1049 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
1050 xhci_op_write_4(sc, port, v);
1051 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
1052 } else {
1053 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
1054 }
1055
1056 /*
1057 * `For a USB3 protocol port [and a USB2
1058 * protocol port after transitioning to
1059 * Resume], software shall write a ``0'' (U0)
1060 * to the PLS field...'
1061 */
1062 v = xhci_op_read_4(sc, port);
1063 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
1064 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
1065 xhci_op_write_4(sc, port, v);
1066
1067 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
1068 v = xhci_op_read_4(sc, port);
1069 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
1070 break;
1071 usb_delay_ms(&sc->sc_bus, 1);
1072 }
1073 if (j == XHCI_WAIT_PLS_U0) {
1074 device_printf(self,
1075 "resume timeout on bus %zu port %zu\n",
1076 bn, i);
1077 goto out;
1078 }
1079 }
1080 }
1081
1082 /*
1083 * `10. Restart each of the previously Running endpoints by
1084 * ringing their doorbells.'
1085 */
1086 for (i = 0; i < sc->sc_maxslots; i++) {
1087 struct xhci_slot *xs = &sc->sc_slots[i];
1088
1089 /* Skip if the slot is not in use. */
1090 if (xs->xs_idx == 0)
1091 continue;
1092
1093 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
1094 /* Skip if the endpoint is not Running. */
1095 if (xhci_get_epstate(sc, xs, dci) !=
1096 XHCI_EPSTATE_RUNNING)
1097 continue;
1098
1099 /* Ring the doorbell. */
1100 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
1101 }
1102 }
1103
1104 /*
1105 * `Note: After a Save or Restore operation completes, the
1106 * Save/Restore Error (SRE) flag in the USBSTS register should
1107 * be checked to ensure that the operation completed
1108 * successfully.'
1109 */
1110 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
1111 device_printf(self, "resume error, USBSTS.SRE\n");
1112 goto out;
1113 }
1114
1115 /* Success! */
1116 ok = true;
1117
1118 out: /*
1119 * Resume command issuance. If the hardware failed to resume,
1120 * well, tough -- deadlocking because everything is held up on
1121 * the suspension, with no opportunity to detach, isn't better
1122 * than timing out waiting for dead hardware.
1123 */
1124 mutex_enter(&sc->sc_lock);
1125 KASSERT(sc->sc_suspender);
1126 sc->sc_suspender = NULL;
1127 sc->sc_suspendresume_failed = !ok;
1128 cv_broadcast(&sc->sc_cmdbusy_cv);
1129 mutex_exit(&sc->sc_lock);
1130
1131 mutex_exit(&sc->sc_rhlock);
1132 return ok;
1133 }
1134
1135 bool
1136 xhci_shutdown(device_t self, int flags)
1137 {
1138 return false;
1139 }
1140
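/*
 * Reset the host controller: wait for Controller Not Ready (CNR) to
 * clear, halt the controller, issue HCRST, and wait for the reset and
 * CNR to clear again before touching any other register.
 */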
1141 static int
1142 xhci_hc_reset(struct xhci_softc * const sc)
1143 {
1144 uint32_t usbcmd, usbsts;
1145 int i;
1146
1147 /* Check controller not ready */
1148 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1149 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1150 if ((usbsts & XHCI_STS_CNR) == 0)
1151 break;
1152 usb_delay_ms(&sc->sc_bus, 1);
1153 }
1154 if (i >= XHCI_WAIT_CNR) {
1155 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
1156 return EIO;
1157 }
1158
1159 /* Halt controller */
1160 usbcmd = 0;
1161 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1162 usb_delay_ms(&sc->sc_bus, 1);
1163
1164 /* Reset controller */
1165 usbcmd = XHCI_CMD_HCRST;
1166 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
1167 for (i = 0; i < XHCI_WAIT_HCRST; i++) {
1168 /*
1169 * Wait 1ms first. Existing Intel xHCI requires 1ms delay to
1170 * prevent system hang (Errata).
1171 */
1172 usb_delay_ms(&sc->sc_bus, 1);
1173 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
1174 if ((usbcmd & XHCI_CMD_HCRST) == 0)
1175 break;
1176 }
1177 if (i >= XHCI_WAIT_HCRST) {
1178 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
1179 return EIO;
1180 }
1181
1182 /* Check controller not ready */
1183 for (i = 0; i < XHCI_WAIT_CNR; i++) {
1184 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1185 if ((usbsts & XHCI_STS_CNR) == 0)
1186 break;
1187 usb_delay_ms(&sc->sc_bus, 1);
1188 }
1189 if (i >= XHCI_WAIT_CNR) {
1190 aprint_error_dev(sc->sc_dev,
1191 "controller not ready timeout after reset\n");
1192 return EIO;
1193 }
1194
1195 return 0;
1196 }
1197
1198 /* 7.2 xHCI Support Protocol Capability */
1199 static void
1200 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
1201 {
1202 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1203
1204 /* XXX Cache this lot */
1205
1206 const uint32_t w0 = xhci_read_4(sc, ecp);
1207 const uint32_t w4 = xhci_read_4(sc, ecp + 4);
1208 const uint32_t w8 = xhci_read_4(sc, ecp + 8);
1209 const uint32_t wc = xhci_read_4(sc, ecp + 0xc);
1210
1211 aprint_debug_dev(sc->sc_dev,
1212 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);
1213
1214 if (w4 != XHCI_XECP_USBID)
1215 return;
1216
1217 const int major = XHCI_XECP_SP_W0_MAJOR(w0);
1218 const int minor = XHCI_XECP_SP_W0_MINOR(w0);
1219 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
1220 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);
1221
1222 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
1223 switch (mm) {
1224 case 0x0200:
1225 case 0x0300:
1226 case 0x0301:
1227 case 0x0310:
1228 case 0x0320:
1229 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
1230 major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
1231 if (major == 3)
1232 sc->sc_usb3nports += cpo + cpc - 1;
1233 else
1234 sc->sc_usb2nports += cpo + cpc - 1;
1235 break;
1236 default:
1237 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
1238 major, minor);
1239 return;
1240 }
1241
1242 const size_t bus = (major == 3) ? 0 : 1;
1243
1244 /* Index arrays with 0..n-1 where ports are numbered 1..n */
1245 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
1246 if (sc->sc_ctlrportmap[cp] != 0) {
1247 aprint_error_dev(sc->sc_dev, "controller port %zu "
1248 "already assigned", cp);
1249 continue;
1250 }
1251
1252 sc->sc_ctlrportbus[cp / NBBY] |=
1253 bus == 0 ? 0 : __BIT(cp % NBBY);
1254
1255 const size_t rhp = sc->sc_rhportcount[bus]++;
1256
1257 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
1258 "bus %zu rhp %zu is %d", bus, rhp,
1259 sc->sc_rhportmap[bus][rhp]);
1260
1261 sc->sc_rhportmap[bus][rhp] = cp + 1;
1262 sc->sc_ctlrportmap[cp] = rhp + 1;
1263 }
1264 }
1265
1266 /* Process extended capabilities */
1267 static void
1268 xhci_ecp(struct xhci_softc *sc)
1269 {
1270 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1271
1272 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
1273 while (ecp != 0) {
1274 uint32_t ecr = xhci_read_4(sc, ecp);
1275 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
1276 switch (XHCI_XECP_ID(ecr)) {
1277 case XHCI_ID_PROTOCOLS: {
1278 xhci_id_protocols(sc, ecp);
1279 break;
1280 }
1281 case XHCI_ID_USB_LEGACY: {
1282 uint8_t bios_sem;
1283
1284 /* Take host controller ownership from BIOS */
1285 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
1286 if (bios_sem) {
1287 /* sets xHCI to be owned by OS */
1288 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
1289 aprint_debug_dev(sc->sc_dev,
1290 "waiting for BIOS to give up control\n");
1291 for (int i = 0; i < 5000; i++) {
1292 bios_sem = xhci_read_1(sc, ecp +
1293 XHCI_XECP_BIOS_SEM);
1294 if (bios_sem == 0)
1295 break;
1296 DELAY(1000);
1297 }
1298 if (bios_sem) {
1299 aprint_error_dev(sc->sc_dev,
1300 "timed out waiting for BIOS\n");
1301 }
1302 }
1303 break;
1304 }
1305 default:
1306 break;
1307 }
1308 ecr = xhci_read_4(sc, ecp);
1309 if (XHCI_XECP_NEXT(ecr) == 0) {
1310 ecp = 0;
1311 } else {
1312 ecp += XHCI_XECP_NEXT(ecr) * 4;
1313 }
1314 }
1315 }
1316
1317 #define XHCI_HCCPREV1_BITS \
1318 "\177\020" /* New bitmask */ \
1319 "f\020\020XECP\0" \
1320 "f\014\4MAXPSA\0" \
1321 "b\013CFC\0" \
1322 "b\012SEC\0" \
1323 "b\011SBD\0" \
1324 "b\010FSE\0" \
1325 "b\7NSS\0" \
1326 "b\6LTC\0" \
1327 "b\5LHRC\0" \
1328 "b\4PIND\0" \
1329 "b\3PPC\0" \
1330 "b\2CZC\0" \
1331 "b\1BNC\0" \
1332 "b\0AC64\0" \
1333 "\0"
1334 #define XHCI_HCCV1_x_BITS \
1335 "\177\020" /* New bitmask */ \
1336 "f\020\020XECP\0" \
1337 "f\014\4MAXPSA\0" \
1338 "b\013CFC\0" \
1339 "b\012SEC\0" \
1340 "b\011SPC\0" \
1341 "b\010PAE\0" \
1342 "b\7NSS\0" \
1343 "b\6LTC\0" \
1344 "b\5LHRC\0" \
1345 "b\4PIND\0" \
1346 "b\3PPC\0" \
1347 "b\2CSZ\0" \
1348 "b\1BNC\0" \
1349 "b\0AC64\0" \
1350 "\0"
1351
1352 #define XHCI_HCC2_BITS \
1353 "\177\020" /* New bitmask */ \
1354 "b\7ETC_TSC\0" \
1355 "b\6ETC\0" \
1356 "b\5CIC\0" \
1357 "b\4LEC\0" \
1358 "b\3CTC\0" \
1359 "b\2FSC\0" \
1360 "b\1CMC\0" \
1361 "b\0U3C\0" \
1362 "\0"
1363
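/*
 * Final step of controller bring-up (possibly deferred by the
 * XHCI_DEFERRED_START quirk): enable interrupter 0, program the
 * interrupt moderation interval, and set Run/Stop to start the xHC.
 */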
1364 void
1365 xhci_start(struct xhci_softc *sc)
1366 {
1367 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
1368 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
1369 /* Intel xhci needs interrupt rate moderated. */
1370 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
1371 else
1372 xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
1373 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
1374 xhci_rt_read_4(sc, XHCI_IMOD(0)));
1375
1376 /* Go! */
1377 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
1378 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
1379 xhci_op_read_4(sc, XHCI_USBCMD));
1380 }
1381
1382 int
1383 xhci_init(struct xhci_softc *sc)
1384 {
1385 bus_size_t bsz;
1386 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
1387 uint32_t pagesize, config;
1388 int i = 0;
1389 uint16_t hciversion;
1390 uint8_t caplength;
1391
1392 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1393
1394 /* Set up the bus struct for the usb 3 and usb 2 buses */
1395 sc->sc_bus.ub_methods = &xhci_bus_methods;
1396 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
1397 sc->sc_bus.ub_usedma = true;
1398 sc->sc_bus.ub_hcpriv = sc;
1399
1400 sc->sc_bus2.ub_methods = &xhci_bus_methods;
1401 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
1402 sc->sc_bus2.ub_revision = USBREV_2_0;
1403 sc->sc_bus2.ub_usedma = true;
1404 sc->sc_bus2.ub_hcpriv = sc;
1405 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;
1406
1407 caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
1408 hciversion = xhci_read_2(sc, XHCI_HCIVERSION);
1409
1410 if (hciversion < XHCI_HCIVERSION_0_96 ||
1411 hciversion >= 0x0200) {
1412 aprint_normal_dev(sc->sc_dev,
1413 "xHCI version %x.%x not known to be supported\n",
1414 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1415 } else {
1416 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
1417 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
1418 }
1419
1420 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
1421 &sc->sc_cbh) != 0) {
1422 aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
1423 return ENOMEM;
1424 }
1425
1426 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
1427 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
1428 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
1429 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
1430 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
1431 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
1432 aprint_debug_dev(sc->sc_dev,
1433 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);
1434
1435 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
1436 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;
1437
1438 char sbuf[128];
1439 if (hciversion < XHCI_HCIVERSION_1_0)
1440 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
1441 else
1442 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
1443 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
1444 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
1445 XHCI_HCC_XECP(sc->sc_hcc) * 4);
1446 if (hciversion >= XHCI_HCIVERSION_1_1) {
1447 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
1448 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
1449 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
1450 }
1451
1452 /* default all ports to bus 0, i.e. usb 3 */
1453 sc->sc_ctlrportbus = kmem_zalloc(
1454 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
1455 sc->sc_ctlrportmap =
1456 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1457
1458 /* controller port to bus roothub port map */
1459 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
1460 sc->sc_rhportmap[j] =
1461 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
1462 }
1463
1464 /*
1465 * Process all Extended Capabilities
1466 */
1467 xhci_ecp(sc);
1468
1469 bsz = XHCI_PORTSC(sc->sc_maxports);
1470 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
1471 &sc->sc_obh) != 0) {
1472 aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
1473 return ENOMEM;
1474 }
1475
1476 dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
1477 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
1478 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
1479 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
1480 return ENOMEM;
1481 }
1482
1483 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
1484 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
1485 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
1486 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
1487 return ENOMEM;
1488 }
1489
1490 int rv;
1491 rv = xhci_hc_reset(sc);
1492 if (rv != 0) {
1493 return rv;
1494 }
1495
1496 if (sc->sc_vendor_init)
1497 sc->sc_vendor_init(sc);
1498
1499 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
1500 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
1501 pagesize = ffs(pagesize);
1502 if (pagesize == 0) {
1503 aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
1504 return EIO;
1505 }
1506 sc->sc_pgsz = 1 << (12 + (pagesize - 1));
1507 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
1508 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
1509 (uint32_t)sc->sc_maxslots);
1510 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);
1511
1512 int err;
1513 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
1514 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
1515 if (sc->sc_maxspbuf != 0) {
1516 err = usb_allocmem(sc->sc_bus.ub_dmatag,
1517 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
1518 USBMALLOC_COHERENT | USBMALLOC_ZERO,
1519 &sc->sc_spbufarray_dma);
1520 if (err) {
1521 aprint_error_dev(sc->sc_dev,
1522 "spbufarray init fail, err %d\n", err);
1523 return ENOMEM;
1524 }
1525
1526 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
1527 sc->sc_maxspbuf, KM_SLEEP);
1528 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
1529 for (i = 0; i < sc->sc_maxspbuf; i++) {
1530 usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
1531 /* allocate contexts */
1532 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
1533 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO,
1534 dma);
1535 if (err) {
1536 aprint_error_dev(sc->sc_dev,
1537 "spbufarray_dma init fail, err %d\n", err);
1538 rv = ENOMEM;
1539 goto bad1;
1540 }
1541 spbufarray[i] = htole64(DMAADDR(dma, 0));
1542 usb_syncmem(dma, 0, sc->sc_pgsz,
1543 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1544 }
1545
1546 usb_syncmem(&sc->sc_spbufarray_dma, 0,
1547 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
1548 }
1549
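	/* Tell the controller how many device slots to enable (CONFIG.MaxSlotsEn). */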
1550 config = xhci_op_read_4(sc, XHCI_CONFIG);
1551 config &= ~0xFF;
1552 config |= sc->sc_maxslots & 0xFF;
1553 xhci_op_write_4(sc, XHCI_CONFIG, config);
1554
1555 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
1556 XHCI_COMMAND_RING_SEGMENTS_ALIGN);
1557 if (err) {
1558 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n",
1559 err);
1560 rv = ENOMEM;
1561 goto bad1;
1562 }
1563
1564 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1565 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1566 if (err) {
1567 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1568 err);
1569 rv = ENOMEM;
1570 goto bad2;
1571 }
1572
1573 usb_dma_t *dma;
1574 size_t size;
1575 size_t align;
1576
1577 dma = &sc->sc_eventst_dma;
1578 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1579 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1580 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1581 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1582 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1583 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1584 if (err) {
1585 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1586 err);
1587 rv = ENOMEM;
1588 goto bad3;
1589 }
1590
1591 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1592 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1593 KERNADDR(&sc->sc_eventst_dma, 0),
1594 sc->sc_eventst_dma.udma_block->size);
1595
1596 dma = &sc->sc_dcbaa_dma;
1597 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1598 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1599 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1600 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1601 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1602 if (err) {
1603 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1604 rv = ENOMEM;
1605 goto bad4;
1606 }
1607 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1608 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1609 KERNADDR(&sc->sc_dcbaa_dma, 0),
1610 sc->sc_dcbaa_dma.udma_block->size);
1611
1612 if (sc->sc_maxspbuf != 0) {
1613 /*
1614 * DCBAA entry 0 holds the scratchpad buffer array pointer.
1615 */
1616 *(uint64_t *)KERNADDR(dma, 0) =
1617 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
1618 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
1619 }
1620
1621 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
1622 KM_SLEEP);
1623 if (sc->sc_slots == NULL) {
1624 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
1625 rv = ENOMEM;
1626 goto bad;
1627 }
1628
1629 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
1630 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
1631 if (sc->sc_xferpool == NULL) {
1632 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
1633 err);
1634 rv = ENOMEM;
1635 goto bad;
1636 }
1637
1638 cv_init(&sc->sc_command_cv, "xhcicmd");
1639 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
1640 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
1641 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
1642 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);
1643
1644 struct xhci_erste *erst;
1645 erst = KERNADDR(&sc->sc_eventst_dma, 0);
1646 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
1647 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
1648 erst[0].erste_3 = htole32(0);
1649 usb_syncmem(&sc->sc_eventst_dma, 0,
1650 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);
1651
1652 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
1653 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
1654 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
1655 XHCI_ERDP_BUSY);
1656
1657 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
1658 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
1659 sc->sc_cr->xr_cs);
1660
1661 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
1662 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);
1663
1664 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
1665 xhci_start(sc);
1666
1667 return 0;
1668
1669 bad:
1670 if (sc->sc_xferpool) {
1671 pool_cache_destroy(sc->sc_xferpool);
1672 sc->sc_xferpool = NULL;
1673 }
1674
1675 if (sc->sc_slots) {
1676 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
1677 sc->sc_maxslots);
1678 sc->sc_slots = NULL;
1679 }
1680
1681 usb_freemem(&sc->sc_dcbaa_dma);
1682 bad4:
1683 usb_freemem(&sc->sc_eventst_dma);
1684 bad3:
1685 xhci_ring_free(sc, &sc->sc_er);
1686 bad2:
1687 xhci_ring_free(sc, &sc->sc_cr);
1688 i = sc->sc_maxspbuf;
1689 bad1:
1690 for (int j = 0; j < i; j++)
1691 usb_freemem(&sc->sc_spbuf_dma[j]);
1692 usb_freemem(&sc->sc_spbufarray_dma);
1693
1694 return rv;
1695 }
1696
1697 static inline bool
1698 xhci_polling_p(struct xhci_softc * const sc)
1699 {
1700 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
1701 }
1702
1703 int
1704 xhci_intr(void *v)
1705 {
1706 struct xhci_softc * const sc = v;
1707 int ret = 0;
1708
1709 XHCIHIST_FUNC(); XHCIHIST_CALLED();
1710
1711 if (sc == NULL)
1712 return 0;
1713
1714 mutex_spin_enter(&sc->sc_intr_lock);
1715
1716 if (sc->sc_dying || !device_has_power(sc->sc_dev))
1717 goto done;
1718
1719 /* If we get an interrupt while polling, then just ignore it. */
1720 if (xhci_polling_p(sc)) {
1721 #ifdef DIAGNOSTIC
1722 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
1723 #endif
1724 goto done;
1725 }
1726
1727 ret = xhci_intr1(sc);
1728 if (ret) {
1729 KASSERT(sc->sc_child || sc->sc_child2);
1730
1731 /*
1732 * One of the child busses could already be detached. It doesn't
1733 * matter on which of the two the softintr is scheduled.
1734 */
1735 if (sc->sc_child)
1736 usb_schedsoftintr(&sc->sc_bus);
1737 else
1738 usb_schedsoftintr(&sc->sc_bus2);
1739 }
1740 done:
1741 mutex_spin_exit(&sc->sc_intr_lock);
1742 return ret;
1743 }
1744
1745 int
1746 xhci_intr1(struct xhci_softc * const sc)
1747 {
1748 uint32_t usbsts;
1749 uint32_t iman;
1750
1751 XHCIHIST_FUNC();
1752
1753 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1754 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1755 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1756 XHCI_STS_HCE)) == 0) {
1757 DPRINTFN(16, "ignored intr not for %jd",
1758 device_unit(sc->sc_dev), 0, 0, 0);
1759 return 0;
1760 }
1761
1762 /*
1763 * Clear EINT and other transient flags, to not misinterpret
1764 * next shared interrupt. Also, to avoid race, EINT must be cleared
1765 * before XHCI_IMAN_INTR_PEND is cleared.
1766 */
1767 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & ~XHCI_STS_RSVDP0);
1768
1769 #ifdef XHCI_DEBUG
1770 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1771 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1772 #endif
1773
1774 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1775 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1776 iman |= XHCI_IMAN_INTR_PEND;
1777 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1778
1779 #ifdef XHCI_DEBUG
1780 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1781 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1782 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1783 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1784 #endif
1785
1786 return 1;
1787 }
1788
1789 /*
1790 * 3 port speed types used in USB stack
1791 *
1792 * usbdi speed
1793 * definition: USB_SPEED_* in usb.h
1794 * They are used in struct usbd_device in USB stack.
1795 * ioctl interface uses these values too.
1796 * port_status speed
1797 * definition: UPS_*_SPEED in usb.h
1798 * They are used in usb_port_status_t and valid only for USB 2.0.
1799 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus
1800 * of usb_port_status_ext_t indicates port speed.
1801 * Note that some 3.0 values overlap with 2.0 values.
1802 * (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1803 * means UPS_LOW_SPEED in HS.)
1804 * port status returned from hub also uses these values.
1805 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed
1806 * or more.
1807 * xspeed:
1808 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1809 * They are used in only slot context and PORTSC reg of xhci.
1810 * The difference between usbdi speed and xspeed is
1811 * that FS and LS values are swapped.
1812 */
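
/*
 * For example, a full-speed device is USB_SPEED_FULL in usbdi terms,
 * PSI (xspeed) 1 in the slot context/PORTSC, and UPS_FULL_SPEED in a
 * USB 2.0 port status word -- see the converters below.
 */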
1813
1814 /* convert usbdi speed to xspeed */
1815 static int
1816 xhci_speed2xspeed(int speed)
1817 {
1818 switch (speed) {
1819 case USB_SPEED_LOW: return 2;
1820 case USB_SPEED_FULL: return 1;
1821 default: return speed;
1822 }
1823 }
1824
1825 #if 0
1826 /* convert xspeed to usbdi speed */
1827 static int
1828 xhci_xspeed2speed(int xspeed)
1829 {
1830 switch (xspeed) {
1831 case 1: return USB_SPEED_FULL;
1832 case 2: return USB_SPEED_LOW;
1833 default: return xspeed;
1834 }
1835 }
1836 #endif
1837
1838 /* convert xspeed to port status speed */
1839 static int
1840 xhci_xspeed2psspeed(int xspeed)
1841 {
1842 switch (xspeed) {
1843 case 0: return 0;
1844 case 1: return UPS_FULL_SPEED;
1845 case 2: return UPS_LOW_SPEED;
1846 case 3: return UPS_HIGH_SPEED;
1847 default: return UPS_OTHER_SPEED;
1848 }
1849 }
1850
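#if 0
/*
 * Illustrative sketch only (not compiled): the Device Context Index
 * (DCI) used throughout this file follows xHCI 1.1 4.5.1.  DCI 1 is the
 * default control endpoint; for other endpoints DCI is twice the
 * endpoint number, plus one for the IN direction.  xhci_ep_get_dci()
 * derives this from the endpoint descriptor; the hypothetical helper
 * below merely restates the rule.
 */
static u_int
xhci_dci_sketch(u_int epnum, bool in)
{
	if (epnum == 0)
		return XHCI_DCI_EP_CONTROL;	/* DCI 1 */
	return 2 * epnum + (in ? 1 : 0);	/* e.g. bulk IN 0x81 -> DCI 3 */
}
#endif
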
1851 /*
1852 * Construct input contexts and issue TRB to open pipe.
1853 */
1854 static usbd_status
1855 xhci_configure_endpoint(struct usbd_pipe *pipe)
1856 {
1857 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1858 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1859 #ifdef USB_DEBUG
1860 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1861 #endif
1862 struct xhci_soft_trb trb;
1863 usbd_status err;
1864
1865 XHCIHIST_FUNC();
1866 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
1867 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
1868 pipe->up_endpoint->ue_edesc->bmAttributes);
1869
1870 /* XXX ensure input context is available? */
1871
1872 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz);
1873
1874 /* set up context */
1875 xhci_setup_ctx(pipe);
1876
1877 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0),
1878 sc->sc_ctxsz * 1);
1879 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs,
1880 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1);
1881
1882 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
1883 trb.trb_2 = 0;
1884 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1885 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
1886
1887 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
1888
1889 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
1890 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci),
1891 sc->sc_ctxsz * 1);
1892
1893 return err;
1894 }
1895
1896 #if 0
1897 static usbd_status
1898 xhci_unconfigure_endpoint(struct usbd_pipe *pipe)
1899 {
1900 #ifdef USB_DEBUG
1901 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1902 #endif
1903
1904 XHCIHIST_FUNC();
1905 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
1906
1907 return USBD_NORMAL_COMPLETION;
1908 }
1909 #endif
1910
1911 /* 4.6.8, 6.4.3.7 */
1912 static void
1913 xhci_reset_endpoint(struct usbd_pipe *pipe)
1914 {
1915 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1916 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1917 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1918 struct xhci_soft_trb trb;
1919
1920 XHCIHIST_FUNC();
1921 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1922
1923 KASSERT(mutex_owned(&sc->sc_lock));
1924
1925 trb.trb_0 = 0;
1926 trb.trb_2 = 0;
1927 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1928 XHCI_TRB_3_EP_SET(dci) |
1929 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP);
1930
1931 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
1932 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
1933 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
1934 }
1935 }
1936
1937 /*
1938 * 4.6.9, 6.4.3.8
1939 * Stop execution of TDs on xfer ring.
1940 * Should be called with sc_lock held.
1941 */
1942 static usbd_status
1943 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1944 uint32_t trb3flags)
1945 {
1946 struct xhci_soft_trb trb;
1947 usbd_status err;
1948
1949 XHCIHIST_FUNC();
1950 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1951
1952 KASSERT(mutex_owned(&sc->sc_lock));
1953
1954 trb.trb_0 = 0;
1955 trb.trb_2 = 0;
1956 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1957 XHCI_TRB_3_EP_SET(dci) |
1958 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1959 trb3flags;
1960
1961 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1962
1963 return err;
1964 }
1965
1966 static usbd_status
1967 xhci_stop_endpoint(struct usbd_pipe *pipe)
1968 {
1969 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1970 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1971 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1972
1973 XHCIHIST_FUNC();
1974 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1975
1976 KASSERT(mutex_owned(&sc->sc_lock));
1977
1978 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1979 }
1980
1981 /*
1982 * Set TR Dequeue Pointer.
1983 * xHCI 1.1 4.6.10 6.4.3.9
1984 * Purge all of the TRBs on ring and reinitialize ring.
1985 * Set TR dequeue Pointer to 0 and Cycle State to 1.
1986 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE
1987 * error will be generated.
1988 */
1989 static void
1990 xhci_set_dequeue(struct usbd_pipe *pipe)
1991 {
1992 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1993 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1994 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1995 struct xhci_ring * const xr = xs->xs_xr[dci];
1996 struct xhci_soft_trb trb;
1997
1998 XHCIHIST_FUNC();
1999 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
2000
2001 KASSERT(mutex_owned(&sc->sc_lock));
2002 KASSERT(xr != NULL);
2003
2004 xhci_host_dequeue(xr);
2005
2006 /* set DCS */
2007 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
2008 trb.trb_2 = 0;
2009 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2010 XHCI_TRB_3_EP_SET(dci) |
2011 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
2012
2013 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) {
2014 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n",
2015 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress);
2016 }
2017 }
2018
2019 /*
2020 * Open new pipe: called from usbd_setup_pipe_flags.
2021 * Fills methods of pipe.
2022 * If pipe is not for ep0, calls configure_endpoint.
2023 */
2024 static usbd_status
2025 xhci_open(struct usbd_pipe *pipe)
2026 {
2027 struct usbd_device * const dev = pipe->up_dev;
2028 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
2029 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
2030 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2031 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2032 const u_int dci = xhci_ep_get_dci(ed);
2033 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
2034 usbd_status err;
2035
2036 XHCIHIST_FUNC();
2037 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr,
2038 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed);
2039 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx",
2040 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress,
2041 ed->bmAttributes);
2042 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize),
2043 ed->bInterval, 0, 0);
2044
2045 if (sc->sc_dying)
2046 return USBD_IOERROR;
2047
2048 /* Root Hub */
2049 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) {
2050 switch (ed->bEndpointAddress) {
2051 case USB_CONTROL_ENDPOINT:
2052 pipe->up_methods = &roothub_ctrl_methods;
2053 break;
2054 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
2055 pipe->up_methods = &xhci_root_intr_methods;
2056 break;
2057 default:
2058 pipe->up_methods = NULL;
2059 DPRINTFN(0, "bad bEndpointAddress 0x%02jx",
2060 ed->bEndpointAddress, 0, 0, 0);
2061 return USBD_INVAL;
2062 }
2063 return USBD_NORMAL_COMPLETION;
2064 }
2065
2066 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task,
2067 pipe, USB_TASKQ_MPSAFE);
2068
2069 switch (xfertype) {
2070 case UE_CONTROL:
2071 pipe->up_methods = &xhci_device_ctrl_methods;
2072 break;
2073 case UE_ISOCHRONOUS:
2074 pipe->up_methods = &xhci_device_isoc_methods;
2075 pipe->up_serialise = false;
2076 xpipe->xp_isoc_next = -1;
2077 break;
2078 case UE_BULK:
2079 pipe->up_methods = &xhci_device_bulk_methods;
2080 break;
2081 case UE_INTERRUPT:
2082 pipe->up_methods = &xhci_device_intr_methods;
2083 break;
2084 default:
2085 return USBD_IOERROR;
2086 break;
2087 }
2088
2089 KASSERT(xs != NULL);
2090 KASSERT(xs->xs_xr[dci] == NULL);
2091
2092 /* allocate transfer ring */
2093 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS,
2094 XHCI_TRB_ALIGN);
2095 if (err) {
2096 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0);
2097 return err;
2098 }
2099
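	/*
	 * ep0's endpoint context is set up later, when xhci_set_address()
	 * calls xhci_setup_ctx() on the default pipe, so only non-control
	 * endpoints are configured here.
	 */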
2100 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT)
2101 return xhci_configure_endpoint(pipe);
2102
2103 return USBD_NORMAL_COMPLETION;
2104 }
2105
2106 /*
2107 * Closes pipe, called from usbd_kill_pipe via close methods.
2108 * If the endpoint to be closed is ep0, disable_slot.
2109 * Should be called with sc_lock held.
2110 */
2111 static void
2112 xhci_close_pipe(struct usbd_pipe *pipe)
2113 {
2114 struct xhci_pipe * const xp =
2115 container_of(pipe, struct xhci_pipe, xp_pipe);
2116 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2117 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2118 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
2119 const u_int dci = xhci_ep_get_dci(ed);
2120 struct xhci_soft_trb trb;
2121 uint32_t *cp;
2122
2123 XHCIHIST_FUNC();
2124
2125 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC,
2126 &sc->sc_lock);
2127
2128 if (sc->sc_dying)
2129 return;
2130
2131 /* xs is uninitialized before xhci_init_slot */
2132 if (xs == NULL || xs->xs_idx == 0)
2133 return;
2134
2135 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2136 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2137
2138 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
2139 KASSERT(mutex_owned(&sc->sc_lock));
2140
2141 if (pipe->up_dev->ud_depth == 0)
2142 return;
2143
2144 if (dci == XHCI_DCI_EP_CONTROL) {
2145 DPRINTFN(4, "closing ep0", 0, 0, 0, 0);
2146 /* This frees all rings */
2147 xhci_disable_slot(sc, xs->xs_idx);
2148 return;
2149 }
2150
2151 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED)
2152 (void)xhci_stop_endpoint(pipe);
2153
2154 /*
2155 * set appropriate bit to be dropped.
2156 * don't set DC bit to 1, otherwise all endpoints
2157 * would be deconfigured.
2158 */
2159 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
2160 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));
2161 cp[1] = htole32(0);
2162
2163 /* XXX should be most significant one, not dci? */
2164 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
2165 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci));
2166
2167 /* configure ep context performs an implicit dequeue */
2168 xhci_host_dequeue(xs->xs_xr[dci]);
2169
2170 /* sync input contexts before they are read from memory */
2171 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
2172
2173 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
2174 trb.trb_2 = 0;
2175 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
2176 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP);
2177
2178 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
2179 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2180
2181 xhci_ring_free(sc, &xs->xs_xr[dci]);
2182 xs->xs_xr[dci] = NULL;
2183 }
2184
2185 /*
2186 * Abort transfer. Must be called with sc_lock held. Releases and
2187 * reacquires sc_lock to sleep until hardware acknowledges abort.
2188 */
2189 static void
2190 xhci_abortx(struct usbd_xfer *xfer)
2191 {
2192 XHCIHIST_FUNC();
2193 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
2194
2195 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx",
2196 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0);
2197
2198 KASSERT(mutex_owned(&sc->sc_lock));
2199 KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
2200 xfer->ux_status == USBD_TIMEOUT),
2201 "bad abort status: %d", xfer->ux_status);
2202
2203 xhci_pipe_restart(xfer->ux_pipe);
2204
2205 DPRINTFN(14, "end", 0, 0, 0, 0);
2206 }
2207
2208 static void
2209 xhci_host_dequeue(struct xhci_ring * const xr)
2210 {
2211 /* When dequeueing the controller, update our struct copy too */
2212 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE);
2213 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE,
2214 BUS_DMASYNC_PREWRITE);
2215 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies));
2216
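	/*
	 * The ring is now all zeroes, i.e. every TRB cycle bit is 0, so
	 * restart the enqueue index at 0 with cycle state 1.
	 */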
2217 xr->xr_ep = 0;
2218 xr->xr_cs = 1;
2219 }
2220
2221 /*
2222 * Recover STALLed endpoint, or stop endpoint to abort a pipe.
2223 * xHCI 1.1 sect 4.10.2.1
2224 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove
2225 * all transfers on transfer ring.
2226 */
2227 static void
2228 xhci_pipe_restart(struct usbd_pipe *pipe)
2229 {
2230 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2231 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2232 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2233
2234 XHCIHIST_FUNC();
2235 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju",
2236 (uintptr_t)pipe, xs->xs_idx, dci, 0);
2237
2238 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2239
2240 /*
2241 * - If the endpoint is halted, indicating a stall, reset it.
2242 * - If the endpoint is stopped, we're already good.
2243 * - Otherwise, someone wanted to abort the pipe, so stop the
2244 * endpoint.
2245 *
2246 * In any case, clear the ring.
2247 */
2248 switch (xhci_get_epstate(sc, xs, dci)) {
2249 case XHCI_EPSTATE_HALTED:
2250 xhci_reset_endpoint(pipe);
2251 break;
2252 case XHCI_EPSTATE_STOPPED:
2253 break;
2254 default:
2255 xhci_stop_endpoint(pipe);
2256 break;
2257 }
2258
2259 switch (xhci_get_epstate(sc, xs, dci)) {
2260 case XHCI_EPSTATE_STOPPED:
2261 break;
2262 case XHCI_EPSTATE_ERROR:
2263 device_printf(sc->sc_dev, "endpoint 0x%x error\n",
2264 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2265 break;
2266 default:
2267 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n",
2268 pipe->up_endpoint->ue_edesc->bEndpointAddress);
2269 }
2270
2271 xhci_set_dequeue(pipe);
2272
2273 DPRINTFN(4, "ends", 0, 0, 0, 0);
2274 }
2275
2276 static void
2277 xhci_pipe_restart_async_task(void *cookie)
2278 {
2279 struct usbd_pipe * const pipe = cookie;
2280 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2281 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2282 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2283 struct xhci_ring * const tr = xs->xs_xr[dci];
2284 struct usbd_xfer *xfer;
2285
2286 XHCIHIST_FUNC();
2287 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx",
2288 (uintptr_t)sc, (uintptr_t)pipe, 0, 0);
2289
2290 mutex_enter(&sc->sc_lock);
2291
2292 xhci_pipe_restart(pipe);
2293
2294 /*
2295 * We halted our own queue because it stalled. Mark it no
2296 * longer halted and start issuing queued transfers again.
2297 */
2298 tr->is_halted = false;
2299 xfer = SIMPLEQ_FIRST(&pipe->up_queue);
2300 if (xfer) {
2301 /*
2302 * If the first xfer of the queue is not in progress,
2303 * though, there may be a concurrent software abort
2304 * that has already cancelled it and is now in the
2305 * middle of a concurrent xhci_pipe_restart waiting to
2306 * reacquire the pipe (bus) lock. So only restart the
2307 * xfer if it's still USBD_IN_PROGRESS.
2308 *
2309 * Either way, xfers on the queue can't be in
2310 * USBD_NOT_STARTED.
2311 */
2312 KASSERT(xfer->ux_status != USBD_NOT_STARTED);
2313 if (xfer->ux_status == USBD_IN_PROGRESS) {
2314 (*pipe->up_methods->upm_start)(xfer);
2315 } else {
2316 DPRINTF("pipe restart race xfer=%#jx status=%jd",
2317 (uintptr_t)xfer, xfer->ux_status, 0, 0);
2318 }
2319 }
2320
2321 mutex_exit(&sc->sc_lock);
2322 }
2323
2324 static void
2325 xhci_pipe_restart_async(struct usbd_pipe *pipe)
2326 {
2327 struct xhci_pipe * const xp =
2328 container_of(pipe, struct xhci_pipe, xp_pipe);
2329 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
2330 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
2331 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
2332 struct xhci_ring * const tr = xs->xs_xr[dci];
2333
2334 XHCIHIST_FUNC();
2335 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0);
2336
2337 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2338
2339 tr->is_halted = true;
2340 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC);
2341
2342 DPRINTFN(4, "ends", 0, 0, 0, 0);
2343 }
2344
2345 /* Process roothub port status/change events and notify uhub_intr. */
2346 static void
2347 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport)
2348 {
2349 XHCIHIST_FUNC();
2350 XHCIHIST_CALLARGS("xhci%jd: port %ju status change",
2351 device_unit(sc->sc_dev), ctlrport, 0, 0);
2352
2353 if (ctlrport > sc->sc_maxports)
2354 return;
2355
2356 const size_t bn = xhci_ctlrport2bus(sc, ctlrport);
2357 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport);
2358 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn];
2359
2360 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change",
2361 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer);
2362
2363 if (xfer == NULL)
2364 return;
2365 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
2366
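	/*
	 * The root intr xfer carries the hub status-change bitmap: bit N of
	 * the buffer corresponds to root hub port N (bit 0 is the hub
	 * itself), so a change on port 3 sets bit 3 of byte 0.
	 */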
2367 uint8_t *p = xfer->ux_buf;
2368 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn])
2369 memset(p, 0, xfer->ux_length);
2370 p[rhp / NBBY] |= 1 << (rhp % NBBY);
2371 xfer->ux_actlen = xfer->ux_length;
2372 xfer->ux_status = USBD_NORMAL_COMPLETION;
2373 if (xhci_polling_p(sc))
2374 sc->sc_intrxfer_deferred[bn] = true;
2375 else
2376 usb_transfer_complete(xfer);
2377 }
2378
2379 /* Process Transfer Events */
2380 static void
2381 xhci_event_transfer(struct xhci_softc * const sc,
2382 const struct xhci_trb * const trb)
2383 {
2384 uint64_t trb_0;
2385 uint32_t trb_2, trb_3;
2386 uint8_t trbcode;
2387 u_int slot, dci;
2388 struct xhci_slot *xs;
2389 struct xhci_ring *xr;
2390 struct xhci_xfer *xx;
2391 struct usbd_xfer *xfer;
2392 usbd_status err;
2393
2394 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2395
2396 trb_0 = le64toh(trb->trb_0);
2397 trb_2 = le32toh(trb->trb_2);
2398 trb_3 = le32toh(trb->trb_3);
2399 trbcode = XHCI_TRB_2_ERROR_GET(trb_2);
2400 slot = XHCI_TRB_3_SLOT_GET(trb_3);
2401 dci = XHCI_TRB_3_EP_GET(trb_3);
2402 xs = &sc->sc_slots[slot];
2403 xr = xs->xs_xr[dci];
2404
2405 /* sanity check */
2406 KASSERT(xr != NULL);
2407 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots,
2408 "invalid xs_idx %u slot %u", xs->xs_idx, slot);
2409
2410 int idx = 0;
2411 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2412 if (xhci_trb_get_idx(xr, trb_0, &idx)) {
2413 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0);
2414 return;
2415 }
2416 xx = xr->xr_cookies[idx];
2417
2418 /* clear cookie of consumed TRB */
2419 xr->xr_cookies[idx] = NULL;
2420
2421 /*
2422 * xx is NULL if pipe is opened but xfer is not started.
2423 * It happens when stopping an idle pipe.
2424 */
2425 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) {
2426 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju",
2427 idx, (uintptr_t)xx, trbcode, dci);
2428 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0,
2429 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)),
2430 0, 0);
2431 return;
2432 }
2433 } else {
2434 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */
2435 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2436 }
2437 /* XXX this may not happen */
2438 if (xx == NULL) {
2439 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2440 return;
2441 }
2442 xfer = &xx->xx_xfer;
2443 /* XXX this may happen when detaching */
2444 if (xfer == NULL) {
2445 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2446 (uintptr_t)xx, trb_0, 0, 0);
2447 return;
2448 }
2449 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2450 /* XXX I dunno why this happens */
2451 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2452
2453 if (!xfer->ux_pipe->up_repeat &&
2454 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2455 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2456 0, 0, 0);
2457 return;
2458 }
2459
2460 const uint8_t xfertype =
2461 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2462
2463 /* 4.11.5.2 Event Data TRB */
2464 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2465 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2466 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2467 if ((trb_0 & 0x3) == 0x3) {
2468 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2469 }
2470 }
2471
2472 switch (trbcode) {
2473 case XHCI_TRB_ERROR_SHORT_PKT:
2474 case XHCI_TRB_ERROR_SUCCESS:
2475 /*
2476 * A ctrl transfer can generate two events if it has a Data
2477 * stage. A short data stage can be OK and should not
2478 * complete the transfer as the status stage needs to be
2479 * performed.
2480 *
2481 * Note: Data and Status stage events point at the same xfer.
2482 * ux_actlen and ux_dmabuf will be passed to
2483 * usb_transfer_complete after the Status stage event.
2484 *
2485 * It can be distinguished which stage generates the event:
2486 * + by checking least 3 bits of trb_0 if ED==1.
2487 * (see xhci_device_ctrl_start).
2488 * + by checking the type of original TRB if ED==0.
2489 *
2490 * In addition, intr, bulk, and isoc transfers currently
2491 * consist of a single TD, so the "skip" is not needed.
2492 * ctrl xfers use EVENT_DATA, and the others do not.
2493 * Thus the driver can switch the flow by checking the ED bit.
2494 */
2495 if (xfertype == UE_ISOCHRONOUS) {
2496 xfer->ux_frlengths[xx->xx_isoc_done] -=
2497 XHCI_TRB_2_REM_GET(trb_2);
2498 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2499 } else
2500 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2501 if (xfer->ux_actlen == 0)
2502 xfer->ux_actlen = xfer->ux_length -
2503 XHCI_TRB_2_REM_GET(trb_2);
2504 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2505 == XHCI_TRB_TYPE_DATA_STAGE) {
2506 return;
2507 }
2508 } else if ((trb_0 & 0x3) == 0x3) {
2509 return;
2510 }
2511 err = USBD_NORMAL_COMPLETION;
2512 break;
2513 case XHCI_TRB_ERROR_STOPPED:
2514 case XHCI_TRB_ERROR_LENGTH:
2515 case XHCI_TRB_ERROR_STOPPED_SHORT:
2516 err = USBD_IOERROR;
2517 break;
2518 case XHCI_TRB_ERROR_STALL:
2519 case XHCI_TRB_ERROR_BABBLE:
2520 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2521 xhci_pipe_restart_async(xfer->ux_pipe);
2522 err = USBD_STALLED;
2523 break;
2524 default:
2525 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2526 err = USBD_IOERROR;
2527 break;
2528 }
2529
2530 if (xfertype == UE_ISOCHRONOUS) {
2531 switch (trbcode) {
2532 case XHCI_TRB_ERROR_SHORT_PKT:
2533 case XHCI_TRB_ERROR_SUCCESS:
2534 break;
2535 case XHCI_TRB_ERROR_MISSED_SERVICE:
2536 case XHCI_TRB_ERROR_RING_UNDERRUN:
2537 case XHCI_TRB_ERROR_RING_OVERRUN:
2538 default:
2539 xfer->ux_frlengths[xx->xx_isoc_done] = 0;
2540 break;
2541 }
2542 if (++xx->xx_isoc_done < xfer->ux_nframes)
2543 return;
2544 }
2545
2546 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 ||
2547 (trb_0 & 0x3) == 0x0) {
2548 /*
2549 * Try to claim this xfer for completion. If it has
2550 * already completed or aborted, drop it on the floor.
2551 */
2552 if (!usbd_xfer_trycomplete(xfer))
2553 return;
2554
2555 /* Set the status. */
2556 xfer->ux_status = err;
2557
2558 usb_transfer_complete(xfer);
2559 }
2560 }
2561
2562 /* Process Command complete events */
2563 static void
2564 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb)
2565 {
2566 uint64_t trb_0;
2567 uint32_t trb_2, trb_3;
2568
2569 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2570
2571 KASSERT(mutex_owned(&sc->sc_lock));
2572
2573 trb_0 = le64toh(trb->trb_0);
2574 trb_2 = le32toh(trb->trb_2);
2575 trb_3 = le32toh(trb->trb_3);
2576
2577 if (trb_0 == sc->sc_command_addr) {
2578 sc->sc_resultpending = false;
2579
2580 sc->sc_result_trb.trb_0 = trb_0;
2581 sc->sc_result_trb.trb_2 = trb_2;
2582 sc->sc_result_trb.trb_3 = trb_3;
2583 if (XHCI_TRB_2_ERROR_GET(trb_2) !=
2584 XHCI_TRB_ERROR_SUCCESS) {
2585 DPRINTFN(1, "command completion "
2586 "failure: 0x%016jx 0x%08jx 0x%08jx",
2587 trb_0, trb_2, trb_3, 0);
2588 }
2589 cv_signal(&sc->sc_command_cv);
2590 } else {
2591 DPRINTFN(1, "spurious event: %#jx 0x%016jx "
2592 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3);
2593 }
2594 }
2595
2596 /*
2597 * Process events.
2598 * called from xhci_softintr
2599 */
2600 static void
2601 xhci_handle_event(struct xhci_softc * const sc,
2602 const struct xhci_trb * const trb)
2603 {
2604 uint64_t trb_0;
2605 uint32_t trb_2, trb_3;
2606
2607 XHCIHIST_FUNC();
2608
2609 trb_0 = le64toh(trb->trb_0);
2610 trb_2 = le32toh(trb->trb_2);
2611 trb_3 = le32toh(trb->trb_3);
2612
2613 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx",
2614 (uintptr_t)trb, trb_0, trb_2, trb_3);
2615
2616 /*
2617 * 4.11.3.1, 6.4.2.1
2618 * TRB Pointer is invalid for these completion codes.
2619 */
2620 switch (XHCI_TRB_2_ERROR_GET(trb_2)) {
2621 case XHCI_TRB_ERROR_RING_UNDERRUN:
2622 case XHCI_TRB_ERROR_RING_OVERRUN:
2623 case XHCI_TRB_ERROR_VF_RING_FULL:
2624 return;
2625 default:
2626 if (trb_0 == 0) {
2627 return;
2628 }
2629 break;
2630 }
2631
2632 switch (XHCI_TRB_3_TYPE_GET(trb_3)) {
2633 case XHCI_TRB_EVENT_TRANSFER:
2634 xhci_event_transfer(sc, trb);
2635 break;
2636 case XHCI_TRB_EVENT_CMD_COMPLETE:
2637 xhci_event_cmd(sc, trb);
2638 break;
2639 case XHCI_TRB_EVENT_PORT_STS_CHANGE:
2640 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff));
2641 break;
2642 default:
2643 break;
2644 }
2645 }
2646
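#if 0
/*
 * Illustrative sketch only (not compiled): an event TRB belongs to the
 * consumer while its cycle bit matches the consumer cycle state, which
 * is exactly the check made in the xhci_softintr() loop below.  The
 * helper name is hypothetical.
 */
static bool
xhci_event_trb_owned(const struct xhci_trb *trb, int cs)
{
	return ((le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0) == cs;
}
#endif
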
2647 static void
2648 xhci_softintr(void *v)
2649 {
2650 struct usbd_bus * const bus = v;
2651 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2652 struct xhci_ring * const er = sc->sc_er;
2653 struct xhci_trb *trb;
2654 int i, j, k, bn;
2655
2656 XHCIHIST_FUNC();
2657
2658 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
2659
2660 i = er->xr_ep;
2661 j = er->xr_cs;
2662
2663 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0);
2664
2665 /*
2666 * Handle deferred root intr xfer, in case we just switched off
2667 * polling. It's not safe to complete root intr xfers while
2668 * polling -- too much kernel machinery gets involved.
2669 */
2670 if (!xhci_polling_p(sc)) {
2671 for (bn = 0; bn < 2; bn++) {
2672 if (__predict_false(sc->sc_intrxfer_deferred[bn])) {
2673 sc->sc_intrxfer_deferred[bn] = false;
2674 usb_transfer_complete(sc->sc_intrxfer[bn]);
2675 }
2676 }
2677 }
2678
2679 while (1) {
2680 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE,
2681 BUS_DMASYNC_POSTREAD);
2682 trb = &er->xr_trb[i];
2683 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0;
2684
2685 if (j != k)
2686 break;
2687
2688 xhci_handle_event(sc, trb);
2689
2690 i++;
2691 if (i == er->xr_ntrb) {
2692 i = 0;
2693 j ^= 1;
2694 }
2695 }
2696
2697 er->xr_ep = i;
2698 er->xr_cs = j;
2699
2700 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) |
2701 XHCI_ERDP_BUSY);
2702
2703 DPRINTFN(16, "ends", 0, 0, 0, 0);
2704
2705 return;
2706 }
2707
2708 static void
2709 xhci_poll(struct usbd_bus *bus)
2710 {
2711 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2712
2713 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2714
2715 mutex_enter(&sc->sc_intr_lock);
2716 int ret = xhci_intr1(sc);
2717 if (ret) {
2718 xhci_softintr(bus);
2719 }
2720 mutex_exit(&sc->sc_intr_lock);
2721
2722 return;
2723 }
2724
2725 static struct usbd_xfer *
2726 xhci_allocx(struct usbd_bus *bus, unsigned int nframes)
2727 {
2728 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2729 struct xhci_xfer *xx;
2730 u_int ntrbs;
2731
2732 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2733
2734 ntrbs = uimax(3, nframes);
2735 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs;
2736
2737 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
2738 if (xx != NULL) {
2739 memset(xx, 0, sizeof(*xx));
2740 if (ntrbs > 0) {
2741 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP);
2742 xx->xx_ntrb = ntrbs;
2743 }
2744 #ifdef DIAGNOSTIC
2745 xx->xx_xfer.ux_state = XFER_BUSY;
2746 #endif
2747 }
2748
2749 return &xx->xx_xfer;
2750 }
2751
2752 static void
2753 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
2754 {
2755 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2756 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
2757
2758 XHCIHIST_FUNC(); XHCIHIST_CALLED();
2759
2760 #ifdef DIAGNOSTIC
2761 if (xfer->ux_state != XFER_BUSY &&
2762 xfer->ux_status != USBD_NOT_STARTED) {
2763 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx",
2764 (uintptr_t)xfer, xfer->ux_state, 0, 0);
2765 }
2766 xfer->ux_state = XFER_FREE;
2767 #endif
2768 if (xx->xx_ntrb > 0) {
2769 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb));
2770 xx->xx_trb = NULL;
2771 xx->xx_ntrb = 0;
2772 }
2773 pool_cache_put(sc->sc_xferpool, xx);
2774 }
2775
2776 static bool
2777 xhci_dying(struct usbd_bus *bus)
2778 {
2779 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2780
2781 return sc->sc_dying;
2782 }
2783
2784 static void
2785 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2786 {
2787 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2788
2789 *lock = &sc->sc_lock;
2790 }
2791
2792 extern uint32_t usb_cookie_no;
2793
2794 /*
2795 * xHCI 4.3
2796 * Called when uhub_explore finds a new device (via usbd_new_device).
2797 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2798 * This function does:
2799 * Allocate and construct dev structure of default endpoint (ep0).
2800 * Allocate and open pipe of ep0.
2801 * Enable slot and initialize slot context.
2802 * Set Address.
2803 * Read initial device descriptor.
2804 * Determine initial MaxPacketSize (mps) by speed.
2805 * Read full device descriptor.
2806 * Register this device.
2807 * Finally the device state transitions to ADDRESSED.
2808 */
2809 static usbd_status
2810 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2811 int speed, int port, struct usbd_port *up)
2812 {
2813 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2814 struct usbd_device *dev;
2815 usbd_status err;
2816 usb_device_descriptor_t *dd;
2817 struct xhci_slot *xs;
2818 uint32_t *cp;
2819
2820 XHCIHIST_FUNC();
2821 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2822 port, depth, speed, (uintptr_t)up);
2823
2824 KASSERT(KERNEL_LOCKED_P());
2825
2826 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2827 dev->ud_bus = bus;
2828 dev->ud_quirks = &usbd_no_quirk;
2829 dev->ud_addr = 0;
2830 dev->ud_ddesc.bMaxPacketSize = 0;
2831 dev->ud_depth = depth;
2832 dev->ud_powersrc = up;
2833 dev->ud_myhub = up->up_parent;
2834 dev->ud_speed = speed;
2835 dev->ud_langid = USBD_NOLANG;
2836 dev->ud_cookie.cookie = ++usb_cookie_no;
2837
2838 /* Set up default endpoint handle. */
2839 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2840 /* doesn't matter, just don't leave it uninitialized */
2841 dev->ud_ep0.ue_toggle = 0;
2842
2843 /* Set up default endpoint descriptor. */
2844 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2845 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2846 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2847 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2848 dev->ud_ep0desc.bInterval = 0;
2849
2850 /* 4.3, 4.8.2.1 */
2851 switch (speed) {
2852 case USB_SPEED_SUPER:
2853 case USB_SPEED_SUPER_PLUS:
2854 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2855 break;
2856 case USB_SPEED_FULL:
2857 /* XXX using 64 as initial mps of ep0 in FS */
2858 case USB_SPEED_HIGH:
2859 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2860 break;
2861 case USB_SPEED_LOW:
2862 default:
2863 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2864 break;
2865 }
2866
2867 up->up_dev = dev;
2868
2869 dd = &dev->ud_ddesc;
2870
2871 if (depth == 0 && port == 0) {
2872 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2873 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2874
2875 /* Establish the default pipe. */
2876 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2877 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2878 if (err) {
2879 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0);
2880 goto bad;
2881 }
2882 err = usbd_get_initial_ddesc(dev, dd);
2883 if (err) {
2884 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2885 goto bad;
2886 }
2887 } else {
2888 uint8_t slot = 0;
2889
2890 /* 4.3.2 */
2891 err = xhci_enable_slot(sc, &slot);
2892 if (err) {
2893 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0);
2894 goto bad;
2895 }
2896
2897 xs = &sc->sc_slots[slot];
2898 dev->ud_hcpriv = xs;
2899
2900 /* 4.3.3 initialize slot structure */
2901 err = xhci_init_slot(dev, slot);
2902 if (err) {
2903 DPRINTFN(1, "init slot %ju", err, 0, 0, 0);
2904 dev->ud_hcpriv = NULL;
2905 /*
2906 * We have to disable_slot here because
2907 * xs->xs_idx == 0 when xhci_init_slot fails,
2908 * and in that case usbd_remove_device won't work.
2909 */
2910 mutex_enter(&sc->sc_lock);
2911 xhci_disable_slot(sc, slot);
2912 mutex_exit(&sc->sc_lock);
2913 goto bad;
2914 }
2915
2916 /*
2917 * We have to establish the default pipe _after_ slot
2918 * structure has been prepared.
2919 */
2920 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0,
2921 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0);
2922 if (err) {
2923 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0,
2924 0);
2925 goto bad;
2926 }
2927
2928 /* 4.3.4 Address Assignment */
2929 err = xhci_set_address(dev, slot, false);
2930 if (err) {
2931 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0);
2932 goto bad;
2933 }
2934
2935 /* Allow device time to set new address */
2936 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE);
2937
2938 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
2939 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT);
2940 HEXDUMP("slot context", cp, sc->sc_ctxsz);
2941 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3]));
2942 DPRINTFN(4, "device address %ju", addr, 0, 0, 0);
2943 /*
2944 * XXX ensure we know when the hardware does something
2945 * we can't yet cope with
2946 */
2947 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr);
2948 dev->ud_addr = addr;
2949
2950 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL,
2951 "addr %d already allocated", dev->ud_addr);
2952 /*
2953 * The root hub is given its own slot
2954 */
2955 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev;
2956
2957 err = usbd_get_initial_ddesc(dev, dd);
2958 if (err) {
2959 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0);
2960 goto bad;
2961 }
2962
2963 /* 4.8.2.1 */
2964 if (USB_IS_SS(speed)) {
2965 if (dd->bMaxPacketSize != 9) {
2966 printf("%s: invalid mps 2^%u for SS ep0,"
2967 " using 512\n",
2968 device_xname(sc->sc_dev),
2969 dd->bMaxPacketSize);
2970 dd->bMaxPacketSize = 9;
2971 }
2972 USETW(dev->ud_ep0desc.wMaxPacketSize,
2973 (1 << dd->bMaxPacketSize));
2974 } else
2975 USETW(dev->ud_ep0desc.wMaxPacketSize,
2976 dd->bMaxPacketSize);
2977 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0);
2978 err = xhci_update_ep0_mps(sc, xs,
2979 UGETW(dev->ud_ep0desc.wMaxPacketSize));
2980 if (err) {
2981 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0);
2982 goto bad;
2983 }
2984 }
2985
2986 err = usbd_reload_device_desc(dev);
2987 if (err) {
2988 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0);
2989 goto bad;
2990 }
2991
2992 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,",
2993 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0);
2994 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,",
2995 dd->bDeviceClass, dd->bDeviceSubClass,
2996 dd->bDeviceProtocol, 0);
2997 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd",
2998 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations,
2999 dev->ud_speed);
3000
3001 usbd_get_device_strings(dev);
3002
3003 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev);
3004
3005 if (depth == 0 && port == 0) {
3006 usbd_attach_roothub(parent, dev);
3007 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0);
3008 return USBD_NORMAL_COMPLETION;
3009 }
3010
3011 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr);
3012 bad:
3013 if (err != USBD_NORMAL_COMPLETION) {
3014 if (depth == 0 && port == 0 && dev->ud_pipe0)
3015 usbd_kill_pipe(dev->ud_pipe0);
3016 usbd_remove_device(dev, up);
3017 }
3018
3019 return err;
3020 }
3021
3022 static usbd_status
3023 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp,
3024 size_t ntrb, size_t align)
3025 {
3026 size_t size = ntrb * XHCI_TRB_SIZE;
3027 struct xhci_ring *xr;
3028
3029 XHCIHIST_FUNC();
3030 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx",
3031 (uintptr_t)*xrp, ntrb, align, 0);
3032
3033 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP);
3034 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0);
3035
3036 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
3037 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma);
3038 if (err) {
3039 kmem_free(xr, sizeof(struct xhci_ring));
3040 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0);
3041 return err;
3042 }
3043 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
3044 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP);
3045 xr->xr_trb = xhci_ring_trbv(xr, 0);
3046 xr->xr_ntrb = ntrb;
3047 xr->is_halted = false;
3048 xhci_host_dequeue(xr);
3049 *xrp = xr;
3050
3051 return USBD_NORMAL_COMPLETION;
3052 }
3053
3054 static void
3055 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr)
3056 {
3057 if (*xr == NULL)
3058 return;
3059
3060 usb_freemem(&(*xr)->xr_dma);
3061 mutex_destroy(&(*xr)->xr_lock);
3062 kmem_free((*xr)->xr_cookies,
3063 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb);
3064 kmem_free(*xr, sizeof(struct xhci_ring));
3065 *xr = NULL;
3066 }
3067
3068 static void
3069 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr,
3070 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs)
3071 {
3072 size_t i;
3073 u_int ri;
3074 u_int cs;
3075 uint64_t parameter;
3076 uint32_t status;
3077 uint32_t control;
3078
3079 XHCIHIST_FUNC();
3080 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju",
3081 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0);
3082
3083 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u",
3084 ntrbs, xr->xr_ntrb);
3085 for (i = 0; i < ntrbs; i++) {
3086 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr,
3087 (uintptr_t)trbs, i, 0);
3088 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx",
3089 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0);
3090 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) !=
3091 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3);
3092 }
3093
3094 ri = xr->xr_ep;
3095 cs = xr->xr_cs;
3096
3097 /*
3098 * Although the xhci hardware can do scatter/gather dma from
3099 * arbitrary sized buffers, there is a non-obvious restriction
3100 * that a LINK trb is only allowed at the end of a burst of
3101 * transfers - which might be 16kB.
3102 * Arbitrarily aligned LINK trbs definitely fail on Ivy bridge.
3103 * The simple solution is not to allow a LINK trb in the middle
3104 * of anything - as here.
3105 * XXX: (dsl) There are xhci controllers out there (eg some made by
3106 * ASMedia) that seem to lock up if they process a LINK trb but
3107 * cannot process the linked-to trb yet.
3108 * The code should write the 'cycle' bit on the link trb AFTER
3109 * adding the other trb.
3110 */
3111 u_int firstep = xr->xr_ep;
3112 u_int firstcs = xr->xr_cs;
3113
3114 for (i = 0; i < ntrbs; ) {
3115 u_int oldri = ri;
3116 u_int oldcs = cs;
3117
3118 if (ri >= (xr->xr_ntrb - 1)) {
3119 /* Put Link TD at the end of ring */
3120 parameter = xhci_ring_trbp(xr, 0);
3121 status = 0;
3122 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) |
3123 XHCI_TRB_3_TC_BIT;
3124 xr->xr_cookies[ri] = NULL;
3125 xr->xr_ep = 0;
3126 xr->xr_cs ^= 1;
3127 ri = xr->xr_ep;
3128 cs = xr->xr_cs;
3129 } else {
3130 parameter = trbs[i].trb_0;
3131 status = trbs[i].trb_2;
3132 control = trbs[i].trb_3;
3133
3134 xr->xr_cookies[ri] = cookie;
3135 ri++;
3136 i++;
3137 }
3138 /*
3139 * If this is the first TRB, mark it invalid to prevent
3140 * xHC from running it immediately.
3141 */
3142 if (oldri == firstep) {
3143 if (oldcs) {
3144 control &= ~XHCI_TRB_3_CYCLE_BIT;
3145 } else {
3146 control |= XHCI_TRB_3_CYCLE_BIT;
3147 }
3148 } else {
3149 if (oldcs) {
3150 control |= XHCI_TRB_3_CYCLE_BIT;
3151 } else {
3152 control &= ~XHCI_TRB_3_CYCLE_BIT;
3153 }
3154 }
3155 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control);
3156 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri,
3157 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3158 }
3159
3160 /* Now invert cycle bit of first TRB */
3161 if (firstcs) {
3162 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT);
3163 } else {
3164 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT);
3165 }
3166 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep,
3167 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE);
3168
3169 xr->xr_ep = ri;
3170 xr->xr_cs = cs;
3171
3172 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep,
3173 xr->xr_cs, 0);
3174 }
3175
3176 static inline void
3177 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr,
3178 struct xhci_xfer *xx, u_int ntrb)
3179 {
3180 KASSERT(ntrb <= xx->xx_ntrb);
3181 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb);
3182 }
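
/*
 * Typical producer usage of xhci_ring_put(): fill one or more struct
 * xhci_soft_trb entries, take the ring's xr_lock around the call, and
 * then ring the appropriate doorbell -- see xhci_do_command_locked()
 * below for the command-ring case.
 */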
3183
3184 /*
3185 * Stop execution commands, purge all commands on command ring, and
3186 * rewind dequeue pointer.
3187 */
3188 static void
3189 xhci_abort_command(struct xhci_softc *sc)
3190 {
3191 struct xhci_ring * const cr = sc->sc_cr;
3192 uint64_t crcr;
3193 int i;
3194
3195 XHCIHIST_FUNC();
3196 XHCIHIST_CALLARGS("command %#jx timeout, aborting",
3197 sc->sc_command_addr, 0, 0, 0);
3198
3199 mutex_enter(&cr->xr_lock);
3200
3201 /* 4.6.1.2 Aborting a Command */
3202 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3203 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA);
3204
3205 for (i = 0; i < 500; i++) {
3206 crcr = xhci_op_read_8(sc, XHCI_CRCR);
3207 if ((crcr & XHCI_CRCR_LO_CRR) == 0)
3208 break;
3209 usb_delay_ms(&sc->sc_bus, 1);
3210 }
3211 if ((crcr & XHCI_CRCR_LO_CRR) != 0) {
3212 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0);
3213 /* reset HC here? */
3214 }
3215
3216 /* reset command ring dequeue pointer */
3217 cr->xr_ep = 0;
3218 cr->xr_cs = 1;
3219 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs);
3220
3221 mutex_exit(&cr->xr_lock);
3222 }
3223
3224 /*
3225 * Put a command on command ring, ring bell, set timer, and cv_timedwait.
3226 * Command completion is notified by cv_signal from xhci_event_cmd()
3227 * (called from xhci_softintr), or by timing out.
3228 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(),
3229 * then do_command examines it.
3230 */
3231 static usbd_status
3232 xhci_do_command_locked(struct xhci_softc * const sc,
3233 struct xhci_soft_trb * const trb, int timeout)
3234 {
3235 struct xhci_ring * const cr = sc->sc_cr;
3236 usbd_status err;
3237
3238 XHCIHIST_FUNC();
3239 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx",
3240 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3241
3242 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx");
3243 KASSERT(mutex_owned(&sc->sc_lock));
3244
3245 while (sc->sc_command_addr != 0 ||
3246 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp))
3247 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
3248 if (sc->sc_suspendresume_failed)
3249 return USBD_IOERROR;
3250
3251 /*
3252 * If the enqueue pointer points at the last TRB of the ring, that
3253 * is the Link TRB, and the command TRB will be stored in TRB 0.
3254 */
3255 if (cr->xr_ep == cr->xr_ntrb - 1)
3256 sc->sc_command_addr = xhci_ring_trbp(cr, 0);
3257 else
3258 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep);
3259
3260 sc->sc_resultpending = true;
3261
3262 mutex_enter(&cr->xr_lock);
3263 xhci_ring_put(sc, cr, NULL, trb, 1);
3264 mutex_exit(&cr->xr_lock);
3265
3266 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0);
3267
3268 while (sc->sc_resultpending) {
3269 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock,
3270 MAX(1, mstohz(timeout))) == EWOULDBLOCK) {
3271 xhci_abort_command(sc);
3272 err = USBD_TIMEOUT;
3273 goto timedout;
3274 }
3275 }
3276
3277 trb->trb_0 = sc->sc_result_trb.trb_0;
3278 trb->trb_2 = sc->sc_result_trb.trb_2;
3279 trb->trb_3 = sc->sc_result_trb.trb_3;
3280
3281 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx",
3282 trb->trb_0, trb->trb_2, trb->trb_3, 0);
3283
3284 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) {
3285 case XHCI_TRB_ERROR_SUCCESS:
3286 err = USBD_NORMAL_COMPLETION;
3287 break;
3288 default:
3289 case 192 ... 223:	/* vendor-defined error (xHCI 1.1 6.4.2.1) */
3290 DPRINTFN(5, "error %#jx",
3291 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0);
3292 err = USBD_IOERROR;
3293 break;
3294 case 224 ... 255:	/* vendor-defined info: treat as success */
3295 err = USBD_NORMAL_COMPLETION;
3296 break;
3297 }
3298
3299 timedout:
3300 sc->sc_resultpending = false;
3301 sc->sc_command_addr = 0;
3302 cv_broadcast(&sc->sc_cmdbusy_cv);
3303
3304 return err;
3305 }
3306
3307 static usbd_status
3308 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb,
3309 int timeout)
3310 {
3311
3312 mutex_enter(&sc->sc_lock);
3313 usbd_status ret = xhci_do_command_locked(sc, trb, timeout);
3314 mutex_exit(&sc->sc_lock);
3315
3316 return ret;
3317 }
3318
3319 static usbd_status
3320 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp)
3321 {
3322 struct xhci_soft_trb trb;
3323 usbd_status err;
3324
3325 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3326
3327 trb.trb_0 = 0;
3328 trb.trb_2 = 0;
3329 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
3330
3331 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3332 if (err != USBD_NORMAL_COMPLETION) {
3333 return err;
3334 }
3335
3336 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3);
3337
3338 return err;
3339 }
3340
3341 /*
3342 * xHCI 4.6.4
3343 * Deallocate ring and device/input context DMA buffers, and disable_slot.
3344 * All endpoints in the slot should be stopped.
3345 * Should be called with sc_lock held.
3346 */
3347 static usbd_status
3348 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot)
3349 {
3350 struct xhci_soft_trb trb;
3351 struct xhci_slot *xs;
3352 usbd_status err;
3353
3354 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3355
3356 if (sc->sc_dying)
3357 return USBD_IOERROR;
3358
3359 trb.trb_0 = 0;
3360 trb.trb_2 = 0;
3361 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) |
3362 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT);
3363
3364 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
3365
3366 if (!err) {
3367 xs = &sc->sc_slots[slot];
3368 if (xs->xs_idx != 0) {
3369 xhci_free_slot(sc, xs);
3370 xhci_set_dcba(sc, 0, slot);
3371 memset(xs, 0, sizeof(*xs));
3372 }
3373 }
3374
3375 return err;
3376 }
3377
3378 /*
3379 * Set address of device and transition slot state from ENABLED to ADDRESSED
3380 * if Block Set Address Request (BSR) is false.
3381 * If BSR==true, transition slot state from ENABLED to DEFAULT.
3382 * see xHCI 1.1 4.5.3, 3.3.4
3383 * Should be called without sc_lock held.
3384 */
3385 static usbd_status
3386 xhci_address_device(struct xhci_softc * const sc,
3387 uint64_t icp, uint8_t slot_id, bool bsr)
3388 {
3389 struct xhci_soft_trb trb;
3390 usbd_status err;
3391
3392 XHCIHIST_FUNC();
3393 if (bsr) {
3394 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr",
3395 icp, slot_id, 0, 0);
3396 } else {
3397 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr",
3398 icp, slot_id, 0, 0);
3399 }
3400
3401 trb.trb_0 = icp;
3402 trb.trb_2 = 0;
3403 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) |
3404 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) |
3405 (bsr ? XHCI_TRB_3_BSR_BIT : 0);
3406
3407 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3408
3409 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS)
3410 err = USBD_NO_ADDR;
3411
3412 return err;
3413 }
3414
3415 static usbd_status
3416 xhci_update_ep0_mps(struct xhci_softc * const sc,
3417 struct xhci_slot * const xs, u_int mps)
3418 {
3419 struct xhci_soft_trb trb;
3420 usbd_status err;
3421 uint32_t * cp;
3422
3423 XHCIHIST_FUNC();
3424 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0);
3425
3426 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3427 cp[0] = htole32(0);
3428 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL));
3429
3430 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL));
3431 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps));
3432
3433 /* sync input contexts before they are read from memory */
3434 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3435 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3436 sc->sc_ctxsz * 4);
3437
3438 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0);
3439 trb.trb_2 = 0;
3440 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
3441 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX);
3442
3443 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
3444 return err;
3445 }
3446
3447 static void
3448 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si)
3449 {
3450 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0);
3451
3452 XHCIHIST_FUNC();
3453 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd",
3454 (uintptr_t)&dcbaa[si], dcba, si, 0);
3455
3456 dcbaa[si] = htole64(dcba);
3457 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t),
3458 BUS_DMASYNC_PREWRITE);
3459 }
3460
3461 /*
3462 * Allocate device and input context DMA buffer, and
3463 * TRB DMA buffer for each endpoint.
3464 */
3465 static usbd_status
3466 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3467 {
3468 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3469 struct xhci_slot *xs;
3470
3471 XHCIHIST_FUNC();
3472 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3473
3474 xs = &sc->sc_slots[slot];
3475
3476 /* allocate contexts */
3477 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3478 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma);
3479 if (err) {
3480 DPRINTFN(1, "failed to allocmem output device context %jd",
3481 err, 0, 0, 0);
3482 return USBD_NOMEM;
3483 }
3484
3485 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz,
3486 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma);
3487 if (err) {
3488 DPRINTFN(1, "failed to allocmem input device context %jd",
3489 err, 0, 0, 0);
3490 goto bad1;
3491 }
3492
3493 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3494 xs->xs_idx = slot;
3495
3496 return USBD_NORMAL_COMPLETION;
3497
3498 bad1:
3499 usb_freemem(&xs->xs_dc_dma);
3500 xs->xs_idx = 0;
3501 return USBD_NOMEM;
3502 }
3503
3504 static void
3505 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3506 {
3507 u_int dci;
3508
3509 XHCIHIST_FUNC();
3510 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3511
3512 /* deallocate all allocated rings in the slot */
3513 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3514 if (xs->xs_xr[dci] != NULL)
3515 xhci_ring_free(sc, &xs->xs_xr[dci]);
3516 }
3517 usb_freemem(&xs->xs_ic_dma);
3518 usb_freemem(&xs->xs_dc_dma);
3519 xs->xs_idx = 0;
3520 }
3521
3522 /*
3523 * Setup slot context, set Device Context Base Address, and issue
3524 * Set Address Device command.
3525 */
3526 static usbd_status
3527 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3528 {
3529 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3530 struct xhci_slot *xs;
3531 usbd_status err;
3532
3533 XHCIHIST_FUNC();
3534 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3535
3536 xs = &sc->sc_slots[slot];
3537
3538 xhci_setup_ctx(dev->ud_pipe0);
3539
3540 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3541 sc->sc_ctxsz * 3);
3542
3543 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3544
3545 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3546
3547 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3548 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3549 sc->sc_ctxsz * 2);
3550
3551 return err;
3552 }
3553
3554 /*
3555 * 4.8.2, 6.2.3.2
3556 * construct slot/endpoint context parameters and do syncmem
3557 */
3558 static void
3559 xhci_setup_ctx(struct usbd_pipe *pipe)
3560 {
3561 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3562 struct usbd_device *dev = pipe->up_dev;
3563 struct xhci_slot * const xs = dev->ud_hcpriv;
3564 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3565 const u_int dci = xhci_ep_get_dci(ed);
3566 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3567 uint32_t *cp;
3568 uint8_t speed = dev->ud_speed;
3569
3570 XHCIHIST_FUNC();
3571 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3572 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3573
3574 /* set up initial input control context */
3575 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3576 cp[0] = htole32(0);
3577 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3578 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT));
3579 cp[7] = htole32(0);
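/*
 * In the input control context, DW0 holds the drop-context flags (none
 * here) and DW1 the add-context flags, so only the slot context and the
 * context for this endpoint's DCI are evaluated by the xHC (xHCI 6.2.5.1).
 */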
3580
3581 /* set up input slot context */
3582 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT));
3583 cp[0] =
3584 XHCI_SCTX_0_CTX_NUM_SET(dci) |
3585 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed));
3586 cp[1] = 0;
3587 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0);
3588 cp[3] = 0;
3589 xhci_setup_route(pipe, cp);
3590 xhci_setup_tthub(pipe, cp);
3591
3592 cp[0] = htole32(cp[0]);
3593 cp[1] = htole32(cp[1]);
3594 cp[2] = htole32(cp[2]);
3595 cp[3] = htole32(cp[3]);
3596
3597 /* set up input endpoint context */
3598 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci));
3599 cp[0] =
3600 XHCI_EPCTX_0_EPSTATE_SET(0) |
3601 XHCI_EPCTX_0_MULT_SET(0) |
3602 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) |
3603 XHCI_EPCTX_0_LSA_SET(0) |
3604 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0);
3605 cp[1] =
3606 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) |
3607 XHCI_EPCTX_1_HID_SET(0) |
3608 XHCI_EPCTX_1_MAXB_SET(0);
3609
3610 if (xfertype != UE_ISOCHRONOUS)
3611 cp[1] |= XHCI_EPCTX_1_CERR_SET(3);
3612
3613 xhci_setup_maxburst(pipe, cp);
3614
3615 DPRINTFN(4, "setting on dci %ju ival %ju mult %ju mps %#jx",
3616 dci, XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_0_MULT_GET(cp[0]),
3617 XHCI_EPCTX_1_MAXP_SIZE_GET(cp[1]));
3618 DPRINTFN(4, " maxburst %ju mep %#jx atl %#jx",
3619 XHCI_EPCTX_1_MAXB_GET(cp[1]),
3620 (XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_GET(cp[0]) << 16) +
3621 XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(cp[4]),
3622 XHCI_EPCTX_4_AVG_TRB_LEN_GET(cp[4]), 0);
3623
3624 /* rewind TR dequeue pointer in xHC */
3625 /* can't use xhci_ep_get_dci() yet? */
3626 *(uint64_t *)(&cp[2]) = htole64(
3627 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3628 XHCI_EPCTX_2_DCS_SET(1));
3629
3630 cp[0] = htole32(cp[0]);
3631 cp[1] = htole32(cp[1]);
3632 cp[4] = htole32(cp[4]);
3633
3634 /* rewind TR dequeue pointer in driver */
3635 struct xhci_ring *xr = xs->xs_xr[dci];
3636 mutex_enter(&xr->xr_lock);
3637 xhci_host_dequeue(xr);
3638 mutex_exit(&xr->xr_lock);
3639
3640 /* sync input contexts before they are read from memory */
3641 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3642 }
3643
3644 /*
3645 * Set up the route string and root hub port of the given device in the slot context
3646 */
3647 static void
3648 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3649 {
3650 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3651 struct usbd_device *dev = pipe->up_dev;
3652 struct usbd_port *up = dev->ud_powersrc;
3653 struct usbd_device *hub;
3654 struct usbd_device *adev;
3655 uint8_t rhport = 0;
3656 uint32_t route = 0;
3657
3658 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3659
3660 /* Locate the root hub port and determine the route string */
3661 /* 4.3.3 route string does not include roothub port */
3662 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3663 uint32_t dep;
3664
3665 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3666 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3667 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3668 -1);
3669
3670 if (hub->ud_powersrc == NULL)
3671 break;
3672 dep = hub->ud_depth;
3673 if (dep == 0)
3674 break;
3675 rhport = hub->ud_powersrc->up_portno;
3676 if (dep > USB_HUB_MAX_DEPTH)
3677 continue;
3678
3679 route |=
3680 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3681 << ((dep - 1) * 4);
3682 }
3683 route = route >> 4;
3684 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
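/*
 * Illustrative example: for a device on port 2 of an external hub that
 * itself sits on root hub port 4, the walk above yields nibble 1 = 2 and
 * nibble 0 = 4; shifting right by 4 drops the root hub port, leaving
 * route 0x2 and rhport 4, as 4.3.3 requires.
 */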
3685
3686 /* Locate port on upstream high speed hub */
3687 for (adev = dev, hub = up->up_parent;
3688 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3689 adev = hub, hub = hub->ud_myhub)
3690 ;
3691 if (hub) {
3692 int p;
3693 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3694 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3695 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3696 goto found;
3697 }
3698 }
3699 panic("%s: cannot find HS port", __func__);
3700 found:
3701 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3702 } else {
3703 dev->ud_myhsport = NULL;
3704 }
3705
3706 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3707
3708 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3709 ctlrport, route, (uintptr_t)hub);
3710
3711 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3712 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3713 }
3714
3715 /*
3716 * Set up whether the device is a hub, whether it uses multiple TTs (MTT),
3717 * and the TT information if it does.
3718 */
3719 static void
3720 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3721 {
3722 struct usbd_device *dev = pipe->up_dev;
3723 struct usbd_port *myhsport = dev->ud_myhsport;
3724 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3725 uint32_t speed = dev->ud_speed;
3726 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3727 uint8_t tthubslot, ttportnum;
3728 bool ishub;
3729 bool usemtt;
3730
3731 XHCIHIST_FUNC();
3732
3733 /*
3734 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3735 * tthubslot:
3736 * This is the slot ID of parent HS hub
3737 * if LS/FS device is connected && connected through HS hub.
3738 * This is 0 if device is not LS/FS device ||
3739 * parent hub is not HS hub ||
3740 * attached to root hub.
3741 * ttportnum:
3742 * This is the downstream facing port of parent HS hub
3743 * if LS/FS device is connected.
3744 * This is 0 if device is not LS/FS device ||
3745 * parent hub is not HS hub ||
3746 * attached to root hub.
3747 */
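/*
 * For example, a full-speed device on downstream port 3 of a high-speed
 * hub whose address is 5 gets ttportnum = 3 and tthubslot = 5; a
 * high-speed or SuperSpeed device, or one attached to the root hub,
 * gets 0 for both.
 */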
3748 if (myhsport &&
3749 myhsport->up_parent->ud_addr != rhaddr &&
3750 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3751 ttportnum = myhsport->up_portno;
3752 tthubslot = myhsport->up_parent->ud_addr;
3753 } else {
3754 ttportnum = 0;
3755 tthubslot = 0;
3756 }
3757 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd",
3758 (uintptr_t)myhsport, ttportnum, tthubslot, 0);
3759
3760 /* ishub is valid after reading UDESC_DEVICE */
3761 ishub = (dd->bDeviceClass == UDCLASS_HUB);
3762
3763 /* dev->ud_hub is valid after reading UDESC_HUB */
3764 if (ishub && dev->ud_hub) {
3765 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc;
3766 uint8_t ttt =
3767 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK);
3768
3769 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts);
3770 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt);
3771 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0);
3772 }
3773
3774 #define IS_MTTHUB(dd) \
3775 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT)
3776
3777 /*
3778 * MTT flag is set if
3779 * 1. this is HS hub && MTTs are supported and enabled; or
3780 * 2. this is LS or FS device && there is a parent HS hub where MTTs
3781 * are supported and enabled.
3782 *
3783 * XXX enabled is not tested yet
3784 */
3785 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd))
3786 usemtt = true;
3787 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) &&
3788 myhsport &&
3789 myhsport->up_parent->ud_addr != rhaddr &&
3790 IS_MTTHUB(&myhsport->up_parent->ud_ddesc))
3791 usemtt = true;
3792 else
3793 usemtt = false;
3794 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd",
3795 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt);
3796
3797 #undef IS_MTTHUB
3798
3799 cp[0] |=
3800 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) |
3801 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0);
3802 cp[2] |=
3803 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) |
3804 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum);
3805 }
3806
3807 static const usb_endpoint_ss_comp_descriptor_t *
3808 xhci_get_essc_desc(struct usbd_pipe *pipe)
3809 {
3810 struct usbd_device *dev = pipe->up_dev;
3811 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3812 const usb_cdc_descriptor_t *cdcd;
3813 usbd_desc_iter_t iter;
3814 uint8_t ep;
3815
3816 /* config desc is NULL when opening ep0 */
3817 if (dev == NULL || dev->ud_cdesc == NULL)
3818 return NULL;
3819
3820 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev,
3821 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY);
3822 if (cdcd == NULL)
3823 return NULL;
3824
3825 usb_desc_iter_init(dev, &iter);
3826 iter.cur = (const void *)cdcd;
3827
3828 /* find endpoint_ss_comp desc for ep of this pipe */
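/*
 * USB 3.x places the SuperSpeed endpoint companion descriptor
 * immediately after the endpoint descriptor it belongs to, so once the
 * matching endpoint descriptor is found, the very next descriptor is
 * checked for UDESC_ENDPOINT_SS_COMP below.
 */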
3829 for (ep = 0;;) {
3830 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter);
3831 if (cdcd == NULL)
3832 break;
3833 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) {
3834 ep = ((const usb_endpoint_descriptor_t *)cdcd)->
3835 bEndpointAddress;
3836 if (UE_GET_ADDR(ep) ==
3837 UE_GET_ADDR(ed->bEndpointAddress)) {
3838 cdcd = (const usb_cdc_descriptor_t *)
3839 usb_desc_iter_next(&iter);
3840 break;
3841 }
3842 ep = 0;
3843 }
3844 }
3845 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
3846 return (const usb_endpoint_ss_comp_descriptor_t *)cdcd;
3847 }
3848 return NULL;
3849 }
3850
3851 /* set up maxburst, interval, max ESIT payload, and avg TRB length for an endpoint */
3852 static void
3853 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp)
3854 {
3855 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe;
3856 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3857 struct usbd_device * const dev = pipe->up_dev;
3858 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3859 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3860 uint16_t mps = UGETW(ed->wMaxPacketSize);
3861 uint8_t speed = dev->ud_speed;
3862 uint32_t maxb, mep, atl;
3863 uint8_t ival, mult;
3864
3865 const usb_endpoint_ss_comp_descriptor_t * esscd =
3866 xhci_get_essc_desc(pipe);
3867
3868 /* USB 2.0 9.6.6, xHCI 4.8.2.4, 6.2.3.2 - 6.2.3.8 */
3869 switch (xfertype) {
3870 case UE_ISOCHRONOUS:
3871 case UE_INTERRUPT:
3872 if (USB_IS_SS(speed)) {
3873 maxb = esscd ? esscd->bMaxBurst : UE_GET_TRANS(mps);
3874 mep = esscd ? UGETW(esscd->wBytesPerInterval) :
3875 UE_GET_SIZE(mps) * (maxb + 1);
3876 if (esscd && xfertype == UE_ISOCHRONOUS &&
3877 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) {
3878 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes);
3879 mult = (mult > 2) ? 2 : mult;
3880 } else
3881 mult = 0;
3882
3883 } else {
3884 switch (speed) {
3885 case USB_SPEED_HIGH:
3886 maxb = UE_GET_TRANS(mps);
3887 mep = UE_GET_SIZE(mps) * (maxb + 1);
3888 break;
3889 case USB_SPEED_FULL:
3890 maxb = 0;
3891 mep = UE_GET_SIZE(mps);
3892 break;
3893 default:
3894 maxb = 0;
3895 mep = 0;
3896 break;
3897 }
3898 mult = 0;
3899 }
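/*
 * Illustrative example for the high-speed branch above: an isoc
 * endpoint with wMaxPacketSize 0x1400 encodes two additional
 * transactions per microframe and a 1024-byte packet, so maxb = 2 and
 * mep = 1024 * (2 + 1) = 3072 bytes per interval.
 */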
3900 mps = UE_GET_SIZE(mps);
3901
3902 if (pipe->up_interval == USBD_DEFAULT_INTERVAL)
3903 ival = ed->bInterval;
3904 else
3905 ival = pipe->up_interval;
3906
3907 ival = xhci_bival2ival(ival, speed, xfertype);
3908 atl = mep;
3909 break;
3910 case UE_CONTROL:
3911 case UE_BULK:
3912 default:
3913 if (USB_IS_SS(speed)) {
3914 maxb = esscd ? esscd->bMaxBurst : 0;
3915 } else
3916 maxb = 0;
3917
3918 mps = UE_GET_SIZE(mps);
3919 mep = 0;
3920 mult = 0;
3921 ival = 0;
3922 if (xfertype == UE_CONTROL)
3923 atl = 8; /* 6.2.3 */
3924 else
3925 atl = mps;
3926 break;
3927 }
3928
3929 switch (speed) {
3930 case USB_SPEED_LOW:
3931 break;
3932 case USB_SPEED_FULL:
3933 if (xfertype == UE_INTERRUPT)
3934 if (mep > XHCI_EPCTX_MEP_FS_INTR)
3935 mep = XHCI_EPCTX_MEP_FS_INTR;
3936 if (xfertype == UE_ISOCHRONOUS)
3937 if (mep > XHCI_EPCTX_MEP_FS_ISOC)
3938 mep = XHCI_EPCTX_MEP_FS_ISOC;
3939 break;
3940 case USB_SPEED_HIGH:
3941 if (xfertype == UE_INTERRUPT)
3942 if (mep > XHCI_EPCTX_MEP_HS_INTR)
3943 mep = XHCI_EPCTX_MEP_HS_INTR;
3944 if (xfertype == UE_ISOCHRONOUS)
3945 if (mep > XHCI_EPCTX_MEP_HS_ISOC)
3946 mep = XHCI_EPCTX_MEP_HS_ISOC;
3947 break;
3948 case USB_SPEED_SUPER:
3949 case USB_SPEED_SUPER_PLUS:
3950 default:
3951 if (xfertype == UE_INTERRUPT)
3952 if (mep > XHCI_EPCTX_MEP_SS_INTR)
3953 mep = XHCI_EPCTX_MEP_SS_INTR;
3954 if (xfertype == UE_ISOCHRONOUS) {
3955 if (speed == USB_SPEED_SUPER ||
3956 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) {
3957 if (mep > XHCI_EPCTX_MEP_SS_ISOC)
3958 mep = XHCI_EPCTX_MEP_SS_ISOC;
3959 } else {
3960 if (mep > XHCI_EPCTX_MEP_SS_ISOC_LEC)
3961 mep = XHCI_EPCTX_MEP_SS_ISOC_LEC;
3962 }
3963 }
3964 break;
3965 }
3966
3967 xpipe->xp_maxb = maxb + 1;
3968 xpipe->xp_mult = mult + 1;
3969
3970 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(mep >> 16);
3971 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival);
3972 cp[0] |= XHCI_EPCTX_0_MULT_SET(mult);
3973 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps);
3974 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb);
3975 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(mep & 0xffff);
3976 cp[4] |= XHCI_EPCTX_4_AVG_TRB_LEN_SET(atl);
3977 }
3978
3979 /*
3980 * Convert a usbdi bInterval value to the xHCI endpoint context Interval
3981 * value for a periodic pipe.
3982 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
3983 */
3984 static uint32_t
3985 xhci_bival2ival(uint32_t ival, uint32_t speed, uint32_t xfertype)
3986 {
3987 if (xfertype != UE_INTERRUPT && xfertype != UE_ISOCHRONOUS)
3988 return 0;
3989
3990 if (xfertype == UE_INTERRUPT &&
3991 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) {
3992 u_int i;
3993
3994 /*
3995 * round ival down to "the nearest base 2 multiple of
3996 * bInterval * 8".
3997 * bInterval is at most 255 as its type is uByte.
3998 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
3999 */
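/*
 * For example, bInterval = 10 (ms) gives 10 * 8 = 80 125us units; the
 * largest power of two not exceeding 80 is 2^6 = 64, so ival becomes 6,
 * i.e. an 8 ms service interval.
 */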
4000 for (i = 10; i > 0; i--) {
4001 if ((ival * 8) >= (1 << i))
4002 break;
4003 }
4004 ival = i;
4005
4006 /* 3 - 10 */
4007 ival = (ival < 3) ? 3 : ival;
4008 } else if (speed == USB_SPEED_FULL) {
4009 /* FS isoc */
4010 ival += 3; /* 1ms -> 125us */
4011 ival--; /* Interval = bInterval-1 */
4012 /* 3 - 18 */
4013 ival = (ival > 18) ? 18 : ival;
4014 ival = (ival < 3) ? 3 : ival;
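/*
 * For example, an FS isoc bInterval of 4 (2^3 = 8 frames = 8 ms)
 * becomes 4 + 3 - 1 = 6, i.e. 2^6 * 125us = 8 ms.
 */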
4015 } else {
4016 /* SS/HS intr/isoc */
4017 if (ival > 0)
4018 ival--; /* Interval = bInterval-1 */
4019 /* 0 - 15 */
4020 ival = (ival > 15) ? 15 : ival;
4021 }
4022
4023 return ival;
4024 }
4025
4026 /* ----- */
4027
4028 static void
4029 xhci_noop(struct usbd_pipe *pipe)
4030 {
4031 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4032 }
4033
4034 /*
4035 * Process root hub request.
4036 */
4037 static int
4038 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req,
4039 void *buf, int buflen)
4040 {
4041 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
4042 usb_port_status_t ps;
4043 int l, totlen = 0;
4044 uint16_t len, value, index;
4045 int port, i;
4046 uint32_t v;
4047
4048 XHCIHIST_FUNC();
4049
4050 KASSERT(mutex_owned(&sc->sc_rhlock));
4051
4052 if (sc->sc_dying)
4053 return -1;
4054
4055 size_t bn = bus == &sc->sc_bus ? 0 : 1;
4056
4057 len = UGETW(req->wLength);
4058 value = UGETW(req->wValue);
4059 index = UGETW(req->wIndex);
4060
4061 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
4062 req->bmRequestType | (req->bRequest << 8), value, index, len);
4063
4064 #define C(x,y) ((x) | ((y) << 8))
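/*
 * C() packs bRequest into the low byte and bmRequestType into the high
 * byte so one switch can match both, e.g.
 * C(UR_GET_STATUS, UT_READ_CLASS_OTHER) selects Get Port Status.
 */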
4065 switch (C(req->bRequest, req->bmRequestType)) {
4066 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
4067 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
4068 if (len == 0)
4069 break;
4070 switch (value) {
4071 #define sd ((usb_string_descriptor_t *)buf)
4072 case C(2, UDESC_STRING):
4073 /* Product */
4074 totlen = usb_makestrdesc(sd, len, "xHCI root hub");
4075 break;
4076 #undef sd
4077 default:
4078 /* default from usbroothub */
4079 return buflen;
4080 }
4081 break;
4082
4083 /* Hub requests */
4084 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
4085 break;
4086 /* Clear Port Feature request */
4087 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
4088 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4089
4090 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
4091 index, value, bn, cp);
4092 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4093 return -1;
4094 }
4095 port = XHCI_PORTSC(cp);
4096 v = xhci_op_read_4(sc, port);
4097 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
4098 v &= ~XHCI_PS_CLEAR;
4099 switch (value) {
4100 case UHF_PORT_ENABLE:
4101 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
4102 break;
4103 case UHF_PORT_SUSPEND:
4104 return -1;
4105 case UHF_PORT_POWER:
4106 break;
4107 case UHF_PORT_TEST:
4108 case UHF_PORT_INDICATOR:
4109 return -1;
4110 case UHF_C_PORT_CONNECTION:
4111 xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
4112 break;
4113 case UHF_C_PORT_ENABLE:
4114 case UHF_C_PORT_SUSPEND:
4115 case UHF_C_PORT_OVER_CURRENT:
4116 return -1;
4117 case UHF_C_BH_PORT_RESET:
4118 xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
4119 break;
4120 case UHF_C_PORT_RESET:
4121 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4122 break;
4123 case UHF_C_PORT_LINK_STATE:
4124 xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
4125 break;
4126 case UHF_C_PORT_CONFIG_ERROR:
4127 xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
4128 break;
4129 default:
4130 return -1;
4131 }
4132 break;
4133 }
4134 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
4135 if (len == 0)
4136 break;
4137 if ((value & 0xff) != 0) {
4138 return -1;
4139 }
4140 usb_hub_descriptor_t hubd;
4141
4142 totlen = uimin(buflen, sizeof(hubd));
4143 memcpy(&hubd, buf, totlen);
4144 hubd.bNbrPorts = sc->sc_rhportcount[bn];
4145 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
4146 hubd.bPwrOn2PwrGood = 200;
4147 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
4148 /* XXX can't find out? */
4149 hubd.DeviceRemovable[i++] = 0;
4150 }
4151 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
4152 totlen = uimin(totlen, hubd.bDescLength);
4153 memcpy(buf, &hubd, totlen);
4154 break;
4155 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
4156 if (len != 4) {
4157 return -1;
4158 }
4159 memset(buf, 0, len); /* ? XXX */
4160 totlen = len;
4161 break;
4162 /* Get Port Status request */
4163 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
4164 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4165
4166 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
4167 bn, index, cp, 0);
4168 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4169 DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
4170 "portcount=%jd",
4171 index, bn, sc->sc_rhportcount[bn], 0);
4172 return -1;
4173 }
4174 if (len != 4) {
4175 DPRINTFN(5, "bad get port status: len %jd != 4",
4176 len, 0, 0, 0);
4177 return -1;
4178 }
4179 v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
4180 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
4181 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
4182 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
4183 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
4184 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
4185 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
4186 if (v & XHCI_PS_PR) i |= UPS_RESET;
4187 if (v & XHCI_PS_PP) {
4188 if (i & UPS_OTHER_SPEED)
4189 i |= UPS_PORT_POWER_SS;
4190 else
4191 i |= UPS_PORT_POWER;
4192 }
4193 if (i & UPS_OTHER_SPEED)
4194 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
4195 if (sc->sc_vendor_port_status)
4196 i = sc->sc_vendor_port_status(sc, v, i);
4197 USETW(ps.wPortStatus, i);
4198 i = 0;
4199 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
4200 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
4201 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
4202 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
4203 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
4204 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
4205 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
4206 USETW(ps.wPortChange, i);
4207 totlen = uimin(len, sizeof(ps));
4208 memcpy(buf, &ps, totlen);
4209 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
4210 " totlen %jd",
4211 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
4212 break;
4213 }
4214 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
4215 return -1;
4216 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
4217 break;
4218 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
4219 break;
4220 /* Set Port Feature request */
4221 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
4222 int optval = (index >> 8) & 0xff;
4223 index &= 0xff;
4224 if (index < 1 || index > sc->sc_rhportcount[bn]) {
4225 return -1;
4226 }
4227
4228 const size_t cp = xhci_rhport2ctlrport(sc, bn, index);
4229
4230 port = XHCI_PORTSC(cp);
4231 v = xhci_op_read_4(sc, port);
4232 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
4233 v &= ~XHCI_PS_CLEAR;
4234 switch (value) {
4235 case UHF_PORT_ENABLE:
4236 xhci_op_write_4(sc, port, v | XHCI_PS_PED);
4237 break;
4238 case UHF_PORT_SUSPEND:
4239 /* XXX suspend */
4240 break;
4241 case UHF_PORT_RESET:
4242 xhci_op_write_4(sc, port, v | XHCI_PS_PR);
4243 /* Wait for reset to complete. */
4244 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
4245 if (sc->sc_dying) {
4246 return -1;
4247 }
4248 v = xhci_op_read_4(sc, port);
4249 if ((v & XHCI_PS_PR) == 0) {
4250 break;
4251 }
4252 usb_delay_ms(&sc->sc_bus, 10);
4253 }
4254 break;
4255 case UHF_PORT_POWER:
4256 /* XXX power control */
4257 break;
4258 /* XXX more */
4259 case UHF_C_PORT_RESET:
4260 xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
4261 break;
4262 case UHF_PORT_U1_TIMEOUT:
4263 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4264 return -1;
4265 }
4266 port = XHCI_PORTPMSC(cp);
4267 v = xhci_op_read_4(sc, port);
4268 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4269 index, cp, v, 0);
4270 v &= ~XHCI_PM3_U1TO_SET(0xff);
4271 v |= XHCI_PM3_U1TO_SET(optval);
4272 xhci_op_write_4(sc, port, v);
4273 break;
4274 case UHF_PORT_U2_TIMEOUT:
4275 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
4276 return -1;
4277 }
4278 port = XHCI_PORTPMSC(cp);
4279 v = xhci_op_read_4(sc, port);
4280 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
4281 index, cp, v, 0);
4282 v &= ~XHCI_PM3_U2TO_SET(0xff);
4283 v |= XHCI_PM3_U2TO_SET(optval);
4284 xhci_op_write_4(sc, port, v);
4285 break;
4286 default:
4287 return -1;
4288 }
4289 }
4290 break;
4291 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
4292 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
4293 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
4294 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
4295 break;
4296 default:
4297 /* default from usbroothub */
4298 return buflen;
4299 }
4300
4301 return totlen;
4302 }
4303
4304 static int
4305 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
4306 void *buf, int buflen)
4307 {
4308 struct xhci_softc *sc = XHCI_BUS2SC(bus);
4309 int actlen;
4310
4311 mutex_enter(&sc->sc_rhlock);
4312 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen);
4313 mutex_exit(&sc->sc_rhlock);
4314
4315 return actlen;
4316 }
4317
4318 /* root hub interrupt */
4319
4320 static usbd_status
4321 xhci_root_intr_transfer(struct usbd_xfer *xfer)
4322 {
4323 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4324
4325 /* Pipe isn't running, start first */
4326 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4327 }
4328
4329 /* Wait for roothub port status/change */
4330 static usbd_status
4331 xhci_root_intr_start(struct usbd_xfer *xfer)
4332 {
4333 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4334 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4335
4336 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4337
4338 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4339
4340 if (sc->sc_dying)
4341 return USBD_IOERROR;
4342
4343 KASSERT(sc->sc_intrxfer[bn] == NULL);
4344 sc->sc_intrxfer[bn] = xfer;
4345 xfer->ux_status = USBD_IN_PROGRESS;
4346
4347 return USBD_IN_PROGRESS;
4348 }
4349
4350 static void
4351 xhci_root_intr_abort(struct usbd_xfer *xfer)
4352 {
4353 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4354 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4355
4356 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4357
4358 KASSERT(mutex_owned(&sc->sc_lock));
4359 KASSERT(xfer->ux_pipe->up_intrxfer == xfer);
4360
4361 /* If xfer has already completed, nothing to do here. */
4362 if (sc->sc_intrxfer[bn] == NULL)
4363 return;
4364
4365 /*
4366 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
4367 * Cancel it.
4368 */
4369 KASSERT(sc->sc_intrxfer[bn] == xfer);
4370 xfer->ux_status = USBD_CANCELLED;
4371 usb_transfer_complete(xfer);
4372 }
4373
4374 static void
4375 xhci_root_intr_close(struct usbd_pipe *pipe)
4376 {
4377 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
4378 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
4379 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4380
4381 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4382
4383 KASSERT(mutex_owned(&sc->sc_lock));
4384
4385 /*
4386 * Caller must guarantee the xfer has completed first, by
4387 * closing the pipe only after normal completion or an abort.
4388 */
4389 KASSERT(sc->sc_intrxfer[bn] == NULL);
4390 }
4391
4392 static void
4393 xhci_root_intr_done(struct usbd_xfer *xfer)
4394 {
4395 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4396 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
4397
4398 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4399
4400 KASSERT(mutex_owned(&sc->sc_lock));
4401
4402 /* Claim the xfer so it doesn't get completed again. */
4403 KASSERT(sc->sc_intrxfer[bn] == xfer);
4404 KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
4405 sc->sc_intrxfer[bn] = NULL;
4406 }
4407
4408 /* -------------- */
4409 /* device control */
4410
4411 static usbd_status
4412 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
4413 {
4414 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4415
4416 /* Pipe isn't running, start first */
4417 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4418 }
4419
4420 static usbd_status
4421 xhci_device_ctrl_start(struct usbd_xfer *xfer)
4422 {
4423 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4424 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4425 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4426 struct xhci_ring * const tr = xs->xs_xr[dci];
4427 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4428 usb_device_request_t * const req = &xfer->ux_request;
4429 const bool isread = usbd_xfer_isread(xfer);
4430 const uint32_t len = UGETW(req->wLength);
4431 usb_dma_t * const dma = &xfer->ux_dmabuf;
4432 uint64_t parameter;
4433 uint32_t status;
4434 uint32_t control;
4435 u_int i;
4436 const bool polling = xhci_polling_p(sc);
4437
4438 XHCIHIST_FUNC();
4439 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
4440 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
4441 UGETW(req->wIndex), UGETW(req->wLength));
4442
4443 KASSERT(polling || mutex_owned(&sc->sc_lock));
4444
4445 /* we rely on the bottom bits for extra info */
4446 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %p", xfer);
4447
4448 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);
4449
4450 if (tr->is_halted)
4451 goto out;
4452
4453 i = 0;
4454
4455 /* setup phase */
4456 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
4457 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
4458 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
4459 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
4460 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
4461 XHCI_TRB_3_IDT_BIT;
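/*
 * The IDT bit marks this as immediate data: the 8-byte setup packet
 * travels in the TRB parameter field itself rather than through a DMA
 * pointer.
 */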
4462 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4463
4464 if (len != 0) {
4465 /* data phase */
4466 parameter = DMAADDR(dma, 0);
4467 KASSERTMSG(len <= 0x10000, "len %d", len);
4468 status = XHCI_TRB_2_IRQ_SET(0) |
4469 XHCI_TRB_2_TDSZ_SET(0) |
4470 XHCI_TRB_2_BYTES_SET(len);
4471 control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
4472 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
4473 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4474 XHCI_TRB_3_IOC_BIT;
4475 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4476
4477 usb_syncmem(dma, 0, len,
4478 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4479 }
4480
4481 parameter = 0;
4482 status = XHCI_TRB_2_IRQ_SET(0);
4483 /* the status stage has inverted direction */
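/*
 * e.g. an IN transfer with data ends with an OUT status stage, while a
 * no-data or OUT transfer ends with an IN status stage, which is what
 * the (isread && len > 0) test selects.
 */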
4484 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
4485 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
4486 XHCI_TRB_3_IOC_BIT;
4487 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4488
4489 if (!polling)
4490 mutex_enter(&tr->xr_lock);
4491 xhci_ring_put_xfer(sc, tr, xx, i);
4492 if (!polling)
4493 mutex_exit(&tr->xr_lock);
4494
4495 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4496
4497 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4498 usbd_xfer_schedule_timeout(xfer);
4499 xfer->ux_status = USBD_IN_PROGRESS;
4500 } else {
4501 /*
4502 * We must be coming from xhci_pipe_restart -- timeout
4503 * already set up, nothing to do.
4504 */
4505 }
4506 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4507
4508 return USBD_IN_PROGRESS;
4509 }
4510
4511 static void
4512 xhci_device_ctrl_done(struct usbd_xfer *xfer)
4513 {
4514 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4515 usb_device_request_t *req = &xfer->ux_request;
4516 int len = UGETW(req->wLength);
4517 int rd = req->bmRequestType & UT_READ;
4518
4519 if (len)
4520 usb_syncmem(&xfer->ux_dmabuf, 0, len,
4521 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4522 }
4523
4524 static void
4525 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
4526 {
4527 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4528
4529 usbd_xfer_abort(xfer);
4530 }
4531
4532 static void
4533 xhci_device_ctrl_close(struct usbd_pipe *pipe)
4534 {
4535 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4536
4537 xhci_close_pipe(pipe);
4538 }
4539
4540 /* ------------------ */
4541 /* device isochronous */
4542
4543 static usbd_status
4544 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
4545 {
4546 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4547
4548 return xhci_device_isoc_enter(xfer);
4549 }
4550
4551 static usbd_status
4552 xhci_device_isoc_enter(struct usbd_xfer *xfer)
4553 {
4554 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4555 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4556 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4557 struct xhci_ring * const tr = xs->xs_xr[dci];
4558 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4559 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
4560 uint32_t len = xfer->ux_length;
4561 usb_dma_t * const dma = &xfer->ux_dmabuf;
4562 uint64_t parameter;
4563 uint32_t status;
4564 uint32_t control;
4565 uint32_t mfindex;
4566 uint32_t offs;
4567 int i, ival;
4568 const bool polling = xhci_polling_p(sc);
4569 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
4570 const uint16_t mps = UE_GET_SIZE(MPS);
4571 const uint8_t maxb = xpipe->xp_maxb;
4572 u_int tdpc, tbc, tlbpc;
4573
4574 XHCIHIST_FUNC();
4575 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4576 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4577
4578 KASSERT(polling || mutex_owned(&sc->sc_lock));
4579
4580 if (sc->sc_dying)
4581 return USBD_IOERROR;
4582
4583 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
4584 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4585
4586 const bool isread = usbd_xfer_isread(xfer);
4587 if (xfer->ux_length)
4588 usb_syncmem(dma, 0, xfer->ux_length,
4589 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4590
4591 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
4592 if (ival >= 1 && ival <= 16)
4593 ival = 1 << (ival - 1);
4594 else
4595 ival = 1; /* fake something up */
4596
4597 if (xpipe->xp_isoc_next == -1) {
4598 mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
4599 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
4600 mfindex = XHCI_MFINDEX_GET(mfindex + 1);
4601 mfindex /= USB_UFRAMES_PER_FRAME;
4602 mfindex += 7; /* 7 frames is max possible IST */
4603 xpipe->xp_isoc_next = roundup2(mfindex, ival);
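/*
 * Illustrative example: with MFINDEX at 999 microframes this computes
 * frame (999 + 1) / 8 = 125, adds the 7-frame cushion to get 132, and
 * rounds up to the next multiple of an 8-frame interval, 136.
 */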
4604 }
4605
4606 offs = 0;
4607 for (i = 0; i < xfer->ux_nframes; i++) {
4608 len = xfer->ux_frlengths[i];
4609
4610 tdpc = howmany(len, mps);
4611 tbc = howmany(tdpc, maxb) - 1;
4612 tlbpc = tdpc % maxb;
4613 tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;
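/*
 * Illustrative example: mps = 1024, xp_maxb = 3 and a 5000-byte frame
 * give tdpc = 5 packets and two bursts, so tbc is encoded 0-based as 1;
 * the last burst carries 5 % 3 = 2 packets, encoded as tlbpc = 1.
 */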
4614
4615 KASSERTMSG(len <= 0x10000, "len %d", len);
4616 parameter = DMAADDR(dma, offs);
4617 status = XHCI_TRB_2_IRQ_SET(0) |
4618 XHCI_TRB_2_TDSZ_SET(0) |
4619 XHCI_TRB_2_BYTES_SET(len);
4620 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
4621 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4622 XHCI_TRB_3_TBC_SET(tbc) |
4623 XHCI_TRB_3_TLBPC_SET(tlbpc) |
4624 XHCI_TRB_3_IOC_BIT;
4625 if (XHCI_HCC_CFC(sc->sc_hcc)) {
4626 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4627 #if 0
4628 } else if (xpipe->xp_isoc_next == -1) {
4629 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
4630 #endif
4631 } else {
4632 control |= XHCI_TRB_3_ISO_SIA_BIT;
4633 }
4634 #if 0
4635 if (i != xfer->ux_nframes - 1)
4636 control |= XHCI_TRB_3_BEI_BIT;
4637 #endif
4638 xhci_xfer_put_trb(xx, i, parameter, status, control);
4639
4640 xpipe->xp_isoc_next += ival;
4641 offs += len;
4642 }
4643
4644 xx->xx_isoc_done = 0;
4645
4646 if (!polling)
4647 mutex_enter(&tr->xr_lock);
4648 xhci_ring_put_xfer(sc, tr, xx, i);
4649 if (!polling)
4650 mutex_exit(&tr->xr_lock);
4651
4652 xfer->ux_status = USBD_IN_PROGRESS;
4653 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4654 usbd_xfer_schedule_timeout(xfer);
4655
4656 return USBD_IN_PROGRESS;
4657 }
4658
4659 static void
4660 xhci_device_isoc_abort(struct usbd_xfer *xfer)
4661 {
4662 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4663
4664 usbd_xfer_abort(xfer);
4665 }
4666
4667 static void
4668 xhci_device_isoc_close(struct usbd_pipe *pipe)
4669 {
4670 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4671
4672 xhci_close_pipe(pipe);
4673 }
4674
4675 static void
4676 xhci_device_isoc_done(struct usbd_xfer *xfer)
4677 {
4678 #ifdef USB_DEBUG
4679 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4680 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4681 #endif
4682 const bool isread = usbd_xfer_isread(xfer);
4683
4684 XHCIHIST_FUNC();
4685 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4686 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4687
4688 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4689 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4690 }
4691
4692 /* ----------- */
4693 /* device bulk */
4694
4695 static usbd_status
4696 xhci_device_bulk_transfer(struct usbd_xfer *xfer)
4697 {
4698 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4699
4700 /* Pipe isn't running, so start it first. */
4701 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4702 }
4703
4704 static usbd_status
4705 xhci_device_bulk_start(struct usbd_xfer *xfer)
4706 {
4707 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4708 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4709 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4710 struct xhci_ring * const tr = xs->xs_xr[dci];
4711 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4712 const uint32_t len = xfer->ux_length;
4713 usb_dma_t * const dma = &xfer->ux_dmabuf;
4714 uint64_t parameter;
4715 uint32_t status;
4716 uint32_t control;
4717 u_int i = 0;
4718 const bool polling = xhci_polling_p(sc);
4719
4720 XHCIHIST_FUNC();
4721 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4722 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4723
4724 KASSERT(polling || mutex_owned(&sc->sc_lock));
4725
4726 if (sc->sc_dying)
4727 return USBD_IOERROR;
4728
4729 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4730
4731 if (tr->is_halted)
4732 goto out;
4733
4734 parameter = DMAADDR(dma, 0);
4735 const bool isread = usbd_xfer_isread(xfer);
4736 if (len)
4737 usb_syncmem(dma, 0, len,
4738 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4739
4740 /*
4741 * XXX: (dsl) The physical buffer must not cross a 64k boundary.
4742 * If the user-supplied buffer crosses such a boundary then 2
4743 * (or more) TRBs should be used.
4744 * If multiple TRBs are used, the td_size field must be set correctly.
4745 * For v1.0 devices (like ivy bridge) this is the number of usb data
4746 * blocks needed to complete the transfer.
4747 * Setting it to 1 in the last TRB causes an extra zero-length
4748 * data block to be sent.
4749 * The earlier documentation differs; I don't know how it behaves.
4750 */
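/*
 * For instance (illustrative numbers), a 0x3000-byte buffer whose
 * physical address ends in 0xe000 crosses a 64k boundary after 0x2000
 * bytes and would need two TRBs with a correctly maintained td_size.
 */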
4751 KASSERTMSG(len <= 0x10000, "len %d", len);
4752 status = XHCI_TRB_2_IRQ_SET(0) |
4753 XHCI_TRB_2_TDSZ_SET(0) |
4754 XHCI_TRB_2_BYTES_SET(len);
4755 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4756 (isread ? XHCI_TRB_3_ISP_BIT : 0) |
4757 XHCI_TRB_3_IOC_BIT;
4758 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4759
4760 if (!polling)
4761 mutex_enter(&tr->xr_lock);
4762 xhci_ring_put_xfer(sc, tr, xx, i);
4763 if (!polling)
4764 mutex_exit(&tr->xr_lock);
4765
4766 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4767
4768 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4769 xfer->ux_status = USBD_IN_PROGRESS;
4770 usbd_xfer_schedule_timeout(xfer);
4771 } else {
4772 /*
4773 * We must be coming from xhci_pipe_restart -- timeout
4774 * already set up, nothing to do.
4775 */
4776 }
4777 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4778
4779 return USBD_IN_PROGRESS;
4780 }
4781
4782 static void
4783 xhci_device_bulk_done(struct usbd_xfer *xfer)
4784 {
4785 #ifdef USB_DEBUG
4786 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4787 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4788 #endif
4789 const bool isread = usbd_xfer_isread(xfer);
4790
4791 XHCIHIST_FUNC();
4792 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4793 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4794
4795 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4796 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4797 }
4798
4799 static void
4800 xhci_device_bulk_abort(struct usbd_xfer *xfer)
4801 {
4802 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4803
4804 usbd_xfer_abort(xfer);
4805 }
4806
4807 static void
4808 xhci_device_bulk_close(struct usbd_pipe *pipe)
4809 {
4810 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4811
4812 xhci_close_pipe(pipe);
4813 }
4814
4815 /* ---------------- */
4816 /* device interrupt */
4817
4818 static usbd_status
4819 xhci_device_intr_transfer(struct usbd_xfer *xfer)
4820 {
4821 XHCIHIST_FUNC(); XHCIHIST_CALLED();
4822
4823 /* Pipe isn't running, so start it first. */
4824 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
4825 }
4826
4827 static usbd_status
4828 xhci_device_intr_start(struct usbd_xfer *xfer)
4829 {
4830 struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
4831 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4832 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4833 struct xhci_ring * const tr = xs->xs_xr[dci];
4834 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
4835 const uint32_t len = xfer->ux_length;
4836 const bool polling = xhci_polling_p(sc);
4837 usb_dma_t * const dma = &xfer->ux_dmabuf;
4838 uint64_t parameter;
4839 uint32_t status;
4840 uint32_t control;
4841 u_int i = 0;
4842
4843 XHCIHIST_FUNC();
4844 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4845 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4846
4847 KASSERT(polling || mutex_owned(&sc->sc_lock));
4848
4849 if (sc->sc_dying)
4850 return USBD_IOERROR;
4851
4852 if (tr->is_halted)
4853 goto out;
4854
4855 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);
4856
4857 const bool isread = usbd_xfer_isread(xfer);
4858 if (len)
4859 usb_syncmem(dma, 0, len,
4860 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
4861
4862 parameter = DMAADDR(dma, 0);
4863 KASSERTMSG(len <= 0x10000, "len %d", len);
4864 status = XHCI_TRB_2_IRQ_SET(0) |
4865 XHCI_TRB_2_TDSZ_SET(0) |
4866 XHCI_TRB_2_BYTES_SET(len);
4867 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
4868 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
4869 xhci_xfer_put_trb(xx, i++, parameter, status, control);
4870
4871 if (!polling)
4872 mutex_enter(&tr->xr_lock);
4873 xhci_ring_put_xfer(sc, tr, xx, i);
4874 if (!polling)
4875 mutex_exit(&tr->xr_lock);
4876
4877 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
4878
4879 out: if (xfer->ux_status == USBD_NOT_STARTED) {
4880 xfer->ux_status = USBD_IN_PROGRESS;
4881 usbd_xfer_schedule_timeout(xfer);
4882 } else {
4883 /*
4884 * We must be coming from xhci_pipe_restart -- timeout
4885 * already set up, nothing to do.
4886 */
4887 }
4888 KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
4889
4890 return USBD_IN_PROGRESS;
4891 }
4892
4893 static void
4894 xhci_device_intr_done(struct usbd_xfer *xfer)
4895 {
4896 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4897 #ifdef USB_DEBUG
4898 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
4899 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
4900 #endif
4901 const bool isread = usbd_xfer_isread(xfer);
4902
4903 XHCIHIST_FUNC();
4904 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
4905 (uintptr_t)xfer, xs->xs_idx, dci, 0);
4906
4907 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));
4908
4909 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
4910 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
4911 }
4912
4913 static void
4914 xhci_device_intr_abort(struct usbd_xfer *xfer)
4915 {
4916 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
4917
4918 XHCIHIST_FUNC();
4919 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);
4920
4921 KASSERT(mutex_owned(&sc->sc_lock));
4922 usbd_xfer_abort(xfer);
4923 }
4924
4925 static void
4926 xhci_device_intr_close(struct usbd_pipe *pipe)
4927 {
4928 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
4929
4930 XHCIHIST_FUNC();
4931 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);
4932
4933 xhci_close_pipe(pipe);
4934 }
4935