1 /*
2  * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <unistd.h>
29 #include <string.h>
30 #include <strings.h>
31 #include <errno.h>
32 #include <byteswap.h>
33 #include <ipxe/malloc.h>
34 #include <ipxe/umalloc.h>
35 #include <ipxe/pci.h>
36 #include <ipxe/usb.h>
37 #include <ipxe/init.h>
38 #include <ipxe/profile.h>
39 #include "xhci.h"
40 
41 /** @file
42  *
43  * USB eXtensible Host Controller Interface (xHCI) driver
44  *
45  */
46 
47 /** Message transfer profiler */
48 static struct profiler xhci_message_profiler __profiler =
49 	{ .name = "xhci.message" };
50 
51 /** Stream transfer profiler */
52 static struct profiler xhci_stream_profiler __profiler =
53 	{ .name = "xhci.stream" };
54 
55 /** Event ring profiler */
56 static struct profiler xhci_event_profiler __profiler =
57 	{ .name = "xhci.event" };
58 
59 /** Transfer event profiler */
60 static struct profiler xhci_transfer_profiler __profiler =
61 	{ .name = "xhci.transfer" };
62 
63 /* Disambiguate the various error causes */
64 #define EIO_DATA							\
65 	__einfo_error ( EINFO_EIO_DATA )
66 #define EINFO_EIO_DATA							\
67 	__einfo_uniqify ( EINFO_EIO, ( 2 - 0 ),				\
68 			  "Data buffer error" )
69 #define EIO_BABBLE							\
70 	__einfo_error ( EINFO_EIO_BABBLE )
71 #define EINFO_EIO_BABBLE						\
72 	__einfo_uniqify ( EINFO_EIO, ( 3 - 0 ),				\
73 			  "Babble detected" )
74 #define EIO_USB								\
75 	__einfo_error ( EINFO_EIO_USB )
76 #define EINFO_EIO_USB							\
77 	__einfo_uniqify ( EINFO_EIO, ( 4 - 0 ),				\
78 			  "USB transaction error" )
79 #define EIO_TRB								\
80 	__einfo_error ( EINFO_EIO_TRB )
81 #define EINFO_EIO_TRB							\
82 	__einfo_uniqify ( EINFO_EIO, ( 5 - 0 ),				\
83 			  "TRB error" )
84 #define EIO_STALL							\
85 	__einfo_error ( EINFO_EIO_STALL )
86 #define EINFO_EIO_STALL							\
87 	__einfo_uniqify ( EINFO_EIO, ( 6 - 0 ),				\
88 			  "Stall error" )
89 #define EIO_RESOURCE							\
90 	__einfo_error ( EINFO_EIO_RESOURCE )
91 #define EINFO_EIO_RESOURCE						\
92 	__einfo_uniqify ( EINFO_EIO, ( 7 - 0 ),				\
93 			  "Resource error" )
94 #define EIO_BANDWIDTH							\
95 	__einfo_error ( EINFO_EIO_BANDWIDTH )
96 #define EINFO_EIO_BANDWIDTH						\
97 	__einfo_uniqify ( EINFO_EIO, ( 8 - 0 ),				\
98 			  "Bandwidth error" )
99 #define EIO_NO_SLOTS							\
100 	__einfo_error ( EINFO_EIO_NO_SLOTS )
101 #define EINFO_EIO_NO_SLOTS						\
102 	__einfo_uniqify ( EINFO_EIO, ( 9 - 0 ),				\
103 			  "No slots available" )
104 #define EIO_STREAM_TYPE							\
105 	__einfo_error ( EINFO_EIO_STREAM_TYPE )
106 #define EINFO_EIO_STREAM_TYPE						\
107 	__einfo_uniqify ( EINFO_EIO, ( 10 - 0 ),			\
108 			  "Invalid stream type" )
109 #define EIO_SLOT							\
110 	__einfo_error ( EINFO_EIO_SLOT )
111 #define EINFO_EIO_SLOT							\
112 	__einfo_uniqify ( EINFO_EIO, ( 11 - 0 ),			\
113 			  "Slot not enabled" )
114 #define EIO_ENDPOINT							\
115 	__einfo_error ( EINFO_EIO_ENDPOINT )
116 #define EINFO_EIO_ENDPOINT						\
117 	__einfo_uniqify ( EINFO_EIO, ( 12 - 0 ),			\
118 			  "Endpoint not enabled" )
119 #define EIO_SHORT							\
120 	__einfo_error ( EINFO_EIO_SHORT )
121 #define EINFO_EIO_SHORT							\
122 	__einfo_uniqify ( EINFO_EIO, ( 13 - 0 ),			\
123 			  "Short packet" )
124 #define EIO_UNDERRUN							\
125 	__einfo_error ( EINFO_EIO_UNDERRUN )
126 #define EINFO_EIO_UNDERRUN						\
127 	__einfo_uniqify ( EINFO_EIO, ( 14 - 0 ),			\
128 			  "Ring underrun" )
129 #define EIO_OVERRUN							\
130 	__einfo_error ( EINFO_EIO_OVERRUN )
131 #define EINFO_EIO_OVERRUN						\
132 	__einfo_uniqify ( EINFO_EIO, ( 15 - 0 ),			\
133 			  "Ring overrun" )
134 #define EIO_VF_RING_FULL						\
135 	__einfo_error ( EINFO_EIO_VF_RING_FULL )
136 #define EINFO_EIO_VF_RING_FULL						\
137 	__einfo_uniqify ( EINFO_EIO, ( 16 - 0 ),			\
138 			  "Virtual function event ring full" )
139 #define EIO_PARAMETER							\
140 	__einfo_error ( EINFO_EIO_PARAMETER )
141 #define EINFO_EIO_PARAMETER						\
142 	__einfo_uniqify ( EINFO_EIO, ( 17 - 0 ),			\
143 			  "Parameter error" )
144 #define EIO_BANDWIDTH_OVERRUN						\
145 	__einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
146 #define EINFO_EIO_BANDWIDTH_OVERRUN					\
147 	__einfo_uniqify ( EINFO_EIO, ( 18 - 0 ),			\
148 			  "Bandwidth overrun" )
149 #define EIO_CONTEXT							\
150 	__einfo_error ( EINFO_EIO_CONTEXT )
151 #define EINFO_EIO_CONTEXT						\
152 	__einfo_uniqify ( EINFO_EIO, ( 19 - 0 ),			\
153 			  "Context state error" )
154 #define EIO_NO_PING							\
155 	__einfo_error ( EINFO_EIO_NO_PING )
156 #define EINFO_EIO_NO_PING						\
157 	__einfo_uniqify ( EINFO_EIO, ( 20 - 0 ),			\
158 			  "No ping response" )
159 #define EIO_RING_FULL							\
160 	__einfo_error ( EINFO_EIO_RING_FULL )
161 #define EINFO_EIO_RING_FULL						\
162 	__einfo_uniqify ( EINFO_EIO, ( 21 - 0 ),			\
163 			  "Event ring full" )
164 #define EIO_INCOMPATIBLE						\
165 	__einfo_error ( EINFO_EIO_INCOMPATIBLE )
166 #define EINFO_EIO_INCOMPATIBLE						\
167 	__einfo_uniqify ( EINFO_EIO, ( 22 - 0 ),			\
168 			  "Incompatible device" )
169 #define EIO_MISSED							\
170 	__einfo_error ( EINFO_EIO_MISSED )
171 #define EINFO_EIO_MISSED						\
172 	__einfo_uniqify ( EINFO_EIO, ( 23 - 0 ),			\
173 			  "Missed service error" )
174 #define EIO_CMD_STOPPED							\
175 	__einfo_error ( EINFO_EIO_CMD_STOPPED )
176 #define EINFO_EIO_CMD_STOPPED						\
177 	__einfo_uniqify ( EINFO_EIO, ( 24 - 0 ),			\
178 			  "Command ring stopped" )
179 #define EIO_CMD_ABORTED							\
180 	__einfo_error ( EINFO_EIO_CMD_ABORTED )
181 #define EINFO_EIO_CMD_ABORTED						\
182 	__einfo_uniqify ( EINFO_EIO, ( 25 - 0 ),			\
183 			  "Command aborted" )
184 #define EIO_STOP							\
185 	__einfo_error ( EINFO_EIO_STOP )
186 #define EINFO_EIO_STOP							\
187 	__einfo_uniqify ( EINFO_EIO, ( 26 - 0 ),			\
188 			  "Stopped" )
189 #define EIO_STOP_LEN							\
190 	__einfo_error ( EINFO_EIO_STOP_LEN )
191 #define EINFO_EIO_STOP_LEN						\
192 	__einfo_uniqify ( EINFO_EIO, ( 27 - 0 ),			\
193 			  "Stopped - length invalid" )
194 #define EIO_STOP_SHORT							\
195 	__einfo_error ( EINFO_EIO_STOP_SHORT )
196 #define EINFO_EIO_STOP_SHORT						\
197 	__einfo_uniqify ( EINFO_EIO, ( 28 - 0 ),			\
198 			  "Stopped - short packet" )
199 #define EIO_LATENCY							\
200 	__einfo_error ( EINFO_EIO_LATENCY )
201 #define EINFO_EIO_LATENCY						\
202 	__einfo_uniqify ( EINFO_EIO, ( 29 - 0 ),			\
203 			  "Maximum exit latency too large" )
204 #define EIO_ISOCH							\
205 	__einfo_error ( EINFO_EIO_ISOCH )
206 #define EINFO_EIO_ISOCH							\
207 	__einfo_uniqify ( EINFO_EIO, ( 31 - 0 ),			\
208 			  "Isochronous buffer overrun" )
209 #define EPROTO_LOST							\
210 	__einfo_error ( EINFO_EPROTO_LOST )
211 #define EINFO_EPROTO_LOST						\
212 	__einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ),			\
213 			  "Event lost" )
214 #define EPROTO_UNDEFINED						\
215 	__einfo_error ( EINFO_EPROTO_UNDEFINED )
216 #define EINFO_EPROTO_UNDEFINED						\
217 	__einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ),			\
218 			  "Undefined error" )
219 #define EPROTO_STREAM_ID						\
220 	__einfo_error ( EINFO_EPROTO_STREAM_ID )
221 #define EINFO_EPROTO_STREAM_ID						\
222 	__einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ),			\
223 			  "Invalid stream ID" )
224 #define EPROTO_SECONDARY						\
225 	__einfo_error ( EINFO_EPROTO_SECONDARY )
226 #define EINFO_EPROTO_SECONDARY						\
227 	__einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ),			\
228 			  "Secondary bandwidth error" )
229 #define EPROTO_SPLIT							\
230 	__einfo_error ( EINFO_EPROTO_SPLIT )
231 #define EINFO_EPROTO_SPLIT						\
232 	__einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ),			\
233 			  "Split transaction error" )
234 #define ECODE(code)							\
235 	( ( (code) < 32 ) ?						\
236 	  EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE,	\
237 		  EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE,		\
238 		  EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE,		\
239 		  EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN,	\
240 		  EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER,		\
241 		  EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING,	\
242 		  EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED,		\
243 		  EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP,		\
244 		  EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY,		\
245 		  EIO_ISOCH ) :						\
246 	  ( (code) < 64 ) ?						\
247 	  EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST,		\
248 		  EPROTO_UNDEFINED, EPROTO_STREAM_ID,			\
249 		  EPROTO_SECONDARY, EPROTO_SPLIT ) :			\
250 	  EFAULT )
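
/* Worked example (illustrative): a completion code of 6 ("Stall
 * Error") lies in the 0-31 range and so disambiguates to EIO_STALL
 * via EUNIQ(); codes in the 32-63 range map into the EPROTO space
 * (e.g. code 36 becomes EPROTO_SPLIT); any other code collapses to a
 * plain EFAULT.
 */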
251 
252 /******************************************************************************
253  *
254  * Register access
255  *
256  ******************************************************************************
257  */
258 
259 /**
260  * Initialise device
261  *
262  * @v xhci		xHCI device
263  * @v regs		MMIO registers
264  */
static void xhci_init ( struct xhci_device *xhci, void *regs ) {
266 	uint32_t hcsparams1;
267 	uint32_t hcsparams2;
268 	uint32_t hccparams1;
269 	uint32_t pagesize;
270 	size_t caplength;
271 	size_t rtsoff;
272 	size_t dboff;
273 
274 	/* Locate capability, operational, runtime, and doorbell registers */
275 	xhci->cap = regs;
276 	caplength = readb ( xhci->cap + XHCI_CAP_CAPLENGTH );
277 	rtsoff = readl ( xhci->cap + XHCI_CAP_RTSOFF );
278 	dboff = readl ( xhci->cap + XHCI_CAP_DBOFF );
279 	xhci->op = ( xhci->cap + caplength );
280 	xhci->run = ( xhci->cap + rtsoff );
281 	xhci->db = ( xhci->cap + dboff );
282 	DBGC2 ( xhci, "XHCI %s cap %08lx op %08lx run %08lx db %08lx\n",
283 		xhci->name, virt_to_phys ( xhci->cap ),
284 		virt_to_phys ( xhci->op ), virt_to_phys ( xhci->run ),
285 		virt_to_phys ( xhci->db ) );
286 
287 	/* Read structural parameters 1 */
288 	hcsparams1 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS1 );
289 	xhci->slots = XHCI_HCSPARAMS1_SLOTS ( hcsparams1 );
290 	xhci->intrs = XHCI_HCSPARAMS1_INTRS ( hcsparams1 );
291 	xhci->ports = XHCI_HCSPARAMS1_PORTS ( hcsparams1 );
292 	DBGC ( xhci, "XHCI %s has %d slots %d intrs %d ports\n",
293 	       xhci->name, xhci->slots, xhci->intrs, xhci->ports );
294 
295 	/* Read structural parameters 2 */
296 	hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
297 	xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
298 	DBGC2 ( xhci, "XHCI %s needs %d scratchpads\n",
299 		xhci->name, xhci->scratchpads );
300 
301 	/* Read capability parameters 1 */
302 	hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
303 	xhci->addr64 = XHCI_HCCPARAMS1_ADDR64 ( hccparams1 );
304 	xhci->csz_shift = XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1 );
305 	xhci->xecp = XHCI_HCCPARAMS1_XECP ( hccparams1 );
306 
307 	/* Read page size */
308 	pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
309 	xhci->pagesize = XHCI_PAGESIZE ( pagesize );
310 	assert ( xhci->pagesize != 0 );
311 	assert ( ( ( xhci->pagesize ) & ( xhci->pagesize - 1 ) ) == 0 );
312 	DBGC2 ( xhci, "XHCI %s page size %zd bytes\n",
313 		xhci->name, xhci->pagesize );
314 }
315 
316 /**
317  * Find extended capability
318  *
319  * @v xhci		xHCI device
320  * @v id		Capability ID
321  * @v offset		Offset to previous extended capability instance, or zero
322  * @ret offset		Offset to extended capability, or zero if not found
323  */
static unsigned int xhci_extended_capability ( struct xhci_device *xhci,
325 					       unsigned int id,
326 					       unsigned int offset ) {
327 	uint32_t xecp;
328 	unsigned int next;
329 
330 	/* Locate the extended capability */
331 	while ( 1 ) {
332 
333 		/* Locate first or next capability as applicable */
334 		if ( offset ) {
335 			xecp = readl ( xhci->cap + offset );
336 			next = XHCI_XECP_NEXT ( xecp );
337 		} else {
338 			next = xhci->xecp;
339 		}
340 		if ( ! next )
341 			return 0;
342 		offset += next;
343 
344 		/* Check if this is the requested capability */
345 		xecp = readl ( xhci->cap + offset );
346 		if ( XHCI_XECP_ID ( xecp ) == id )
347 			return offset;
348 	}
349 }
350 
351 /**
352  * Write potentially 64-bit register
353  *
354  * @v xhci		xHCI device
355  * @v value		Value
356  * @v reg		Register address
357  * @ret rc		Return status code
358  */
359 static inline __attribute__ (( always_inline )) int
xhci_writeq ( struct xhci_device *xhci, physaddr_t value, void *reg ) {
361 
362 	/* If this is a 32-bit build, then this can never fail
363 	 * (allowing the compiler to optimise out the error path).
364 	 */
365 	if ( sizeof ( value ) <= sizeof ( uint32_t ) ) {
366 		writel ( value, reg );
367 		writel ( 0, ( reg + sizeof ( uint32_t ) ) );
368 		return 0;
369 	}
370 
371 	/* If the device does not support 64-bit addresses and this
372 	 * address is outside the 32-bit address space, then fail.
373 	 */
374 	if ( ( value & ~0xffffffffULL ) && ! xhci->addr64 ) {
375 		DBGC ( xhci, "XHCI %s cannot access address %lx\n",
376 		       xhci->name, value );
377 		return -ENOTSUP;
378 	}
379 
380 	/* If this is a 64-bit build, then writeq() is available */
381 	writeq ( value, reg );
382 	return 0;
383 }
384 
385 /**
386  * Calculate buffer alignment
387  *
388  * @v len		Length
389  * @ret align		Buffer alignment
390  *
391  * Determine alignment required for a buffer which must be aligned to
392  * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
393  */
static inline size_t xhci_align ( size_t len ) {
395 	size_t align;
396 
397 	/* Align to own length (rounded up to a power of two) */
398 	align = ( 1 << fls ( len - 1 ) );
399 
400 	/* Round up to XHCI_MIN_ALIGN if needed */
401 	if ( align < XHCI_MIN_ALIGN )
402 		align = XHCI_MIN_ALIGN;
403 
404 	return align;
405 }
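
/* Illustrative example (assuming XHCI_MIN_ALIGN is 64 bytes): a
 * 96-byte structure is aligned to 128 bytes, since fls ( 95 ) is 7
 * and ( 1 << 7 ) is 128, while a 24-byte structure would round to 32
 * and then be raised to the 64-byte minimum.  Aligning to at least
 * the power-of-two-rounded length ensures that the buffer cannot
 * cross a page boundary.
 */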
406 
407 /**
408  * Calculate device context offset
409  *
410  * @v xhci		xHCI device
411  * @v ctx		Context index
412  */
static inline size_t xhci_device_context_offset ( struct xhci_device *xhci,
414 						  unsigned int ctx ) {
415 
416 	return ( XHCI_DCI ( ctx ) << xhci->csz_shift );
417 }
418 
419 /**
420  * Calculate input context offset
421  *
422  * @v xhci		xHCI device
423  * @v ctx		Context index
424  */
static inline size_t xhci_input_context_offset ( struct xhci_device *xhci,
426 						 unsigned int ctx ) {
427 
428 	return ( XHCI_ICI ( ctx ) << xhci->csz_shift );
429 }
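
/* Illustrative example (assuming 32-byte contexts, i.e. a csz_shift
 * of 5): device context index 1 (the default control endpoint) lies
 * at offset 32 within the device context structure, while the
 * corresponding input context lies one slot further on (offset 64)
 * to leave room for the input control context at the start.
 */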
430 
431 /******************************************************************************
432  *
433  * Diagnostics
434  *
435  ******************************************************************************
436  */
437 
438 /**
439  * Dump host controller registers
440  *
441  * @v xhci		xHCI device
442  */
static inline void xhci_dump ( struct xhci_device *xhci ) {
444 	uint32_t usbcmd;
445 	uint32_t usbsts;
446 	uint32_t pagesize;
447 	uint32_t dnctrl;
448 	uint32_t config;
449 
450 	/* Do nothing unless debugging is enabled */
451 	if ( ! DBG_LOG )
452 		return;
453 
454 	/* Dump USBCMD */
455 	usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
456 	DBGC ( xhci, "XHCI %s USBCMD %08x%s%s\n", xhci->name, usbcmd,
457 	       ( ( usbcmd & XHCI_USBCMD_RUN ) ? " run" : "" ),
458 	       ( ( usbcmd & XHCI_USBCMD_HCRST ) ? " hcrst" : "" ) );
459 
460 	/* Dump USBSTS */
461 	usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
462 	DBGC ( xhci, "XHCI %s USBSTS %08x%s\n", xhci->name, usbsts,
463 	       ( ( usbsts & XHCI_USBSTS_HCH ) ? " hch" : "" ) );
464 
465 	/* Dump PAGESIZE */
466 	pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
467 	DBGC ( xhci, "XHCI %s PAGESIZE %08x\n", xhci->name, pagesize );
468 
469 	/* Dump DNCTRL */
470 	dnctrl = readl ( xhci->op + XHCI_OP_DNCTRL );
471 	DBGC ( xhci, "XHCI %s DNCTRL %08x\n", xhci->name, dnctrl );
472 
473 	/* Dump CONFIG */
474 	config = readl ( xhci->op + XHCI_OP_CONFIG );
475 	DBGC ( xhci, "XHCI %s CONFIG %08x\n", xhci->name, config );
476 }
477 
478 /**
479  * Dump port registers
480  *
481  * @v xhci		xHCI device
482  * @v port		Port number
483  */
static inline void xhci_dump_port ( struct xhci_device *xhci,
485 				    unsigned int port ) {
486 	uint32_t portsc;
487 	uint32_t portpmsc;
488 	uint32_t portli;
489 	uint32_t porthlpmc;
490 
491 	/* Do nothing unless debugging is enabled */
492 	if ( ! DBG_LOG )
493 		return;
494 
495 	/* Dump PORTSC */
496 	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port ) );
497 	DBGC ( xhci, "XHCI %s-%d PORTSC %08x%s%s%s%s psiv=%d\n",
498 	       xhci->name, port, portsc,
499 	       ( ( portsc & XHCI_PORTSC_CCS ) ? " ccs" : "" ),
500 	       ( ( portsc & XHCI_PORTSC_PED ) ? " ped" : "" ),
501 	       ( ( portsc & XHCI_PORTSC_PR ) ? " pr" : "" ),
502 	       ( ( portsc & XHCI_PORTSC_PP ) ? " pp" : "" ),
503 	       XHCI_PORTSC_PSIV ( portsc ) );
504 
505 	/* Dump PORTPMSC */
506 	portpmsc = readl ( xhci->op + XHCI_OP_PORTPMSC ( port ) );
507 	DBGC ( xhci, "XHCI %s-%d PORTPMSC %08x\n", xhci->name, port, portpmsc );
508 
509 	/* Dump PORTLI */
510 	portli = readl ( xhci->op + XHCI_OP_PORTLI ( port ) );
511 	DBGC ( xhci, "XHCI %s-%d PORTLI %08x\n", xhci->name, port, portli );
512 
513 	/* Dump PORTHLPMC */
514 	porthlpmc = readl ( xhci->op + XHCI_OP_PORTHLPMC ( port ) );
515 	DBGC ( xhci, "XHCI %s-%d PORTHLPMC %08x\n",
516 	       xhci->name, port, porthlpmc );
517 }
518 
519 /******************************************************************************
520  *
521  * USB legacy support
522  *
523  ******************************************************************************
524  */
525 
526 /** Prevent the release of ownership back to BIOS */
527 static int xhci_legacy_prevent_release;
528 
529 /**
530  * Initialise USB legacy support
531  *
532  * @v xhci		xHCI device
533  */
static void xhci_legacy_init ( struct xhci_device *xhci ) {
535 	unsigned int legacy;
536 	uint8_t bios;
537 
538 	/* Locate USB legacy support capability (if present) */
539 	legacy = xhci_extended_capability ( xhci, XHCI_XECP_ID_LEGACY, 0 );
540 	if ( ! legacy ) {
541 		/* Not an error; capability may not be present */
542 		DBGC ( xhci, "XHCI %s has no USB legacy support capability\n",
543 		       xhci->name );
544 		return;
545 	}
546 
547 	/* Check if legacy USB support is enabled */
548 	bios = readb ( xhci->cap + legacy + XHCI_USBLEGSUP_BIOS );
549 	if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
550 		/* Not an error; already owned by OS */
551 		DBGC ( xhci, "XHCI %s USB legacy support already disabled\n",
552 		       xhci->name );
553 		return;
554 	}
555 
556 	/* Record presence of USB legacy support capability */
557 	xhci->legacy = legacy;
558 }
559 
560 /**
561  * Claim ownership from BIOS
562  *
563  * @v xhci		xHCI device
564  */
static void xhci_legacy_claim ( struct xhci_device *xhci ) {
566 	uint32_t ctlsts;
567 	uint8_t bios;
568 	unsigned int i;
569 
570 	/* Do nothing unless legacy support capability is present */
571 	if ( ! xhci->legacy )
572 		return;
573 
574 	/* Claim ownership */
575 	writeb ( XHCI_USBLEGSUP_OS_OWNED,
576 		 xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
577 
578 	/* Wait for BIOS to release ownership */
579 	for ( i = 0 ; i < XHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) {
580 
581 		/* Check if BIOS has released ownership */
582 		bios = readb ( xhci->cap + xhci->legacy + XHCI_USBLEGSUP_BIOS );
583 		if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
584 			DBGC ( xhci, "XHCI %s claimed ownership from BIOS\n",
585 			       xhci->name );
586 			ctlsts = readl ( xhci->cap + xhci->legacy +
587 					 XHCI_USBLEGSUP_CTLSTS );
588 			if ( ctlsts ) {
589 				DBGC ( xhci, "XHCI %s warning: BIOS retained "
590 				       "SMIs: %08x\n", xhci->name, ctlsts );
591 			}
592 			return;
593 		}
594 
595 		/* Delay */
596 		mdelay ( 1 );
597 	}
598 
599 	/* BIOS did not release ownership.  Claim it forcibly by
600 	 * disabling all SMIs.
601 	 */
602 	DBGC ( xhci, "XHCI %s could not claim ownership from BIOS: forcibly "
603 	       "disabling SMIs\n", xhci->name );
604 	writel ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_CTLSTS );
605 }
606 
607 /**
608  * Release ownership back to BIOS
609  *
610  * @v xhci		xHCI device
611  */
static void xhci_legacy_release ( struct xhci_device *xhci ) {
613 
614 	/* Do nothing unless legacy support capability is present */
615 	if ( ! xhci->legacy )
616 		return;
617 
618 	/* Do nothing if releasing ownership is prevented */
619 	if ( xhci_legacy_prevent_release ) {
620 		DBGC ( xhci, "XHCI %s not releasing ownership to BIOS\n",
621 		       xhci->name );
622 		return;
623 	}
624 
625 	/* Release ownership */
626 	writeb ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
627 	DBGC ( xhci, "XHCI %s released ownership to BIOS\n", xhci->name );
628 }
629 
630 /******************************************************************************
631  *
632  * Supported protocols
633  *
634  ******************************************************************************
635  */
636 
637 /**
638  * Transcribe port speed (for debugging)
639  *
640  * @v psi		Protocol speed ID
641  * @ret speed		Transcribed speed
642  */
static inline const char * xhci_speed_name ( uint32_t psi ) {
644 	static const char *exponents[4] = { "", "k", "M", "G" };
645 	static char buf[ 10 /* "xxxxxXbps" + NUL */ ];
646 	unsigned int mantissa;
647 	unsigned int exponent;
648 
649 	/* Extract mantissa and exponent */
650 	mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
651 	exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
652 
653 	/* Transcribe speed */
654 	snprintf ( buf, sizeof ( buf ), "%d%sbps",
655 		   mantissa, exponents[exponent] );
656 	return buf;
657 }
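
/* For example (illustrative only): a PSI dword with a mantissa of 5
 * and an exponent of 3 (gigabits) is transcribed as "5Gbps", and a
 * mantissa of 480 with an exponent of 2 as "480Mbps".
 */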
658 
659 /**
660  * Find supported protocol extended capability for a port
661  *
662  * @v xhci		xHCI device
663  * @v port		Port number
664  * @ret supported	Offset to extended capability, or zero if not found
665  */
static unsigned int xhci_supported_protocol ( struct xhci_device *xhci,
667 					      unsigned int port ) {
668 	unsigned int supported = 0;
669 	unsigned int offset;
670 	unsigned int count;
671 	uint32_t ports;
672 
673 	/* Iterate over all supported protocol structures */
674 	while ( ( supported = xhci_extended_capability ( xhci,
675 							 XHCI_XECP_ID_SUPPORTED,
676 							 supported ) ) ) {
677 
678 		/* Determine port range */
679 		ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
680 		offset = XHCI_SUPPORTED_PORTS_OFFSET ( ports );
681 		count = XHCI_SUPPORTED_PORTS_COUNT ( ports );
682 
683 		/* Check if port lies within this range */
684 		if ( ( port - offset ) < count )
685 			return supported;
686 	}
687 
688 	DBGC ( xhci, "XHCI %s-%d has no supported protocol\n",
689 	       xhci->name, port );
690 	return 0;
691 }
692 
693 /**
694  * Find port protocol
695  *
696  * @v xhci		xHCI device
697  * @v port		Port number
698  * @ret protocol	USB protocol, or zero if not found
699  */
static unsigned int xhci_port_protocol ( struct xhci_device *xhci,
701 					 unsigned int port ) {
702 	unsigned int supported = xhci_supported_protocol ( xhci, port );
703 	union {
704 		uint32_t raw;
705 		char text[5];
706 	} name;
707 	unsigned int protocol;
708 	unsigned int type;
709 	unsigned int psic;
710 	unsigned int psiv;
711 	unsigned int i;
712 	uint32_t revision;
713 	uint32_t ports;
714 	uint32_t slot;
715 	uint32_t psi;
716 
717 	/* Fail if there is no supported protocol */
718 	if ( ! supported )
719 		return 0;
720 
721 	/* Determine protocol version */
722 	revision = readl ( xhci->cap + supported + XHCI_SUPPORTED_REVISION );
723 	protocol = XHCI_SUPPORTED_REVISION_VER ( revision );
724 
725 	/* Describe port protocol */
726 	if ( DBG_EXTRA ) {
727 		name.raw = cpu_to_le32 ( readl ( xhci->cap + supported +
728 						 XHCI_SUPPORTED_NAME ) );
729 		name.text[4] = '\0';
730 		slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
731 		type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
732 		DBGC2 ( xhci, "XHCI %s-%d %sv%04x type %d",
733 			xhci->name, port, name.text, protocol, type );
734 		ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
735 		psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
736 		if ( psic ) {
737 			DBGC2 ( xhci, " speeds" );
738 			for ( i = 0 ; i < psic ; i++ ) {
739 				psi = readl ( xhci->cap + supported +
740 					      XHCI_SUPPORTED_PSI ( i ) );
741 				psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
742 				DBGC2 ( xhci, " %d:%s", psiv,
743 					xhci_speed_name ( psi ) );
744 			}
745 		}
746 		if ( xhci->quirks & XHCI_BAD_PSIV )
747 			DBGC2 ( xhci, " (ignored)" );
748 		DBGC2 ( xhci, "\n" );
749 	}
750 
751 	return protocol;
752 }
753 
754 /**
755  * Find port slot type
756  *
757  * @v xhci		xHCI device
758  * @v port		Port number
759  * @ret type		Slot type, or negative error
760  */
static int xhci_port_slot_type ( struct xhci_device *xhci, unsigned int port ) {
762 	unsigned int supported = xhci_supported_protocol ( xhci, port );
763 	unsigned int type;
764 	uint32_t slot;
765 
766 	/* Fail if there is no supported protocol */
767 	if ( ! supported )
768 		return -ENOTSUP;
769 
770 	/* Get slot type */
771 	slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
772 	type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
773 
774 	return type;
775 }
776 
777 /**
778  * Find port speed
779  *
780  * @v xhci		xHCI device
781  * @v port		Port number
782  * @v psiv		Protocol speed ID value
783  * @ret speed		Port speed, or negative error
784  */
static int xhci_port_speed ( struct xhci_device *xhci, unsigned int port,
786 			     unsigned int psiv ) {
787 	unsigned int supported = xhci_supported_protocol ( xhci, port );
788 	unsigned int psic;
789 	unsigned int mantissa;
790 	unsigned int exponent;
791 	unsigned int speed;
792 	unsigned int i;
793 	uint32_t ports;
794 	uint32_t psi;
795 
796 	/* Fail if there is no supported protocol */
797 	if ( ! supported )
798 		return -ENOTSUP;
799 
800 	/* Get protocol speed ID count */
801 	ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
802 	psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
803 
804 	/* Use protocol speed ID table unless device is known to be faulty */
805 	if ( ! ( xhci->quirks & XHCI_BAD_PSIV ) ) {
806 
807 		/* Iterate over PSI dwords looking for a match */
808 		for ( i = 0 ; i < psic ; i++ ) {
809 			psi = readl ( xhci->cap + supported +
810 				      XHCI_SUPPORTED_PSI ( i ) );
811 			if ( psiv == XHCI_SUPPORTED_PSI_VALUE ( psi ) ) {
812 				mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
813 				exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
814 				speed = USB_SPEED ( mantissa, exponent );
815 				return speed;
816 			}
817 		}
818 
819 		/* Record device as faulty if no match is found */
820 		if ( psic != 0 ) {
821 			DBGC ( xhci, "XHCI %s-%d spurious PSI value %d: "
822 			       "assuming PSI table is invalid\n",
823 			       xhci->name, port, psiv );
824 			xhci->quirks |= XHCI_BAD_PSIV;
825 		}
826 	}
827 
828 	/* Use the default mappings */
829 	switch ( psiv ) {
830 	case XHCI_SPEED_LOW :	return USB_SPEED_LOW;
831 	case XHCI_SPEED_FULL :	return USB_SPEED_FULL;
832 	case XHCI_SPEED_HIGH :	return USB_SPEED_HIGH;
833 	case XHCI_SPEED_SUPER :	return USB_SPEED_SUPER;
834 	default:
835 		DBGC ( xhci, "XHCI %s-%d unrecognised PSI value %d\n",
836 		       xhci->name, port, psiv );
837 		return -ENOTSUP;
838 	}
839 }
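
/* Illustrative example: a SuperSpeed port will typically advertise a
 * PSI dword with PSI value 4, mantissa 5 and exponent 3 (5 Gbps),
 * which is returned here as USB_SPEED ( 5, 3 ); on controllers with
 * no PSI table (or one marked faulty via XHCI_BAD_PSIV) the same
 * PSIV of 4 falls back to the default mapping and is reported as
 * USB_SPEED_SUPER.
 */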
840 
841 /**
842  * Find protocol speed ID value
843  *
844  * @v xhci		xHCI device
845  * @v port		Port number
846  * @v speed		USB speed
847  * @ret psiv		Protocol speed ID value, or negative error
848  */
static int xhci_port_psiv ( struct xhci_device *xhci, unsigned int port,
850 			    unsigned int speed ) {
851 	unsigned int supported = xhci_supported_protocol ( xhci, port );
852 	unsigned int psic;
853 	unsigned int mantissa;
854 	unsigned int exponent;
855 	unsigned int psiv;
856 	unsigned int i;
857 	uint32_t ports;
858 	uint32_t psi;
859 
860 	/* Fail if there is no supported protocol */
861 	if ( ! supported )
862 		return -ENOTSUP;
863 
864 	/* Get protocol speed ID count */
865 	ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
866 	psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
867 
868 	/* Use the default mappings if applicable */
869 	if ( ( psic == 0 ) || ( xhci->quirks & XHCI_BAD_PSIV ) ) {
870 		switch ( speed ) {
871 		case USB_SPEED_LOW :	return XHCI_SPEED_LOW;
872 		case USB_SPEED_FULL :	return XHCI_SPEED_FULL;
873 		case USB_SPEED_HIGH :	return XHCI_SPEED_HIGH;
874 		case USB_SPEED_SUPER :	return XHCI_SPEED_SUPER;
875 		default:
876 			DBGC ( xhci, "XHCI %s-%d non-standard speed %d\n",
877 			       xhci->name, port, speed );
878 			return -ENOTSUP;
879 		}
880 	}
881 
882 	/* Iterate over PSI dwords looking for a match */
883 	for ( i = 0 ; i < psic ; i++ ) {
884 		psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
885 		mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
886 		exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
887 		if ( speed == USB_SPEED ( mantissa, exponent ) ) {
888 			psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
889 			return psiv;
890 		}
891 	}
892 
893 	DBGC ( xhci, "XHCI %s-%d unrepresentable speed %#x\n",
894 	       xhci->name, port, speed );
895 	return -ENOENT;
896 }
897 
898 /******************************************************************************
899  *
900  * Device context base address array
901  *
902  ******************************************************************************
903  */
904 
905 /**
906  * Allocate device context base address array
907  *
908  * @v xhci		xHCI device
909  * @ret rc		Return status code
910  */
static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
912 	size_t len;
913 	physaddr_t dcbaap;
914 	int rc;
915 
916 	/* Allocate and initialise structure.  Must be at least
917 	 * 64-byte aligned and must not cross a page boundary, so
918 	 * align on its own size (rounded up to a power of two and
919 	 * with a minimum of 64 bytes).
920 	 */
921 	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
922 	xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) );
923 	if ( ! xhci->dcbaa ) {
924 		DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name );
925 		rc = -ENOMEM;
926 		goto err_alloc;
927 	}
928 	memset ( xhci->dcbaa, 0, len );
929 
930 	/* Program DCBAA pointer */
931 	dcbaap = virt_to_phys ( xhci->dcbaa );
932 	if ( ( rc = xhci_writeq ( xhci, dcbaap,
933 				  xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
934 		goto err_writeq;
935 
936 	DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n",
937 		xhci->name, dcbaap, ( dcbaap + len ) );
938 	return 0;
939 
940  err_writeq:
941 	free_dma ( xhci->dcbaa, len );
942  err_alloc:
943 	return rc;
944 }
945 
946 /**
947  * Free device context base address array
948  *
949  * @v xhci		xHCI device
950  */
static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
952 	size_t len;
953 	unsigned int i;
954 
955 	/* Sanity check */
956 	for ( i = 0 ; i <= xhci->slots ; i++ )
957 		assert ( xhci->dcbaa[i] == 0 );
958 
959 	/* Clear DCBAA pointer */
960 	xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
961 
962 	/* Free DCBAA */
963 	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
964 	free_dma ( xhci->dcbaa, len );
965 }
966 
967 /******************************************************************************
968  *
969  * Scratchpad buffers
970  *
971  ******************************************************************************
972  */
973 
974 /**
975  * Allocate scratchpad buffers
976  *
977  * @v xhci		xHCI device
978  * @ret rc		Return status code
979  */
static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
981 	size_t array_len;
982 	size_t len;
983 	physaddr_t phys;
984 	unsigned int i;
985 	int rc;
986 
987 	/* Do nothing if no scratchpad buffers are used */
988 	if ( ! xhci->scratchpads )
989 		return 0;
990 
991 	/* Allocate scratchpads */
992 	len = ( xhci->scratchpads * xhci->pagesize );
993 	xhci->scratchpad = umalloc ( len );
994 	if ( ! xhci->scratchpad ) {
995 		DBGC ( xhci, "XHCI %s could not allocate scratchpad buffers\n",
996 		       xhci->name );
997 		rc = -ENOMEM;
998 		goto err_alloc;
999 	}
1000 	memset_user ( xhci->scratchpad, 0, 0, len );
1001 
1002 	/* Allocate scratchpad array */
1003 	array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
1004 	xhci->scratchpad_array =
1005 		malloc_dma ( array_len, xhci_align ( array_len ) );
1006 	if ( ! xhci->scratchpad_array ) {
1007 		DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer "
1008 		       "array\n", xhci->name );
1009 		rc = -ENOMEM;
1010 		goto err_alloc_array;
1011 	}
1012 
1013 	/* Populate scratchpad array */
1014 	for ( i = 0 ; i < xhci->scratchpads ; i++ ) {
1015 		phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize ));
1016 		xhci->scratchpad_array[i] = phys;
1017 	}
1018 
1019 	/* Set scratchpad array pointer */
1020 	assert ( xhci->dcbaa != NULL );
1021 	xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array ));
1022 
1023 	DBGC2 ( xhci, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
1024 		xhci->name, user_to_phys ( xhci->scratchpad, 0 ),
1025 		user_to_phys ( xhci->scratchpad, len ),
1026 		virt_to_phys ( xhci->scratchpad_array ),
1027 		( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
1028 	return 0;
1029 
1030 	free_dma ( xhci->scratchpad_array, array_len );
1031  err_alloc_array:
1032 	ufree ( xhci->scratchpad );
1033  err_alloc:
1034 	return rc;
1035 }
1036 
1037 /**
1038  * Free scratchpad buffers
1039  *
1040  * @v xhci		xHCI device
1041  */
static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
1043 	size_t array_len;
1044 
1045 	/* Do nothing if no scratchpad buffers are used */
1046 	if ( ! xhci->scratchpads )
1047 		return;
1048 
1049 	/* Clear scratchpad array pointer */
1050 	assert ( xhci->dcbaa != NULL );
1051 	xhci->dcbaa[0] = 0;
1052 
1053 	/* Free scratchpad array */
1054 	array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
1055 	free_dma ( xhci->scratchpad_array, array_len );
1056 
1057 	/* Free scratchpads */
1058 	ufree ( xhci->scratchpad );
1059 }
1060 
1061 /******************************************************************************
1062  *
1063  * Run / stop / reset
1064  *
1065  ******************************************************************************
1066  */
1067 
1068 /**
1069  * Start xHCI device
1070  *
1071  * @v xhci		xHCI device
1072  */
static void xhci_run ( struct xhci_device *xhci ) {
1074 	uint32_t config;
1075 	uint32_t usbcmd;
1076 
1077 	/* Configure number of device slots */
1078 	config = readl ( xhci->op + XHCI_OP_CONFIG );
1079 	config &= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK;
1080 	config |= XHCI_CONFIG_MAX_SLOTS_EN ( xhci->slots );
1081 	writel ( config, xhci->op + XHCI_OP_CONFIG );
1082 
1083 	/* Set run/stop bit */
1084 	usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1085 	usbcmd |= XHCI_USBCMD_RUN;
1086 	writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1087 }
1088 
1089 /**
1090  * Stop xHCI device
1091  *
1092  * @v xhci		xHCI device
1093  * @ret rc		Return status code
1094  */
static int xhci_stop ( struct xhci_device *xhci ) {
1096 	uint32_t usbcmd;
1097 	uint32_t usbsts;
1098 	unsigned int i;
1099 
1100 	/* Clear run/stop bit */
1101 	usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1102 	usbcmd &= ~XHCI_USBCMD_RUN;
1103 	writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
1104 
1105 	/* Wait for device to stop */
1106 	for ( i = 0 ; i < XHCI_STOP_MAX_WAIT_MS ; i++ ) {
1107 
1108 		/* Check if device is stopped */
1109 		usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
1110 		if ( usbsts & XHCI_USBSTS_HCH )
1111 			return 0;
1112 
1113 		/* Delay */
1114 		mdelay ( 1 );
1115 	}
1116 
1117 	DBGC ( xhci, "XHCI %s timed out waiting for stop\n", xhci->name );
1118 	return -ETIMEDOUT;
1119 }
1120 
1121 /**
1122  * Reset xHCI device
1123  *
1124  * @v xhci		xHCI device
1125  * @ret rc		Return status code
1126  */
static int xhci_reset ( struct xhci_device *xhci ) {
1128 	uint32_t usbcmd;
1129 	unsigned int i;
1130 	int rc;
1131 
1132 	/* The xHCI specification states that resetting a running
1133 	 * device may result in undefined behaviour, so try stopping
1134 	 * it first.
1135 	 */
1136 	if ( ( rc = xhci_stop ( xhci ) ) != 0 ) {
1137 		/* Ignore errors and attempt to reset the device anyway */
1138 	}
1139 
1140 	/* Reset device */
1141 	writel ( XHCI_USBCMD_HCRST, xhci->op + XHCI_OP_USBCMD );
1142 
1143 	/* Wait for reset to complete */
1144 	for ( i = 0 ; i < XHCI_RESET_MAX_WAIT_MS ; i++ ) {
1145 
1146 		/* Check if reset is complete */
1147 		usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
1148 		if ( ! ( usbcmd & XHCI_USBCMD_HCRST ) )
1149 			return 0;
1150 
1151 		/* Delay */
1152 		mdelay ( 1 );
1153 	}
1154 
1155 	DBGC ( xhci, "XHCI %s timed out waiting for reset\n", xhci->name );
1156 	return -ETIMEDOUT;
1157 }
1158 
1159 /******************************************************************************
1160  *
1161  * Transfer request blocks
1162  *
1163  ******************************************************************************
1164  */
1165 
1166 /**
1167  * Allocate transfer request block ring
1168  *
1169  * @v xhci		xHCI device
1170  * @v ring		TRB ring
1171  * @v shift		Ring size (log2)
1172  * @v slot		Device slot
1173  * @v target		Doorbell target
1174  * @v stream		Doorbell stream ID
1175  * @ret rc		Return status code
1176  */
static int xhci_ring_alloc ( struct xhci_device *xhci,
1178 			     struct xhci_trb_ring *ring,
1179 			     unsigned int shift, unsigned int slot,
1180 			     unsigned int target, unsigned int stream ) {
1181 	struct xhci_trb_link *link;
1182 	unsigned int count;
1183 	int rc;
1184 
1185 	/* Sanity check */
1186 	assert ( shift > 0 );
1187 
1188 	/* Initialise structure */
1189 	memset ( ring, 0, sizeof ( *ring ) );
1190 	ring->shift = shift;
1191 	count = ( 1U << shift );
1192 	ring->mask = ( count - 1 );
1193 	ring->len = ( ( count + 1 /* Link TRB */ ) * sizeof ( ring->trb[0] ) );
1194 	ring->db = ( xhci->db + ( slot * sizeof ( ring->dbval ) ) );
1195 	ring->dbval = XHCI_DBVAL ( target, stream );
1196 
1197 	/* Allocate I/O buffers */
1198 	ring->iobuf = zalloc ( count * sizeof ( ring->iobuf[0] ) );
1199 	if ( ! ring->iobuf ) {
1200 		rc = -ENOMEM;
1201 		goto err_alloc_iobuf;
1202 	}
1203 
1204 	/* Allocate TRBs */
1205 	ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) );
1206 	if ( ! ring->trb ) {
1207 		rc = -ENOMEM;
1208 		goto err_alloc_trb;
1209 	}
1210 	memset ( ring->trb, 0, ring->len );
1211 
1212 	/* Initialise Link TRB */
1213 	link = &ring->trb[count].link;
1214 	link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) );
1215 	link->flags = XHCI_TRB_TC;
1216 	link->type = XHCI_TRB_LINK;
1217 	ring->link = link;
1218 
1219 	return 0;
1220 
1221 	free_dma ( ring->trb, ring->len );
1222  err_alloc_trb:
1223 	free ( ring->iobuf );
1224  err_alloc_iobuf:
1225 	return rc;
1226 }
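
/* Sizing example (illustrative): a shift of 3 gives an eight-entry
 * ring plus one Link TRB, so ring->len covers nine TRBs.  The Link
 * TRB points back to the start of the ring and has its Toggle Cycle
 * flag set, so the consumer's cycle state inverts on every pass
 * around the ring.
 */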
1227 
1228 /**
1229  * Reset transfer request block ring
1230  *
1231  * @v ring		TRB ring
1232  */
static void xhci_ring_reset ( struct xhci_trb_ring *ring ) {
1234 	unsigned int count = ( 1U << ring->shift );
1235 
1236 	/* Reset producer and consumer counters */
1237 	ring->prod = 0;
1238 	ring->cons = 0;
1239 
1240 	/* Reset TRBs (except Link TRB) */
1241 	memset ( ring->trb, 0, ( count * sizeof ( ring->trb[0] ) ) );
1242 }
1243 
1244 /**
1245  * Free transfer request block ring
1246  *
1247  * @v ring		TRB ring
1248  */
static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
1250 	unsigned int count = ( 1U << ring->shift );
1251 	unsigned int i;
1252 
1253 	/* Sanity checks */
1254 	assert ( ring->cons == ring->prod );
1255 	for ( i = 0 ; i < count ; i++ )
1256 		assert ( ring->iobuf[i] == NULL );
1257 
1258 	/* Free TRBs */
1259 	free_dma ( ring->trb, ring->len );
1260 
1261 	/* Free I/O buffers */
1262 	free ( ring->iobuf );
1263 }
1264 
1265 /**
1266  * Enqueue a transfer request block
1267  *
1268  * @v ring		TRB ring
1269  * @v iobuf		I/O buffer (if any)
1270  * @v trb		Transfer request block (with empty Cycle flag)
1271  * @ret rc		Return status code
1272  *
1273  * This operation does not implicitly ring the doorbell register.
1274  */
static int xhci_enqueue ( struct xhci_trb_ring *ring, struct io_buffer *iobuf,
1276 			  const union xhci_trb *trb ) {
1277 	union xhci_trb *dest;
1278 	unsigned int prod;
1279 	unsigned int mask;
1280 	unsigned int index;
1281 	unsigned int cycle;
1282 
1283 	/* Sanity check */
1284 	assert ( ! ( trb->common.flags & XHCI_TRB_C ) );
1285 
1286 	/* Fail if ring is full */
1287 	if ( ! xhci_ring_remaining ( ring ) )
1288 		return -ENOBUFS;
1289 
1290 	/* Update producer counter (and link TRB, if applicable) */
1291 	prod = ring->prod++;
1292 	mask = ring->mask;
1293 	cycle = ( ( ~( prod >> ring->shift ) ) & XHCI_TRB_C );
1294 	index = ( prod & mask );
1295 	if ( index == 0 )
1296 		ring->link->flags = ( XHCI_TRB_TC | ( cycle ^ XHCI_TRB_C ) );
1297 
1298 	/* Record I/O buffer */
1299 	ring->iobuf[index] = iobuf;
1300 
1301 	/* Enqueue TRB */
1302 	dest = &ring->trb[index];
1303 	dest->template.parameter = trb->template.parameter;
1304 	dest->template.status = trb->template.status;
1305 	wmb();
1306 	dest->template.control = ( trb->template.control |
1307 				   cpu_to_le32 ( cycle ) );
1308 
1309 	return 0;
1310 }
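
/* Cycle bit example (illustrative, for a ring with shift 3): producer
 * counters 0-7 enqueue TRBs with the Cycle bit set; at the first wrap
 * (producer counter 8, index 0) new TRBs are written with the Cycle
 * bit clear, and the Link TRB is rewritten with the previous pass's
 * cycle state so that it remains valid to the hardware, whose own
 * cycle state then toggles via the Link TRB's Toggle Cycle flag.
 */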
1311 
1312 /**
1313  * Dequeue a transfer request block
1314  *
1315  * @v ring		TRB ring
1316  * @ret iobuf		I/O buffer
1317  */
static struct io_buffer * xhci_dequeue ( struct xhci_trb_ring *ring ) {
1319 	struct io_buffer *iobuf;
1320 	unsigned int cons;
1321 	unsigned int mask;
1322 	unsigned int index;
1323 
1324 	/* Sanity check */
1325 	assert ( xhci_ring_fill ( ring ) != 0 );
1326 
1327 	/* Update consumer counter */
1328 	cons = ring->cons++;
1329 	mask = ring->mask;
1330 	index = ( cons & mask );
1331 
1332 	/* Retrieve I/O buffer */
1333 	iobuf = ring->iobuf[index];
1334 	ring->iobuf[index] = NULL;
1335 
1336 	return iobuf;
1337 }
1338 
1339 /**
1340  * Enqueue multiple transfer request blocks
1341  *
1342  * @v ring		TRB ring
1343  * @v iobuf		I/O buffer
1344  * @v trbs		Transfer request blocks (with empty Cycle flag)
1345  * @v count		Number of transfer request blocks
1346  * @ret rc		Return status code
1347  *
1348  * This operation does not implicitly ring the doorbell register.
1349  */
static int xhci_enqueue_multi ( struct xhci_trb_ring *ring,
1351 				struct io_buffer *iobuf,
1352 				const union xhci_trb *trbs,
1353 				unsigned int count ) {
1354 	const union xhci_trb *trb = trbs;
1355 	int rc;
1356 
1357 	/* Sanity check */
1358 	assert ( iobuf != NULL );
1359 
1360 	/* Fail if ring does not have sufficient space */
1361 	if ( xhci_ring_remaining ( ring ) < count )
1362 		return -ENOBUFS;
1363 
1364 	/* Enqueue each TRB, recording the I/O buffer with the final TRB */
1365 	while ( count-- ) {
1366 		rc = xhci_enqueue ( ring, ( count ? NULL : iobuf ), trb++ );
1367 		assert ( rc == 0 ); /* Should never be able to fail */
1368 	}
1369 
1370 	return 0;
1371 }
1372 
1373 /**
1374  * Dequeue multiple transfer request blocks
1375  *
1376  * @v ring		TRB ring
1377  * @ret iobuf		I/O buffer
1378  */
static struct io_buffer * xhci_dequeue_multi ( struct xhci_trb_ring *ring ) {
1380 	struct io_buffer *iobuf;
1381 
1382 	/* Dequeue TRBs until we reach the final TRB for an I/O buffer */
1383 	do {
1384 		iobuf = xhci_dequeue ( ring );
1385 	} while ( iobuf == NULL );
1386 
1387 	return iobuf;
1388 }
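
/* Pairing note (illustrative): xhci_enqueue_multi() records the I/O
 * buffer only against the final TRB of a multi-TRB transfer, leaving
 * NULL entries for the earlier TRBs, so xhci_dequeue_multi() simply
 * discards dequeued entries until it reaches the one carrying the
 * buffer, keeping the producer and consumer counters in step for
 * transfers of any length.
 */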
1389 
1390 /**
1391  * Ring doorbell register
1392  *
1393  * @v ring		TRB ring
1394  */
1395 static inline __attribute__ (( always_inline )) void
xhci_doorbell ( struct xhci_trb_ring *ring ) {
1397 
1398 	wmb();
1399 	writel ( ring->dbval, ring->db );
1400 }
1401 
1402 /******************************************************************************
1403  *
1404  * Command and event rings
1405  *
1406  ******************************************************************************
1407  */
1408 
1409 /**
1410  * Allocate command ring
1411  *
1412  * @v xhci		xHCI device
1413  * @ret rc		Return status code
1414  */
static int xhci_command_alloc ( struct xhci_device *xhci ) {
1416 	physaddr_t crp;
1417 	int rc;
1418 
1419 	/* Allocate TRB ring */
1420 	if ( ( rc = xhci_ring_alloc ( xhci, &xhci->command, XHCI_CMD_TRBS_LOG2,
1421 				      0, 0, 0 ) ) != 0 )
1422 		goto err_ring_alloc;
1423 
1424 	/* Program command ring control register */
1425 	crp = virt_to_phys ( xhci->command.trb );
1426 	if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
1427 				  xhci->op + XHCI_OP_CRCR ) ) != 0 )
1428 		goto err_writeq;
1429 
1430 	DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n",
1431 		xhci->name, crp, ( crp + xhci->command.len ) );
1432 	return 0;
1433 
1434  err_writeq:
1435 	xhci_ring_free ( &xhci->command );
1436  err_ring_alloc:
1437 	return rc;
1438 }
1439 
1440 /**
1441  * Free command ring
1442  *
1443  * @v xhci		xHCI device
1444  */
static void xhci_command_free ( struct xhci_device *xhci ) {
1446 
1447 	/* Sanity check */
1448 	assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
1449 
1450 	/* Clear command ring control register */
1451 	xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_CRCR );
1452 
1453 	/* Free TRB ring */
1454 	xhci_ring_free ( &xhci->command );
1455 }
1456 
1457 /**
1458  * Allocate event ring
1459  *
1460  * @v xhci		xHCI device
1461  * @ret rc		Return status code
1462  */
static int xhci_event_alloc ( struct xhci_device *xhci ) {
1464 	struct xhci_event_ring *event = &xhci->event;
1465 	unsigned int count;
1466 	size_t len;
1467 	int rc;
1468 
1469 	/* Allocate event ring */
1470 	count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1471 	len = ( count * sizeof ( event->trb[0] ) );
1472 	event->trb = malloc_dma ( len, xhci_align ( len ) );
1473 	if ( ! event->trb ) {
1474 		rc = -ENOMEM;
1475 		goto err_alloc_trb;
1476 	}
1477 	memset ( event->trb, 0, len );
1478 
1479 	/* Allocate event ring segment table */
1480 	event->segment = malloc_dma ( sizeof ( event->segment[0] ),
1481 				      xhci_align ( sizeof (event->segment[0])));
1482 	if ( ! event->segment ) {
1483 		rc = -ENOMEM;
1484 		goto err_alloc_segment;
1485 	}
1486 	memset ( event->segment, 0, sizeof ( event->segment[0] ) );
1487 	event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
1488 	event->segment[0].count = cpu_to_le32 ( count );
1489 
1490 	/* Program event ring registers */
1491 	writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1492 	if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
1493 				  xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
1494 		goto err_writeq_erdp;
1495 	if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
1496 				  xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
1497 		goto err_writeq_erstba;
1498 
1499 	DBGC2 ( xhci, "XHCI %s event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
1500 		xhci->name, virt_to_phys ( event->trb ),
1501 		( virt_to_phys ( event->trb ) + len ),
1502 		virt_to_phys ( event->segment ),
1503 		( virt_to_phys ( event->segment ) +
1504 		  sizeof (event->segment[0] ) ) );
1505 	return 0;
1506 
1507 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1508  err_writeq_erstba:
1509 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1510  err_writeq_erdp:
	free_dma ( event->segment, sizeof ( event->segment[0] ) );
 err_alloc_segment:
	free_dma ( event->trb, len );
1514  err_alloc_trb:
1515 	return rc;
1516 }
1517 
1518 /**
1519  * Free event ring
1520  *
1521  * @v xhci		xHCI device
1522  */
static void xhci_event_free ( struct xhci_device *xhci ) {
1524 	struct xhci_event_ring *event = &xhci->event;
1525 	unsigned int count;
1526 	size_t len;
1527 
1528 	/* Clear event ring registers */
1529 	writel ( 0, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
1530 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
1531 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
1532 
1533 	/* Free event ring segment table */
1534 	free_dma ( event->segment, sizeof ( event->segment[0] ) );
1535 
1536 	/* Free event ring */
1537 	count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
1538 	len = ( count * sizeof ( event->trb[0] ) );
1539 	free_dma ( event->trb, len );
1540 }
1541 
1542 /**
1543  * Handle transfer event
1544  *
1545  * @v xhci		xHCI device
1546  * @v trb		Transfer event TRB
1547  */
static void xhci_transfer ( struct xhci_device *xhci,
1549 			    struct xhci_trb_transfer *trb ) {
1550 	struct xhci_slot *slot;
1551 	struct xhci_endpoint *endpoint;
1552 	struct io_buffer *iobuf;
1553 	int rc;
1554 
1555 	/* Profile transfer events */
1556 	profile_start ( &xhci_transfer_profiler );
1557 
1558 	/* Identify slot */
1559 	if ( ( trb->slot > xhci->slots ) ||
1560 	     ( ( slot = xhci->slot[trb->slot] ) == NULL ) ) {
1561 		DBGC ( xhci, "XHCI %s transfer event invalid slot %d:\n",
1562 		       xhci->name, trb->slot );
1563 		DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1564 		return;
1565 	}
1566 
1567 	/* Identify endpoint */
1568 	if ( ( trb->endpoint >= XHCI_CTX_END ) ||
1569 	     ( ( endpoint = slot->endpoint[trb->endpoint] ) == NULL ) ) {
1570 		DBGC ( xhci, "XHCI %s slot %d transfer event invalid epid "
1571 		       "%d:\n", xhci->name, slot->id, trb->endpoint );
1572 		DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1573 		return;
1574 	}
1575 
1576 	/* Dequeue TRB(s) */
1577 	iobuf = xhci_dequeue_multi ( &endpoint->ring );
1578 	assert ( iobuf != NULL );
1579 
1580 	/* Check for errors */
1581 	if ( ! ( ( trb->code == XHCI_CMPLT_SUCCESS ) ||
1582 		 ( trb->code == XHCI_CMPLT_SHORT ) ) ) {
1583 
1584 		/* Construct error */
1585 		rc = -ECODE ( trb->code );
1586 		DBGC ( xhci, "XHCI %s slot %d ctx %d failed (code %d): %s\n",
1587 		       xhci->name, slot->id, endpoint->ctx, trb->code,
1588 		       strerror ( rc ) );
1589 		DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1590 
1591 		/* Sanity check */
1592 		assert ( ( endpoint->context->state & XHCI_ENDPOINT_STATE_MASK )
1593 			 != XHCI_ENDPOINT_RUNNING );
1594 
1595 		/* Report failure to USB core */
1596 		usb_complete_err ( endpoint->ep, iobuf, rc );
1597 		return;
1598 	}
1599 
1600 	/* Record actual transfer size */
1601 	iob_unput ( iobuf, le16_to_cpu ( trb->residual ) );
1602 
1603 	/* Sanity check (for successful completions only) */
1604 	assert ( xhci_ring_consumed ( &endpoint->ring ) ==
1605 		 le64_to_cpu ( trb->transfer ) );
1606 
1607 	/* Report completion to USB core */
1608 	usb_complete ( endpoint->ep, iobuf );
1609 	profile_stop ( &xhci_transfer_profiler );
1610 }
1611 
1612 /**
1613  * Handle command completion event
1614  *
1615  * @v xhci		xHCI device
1616  * @v trb		Command completion event
1617  */
static void xhci_complete ( struct xhci_device *xhci,
1619 			    struct xhci_trb_complete *trb ) {
1620 	int rc;
1621 
1622 	/* Ignore "command ring stopped" notifications */
1623 	if ( trb->code == XHCI_CMPLT_CMD_STOPPED ) {
1624 		DBGC2 ( xhci, "XHCI %s command ring stopped\n", xhci->name );
1625 		return;
1626 	}
1627 
1628 	/* Ignore unexpected completions */
1629 	if ( ! xhci->pending ) {
1630 		rc = -ECODE ( trb->code );
1631 		DBGC ( xhci, "XHCI %s unexpected completion (code %d): %s\n",
1632 		       xhci->name, trb->code, strerror ( rc ) );
1633 		DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1634 		return;
1635 	}
1636 
1637 	/* Dequeue command TRB */
1638 	xhci_dequeue ( &xhci->command );
1639 
1640 	/* Sanity check */
1641 	assert ( xhci_ring_consumed ( &xhci->command ) ==
1642 		 le64_to_cpu ( trb->command ) );
1643 
1644 	/* Record completion */
1645 	memcpy ( xhci->pending, trb, sizeof ( *xhci->pending ) );
1646 	xhci->pending = NULL;
1647 }
1648 
1649 /**
1650  * Handle port status event
1651  *
1652  * @v xhci		xHCI device
1653  * @v trb		Port status event
1654  */
static void xhci_port_status ( struct xhci_device *xhci,
1656 			       struct xhci_trb_port_status *trb ) {
1657 	struct usb_port *port = usb_port ( xhci->bus->hub, trb->port );
1658 	uint32_t portsc;
1659 
1660 	/* Sanity check */
1661 	assert ( ( trb->port > 0 ) && ( trb->port <= xhci->ports ) );
1662 
1663 	/* Record disconnections and clear changes */
1664 	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( trb->port ) );
1665 	port->disconnected |= ( portsc & XHCI_PORTSC_CSC );
1666 	portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
1667 	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( trb->port ) );
1668 
1669 	/* Report port status change */
1670 	usb_port_changed ( port );
1671 }
1672 
1673 /**
1674  * Handle host controller event
1675  *
1676  * @v xhci		xHCI device
1677  * @v trb		Host controller event
1678  */
static void xhci_host_controller ( struct xhci_device *xhci,
1680 				   struct xhci_trb_host_controller *trb ) {
1681 	int rc;
1682 
1683 	/* Construct error */
1684 	rc = -ECODE ( trb->code );
1685 	DBGC ( xhci, "XHCI %s host controller event (code %d): %s\n",
1686 	       xhci->name, trb->code, strerror ( rc ) );
1687 }
1688 
1689 /**
1690  * Poll event ring
1691  *
1692  * @v xhci		xHCI device
1693  */
static void xhci_event_poll ( struct xhci_device *xhci ) {
1695 	struct xhci_event_ring *event = &xhci->event;
1696 	union xhci_trb *trb;
1697 	unsigned int shift = XHCI_EVENT_TRBS_LOG2;
1698 	unsigned int count = ( 1 << shift );
1699 	unsigned int mask = ( count - 1 );
1700 	unsigned int consumed;
1701 	unsigned int type;
1702 
1703 	/* Poll for events */
1704 	profile_start ( &xhci_event_profiler );
1705 	for ( consumed = 0 ; ; consumed++ ) {
1706 
1707 		/* Stop if we reach an empty TRB */
1708 		rmb();
1709 		trb = &event->trb[ event->cons & mask ];
1710 		if ( ! ( ( trb->common.flags ^
1711 			   ( event->cons >> shift ) ) & XHCI_TRB_C ) )
1712 			break;
1713 
1714 		/* Consume this TRB */
1715 		event->cons++;
1716 
1717 		/* Handle TRB */
1718 		type = ( trb->common.type & XHCI_TRB_TYPE_MASK );
1719 		switch ( type ) {
1720 
1721 		case XHCI_TRB_TRANSFER :
1722 			xhci_transfer ( xhci, &trb->transfer );
1723 			break;
1724 
1725 		case XHCI_TRB_COMPLETE :
1726 			xhci_complete ( xhci, &trb->complete );
1727 			break;
1728 
1729 		case XHCI_TRB_PORT_STATUS:
1730 			xhci_port_status ( xhci, &trb->port );
1731 			break;
1732 
1733 		case XHCI_TRB_HOST_CONTROLLER:
1734 			xhci_host_controller ( xhci, &trb->host );
1735 			break;
1736 
1737 		default:
1738 			DBGC ( xhci, "XHCI %s unrecognised event %#x:\n",
1739 			       xhci->name, ( event->cons - 1 ) );
1740 			DBGC_HDA ( xhci, virt_to_phys ( trb ),
1741 				   trb, sizeof ( *trb ) );
1742 			break;
1743 		}
1744 	}
1745 
1746 	/* Update dequeue pointer if applicable */
1747 	if ( consumed ) {
1748 		xhci_writeq ( xhci, virt_to_phys ( trb ),
1749 			      xhci->run + XHCI_RUN_ERDP ( 0 ) );
1750 		profile_stop ( &xhci_event_profiler );
1751 	}
1752 }
1753 
1754 /**
1755  * Abort command
1756  *
1757  * @v xhci		xHCI device
1758  */
1759 static void xhci_abort ( struct xhci_device *xhci ) {
1760 	physaddr_t crp;
1761 
1762 	/* Abort the command */
1763 	DBGC2 ( xhci, "XHCI %s aborting command\n", xhci->name );
1764 	xhci_writeq ( xhci, XHCI_CRCR_CA, xhci->op + XHCI_OP_CRCR );
1765 
1766 	/* Allow time for command to abort */
1767 	mdelay ( XHCI_COMMAND_ABORT_DELAY_MS );
1768 
1769 	/* Sanity check */
1770 	assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
1771 
1772 	/* Consume (and ignore) any final command status */
1773 	xhci_event_poll ( xhci );
1774 
1775 	/* Reset the command ring control register */
1776 	xhci_ring_reset ( &xhci->command );
1777 	crp = virt_to_phys ( xhci->command.trb );
1778 	xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR );
1779 }
1780 
1781 /**
1782  * Issue command and wait for completion
1783  *
1784  * @v xhci		xHCI device
1785  * @v trb		Transfer request block (with empty Cycle flag)
1786  * @ret rc		Return status code
1787  *
1788  * On a successful completion, the TRB will be overwritten with the
1789  * completion.
1790  */
1791 static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) {
1792 	struct xhci_trb_complete *complete = &trb->complete;
1793 	unsigned int i;
1794 	int rc;
1795 
1796 	/* Record the pending command */
1797 	xhci->pending = trb;
1798 
1799 	/* Enqueue the command */
1800 	if ( ( rc = xhci_enqueue ( &xhci->command, NULL, trb ) ) != 0 )
1801 		goto err_enqueue;
1802 
1803 	/* Ring the command doorbell */
1804 	xhci_doorbell ( &xhci->command );
1805 
1806 	/* Wait for the command to complete */
1807 	for ( i = 0 ; i < XHCI_COMMAND_MAX_WAIT_MS ; i++ ) {
1808 
1809 		/* Poll event ring */
1810 		xhci_event_poll ( xhci );
1811 
1812 		/* Check for completion */
1813 		if ( ! xhci->pending ) {
1814 			if ( complete->code != XHCI_CMPLT_SUCCESS ) {
1815 				rc = -ECODE ( complete->code );
1816 				DBGC ( xhci, "XHCI %s command failed (code "
1817 				       "%d): %s\n", xhci->name, complete->code,
1818 				       strerror ( rc ) );
1819 				DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
1820 				return rc;
1821 			}
1822 			return 0;
1823 		}
1824 
1825 		/* Delay */
1826 		mdelay ( 1 );
1827 	}
1828 
1829 	/* Timeout */
1830 	DBGC ( xhci, "XHCI %s timed out waiting for completion\n", xhci->name );
1831 	rc = -ETIMEDOUT;
1832 
1833 	/* Abort command */
1834 	xhci_abort ( xhci );
1835 
1836  err_enqueue:
1837 	xhci->pending = NULL;
1838 	return rc;
1839 }
1840 
1841 /**
1842  * Issue NOP and wait for completion
1843  *
1844  * @v xhci		xHCI device
1845  * @ret rc		Return status code
1846  */
1847 static inline int xhci_nop ( struct xhci_device *xhci ) {
1848 	union xhci_trb trb;
1849 	struct xhci_trb_common *nop = &trb.common;
1850 	int rc;
1851 
1852 	/* Construct command */
1853 	memset ( nop, 0, sizeof ( *nop ) );
1854 	nop->flags = XHCI_TRB_IOC;
1855 	nop->type = XHCI_TRB_NOP_CMD;
1856 
1857 	/* Issue command and wait for completion */
1858 	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
1859 		return rc;
1860 
1861 	return 0;
1862 }
1863 
1864 /**
1865  * Enable slot
1866  *
1867  * @v xhci		xHCI device
1868  * @v type		Slot type
1869  * @ret slot		Device slot ID, or negative error
1870  */
1871 static inline int xhci_enable_slot ( struct xhci_device *xhci,
1872 				     unsigned int type ) {
1873 	union xhci_trb trb;
1874 	struct xhci_trb_enable_slot *enable = &trb.enable;
1875 	struct xhci_trb_complete *enabled = &trb.complete;
1876 	unsigned int slot;
1877 	int rc;
1878 
1879 	/* Construct command */
1880 	memset ( enable, 0, sizeof ( *enable ) );
1881 	enable->slot = type;
1882 	enable->type = XHCI_TRB_ENABLE_SLOT;
1883 
1884 	/* Issue command and wait for completion */
1885 	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1886 		DBGC ( xhci, "XHCI %s could not enable new slot: %s\n",
1887 		       xhci->name, strerror ( rc ) );
1888 		return rc;
1889 	}
1890 
1891 	/* Extract slot number */
1892 	slot = enabled->slot;
1893 
1894 	DBGC2 ( xhci, "XHCI %s slot %d enabled\n", xhci->name, slot );
1895 	return slot;
1896 }
1897 
1898 /**
1899  * Disable slot
1900  *
1901  * @v xhci		xHCI device
1902  * @v slot		Device slot
1903  * @ret rc		Return status code
1904  */
1905 static inline int xhci_disable_slot ( struct xhci_device *xhci,
1906 				      unsigned int slot ) {
1907 	union xhci_trb trb;
1908 	struct xhci_trb_disable_slot *disable = &trb.disable;
1909 	int rc;
1910 
1911 	/* Construct command */
1912 	memset ( disable, 0, sizeof ( *disable ) );
1913 	disable->type = XHCI_TRB_DISABLE_SLOT;
1914 	disable->slot = slot;
1915 
1916 	/* Issue command and wait for completion */
1917 	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
1918 		DBGC ( xhci, "XHCI %s could not disable slot %d: %s\n",
1919 		       xhci->name, slot, strerror ( rc ) );
1920 		return rc;
1921 	}
1922 
1923 	DBGC2 ( xhci, "XHCI %s slot %d disabled\n", xhci->name, slot );
1924 	return 0;
1925 }
1926 
1927 /**
1928  * Issue context-based command and wait for completion
1929  *
1930  * @v xhci		xHCI device
1931  * @v slot		Device slot
1932  * @v endpoint		Endpoint
1933  * @v type		TRB type
1934  * @v populate		Input context populator
1935  * @ret rc		Return status code
1936  */
1937 static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
1938 			  struct xhci_endpoint *endpoint, unsigned int type,
1939 			  void ( * populate ) ( struct xhci_device *xhci,
1940 						struct xhci_slot *slot,
1941 						struct xhci_endpoint *endpoint,
1942 						void *input ) ) {
1943 	union xhci_trb trb;
1944 	struct xhci_trb_context *context = &trb.context;
1945 	size_t len;
1946 	void *input;
1947 	int rc;
1948 
1949 	/* Allocate an input context */
1950 	len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
1951 	input = malloc_dma ( len, xhci_align ( len ) );
1952 	if ( ! input ) {
1953 		rc = -ENOMEM;
1954 		goto err_alloc;
1955 	}
1956 	memset ( input, 0, len );
1957 
1958 	/* Populate input context */
1959 	populate ( xhci, slot, endpoint, input );
1960 
1961 	/* Construct command */
1962 	memset ( context, 0, sizeof ( *context ) );
1963 	context->type = type;
1964 	context->input = cpu_to_le64 ( virt_to_phys ( input ) );
1965 	context->slot = slot->id;
1966 
1967 	/* Issue command and wait for completion */
1968 	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
1969 		goto err_command;
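	/* (Success also falls through here: the input context is freed
	 * on both the success and error paths.)
	 */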
1970 
1971  err_command:
1972 	free_dma ( input, len );
1973  err_alloc:
1974 	return rc;
1975 }
1976 
1977 /**
1978  * Populate address device input context
1979  *
1980  * @v xhci		xHCI device
1981  * @v slot		Device slot
1982  * @v endpoint		Endpoint
1983  * @v input		Input context
1984  */
1985 static void xhci_address_device_input ( struct xhci_device *xhci,
1986 					struct xhci_slot *slot,
1987 					struct xhci_endpoint *endpoint,
1988 					void *input ) {
1989 	struct xhci_control_context *control_ctx;
1990 	struct xhci_slot_context *slot_ctx;
1991 	struct xhci_endpoint_context *ep_ctx;
1992 
1993 	/* Sanity checks */
1994 	assert ( endpoint->ctx == XHCI_CTX_EP0 );
1995 
1996 	/* Populate control context */
1997 	control_ctx = input;
1998 	control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
1999 					 ( 1 << XHCI_CTX_EP0 ) );
2000 
2001 	/* Populate slot context */
2002 	slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2003 	slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot->psiv,
2004 							slot->route ) );
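	/* (XHCI_SLOT_INFO packs the context entries count, hub flag,
	 * speed and route string; only the slot and EP0 contexts are
	 * valid at address time, hence a context entries value of 1.)
	 */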
2005 	slot_ctx->port = slot->port;
2006 	slot_ctx->tt_id = slot->tt_id;
2007 	slot_ctx->tt_port = slot->tt_port;
2008 
2009 	/* Populate control endpoint context */
2010 	ep_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_EP0 ) );
2011 	ep_ctx->type = XHCI_EP_TYPE_CONTROL;
2012 	ep_ctx->burst = endpoint->ep->burst;
2013 	ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2014 	ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
2015 					XHCI_EP_DCS );
2016 	ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
2017 }
2018 
2019 /**
2020  * Address device
2021  *
2022  * @v xhci		xHCI device
2023  * @v slot		Device slot
2024  * @ret rc		Return status code
2025  */
2026 static inline int xhci_address_device ( struct xhci_device *xhci,
2027 					struct xhci_slot *slot ) {
2028 	struct usb_device *usb = slot->usb;
2029 	struct xhci_slot_context *slot_ctx;
2030 	int rc;
2031 
2032 	/* Assign device address */
2033 	if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0],
2034 				   XHCI_TRB_ADDRESS_DEVICE,
2035 				   xhci_address_device_input ) ) != 0 )
2036 		return rc;
2037 
2038 	/* Get assigned address */
2039 	slot_ctx = ( slot->context +
2040 		     xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) );
2041 	usb->address = slot_ctx->address;
2042 	DBGC2 ( xhci, "XHCI %s assigned address %d to %s\n",
2043 		xhci->name, usb->address, usb->name );
2044 
2045 	return 0;
2046 }
2047 
2048 /**
2049  * Populate configure endpoint input context
2050  *
2051  * @v xhci		xHCI device
2052  * @v slot		Device slot
2053  * @v endpoint		Endpoint
2054  * @v input		Input context
2055  */
2056 static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
2057 					    struct xhci_slot *slot,
2058 					    struct xhci_endpoint *endpoint,
2059 					    void *input ) {
2060 	struct xhci_control_context *control_ctx;
2061 	struct xhci_slot_context *slot_ctx;
2062 	struct xhci_endpoint_context *ep_ctx;
2063 
2064 	/* Populate control context */
2065 	control_ctx = input;
2066 	control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
2067 					 ( 1 << endpoint->ctx ) );
2068 
2069 	/* Populate slot context */
2070 	slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2071 	slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2072 							( slot->ports ? 1 : 0 ),
2073 							slot->psiv, 0 ) );
2074 	slot_ctx->ports = slot->ports;
2075 
2076 	/* Populate endpoint context */
2077 	ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
2078 	ep_ctx->interval = endpoint->interval;
2079 	ep_ctx->type = endpoint->type;
2080 	ep_ctx->burst = endpoint->ep->burst;
2081 	ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2082 	ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
2083 					XHCI_EP_DCS );
2084 	ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
2085 }
2086 
2087 /**
2088  * Configure endpoint
2089  *
2090  * @v xhci		xHCI device
2091  * @v slot		Device slot
2092  * @v endpoint		Endpoint
2093  * @ret rc		Return status code
2094  */
2095 static inline int xhci_configure_endpoint ( struct xhci_device *xhci,
2096 					    struct xhci_slot *slot,
2097 					    struct xhci_endpoint *endpoint ) {
2098 	int rc;
2099 
2100 	/* Configure endpoint */
2101 	if ( ( rc = xhci_context ( xhci, slot, endpoint,
2102 				   XHCI_TRB_CONFIGURE_ENDPOINT,
2103 				   xhci_configure_endpoint_input ) ) != 0 )
2104 		return rc;
2105 
2106 	DBGC2 ( xhci, "XHCI %s slot %d ctx %d configured\n",
2107 		xhci->name, slot->id, endpoint->ctx );
2108 	return 0;
2109 }
2110 
2111 /**
2112  * Populate deconfigure endpoint input context
2113  *
2114  * @v xhci		xHCI device
2115  * @v slot		Device slot
2116  * @v endpoint		Endpoint
2117  * @v input		Input context
2118  */
2119 static void
2120 xhci_deconfigure_endpoint_input ( struct xhci_device *xhci __unused,
2121 				  struct xhci_slot *slot __unused,
2122 				  struct xhci_endpoint *endpoint,
2123 				  void *input ) {
2124 	struct xhci_control_context *control_ctx;
2125 	struct xhci_slot_context *slot_ctx;
2126 
2127 	/* Populate control context */
2128 	control_ctx = input;
2129 	control_ctx->add = cpu_to_le32 ( 1 << XHCI_CTX_SLOT );
2130 	control_ctx->drop = cpu_to_le32 ( 1 << endpoint->ctx );
2131 
2132 	/* Populate slot context */
2133 	slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2134 	slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2135 							0, 0, 0 ) );
2136 }
2137 
2138 /**
2139  * Deconfigure endpoint
2140  *
2141  * @v xhci		xHCI device
2142  * @v slot		Device slot
2143  * @v endpoint		Endpoint
2144  * @ret rc		Return status code
2145  */
2146 static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci,
2147 					      struct xhci_slot *slot,
2148 					      struct xhci_endpoint *endpoint ) {
2149 	int rc;
2150 
2151 	/* Deconfigure endpoint */
2152 	if ( ( rc = xhci_context ( xhci, slot, endpoint,
2153 				   XHCI_TRB_CONFIGURE_ENDPOINT,
2154 				   xhci_deconfigure_endpoint_input ) ) != 0 )
2155 		return rc;
2156 
2157 	DBGC2 ( xhci, "XHCI %s slot %d ctx %d deconfigured\n",
2158 		xhci->name, slot->id, endpoint->ctx );
2159 	return 0;
2160 }
2161 
2162 /**
2163  * Populate evaluate context input context
2164  *
2165  * @v xhci		xHCI device
2166  * @v slot		Device slot
2167  * @v endpoint		Endpoint
2168  * @v input		Input context
2169  */
2170 static void xhci_evaluate_context_input ( struct xhci_device *xhci,
2171 					  struct xhci_slot *slot __unused,
2172 					  struct xhci_endpoint *endpoint,
2173 					  void *input ) {
2174 	struct xhci_control_context *control_ctx;
2175 	struct xhci_slot_context *slot_ctx;
2176 	struct xhci_endpoint_context *ep_ctx;
2177 
2178 	/* Populate control context */
2179 	control_ctx = input;
2180 	control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
2181 					 ( 1 << endpoint->ctx ) );
2182 
2183 	/* Populate slot context */
2184 	slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
2185 	slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
2186 							0, 0, 0 ) );
2187 
2188 	/* Populate endpoint context */
2189 	ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
2190 	ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
2191 }
2192 
2193 /**
2194  * Evaluate context
2195  *
2196  * @v xhci		xHCI device
2197  * @v slot		Device slot
2198  * @v endpoint		Endpoint
2199  * @ret rc		Return status code
2200  */
2201 static inline int xhci_evaluate_context ( struct xhci_device *xhci,
2202 					  struct xhci_slot *slot,
2203 					  struct xhci_endpoint *endpoint ) {
2204 	int rc;
2205 
2206 	/* Evaluate context */
2207 	if ( ( rc = xhci_context ( xhci, slot, endpoint,
2208 				   XHCI_TRB_EVALUATE_CONTEXT,
2209 				   xhci_evaluate_context_input ) ) != 0 )
2210 		return rc;
2211 
2212 	DBGC2 ( xhci, "XHCI %s slot %d ctx %d (re-)evaluated\n",
2213 		xhci->name, slot->id, endpoint->ctx );
2214 	return 0;
2215 }
2216 
2217 /**
2218  * Reset endpoint
2219  *
2220  * @v xhci		xHCI device
2221  * @v slot		Device slot
2222  * @v endpoint		Endpoint
2223  * @ret rc		Return status code
2224  */
2225 static inline int xhci_reset_endpoint ( struct xhci_device *xhci,
2226 					struct xhci_slot *slot,
2227 					struct xhci_endpoint *endpoint ) {
2228 	union xhci_trb trb;
2229 	struct xhci_trb_reset_endpoint *reset = &trb.reset;
2230 	int rc;
2231 
2232 	/* Construct command */
2233 	memset ( reset, 0, sizeof ( *reset ) );
2234 	reset->slot = slot->id;
2235 	reset->endpoint = endpoint->ctx;
2236 	reset->type = XHCI_TRB_RESET_ENDPOINT;
2237 
2238 	/* Issue command and wait for completion */
2239 	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2240 		DBGC ( xhci, "XHCI %s slot %d ctx %d could not reset endpoint "
2241 		       "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx,
2242 		       endpoint->context->state, strerror ( rc ) );
2243 		return rc;
2244 	}
2245 
2246 	return 0;
2247 }
2248 
2249 /**
2250  * Stop endpoint
2251  *
2252  * @v xhci		xHCI device
2253  * @v slot		Device slot
2254  * @v endpoint		Endpoint
2255  * @ret rc		Return status code
2256  */
2257 static inline int xhci_stop_endpoint ( struct xhci_device *xhci,
2258 				       struct xhci_slot *slot,
2259 				       struct xhci_endpoint *endpoint ) {
2260 	union xhci_trb trb;
2261 	struct xhci_trb_stop_endpoint *stop = &trb.stop;
2262 	int rc;
2263 
2264 	/* Construct command */
2265 	memset ( stop, 0, sizeof ( *stop ) );
2266 	stop->slot = slot->id;
2267 	stop->endpoint = endpoint->ctx;
2268 	stop->type = XHCI_TRB_STOP_ENDPOINT;
2269 
2270 	/* Issue command and wait for completion */
2271 	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2272 		DBGC ( xhci, "XHCI %s slot %d ctx %d could not stop endpoint "
2273 		       "in state %d: %s\n", xhci->name, slot->id, endpoint->ctx,
2274 		       endpoint->context->state, strerror ( rc ) );
2275 		return rc;
2276 	}
2277 
2278 	return 0;
2279 }
2280 
2281 /**
2282  * Set transfer ring dequeue pointer
2283  *
2284  * @v xhci		xHCI device
2285  * @v slot		Device slot
2286  * @v endpoint		Endpoint
2287  * @ret rc		Return status code
2288  */
2289 static inline int
2290 xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
2291 			      struct xhci_slot *slot,
2292 			      struct xhci_endpoint *endpoint ) {
2293 	union xhci_trb trb;
2294 	struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue;
2295 	struct xhci_trb_ring *ring = &endpoint->ring;
2296 	unsigned int cons;
2297 	unsigned int mask;
2298 	unsigned int index;
2299 	unsigned int dcs;
2300 	int rc;
2301 
2302 	/* Construct command */
2303 	memset ( dequeue, 0, sizeof ( *dequeue ) );
2304 	cons = ring->cons;
2305 	mask = ring->mask;
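	/* The new dequeue cycle state is the complement of the low bit
	 * of the wrap count ( cons >> shift ), i.e. the cycle bit value
	 * that the hardware should expect to find in the next TRB it
	 * consumes from this ring.
	 */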
2306 	dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
2307 	index = ( cons & mask );
2308 	dequeue->dequeue =
2309 		cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
2310 	dequeue->slot = slot->id;
2311 	dequeue->endpoint = endpoint->ctx;
2312 	dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;
2313 
2314 	/* Issue command and wait for completion */
2315 	if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
2316 		DBGC ( xhci, "XHCI %s slot %d ctx %d could not set TR dequeue "
2317 		       "pointer in state %d: %s\n", xhci->name, slot->id,
2318 		       endpoint->ctx, endpoint->context->state, strerror ( rc));
2319 		return rc;
2320 	}
2321 
2322 	return 0;
2323 }
2324 
2325 /******************************************************************************
2326  *
2327  * Endpoint operations
2328  *
2329  ******************************************************************************
2330  */
2331 
2332 /**
2333  * Open endpoint
2334  *
2335  * @v ep		USB endpoint
2336  * @ret rc		Return status code
2337  */
2338 static int xhci_endpoint_open ( struct usb_endpoint *ep ) {
2339 	struct usb_device *usb = ep->usb;
2340 	struct xhci_slot *slot = usb_get_hostdata ( usb );
2341 	struct xhci_device *xhci = slot->xhci;
2342 	struct xhci_endpoint *endpoint;
2343 	unsigned int ctx;
2344 	unsigned int type;
2345 	unsigned int interval;
2346 	int rc;
2347 
2348 	/* Calculate context index */
2349 	ctx = XHCI_CTX ( ep->address );
2350 	assert ( slot->endpoint[ctx] == NULL );
2351 
2352 	/* Calculate endpoint type */
2353 	type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
2354 	if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) )
2355 		type = XHCI_EP_TYPE_CONTROL;
2356 	if ( ep->address & USB_DIR_IN )
2357 		type |= XHCI_EP_TYPE_IN;
2358 
2359 	/* Calculate interval */
2360 	if ( type & XHCI_EP_TYPE_PERIODIC ) {
2361 		interval = ( fls ( ep->interval ) - 1 );
2362 	} else {
2363 		interval = ep->interval;
2364 	}
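	/* (For periodic endpoints the xHCI Interval field is
	 * log2-encoded, hence the highest-set-bit calculation above;
	 * other endpoint types pass the interval through unchanged.)
	 */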
2365 
2366 	/* Allocate and initialise structure */
2367 	endpoint = zalloc ( sizeof ( *endpoint ) );
2368 	if ( ! endpoint ) {
2369 		rc = -ENOMEM;
2370 		goto err_alloc;
2371 	}
2372 	usb_endpoint_set_hostdata ( ep, endpoint );
2373 	slot->endpoint[ctx] = endpoint;
2374 	endpoint->xhci = xhci;
2375 	endpoint->slot = slot;
2376 	endpoint->ep = ep;
2377 	endpoint->ctx = ctx;
2378 	endpoint->type = type;
2379 	endpoint->interval = interval;
2380 	endpoint->context = ( ( ( void * ) slot->context ) +
2381 			      xhci_device_context_offset ( xhci, ctx ) );
2382 
2383 	/* Allocate transfer ring */
2384 	if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring,
2385 				      XHCI_TRANSFER_TRBS_LOG2,
2386 				      slot->id, ctx, 0 ) ) != 0 )
2387 		goto err_ring_alloc;
2388 
2389 	/* Configure endpoint, if applicable */
2390 	if ( ( ctx != XHCI_CTX_EP0 ) &&
2391 	     ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 ))
2392 		goto err_configure_endpoint;
2393 
2394 	DBGC2 ( xhci, "XHCI %s slot %d ctx %d ring [%08lx,%08lx)\n",
2395 		xhci->name, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ),
2396 		( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) );
2397 	return 0;
2398 
2399 	xhci_deconfigure_endpoint ( xhci, slot, endpoint );
2400  err_configure_endpoint:
2401 	xhci_ring_free ( &endpoint->ring );
2402  err_ring_alloc:
2403 	slot->endpoint[ctx] = NULL;
2404 	free ( endpoint );
2405  err_alloc:
2406 	return rc;
2407 }
2408 
2409 /**
2410  * Close endpoint
2411  *
2412  * @v ep		USB endpoint
2413  */
2414 static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
2415 	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2416 	struct xhci_slot *slot = endpoint->slot;
2417 	struct xhci_device *xhci = slot->xhci;
2418 	struct io_buffer *iobuf;
2419 	unsigned int ctx = endpoint->ctx;
2420 
2421 	/* Deconfigure endpoint, if applicable */
2422 	if ( ctx != XHCI_CTX_EP0 )
2423 		xhci_deconfigure_endpoint ( xhci, slot, endpoint );
2424 
2425 	/* Cancel any incomplete transfers */
2426 	while ( xhci_ring_fill ( &endpoint->ring ) ) {
2427 		iobuf = xhci_dequeue_multi ( &endpoint->ring );
2428 		usb_complete_err ( ep, iobuf, -ECANCELED );
2429 	}
2430 
2431 	/* Free endpoint */
2432 	xhci_ring_free ( &endpoint->ring );
2433 	slot->endpoint[ctx] = NULL;
2434 	free ( endpoint );
2435 }
2436 
2437 /**
2438  * Reset endpoint
2439  *
2440  * @v ep		USB endpoint
2441  * @ret rc		Return status code
2442  */
2443 static int xhci_endpoint_reset ( struct usb_endpoint *ep ) {
2444 	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2445 	struct xhci_slot *slot = endpoint->slot;
2446 	struct xhci_device *xhci = slot->xhci;
2447 	int rc;
2448 
2449 	/* Reset endpoint context */
2450 	if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 )
2451 		return rc;
2452 
2453 	/* Set transfer ring dequeue pointer */
2454 	if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0)
2455 		return rc;
2456 
2457 	/* Ring doorbell to resume processing */
2458 	xhci_doorbell ( &endpoint->ring );
2459 
2460 	DBGC ( xhci, "XHCI %s slot %d ctx %d reset\n",
2461 	       xhci->name, slot->id, endpoint->ctx );
2462 	return 0;
2463 }
2464 
2465 /**
2466  * Update MTU
2467  *
2468  * @v ep		USB endpoint
2469  * @ret rc		Return status code
2470  */
2471 static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
2472 	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2473 	struct xhci_slot *slot = endpoint->slot;
2474 	struct xhci_device *xhci = slot->xhci;
2475 	int rc;
2476 
2477 	/* Evaluate context */
2478 	if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 )
2479 		return rc;
2480 
2481 	return 0;
2482 }
2483 
2484 /**
2485  * Enqueue message transfer
2486  *
2487  * @v ep		USB endpoint
2488  * @v iobuf		I/O buffer
2489  * @ret rc		Return status code
2490  */
2491 static int xhci_endpoint_message ( struct usb_endpoint *ep,
2492 				   struct io_buffer *iobuf ) {
2493 	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2494 	struct usb_setup_packet *packet;
2495 	unsigned int input;
2496 	size_t len;
2497 	union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ +
2498 			     1 /* status */ ];
2499 	union xhci_trb *trb = trbs;
2500 	struct xhci_trb_setup *setup;
2501 	struct xhci_trb_data *data;
2502 	struct xhci_trb_status *status;
2503 	int rc;
2504 
2505 	/* Profile message transfers */
2506 	profile_start ( &xhci_message_profiler );
2507 
2508 	/* Construct setup stage TRB */
2509 	memset ( trbs, 0, sizeof ( trbs ) );
2510 	assert ( iob_len ( iobuf ) >= sizeof ( *packet ) );
2511 	packet = iobuf->data;
2512 	iob_pull ( iobuf, sizeof ( *packet ) );
2513 	setup = &(trb++)->setup;
2514 	memcpy ( &setup->packet, packet, sizeof ( setup->packet ) );
2515 	setup->len = cpu_to_le32 ( sizeof ( *packet ) );
2516 	setup->flags = XHCI_TRB_IDT;
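	/* IDT marks the setup packet as immediate data carried within
	 * the TRB itself, rather than referenced by a data pointer.
	 */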
2517 	setup->type = XHCI_TRB_SETUP;
2518 	len = iob_len ( iobuf );
2519 	input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) );
2520 	if ( len )
2521 		setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );
2522 
2523 	/* Construct data stage TRB, if applicable */
2524 	if ( len ) {
2525 		data = &(trb++)->data;
2526 		data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
2527 		data->len = cpu_to_le32 ( len );
2528 		data->type = XHCI_TRB_DATA;
2529 		data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
2530 	}
2531 
2532 	/* Construct status stage TRB */
2533 	status = &(trb++)->status;
2534 	status->flags = XHCI_TRB_IOC;
2535 	status->type = XHCI_TRB_STATUS;
2536 	status->direction =
2537 		( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN );
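	/* (Per the USB control transfer rules, the status stage runs
	 * opposite to the data stage: IN data completes with an OUT
	 * status, and transfers with no data stage use an IN status.)
	 */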
2538 
2539 	/* Enqueue TRBs */
2540 	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
2541 					 ( trb - trbs ) ) ) != 0 )
2542 		return rc;
2543 
2544 	/* Ring the doorbell */
2545 	xhci_doorbell ( &endpoint->ring );
2546 
2547 	profile_stop ( &xhci_message_profiler );
2548 	return 0;
2549 }
2550 
2551 /**
2552  * Calculate number of TRBs
2553  *
2554  * @v len		Length of data
2555  * @v zlp		Append a zero-length packet
2556  * @ret count		Number of transfer descriptors
2557  */
2558 static unsigned int xhci_endpoint_count ( size_t len, int zlp ) {
2559 	unsigned int count;
2560 
2561 	/* Split into 64kB TRBs */
2562 	count = ( ( len + XHCI_MTU - 1 ) / XHCI_MTU );
2563 
2564 	/* Append a zero-length TRB if applicable */
2565 	if ( zlp || ( count == 0 ) )
2566 		count++;
2567 
2568 	return count;
2569 }
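/* A minimal sanity check of the calculation above (illustrative only,
 * not part of the driver; assumes XHCI_MTU is 65536):
 *
 *	assert ( xhci_endpoint_count ( 0, 0 ) == 1 );
 *	assert ( xhci_endpoint_count ( 65536, 0 ) == 1 );
 *	assert ( xhci_endpoint_count ( 65537, 0 ) == 2 );
 *	assert ( xhci_endpoint_count ( 65536, 1 ) == 2 );
 */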
2570 
2571 /**
2572  * Enqueue stream transfer
2573  *
2574  * @v ep		USB endpoint
2575  * @v iobuf		I/O buffer
2576  * @v zlp		Append a zero-length packet
2577  * @ret rc		Return status code
2578  */
2579 static int xhci_endpoint_stream ( struct usb_endpoint *ep,
2580 				  struct io_buffer *iobuf, int zlp ) {
2581 	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
2582 	void *data = iobuf->data;
2583 	size_t len = iob_len ( iobuf );
2584 	unsigned int count = xhci_endpoint_count ( len, zlp );
2585 	union xhci_trb trbs[count];
2586 	union xhci_trb *trb = trbs;
2587 	struct xhci_trb_normal *normal;
2588 	unsigned int i;
2589 	size_t trb_len;
2590 	int rc;
2591 
2592 	/* Profile stream transfers */
2593 	profile_start ( &xhci_stream_profiler );
2594 
2595 	/* Construct normal TRBs */
2596 	memset ( &trbs, 0, sizeof ( trbs ) );
2597 	for ( i = 0 ; i < count ; i ++ ) {
2598 
2599 		/* Calculate TRB length */
2600 		trb_len = XHCI_MTU;
2601 		if ( trb_len > len )
2602 			trb_len = len;
2603 
2604 		/* Construct normal TRB */
2605 		normal = &trb->normal;
2606 		normal->data = cpu_to_le64 ( virt_to_phys ( data ) );
2607 		normal->len = cpu_to_le32 ( trb_len );
2608 		normal->type = XHCI_TRB_NORMAL;
2609 		normal->flags = XHCI_TRB_CH;
2610 
2611 		/* Move to next TRB */
2612 		data += trb_len;
2613 		len -= trb_len;
2614 		trb++;
2615 	}
2616 
2617 	/* Mark zero-length packet (if present) as a separate transfer */
2618 	if ( zlp && ( count > 1 ) )
2619 		trb[-2].normal.flags = 0;
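	/* (Clearing the chain bit on the penultimate TRB terminates the
	 * data TD, so the final zero-length TRB forms a TD of its own.)
	 */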
2620 
2621 	/* Generate completion for final TRB */
2622 	trb[-1].normal.flags = XHCI_TRB_IOC;
2623 
2624 	/* Enqueue TRBs */
2625 	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
2626 					 count ) ) != 0 )
2627 		return rc;
2628 
2629 	/* Ring the doorbell */
2630 	xhci_doorbell ( &endpoint->ring );
2631 
2632 	profile_stop ( &xhci_stream_profiler );
2633 	return 0;
2634 }
2635 
2636 /******************************************************************************
2637  *
2638  * Device operations
2639  *
2640  ******************************************************************************
2641  */
2642 
2643 /**
2644  * Open device
2645  *
2646  * @v usb		USB device
2647  * @ret rc		Return status code
2648  */
2649 static int xhci_device_open ( struct usb_device *usb ) {
2650 	struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
2651 	struct usb_port *tt = usb_transaction_translator ( usb );
2652 	struct xhci_slot *slot;
2653 	struct xhci_slot *tt_slot;
2654 	size_t len;
2655 	int type;
2656 	int id;
2657 	int rc;
2658 
2659 	/* Determine applicable slot type */
2660 	type = xhci_port_slot_type ( xhci, usb->port->address );
2661 	if ( type < 0 ) {
2662 		rc = type;
2663 		DBGC ( xhci, "XHCI %s-%d has no slot type\n",
2664 		       xhci->name, usb->port->address );
2665 		goto err_type;
2666 	}
2667 
2668 	/* Allocate a device slot number */
2669 	id = xhci_enable_slot ( xhci, type );
2670 	if ( id < 0 ) {
2671 		rc = id;
2672 		goto err_enable_slot;
2673 	}
2674 	assert ( ( id > 0 ) && ( ( unsigned int ) id <= xhci->slots ) );
2675 	assert ( xhci->slot[id] == NULL );
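	/* (Slot IDs are 1-based; DCBAA entry 0 is reserved, holding the
	 * scratchpad buffer array pointer when scratchpads are in use.)
	 */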
2676 
2677 	/* Allocate and initialise structure */
2678 	slot = zalloc ( sizeof ( *slot ) );
2679 	if ( ! slot ) {
2680 		rc = -ENOMEM;
2681 		goto err_alloc;
2682 	}
2683 	usb_set_hostdata ( usb, slot );
2684 	xhci->slot[id] = slot;
2685 	slot->xhci = xhci;
2686 	slot->usb = usb;
2687 	slot->id = id;
2688 	if ( tt ) {
2689 		tt_slot = usb_get_hostdata ( tt->hub->usb );
2690 		slot->tt_id = tt_slot->id;
2691 		slot->tt_port = tt->address;
2692 	}
2693 
2694 	/* Allocate a device context */
2695 	len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
2696 	slot->context = malloc_dma ( len, xhci_align ( len ) );
2697 	if ( ! slot->context ) {
2698 		rc = -ENOMEM;
2699 		goto err_alloc_context;
2700 	}
2701 	memset ( slot->context, 0, len );
2702 
2703 	/* Set device context base address */
2704 	assert ( xhci->dcbaa[id] == 0 );
2705 	xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );
2706 
2707 	DBGC2 ( xhci, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n",
2708 		xhci->name, slot->id, virt_to_phys ( slot->context ),
2709 		( virt_to_phys ( slot->context ) + len ), usb->name );
2710 	return 0;
2711 
2712 	xhci->dcbaa[id] = 0;
2713 	free_dma ( slot->context, len );
2714  err_alloc_context:
2715 	xhci->slot[id] = NULL;
2716 	free ( slot );
2717  err_alloc:
2718 	xhci_disable_slot ( xhci, id );
2719  err_enable_slot:
2720  err_type:
2721 	return rc;
2722 }
2723 
2724 /**
2725  * Close device
2726  *
2727  * @v usb		USB device
2728  */
2729 static void xhci_device_close ( struct usb_device *usb ) {
2730 	struct xhci_slot *slot = usb_get_hostdata ( usb );
2731 	struct xhci_device *xhci = slot->xhci;
2732 	size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
2733 	unsigned int id = slot->id;
2734 	int rc;
2735 
2736 	/* Disable slot */
2737 	if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
2738 		/* Slot is still enabled.  Leak the slot context,
2739 		 * since the controller may still write to this
2740 		 * memory, and leave the DCBAA entry intact.
2741 		 *
2742 		 * If the controller later reports that this same slot
2743 		 * has been re-enabled, then some assertions will be
2744 		 * triggered.
2745 		 */
2746 		DBGC ( xhci, "XHCI %s slot %d leaking context memory\n",
2747 		       xhci->name, slot->id );
2748 		slot->context = NULL;
2749 	}
2750 
2751 	/* Free slot */
2752 	if ( slot->context ) {
2753 		free_dma ( slot->context, len );
2754 		xhci->dcbaa[id] = 0;
2755 	}
2756 	xhci->slot[id] = NULL;
2757 	free ( slot );
2758 }
2759 
2760 /**
2761  * Assign device address
2762  *
2763  * @v usb		USB device
2764  * @ret rc		Return status code
2765  */
2766 static int xhci_device_address ( struct usb_device *usb ) {
2767 	struct xhci_slot *slot = usb_get_hostdata ( usb );
2768 	struct xhci_device *xhci = slot->xhci;
2769 	struct usb_port *root_port;
2770 	int psiv;
2771 	int rc;
2772 
2773 	/* Calculate route string */
2774 	slot->route = usb_route_string ( usb );
2775 
2776 	/* Calculate root hub port number */
2777 	root_port = usb_root_hub_port ( usb );
2778 	slot->port = root_port->address;
2779 
2780 	/* Calculate protocol speed ID */
2781 	psiv = xhci_port_psiv ( xhci, slot->port, usb->speed );
2782 	if ( psiv < 0 ) {
2783 		rc = psiv;
2784 		return rc;
2785 	}
2786 	slot->psiv = psiv;
2787 
2788 	/* Address device */
2789 	if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
2790 		return rc;
2791 
2792 	return 0;
2793 }
2794 
2795 /******************************************************************************
2796  *
2797  * Bus operations
2798  *
2799  ******************************************************************************
2800  */
2801 
2802 /**
2803  * Open USB bus
2804  *
2805  * @v bus		USB bus
2806  * @ret rc		Return status code
2807  */
2808 static int xhci_bus_open ( struct usb_bus *bus ) {
2809 	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2810 	int rc;
2811 
2812 	/* Allocate device slot array */
2813 	xhci->slot = zalloc ( ( xhci->slots + 1 ) * sizeof ( xhci->slot[0] ) );
2814 	if ( ! xhci->slot ) {
2815 		rc = -ENOMEM;
2816 		goto err_slot_alloc;
2817 	}
2818 
2819 	/* Allocate device context base address array */
2820 	if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
2821 		goto err_dcbaa_alloc;
2822 
2823 	/* Allocate scratchpad buffers */
2824 	if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
2825 		goto err_scratchpad_alloc;
2826 
2827 	/* Allocate command ring */
2828 	if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
2829 		goto err_command_alloc;
2830 
2831 	/* Allocate event ring */
2832 	if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
2833 		goto err_event_alloc;
2834 
2835 	/* Start controller */
2836 	xhci_run ( xhci );
2837 
2838 	return 0;
2839 
2840 	xhci_stop ( xhci );
2841 	xhci_event_free ( xhci );
2842  err_event_alloc:
2843 	xhci_command_free ( xhci );
2844  err_command_alloc:
2845 	xhci_scratchpad_free ( xhci );
2846  err_scratchpad_alloc:
2847 	xhci_dcbaa_free ( xhci );
2848  err_dcbaa_alloc:
2849 	free ( xhci->slot );
2850  err_slot_alloc:
2851 	return rc;
2852 }
2853 
2854 /**
2855  * Close USB bus
2856  *
2857  * @v bus		USB bus
2858  */
2859 static void xhci_bus_close ( struct usb_bus *bus ) {
2860 	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2861 	unsigned int i;
2862 
2863 	/* Sanity checks */
2864 	assert ( xhci->slot != NULL );
2865 	for ( i = 0 ; i <= xhci->slots ; i++ )
2866 		assert ( xhci->slot[i] == NULL );
2867 
2868 	xhci_stop ( xhci );
2869 	xhci_event_free ( xhci );
2870 	xhci_command_free ( xhci );
2871 	xhci_scratchpad_free ( xhci );
2872 	xhci_dcbaa_free ( xhci );
2873 	free ( xhci->slot );
2874 }
2875 
2876 /**
2877  * Poll USB bus
2878  *
2879  * @v bus		USB bus
2880  */
2881 static void xhci_bus_poll ( struct usb_bus *bus ) {
2882 	struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
2883 
2884 	/* Poll event ring */
2885 	xhci_event_poll ( xhci );
2886 }
2887 
2888 /******************************************************************************
2889  *
2890  * Hub operations
2891  *
2892  ******************************************************************************
2893  */
2894 
2895 /**
2896  * Open hub
2897  *
2898  * @v hub		USB hub
2899  * @ret rc		Return status code
2900  */
2901 static int xhci_hub_open ( struct usb_hub *hub ) {
2902 	struct xhci_slot *slot;
2903 
2904 	/* Do nothing if this is the root hub */
2905 	if ( ! hub->usb )
2906 		return 0;
2907 
2908 	/* Get device slot */
2909 	slot = usb_get_hostdata ( hub->usb );
2910 
2911 	/* Update device slot hub parameters.  We don't inform the
2912 	 * hardware of this information until the hub's interrupt
2913 	 * endpoint is opened, since the only mechanism for so doing
2914 	 * provided by the xHCI specification is a Configure Endpoint
2915 	 * command, and we can't issue that command until we have a
2916 	 * non-EP0 endpoint to configure.
2917 	 */
2918 	slot->ports = hub->ports;
2919 
2920 	return 0;
2921 }
2922 
2923 /**
2924  * Close hub
2925  *
2926  * @v hub		USB hub
2927  */
2928 static void xhci_hub_close ( struct usb_hub *hub __unused ) {
2929 
2930 	/* Nothing to do */
2931 }
2932 
2933 /******************************************************************************
2934  *
2935  * Root hub operations
2936  *
2937  ******************************************************************************
2938  */
2939 
2940 /**
2941  * Open root hub
2942  *
2943  * @v hub		USB hub
2944  * @ret rc		Return status code
2945  */
2946 static int xhci_root_open ( struct usb_hub *hub ) {
2947 	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
2948 	struct usb_port *port;
2949 	uint32_t portsc;
2950 	unsigned int i;
2951 
2952 	/* Enable power to all ports */
2953 	for ( i = 1 ; i <= xhci->ports ; i++ ) {
2954 		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
2955 		portsc &= XHCI_PORTSC_PRESERVE;
2956 		portsc |= XHCI_PORTSC_PP;
2957 		writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
2958 	}
2959 
2960 	/* xHCI spec requires us to potentially wait 20ms after
2961 	 * enabling power to a port.
2962 	 */
2963 	mdelay ( XHCI_PORT_POWER_DELAY_MS );
2964 
2965 	/* USB3 ports may power up as Disabled */
2966 	for ( i = 1 ; i <= xhci->ports ; i++ ) {
2967 		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
2968 		port = usb_port ( hub, i );
2969 		if ( ( port->protocol >= USB_PROTO_3_0 ) &&
2970 		     ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
2971 		       XHCI_PORTSC_PLS_DISABLED ) ) {
2972 			/* Force link state to RxDetect */
2973 			portsc &= XHCI_PORTSC_PRESERVE;
2974 			portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS);
2975 			writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
2976 		}
2977 	}
2978 
2979 	/* Some xHCI cards seem to require an additional delay after
2980 	 * setting the link state to RxDetect.
2981 	 */
2982 	mdelay ( XHCI_LINK_STATE_DELAY_MS );
2983 
2984 	return 0;
2985 }
2986 
2987 /**
2988  * Close root hub
2989  *
2990  * @v hub		USB hub
2991  */
2992 static void xhci_root_close ( struct usb_hub *hub __unused ) {
2993 
2994 	/* Nothing to do */
2995 }
2996 
2997 /**
2998  * Enable port
2999  *
3000  * @v hub		USB hub
3001  * @v port		USB port
3002  * @ret rc		Return status code
3003  */
3004 static int xhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) {
3005 	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3006 	uint32_t portsc;
3007 	unsigned int i;
3008 
3009 	/* Reset port */
3010 	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
3011 	portsc &= XHCI_PORTSC_PRESERVE;
3012 	portsc |= XHCI_PORTSC_PR;
3013 	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
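	/* PR is self-clearing: the controller clears it and sets PED
	 * once the reset has completed and the port is usable.
	 */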
3014 
3015 	/* Wait for port to become enabled */
3016 	for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {
3017 
3018 		/* Check port status */
3019 		portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
3020 		if ( portsc & XHCI_PORTSC_PED )
3021 			return 0;
3022 
3023 		/* Delay */
3024 		mdelay ( 1 );
3025 	}
3026 
3027 	DBGC ( xhci, "XHCI %s-%d timed out waiting for port to enable\n",
3028 	       xhci->name, port->address );
3029 	return -ETIMEDOUT;
3030 }
3031 
3032 /**
3033  * Disable port
3034  *
3035  * @v hub		USB hub
3036  * @v port		USB port
3037  * @ret rc		Return status code
3038  */
3039 static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) {
3040 	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3041 	uint32_t portsc;
3042 
3043 	/* Disable port */
3044 	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
3045 	portsc &= XHCI_PORTSC_PRESERVE;
3046 	portsc |= XHCI_PORTSC_PED;
3047 	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
3048 
3049 	/* Allow time for link state to stabilise */
3050 	mdelay ( XHCI_LINK_STATE_DELAY_MS );
3051 
3052 	/* Set link state to RxDetect for USB3 ports */
3053 	if ( port->protocol >= USB_PROTO_3_0 ) {
3054 		portsc &= XHCI_PORTSC_PRESERVE;
3055 		portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS );
3056 		writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
3057 	}
3058 
3059 	/* Allow time for link state to stabilise */
3060 	mdelay ( XHCI_LINK_STATE_DELAY_MS );
3061 
3062 	return 0;
3063 }
3064 
3065 /**
3066  * Update root hub port speed
3067  *
3068  * @v hub		USB hub
3069  * @v port		USB port
3070  * @ret rc		Return status code
3071  */
3072 static int xhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) {
3073 	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3074 	uint32_t portsc;
3075 	unsigned int psiv;
3076 	int ccs;
3077 	int ped;
3078 	int csc;
3079 	int speed;
3080 	int rc;
3081 
3082 	/* Read port status */
3083 	portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
3084 	DBGC2 ( xhci, "XHCI %s-%d status is %08x\n",
3085 		xhci->name, port->address, portsc );
3086 	ccs = ( portsc & XHCI_PORTSC_CCS );
3087 	ped = ( portsc & XHCI_PORTSC_PED );
3088 	csc = ( portsc & XHCI_PORTSC_CSC );
3089 	psiv = XHCI_PORTSC_PSIV ( portsc );
3090 
3091 	/* Record disconnections and clear changes */
3092 	port->disconnected |= csc;
3093 	portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
3094 	writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
3095 
3096 	/* Port speed is not valid unless port is connected */
3097 	if ( ! ccs ) {
3098 		port->speed = USB_SPEED_NONE;
3099 		return 0;
3100 	}
3101 
3102 	/* For USB2 ports, the PSIV field is not valid until the port
3103 	 * completes reset and becomes enabled.
3104 	 */
3105 	if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
3106 		port->speed = USB_SPEED_FULL;
3107 		return 0;
3108 	}
3109 
3110 	/* Get port speed and map to generic USB speed */
3111 	speed = xhci_port_speed ( xhci, port->address, psiv );
3112 	if ( speed < 0 ) {
3113 		rc = speed;
3114 		return rc;
3115 	}
3116 
3117 	port->speed = speed;
3118 	return 0;
3119 }
3120 
3121 /**
3122  * Clear transaction translator buffer
3123  *
3124  * @v hub		USB hub
3125  * @v port		USB port
3126  * @v ep		USB endpoint
3127  * @ret rc		Return status code
3128  */
3129 static int xhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port,
3130 				struct usb_endpoint *ep ) {
3131 	struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
3132 
3133 	/* Should never be called; this is a root hub */
3134 	DBGC ( xhci, "XHCI %s-%d nonsensical CLEAR_TT for %s %s\n", xhci->name,
3135 	       port->address, ep->usb->name, usb_endpoint_name ( ep ) );
3136 
3137 	return -ENOTSUP;
3138 }
3139 
3140 /******************************************************************************
3141  *
3142  * PCI interface
3143  *
3144  ******************************************************************************
3145  */
3146 
3147 /** USB host controller operations */
3148 static struct usb_host_operations xhci_operations = {
3149 	.endpoint = {
3150 		.open = xhci_endpoint_open,
3151 		.close = xhci_endpoint_close,
3152 		.reset = xhci_endpoint_reset,
3153 		.mtu = xhci_endpoint_mtu,
3154 		.message = xhci_endpoint_message,
3155 		.stream = xhci_endpoint_stream,
3156 	},
3157 	.device = {
3158 		.open = xhci_device_open,
3159 		.close = xhci_device_close,
3160 		.address = xhci_device_address,
3161 	},
3162 	.bus = {
3163 		.open = xhci_bus_open,
3164 		.close = xhci_bus_close,
3165 		.poll = xhci_bus_poll,
3166 	},
3167 	.hub = {
3168 		.open = xhci_hub_open,
3169 		.close = xhci_hub_close,
3170 	},
3171 	.root = {
3172 		.open = xhci_root_open,
3173 		.close = xhci_root_close,
3174 		.enable = xhci_root_enable,
3175 		.disable = xhci_root_disable,
3176 		.speed = xhci_root_speed,
3177 		.clear_tt = xhci_root_clear_tt,
3178 	},
3179 };
3180 
3181 /**
3182  * Fix Intel PCH-specific quirks
3183  *
3184  * @v xhci		xHCI device
3185  * @v pci		PCI device
3186  */
3187 static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) {
3188 	struct xhci_pch *pch = &xhci->pch;
3189 	uint32_t xusb2pr;
3190 	uint32_t xusb2prm;
3191 	uint32_t usb3pssen;
3192 	uint32_t usb3prm;
3193 
3194 	/* Enable SuperSpeed capability.  Do this before rerouting
3195 	 * USB2 ports, so that USB3 devices connect at SuperSpeed.
3196 	 */
3197 	pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen );
3198 	pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm );
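	/* (USB3PRM is understood to be the mask of ports capable of
	 * being switched to SuperSpeed operation; bits set there but
	 * clear in USB3_PSSEN identify ports not yet enabled.)
	 */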
3199 	if ( usb3prm & ~usb3pssen ) {
3200 		DBGC ( xhci, "XHCI %s enabling SuperSpeed on ports %08x\n",
3201 		       xhci->name, ( usb3prm & ~usb3pssen ) );
3202 	}
3203 	pch->usb3pssen = usb3pssen;
3204 	usb3pssen |= usb3prm;
3205 	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen );
3206 
3207 	/* Route USB2 ports from EHCI to xHCI */
3208 	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr );
3209 	pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm );
3210 	if ( xusb2prm & ~xusb2pr ) {
3211 		DBGC ( xhci, "XHCI %s routing ports %08x from EHCI to xHCI\n",
3212 		       xhci->name, ( xusb2prm & ~xusb2pr ) );
3213 	}
3214 	pch->xusb2pr = xusb2pr;
3215 	xusb2pr |= xusb2prm;
3216 	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr );
3217 }
3218 
3219 /**
3220  * Undo Intel PCH-specific quirk fixes
3221  *
3222  * @v xhci		xHCI device
3223  * @v pci		PCI device
3224  */
3225 static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) {
3226 	struct xhci_pch *pch = &xhci->pch;
3227 
3228 	/* Restore USB2 port routing to original state */
3229 	pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr );
3230 
3231 	/* Restore SuperSpeed capability to original state */
3232 	pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen );
3233 }
3234 
3235 /**
3236  * Probe PCI device
3237  *
3238  * @v pci		PCI device
3239  * @ret rc		Return status code
3240  */
3241 static int xhci_probe ( struct pci_device *pci ) {
3242 	struct xhci_device *xhci;
3243 	struct usb_port *port;
3244 	unsigned long bar_start;
3245 	size_t bar_size;
3246 	unsigned int i;
3247 	int rc;
3248 
3249 	/* Allocate and initialise structure */
3250 	xhci = zalloc ( sizeof ( *xhci ) );
3251 	if ( ! xhci ) {
3252 		rc = -ENOMEM;
3253 		goto err_alloc;
3254 	}
3255 	xhci->name = pci->dev.name;
3256 	xhci->quirks = pci->id->driver_data;
3257 
3258 	/* Fix up PCI device */
3259 	adjust_pci_device ( pci );
3260 
3261 	/* Map registers */
3262 	bar_start = pci_bar_start ( pci, XHCI_BAR );
3263 	bar_size = pci_bar_size ( pci, XHCI_BAR );
3264 	xhci->regs = ioremap ( bar_start, bar_size );
3265 	if ( ! xhci->regs ) {
3266 		rc = -ENODEV;
3267 		goto err_ioremap;
3268 	}
3269 
3270 	/* Initialise xHCI device */
3271 	xhci_init ( xhci, xhci->regs );
3272 
3273 	/* Initialise USB legacy support and claim ownership */
3274 	xhci_legacy_init ( xhci );
3275 	xhci_legacy_claim ( xhci );
3276 
3277 	/* Fix Intel PCH-specific quirks, if applicable */
3278 	if ( xhci->quirks & XHCI_PCH )
3279 		xhci_pch_fix ( xhci, pci );
3280 
3281 	/* Reset device */
3282 	if ( ( rc = xhci_reset ( xhci ) ) != 0 )
3283 		goto err_reset;
3284 
3285 	/* Allocate USB bus */
3286 	xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU,
3287 				    &xhci_operations );
3288 	if ( ! xhci->bus ) {
3289 		rc = -ENOMEM;
3290 		goto err_alloc_bus;
3291 	}
3292 	usb_bus_set_hostdata ( xhci->bus, xhci );
3293 	usb_hub_set_drvdata ( xhci->bus->hub, xhci );
3294 
3295 	/* Set port protocols */
3296 	for ( i = 1 ; i <= xhci->ports ; i++ ) {
3297 		port = usb_port ( xhci->bus->hub, i );
3298 		port->protocol = xhci_port_protocol ( xhci, i );
3299 	}
3300 
3301 	/* Register USB bus */
3302 	if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
3303 		goto err_register;
3304 
3305 	pci_set_drvdata ( pci, xhci );
3306 	return 0;
3307 
3308 	unregister_usb_bus ( xhci->bus );
3309  err_register:
3310 	free_usb_bus ( xhci->bus );
3311  err_alloc_bus:
3312 	xhci_reset ( xhci );
3313  err_reset:
3314 	if ( xhci->quirks & XHCI_PCH )
3315 		xhci_pch_undo ( xhci, pci );
3316 	xhci_legacy_release ( xhci );
3317 	iounmap ( xhci->regs );
3318  err_ioremap:
3319 	free ( xhci );
3320  err_alloc:
3321 	return rc;
3322 }
3323 
3324 /**
3325  * Remove PCI device
3326  *
3327  * @v pci		PCI device
3328  */
3329 static void xhci_remove ( struct pci_device *pci ) {
3330 	struct xhci_device *xhci = pci_get_drvdata ( pci );
3331 	struct usb_bus *bus = xhci->bus;
3332 
3333 	unregister_usb_bus ( bus );
3334 	free_usb_bus ( bus );
3335 	xhci_reset ( xhci );
3336 	if ( xhci->quirks & XHCI_PCH )
3337 		xhci_pch_undo ( xhci, pci );
3338 	xhci_legacy_release ( xhci );
3339 	iounmap ( xhci->regs );
3340 	free ( xhci );
3341 }
3342 
3343 /** XHCI PCI device IDs */
3344 static struct pci_device_id xhci_ids[] = {
3345 	PCI_ROM ( 0x8086, 0x9d2f, "xhci-skylake", "xHCI (Skylake)", ( XHCI_PCH | XHCI_BAD_PSIV ) ),
3346 	PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ),
3347 	PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
3348 };
3349 
3350 /** XHCI PCI driver */
3351 struct pci_driver xhci_driver __pci_driver = {
3352 	.ids = xhci_ids,
3353 	.id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
3354 	.class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
3355 				PCI_CLASS_SERIAL_USB_XHCI ),
3356 	.probe = xhci_probe,
3357 	.remove = xhci_remove,
3358 };
3359 
3360 /**
3361  * Prepare for exit
3362  *
3363  * @v booting		System is shutting down for OS boot
3364  */
3365 static void xhci_shutdown ( int booting ) {
3366 	/* If we are shutting down to boot an OS, then prevent the
3367 	 * release of ownership back to BIOS.
3368 	 */
3369 	xhci_legacy_prevent_release = booting;
3370 }
3371 
3372 /** Startup/shutdown function */
3373 struct startup_fn xhci_startup __startup_fn ( STARTUP_LATE ) = {
3374 	.name = "xhci",
3375 	.shutdown = xhci_shutdown,
3376 };
3377