xref: /dragonfly/sys/bus/u4b/usb_transfer.c (revision 12bd3c8b)
1 /* $FreeBSD$ */
2 /*-
3  * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/stdint.h>
28 #include <sys/stddef.h>
29 #include <sys/param.h>
30 #include <sys/queue.h>
31 #include <sys/types.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/bus.h>
35 #include <sys/module.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/sysctl.h>
40 #include <sys/sx.h>
41 #include <sys/unistd.h>
42 #include <sys/callout.h>
43 #include <sys/malloc.h>
44 #include <sys/priv.h>
45 #include <sys/proc.h>
46 
47 #include <dev/usb/usb.h>
48 #include <dev/usb/usbdi.h>
49 #include <dev/usb/usbdi_util.h>
50 
51 #define	USB_DEBUG_VAR usb_debug
52 
53 #include <dev/usb/usb_core.h>
54 #include <dev/usb/usb_busdma.h>
55 #include <dev/usb/usb_process.h>
56 #include <dev/usb/usb_transfer.h>
57 #include <dev/usb/usb_device.h>
58 #include <dev/usb/usb_debug.h>
59 #include <dev/usb/usb_util.h>
60 
61 #include <dev/usb/usb_controller.h>
62 #include <dev/usb/usb_bus.h>
63 #include <dev/usb/usb_pf.h>
64 
65 struct usb_std_packet_size {
66 	struct {
67 		uint16_t min;		/* inclusive */
68 		uint16_t max;		/* inclusive */
69 	}	range;
70 
71 	uint16_t fixed[4];
72 };
73 
74 static usb_callback_t usb_request_callback;
75 
76 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
77 
78 	/* This transfer is used for generic control endpoint transfers */
79 
80 	[0] = {
81 		.type = UE_CONTROL,
82 		.endpoint = 0x00,	/* Control endpoint */
83 		.direction = UE_DIR_ANY,
84 		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
85 		.flags = {.proxy_buffer = 1,},
86 		.callback = &usb_request_callback,
87 		.usb_mode = USB_MODE_DUAL,	/* both modes */
88 	},
89 
90 	/* This transfer is used for generic clear stall only */
91 
92 	[1] = {
93 		.type = UE_CONTROL,
94 		.endpoint = 0x00,	/* Control pipe */
95 		.direction = UE_DIR_ANY,
96 		.bufsize = sizeof(struct usb_device_request),
97 		.callback = &usb_do_clear_stall_callback,
98 		.timeout = 1000,	/* 1 second */
99 		.interval = 50,	/* 50ms */
100 		.usb_mode = USB_MODE_HOST,
101 	},
102 };
103 
104 /* function prototypes */
105 
106 static void	usbd_update_max_frame_size(struct usb_xfer *);
107 static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
108 static void	usbd_control_transfer_init(struct usb_xfer *);
109 static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
110 static void	usb_callback_proc(struct usb_proc_msg *);
111 static void	usbd_callback_ss_done_defer(struct usb_xfer *);
112 static void	usbd_callback_wrapper(struct usb_xfer_queue *);
113 static void	usbd_transfer_start_cb(void *);
114 static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
115 static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
116 		    uint8_t type, enum usb_dev_speed speed);
117 
118 /*------------------------------------------------------------------------*
119  *	usb_request_callback
120  *------------------------------------------------------------------------*/
121 static void
122 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
123 {
124 	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
125 		usb_handle_request_callback(xfer, error);
126 	else
127 		usbd_do_request_callback(xfer, error);
128 }
129 
130 /*------------------------------------------------------------------------*
131  *	usbd_update_max_frame_size
132  *
133  * This function updates the maximum frame size so that high speed
134  * USB can transfer multiple consecutive packets.
135  *------------------------------------------------------------------------*/
136 static void
137 usbd_update_max_frame_size(struct usb_xfer *xfer)
138 {
139 	/* compute maximum frame size */
140 	/* this computation should not overflow 16-bit */
141 	/* max = 15 * 1024 */
142 
143 	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
144 }
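/*
 * Worked example (illustrative): a high-speed isochronous endpoint
 * reporting wMaxPacketSize = 0x1400 is decoded by
 * usbd_transfer_setup_sub() into max_packet_size = 1024 and
 * max_packet_count = 3, so max_frame_size becomes 3 * 1024 = 3072
 * bytes per frame.
 */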
145 
146 /*------------------------------------------------------------------------*
147  *	usbd_get_dma_delay
148  *
149  * The following function is called when we need to
150  * synchronize with DMA hardware.
151  *
152  * Returns:
153  *    0: no DMA delay required
154  * Else: milliseconds of DMA delay
155  *------------------------------------------------------------------------*/
156 usb_timeout_t
157 usbd_get_dma_delay(struct usb_device *udev)
158 {
159 	struct usb_bus_methods *mtod;
160 	uint32_t temp;
161 
162 	mtod = udev->bus->methods;
163 	temp = 0;
164 
165 	if (mtod->get_dma_delay) {
166 		(mtod->get_dma_delay) (udev, &temp);
167 		/*
168 		 * Round up and convert to milliseconds. Note that we use
169 		 * 1024 microseconds per millisecond to save a division.
170 		 */
171 		temp += 0x3FF;
172 		temp /= 0x400;
173 	}
174 	return (temp);
175 }
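/*
 * Example (assuming the controller reports its delay in
 * microseconds): a reported value of 1125 yields
 * (1125 + 0x3FF) / 0x400 = 2 milliseconds of DMA delay.
 */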
176 
177 /*------------------------------------------------------------------------*
178  *	usbd_transfer_setup_sub_malloc
179  *
180  * This function will allocate one or more DMA'able memory chunks
181  * according to the "size", "align" and "count" arguments. "ppc" is
182  * set to point to a linear array of USB page caches afterwards.
183  *
184  * Returns:
185  *    0: Success
186  * Else: Failure
187  *------------------------------------------------------------------------*/
188 #if USB_HAVE_BUSDMA
189 uint8_t
190 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
191     struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
192     usb_size_t count)
193 {
194 	struct usb_page_cache *pc;
195 	struct usb_page *pg;
196 	void *buf;
197 	usb_size_t n_dma_pc;
198 	usb_size_t n_obj;
199 	usb_size_t x;
200 	usb_size_t y;
201 	usb_size_t r;
202 	usb_size_t z;
203 
204 	USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n",
205 	    align));
206 	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
207 
208 	if (count == 0) {
209 		return (0);		/* nothing to allocate */
210 	}
211 	/*
212 	 * Make sure that the size is aligned properly.
213 	 */
214 	size = -((-size) & (-align));
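	/*
	 * Example: assuming "align" is a power of two, with size = 300
	 * and align = 64 the expression above rounds "size" up to 320.
	 */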
215 
216 	/*
217 	 * Try multi-allocation chunks to reduce the number of DMA
218 	 * allocations, since DMA allocations are slow.
219 	 */
220 	if (size >= PAGE_SIZE) {
221 		n_dma_pc = count;
222 		n_obj = 1;
223 	} else {
224 		/* compute number of objects per page */
225 		n_obj = (PAGE_SIZE / size);
226 		/*
227 		 * Compute number of DMA chunks, rounded up
228 		 * to nearest one:
229 		 */
230 		n_dma_pc = ((count + n_obj - 1) / n_obj);
231 	}
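	/*
	 * Example: with PAGE_SIZE = 4096, size = 512 and count = 10 the
	 * code above yields n_obj = 8 objects per DMA chunk and
	 * n_dma_pc = 2 DMA chunks.
	 */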
232 
233 	if (parm->buf == NULL) {
234 		/* for the future */
235 		parm->dma_page_ptr += n_dma_pc;
236 		parm->dma_page_cache_ptr += n_dma_pc;
237 		parm->dma_page_ptr += count;
238 		parm->xfer_page_cache_ptr += count;
239 		return (0);
240 	}
241 	for (x = 0; x != n_dma_pc; x++) {
242 		/* need to initialize the page cache */
243 		parm->dma_page_cache_ptr[x].tag_parent =
244 		    &parm->curr_xfer->xroot->dma_parent_tag;
245 	}
246 	for (x = 0; x != count; x++) {
247 		/* need to initialize the page cache */
248 		parm->xfer_page_cache_ptr[x].tag_parent =
249 		    &parm->curr_xfer->xroot->dma_parent_tag;
250 	}
251 
252 	if (ppc) {
253 		*ppc = parm->xfer_page_cache_ptr;
254 	}
255 	r = count;			/* set remainder count */
256 	z = n_obj * size;		/* set allocation size */
257 	pc = parm->xfer_page_cache_ptr;
258 	pg = parm->dma_page_ptr;
259 
260 	for (x = 0; x != n_dma_pc; x++) {
261 
262 		if (r < n_obj) {
263 			/* compute last remainder */
264 			z = r * size;
265 			n_obj = r;
266 		}
267 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
268 		    pg, z, align)) {
269 			return (1);	/* failure */
270 		}
271 		/* Set beginning of current buffer */
272 		buf = parm->dma_page_cache_ptr->buffer;
273 		/* Make room for one DMA page cache and one page */
274 		parm->dma_page_cache_ptr++;
275 		pg++;
276 
277 		for (y = 0; (y != n_obj); y++, r--, pc++, pg++) {
278 
279 			/* Load sub-chunk into DMA */
280 			if (usb_pc_dmamap_create(pc, size)) {
281 				return (1);	/* failure */
282 			}
283 			pc->buffer = USB_ADD_BYTES(buf, y * size);
284 			pc->page_start = pg;
285 
286 			mtx_lock(pc->tag_parent->mtx);
287 			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
288 				mtx_unlock(pc->tag_parent->mtx);
289 				return (1);	/* failure */
290 			}
291 			mtx_unlock(pc->tag_parent->mtx);
292 		}
293 	}
294 
295 	parm->xfer_page_cache_ptr = pc;
296 	parm->dma_page_ptr = pg;
297 	return (0);
298 }
299 #endif
300 
301 /*------------------------------------------------------------------------*
302  *	usbd_transfer_setup_sub - transfer setup subroutine
303  *
304  * This function must be called from the "xfer_setup" callback of the
305  * USB Host or Device controller driver when setting up a USB
306  * transfer. This function will set up the correct packet sizes,
307  * buffer sizes, flags and more, which are stored in the "usb_xfer"
308  * structure.
309  *------------------------------------------------------------------------*/
310 void
311 usbd_transfer_setup_sub(struct usb_setup_params *parm)
312 {
313 	enum {
314 		REQ_SIZE = 8,
315 		MIN_PKT = 8,
316 	};
317 	struct usb_xfer *xfer = parm->curr_xfer;
318 	const struct usb_config *setup = parm->curr_setup;
319 	struct usb_endpoint_ss_comp_descriptor *ecomp;
320 	struct usb_endpoint_descriptor *edesc;
321 	struct usb_std_packet_size std_size;
322 	usb_frcount_t n_frlengths;
323 	usb_frcount_t n_frbuffers;
324 	usb_frcount_t x;
325 	uint8_t type;
326 	uint8_t zmps;
327 
328 	/*
329 	 * Sanity check. The following parameters must be initialized before
330 	 * calling this function.
331 	 */
332 	if ((parm->hc_max_packet_size == 0) ||
333 	    (parm->hc_max_packet_count == 0) ||
334 	    (parm->hc_max_frame_size == 0)) {
335 		parm->err = USB_ERR_INVAL;
336 		goto done;
337 	}
338 	edesc = xfer->endpoint->edesc;
339 	ecomp = xfer->endpoint->ecomp;
340 
341 	type = (edesc->bmAttributes & UE_XFERTYPE);
342 
343 	xfer->flags = setup->flags;
344 	xfer->nframes = setup->frames;
345 	xfer->timeout = setup->timeout;
346 	xfer->callback = setup->callback;
347 	xfer->interval = setup->interval;
348 	xfer->endpointno = edesc->bEndpointAddress;
349 	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
350 	xfer->max_packet_count = 1;
351 	/* make a shadow copy: */
352 	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
353 
354 	parm->bufsize = setup->bufsize;
355 
356 	switch (parm->speed) {
357 	case USB_SPEED_HIGH:
358 		switch (type) {
359 		case UE_ISOCHRONOUS:
360 		case UE_INTERRUPT:
361 			xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
362 
363 			/* check for invalid max packet count */
364 			if (xfer->max_packet_count > 3)
365 				xfer->max_packet_count = 3;
366 			break;
367 		default:
368 			break;
369 		}
370 		xfer->max_packet_size &= 0x7FF;
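		/*
		 * Example: an isochronous or interrupt endpoint with
		 * wMaxPacketSize = 0x1400 decodes into
		 * max_packet_count = 3 and max_packet_size = 1024 here,
		 * i.e. up to 3072 bytes per high-speed microframe.
		 */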
371 		break;
372 	case USB_SPEED_SUPER:
373 		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
374 
375 		if (ecomp != NULL)
376 			xfer->max_packet_count += ecomp->bMaxBurst;
377 
378 		if ((xfer->max_packet_count == 0) ||
379 		    (xfer->max_packet_count > 16))
380 			xfer->max_packet_count = 16;
381 
382 		switch (type) {
383 		case UE_CONTROL:
384 			xfer->max_packet_count = 1;
385 			break;
386 		case UE_ISOCHRONOUS:
387 			if (ecomp != NULL) {
388 				uint8_t mult;
389 
390 				mult = (ecomp->bmAttributes & 3) + 1;
391 				if (mult > 3)
392 					mult = 3;
393 
394 				xfer->max_packet_count *= mult;
395 			}
396 			break;
397 		default:
398 			break;
399 		}
400 		xfer->max_packet_size &= 0x7FF;
401 		break;
402 	default:
403 		break;
404 	}
405 	/* range check "max_packet_count" */
406 
407 	if (xfer->max_packet_count > parm->hc_max_packet_count) {
408 		xfer->max_packet_count = parm->hc_max_packet_count;
409 	}
410 	/* filter "wMaxPacketSize" according to HC capabilities */
411 
412 	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
413 	    (xfer->max_packet_size == 0)) {
414 		xfer->max_packet_size = parm->hc_max_packet_size;
415 	}
416 	/* filter "wMaxPacketSize" according to standard sizes */
417 
418 	usbd_get_std_packet_size(&std_size, type, parm->speed);
419 
420 	if (std_size.range.min || std_size.range.max) {
421 
422 		if (xfer->max_packet_size < std_size.range.min) {
423 			xfer->max_packet_size = std_size.range.min;
424 		}
425 		if (xfer->max_packet_size > std_size.range.max) {
426 			xfer->max_packet_size = std_size.range.max;
427 		}
428 	} else {
429 
430 		if (xfer->max_packet_size >= std_size.fixed[3]) {
431 			xfer->max_packet_size = std_size.fixed[3];
432 		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
433 			xfer->max_packet_size = std_size.fixed[2];
434 		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
435 			xfer->max_packet_size = std_size.fixed[1];
436 		} else {
437 			/* only one possibility left */
438 			xfer->max_packet_size = std_size.fixed[0];
439 		}
440 	}
441 
442 	/* compute "max_frame_size" */
443 
444 	usbd_update_max_frame_size(xfer);
445 
446 	/* check interrupt interval and transfer pre-delay */
447 
448 	if (type == UE_ISOCHRONOUS) {
449 
450 		uint16_t frame_limit;
451 
452 		xfer->interval = 0;	/* not used, must be zero */
453 		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
454 
455 		if (xfer->timeout == 0) {
456 			/*
457 			 * set a default timeout in
458 			 * case something goes wrong!
459 			 */
460 			xfer->timeout = 1000 / 4;
461 		}
462 		switch (parm->speed) {
463 		case USB_SPEED_LOW:
464 		case USB_SPEED_FULL:
465 			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
466 			xfer->fps_shift = 0;
467 			break;
468 		default:
469 			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
470 			xfer->fps_shift = edesc->bInterval;
471 			if (xfer->fps_shift > 0)
472 				xfer->fps_shift--;
473 			if (xfer->fps_shift > 3)
474 				xfer->fps_shift = 3;
475 			if (xfer->flags.pre_scale_frames != 0)
476 				xfer->nframes <<= (3 - xfer->fps_shift);
477 			break;
478 		}
479 
480 		if (xfer->nframes > frame_limit) {
481 			/*
482 			 * this is not going to work
483 			 * across all hardware
484 			 */
485 			parm->err = USB_ERR_INVAL;
486 			goto done;
487 		}
488 		if (xfer->nframes == 0) {
489 			/*
490 			 * this is not a valid value
491 			 */
492 			parm->err = USB_ERR_ZERO_NFRAMES;
493 			goto done;
494 		}
495 	} else {
496 
497 		/*
498 		 * If a value is specified, use that; else check the
499 		 * endpoint descriptor!
500 		 */
501 		if (type == UE_INTERRUPT) {
502 
503 			uint32_t temp;
504 
505 			if (xfer->interval == 0) {
506 
507 				xfer->interval = edesc->bInterval;
508 
509 				switch (parm->speed) {
510 				case USB_SPEED_LOW:
511 				case USB_SPEED_FULL:
512 					break;
513 				default:
514 					/* 125us -> 1ms */
515 					if (xfer->interval < 4)
516 						xfer->interval = 1;
517 					else if (xfer->interval > 16)
518 						xfer->interval = (1 << (16 - 4));
519 					else
520 						xfer->interval =
521 						    (1 << (xfer->interval - 4));
522 					break;
523 				}
524 			}
525 
526 			if (xfer->interval == 0) {
527 				/*
528 				 * One millisecond is the smallest
529 				 * interval we support:
530 				 */
531 				xfer->interval = 1;
532 			}
533 
534 			xfer->fps_shift = 0;
535 			temp = 1;
536 
537 			while ((temp != 0) && (temp < xfer->interval)) {
538 				xfer->fps_shift++;
539 				temp *= 2;
540 			}
541 
542 			switch (parm->speed) {
543 			case USB_SPEED_LOW:
544 			case USB_SPEED_FULL:
545 				break;
546 			default:
547 				xfer->fps_shift += 3;
548 				break;
549 			}
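			/*
			 * Example: a high-speed interrupt endpoint with
			 * bInterval = 7 gets xfer->interval =
			 * 1 << (7 - 4) = 8 milliseconds and
			 * fps_shift = 3 + 3 = 6, i.e. 2^6 = 64
			 * microframes.
			 */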
550 		}
551 	}
552 
553 	/*
554 	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
555 	 * to be equal to zero when setting up USB transfers, since
556 	 * that would lead to a lot of extra code in the USB kernel.
557 	 */
558 
559 	if ((xfer->max_frame_size == 0) ||
560 	    (xfer->max_packet_size == 0)) {
561 
562 		zmps = 1;
563 
564 		if ((parm->bufsize <= MIN_PKT) &&
565 		    (type != UE_CONTROL) &&
566 		    (type != UE_BULK)) {
567 
568 			/* workaround */
569 			xfer->max_packet_size = MIN_PKT;
570 			xfer->max_packet_count = 1;
571 			parm->bufsize = 0;	/* automatic setup length */
572 			usbd_update_max_frame_size(xfer);
573 
574 		} else {
575 			parm->err = USB_ERR_ZERO_MAXP;
576 			goto done;
577 		}
578 
579 	} else {
580 		zmps = 0;
581 	}
582 
583 	/*
584 	 * check if we should setup a default
585 	 * length:
586 	 */
587 
588 	if (parm->bufsize == 0) {
589 
590 		parm->bufsize = xfer->max_frame_size;
591 
592 		if (type == UE_ISOCHRONOUS) {
593 			parm->bufsize *= xfer->nframes;
594 		}
595 	}
596 	/*
597 	 * check if we are about to setup a proxy
598 	 * type of buffer:
599 	 */
600 
601 	if (xfer->flags.proxy_buffer) {
602 
603 		/* round bufsize up */
604 
605 		parm->bufsize += (xfer->max_frame_size - 1);
606 
607 		if (parm->bufsize < xfer->max_frame_size) {
608 			/* length wrapped around */
609 			parm->err = USB_ERR_INVAL;
610 			goto done;
611 		}
612 		/* subtract remainder */
613 
614 		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
615 
616 		/* add length of USB device request structure, if any */
617 
618 		if (type == UE_CONTROL) {
619 			parm->bufsize += REQ_SIZE;	/* SETUP message */
620 		}
621 	}
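	/*
	 * Proxy buffer example: bufsize = 100 with max_frame_size = 64 is
	 * rounded up to 128 bytes above, plus REQ_SIZE bytes for control
	 * endpoints.
	 */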
622 	xfer->max_data_length = parm->bufsize;
623 
624 	/* Setup "n_frlengths" and "n_frbuffers" */
625 
626 	if (type == UE_ISOCHRONOUS) {
627 		n_frlengths = xfer->nframes;
628 		n_frbuffers = 1;
629 	} else {
630 
631 		if (type == UE_CONTROL) {
632 			xfer->flags_int.control_xfr = 1;
633 			if (xfer->nframes == 0) {
634 				if (parm->bufsize <= REQ_SIZE) {
635 					/*
636 					 * there will never be any data
637 					 * stage
638 					 */
639 					xfer->nframes = 1;
640 				} else {
641 					xfer->nframes = 2;
642 				}
643 			}
644 		} else {
645 			if (xfer->nframes == 0) {
646 				xfer->nframes = 1;
647 			}
648 		}
649 
650 		n_frlengths = xfer->nframes;
651 		n_frbuffers = xfer->nframes;
652 	}
653 
654 	/*
655 	 * check if we have room for the
656 	 * USB device request structure:
657 	 */
658 
659 	if (type == UE_CONTROL) {
660 
661 		if (xfer->max_data_length < REQ_SIZE) {
662 			/* length wrapped around or too small bufsize */
663 			parm->err = USB_ERR_INVAL;
664 			goto done;
665 		}
666 		xfer->max_data_length -= REQ_SIZE;
667 	}
668 	/*
669 	 * Setup "frlengths" and shadow "frlengths" for keeping the
670 	 * initial frame lengths when a USB transfer is complete. This
671 	 * information is useful when computing isochronous offsets.
672 	 */
673 	xfer->frlengths = parm->xfer_length_ptr;
674 	parm->xfer_length_ptr += 2 * n_frlengths;
675 
676 	/* setup "frbuffers" */
677 	xfer->frbuffers = parm->xfer_page_cache_ptr;
678 	parm->xfer_page_cache_ptr += n_frbuffers;
679 
680 	/* initialize max frame count */
681 	xfer->max_frame_count = xfer->nframes;
682 
683 	/*
684 	 * check if we need to setup
685 	 * a local buffer:
686 	 */
687 
688 	if (!xfer->flags.ext_buffer) {
689 
690 		/* align data */
691 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
692 
693 		if (parm->buf) {
694 
695 			xfer->local_buffer =
696 			    USB_ADD_BYTES(parm->buf, parm->size[0]);
697 
698 			usbd_xfer_set_frame_offset(xfer, 0, 0);
699 
700 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
701 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
702 			}
703 		}
704 		parm->size[0] += parm->bufsize;
705 
706 		/* align data again */
707 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
708 	}
709 	/*
710 	 * Compute maximum buffer size
711 	 */
712 
713 	if (parm->bufsize_max < parm->bufsize) {
714 		parm->bufsize_max = parm->bufsize;
715 	}
716 #if USB_HAVE_BUSDMA
717 	if (xfer->flags_int.bdma_enable) {
718 		/*
719 		 * Setup "dma_page_ptr".
720 		 *
721 		 * Proof for formula below:
722 		 *
723 		 * Assume there are three USB frames having length "a", "b" and
724 		 * "c". These USB frames will at maximum need "z"
725 		 * "usb_page" structures. "z" is given by:
726 		 *
727 		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
728 		 * ((c / USB_PAGE_SIZE) + 2);
729 		 *
730 		 * Constraining "a", "b" and "c" like this:
731 		 *
732 		 * (a + b + c) <= parm->bufsize
733 		 *
734 		 * We know that:
735 		 *
736 		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
737 		 *
738 		 * Here is the general formula:
739 		 */
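		/*
		 * Worked example: bufsize = 8192 with USB_PAGE_SIZE = 4096
		 * and n_frbuffers = 2 reserves 2 * 2 + 8192 / 4096 = 6
		 * "usb_page" structures below.
		 */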
740 		xfer->dma_page_ptr = parm->dma_page_ptr;
741 		parm->dma_page_ptr += (2 * n_frbuffers);
742 		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
743 	}
744 #endif
745 	if (zmps) {
746 		/* correct maximum data length */
747 		xfer->max_data_length = 0;
748 	}
749 	/* subtract USB frame remainder from "hc_max_frame_size" */
750 
751 	xfer->max_hc_frame_size =
752 	    (parm->hc_max_frame_size -
753 	    (parm->hc_max_frame_size % xfer->max_frame_size));
754 
755 	if (xfer->max_hc_frame_size == 0) {
756 		parm->err = USB_ERR_INVAL;
757 		goto done;
758 	}
759 
760 	/* initialize frame buffers */
761 
762 	if (parm->buf) {
763 		for (x = 0; x != n_frbuffers; x++) {
764 			xfer->frbuffers[x].tag_parent =
765 			    &xfer->xroot->dma_parent_tag;
766 #if USB_HAVE_BUSDMA
767 			if (xfer->flags_int.bdma_enable &&
768 			    (parm->bufsize_max > 0)) {
769 
770 				if (usb_pc_dmamap_create(
771 				    xfer->frbuffers + x,
772 				    parm->bufsize_max)) {
773 					parm->err = USB_ERR_NOMEM;
774 					goto done;
775 				}
776 			}
777 #endif
778 		}
779 	}
780 done:
781 	if (parm->err) {
782 		/*
783 		 * Set some dummy values so that we avoid division by zero:
784 		 */
785 		xfer->max_hc_frame_size = 1;
786 		xfer->max_frame_size = 1;
787 		xfer->max_packet_size = 1;
788 		xfer->max_data_length = 0;
789 		xfer->nframes = 0;
790 		xfer->max_frame_count = 0;
791 	}
792 }
793 
794 /*------------------------------------------------------------------------*
795  *	usbd_transfer_setup - setup an array of USB transfers
796  *
797  * NOTE: You must always call "usbd_transfer_unsetup" after calling
798  * "usbd_transfer_setup" if success was returned.
799  *
800  * The idea is that the USB device driver should pre-allocate all its
801  * transfers by one call to this function.
802  *
803  * Return values:
804  *    0: Success
805  * Else: Failure
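 *
 * Usage sketch (hypothetical driver; the "xyz" names are illustrative
 * only):
 *
 *	static const struct usb_config xyz_config[XYZ_N_TRANSFER] = {
 *		[XYZ_BULK_RD] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
 *			.callback = &xyz_bulk_read_callback,
 *		},
 *	};
 *
 *	error = usbd_transfer_setup(uaa->device, &iface_index,
 *	    sc->sc_xfer, xyz_config, XYZ_N_TRANSFER, sc, &sc->sc_mtx);
 *
 * On success the transfers must eventually be released again by
 * calling usbd_transfer_unsetup(sc->sc_xfer, XYZ_N_TRANSFER).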
806  *------------------------------------------------------------------------*/
807 usb_error_t
808 usbd_transfer_setup(struct usb_device *udev,
809     const uint8_t *ifaces, struct usb_xfer **ppxfer,
810     const struct usb_config *setup_start, uint16_t n_setup,
811     void *priv_sc, struct mtx *xfer_mtx)
812 {
813 	struct usb_xfer dummy;
814 	struct usb_setup_params parm;
815 	const struct usb_config *setup_end = setup_start + n_setup;
816 	const struct usb_config *setup;
817 	struct usb_endpoint *ep;
818 	struct usb_xfer_root *info;
819 	struct usb_xfer *xfer;
820 	void *buf = NULL;
821 	uint16_t n;
822 	uint16_t refcount;
823 
824 	parm.err = 0;
825 	refcount = 0;
826 	info = NULL;
827 
828 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
829 	    "usbd_transfer_setup can sleep!");
830 
831 	/* do some checking first */
832 
833 	if (n_setup == 0) {
834 		DPRINTFN(6, "setup array has zero length!\n");
835 		return (USB_ERR_INVAL);
836 	}
837 	if (ifaces == NULL) {
838 		DPRINTFN(6, "ifaces array is NULL!\n");
839 		return (USB_ERR_INVAL);
840 	}
841 	if (xfer_mtx == NULL) {
842 		DPRINTFN(6, "using global lock\n");
843 		xfer_mtx = &Giant;
844 	}
845 	/* sanity checks */
846 	for (setup = setup_start, n = 0;
847 	    setup != setup_end; setup++, n++) {
848 		if (setup->bufsize == (usb_frlength_t)-1) {
849 			parm.err = USB_ERR_BAD_BUFSIZE;
850 			DPRINTF("invalid bufsize\n");
851 		}
852 		if (setup->callback == NULL) {
853 			parm.err = USB_ERR_NO_CALLBACK;
854 			DPRINTF("no callback\n");
855 		}
856 		ppxfer[n] = NULL;
857 	}
858 
859 	if (parm.err) {
860 		goto done;
861 	}
862 	memset(&parm, 0, sizeof(parm));
863 
864 	parm.udev = udev;
865 	parm.speed = usbd_get_speed(udev);
866 	parm.hc_max_packet_count = 1;
867 
868 	if (parm.speed >= USB_SPEED_MAX) {
869 		parm.err = USB_ERR_INVAL;
870 		goto done;
871 	}
872 	/* setup all transfers */
873 
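	/*
	 * The loop below runs at most twice: the first pass uses a dummy
	 * "usb_xfer" to compute the required memory size and the
	 * "parm.size[]" offsets and then allocates one zeroed memory
	 * block; the second pass runs with "buf" set and initializes the
	 * real structures inside that block.
	 */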
874 	while (1) {
875 
876 		if (buf) {
877 			/*
878 			 * Initialize the "usb_xfer_root" structure,
879 			 * which is common for all our USB transfers.
880 			 */
881 			info = USB_ADD_BYTES(buf, 0);
882 
883 			info->memory_base = buf;
884 			info->memory_size = parm.size[0];
885 
886 #if USB_HAVE_BUSDMA
887 			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]);
888 			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]);
889 #endif
890 			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]);
891 			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]);
892 
893 			cv_init(&info->cv_drain, "WDRAIN");
894 
895 			info->xfer_mtx = xfer_mtx;
896 #if USB_HAVE_BUSDMA
897 			usb_dma_tag_setup(&info->dma_parent_tag,
898 			    parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag,
899 			    xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max);
900 #endif
901 
902 			info->bus = udev->bus;
903 			info->udev = udev;
904 
905 			TAILQ_INIT(&info->done_q.head);
906 			info->done_q.command = &usbd_callback_wrapper;
907 #if USB_HAVE_BUSDMA
908 			TAILQ_INIT(&info->dma_q.head);
909 			info->dma_q.command = &usb_bdma_work_loop;
910 #endif
911 			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
912 			info->done_m[0].xroot = info;
913 			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
914 			info->done_m[1].xroot = info;
915 
916 			/*
917 			 * In device side mode control endpoint
918 			 * requests need to run from a separate
919 			 * context, else there is a chance of
920 			 * deadlock!
921 			 */
922 			if (setup_start == usb_control_ep_cfg)
923 				info->done_p =
924 				    &udev->bus->control_xfer_proc;
925 			else if (xfer_mtx == &Giant)
926 				info->done_p =
927 				    &udev->bus->giant_callback_proc;
928 			else
929 				info->done_p =
930 				    &udev->bus->non_giant_callback_proc;
931 		}
932 		/* reset sizes */
933 
934 		parm.size[0] = 0;
935 		parm.buf = buf;
936 		parm.size[0] += sizeof(info[0]);
937 
938 		for (setup = setup_start, n = 0;
939 		    setup != setup_end; setup++, n++) {
940 
941 			/* skip USB transfers without callbacks: */
942 			if (setup->callback == NULL) {
943 				continue;
944 			}
945 			/* see if there is a matching endpoint */
946 			ep = usbd_get_endpoint(udev,
947 			    ifaces[setup->if_index], setup);
948 
949 			if ((ep == NULL) || (ep->methods == NULL)) {
950 				if (setup->flags.no_pipe_ok)
951 					continue;
952 				if ((setup->usb_mode != USB_MODE_DUAL) &&
953 				    (setup->usb_mode != udev->flags.usb_mode))
954 					continue;
955 				parm.err = USB_ERR_NO_PIPE;
956 				goto done;
957 			}
958 
959 			/* align data properly */
960 			parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
961 
962 			/* store current setup pointer */
963 			parm.curr_setup = setup;
964 
965 			if (buf) {
966 				/*
967 				 * Common initialization of the
968 				 * "usb_xfer" structure.
969 				 */
970 				xfer = USB_ADD_BYTES(buf, parm.size[0]);
971 				xfer->address = udev->address;
972 				xfer->priv_sc = priv_sc;
973 				xfer->xroot = info;
974 
975 				usb_callout_init_mtx(&xfer->timeout_handle,
976 				    &udev->bus->bus_mtx, 0);
977 			} else {
978 				/*
979 				 * Set up a dummy xfer, since we are
980 				 * writing to the "usb_xfer"
981 				 * structure pointed to by "xfer"
982 				 * before we have allocated any
983 				 * memory:
984 				 */
985 				xfer = &dummy;
986 				memset(&dummy, 0, sizeof(dummy));
987 				refcount++;
988 			}
989 
990 			/* set transfer endpoint pointer */
991 			xfer->endpoint = ep;
992 
993 			parm.size[0] += sizeof(xfer[0]);
994 			parm.methods = xfer->endpoint->methods;
995 			parm.curr_xfer = xfer;
996 
997 			/*
998 			 * Call the Host or Device controller transfer
999 			 * setup routine:
1000 			 */
1001 			(udev->bus->methods->xfer_setup) (&parm);
1002 
1003 			/* check for error */
1004 			if (parm.err)
1005 				goto done;
1006 
1007 			if (buf) {
1008 				/*
1009 				 * Increment the endpoint refcount. This
1010 				 * basically prevents setting a new
1011 				 * configuration and alternate setting
1012 				 * when USB transfers are in use on
1013 				 * the given interface. Search the USB
1014 				 * code for "endpoint->refcount_alloc" if you
1015 				 * want more information.
1016 				 */
1017 				USB_BUS_LOCK(info->bus);
1018 				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1019 					parm.err = USB_ERR_INVAL;
1020 
1021 				xfer->endpoint->refcount_alloc++;
1022 
1023 				if (xfer->endpoint->refcount_alloc == 0)
1024 					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1025 				USB_BUS_UNLOCK(info->bus);
1026 
1027 				/*
1028 				 * Whenever we set ppxfer[] then we
1029 				 * also need to increment the
1030 				 * "setup_refcount":
1031 				 */
1032 				info->setup_refcount++;
1033 
1034 				/*
1035 				 * Transfer is successfully setup and
1036 				 * can be used:
1037 				 */
1038 				ppxfer[n] = xfer;
1039 			}
1040 
1041 			/* check for error */
1042 			if (parm.err)
1043 				goto done;
1044 		}
1045 
1046 		if (buf || parm.err) {
1047 			goto done;
1048 		}
1049 		if (refcount == 0) {
1050 			/* no transfers - nothing to do ! */
1051 			goto done;
1052 		}
1053 		/* align data properly */
1054 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1055 
1056 		/* store offset temporarily */
1057 		parm.size[1] = parm.size[0];
1058 
1059 		/*
1060 		 * The number of DMA tags required depends on
1061 		 * the number of endpoints. The current estimate
1062 		 * for maximum number of DMA tags per endpoint
1063 		 * is two.
1064 		 */
1065 		parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX);
1066 
1067 		/*
1068 		 * DMA tags for QH, TD, Data and more.
1069 		 */
1070 		parm.dma_tag_max += 8;
1071 
1072 		parm.dma_tag_p += parm.dma_tag_max;
1073 
1074 		parm.size[0] += ((uint8_t *)parm.dma_tag_p) -
1075 		    ((uint8_t *)0);
1076 
1077 		/* align data properly */
1078 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1079 
1080 		/* store offset temporarily */
1081 		parm.size[3] = parm.size[0];
1082 
1083 		parm.size[0] += ((uint8_t *)parm.dma_page_ptr) -
1084 		    ((uint8_t *)0);
1085 
1086 		/* align data properly */
1087 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1088 
1089 		/* store offset temporarily */
1090 		parm.size[4] = parm.size[0];
1091 
1092 		parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) -
1093 		    ((uint8_t *)0);
1094 
1095 		/* store end offset temporarily */
1096 		parm.size[5] = parm.size[0];
1097 
1098 		parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) -
1099 		    ((uint8_t *)0);
1100 
1101 		/* store end offset temporarily */
1102 
1103 		parm.size[2] = parm.size[0];
1104 
1105 		/* align data properly */
1106 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1107 
1108 		parm.size[6] = parm.size[0];
1109 
1110 		parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) -
1111 		    ((uint8_t *)0);
1112 
1113 		/* align data properly */
1114 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1115 
1116 		/* allocate zeroed memory */
1117 		buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO);
1118 
1119 		if (buf == NULL) {
1120 			parm.err = USB_ERR_NOMEM;
1121 			DPRINTFN(0, "cannot allocate memory block for "
1122 			    "configuration (%d bytes)\n",
1123 			    parm.size[0]);
1124 			goto done;
1125 		}
1126 		parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]);
1127 		parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]);
1128 		parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]);
1129 		parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]);
1130 		parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]);
1131 	}
1132 
1133 done:
1134 	if (buf) {
1135 		if (info->setup_refcount == 0) {
1136 			/*
1137 			 * "usbd_transfer_unsetup_sub" will unlock
1138 			 * the bus mutex before returning !
1139 			 */
1140 			USB_BUS_LOCK(info->bus);
1141 
1142 			/* something went wrong */
1143 			usbd_transfer_unsetup_sub(info, 0);
1144 		}
1145 	}
1146 	if (parm.err) {
1147 		usbd_transfer_unsetup(ppxfer, n_setup);
1148 	}
1149 	return (parm.err);
1150 }
1151 
1152 /*------------------------------------------------------------------------*
1153  *	usbd_transfer_unsetup_sub - factored out code
1154  *------------------------------------------------------------------------*/
1155 static void
1156 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1157 {
1158 #if USB_HAVE_BUSDMA
1159 	struct usb_page_cache *pc;
1160 #endif
1161 
1162 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1163 
1164 	/* wait for any outstanding DMA operations */
1165 
1166 	if (needs_delay) {
1167 		usb_timeout_t temp;
1168 		temp = usbd_get_dma_delay(info->udev);
1169 		if (temp != 0) {
1170 			usb_pause_mtx(&info->bus->bus_mtx,
1171 			    USB_MS_TO_TICKS(temp));
1172 		}
1173 	}
1174 
1175 	/* make sure that our done messages are not queued anywhere */
1176 	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1177 
1178 	USB_BUS_UNLOCK(info->bus);
1179 
1180 #if USB_HAVE_BUSDMA
1181 	/* free DMA'able memory, if any */
1182 	pc = info->dma_page_cache_start;
1183 	while (pc != info->dma_page_cache_end) {
1184 		usb_pc_free_mem(pc);
1185 		pc++;
1186 	}
1187 
1188 	/* free DMA maps in all "xfer->frbuffers" */
1189 	pc = info->xfer_page_cache_start;
1190 	while (pc != info->xfer_page_cache_end) {
1191 		usb_pc_dmamap_destroy(pc);
1192 		pc++;
1193 	}
1194 
1195 	/* free all DMA tags */
1196 	usb_dma_tag_unsetup(&info->dma_parent_tag);
1197 #endif
1198 
1199 	cv_destroy(&info->cv_drain);
1200 
1201 	/*
1202 	 * free the "memory_base" last, since the "info" structure is
1203 	 * contained within the "memory_base"!
1204 	 */
1205 	free(info->memory_base, M_USB);
1206 }
1207 
1208 /*------------------------------------------------------------------------*
1209  *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1210  *
1211  * NOTE: All USB transfers in progress will get called back passing
1212  * the error code "USB_ERR_CANCELLED" before this function
1213  * returns.
1214  *------------------------------------------------------------------------*/
1215 void
1216 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1217 {
1218 	struct usb_xfer *xfer;
1219 	struct usb_xfer_root *info;
1220 	uint8_t needs_delay = 0;
1221 
1222 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1223 	    "usbd_transfer_unsetup can sleep!");
1224 
1225 	while (n_setup--) {
1226 		xfer = pxfer[n_setup];
1227 
1228 		if (xfer == NULL)
1229 			continue;
1230 
1231 		info = xfer->xroot;
1232 
1233 		USB_XFER_LOCK(xfer);
1234 		USB_BUS_LOCK(info->bus);
1235 
1236 		/*
1237 		 * HINT: when you start/stop a transfer, it might be a
1238 		 * good idea to directly use the "pxfer[]" structure:
1239 		 *
1240 		 * usbd_transfer_start(sc->pxfer[0]);
1241 		 * usbd_transfer_stop(sc->pxfer[0]);
1242 		 *
1243 		 * That way, if your code has many parts that will not
1244 		 * stop running under the same lock, in other words
1245 		 * "xfer_mtx", the usbd_transfer_start and
1246 		 * usbd_transfer_stop functions will simply return
1247 		 * when they detect a NULL pointer argument.
1248 		 *
1249 		 * To avoid any races we clear the "pxfer[]" pointer
1250 		 * while holding the private mutex of the driver:
1251 		 */
1252 		pxfer[n_setup] = NULL;
1253 
1254 		USB_BUS_UNLOCK(info->bus);
1255 		USB_XFER_UNLOCK(xfer);
1256 
1257 		usbd_transfer_drain(xfer);
1258 
1259 #if USB_HAVE_BUSDMA
1260 		if (xfer->flags_int.bdma_enable)
1261 			needs_delay = 1;
1262 #endif
1263 		/*
1264 		 * NOTE: default endpoint does not have an
1265 		 * interface, even if endpoint->iface_index == 0
1266 		 */
1267 		USB_BUS_LOCK(info->bus);
1268 		xfer->endpoint->refcount_alloc--;
1269 		USB_BUS_UNLOCK(info->bus);
1270 
1271 		usb_callout_drain(&xfer->timeout_handle);
1272 
1273 		USB_BUS_LOCK(info->bus);
1274 
1275 		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1276 		    "reference count\n"));
1277 
1278 		info->setup_refcount--;
1279 
1280 		if (info->setup_refcount == 0) {
1281 			usbd_transfer_unsetup_sub(info,
1282 			    needs_delay);
1283 		} else {
1284 			USB_BUS_UNLOCK(info->bus);
1285 		}
1286 	}
1287 }
1288 
1289 /*------------------------------------------------------------------------*
1290  *	usbd_control_transfer_init - factored out code
1291  *
1292  * In USB Device Mode we have to wait for the SETUP packet which
1293  * contains the "struct usb_device_request" structure, before we can
1294  * transfer any data. In USB Host Mode we already have the SETUP
1295  * packet at the moment the USB transfer is started. This leads us to
1296  * having to set up the USB transfer at two different places in
1297  * time. This function just contains factored out control transfer
1298  * initialisation code, so that we don't duplicate the code.
1299  *------------------------------------------------------------------------*/
1300 static void
1301 usbd_control_transfer_init(struct usb_xfer *xfer)
1302 {
1303 	struct usb_device_request req;
1304 
1305 	/* copy out the USB request header */
1306 
1307 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1308 
1309 	/* setup remainder */
1310 
1311 	xfer->flags_int.control_rem = UGETW(req.wLength);
1312 
1313 	/* copy direction to endpoint variable */
1314 
1315 	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1316 	xfer->endpointno |=
1317 	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1318 }
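/*
 * Example: a host-mode control read with wLength = 18 (e.g. a
 * GET_DESCRIPTOR request) starts out with "control_rem" set to 18.
 * Frame 0 carries the 8-byte SETUP packet and frame 1 carries up to
 * 18 bytes of data; once "control_rem" reaches zero and
 * "manual_status" is not set, the STATUS stage is executed.
 */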
1319 
1320 /*------------------------------------------------------------------------*
1321  *	usbd_setup_ctrl_transfer
1322  *
1323  * This function handles initialisation of control transfers. Control
1324  * transfers are special in that they can both transmit
1325  * and receive data.
1326  *
1327  * Return values:
1328  *    0: Success
1329  * Else: Failure
1330  *------------------------------------------------------------------------*/
1331 static int
1332 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1333 {
1334 	usb_frlength_t len;
1335 
1336 	/* Check for control endpoint stall */
1337 	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1338 		/* the control transfer is no longer active */
1339 		xfer->flags_int.control_stall = 1;
1340 		xfer->flags_int.control_act = 0;
1341 	} else {
1342 		/* don't stall control transfer by default */
1343 		xfer->flags_int.control_stall = 0;
1344 	}
1345 
1346 	/* Check for invalid number of frames */
1347 	if (xfer->nframes > 2) {
1348 		/*
1349 		 * If you need to split a control transfer, you
1350 		 * have to do one part at a time. Only with
1351 		 * non-control transfers can you do multiple
1352 		 * parts at a time.
1353 		 */
1354 		DPRINTFN(0, "Too many frames: %u\n",
1355 		    (unsigned int)xfer->nframes);
1356 		goto error;
1357 	}
1358 
1359 	/*
1360          * Check if there is a control
1361          * transfer in progress:
1362          */
1363 	if (xfer->flags_int.control_act) {
1364 
1365 		if (xfer->flags_int.control_hdr) {
1366 
1367 			/* clear send header flag */
1368 
1369 			xfer->flags_int.control_hdr = 0;
1370 
1371 			/* setup control transfer */
1372 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1373 				usbd_control_transfer_init(xfer);
1374 			}
1375 		}
1376 		/* get data length */
1377 
1378 		len = xfer->sumlen;
1379 
1380 	} else {
1381 
1382 		/* the size of the SETUP structure is hardcoded ! */
1383 
1384 		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1385 			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1386 			    xfer->frlengths[0], sizeof(struct
1387 			    usb_device_request));
1388 			goto error;
1389 		}
1390 		/* check USB mode */
1391 		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1392 
1393 			/* check number of frames */
1394 			if (xfer->nframes != 1) {
1395 				/*
1396 			         * We need to receive the setup
1397 			         * message first so that we know the
1398 			         * data direction!
1399 			         */
1400 				DPRINTF("Misconfigured transfer\n");
1401 				goto error;
1402 			}
1403 			/*
1404 			 * Set a dummy "control_rem" value.  This
1405 			 * variable will be overwritten later by a
1406 			 * call to "usbd_control_transfer_init()" !
1407 			 */
1408 			xfer->flags_int.control_rem = 0xFFFF;
1409 		} else {
1410 
1411 			/* setup "endpoint" and "control_rem" */
1412 
1413 			usbd_control_transfer_init(xfer);
1414 		}
1415 
1416 		/* set transfer-header flag */
1417 
1418 		xfer->flags_int.control_hdr = 1;
1419 
1420 		/* get data length */
1421 
1422 		len = (xfer->sumlen - sizeof(struct usb_device_request));
1423 	}
1424 
1425 	/* check if there is a length mismatch */
1426 
1427 	if (len > xfer->flags_int.control_rem) {
1428 		DPRINTFN(0, "Length (%d) greater than "
1429 		    "remaining length (%d)\n", len,
1430 		    xfer->flags_int.control_rem);
1431 		goto error;
1432 	}
1433 	/* check if we are doing a short transfer */
1434 
1435 	if (xfer->flags.force_short_xfer) {
1436 		xfer->flags_int.control_rem = 0;
1437 	} else {
1438 		if ((len != xfer->max_data_length) &&
1439 		    (len != xfer->flags_int.control_rem) &&
1440 		    (xfer->nframes != 1)) {
1441 			DPRINTFN(0, "Short control transfer without "
1442 			    "force_short_xfer set\n");
1443 			goto error;
1444 		}
1445 		xfer->flags_int.control_rem -= len;
1446 	}
1447 
1448 	/* the status part is executed when "control_act" is 0 */
1449 
1450 	if ((xfer->flags_int.control_rem > 0) ||
1451 	    (xfer->flags.manual_status)) {
1452 		/* don't execute the STATUS stage yet */
1453 		xfer->flags_int.control_act = 1;
1454 
1455 		/* sanity check */
1456 		if ((!xfer->flags_int.control_hdr) &&
1457 		    (xfer->nframes == 1)) {
1458 			/*
1459 		         * This is not a valid operation!
1460 		         */
1461 			DPRINTFN(0, "Invalid parameter "
1462 			    "combination\n");
1463 			goto error;
1464 		}
1465 	} else {
1466 		/* time to execute the STATUS stage */
1467 		xfer->flags_int.control_act = 0;
1468 	}
1469 	return (0);			/* success */
1470 
1471 error:
1472 	return (1);			/* failure */
1473 }
1474 
1475 /*------------------------------------------------------------------------*
1476  *	usbd_transfer_submit - start USB hardware for the given transfer
1477  *
1478  * This function should only be called from the USB callback.
1479  *------------------------------------------------------------------------*/
1480 void
1481 usbd_transfer_submit(struct usb_xfer *xfer)
1482 {
1483 	struct usb_xfer_root *info;
1484 	struct usb_bus *bus;
1485 	usb_frcount_t x;
1486 
1487 	info = xfer->xroot;
1488 	bus = info->bus;
1489 
1490 	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1491 	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1492 	    "read" : "write");
1493 
1494 #ifdef USB_DEBUG
1495 	if (USB_DEBUG_VAR > 0) {
1496 		USB_BUS_LOCK(bus);
1497 
1498 		usb_dump_endpoint(xfer->endpoint);
1499 
1500 		USB_BUS_UNLOCK(bus);
1501 	}
1502 #endif
1503 
1504 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1505 	USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1506 
1507 	/* Only open the USB transfer once! */
1508 	if (!xfer->flags_int.open) {
1509 		xfer->flags_int.open = 1;
1510 
1511 		DPRINTF("open\n");
1512 
1513 		USB_BUS_LOCK(bus);
1514 		(xfer->endpoint->methods->open) (xfer);
1515 		USB_BUS_UNLOCK(bus);
1516 	}
1517 	/* set "transferring" flag */
1518 	xfer->flags_int.transferring = 1;
1519 
1520 #if USB_HAVE_POWERD
1521 	/* increment power reference */
1522 	usbd_transfer_power_ref(xfer, 1);
1523 #endif
1524 	/*
1525 	 * Check if the transfer is waiting on a queue, most
1526 	 * frequently the "done_q":
1527 	 */
1528 	if (xfer->wait_queue) {
1529 		USB_BUS_LOCK(bus);
1530 		usbd_transfer_dequeue(xfer);
1531 		USB_BUS_UNLOCK(bus);
1532 	}
1533 	/* clear "did_dma_delay" flag */
1534 	xfer->flags_int.did_dma_delay = 0;
1535 
1536 	/* clear "did_close" flag */
1537 	xfer->flags_int.did_close = 0;
1538 
1539 #if USB_HAVE_BUSDMA
1540 	/* clear "bdma_setup" flag */
1541 	xfer->flags_int.bdma_setup = 0;
1542 #endif
1543 	/* by default we cannot cancel any USB transfer immediately */
1544 	xfer->flags_int.can_cancel_immed = 0;
1545 
1546 	/* clear lengths and frame counts by default */
1547 	xfer->sumlen = 0;
1548 	xfer->actlen = 0;
1549 	xfer->aframes = 0;
1550 
1551 	/* clear any previous errors */
1552 	xfer->error = 0;
1553 
1554 	/* Check if the device is still alive */
1555 	if (info->udev->state < USB_STATE_POWERED) {
1556 		USB_BUS_LOCK(bus);
1557 		/*
1558 		 * Must return cancelled error code else
1559 		 * device drivers can hang.
1560 		 */
1561 		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1562 		USB_BUS_UNLOCK(bus);
1563 		return;
1564 	}
1565 
1566 	/* sanity check */
1567 	if (xfer->nframes == 0) {
1568 		if (xfer->flags.stall_pipe) {
1569 			/*
1570 			 * Special case - want to stall without transferring
1571 			 * any data:
1572 			 */
1573 			DPRINTF("xfer=%p nframes=0: stall "
1574 			    "or clear stall!\n", xfer);
1575 			USB_BUS_LOCK(bus);
1576 			xfer->flags_int.can_cancel_immed = 1;
1577 			/* start the transfer */
1578 			usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer);
1579 			USB_BUS_UNLOCK(bus);
1580 			return;
1581 		}
1582 		USB_BUS_LOCK(bus);
1583 		usbd_transfer_done(xfer, USB_ERR_INVAL);
1584 		USB_BUS_UNLOCK(bus);
1585 		return;
1586 	}
1587 	/* compute some variables */
1588 
1589 	for (x = 0; x != xfer->nframes; x++) {
1590 		/* make a copy of the frlengths[] */
1591 		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1592 		/* compute total transfer length */
1593 		xfer->sumlen += xfer->frlengths[x];
1594 		if (xfer->sumlen < xfer->frlengths[x]) {
1595 			/* length wrapped around */
1596 			USB_BUS_LOCK(bus);
1597 			usbd_transfer_done(xfer, USB_ERR_INVAL);
1598 			USB_BUS_UNLOCK(bus);
1599 			return;
1600 		}
1601 	}
1602 
1603 	/* clear some internal flags */
1604 
1605 	xfer->flags_int.short_xfer_ok = 0;
1606 	xfer->flags_int.short_frames_ok = 0;
1607 
1608 	/* check if this is a control transfer */
1609 
1610 	if (xfer->flags_int.control_xfr) {
1611 
1612 		if (usbd_setup_ctrl_transfer(xfer)) {
1613 			USB_BUS_LOCK(bus);
1614 			usbd_transfer_done(xfer, USB_ERR_STALLED);
1615 			USB_BUS_UNLOCK(bus);
1616 			return;
1617 		}
1618 	}
1619 	/*
1620 	 * Setup filtered version of some transfer flags,
1621 	 * in case of data read direction
1622 	 */
1623 	if (USB_GET_DATA_ISREAD(xfer)) {
1624 
1625 		if (xfer->flags.short_frames_ok) {
1626 			xfer->flags_int.short_xfer_ok = 1;
1627 			xfer->flags_int.short_frames_ok = 1;
1628 		} else if (xfer->flags.short_xfer_ok) {
1629 			xfer->flags_int.short_xfer_ok = 1;
1630 
1631 			/* check for control transfer */
1632 			if (xfer->flags_int.control_xfr) {
1633 				/*
1634 				 * 1) Control transfers do not support
1635 				 * reception of multiple short USB
1636 				 * frames in host mode and device side
1637 				 * mode, with exception of:
1638 				 *
1639 				 * 2) Due to sometimes buggy device
1640 				 * side firmware we need to do a
1641 				 * STATUS stage in case of short
1642 				 * control transfers in USB host mode.
1643 				 * The STATUS stage then becomes the
1644 				 * "alt_next" to the DATA stage.
1645 				 */
1646 				xfer->flags_int.short_frames_ok = 1;
1647 			}
1648 		}
1649 	}
1650 	/*
1651 	 * Check if BUS-DMA support is enabled and try to load virtual
1652 	 * buffers into DMA, if any:
1653 	 */
1654 #if USB_HAVE_BUSDMA
1655 	if (xfer->flags_int.bdma_enable) {
1656 		/* insert the USB transfer last in the BUS-DMA queue */
1657 		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1658 		return;
1659 	}
1660 #endif
1661 	/*
1662 	 * Enter the USB transfer into the Host Controller or
1663 	 * Device Controller schedule:
1664 	 */
1665 	usbd_pipe_enter(xfer);
1666 }
1667 
1668 /*------------------------------------------------------------------------*
1669  *	usbd_pipe_enter - factored out code
1670  *------------------------------------------------------------------------*/
1671 void
1672 usbd_pipe_enter(struct usb_xfer *xfer)
1673 {
1674 	struct usb_endpoint *ep;
1675 
1676 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1677 
1678 	USB_BUS_LOCK(xfer->xroot->bus);
1679 
1680 	ep = xfer->endpoint;
1681 
1682 	DPRINTF("enter\n");
1683 
1684 	/* enter the transfer */
1685 	(ep->methods->enter) (xfer);
1686 
1687 	xfer->flags_int.can_cancel_immed = 1;
1688 
1689 	/* check for transfer error */
1690 	if (xfer->error) {
1691 		/* some error has happened */
1692 		usbd_transfer_done(xfer, 0);
1693 		USB_BUS_UNLOCK(xfer->xroot->bus);
1694 		return;
1695 	}
1696 
1697 	/* start the transfer */
1698 	usb_command_wrapper(&ep->endpoint_q, xfer);
1699 	USB_BUS_UNLOCK(xfer->xroot->bus);
1700 }
1701 
1702 /*------------------------------------------------------------------------*
1703  *	usbd_transfer_start - start an USB transfer
1704  *
1705  * NOTE: Calling this function more than one time will only
1706  *       result in a single transfer start, until the USB transfer
1707  *       completes.
1708  *------------------------------------------------------------------------*/
1709 void
1710 usbd_transfer_start(struct usb_xfer *xfer)
1711 {
1712 	if (xfer == NULL) {
1713 		/* transfer is gone */
1714 		return;
1715 	}
1716 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1717 
1718 	/* mark the USB transfer started */
1719 
1720 	if (!xfer->flags_int.started) {
1721 		/* lock the BUS lock to avoid races updating flags_int */
1722 		USB_BUS_LOCK(xfer->xroot->bus);
1723 		xfer->flags_int.started = 1;
1724 		USB_BUS_UNLOCK(xfer->xroot->bus);
1725 	}
1726 	/* check if the USB transfer callback is already transferring */
1727 
1728 	if (xfer->flags_int.transferring) {
1729 		return;
1730 	}
1731 	USB_BUS_LOCK(xfer->xroot->bus);
1732 	/* call the USB transfer callback */
1733 	usbd_callback_ss_done_defer(xfer);
1734 	USB_BUS_UNLOCK(xfer->xroot->bus);
1735 }
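/*
 * Typical usage sketch (the "xyz" names are illustrative only): a
 * driver calls usbd_transfer_start(sc->sc_xfer[XYZ_BULK_RD]) while
 * holding its private mutex; the framework then invokes the
 * configured callback, which sets up the frame lengths and calls
 * usbd_transfer_submit() in the USB_ST_SETUP state.
 */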
1736 
1737 /*------------------------------------------------------------------------*
1738  *	usbd_transfer_stop - stop an USB transfer
1739  *
1740  * NOTE: Calling this function more than one time will only
1741  *       result in a single transfer stop.
1742  * NOTE: When this function returns it is not safe to free nor
1743  *       reuse any DMA buffers. See "usbd_transfer_drain()".
1744  *------------------------------------------------------------------------*/
1745 void
1746 usbd_transfer_stop(struct usb_xfer *xfer)
1747 {
1748 	struct usb_endpoint *ep;
1749 
1750 	if (xfer == NULL) {
1751 		/* transfer is gone */
1752 		return;
1753 	}
1754 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1755 
1756 	/* check if the USB transfer was ever opened */
1757 
1758 	if (!xfer->flags_int.open) {
1759 		if (xfer->flags_int.started) {
1760 			/* nothing to do except clearing the "started" flag */
1761 			/* lock the BUS lock to avoid races updating flags_int */
1762 			USB_BUS_LOCK(xfer->xroot->bus);
1763 			xfer->flags_int.started = 0;
1764 			USB_BUS_UNLOCK(xfer->xroot->bus);
1765 		}
1766 		return;
1767 	}
1768 	/* try to stop the current USB transfer */
1769 
1770 	USB_BUS_LOCK(xfer->xroot->bus);
1771 	/* override any previous error */
1772 	xfer->error = USB_ERR_CANCELLED;
1773 
1774 	/*
1775 	 * Clear "open" and "started" when both private and USB lock
1776 	 * is locked so that we don't get a race updating "flags_int"
1777 	 */
1778 	xfer->flags_int.open = 0;
1779 	xfer->flags_int.started = 0;
1780 
1781 	/*
1782 	 * Check if we can cancel the USB transfer immediately.
1783 	 */
1784 	if (xfer->flags_int.transferring) {
1785 		if (xfer->flags_int.can_cancel_immed &&
1786 		    (!xfer->flags_int.did_close)) {
1787 			DPRINTF("close\n");
1788 			/*
1789 			 * The following will lead to an USB_ERR_CANCELLED
1790 			 * error code being passed to the USB callback.
1791 			 */
1792 			(xfer->endpoint->methods->close) (xfer);
1793 			/* only close once */
1794 			xfer->flags_int.did_close = 1;
1795 		} else {
1796 			/* need to wait for the next done callback */
1797 		}
1798 	} else {
1799 		DPRINTF("close\n");
1800 
1801 		/* close here and now */
1802 		(xfer->endpoint->methods->close) (xfer);
1803 
1804 		/*
1805 		 * Any additional DMA delay is done by
1806 		 * "usbd_transfer_unsetup()".
1807 		 */
1808 
1809 		/*
1810 		 * Special case. Check if we need to restart a blocked
1811 		 * endpoint.
1812 		 */
1813 		ep = xfer->endpoint;
1814 
1815 		/*
1816 		 * If the current USB transfer is completing we need
1817 		 * to start the next one:
1818 		 */
1819 		if (ep->endpoint_q.curr == xfer) {
1820 			usb_command_wrapper(&ep->endpoint_q, NULL);
1821 		}
1822 	}
1823 
1824 	USB_BUS_UNLOCK(xfer->xroot->bus);
1825 }
1826 
1827 /*------------------------------------------------------------------------*
1828  *	usbd_transfer_pending
1829  *
1830  * This function will check if a USB transfer is pending, which is a
1831  * little bit complicated!
1832  * Return values:
1833  * 0: Not pending
1834  * 1: Pending: The USB transfer will receive a callback in the future.
1835  *------------------------------------------------------------------------*/
1836 uint8_t
1837 usbd_transfer_pending(struct usb_xfer *xfer)
1838 {
1839 	struct usb_xfer_root *info;
1840 	struct usb_xfer_queue *pq;
1841 
1842 	if (xfer == NULL) {
1843 		/* transfer is gone */
1844 		return (0);
1845 	}
1846 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1847 
1848 	if (xfer->flags_int.transferring) {
1849 		/* trivial case */
1850 		return (1);
1851 	}
1852 	USB_BUS_LOCK(xfer->xroot->bus);
1853 	if (xfer->wait_queue) {
1854 		/* we are waiting on a queue somewhere */
1855 		USB_BUS_UNLOCK(xfer->xroot->bus);
1856 		return (1);
1857 	}
1858 	info = xfer->xroot;
1859 	pq = &info->done_q;
1860 
1861 	if (pq->curr == xfer) {
1862 		/* we are currently scheduled for callback */
1863 		USB_BUS_UNLOCK(xfer->xroot->bus);
1864 		return (1);
1865 	}
1866 	/* we are not pending */
1867 	USB_BUS_UNLOCK(xfer->xroot->bus);
1868 	return (0);
1869 }
1870 
1871 /*------------------------------------------------------------------------*
1872  *	usbd_transfer_drain
1873  *
1874  * This function will stop the USB transfer and wait for any
1875  * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1876  * are loaded into DMA can safely be freed or reused after this
1877  * function has returned.
1878  *------------------------------------------------------------------------*/
1879 void
1880 usbd_transfer_drain(struct usb_xfer *xfer)
1881 {
1882 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1883 	    "usbd_transfer_drain can sleep!");
1884 
1885 	if (xfer == NULL) {
1886 		/* transfer is gone */
1887 		return;
1888 	}
1889 	if (xfer->xroot->xfer_mtx != &Giant) {
1890 		USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1891 	}
1892 	USB_XFER_LOCK(xfer);
1893 
1894 	usbd_transfer_stop(xfer);
1895 
1896 	while (usbd_transfer_pending(xfer) ||
1897 	    xfer->flags_int.doing_callback) {
1898 
1899 		/*
1900 		 * It is allowed that the callback can drop its
1901 		 * transfer mutex. In that case checking only
1902 		 * "usbd_transfer_pending()" is not enough to tell if
1903 		 * the USB transfer is fully drained. We also need to
1904 		 * check the internal "doing_callback" flag.
1905 		 */
1906 		xfer->flags_int.draining = 1;
1907 
1908 		/*
1909 		 * Wait until the current outstanding USB
1910 		 * transfer is complete !
1911 		 */
1912 		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
1913 	}
1914 	USB_XFER_UNLOCK(xfer);
1915 }
1916 
1917 struct usb_page_cache *
1918 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
1919 {
1920 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1921 
1922 	return (&xfer->frbuffers[frindex]);
1923 }
1924 
1925 /*------------------------------------------------------------------------*
1926  *	usbd_xfer_get_fps_shift
1927  *
1928  * The following function is only useful for isochronous transfers. It
1929  * returns how many times the frame execution rate has been shifted
1930  * down.
1931  *
1932  * Return value:
1933  * Success: 0..3
1934  * Failure: 0
1935  *------------------------------------------------------------------------*/
1936 uint8_t
1937 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
1938 {
1939 	return (xfer->fps_shift);
1940 }
1941 
1942 usb_frlength_t
1943 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
1944 {
1945 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1946 
1947 	return (xfer->frlengths[frindex]);
1948 }
1949 
1950 /*------------------------------------------------------------------------*
1951  *	usbd_xfer_set_frame_data
1952  *
1953  * This function sets the pointer of the buffer that should be
1954  * loaded directly into DMA for the given USB frame. Passing "ptr"
1955  * equal to NULL while the corresponding "frlength" is greater
1956  * than zero gives undefined results!
1957  *------------------------------------------------------------------------*/
1958 void
1959 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1960     void *ptr, usb_frlength_t len)
1961 {
1962 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1963 
1964 	/* set virtual address to load and length */
1965 	xfer->frbuffers[frindex].buffer = ptr;
1966 	usbd_xfer_set_frame_len(xfer, frindex, len);
1967 }
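
/*
 * Illustrative sketch, not part of this file: a typical USB_ST_SETUP
 * case that loads an external driver buffer into frame zero, assuming
 * the transfer was configured with the "ext_buffer" flag and that
 * "sc->sc_tx_buf" and "len" are hypothetical driver variables:
 *
 *	usbd_xfer_set_frame_data(xfer, 0, sc->sc_tx_buf, len);
 *	usbd_xfer_set_frames(xfer, 1);
 *	usbd_transfer_submit(xfer);
 */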
1968 
1969 void
1970 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1971     void **ptr, int *len)
1972 {
1973 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1974 
1975 	if (ptr != NULL)
1976 		*ptr = xfer->frbuffers[frindex].buffer;
1977 	if (len != NULL)
1978 		*len = xfer->frlengths[frindex];
1979 }
1980 
1981 /*------------------------------------------------------------------------*
1982  *	usbd_xfer_old_frame_length
1983  *
1984  * This function returns the framelength of the given frame at the
1985  * time the transfer was submitted. This function can be used to
1986  * compute the starting data pointer of the next isochronous frame
1987  * when an isochronous transfer has completed.
1988  *------------------------------------------------------------------------*/
1989 usb_frlength_t
1990 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
1991 {
1992 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1993 
1994 	return (xfer->frlengths[frindex + xfer->max_frame_count]);
1995 }
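
/*
 * Illustrative sketch, not part of this file: walking the frames of a
 * completed isochronous transfer where the frame buffers were laid out
 * back to back in a hypothetical "sc->sc_rx_buf". The number of bytes
 * actually received is given by usbd_xfer_frame_len() while the
 * originally submitted length gives the spacing to the next frame:
 *
 *	int x, len, nframes;
 *	uint8_t *ptr;
 *
 *	usbd_xfer_status(xfer, NULL, NULL, NULL, &nframes);
 *	ptr = sc->sc_rx_buf;
 *	for (x = 0; x != nframes; x++) {
 *		len = usbd_xfer_frame_len(xfer, x);
 *		// "len" valid bytes start at "ptr"
 *		ptr += usbd_xfer_old_frame_length(xfer, x);
 *	}
 */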
1996 
1997 void
1998 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
1999     int *nframes)
2000 {
2001 	if (actlen != NULL)
2002 		*actlen = xfer->actlen;
2003 	if (sumlen != NULL)
2004 		*sumlen = xfer->sumlen;
2005 	if (aframes != NULL)
2006 		*aframes = xfer->aframes;
2007 	if (nframes != NULL)
2008 		*nframes = xfer->nframes;
2009 }
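
/*
 * Illustrative sketch, not part of this file: typical use at the top
 * of a transfer callback to fetch the actual transfer length:
 *
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *	switch (USB_GET_STATE(xfer)) {
 *	case USB_ST_TRANSFERRED:
 *		// "actlen" bytes were actually transferred
 *		break;
 *	default:
 *		break;
 *	}
 */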
2010 
2011 /*------------------------------------------------------------------------*
2012  *	usbd_xfer_set_frame_offset
2013  *
2014  * This function sets the frame data buffer offset relative to the beginning
2015  * of the USB DMA buffer allocated for this USB transfer.
2016  *------------------------------------------------------------------------*/
2017 void
2018 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2019     usb_frcount_t frindex)
2020 {
2021 	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2022 	    "when the USB buffer is external\n"));
2023 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2024 
2025 	/* set virtual address to load */
2026 	xfer->frbuffers[frindex].buffer =
2027 	    USB_ADD_BYTES(xfer->local_buffer, offset);
2028 }
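
/*
 * Illustrative sketch, not part of this file: carving two frames out
 * of the transfer's local buffer for a raw control transfer, assuming
 * "pc", "req", "data" and "len" are hypothetical driver variables and
 * the buffer was sized for both the request and the data stage:
 *
 *	pc = usbd_xfer_get_frame(xfer, 0);
 *	usbd_copy_in(pc, 0, &req, sizeof(req));
 *	usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
 *
 *	usbd_xfer_set_frame_offset(xfer, sizeof(req), 1);
 *	pc = usbd_xfer_get_frame(xfer, 1);
 *	usbd_copy_in(pc, 0, data, len);
 *	usbd_xfer_set_frame_len(xfer, 1, len);
 *	usbd_xfer_set_frames(xfer, 2);
 */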
2029 
2030 void
2031 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2032 {
2033 	xfer->interval = i;
2034 }
2035 
2036 void
2037 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2038 {
2039 	xfer->timeout = t;
2040 }
2041 
2042 void
2043 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2044 {
2045 	xfer->nframes = n;
2046 }
2047 
2048 usb_frcount_t
2049 usbd_xfer_max_frames(struct usb_xfer *xfer)
2050 {
2051 	return (xfer->max_frame_count);
2052 }
2053 
2054 usb_frlength_t
2055 usbd_xfer_max_len(struct usb_xfer *xfer)
2056 {
2057 	return (xfer->max_data_length);
2058 }
2059 
2060 usb_frlength_t
2061 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2062 {
2063 	return (xfer->max_frame_size);
2064 }
2065 
2066 void
2067 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2068     usb_frlength_t len)
2069 {
2070 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2071 
2072 	xfer->frlengths[frindex] = len;
2073 }
2074 
2075 /*------------------------------------------------------------------------*
2076  *	usb_callback_proc - factored out code
2077  *
2078  * This function performs USB callbacks.
2079  *------------------------------------------------------------------------*/
2080 static void
2081 usb_callback_proc(struct usb_proc_msg *_pm)
2082 {
2083 	struct usb_done_msg *pm = (void *)_pm;
2084 	struct usb_xfer_root *info = pm->xroot;
2085 
2086 	/* Change locking order */
2087 	USB_BUS_UNLOCK(info->bus);
2088 
2089 	/*
2090 	 * We exploit the fact that the mutex is the same for all
2091 	 * callbacks that will be called from this thread:
2092 	 */
2093 	mtx_lock(info->xfer_mtx);
2094 	USB_BUS_LOCK(info->bus);
2095 
2096 	/* Continue where we lost track */
2097 	usb_command_wrapper(&info->done_q,
2098 	    info->done_q.curr);
2099 
2100 	mtx_unlock(info->xfer_mtx);
2101 }
2102 
2103 /*------------------------------------------------------------------------*
2104  *	usbd_callback_ss_done_defer
2105  *
2106  * This function will defer the start, stop and done callback to the
2107  * correct thread.
2108  *------------------------------------------------------------------------*/
2109 static void
2110 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2111 {
2112 	struct usb_xfer_root *info = xfer->xroot;
2113 	struct usb_xfer_queue *pq = &info->done_q;
2114 
2115 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2116 
2117 	if (pq->curr != xfer) {
2118 		usbd_transfer_enqueue(pq, xfer);
2119 	}
2120 	if (!pq->recurse_1) {
2121 
2122 		/*
2123 	         * We have to postpone the callback because we would
2124 	         * otherwise get a Lock Order Reversal, LOR, if we
2125 	         * proceeded.
2126 	         */
2127 		if (usb_proc_msignal(info->done_p,
2128 		    &info->done_m[0], &info->done_m[1])) {
2129 			/* ignore */
2130 		}
2131 	} else {
2132 		/* clear second recurse flag */
2133 		pq->recurse_2 = 0;
2134 	}
2135 	return;
2136 
2137 }
2138 
2139 /*------------------------------------------------------------------------*
2140  *	usbd_callback_wrapper
2141  *
2142  * This is a wrapper for USB callbacks. This wrapper does some
2143  * auto-magic things like figuring out if we can call the callback
2144  * directly from the current context or if we need to wake up the
2145  * interrupt process.
2146  *------------------------------------------------------------------------*/
2147 static void
2148 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2149 {
2150 	struct usb_xfer *xfer = pq->curr;
2151 	struct usb_xfer_root *info = xfer->xroot;
2152 
2153 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2154 	if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2155 		/*
2156 	       	 * Cases that end up here:
2157 		 *
2158 		 * 5) HW interrupt done callback or other source.
2159 		 */
2160 		DPRINTFN(3, "case 5\n");
2161 
2162 		/*
2163 	         * We have to postpone the callback because we would
2164 	         * otherwise get a Lock Order Reversal, LOR, if we
2165 	         * proceeded.
2166 	         */
2167 		if (usb_proc_msignal(info->done_p,
2168 		    &info->done_m[0], &info->done_m[1])) {
2169 			/* ignore */
2170 		}
2171 		return;
2172 	}
2173 	/*
2174 	 * Cases that end up here:
2175 	 *
2176 	 * 1) We are starting a transfer
2177 	 * 2) We are prematurely calling back a transfer
2178 	 * 3) We are stopping a transfer
2179 	 * 4) We are doing an ordinary callback
2180 	 */
2181 	DPRINTFN(3, "case 1-4\n");
2182 	/* get next USB transfer in the queue */
2183 	info->done_q.curr = NULL;
2184 
2185 	/* set flag in case of drain */
2186 	xfer->flags_int.doing_callback = 1;
2187 
2188 	USB_BUS_UNLOCK(info->bus);
2189 	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2190 
2191 	/* set correct USB state for callback */
2192 	if (!xfer->flags_int.transferring) {
2193 		xfer->usb_state = USB_ST_SETUP;
2194 		if (!xfer->flags_int.started) {
2195 			/* we got stopped before we even got started */
2196 			USB_BUS_LOCK(info->bus);
2197 			goto done;
2198 		}
2199 	} else {
2200 
2201 		if (usbd_callback_wrapper_sub(xfer)) {
2202 			/* the callback has been deferred */
2203 			USB_BUS_LOCK(info->bus);
2204 			goto done;
2205 		}
2206 #if USB_HAVE_POWERD
2207 		/* decrement power reference */
2208 		usbd_transfer_power_ref(xfer, -1);
2209 #endif
2210 		xfer->flags_int.transferring = 0;
2211 
2212 		if (xfer->error) {
2213 			xfer->usb_state = USB_ST_ERROR;
2214 		} else {
2215 			/* set transferred state */
2216 			xfer->usb_state = USB_ST_TRANSFERRED;
2217 #if USB_HAVE_BUSDMA
2218 			/* sync DMA memory, if any */
2219 			if (xfer->flags_int.bdma_enable &&
2220 			    (!xfer->flags_int.bdma_no_post_sync)) {
2221 				usb_bdma_post_sync(xfer);
2222 			}
2223 #endif
2224 		}
2225 	}
2226 
2227 #if USB_HAVE_PF
2228 	if (xfer->usb_state != USB_ST_SETUP)
2229 		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2230 #endif
2231 	/* call processing routine */
2232 	(xfer->callback) (xfer, xfer->error);
2233 
2234 	/* pickup the USB mutex again */
2235 	USB_BUS_LOCK(info->bus);
2236 
2237 	/*
2238 	 * Check if we got started again after we got cancelled, but
2239 	 * before we managed to do the callback.
2240 	 */
2241 	if ((!xfer->flags_int.open) &&
2242 	    (xfer->flags_int.started) &&
2243 	    (xfer->usb_state == USB_ST_ERROR)) {
2244 		/* clear flag in case of drain */
2245 		xfer->flags_int.doing_callback = 0;
2246 		/* try to loop, but not recursively */
2247 		usb_command_wrapper(&info->done_q, xfer);
2248 		return;
2249 	}
2250 
2251 done:
2252 	/* clear flag in case of drain */
2253 	xfer->flags_int.doing_callback = 0;
2254 
2255 	/*
2256 	 * Check if we are draining.
2257 	 */
2258 	if (xfer->flags_int.draining &&
2259 	    (!xfer->flags_int.transferring)) {
2260 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2261 		xfer->flags_int.draining = 0;
2262 		cv_broadcast(&info->cv_drain);
2263 	}
2264 
2265 	/* do the next callback, if any */
2266 	usb_command_wrapper(&info->done_q,
2267 	    info->done_q.curr);
2268 }
2269 
2270 /*------------------------------------------------------------------------*
2271  *	usb_dma_delay_done_cb
2272  *
2273  * This function is called when the DMA delay has elapsed, and
2274  * will make sure that the callback is called to complete the USB
2275  * transfer. This code path is usually only used when there is a USB
2276  * error like USB_ERR_CANCELLED.
2277  *------------------------------------------------------------------------*/
2278 void
2279 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2280 {
2281 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2282 
2283 	DPRINTFN(3, "Completed %p\n", xfer);
2284 
2285 	/* queue callback for execution, again */
2286 	usbd_transfer_done(xfer, 0);
2287 }
2288 
2289 /*------------------------------------------------------------------------*
2290  *	usbd_transfer_dequeue
2291  *
2292  *  - This function is used to remove a USB transfer from a USB
2293  *  transfer queue.
2294  *
2295  *  - This function can be called multiple times in a row.
2296  *------------------------------------------------------------------------*/
2297 void
2298 usbd_transfer_dequeue(struct usb_xfer *xfer)
2299 {
2300 	struct usb_xfer_queue *pq;
2301 
2302 	pq = xfer->wait_queue;
2303 	if (pq) {
2304 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2305 		xfer->wait_queue = NULL;
2306 	}
2307 }
2308 
2309 /*------------------------------------------------------------------------*
2310  *	usbd_transfer_enqueue
2311  *
2312  *  - This function is used to insert a USB transfer into a USB
2313  *  transfer queue.
2314  *
2315  *  - This function can be called multiple times in a row.
2316  *------------------------------------------------------------------------*/
2317 void
2318 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2319 {
2320 	/*
2321 	 * Insert the USB transfer into the queue, if it is not
2322 	 * already on a USB transfer queue:
2323 	 */
2324 	if (xfer->wait_queue == NULL) {
2325 		xfer->wait_queue = pq;
2326 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2327 	}
2328 }
2329 
2330 /*------------------------------------------------------------------------*
2331  *	usbd_transfer_done
2332  *
2333  *  - This function is used to remove a USB transfer from the busdma,
2334  *  pipe or interrupt queue.
2335  *
2336  *  - This function is used to queue the USB transfer on the done
2337  *  queue.
2338  *
2339  *  - This function is used to stop any USB transfer timeouts.
2340  *------------------------------------------------------------------------*/
2341 void
2342 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2343 {
2344 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2345 
2346 	DPRINTF("err=%s\n", usbd_errstr(error));
2347 
2348 	/*
2349 	 * If we are not transferring then just return.
2350 	 * This can happen during transfer cancel.
2351 	 */
2352 	if (!xfer->flags_int.transferring) {
2353 		DPRINTF("not transferring\n");
2354 		/* end of control transfer, if any */
2355 		xfer->flags_int.control_act = 0;
2356 		return;
2357 	}
2358 	/* only set transfer error if not already set */
2359 	if (!xfer->error) {
2360 		xfer->error = error;
2361 	}
2362 	/* stop any callouts */
2363 	usb_callout_stop(&xfer->timeout_handle);
2364 
2365 	/*
2366 	 * If we are waiting on a queue, just remove the USB transfer
2367 	 * from the queue, if any. We should have the required locks
2368 	 * locked to do the remove when this function is called.
2369 	 */
2370 	usbd_transfer_dequeue(xfer);
2371 
2372 #if USB_HAVE_BUSDMA
2373 	if (mtx_owned(xfer->xroot->xfer_mtx)) {
2374 		struct usb_xfer_queue *pq;
2375 
2376 		/*
2377 		 * If the private USB lock is not locked, then we assume
2378 		 * that the BUS-DMA load stage has been passed:
2379 		 */
2380 		pq = &xfer->xroot->dma_q;
2381 
2382 		if (pq->curr == xfer) {
2383 			/* start the next BUS-DMA load, if any */
2384 			usb_command_wrapper(pq, NULL);
2385 		}
2386 	}
2387 #endif
2388 	/* keep some statistics */
2389 	if (xfer->error) {
2390 		xfer->xroot->bus->stats_err.uds_requests
2391 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2392 	} else {
2393 		xfer->xroot->bus->stats_ok.uds_requests
2394 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2395 	}
2396 
2397 	/* call the USB transfer callback */
2398 	usbd_callback_ss_done_defer(xfer);
2399 }
2400 
2401 /*------------------------------------------------------------------------*
2402  *	usbd_transfer_start_cb
2403  *
2404  * This function is called to start the USB transfer when
2405  * "xfer->interval" is greater than zero, and the endpoint type is
2406  * BULK or CONTROL.
2407  *------------------------------------------------------------------------*/
2408 static void
2409 usbd_transfer_start_cb(void *arg)
2410 {
2411 	struct usb_xfer *xfer = arg;
2412 	struct usb_endpoint *ep = xfer->endpoint;
2413 
2414 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2415 
2416 	DPRINTF("start\n");
2417 
2418 #if USB_HAVE_PF
2419 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2420 #endif
2421 	/* start USB transfer, if no error */
2422 	if (xfer->error == 0)
2423 		(ep->methods->start) (xfer);
2424 
2425 	xfer->flags_int.can_cancel_immed = 1;
2426 
2427 	/* check for error */
2428 	if (xfer->error) {
2429 		/* some error has happened */
2430 		usbd_transfer_done(xfer, 0);
2431 	}
2432 }
2433 
2434 /*------------------------------------------------------------------------*
2435  *	usbd_xfer_set_stall
2436  *
2437  * This function is used to set the stall flag outside the
2438  * callback. This function is NULL safe.
2439  *------------------------------------------------------------------------*/
2440 void
2441 usbd_xfer_set_stall(struct usb_xfer *xfer)
2442 {
2443 	if (xfer == NULL) {
2444 		/* tearing down */
2445 		return;
2446 	}
2447 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2448 
2449 	/* avoid any races by locking the USB mutex */
2450 	USB_BUS_LOCK(xfer->xroot->bus);
2451 	xfer->flags.stall_pipe = 1;
2452 	USB_BUS_UNLOCK(xfer->xroot->bus);
2453 }
2454 
2455 int
2456 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2457 {
2458 	return (xfer->endpoint->is_stalled);
2459 }
2460 
2461 /*------------------------------------------------------------------------*
2462  *	usbd_transfer_clear_stall
2463  *
2464  * This function is used to clear the stall flag outside the
2465  * callback. This function is NULL safe.
2466  *------------------------------------------------------------------------*/
2467 void
2468 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2469 {
2470 	if (xfer == NULL) {
2471 		/* tearing down */
2472 		return;
2473 	}
2474 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2475 
2476 	/* avoid any races by locking the USB mutex */
2477 	USB_BUS_LOCK(xfer->xroot->bus);
2478 
2479 	xfer->flags.stall_pipe = 0;
2480 
2481 	USB_BUS_UNLOCK(xfer->xroot->bus);
2482 }
2483 
2484 /*------------------------------------------------------------------------*
2485  *	usbd_pipe_start
2486  *
2487  * This function is used to add a USB transfer to the pipe transfer list.
2488  *------------------------------------------------------------------------*/
2489 void
2490 usbd_pipe_start(struct usb_xfer_queue *pq)
2491 {
2492 	struct usb_endpoint *ep;
2493 	struct usb_xfer *xfer;
2494 	uint8_t type;
2495 
2496 	xfer = pq->curr;
2497 	ep = xfer->endpoint;
2498 
2499 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2500 
2501 	/*
2502 	 * If the endpoint is already stalled we do nothing!
2503 	 */
2504 	if (ep->is_stalled) {
2505 		return;
2506 	}
2507 	/*
2508 	 * Check if we are supposed to stall the endpoint:
2509 	 */
2510 	if (xfer->flags.stall_pipe) {
2511 		struct usb_device *udev;
2512 		struct usb_xfer_root *info;
2513 
2514 		/* clear stall command */
2515 		xfer->flags.stall_pipe = 0;
2516 
2517 		/* get pointer to USB device */
2518 		info = xfer->xroot;
2519 		udev = info->udev;
2520 
2521 		/*
2522 		 * Only stall BULK and INTERRUPT endpoints.
2523 		 */
2524 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2525 		if ((type == UE_BULK) ||
2526 		    (type == UE_INTERRUPT)) {
2527 			uint8_t did_stall;
2528 
2529 			did_stall = 1;
2530 
2531 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2532 				(udev->bus->methods->set_stall) (
2533 				    udev, NULL, ep, &did_stall);
2534 			} else if (udev->ctrl_xfer[1]) {
2535 				info = udev->ctrl_xfer[1]->xroot;
2536 				usb_proc_msignal(
2537 				    &info->bus->non_giant_callback_proc,
2538 				    &udev->cs_msg[0], &udev->cs_msg[1]);
2539 			} else {
2540 				/* should not happen */
2541 				DPRINTFN(0, "No stall handler\n");
2542 			}
2543 			/*
2544 			 * Check if we should stall. Some USB hardware
2545 			 * handles set- and clear-stall in hardware.
2546 			 */
2547 			if (did_stall) {
2548 				/*
2549 				 * The transfer will be continued when
2550 				 * the clear-stall control endpoint
2551 				 * message is received.
2552 				 */
2553 				ep->is_stalled = 1;
2554 				return;
2555 			}
2556 		} else if (type == UE_ISOCHRONOUS) {
2557 
2558 			/*
2559 			 * Make sure any FIFO overflow or other FIFO
2560 			 * error conditions go away by resetting the
2561 			 * endpoint FIFO through the clear stall
2562 			 * method.
2563 			 */
2564 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2565 				(udev->bus->methods->clear_stall) (udev, ep);
2566 			}
2567 		}
2568 	}
2569 	/* Set or clear stall complete - special case */
2570 	if (xfer->nframes == 0) {
2571 		/* we are complete */
2572 		xfer->aframes = 0;
2573 		usbd_transfer_done(xfer, 0);
2574 		return;
2575 	}
2576 	/*
2577 	 * Handled cases:
2578 	 *
2579 	 * 1) Start the first transfer queued.
2580 	 *
2581 	 * 2) Re-start the current USB transfer.
2582 	 */
2583 	/*
2584 	 * Check if there should be any
2585 	 * pre-transfer start delay:
2586 	 */
2587 	if (xfer->interval > 0) {
2588 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2589 		if ((type == UE_BULK) ||
2590 		    (type == UE_CONTROL)) {
2591 			usbd_transfer_timeout_ms(xfer,
2592 			    &usbd_transfer_start_cb,
2593 			    xfer->interval);
2594 			return;
2595 		}
2596 	}
2597 	DPRINTF("start\n");
2598 
2599 #if USB_HAVE_PF
2600 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2601 #endif
2602 	/* start USB transfer, if no error */
2603 	if (xfer->error == 0)
2604 		(ep->methods->start) (xfer);
2605 
2606 	xfer->flags_int.can_cancel_immed = 1;
2607 
2608 	/* check for error */
2609 	if (xfer->error) {
2610 		/* some error has happened */
2611 		usbd_transfer_done(xfer, 0);
2612 	}
2613 }
2614 
2615 /*------------------------------------------------------------------------*
2616  *	usbd_transfer_timeout_ms
2617  *
2618  * This function is used to setup a timeout on the given USB
2619  * transfer. If the timeout has been deferred the callback given by
2620  * "cb" will get called after "ms" milliseconds.
2621  *------------------------------------------------------------------------*/
2622 void
2623 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2624     void (*cb) (void *arg), usb_timeout_t ms)
2625 {
2626 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2627 
2628 	/* defer delay */
2629 	usb_callout_reset(&xfer->timeout_handle,
2630 	    USB_MS_TO_TICKS(ms), cb, xfer);
2631 }
2632 
2633 /*------------------------------------------------------------------------*
2634  *	usbd_callback_wrapper_sub
2635  *
2636  *  - This function will update variables in a USB transfer after
2637  *  the USB transfer is complete.
2638  *
2639  *  - This function is used to start the next USB transfer on the
2640  *  ep transfer queue, if any.
2641  *
2642  * NOTE: In some special cases the USB transfer will not be removed from
2643  * the pipe queue, but remain first. To enforce USB transfer removal call
2644  * this function passing the error code "USB_ERR_CANCELLED".
2645  *
2646  * Return values:
2647  * 0: Success.
2648  * Else: The callback has been deferred.
2649  *------------------------------------------------------------------------*/
2650 static uint8_t
2651 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2652 {
2653 	struct usb_endpoint *ep;
2654 	struct usb_bus *bus;
2655 	usb_frcount_t x;
2656 
2657 	bus = xfer->xroot->bus;
2658 
2659 	if ((!xfer->flags_int.open) &&
2660 	    (!xfer->flags_int.did_close)) {
2661 		DPRINTF("close\n");
2662 		USB_BUS_LOCK(bus);
2663 		(xfer->endpoint->methods->close) (xfer);
2664 		USB_BUS_UNLOCK(bus);
2665 		/* only close once */
2666 		xfer->flags_int.did_close = 1;
2667 		return (1);		/* wait for new callback */
2668 	}
2669 	/*
2670 	 * If we have a non-hardware induced error we
2671 	 * need to do the DMA delay!
2672 	 */
2673 	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2674 	    (xfer->error == USB_ERR_CANCELLED ||
2675 	    xfer->error == USB_ERR_TIMEOUT ||
2676 	    bus->methods->start_dma_delay != NULL)) {
2677 
2678 		usb_timeout_t temp;
2679 
2680 		/* only delay once */
2681 		xfer->flags_int.did_dma_delay = 1;
2682 
2683 		/* we can not cancel this delay */
2684 		xfer->flags_int.can_cancel_immed = 0;
2685 
2686 		temp = usbd_get_dma_delay(xfer->xroot->udev);
2687 
2688 		DPRINTFN(3, "DMA delay, %u ms, "
2689 		    "on %p\n", temp, xfer);
2690 
2691 		if (temp != 0) {
2692 			USB_BUS_LOCK(bus);
2693 			/*
2694 			 * Some hardware solutions have dedicated
2695 			 * events when it is safe to free DMA'ed
2696 			 * memory. For the other hardware platforms we
2697 			 * use a static delay.
2698 			 */
2699 			if (bus->methods->start_dma_delay != NULL) {
2700 				(bus->methods->start_dma_delay) (xfer);
2701 			} else {
2702 				usbd_transfer_timeout_ms(xfer,
2703 				    (void *)&usb_dma_delay_done_cb, temp);
2704 			}
2705 			USB_BUS_UNLOCK(bus);
2706 			return (1);	/* wait for new callback */
2707 		}
2708 	}
2709 	/* check actual number of frames */
2710 	if (xfer->aframes > xfer->nframes) {
2711 		if (xfer->error == 0) {
2712 			panic("%s: actual number of frames, %d, is "
2713 			    "greater than initial number of frames, %d\n",
2714 			    __FUNCTION__, xfer->aframes, xfer->nframes);
2715 		} else {
2716 			/* just set some valid value */
2717 			xfer->aframes = xfer->nframes;
2718 		}
2719 	}
2720 	/* compute actual length */
2721 	xfer->actlen = 0;
2722 
2723 	for (x = 0; x != xfer->aframes; x++) {
2724 		xfer->actlen += xfer->frlengths[x];
2725 	}
2726 
2727 	/*
2728 	 * Frames that were not transferred get zero actual length in
2729 	 * case the USB device driver does not check the actual number
2730 	 * of frames transferred, "xfer->aframes":
2731 	 */
2732 	for (; x < xfer->nframes; x++) {
2733 		usbd_xfer_set_frame_len(xfer, x, 0);
2734 	}
2735 
2736 	/* check actual length */
2737 	if (xfer->actlen > xfer->sumlen) {
2738 		if (xfer->error == 0) {
2739 			panic("%s: actual length, %d, is greater than "
2740 			    "initial length, %d\n",
2741 			    __FUNCTION__, xfer->actlen, xfer->sumlen);
2742 		} else {
2743 			/* just set some valid value */
2744 			xfer->actlen = xfer->sumlen;
2745 		}
2746 	}
2747 	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2748 	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2749 	    xfer->aframes, xfer->nframes);
2750 
2751 	if (xfer->error) {
2752 		/* end of control transfer, if any */
2753 		xfer->flags_int.control_act = 0;
2754 
2755 		/* check if we should block the execution queue */
2756 		if ((xfer->error != USB_ERR_CANCELLED) &&
2757 		    (xfer->flags.pipe_bof)) {
2758 			DPRINTFN(2, "xfer=%p: Block On Failure "
2759 			    "on endpoint=%p\n", xfer, xfer->endpoint);
2760 			goto done;
2761 		}
2762 	} else {
2763 		/* check for short transfers */
2764 		if (xfer->actlen < xfer->sumlen) {
2765 
2766 			/* end of control transfer, if any */
2767 			xfer->flags_int.control_act = 0;
2768 
2769 			if (!xfer->flags_int.short_xfer_ok) {
2770 				xfer->error = USB_ERR_SHORT_XFER;
2771 				if (xfer->flags.pipe_bof) {
2772 					DPRINTFN(2, "xfer=%p: Block On Failure on "
2773 					    "Short Transfer on endpoint %p.\n",
2774 					    xfer, xfer->endpoint);
2775 					goto done;
2776 				}
2777 			}
2778 		} else {
2779 			/*
2780 			 * Check if we are in the middle of a
2781 			 * control transfer:
2782 			 */
2783 			if (xfer->flags_int.control_act) {
2784 				DPRINTFN(5, "xfer=%p: Control transfer "
2785 				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2786 				goto done;
2787 			}
2788 		}
2789 	}
2790 
2791 	ep = xfer->endpoint;
2792 
2793 	/*
2794 	 * If the current USB transfer is completing we need to start the
2795 	 * next one:
2796 	 */
2797 	USB_BUS_LOCK(bus);
2798 	if (ep->endpoint_q.curr == xfer) {
2799 		usb_command_wrapper(&ep->endpoint_q, NULL);
2800 
2801 		if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) {
2802 			/* there is another USB transfer waiting */
2803 		} else {
2804 			/* this is the last USB transfer */
2805 			/* clear isochronous sync flag */
2806 			xfer->endpoint->is_synced = 0;
2807 		}
2808 	}
2809 	USB_BUS_UNLOCK(bus);
2810 done:
2811 	return (0);
2812 }
2813 
2814 /*------------------------------------------------------------------------*
2815  *	usb_command_wrapper
2816  *
2817  * This function is used to execute commands non-recursively on a USB
2818  * transfer.
2819  *------------------------------------------------------------------------*/
2820 void
2821 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2822 {
2823 	if (xfer) {
2824 		/*
2825 		 * If the transfer is not already processing,
2826 		 * queue it!
2827 		 */
2828 		if (pq->curr != xfer) {
2829 			usbd_transfer_enqueue(pq, xfer);
2830 			if (pq->curr != NULL) {
2831 				/* something is already processing */
2832 				DPRINTFN(6, "busy %p\n", pq->curr);
2833 				return;
2834 			}
2835 		}
2836 	} else {
2837 		/* Get next element in queue */
2838 		pq->curr = NULL;
2839 	}
2840 
2841 	if (!pq->recurse_1) {
2842 
2843 		do {
2844 
2845 			/* set both recurse flags */
2846 			pq->recurse_1 = 1;
2847 			pq->recurse_2 = 1;
2848 
2849 			if (pq->curr == NULL) {
2850 				xfer = TAILQ_FIRST(&pq->head);
2851 				if (xfer) {
2852 					TAILQ_REMOVE(&pq->head, xfer,
2853 					    wait_entry);
2854 					xfer->wait_queue = NULL;
2855 					pq->curr = xfer;
2856 				} else {
2857 					break;
2858 				}
2859 			}
2860 			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2861 			(pq->command) (pq);
2862 			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2863 
2864 		} while (!pq->recurse_2);
2865 
2866 		/* clear first recurse flag */
2867 		pq->recurse_1 = 0;
2868 
2869 	} else {
2870 		/* clear second recurse flag */
2871 		pq->recurse_2 = 0;
2872 	}
2873 }
2874 
2875 /*------------------------------------------------------------------------*
2876  *	usbd_ctrl_transfer_setup
2877  *
2878  * This function is used to setup the default USB control endpoint
2879  * transfer.
2880  *------------------------------------------------------------------------*/
2881 void
2882 usbd_ctrl_transfer_setup(struct usb_device *udev)
2883 {
2884 	struct usb_xfer *xfer;
2885 	uint8_t no_resetup;
2886 	uint8_t iface_index;
2887 
2888 	/* check for root HUB */
2889 	if (udev->parent_hub == NULL)
2890 		return;
2891 repeat:
2892 
2893 	xfer = udev->ctrl_xfer[0];
2894 	if (xfer) {
2895 		USB_XFER_LOCK(xfer);
2896 		no_resetup =
2897 		    ((xfer->address == udev->address) &&
2898 		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2899 		    udev->ddesc.bMaxPacketSize));
2900 		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2901 			if (no_resetup) {
2902 				/*
2903 				 * NOTE: checking "xfer->address" and
2904 				 * starting the USB transfer must be
2905 				 * atomic!
2906 				 */
2907 				usbd_transfer_start(xfer);
2908 			}
2909 		}
2910 		USB_XFER_UNLOCK(xfer);
2911 	} else {
2912 		no_resetup = 0;
2913 	}
2914 
2915 	if (no_resetup) {
2916 		/*
2917 	         * All parameters are exactly the same as before.
2918 	         * Just return.
2919 	         */
2920 		return;
2921 	}
2922 	/*
2923 	 * Update wMaxPacketSize for the default control endpoint:
2924 	 */
2925 	udev->ctrl_ep_desc.wMaxPacketSize[0] =
2926 	    udev->ddesc.bMaxPacketSize;
2927 
2928 	/*
2929 	 * Unsetup any existing USB transfer:
2930 	 */
2931 	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
2932 
2933 	/*
2934 	 * Reset clear stall error counter.
2935 	 */
2936 	udev->clear_stall_errors = 0;
2937 
2938 	/*
2939 	 * Try to setup a new USB transfer for the
2940 	 * default control endpoint:
2941 	 */
2942 	iface_index = 0;
2943 	if (usbd_transfer_setup(udev, &iface_index,
2944 	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
2945 	    &udev->device_mtx)) {
2946 		DPRINTFN(0, "could not setup default "
2947 		    "USB transfer\n");
2948 	} else {
2949 		goto repeat;
2950 	}
2951 }
2952 
2953 /*------------------------------------------------------------------------*
2954  *	usbd_clear_stall_locked - factored out code
2955  *
2956  * NOTE: the intention of this function is not to reset the hardware
2957  * data toggle on the USB device side.
2958  *------------------------------------------------------------------------*/
2959 void
2960 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
2961 {
2962 	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
2963 
2964 	/* check that we have a valid case */
2965 	if (udev->flags.usb_mode == USB_MODE_HOST &&
2966 	    udev->parent_hub != NULL &&
2967 	    udev->bus->methods->clear_stall != NULL &&
2968 	    ep->methods != NULL) {
2969 		(udev->bus->methods->clear_stall) (udev, ep);
2970 	}
2971 }
2972 
2973 /*------------------------------------------------------------------------*
2974  *	usbd_clear_data_toggle - factored out code
2975  *
2976  * NOTE: the intention of this function is not to reset the hardware
2977  * data toggle on the USB device side.
2978  *------------------------------------------------------------------------*/
2979 void
2980 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
2981 {
2982 	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
2983 
2984 	USB_BUS_LOCK(udev->bus);
2985 	ep->toggle_next = 0;
2986 	/* some hardware needs a callback to clear the data toggle */
2987 	usbd_clear_stall_locked(udev, ep);
2988 	USB_BUS_UNLOCK(udev->bus);
2989 }
2990 
2991 /*------------------------------------------------------------------------*
2992  *	usbd_clear_stall_callback - factored out clear stall callback
2993  *
2994  * Input parameters:
2995  *  xfer1: Clear Stall Control Transfer
2996  *  xfer2: Stalled USB Transfer
2997  *
2998  * This function is NULL safe.
2999  *
3000  * Return values:
3001  *   0: In progress
3002  *   Else: Finished
3003  *
3004  * Clear stall config example:
3005  *
3006  * static const struct usb_config my_clearstall =  {
3007  *	.type = UE_CONTROL,
3008  *	.endpoint = 0,
3009  *	.direction = UE_DIR_ANY,
3010  *	.interval = 50, //50 milliseconds
3011  *	.bufsize = sizeof(struct usb_device_request),
3012  *	.timeout = 1000, //1.000 seconds
3013  *	.callback = &my_clear_stall_callback, // **
3014  *	.usb_mode = USB_MODE_HOST,
3015  * };
3016  *
3017  * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3018  * passing the correct parameters.
3019  *------------------------------------------------------------------------*/
3020 uint8_t
3021 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3022     struct usb_xfer *xfer2)
3023 {
3024 	struct usb_device_request req;
3025 
3026 	if (xfer2 == NULL) {
3027 		/* looks like we are tearing down */
3028 		DPRINTF("NULL input parameter\n");
3029 		return (0);
3030 	}
3031 	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3032 	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3033 
3034 	switch (USB_GET_STATE(xfer1)) {
3035 	case USB_ST_SETUP:
3036 
3037 		/*
3038 		 * pre-clear the data toggle to DATA0 ("umass.c" and
3039 		 * "ata-usb.c" depend on this)
3040 		 */
3041 
3042 		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3043 
3044 		/* setup a clear-stall packet */
3045 
3046 		req.bmRequestType = UT_WRITE_ENDPOINT;
3047 		req.bRequest = UR_CLEAR_FEATURE;
3048 		USETW(req.wValue, UF_ENDPOINT_HALT);
3049 		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3050 		req.wIndex[1] = 0;
3051 		USETW(req.wLength, 0);
3052 
3053 		/*
3054 		 * "usbd_transfer_setup_sub()" will ensure that
3055 		 * we have sufficient room in the buffer for
3056 		 * the request structure!
3057 		 */
3058 
3059 		/* copy in the transfer */
3060 
3061 		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3062 
3063 		/* set length */
3064 		xfer1->frlengths[0] = sizeof(req);
3065 		xfer1->nframes = 1;
3066 
3067 		usbd_transfer_submit(xfer1);
3068 		return (0);
3069 
3070 	case USB_ST_TRANSFERRED:
3071 		break;
3072 
3073 	default:			/* Error */
3074 		if (xfer1->error == USB_ERR_CANCELLED) {
3075 			return (0);
3076 		}
3077 		break;
3078 	}
3079 	return (1);			/* Clear Stall Finished */
3080 }
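
/*
 * Illustrative sketch of the "my_clear_stall_callback" mentioned in
 * the example above, not part of this file. "struct my_softc",
 * "sc_bulk_xfer" and MY_FLAG_STALL are hypothetical driver names:
 *
 * static void
 * my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 * {
 *	struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *	if (usbd_clear_stall_callback(xfer, sc->sc_bulk_xfer)) {
 *		// finished: restart the previously stalled transfer
 *		sc->sc_flags &= ~MY_FLAG_STALL;
 *		usbd_transfer_start(sc->sc_bulk_xfer);
 *	}
 * }
 */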
3081 
3082 /*------------------------------------------------------------------------*
3083  *	usbd_transfer_poll
3084  *
3085  * The following function gets called from the USB keyboard driver and
3086  * UMASS when the system has panicked.
3087  *
3088  * NOTE: It is currently not possible to resume normal operation on
3089  * the USB controller which has been polled, due to clearing of the
3090  * "up_dsleep" and "up_msleep" flags.
3091  *------------------------------------------------------------------------*/
3092 void
3093 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3094 {
3095 	struct usb_xfer *xfer;
3096 	struct usb_xfer_root *xroot;
3097 	struct usb_device *udev;
3098 	struct usb_proc_msg *pm;
3099 	uint16_t n;
3100 	uint16_t drop_bus;
3101 	uint16_t drop_xfer;
3102 
3103 	for (n = 0; n != max; n++) {
3104 		/* Extra checks to avoid panic */
3105 		xfer = ppxfer[n];
3106 		if (xfer == NULL)
3107 			continue;	/* no USB transfer */
3108 		xroot = xfer->xroot;
3109 		if (xroot == NULL)
3110 			continue;	/* no USB root */
3111 		udev = xroot->udev;
3112 		if (udev == NULL)
3113 			continue;	/* no USB device */
3114 		if (udev->bus == NULL)
3115 			continue;	/* no BUS structure */
3116 		if (udev->bus->methods == NULL)
3117 			continue;	/* no BUS methods */
3118 		if (udev->bus->methods->xfer_poll == NULL)
3119 			continue;	/* no poll method */
3120 
3121 		/* make sure that the BUS mutex is not locked */
3122 		drop_bus = 0;
3123 		while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3124 			mtx_unlock(&xroot->udev->bus->bus_mtx);
3125 			drop_bus++;
3126 		}
3127 
3128 		/* make sure that the transfer mutex is not locked */
3129 		drop_xfer = 0;
3130 		while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3131 			mtx_unlock(xroot->xfer_mtx);
3132 			drop_xfer++;
3133 		}
3134 
3135 		/* Make sure cv_signal() and cv_broadcast() are not called */
3136 		udev->bus->control_xfer_proc.up_msleep = 0;
3137 		udev->bus->explore_proc.up_msleep = 0;
3138 		udev->bus->giant_callback_proc.up_msleep = 0;
3139 		udev->bus->non_giant_callback_proc.up_msleep = 0;
3140 
3141 		/* poll USB hardware */
3142 		(udev->bus->methods->xfer_poll) (udev->bus);
3143 
3144 		USB_BUS_LOCK(xroot->bus);
3145 
3146 		/* check for clear stall */
3147 		if (udev->ctrl_xfer[1] != NULL) {
3148 
3149 			/* poll clear stall start */
3150 			pm = &udev->cs_msg[0].hdr;
3151 			(pm->pm_callback) (pm);
3152 			/* poll clear stall done thread */
3153 			pm = &udev->ctrl_xfer[1]->
3154 			    xroot->done_m[0].hdr;
3155 			(pm->pm_callback) (pm);
3156 		}
3157 
3158 		/* poll done thread */
3159 		pm = &xroot->done_m[0].hdr;
3160 		(pm->pm_callback) (pm);
3161 
3162 		USB_BUS_UNLOCK(xroot->bus);
3163 
3164 		/* restore transfer mutex */
3165 		while (drop_xfer--)
3166 			mtx_lock(xroot->xfer_mtx);
3167 
3168 		/* restore BUS mutex */
3169 		while (drop_bus--)
3170 			mtx_lock(&xroot->udev->bus->bus_mtx);
3171 	}
3172 }
3173 
3174 static void
3175 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3176     uint8_t type, enum usb_dev_speed speed)
3177 {
3178 	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3179 		[USB_SPEED_LOW] = 8,
3180 		[USB_SPEED_FULL] = 64,
3181 		[USB_SPEED_HIGH] = 1024,
3182 		[USB_SPEED_VARIABLE] = 1024,
3183 		[USB_SPEED_SUPER] = 1024,
3184 	};
3185 
3186 	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3187 		[USB_SPEED_LOW] = 0,	/* invalid */
3188 		[USB_SPEED_FULL] = 1023,
3189 		[USB_SPEED_HIGH] = 1024,
3190 		[USB_SPEED_VARIABLE] = 3584,
3191 		[USB_SPEED_SUPER] = 1024,
3192 	};
3193 
3194 	static const uint16_t control_min[USB_SPEED_MAX] = {
3195 		[USB_SPEED_LOW] = 8,
3196 		[USB_SPEED_FULL] = 8,
3197 		[USB_SPEED_HIGH] = 64,
3198 		[USB_SPEED_VARIABLE] = 512,
3199 		[USB_SPEED_SUPER] = 512,
3200 	};
3201 
3202 	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3203 		[USB_SPEED_LOW] = 8,
3204 		[USB_SPEED_FULL] = 8,
3205 		[USB_SPEED_HIGH] = 512,
3206 		[USB_SPEED_VARIABLE] = 512,
3207 		[USB_SPEED_SUPER] = 1024,
3208 	};
3209 
3210 	uint16_t temp;
3211 
3212 	memset(ptr, 0, sizeof(*ptr));
3213 
3214 	switch (type) {
3215 	case UE_INTERRUPT:
3216 		ptr->range.max = intr_range_max[speed];
3217 		break;
3218 	case UE_ISOCHRONOUS:
3219 		ptr->range.max = isoc_range_max[speed];
3220 		break;
3221 	default:
3222 		if (type == UE_BULK)
3223 			temp = bulk_min[speed];
3224 		else /* UE_CONTROL */
3225 			temp = control_min[speed];
3226 
3227 		/* default is fixed */
3228 		ptr->fixed[0] = temp;
3229 		ptr->fixed[1] = temp;
3230 		ptr->fixed[2] = temp;
3231 		ptr->fixed[3] = temp;
3232 
3233 		if (speed == USB_SPEED_FULL) {
3234 			/* multiple sizes */
3235 			ptr->fixed[1] = 16;
3236 			ptr->fixed[2] = 32;
3237 			ptr->fixed[3] = 64;
3238 		}
3239 		if ((speed == USB_SPEED_VARIABLE) &&
3240 		    (type == UE_BULK)) {
3241 			/* multiple sizes */
3242 			ptr->fixed[2] = 1024;
3243 			ptr->fixed[3] = 1536;
3244 		}
3245 		break;
3246 	}
3247 }
3248 
3249 void	*
3250 usbd_xfer_softc(struct usb_xfer *xfer)
3251 {
3252 	return (xfer->priv_sc);
3253 }
3254 
3255 void *
3256 usbd_xfer_get_priv(struct usb_xfer *xfer)
3257 {
3258 	return (xfer->priv_fifo);
3259 }
3260 
3261 void
3262 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3263 {
3264 	xfer->priv_fifo = ptr;
3265 }
3266 
3267 uint8_t
3268 usbd_xfer_state(struct usb_xfer *xfer)
3269 {
3270 	return (xfer->usb_state);
3271 }
3272 
3273 void
3274 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3275 {
3276 	switch (flag) {
3277 		case USB_FORCE_SHORT_XFER:
3278 			xfer->flags.force_short_xfer = 1;
3279 			break;
3280 		case USB_SHORT_XFER_OK:
3281 			xfer->flags.short_xfer_ok = 1;
3282 			break;
3283 		case USB_MULTI_SHORT_OK:
3284 			xfer->flags.short_frames_ok = 1;
3285 			break;
3286 		case USB_MANUAL_STATUS:
3287 			xfer->flags.manual_status = 1;
3288 			break;
3289 	}
3290 }
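
/*
 * Illustrative sketch, not part of this file: allowing a receive
 * transfer to complete short without error, typically done once after
 * usbd_transfer_setup():
 *
 *	usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 */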
3291 
3292 void
3293 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3294 {
3295 	switch (flag) {
3296 		case USB_FORCE_SHORT_XFER:
3297 			xfer->flags.force_short_xfer = 0;
3298 			break;
3299 		case USB_SHORT_XFER_OK:
3300 			xfer->flags.short_xfer_ok = 0;
3301 			break;
3302 		case USB_MULTI_SHORT_OK:
3303 			xfer->flags.short_frames_ok = 0;
3304 			break;
3305 		case USB_MANUAL_STATUS:
3306 			xfer->flags.manual_status = 0;
3307 			break;
3308 	}
3309 }
3310 
3311 /*
3312  * The following function returns the time, in milliseconds, at which
3313  * the isochronous transfer was completed by the hardware. The
3314  * returned value wraps around every 65536 milliseconds.
3315  */
3316 uint16_t
3317 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3318 {
3319 	return (xfer->isoc_time_complete);
3320 }
3321