xref: /dragonfly/sys/bus/u4b/usb_transfer.c (revision 7fd4e1a1)
1 /*-
2  * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include <sys/stdint.h>
27 #include <sys/param.h>
28 #include <sys/queue.h>
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/bus.h>
33 #include <sys/module.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/condvar.h>
37 #include <sys/sysctl.h>
38 #include <sys/unistd.h>
39 #include <sys/callout.h>
40 #include <sys/malloc.h>
41 #include <sys/priv.h>
42 #include <sys/proc.h>
43 
44 #include <bus/u4b/usb.h>
45 #include <bus/u4b/usbdi.h>
46 #include <bus/u4b/usbdi_util.h>
47 
48 #define	USB_DEBUG_VAR usb_debug
49 
50 #include <bus/u4b/usb_core.h>
51 #include <bus/u4b/usb_busdma.h>
52 #include <bus/u4b/usb_process.h>
53 #include <bus/u4b/usb_transfer.h>
54 #include <bus/u4b/usb_device.h>
55 #include <bus/u4b/usb_debug.h>
56 #include <bus/u4b/usb_util.h>
57 
58 #include <bus/u4b/usb_controller.h>
59 #include <bus/u4b/usb_bus.h>
60 #include <bus/u4b/usb_pf.h>
61 
62 struct usb_std_packet_size {
63 	struct {
64 		uint16_t min;		/* inclusive */
65 		uint16_t max;		/* inclusive */
66 	}	range;
67 
68 	uint16_t fixed[4];
69 };
70 
71 static usb_callback_t usb_request_callback;
72 
73 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
74 
75 	/* This transfer is used for generic control endpoint transfers */
76 
77 	[0] = {
78 		.type = UE_CONTROL,
79 		.endpoint = 0x00,	/* Control endpoint */
80 		.direction = UE_DIR_ANY,
81 		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
82 		.flags = {.proxy_buffer = 1,},
83 		.callback = &usb_request_callback,
84 		.usb_mode = USB_MODE_DUAL,	/* both modes */
85 	},
86 
87 	/* This transfer is used for generic clear stall only */
88 
89 	[1] = {
90 		.type = UE_CONTROL,
91 		.endpoint = 0x00,	/* Control pipe */
92 		.direction = UE_DIR_ANY,
93 		.bufsize = sizeof(struct usb_device_request),
94 		.callback = &usb_do_clear_stall_callback,
95 		.timeout = 1000,	/* 1 second */
96 		.interval = 50,	/* 50ms */
97 		.usb_mode = USB_MODE_HOST,
98 	},
99 };
100 
101 /* function prototypes */
102 
103 static void	usbd_update_max_frame_size(struct usb_xfer *);
104 static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
105 static void	usbd_control_transfer_init(struct usb_xfer *);
106 static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
107 static void	usb_callback_proc(struct usb_proc_msg *);
108 static void	usbd_callback_ss_done_defer(struct usb_xfer *);
109 static void	usbd_callback_wrapper(struct usb_xfer_queue *);
110 static void	usbd_transfer_start_cb(void *);
111 static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
112 static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
113 		    uint8_t type, enum usb_dev_speed speed);
114 
115 /*------------------------------------------------------------------------*
116  *	usb_request_callback
117  *------------------------------------------------------------------------*/
118 static void
119 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
120 {
121 	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
122 		usb_handle_request_callback(xfer, error);
123 	else
124 		usbd_do_request_callback(xfer, error);
125 }
126 
127 /*------------------------------------------------------------------------*
128  *	usbd_update_max_frame_size
129  *
130  * This function updates the maximum frame size, since high speed USB
131  * can transfer multiple consecutive packets.
132  *------------------------------------------------------------------------*/
133 static void
134 usbd_update_max_frame_size(struct usb_xfer *xfer)
135 {
136 	/* compute maximum frame size */
137 	/* this computation should not overflow 16-bit */
138 	/* max = 15 * 1024 */
139 
140 	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
141 }
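
/*
 * Worked example (illustration only): a high speed, high bandwidth
 * interrupt endpoint reporting wMaxPacketSize = 1024 bytes plus two
 * additional transactions per microframe ends up with
 * "max_packet_size" = 1024 and "max_packet_count" = 3, which gives
 * "max_frame_size" = 3 * 1024 = 3072 bytes.
 */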
142 
143 /*------------------------------------------------------------------------*
144  *	usbd_get_dma_delay
145  *
146  * The following function is called when we need to
147  * synchronize with DMA hardware.
148  *
149  * Returns:
150  *    0: no DMA delay required
151  * Else: milliseconds of DMA delay
152  *------------------------------------------------------------------------*/
153 usb_timeout_t
154 usbd_get_dma_delay(struct usb_device *udev)
155 {
156 	struct usb_bus_methods *mtod;
157 	uint32_t temp;
158 
159 	mtod = udev->bus->methods;
160 	temp = 0;
161 
162 	if (mtod->get_dma_delay) {
163 		(mtod->get_dma_delay) (udev, &temp);
164 		/*
165 		 * Round up and convert to milliseconds. Note that we
166 		 * divide by 1024 instead of 1000 to avoid a real division.
167 		 */
168 		temp += 0x3FF;
169 		temp /= 0x400;
170 	}
171 	return (temp);
172 }
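
/*
 * Worked example (illustration only): if the controller method above
 * reports a delay of 1500, the rounding computes
 * (1500 + 0x3FF) / 0x400 = 2, i.e. the delay is rounded up to two
 * milliseconds using a divide by 1024 instead of by 1000.
 */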
173 
174 /*------------------------------------------------------------------------*
175  *	usbd_transfer_setup_sub_malloc
176  *
177  * This function will allocate one or more DMA'able memory chunks
178  * according to the "size", "align" and "count" arguments. On return
179  * "ppc" points to a linear array of USB page caches.
180  *
181  * Returns:
182  *    0: Success
183  * Else: Failure
184  *------------------------------------------------------------------------*/
185 #if USB_HAVE_BUSDMA
186 uint8_t
187 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
188     struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
189     usb_size_t count)
190 {
191 	struct usb_page_cache *pc;
192 	struct usb_page *pg;
193 	void *buf;
194 	usb_size_t n_dma_pc;
195 	usb_size_t n_obj;
196 	usb_size_t x;
197 	usb_size_t y;
198 	usb_size_t r;
199 	usb_size_t z;
200 
201 #if 0
202 	USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n",
203 	    align));
204 	USB_ASSERT(size > 0, ("Invalid size = 0\n"));
205 #endif
206 
207 	if (count == 0) {
208 		return (0);		/* nothing to allocate */
209 	}
210 	/*
211 	 * Make sure that the size is aligned properly.
212 	 */
213 	size = -((-size) & (-align));
214 
215 	/*
216 	 * Try multi-allocation chunks to reduce the number of DMA
217 	 * allocations, since DMA allocations are slow.
218 	 */
219 	if (size >= PAGE_SIZE) {
220 		n_dma_pc = count;
221 		n_obj = 1;
222 	} else {
223 		/* compute number of objects per page */
224 		n_obj = (PAGE_SIZE / size);
225 		/*
226 		 * Compute number of DMA chunks, rounded up
227 		 * to nearest one:
228 		 */
229 		n_dma_pc = ((count + n_obj - 1) / n_obj);
230 	}
231 
232 	if (parm->buf == NULL) {
233 		/* for the future */
234 		parm->dma_page_ptr += n_dma_pc;
235 		parm->dma_page_cache_ptr += n_dma_pc;
236 		parm->dma_page_ptr += count;
237 		parm->xfer_page_cache_ptr += count;
238 		return (0);
239 	}
240 	for (x = 0; x != n_dma_pc; x++) {
241 		/* need to initialize the page cache */
242 		parm->dma_page_cache_ptr[x].tag_parent =
243 		    &parm->curr_xfer->xroot->dma_parent_tag;
244 	}
245 	for (x = 0; x != count; x++) {
246 		/* need to initialize the page cache */
247 		parm->xfer_page_cache_ptr[x].tag_parent =
248 		    &parm->curr_xfer->xroot->dma_parent_tag;
249 	}
250 
251 	if (ppc) {
252 		*ppc = parm->xfer_page_cache_ptr;
253 	}
254 	r = count;			/* set remainder count */
255 	z = n_obj * size;		/* set allocation size */
256 	pc = parm->xfer_page_cache_ptr;
257 	pg = parm->dma_page_ptr;
258 
259 	for (x = 0; x != n_dma_pc; x++) {
260 
261 		if (r < n_obj) {
262 			/* compute last remainder */
263 			z = r * size;
264 			n_obj = r;
265 		}
266 		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
267 		    pg, z, align)) {
268 			return (1);	/* failure */
269 		}
270 		/* Set beginning of current buffer */
271 		buf = parm->dma_page_cache_ptr->buffer;
272 		/* Make room for one DMA page cache and one page */
273 		parm->dma_page_cache_ptr++;
274 		pg++;
275 
276 		for (y = 0; (y != n_obj); y++, r--, pc++, pg++) {
277 
278 			/* Load sub-chunk into DMA */
279 			if (usb_pc_dmamap_create(pc, size)) {
280 				return (1);	/* failure */
281 			}
282 			pc->buffer = USB_ADD_BYTES(buf, y * size);
283 			pc->page_start = pg;
284 
285 			lockmgr(pc->tag_parent->lock, LK_EXCLUSIVE);
286 			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
287 				lockmgr(pc->tag_parent->lock, LK_RELEASE);
288 				return (1);	/* failure */
289 			}
290 			lockmgr(pc->tag_parent->lock, LK_RELEASE);
291 		}
292 	}
293 
294 	parm->xfer_page_cache_ptr = pc;
295 	parm->dma_page_ptr = pg;
296 	return (0);
297 }
298 #endif
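
/*
 * Worked example (illustration only, assuming PAGE_SIZE is 4096):
 * calling usbd_transfer_setup_sub_malloc() with size = 100,
 * align = 32 and count = 100 first rounds "size" up to 128 bytes.
 * Since 128 is less than PAGE_SIZE, n_obj = 4096 / 128 = 32 objects
 * fit into one chunk and n_dma_pc = (100 + 31) / 32 = 4 DMA chunks
 * are allocated.
 */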
299 
300 /*------------------------------------------------------------------------*
301  *	usbd_transfer_setup_sub - transfer setup subroutine
302  *
303  * This function must be called from the "xfer_setup" callback of the
304  * USB Host or Device controller driver when setting up a USB
305  * transfer. This function will set up the correct packet sizes, buffer
306  * sizes, flags and more, which are stored in the "usb_xfer"
307  * structure.
308  *------------------------------------------------------------------------*/
309 void
310 usbd_transfer_setup_sub(struct usb_setup_params *parm)
311 {
312 	enum {
313 		REQ_SIZE = 8,
314 		MIN_PKT = 8,
315 	};
316 	struct usb_xfer *xfer = parm->curr_xfer;
317 	const struct usb_config *setup = parm->curr_setup;
318 	struct usb_endpoint_ss_comp_descriptor *ecomp;
319 	struct usb_endpoint_descriptor *edesc;
320 	struct usb_std_packet_size std_size;
321 	usb_frcount_t n_frlengths;
322 	usb_frcount_t n_frbuffers;
323 	usb_frcount_t x;
324 	uint8_t type;
325 	uint8_t zmps;
326 
327 	/*
328 	 * Sanity check. The following parameters must be initialized before
329 	 * calling this function.
330 	 */
331 	if ((parm->hc_max_packet_size == 0) ||
332 	    (parm->hc_max_packet_count == 0) ||
333 	    (parm->hc_max_frame_size == 0)) {
334 		parm->err = USB_ERR_INVAL;
335 		goto done;
336 	}
337 	edesc = xfer->endpoint->edesc;
338 	ecomp = xfer->endpoint->ecomp;
339 
340 	type = (edesc->bmAttributes & UE_XFERTYPE);
341 
342 	xfer->flags = setup->flags;
343 	xfer->nframes = setup->frames;
344 	xfer->timeout = setup->timeout;
345 	xfer->callback = setup->callback;
346 	xfer->interval = setup->interval;
347 	xfer->endpointno = edesc->bEndpointAddress;
348 	xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
349 	xfer->max_packet_count = 1;
350 	/* make a shadow copy: */
351 	xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
352 
353 	parm->bufsize = setup->bufsize;
354 
355 	switch (parm->speed) {
356 	case USB_SPEED_HIGH:
357 		switch (type) {
358 		case UE_ISOCHRONOUS:
359 		case UE_INTERRUPT:
360 			xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
361 
362 			/* check for invalid max packet count */
363 			if (xfer->max_packet_count > 3)
364 				xfer->max_packet_count = 3;
365 			break;
366 		default:
367 			break;
368 		}
369 		xfer->max_packet_size &= 0x7FF;
370 		break;
371 	case USB_SPEED_SUPER:
372 		xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
373 
374 		if (ecomp != NULL)
375 			xfer->max_packet_count += ecomp->bMaxBurst;
376 
377 		if ((xfer->max_packet_count == 0) ||
378 		    (xfer->max_packet_count > 16))
379 			xfer->max_packet_count = 16;
380 
381 		switch (type) {
382 		case UE_CONTROL:
383 			xfer->max_packet_count = 1;
384 			break;
385 		case UE_ISOCHRONOUS:
386 			if (ecomp != NULL) {
387 				uint8_t mult;
388 
389 				mult = (ecomp->bmAttributes & 3) + 1;
390 				if (mult > 3)
391 					mult = 3;
392 
393 				xfer->max_packet_count *= mult;
394 			}
395 			break;
396 		default:
397 			break;
398 		}
399 		xfer->max_packet_size &= 0x7FF;
400 		break;
401 	default:
402 		break;
403 	}
404 	/* range check "max_packet_count" */
405 
406 	if (xfer->max_packet_count > parm->hc_max_packet_count) {
407 		xfer->max_packet_count = parm->hc_max_packet_count;
408 	}
409 	/* filter "wMaxPacketSize" according to HC capabilities */
410 
411 	if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
412 	    (xfer->max_packet_size == 0)) {
413 		xfer->max_packet_size = parm->hc_max_packet_size;
414 	}
415 	/* filter "wMaxPacketSize" according to standard sizes */
416 
417 	usbd_get_std_packet_size(&std_size, type, parm->speed);
418 
419 	if (std_size.range.min || std_size.range.max) {
420 
421 		if (xfer->max_packet_size < std_size.range.min) {
422 			xfer->max_packet_size = std_size.range.min;
423 		}
424 		if (xfer->max_packet_size > std_size.range.max) {
425 			xfer->max_packet_size = std_size.range.max;
426 		}
427 	} else {
428 
429 		if (xfer->max_packet_size >= std_size.fixed[3]) {
430 			xfer->max_packet_size = std_size.fixed[3];
431 		} else if (xfer->max_packet_size >= std_size.fixed[2]) {
432 			xfer->max_packet_size = std_size.fixed[2];
433 		} else if (xfer->max_packet_size >= std_size.fixed[1]) {
434 			xfer->max_packet_size = std_size.fixed[1];
435 		} else {
436 			/* only one possibility left */
437 			xfer->max_packet_size = std_size.fixed[0];
438 		}
439 	}
440 
441 	/* compute "max_frame_size" */
442 
443 	usbd_update_max_frame_size(xfer);
444 
445 	/* check interrupt interval and transfer pre-delay */
446 
447 	if (type == UE_ISOCHRONOUS) {
448 
449 		uint16_t frame_limit;
450 
451 		xfer->interval = 0;	/* not used, must be zero */
452 		xfer->flags_int.isochronous_xfr = 1;	/* set flag */
453 
454 		if (xfer->timeout == 0) {
455 			/*
456 			 * set a default timeout in
457 			 * case something goes wrong!
458 			 */
459 			xfer->timeout = 1000 / 4;
460 		}
461 		switch (parm->speed) {
462 		case USB_SPEED_LOW:
463 		case USB_SPEED_FULL:
464 			frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
465 			xfer->fps_shift = 0;
466 			break;
467 		default:
468 			frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
469 			xfer->fps_shift = edesc->bInterval;
470 			if (xfer->fps_shift > 0)
471 				xfer->fps_shift--;
472 			if (xfer->fps_shift > 3)
473 				xfer->fps_shift = 3;
474 			if (xfer->flags.pre_scale_frames != 0)
475 				xfer->nframes <<= (3 - xfer->fps_shift);
476 			break;
477 		}
478 
479 		if (xfer->nframes > frame_limit) {
480 			/*
481 			 * this is not going to work
482 			 * across different hardware
483 			 */
484 			parm->err = USB_ERR_INVAL;
485 			goto done;
486 		}
487 		if (xfer->nframes == 0) {
488 			/*
489 			 * this is not a valid value
490 			 */
491 			parm->err = USB_ERR_ZERO_NFRAMES;
492 			goto done;
493 		}
494 	} else {
495 
496 		/*
497 		 * If a value is specified, use that. Else check the
498 		 * endpoint descriptor!
499 		 */
500 		if (type == UE_INTERRUPT) {
501 
502 			uint32_t temp;
503 
504 			if (xfer->interval == 0) {
505 
506 				xfer->interval = edesc->bInterval;
507 
508 				switch (parm->speed) {
509 				case USB_SPEED_LOW:
510 				case USB_SPEED_FULL:
511 					break;
512 				default:
513 					/* 125us -> 1ms */
514 					if (xfer->interval < 4)
515 						xfer->interval = 1;
516 					else if (xfer->interval > 16)
517 						xfer->interval = (1 << (16 - 4));
518 					else
519 						xfer->interval =
520 						    (1 << (xfer->interval - 4));
521 					break;
522 				}
523 			}
524 
525 			if (xfer->interval == 0) {
526 				/*
527 				 * One millisecond is the smallest
528 				 * interval we support:
529 				 */
530 				xfer->interval = 1;
531 			}
532 
533 			xfer->fps_shift = 0;
534 			temp = 1;
535 
536 			while ((temp != 0) && (temp < xfer->interval)) {
537 				xfer->fps_shift++;
538 				temp *= 2;
539 			}
540 
541 			switch (parm->speed) {
542 			case USB_SPEED_LOW:
543 			case USB_SPEED_FULL:
544 				break;
545 			default:
546 				xfer->fps_shift += 3;
547 				break;
548 			}
549 		}
550 	}
551 
552 	/*
553 	 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
554 	 * to be equal to zero when setting up USB transfers, since
555 	 * that would lead to a lot of extra code in the USB kernel.
556 	 */
557 
558 	if ((xfer->max_frame_size == 0) ||
559 	    (xfer->max_packet_size == 0)) {
560 
561 		zmps = 1;
562 
563 		if ((parm->bufsize <= MIN_PKT) &&
564 		    (type != UE_CONTROL) &&
565 		    (type != UE_BULK)) {
566 
567 			/* workaround */
568 			xfer->max_packet_size = MIN_PKT;
569 			xfer->max_packet_count = 1;
570 			parm->bufsize = 0;	/* automatic setup length */
571 			usbd_update_max_frame_size(xfer);
572 
573 		} else {
574 			parm->err = USB_ERR_ZERO_MAXP;
575 			goto done;
576 		}
577 
578 	} else {
579 		zmps = 0;
580 	}
581 
582 	/*
583 	 * check if we should setup a default
584 	 * length:
585 	 */
586 
587 	if (parm->bufsize == 0) {
588 
589 		parm->bufsize = xfer->max_frame_size;
590 
591 		if (type == UE_ISOCHRONOUS) {
592 			parm->bufsize *= xfer->nframes;
593 		}
594 	}
595 	/*
596 	 * check if we are about to setup a proxy
597 	 * type of buffer:
598 	 */
599 
600 	if (xfer->flags.proxy_buffer) {
601 
602 		/* round bufsize up */
603 
604 		parm->bufsize += (xfer->max_frame_size - 1);
605 
606 		if (parm->bufsize < xfer->max_frame_size) {
607 			/* length wrapped around */
608 			parm->err = USB_ERR_INVAL;
609 			goto done;
610 		}
611 		/* subtract remainder */
612 
613 		parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
614 
615 		/* add length of USB device request structure, if any */
616 
617 		if (type == UE_CONTROL) {
618 			parm->bufsize += REQ_SIZE;	/* SETUP message */
619 		}
620 	}
621 	xfer->max_data_length = parm->bufsize;
622 
623 	/* Setup "n_frlengths" and "n_frbuffers" */
624 
625 	if (type == UE_ISOCHRONOUS) {
626 		n_frlengths = xfer->nframes;
627 		n_frbuffers = 1;
628 	} else {
629 
630 		if (type == UE_CONTROL) {
631 			xfer->flags_int.control_xfr = 1;
632 			if (xfer->nframes == 0) {
633 				if (parm->bufsize <= REQ_SIZE) {
634 					/*
635 					 * there will never be any data
636 					 * stage
637 					 */
638 					xfer->nframes = 1;
639 				} else {
640 					xfer->nframes = 2;
641 				}
642 			}
643 		} else {
644 			if (xfer->nframes == 0) {
645 				xfer->nframes = 1;
646 			}
647 		}
648 
649 		n_frlengths = xfer->nframes;
650 		n_frbuffers = xfer->nframes;
651 	}
652 
653 	/*
654 	 * check if we have room for the
655 	 * USB device request structure:
656 	 */
657 
658 	if (type == UE_CONTROL) {
659 
660 		if (xfer->max_data_length < REQ_SIZE) {
661 			/* length wrapped around or too small bufsize */
662 			parm->err = USB_ERR_INVAL;
663 			goto done;
664 		}
665 		xfer->max_data_length -= REQ_SIZE;
666 	}
667 	/*
668 	 * Setup "frlengths" and shadow "frlengths" for keeping the
669 	 * initial frame lengths when a USB transfer is complete. This
670 	 * information is useful when computing isochronous offsets.
671 	 */
672 	xfer->frlengths = parm->xfer_length_ptr;
673 	parm->xfer_length_ptr += 2 * n_frlengths;
674 
675 	/* setup "frbuffers" */
676 	xfer->frbuffers = parm->xfer_page_cache_ptr;
677 	parm->xfer_page_cache_ptr += n_frbuffers;
678 
679 	/* initialize max frame count */
680 	xfer->max_frame_count = xfer->nframes;
681 
682 	/*
683 	 * check if we need to setup
684 	 * a local buffer:
685 	 */
686 
687 	if (!xfer->flags.ext_buffer) {
688 
689 		/* align data */
690 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
691 
692 		if (parm->buf) {
693 
694 			xfer->local_buffer =
695 			    USB_ADD_BYTES(parm->buf, parm->size[0]);
696 
697 			usbd_xfer_set_frame_offset(xfer, 0, 0);
698 
699 			if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
700 				usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
701 			}
702 		}
703 		parm->size[0] += parm->bufsize;
704 
705 		/* align data again */
706 		parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
707 	}
708 	/*
709 	 * Compute maximum buffer size
710 	 */
711 
712 	if (parm->bufsize_max < parm->bufsize) {
713 		parm->bufsize_max = parm->bufsize;
714 	}
715 #if USB_HAVE_BUSDMA
716 	if (xfer->flags_int.bdma_enable) {
717 		/*
718 		 * Setup "dma_page_ptr".
719 		 *
720 		 * Proof for formula below:
721 		 *
722 		 * Assume there are three USB frames having length "a", "b" and
723 		 * "c". These USB frames will at maximum need "z"
724 		 * "usb_page" structures. "z" is given by:
725 		 *
726 		 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
727 		 * ((c / USB_PAGE_SIZE) + 2);
728 		 *
729 		 * Constraining "a", "b" and "c" like this:
730 		 *
731 		 * (a + b + c) <= parm->bufsize
732 		 *
733 		 * We know that:
734 		 *
735 		 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
736 		 *
737 		 * Here is the general formula:
738 		 */
739 		xfer->dma_page_ptr = parm->dma_page_ptr;
740 		parm->dma_page_ptr += (2 * n_frbuffers);
741 		parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
742 	}
743 #endif
744 	if (zmps) {
745 		/* correct maximum data length */
746 		xfer->max_data_length = 0;
747 	}
748 	/* subtract USB frame remainder from "hc_max_frame_size" */
749 
750 	xfer->max_hc_frame_size =
751 	    (parm->hc_max_frame_size -
752 	    (parm->hc_max_frame_size % xfer->max_frame_size));
753 
754 	if (xfer->max_hc_frame_size == 0) {
755 		parm->err = USB_ERR_INVAL;
756 		goto done;
757 	}
758 
759 	/* initialize frame buffers */
760 
761 	if (parm->buf) {
762 		for (x = 0; x != n_frbuffers; x++) {
763 			xfer->frbuffers[x].tag_parent =
764 			    &xfer->xroot->dma_parent_tag;
765 #if USB_HAVE_BUSDMA
766 			if (xfer->flags_int.bdma_enable &&
767 			    (parm->bufsize_max > 0)) {
768 
769 				if (usb_pc_dmamap_create(
770 				    xfer->frbuffers + x,
771 				    parm->bufsize_max)) {
772 					parm->err = USB_ERR_NOMEM;
773 					goto done;
774 				}
775 			}
776 #endif
777 		}
778 	}
779 done:
780 	if (parm->err) {
781 		/*
782 		 * Set some dummy values so that we avoid division by zero:
783 		 */
784 		xfer->max_hc_frame_size = 1;
785 		xfer->max_frame_size = 1;
786 		xfer->max_packet_size = 1;
787 		xfer->max_data_length = 0;
788 		xfer->nframes = 0;
789 		xfer->max_frame_count = 0;
790 	}
791 }
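
/*
 * The sketch below (inside "#if 0", not compiled) illustrates how a
 * hypothetical host controller driver's "xfer_setup" method is
 * expected to fill in its hardware limits before handing the
 * parameters to usbd_transfer_setup_sub(). The "foo_" name and the
 * numbers are made up for illustration only.
 */
#if 0
static void
foo_xfer_setup(struct usb_setup_params *parm)
{
	/* these limits must be non-zero, see the sanity check above */
	parm->hc_max_packet_size = 0x400;
	parm->hc_max_packet_count = 3;
	parm->hc_max_frame_size = 3 * 0x400;

	usbd_transfer_setup_sub(parm);

	if (parm->err)
		return;

	/* allocate controller specific descriptors here, if any */
}
#endif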
792 
793 /*------------------------------------------------------------------------*
794  *	usbd_transfer_setup - setup an array of USB transfers
795  *
796  * NOTE: You must always call "usbd_transfer_unsetup" after calling
797  * "usbd_transfer_setup" if success was returned.
798  *
799  * The idea is that the USB device driver should pre-allocate all its
800  * transfers by one call to this function.
801  *
802  * Return values:
803  *    0: Success
804  * Else: Failure
805  *------------------------------------------------------------------------*/
806 usb_error_t
807 usbd_transfer_setup(struct usb_device *udev,
808     const uint8_t *ifaces, struct usb_xfer **ppxfer,
809     const struct usb_config *setup_start, uint16_t n_setup,
810     void *priv_sc, struct lock *xfer_lock)
811 {
812 	struct usb_xfer dummy;
813 	struct usb_setup_params parm;
814 	const struct usb_config *setup_end = setup_start + n_setup;
815 	const struct usb_config *setup;
816 	struct usb_endpoint *ep;
817 	struct usb_xfer_root *info;
818 	struct usb_xfer *xfer;
819 	void *buf = NULL;
820 	uint16_t n;
821 	uint16_t refcount;
822 
823 	parm.err = 0;
824 	refcount = 0;
825 	info = NULL;
826 
827 #if 0
828 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
829 	    "usbd_transfer_setup can sleep!");
830 #endif
831 
832 	/* do some checking first */
833 
834 	if (n_setup == 0) {
835 		DPRINTFN(6, "setup array has zero length!\n");
836 		return (USB_ERR_INVAL);
837 	}
838 	if (ifaces == NULL) {
839 		DPRINTFN(6, "ifaces array is NULL!\n");
840 		return (USB_ERR_INVAL);
841 	}
842 	if (xfer_lock == NULL) {
843 		panic("xfer without lock!\n");
844 		DPRINTFN(6, "using global lock\n");
845 	}
846 	/* sanity checks */
847 	for (setup = setup_start, n = 0;
848 	    setup != setup_end; setup++, n++) {
849 		if (setup->bufsize == (usb_frlength_t)-1) {
850 			parm.err = USB_ERR_BAD_BUFSIZE;
851 			DPRINTF("invalid bufsize\n");
852 		}
853 		if (setup->callback == NULL) {
854 			parm.err = USB_ERR_NO_CALLBACK;
855 			DPRINTF("no callback\n");
856 		}
857 		ppxfer[n] = NULL;
858 	}
859 
860 	if (parm.err) {
861 		goto done;
862 	}
863 	memset(&parm, 0, sizeof(parm));
864 
865 	parm.udev = udev;
866 	parm.speed = usbd_get_speed(udev);
867 	parm.hc_max_packet_count = 1;
868 
869 	if (parm.speed >= USB_SPEED_MAX) {
870 		parm.err = USB_ERR_INVAL;
871 		goto done;
872 	}
873 	/* setup all transfers */
874 
875 	while (1) {
876 
877 		if (buf) {
878 			/*
879 			 * Initialize the "usb_xfer_root" structure,
880 			 * which is common for all our USB transfers.
881 			 */
882 			info = USB_ADD_BYTES(buf, 0);
883 
884 			info->memory_base = buf;
885 			info->memory_size = parm.size[0];
886 
887 #if USB_HAVE_BUSDMA
888 			info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]);
889 			info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]);
890 #endif
891 			info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]);
892 			info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]);
893 
894 			cv_init(&info->cv_drain, "WDRAIN");
895 
896 			info->xfer_lock = xfer_lock;
897 #if USB_HAVE_BUSDMA
898 			usb_dma_tag_setup(&info->dma_parent_tag,
899 			    parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag,
900 			    xfer_lock, &usb_bdma_done_event, 32, parm.dma_tag_max);
901 #endif
902 
903 			info->bus = udev->bus;
904 			info->udev = udev;
905 
906 			TAILQ_INIT(&info->done_q.head);
907 			info->done_q.command = &usbd_callback_wrapper;
908 #if USB_HAVE_BUSDMA
909 			TAILQ_INIT(&info->dma_q.head);
910 			info->dma_q.command = &usb_bdma_work_loop;
911 #endif
912 			info->done_m[0].hdr.pm_callback = &usb_callback_proc;
913 			info->done_m[0].xroot = info;
914 			info->done_m[1].hdr.pm_callback = &usb_callback_proc;
915 			info->done_m[1].xroot = info;
916 
917 			/*
918 			 * In device side mode control endpoint
919 			 * requests need to run from a separate
920 			 * context, else there is a chance of
921 			 * deadlock!
922 			 */
923 			if (setup_start == usb_control_ep_cfg)
924 				info->done_p =
925 				    &udev->bus->control_xfer_proc;
926 			else
927 				info->done_p =
928 				    &udev->bus->non_giant_callback_proc;
929 		}
930 		/* reset sizes */
931 
932 		parm.size[0] = 0;
933 		parm.buf = buf;
934 		parm.size[0] += sizeof(info[0]);
935 
936 		for (setup = setup_start, n = 0;
937 		    setup != setup_end; setup++, n++) {
938 
939 			/* skip USB transfers without callbacks: */
940 			if (setup->callback == NULL) {
941 				continue;
942 			}
943 			/* see if there is a matching endpoint */
944 			ep = usbd_get_endpoint(udev,
945 			    ifaces[setup->if_index], setup);
946 
947 			if ((ep == NULL) || (ep->methods == NULL)) {
948 				if (setup->flags.no_pipe_ok)
949 					continue;
950 				if ((setup->usb_mode != USB_MODE_DUAL) &&
951 				    (setup->usb_mode != udev->flags.usb_mode))
952 					continue;
953 				parm.err = USB_ERR_NO_PIPE;
954 				goto done;
955 			}
956 
957 			/* align data properly */
958 			parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
959 
960 			/* store current setup pointer */
961 			parm.curr_setup = setup;
962 
963 			if (buf) {
964 				/*
965 				 * Common initialization of the
966 				 * "usb_xfer" structure.
967 				 */
968 				xfer = USB_ADD_BYTES(buf, parm.size[0]);
969 				xfer->address = udev->address;
970 				xfer->priv_sc = priv_sc;
971 				xfer->xroot = info;
972 
973 				usb_callout_init_mtx(&xfer->timeout_handle,
974 				    &udev->bus->bus_lock, 0);
975 			} else {
976 				/*
977 				 * Setup a dummy xfer, since we are
978 				 * writing to the "usb_xfer"
979 				 * structure pointed to by "xfer"
980 				 * before we have allocated any
981 				 * memory:
982 				 */
983 				xfer = &dummy;
984 				memset(&dummy, 0, sizeof(dummy));
985 				refcount++;
986 			}
987 
988 			/* set transfer endpoint pointer */
989 			xfer->endpoint = ep;
990 
991 			parm.size[0] += sizeof(xfer[0]);
992 			parm.methods = xfer->endpoint->methods;
993 			parm.curr_xfer = xfer;
994 
995 			/*
996 			 * Call the Host or Device controller transfer
997 			 * setup routine:
998 			 */
999 			(udev->bus->methods->xfer_setup) (&parm);
1000 
1001 			/* check for error */
1002 			if (parm.err)
1003 				goto done;
1004 
1005 			if (buf) {
1006 				/*
1007 				 * Increment the endpoint refcount. This
1008 				 * basically prevents setting a new
1009 				 * configuration and alternate setting
1010 				 * when USB transfers are in use on
1011 				 * the given interface. Search the USB
1012 				 * code for "endpoint->refcount_alloc" if you
1013 				 * want more information.
1014 				 */
1015 				USB_BUS_LOCK(info->bus);
1016 				if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1017 					parm.err = USB_ERR_INVAL;
1018 
1019 				xfer->endpoint->refcount_alloc++;
1020 
1021 				if (xfer->endpoint->refcount_alloc == 0)
1022 					panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1023 				USB_BUS_UNLOCK(info->bus);
1024 
1025 				/*
1026 				 * Whenever we set ppxfer[] then we
1027 				 * also need to increment the
1028 				 * "setup_refcount":
1029 				 */
1030 				info->setup_refcount++;
1031 
1032 				/*
1033 				 * Transfer is successfully setup and
1034 				 * can be used:
1035 				 */
1036 				ppxfer[n] = xfer;
1037 			}
1038 
1039 			/* check for error */
1040 			if (parm.err)
1041 				goto done;
1042 		}
1043 
1044 		if (buf || parm.err) {
1045 			goto done;
1046 		}
1047 		if (refcount == 0) {
1048 			/* no transfers - nothing to do ! */
1049 			goto done;
1050 		}
1051 		/* align data properly */
1052 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1053 
1054 		/* store offset temporarily */
1055 		parm.size[1] = parm.size[0];
1056 
1057 		/*
1058 		 * The number of DMA tags required depends on
1059 		 * the number of endpoints. The current estimate
1060 		 * for maximum number of DMA tags per endpoint
1061 		 * is two.
1062 		 */
1063 		parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX);
1064 
1065 		/*
1066 		 * DMA tags for QH, TD, Data and more.
1067 		 */
1068 		parm.dma_tag_max += 8;
1069 
1070 		parm.dma_tag_p += parm.dma_tag_max;
1071 
1072 		parm.size[0] += ((uint8_t *)parm.dma_tag_p) -
1073 		    ((uint8_t *)0);
1074 
1075 		/* align data properly */
1076 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1077 
1078 		/* store offset temporarily */
1079 		parm.size[3] = parm.size[0];
1080 
1081 		parm.size[0] += ((uint8_t *)parm.dma_page_ptr) -
1082 		    ((uint8_t *)0);
1083 
1084 		/* align data properly */
1085 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1086 
1087 		/* store offset temporarily */
1088 		parm.size[4] = parm.size[0];
1089 
1090 		parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) -
1091 		    ((uint8_t *)0);
1092 
1093 		/* store end offset temporarily */
1094 		parm.size[5] = parm.size[0];
1095 
1096 		parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) -
1097 		    ((uint8_t *)0);
1098 
1099 		/* store end offset temporarily */
1100 
1101 		parm.size[2] = parm.size[0];
1102 
1103 		/* align data properly */
1104 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1105 
1106 		parm.size[6] = parm.size[0];
1107 
1108 		parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) -
1109 		    ((uint8_t *)0);
1110 
1111 		/* align data properly */
1112 		parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
1113 
1114 		/* allocate zeroed memory */
1115 		buf = kmalloc(parm.size[0], M_USB, M_WAITOK | M_ZERO);
1116 
1117 		parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]);
1118 		parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]);
1119 		parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]);
1120 		parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]);
1121 		parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]);
1122 	}
1123 
1124 done:
1125 	if (buf) {
1126 		if (info->setup_refcount == 0) {
1127 			/*
1128 			 * "usbd_transfer_unsetup_sub" will unlock
1129 			 * the bus mutex before returning !
1130 			 */
1131 			USB_BUS_LOCK(info->bus);
1132 
1133 			/* something went wrong */
1134 			usbd_transfer_unsetup_sub(info, 0);
1135 		}
1136 	}
1137 	if (parm.err) {
1138 		usbd_transfer_unsetup(ppxfer, n_setup);
1139 	}
1140 	return (parm.err);
1141 }
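
/*
 * The sketch below (inside "#if 0", not compiled) shows the usage
 * pattern described above: a hypothetical driver pre-allocates all of
 * its transfers with one usbd_transfer_setup() call at attach time
 * and releases them again with usbd_transfer_unsetup() at detach
 * time. All "foo_" names, the softc layout and the buffer sizes are
 * made up for illustration; the callbacks are assumed to be defined
 * elsewhere.
 */
#if 0
enum { FOO_XFER_RD, FOO_XFER_WR, FOO_N_XFER };

static const struct usb_config foo_config[FOO_N_XFER] = {
	[FOO_XFER_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 512,
		.flags = {.short_xfer_ok = 1,},
		.callback = &foo_read_callback,
	},
	[FOO_XFER_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = 512,
		.callback = &foo_write_callback,
	},
};

static int
foo_attach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	uint8_t iface_index = uaa->info.bIfaceIndex;
	usb_error_t err;

	lockinit(&sc->sc_lock, "foo lock", 0, LK_CANRECURSE);

	err = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, foo_config, FOO_N_XFER, sc, &sc->sc_lock);
	if (err) {
		lockuninit(&sc->sc_lock);
		return (ENXIO);
	}
	return (0);
}

static int
foo_detach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	/* cancels pending transfers and frees all transfer resources */
	usbd_transfer_unsetup(sc->sc_xfer, FOO_N_XFER);
	lockuninit(&sc->sc_lock);
	return (0);
}
#endif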
1142 
1143 /*------------------------------------------------------------------------*
1144  *	usbd_transfer_unsetup_sub - factored out code
1145  *------------------------------------------------------------------------*/
1146 static void
1147 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1148 {
1149 #if USB_HAVE_BUSDMA
1150 	struct usb_page_cache *pc;
1151 #endif
1152 
1153 	USB_BUS_LOCK_ASSERT(info->bus);
1154 
1155 	/* wait for any outstanding DMA operations */
1156 
1157 	if (needs_delay) {
1158 		usb_timeout_t temp;
1159 		temp = usbd_get_dma_delay(info->udev);
1160 		if (temp != 0) {
1161 			usb_pause_mtx(&info->bus->bus_lock,
1162 			    USB_MS_TO_TICKS(temp));
1163 		}
1164 	}
1165 
1166 	/* make sure that our done messages are not queued anywhere */
1167 	usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1168 
1169 	USB_BUS_UNLOCK(info->bus);
1170 
1171 #if USB_HAVE_BUSDMA
1172 	/* free DMA'able memory, if any */
1173 	pc = info->dma_page_cache_start;
1174 	while (pc != info->dma_page_cache_end) {
1175 		usb_pc_free_mem(pc);
1176 		pc++;
1177 	}
1178 
1179 	/* free DMA maps in all "xfer->frbuffers" */
1180 	pc = info->xfer_page_cache_start;
1181 	while (pc != info->xfer_page_cache_end) {
1182 		usb_pc_dmamap_destroy(pc);
1183 		pc++;
1184 	}
1185 
1186 	/* free all DMA tags */
1187 	usb_dma_tag_unsetup(&info->dma_parent_tag);
1188 #endif
1189 
1190 	cv_destroy(&info->cv_drain);
1191 
1192 	/*
1193 	 * free the "memory_base" last, since the "info" structure is
1194 	 * contained within the "memory_base"!
1195 	 */
1196 	kfree(info->memory_base, M_USB);
1197 }
1198 
1199 /*------------------------------------------------------------------------*
1200  *	usbd_transfer_unsetup - unsetup/free an array of USB transfers
1201  *
1202  * NOTE: All USB transfers in progress will get called back passing
1203  * the error code "USB_ERR_CANCELLED" before this function
1204  * returns.
1205  *------------------------------------------------------------------------*/
1206 void
1207 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1208 {
1209 	struct usb_xfer *xfer;
1210 	struct usb_xfer_root *info;
1211 	uint8_t needs_delay = 0;
1212 
1213 #if 0
1214 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1215 	    "usbd_transfer_unsetup can sleep!");
1216 #endif
1217 
1218 	while (n_setup--) {
1219 		xfer = pxfer[n_setup];
1220 
1221 		if (xfer == NULL)
1222 			continue;
1223 
1224 		info = xfer->xroot;
1225 
1226 		USB_XFER_LOCK(xfer);
1227 		USB_BUS_LOCK(info->bus);
1228 
1229 		/*
1230 		 * HINT: when you start/stop a transfer, it might be a
1231 		 * good idea to directly use the "pxfer[]" structure:
1232 		 *
1233 		 * usbd_transfer_start(sc->pxfer[0]);
1234 		 * usbd_transfer_stop(sc->pxfer[0]);
1235 		 *
1236 		 * That way, if other parts of your code are still
1237 		 * running under the same lock ("xfer_lock"), the
1238 		 * usbd_transfer_start and usbd_transfer_stop functions
1239 		 * will simply return when they detect a NULL pointer
1240 		 * argument.
1241 		 *
1242 		 * To avoid any races we clear the "pxfer[]" pointer
1243 		 * while holding the private mutex of the driver:
1244 		 */
1245 		pxfer[n_setup] = NULL;
1246 
1247 		USB_BUS_UNLOCK(info->bus);
1248 		USB_XFER_UNLOCK(xfer);
1249 
1250 		usbd_transfer_drain(xfer);
1251 
1252 #if USB_HAVE_BUSDMA
1253 		if (xfer->flags_int.bdma_enable)
1254 			needs_delay = 1;
1255 #endif
1256 		/*
1257 		 * NOTE: default endpoint does not have an
1258 		 * interface, even if endpoint->iface_index == 0
1259 		 */
1260 		USB_BUS_LOCK(info->bus);
1261 		xfer->endpoint->refcount_alloc--;
1262 		USB_BUS_UNLOCK(info->bus);
1263 
1264 		usb_callout_drain(&xfer->timeout_handle);
1265 
1266 		USB_BUS_LOCK(info->bus);
1267 
1268 #if 0
1269 		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1270 		    "reference count\n"));
1271 #endif
1272 
1273 		info->setup_refcount--;
1274 
1275 		if (info->setup_refcount == 0) {
1276 			usbd_transfer_unsetup_sub(info,
1277 			    needs_delay);
1278 		} else {
1279 			USB_BUS_UNLOCK(info->bus);
1280 		}
1281 	}
1282 }
1283 
1284 /*------------------------------------------------------------------------*
1285  *	usbd_control_transfer_init - factored out code
1286  *
1287  * In USB Device Mode we have to wait for the SETUP packet, which
1288  * contains the "struct usb_device_request" structure, before we can
1289  * transfer any data. In USB Host Mode we already have the SETUP
1290  * packet at the moment the USB transfer is started. This means the
1291  * USB transfer has to be set up at two different points in
1292  * time. This function just contains factored out control transfer
1293  * initialisation code, so that we don't duplicate the code.
1294  *------------------------------------------------------------------------*/
1295 static void
1296 usbd_control_transfer_init(struct usb_xfer *xfer)
1297 {
1298 	struct usb_device_request req;
1299 
1300 	/* copy out the USB request header */
1301 
1302 	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1303 
1304 	/* setup remainder */
1305 
1306 	xfer->flags_int.control_rem = UGETW(req.wLength);
1307 
1308 	/* copy direction to endpoint variable */
1309 
1310 	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1311 	xfer->endpointno |=
1312 	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1313 }
1314 
1315 /*------------------------------------------------------------------------*
1316  *	usbd_setup_ctrl_transfer
1317  *
1318  * This function handles initialisation of control transfers. Control
1319  * transfers are special in that they can both transmit
1320  * and receive data.
1321  *
1322  * Return values:
1323  *    0: Success
1324  * Else: Failure
1325  *------------------------------------------------------------------------*/
1326 static int
1327 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1328 {
1329 	usb_frlength_t len;
1330 
1331 	/* Check for control endpoint stall */
1332 	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1333 		/* the control transfer is no longer active */
1334 		xfer->flags_int.control_stall = 1;
1335 		xfer->flags_int.control_act = 0;
1336 	} else {
1337 		/* don't stall control transfer by default */
1338 		xfer->flags_int.control_stall = 0;
1339 	}
1340 
1341 	/* Check for invalid number of frames */
1342 	if (xfer->nframes > 2) {
1343 		/*
1344 		 * If you need to split a control transfer, you
1345 		 * have to do one part at a time. Only with
1346 		 * non-control transfers can you do multiple
1347 		 * parts at a time.
1348 		 */
1349 		DPRINTFN(0, "Too many frames: %u\n",
1350 		    (unsigned int)xfer->nframes);
1351 		goto error;
1352 	}
1353 
1354 	/*
1355          * Check if there is a control
1356          * transfer in progress:
1357          */
1358 	if (xfer->flags_int.control_act) {
1359 
1360 		if (xfer->flags_int.control_hdr) {
1361 
1362 			/* clear send header flag */
1363 
1364 			xfer->flags_int.control_hdr = 0;
1365 
1366 			/* setup control transfer */
1367 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1368 				usbd_control_transfer_init(xfer);
1369 			}
1370 		}
1371 		/* get data length */
1372 
1373 		len = xfer->sumlen;
1374 
1375 	} else {
1376 
1377 		/* the size of the SETUP structure is hardcoded ! */
1378 
1379 		if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1380 			DPRINTFN(0, "Wrong framelength %u != %zu\n",
1381 			    xfer->frlengths[0], sizeof(struct
1382 			    usb_device_request));
1383 			goto error;
1384 		}
1385 		/* check USB mode */
1386 		if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1387 
1388 			/* check number of frames */
1389 			if (xfer->nframes != 1) {
1390 				/*
1391 			         * We need to receive the setup
1392 			         * message first so that we know the
1393 			         * data direction!
1394 			         */
1395 				DPRINTF("Misconfigured transfer\n");
1396 				goto error;
1397 			}
1398 			/*
1399 			 * Set a dummy "control_rem" value.  This
1400 			 * variable will be overwritten later by a
1401 			 * call to "usbd_control_transfer_init()" !
1402 			 */
1403 			xfer->flags_int.control_rem = 0xFFFF;
1404 		} else {
1405 
1406 			/* setup "endpoint" and "control_rem" */
1407 
1408 			usbd_control_transfer_init(xfer);
1409 		}
1410 
1411 		/* set transfer-header flag */
1412 
1413 		xfer->flags_int.control_hdr = 1;
1414 
1415 		/* get data length */
1416 
1417 		len = (xfer->sumlen - sizeof(struct usb_device_request));
1418 	}
1419 
1420 	/* check if there is a length mismatch */
1421 
1422 	if (len > xfer->flags_int.control_rem) {
1423 		DPRINTFN(0, "Length (%d) greater than "
1424 		    "remaining length (%d)\n", len,
1425 		    xfer->flags_int.control_rem);
1426 		goto error;
1427 	}
1428 	/* check if we are doing a short transfer */
1429 
1430 	if (xfer->flags.force_short_xfer) {
1431 		xfer->flags_int.control_rem = 0;
1432 	} else {
1433 		if ((len != xfer->max_data_length) &&
1434 		    (len != xfer->flags_int.control_rem) &&
1435 		    (xfer->nframes != 1)) {
1436 			DPRINTFN(0, "Short control transfer without "
1437 			    "force_short_xfer set\n");
1438 			goto error;
1439 		}
1440 		xfer->flags_int.control_rem -= len;
1441 	}
1442 
1443 	/* the status part is executed when "control_act" is 0 */
1444 
1445 	if ((xfer->flags_int.control_rem > 0) ||
1446 	    (xfer->flags.manual_status)) {
1447 		/* don't execute the STATUS stage yet */
1448 		xfer->flags_int.control_act = 1;
1449 
1450 		/* sanity check */
1451 		if ((!xfer->flags_int.control_hdr) &&
1452 		    (xfer->nframes == 1)) {
1453 			/*
1454 		         * This is not a valid operation!
1455 		         */
1456 			DPRINTFN(0, "Invalid parameter "
1457 			    "combination\n");
1458 			goto error;
1459 		}
1460 	} else {
1461 		/* time to execute the STATUS stage */
1462 		xfer->flags_int.control_act = 0;
1463 	}
1464 	return (0);			/* success */
1465 
1466 error:
1467 	return (1);			/* failure */
1468 }
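
/*
 * The sketch below (inside "#if 0", not compiled) shows how a
 * hypothetical driver callback could lay out a two frame control
 * transfer: frame 0 carries the 8 byte SETUP request and frame 1
 * carries the data stage. The request values are made up and the
 * transfer is assumed to have been configured with a large enough
 * "bufsize".
 */
#if 0
static void
foo_ctrl_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usb_device_request req;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_SETUP:
		/* build a vendor specific IN request */
		req.bmRequestType = UT_READ_VENDOR_DEVICE;
		req.bRequest = 0x01;
		USETW(req.wValue, 0);
		USETW(req.wIndex, 0);
		USETW(req.wLength, 64);

		/* frame 0 is the SETUP message, frame 1 the data stage */
		usbd_copy_in(usbd_xfer_get_frame(xfer, 0), 0,
		    &req, sizeof(req));
		usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
		usbd_xfer_set_frame_len(xfer, 1, 64);
		usbd_xfer_set_frames(xfer, 2);
		usbd_transfer_submit(xfer);
		break;
	default:		/* transfer complete or error */
		break;
	}
}
#endif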
1469 
1470 /*------------------------------------------------------------------------*
1471  *	usbd_transfer_submit - start USB hardware for the given transfer
1472  *
1473  * This function should only be called from the USB callback.
1474  *------------------------------------------------------------------------*/
1475 void
1476 usbd_transfer_submit(struct usb_xfer *xfer)
1477 {
1478 	struct usb_xfer_root *info;
1479 	struct usb_bus *bus;
1480 	usb_frcount_t x;
1481 
1482 	info = xfer->xroot;
1483 	bus = info->bus;
1484 
1485 	DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1486 	    xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1487 	    "read" : "write");
1488 
1489 #ifdef USB_DEBUG
1490 	if (USB_DEBUG_VAR > 0) {
1491 		USB_BUS_LOCK(bus);
1492 
1493 		usb_dump_endpoint(xfer->endpoint);
1494 
1495 		USB_BUS_UNLOCK(bus);
1496 	}
1497 #endif
1498 
1499 	USB_XFER_LOCK_ASSERT(xfer);
1500 	USB_BUS_LOCK_ASSERT_NOTOWNED(bus);
1501 
1502 	/* Only open the USB transfer once! */
1503 	if (!xfer->flags_int.open) {
1504 		xfer->flags_int.open = 1;
1505 
1506 		DPRINTF("open\n");
1507 
1508 		USB_BUS_LOCK(bus);
1509 		(xfer->endpoint->methods->open) (xfer);
1510 		USB_BUS_UNLOCK(bus);
1511 	}
1512 	/* set "transferring" flag */
1513 	xfer->flags_int.transferring = 1;
1514 
1515 #if USB_HAVE_POWERD
1516 	/* increment power reference */
1517 	usbd_transfer_power_ref(xfer, 1);
1518 #endif
1519 	/*
1520 	 * Check if the transfer is waiting on a queue, most
1521 	 * frequently the "done_q":
1522 	 */
1523 	if (xfer->wait_queue) {
1524 		USB_BUS_LOCK(bus);
1525 		usbd_transfer_dequeue(xfer);
1526 		USB_BUS_UNLOCK(bus);
1527 	}
1528 	/* clear "did_dma_delay" flag */
1529 	xfer->flags_int.did_dma_delay = 0;
1530 
1531 	/* clear "did_close" flag */
1532 	xfer->flags_int.did_close = 0;
1533 
1534 #if USB_HAVE_BUSDMA
1535 	/* clear "bdma_setup" flag */
1536 	xfer->flags_int.bdma_setup = 0;
1537 #endif
1538 	/* by default we cannot cancel any USB transfer immediately */
1539 	xfer->flags_int.can_cancel_immed = 0;
1540 
1541 	/* clear lengths and frame counts by default */
1542 	xfer->sumlen = 0;
1543 	xfer->actlen = 0;
1544 	xfer->aframes = 0;
1545 
1546 	/* clear any previous errors */
1547 	xfer->error = 0;
1548 
1549 	/* Check if the device is still alive */
1550 	if (info->udev->state < USB_STATE_POWERED) {
1551 		USB_BUS_LOCK(bus);
1552 		/*
1553 		 * Must return cancelled error code else
1554 		 * device drivers can hang.
1555 		 */
1556 		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1557 		USB_BUS_UNLOCK(bus);
1558 		return;
1559 	}
1560 
1561 	/* sanity check */
1562 	if (xfer->nframes == 0) {
1563 		if (xfer->flags.stall_pipe) {
1564 			/*
1565 			 * Special case - want to stall without transferring
1566 			 * any data:
1567 			 */
1568 			DPRINTF("xfer=%p nframes=0: stall "
1569 			    "or clear stall!\n", xfer);
1570 			USB_BUS_LOCK(bus);
1571 			xfer->flags_int.can_cancel_immed = 1;
1572 			/* start the transfer */
1573 			usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer);
1574 			USB_BUS_UNLOCK(bus);
1575 			return;
1576 		}
1577 		USB_BUS_LOCK(bus);
1578 		usbd_transfer_done(xfer, USB_ERR_INVAL);
1579 		USB_BUS_UNLOCK(bus);
1580 		return;
1581 	}
1582 	/* compute some variables */
1583 
1584 	for (x = 0; x != xfer->nframes; x++) {
1585 		/* make a copy of the frlengths[] */
1586 		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1587 		/* compute total transfer length */
1588 		xfer->sumlen += xfer->frlengths[x];
1589 		if (xfer->sumlen < xfer->frlengths[x]) {
1590 			/* length wrapped around */
1591 			USB_BUS_LOCK(bus);
1592 			usbd_transfer_done(xfer, USB_ERR_INVAL);
1593 			USB_BUS_UNLOCK(bus);
1594 			return;
1595 		}
1596 	}
1597 
1598 	/* clear some internal flags */
1599 
1600 	xfer->flags_int.short_xfer_ok = 0;
1601 	xfer->flags_int.short_frames_ok = 0;
1602 
1603 	/* check if this is a control transfer */
1604 
1605 	if (xfer->flags_int.control_xfr) {
1606 
1607 		if (usbd_setup_ctrl_transfer(xfer)) {
1608 			USB_BUS_LOCK(bus);
1609 			usbd_transfer_done(xfer, USB_ERR_STALLED);
1610 			USB_BUS_UNLOCK(bus);
1611 			return;
1612 		}
1613 	}
1614 	/*
1615 	 * Setup filtered version of some transfer flags,
1616 	 * in case of data read direction
1617 	 */
1618 	if (USB_GET_DATA_ISREAD(xfer)) {
1619 
1620 		if (xfer->flags.short_frames_ok) {
1621 			xfer->flags_int.short_xfer_ok = 1;
1622 			xfer->flags_int.short_frames_ok = 1;
1623 		} else if (xfer->flags.short_xfer_ok) {
1624 			xfer->flags_int.short_xfer_ok = 1;
1625 
1626 			/* check for control transfer */
1627 			if (xfer->flags_int.control_xfr) {
1628 				/*
1629 				 * 1) Control transfers do not support
1630 				 * reception of multiple short USB
1631 				 * frames in host mode and device side
1632 				 * mode, with exception of:
1633 				 *
1634 				 * 2) Due to sometimes buggy device
1635 				 * side firmware we need to do a
1636 				 * STATUS stage in case of short
1637 				 * control transfers in USB host mode.
1638 				 * The STATUS stage then becomes the
1639 				 * "alt_next" to the DATA stage.
1640 				 */
1641 				xfer->flags_int.short_frames_ok = 1;
1642 			}
1643 		}
1644 	}
1645 	/*
1646 	 * Check if BUS-DMA support is enabled and try to load virtual
1647 	 * buffers into DMA, if any:
1648 	 */
1649 #if USB_HAVE_BUSDMA
1650 	if (xfer->flags_int.bdma_enable) {
1651 		/* insert the USB transfer last in the BUS-DMA queue */
1652 		usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1653 		return;
1654 	}
1655 #endif
1656 	/*
1657 	 * Enter the USB transfer into the Host Controller or
1658 	 * Device Controller schedule:
1659 	 */
1660 	usbd_pipe_enter(xfer);
1661 }
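
/*
 * The sketch below (inside "#if 0", not compiled) shows the usual
 * place usbd_transfer_submit() is called from: the USB_ST_SETUP state
 * of a driver callback, and again from USB_ST_TRANSFERRED to keep the
 * transfer running. The "foo_" name is made up for illustration only.
 */
#if 0
static void
foo_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* consume "actlen" bytes from frame 0 here */
		/* FALLTHROUGH */
	case USB_ST_SETUP:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:
		if (error != USB_ERR_CANCELLED) {
			/* request a clear-stall and restart the transfer */
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frame_len(xfer, 0,
			    usbd_xfer_max_len(xfer));
			usbd_transfer_submit(xfer);
		}
		break;
	}
}
#endif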
1662 
1663 /*------------------------------------------------------------------------*
1664  *	usbd_pipe_enter - factored out code
1665  *------------------------------------------------------------------------*/
1666 void
1667 usbd_pipe_enter(struct usb_xfer *xfer)
1668 {
1669 	struct usb_endpoint *ep;
1670 
1671 	USB_XFER_LOCK_ASSERT(xfer);
1672 
1673 	USB_BUS_LOCK(xfer->xroot->bus);
1674 
1675 	ep = xfer->endpoint;
1676 
1677 	DPRINTF("enter\n");
1678 
1679 	/* enter the transfer */
1680 	(ep->methods->enter) (xfer);
1681 
1682 	xfer->flags_int.can_cancel_immed = 1;
1683 
1684 	/* check for transfer error */
1685 	if (xfer->error) {
1686 		/* some error has happened */
1687 		usbd_transfer_done(xfer, 0);
1688 		USB_BUS_UNLOCK(xfer->xroot->bus);
1689 		return;
1690 	}
1691 
1692 	/* start the transfer */
1693 	usb_command_wrapper(&ep->endpoint_q, xfer);
1694 	USB_BUS_UNLOCK(xfer->xroot->bus);
1695 }
1696 
1697 /*------------------------------------------------------------------------*
1698  *	usbd_transfer_start - start a USB transfer
1699  *
1700  * NOTE: Calling this function more than one time will only
1701  *       result in a single transfer start, until the USB transfer
1702  *       completes.
1703  *------------------------------------------------------------------------*/
1704 void
1705 usbd_transfer_start(struct usb_xfer *xfer)
1706 {
1707 	if (xfer == NULL) {
1708 		/* transfer is gone */
1709 		return;
1710 	}
1711 	USB_XFER_LOCK_ASSERT(xfer);
1712 
1713 	/* mark the USB transfer started */
1714 
1715 	if (!xfer->flags_int.started) {
1716 		/* lock the BUS lock to avoid races updating flags_int */
1717 		USB_BUS_LOCK(xfer->xroot->bus);
1718 		xfer->flags_int.started = 1;
1719 		USB_BUS_UNLOCK(xfer->xroot->bus);
1720 	}
1721 	/* check if the USB transfer callback is already transferring */
1722 
1723 	if (xfer->flags_int.transferring) {
1724 		return;
1725 	}
1726 	USB_BUS_LOCK(xfer->xroot->bus);
1727 	/* call the USB transfer callback */
1728 	usbd_callback_ss_done_defer(xfer);
1729 	USB_BUS_UNLOCK(xfer->xroot->bus);
1730 }
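
/*
 * Minimal usage sketch (inside "#if 0", not compiled): transfers are
 * started while holding the lock that was passed to
 * usbd_transfer_setup(). The "sc" softc and its members are made up
 * for illustration only.
 */
#if 0
	lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
	usbd_transfer_start(sc->sc_xfer[FOO_XFER_RD]);
	lockmgr(&sc->sc_lock, LK_RELEASE);
#endif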
1731 
1732 /*------------------------------------------------------------------------*
1733  *	usbd_transfer_stop - stop a USB transfer
1734  *
1735  * NOTE: Calling this function more than one time will only
1736  *       result in a single transfer stop.
1737  * NOTE: When this function returns it is not safe to free nor
1738  *       reuse any DMA buffers. See "usbd_transfer_drain()".
1739  *------------------------------------------------------------------------*/
1740 void
1741 usbd_transfer_stop(struct usb_xfer *xfer)
1742 {
1743 	struct usb_endpoint *ep;
1744 
1745 	if (xfer == NULL) {
1746 		/* transfer is gone */
1747 		return;
1748 	}
1749 #if 0
1750 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1751 #endif
1752 
1753 	/* check if the USB transfer was ever opened */
1754 
1755 	if (!xfer->flags_int.open) {
1756 		if (xfer->flags_int.started) {
1757 			/* nothing to do except clearing the "started" flag */
1758 			/* lock the BUS lock to avoid races updating flags_int */
1759 			USB_BUS_LOCK(xfer->xroot->bus);
1760 			xfer->flags_int.started = 0;
1761 			USB_BUS_UNLOCK(xfer->xroot->bus);
1762 		}
1763 		return;
1764 	}
1765 	/* try to stop the current USB transfer */
1766 
1767 	USB_BUS_LOCK(xfer->xroot->bus);
1768 	/* override any previous error */
1769 	xfer->error = USB_ERR_CANCELLED;
1770 
1771 	/*
1772 	 * Clear "open" and "started" when both private and USB lock
1773 	 * is locked so that we don't get a race updating "flags_int"
1774 	 */
1775 	xfer->flags_int.open = 0;
1776 	xfer->flags_int.started = 0;
1777 
1778 	/*
1779 	 * Check if we can cancel the USB transfer immediately.
1780 	 */
1781 	if (xfer->flags_int.transferring) {
1782 		if (xfer->flags_int.can_cancel_immed &&
1783 		    (!xfer->flags_int.did_close)) {
1784 			DPRINTF("close\n");
1785 			/*
1786 			 * The following will lead to an USB_ERR_CANCELLED
1787 			 * error code being passed to the USB callback.
1788 			 */
1789 			(xfer->endpoint->methods->close) (xfer);
1790 			/* only close once */
1791 			xfer->flags_int.did_close = 1;
1792 		} else {
1793 			/* need to wait for the next done callback */
1794 		}
1795 	} else {
1796 		DPRINTF("close\n");
1797 
1798 		/* close here and now */
1799 		(xfer->endpoint->methods->close) (xfer);
1800 
1801 		/*
1802 		 * Any additional DMA delay is done by
1803 		 * "usbd_transfer_unsetup()".
1804 		 */
1805 
1806 		/*
1807 		 * Special case. Check if we need to restart a blocked
1808 		 * endpoint.
1809 		 */
1810 		ep = xfer->endpoint;
1811 
1812 		/*
1813 		 * If the current USB transfer is completing we need
1814 		 * to start the next one:
1815 		 */
1816 		if (ep->endpoint_q.curr == xfer) {
1817 			usb_command_wrapper(&ep->endpoint_q, NULL);
1818 		}
1819 	}
1820 
1821 	USB_BUS_UNLOCK(xfer->xroot->bus);
1822 }
1823 
1824 /*------------------------------------------------------------------------*
1825  *	usbd_transfer_pending
1826  *
1827  * This function will check if an USB transfer is pending which is a
1828  * This function will check if a USB transfer is pending, which is a
1829  * Return values:
1830  * 0: Not pending
1831  * 1: Pending: The USB transfer will receive a callback in the future.
1832  *------------------------------------------------------------------------*/
1833 uint8_t
1834 usbd_transfer_pending(struct usb_xfer *xfer)
1835 {
1836 	struct usb_xfer_root *info;
1837 	struct usb_xfer_queue *pq;
1838 
1839 	if (xfer == NULL) {
1840 		/* transfer is gone */
1841 		return (0);
1842 	}
1843 #if 0
1844 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1845 #endif
1846 
1847 	if (xfer->flags_int.transferring) {
1848 		/* trivial case */
1849 		return (1);
1850 	}
1851 	USB_BUS_LOCK(xfer->xroot->bus);
1852 	if (xfer->wait_queue) {
1853 		/* we are waiting on a queue somewhere */
1854 		USB_BUS_UNLOCK(xfer->xroot->bus);
1855 		return (1);
1856 	}
1857 	info = xfer->xroot;
1858 	pq = &info->done_q;
1859 
1860 	if (pq->curr == xfer) {
1861 		/* we are currently scheduled for callback */
1862 		USB_BUS_UNLOCK(xfer->xroot->bus);
1863 		return (1);
1864 	}
1865 	/* we are not pending */
1866 	USB_BUS_UNLOCK(xfer->xroot->bus);
1867 	return (0);
1868 }
1869 
1870 /*------------------------------------------------------------------------*
1871  *	usbd_transfer_drain
1872  *
1873  * This function will stop the USB transfer and wait for any
1874  * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1875  * are loaded into DMA can safely be freed or reused after this
1876  * function has returned.
1877  *------------------------------------------------------------------------*/
1878 void
1879 usbd_transfer_drain(struct usb_xfer *xfer)
1880 {
1881 #if 0
1882 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1883 	    "usbd_transfer_drain can sleep!");
1884 #endif
1885 
1886 	if (xfer == NULL) {
1887 		/* transfer is gone */
1888 		return;
1889 	}
1890 	USB_XFER_LOCK_ASSERT_NOTOWNED(xfer);
1891 	USB_XFER_LOCK(xfer);
1892 
1893 	usbd_transfer_stop(xfer);
1894 
1895 	while (usbd_transfer_pending(xfer) ||
1896 	    xfer->flags_int.doing_callback) {
1897 
1898 		/*
1899 		 * The callback is allowed to drop its transfer
1900 		 * mutex. In that case checking only
1901 		 * "usbd_transfer_pending()" is not enough to tell if
1902 		 * the USB transfer is fully drained. We also need to
1903 		 * check the internal "doing_callback" flag.
1904 		 */
1905 		xfer->flags_int.draining = 1;
1906 
1907 		/*
1908 		 * Wait until the current outstanding USB
1909 		 * transfer is complete !
1910 		 */
1911 		cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_lock);
1912 	}
1913 	USB_XFER_UNLOCK(xfer);
1914 }
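
/*
 * Minimal usage sketch (inside "#if 0", not compiled): draining is
 * done without holding the transfer lock, typically before a driver
 * frees or reuses buffers that were handed to the transfer with
 * usbd_xfer_set_frame_data(). The "sc" softc members are made up.
 */
#if 0
	/* the transfer lock must not be held here, see the assert above */
	usbd_transfer_drain(sc->sc_xfer[FOO_XFER_RD]);

	/* DMA is now quiesced; "sc->sc_buffer" may be freed or reused */
#endif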
1915 
1916 struct usb_page_cache *
1917 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
1918 {
1919 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1920 
1921 	return (&xfer->frbuffers[frindex]);
1922 }
1923 
1924 /*------------------------------------------------------------------------*
1925  *	usbd_xfer_get_fps_shift
1926  *
1927  * The following function is only useful for isochronous transfers. It
1928  * returns how many times the frame execution rate has been shifted
1929  * down.
1930  *
1931  * Return value:
1932  * Success: 0..3
1933  * Failure: 0
1934  *------------------------------------------------------------------------*/
1935 uint8_t
1936 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
1937 {
1938 	return (xfer->fps_shift);
1939 }
1940 
1941 usb_frlength_t
1942 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
1943 {
1944 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1945 
1946 	return (xfer->frlengths[frindex]);
1947 }
1948 
1949 /*------------------------------------------------------------------------*
1950  *	usbd_xfer_set_frame_data
1951  *
1952  * This function sets the pointer of the buffer that should be
1953  * loaded directly into DMA for the given USB frame. Passing "ptr"
1954  * equal to NULL while the corresponding "frlength" is greater
1955  * than zero gives undefined results!
1956  *------------------------------------------------------------------------*/
1957 void
1958 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1959     void *ptr, usb_frlength_t len)
1960 {
1961 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1962 
1963 	/* set virtual address to load and length */
1964 	xfer->frbuffers[frindex].buffer = ptr;
1965 	usbd_xfer_set_frame_len(xfer, frindex, len);
1966 }
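
/*
 * Hypothetical usage sketch: load a caller-owned buffer for DMA from
 * the USB_ST_SETUP state of a callback. This assumes the transfer was
 * configured with ".flags.ext_buffer = 1"; "sc" and "sc_rx_buf" are
 * illustrative names.
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf,
 *		    usbd_xfer_max_len(xfer));
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 */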
1967 
1968 void
1969 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
1970     void **ptr, int *len)
1971 {
1972 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1973 
1974 	if (ptr != NULL)
1975 		*ptr = xfer->frbuffers[frindex].buffer;
1976 	if (len != NULL)
1977 		*len = xfer->frlengths[frindex];
1978 }
1979 
1980 /*------------------------------------------------------------------------*
1981  *	usbd_xfer_old_frame_length
1982  *
1983  * This function returns the frame length of the given frame at the
1984  * time the transfer was submitted. This function can be used to
1985  * compute the starting data pointer of the next isochronous frame
1986  * when an isochronous transfer has completed.
1987  *------------------------------------------------------------------------*/
1988 usb_frlength_t
1989 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
1990 {
1991 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
1992 
1993 	return (xfer->frlengths[frindex + xfer->max_frame_count]);
1994 }
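
/*
 * Hypothetical usage sketch: walk the frames of a completed
 * isochronous transfer. The data for frame "x" starts where the
 * previously submitted frames ended, which is what
 * usbd_xfer_old_frame_length() lets the driver compute.
 *
 *	int nframes;
 *	usb_frlength_t offset = 0;
 *	usb_frcount_t x;
 *
 *	usbd_xfer_status(xfer, NULL, NULL, NULL, &nframes);
 *	for (x = 0; x != (usb_frcount_t)nframes; x++) {
 *		// usbd_xfer_frame_len(xfer, x) bytes were received
 *		// at buffer offset "offset"
 *		offset += usbd_xfer_old_frame_length(xfer, x);
 *	}
 */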
1995 
1996 void
1997 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
1998     int *nframes)
1999 {
2000 	if (actlen != NULL)
2001 		*actlen = xfer->actlen;
2002 	if (sumlen != NULL)
2003 		*sumlen = xfer->sumlen;
2004 	if (aframes != NULL)
2005 		*aframes = xfer->aframes;
2006 	if (nframes != NULL)
2007 		*nframes = xfer->nframes;
2008 }
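
/*
 * Hypothetical usage sketch: fetch the actual transfer length in the
 * USB_ST_TRANSFERRED state of a callback; unused output pointers may
 * simply be passed as NULL.
 *
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *	DPRINTF("received %d bytes\n", actlen);
 */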
2009 
2010 /*------------------------------------------------------------------------*
2011  *	usbd_xfer_set_frame_offset
2012  *
2013  * This function sets the frame data buffer offset relative to the beginning
2014  * of the USB DMA buffer allocated for this USB transfer.
2015  *------------------------------------------------------------------------*/
2016 void
2017 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2018     usb_frcount_t frindex)
2019 {
2020 	KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2021 	    "when the USB buffer is external\n"));
2022 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2023 
2024 	/* set virtual address to load */
2025 	xfer->frbuffers[frindex].buffer =
2026 	    USB_ADD_BYTES(xfer->local_buffer, offset);
2027 }
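
/*
 * Hypothetical usage sketch: place a small header and its payload back
 * to back inside the transfer's own (non-external) DMA buffer, using
 * two frames. This assumes the transfer was configured with at least
 * two frames; "MY_HDR_SIZE" and "payload_len" are illustrative names.
 *
 *	usbd_xfer_set_frame_offset(xfer, 0, 0);	// header at offset 0
 *	usbd_xfer_set_frame_len(xfer, 0, MY_HDR_SIZE);
 *	usbd_xfer_set_frame_offset(xfer, MY_HDR_SIZE, 1);	// payload
 *	usbd_xfer_set_frame_len(xfer, 1, payload_len);
 *	usbd_xfer_set_frames(xfer, 2);
 *	usbd_transfer_submit(xfer);
 */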
2028 
2029 void
2030 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2031 {
2032 	xfer->interval = i;
2033 }
2034 
2035 void
2036 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2037 {
2038 	xfer->timeout = t;
2039 }
2040 
2041 void
2042 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2043 {
2044 	xfer->nframes = n;
2045 }
2046 
2047 usb_frcount_t
2048 usbd_xfer_max_frames(struct usb_xfer *xfer)
2049 {
2050 	return (xfer->max_frame_count);
2051 }
2052 
2053 usb_frlength_t
2054 usbd_xfer_max_len(struct usb_xfer *xfer)
2055 {
2056 	return (xfer->max_data_length);
2057 }
2058 
2059 usb_frlength_t
2060 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2061 {
2062 	return (xfer->max_frame_size);
2063 }
2064 
2065 void
2066 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2067     usb_frlength_t len)
2068 {
2069 	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2070 
2071 	xfer->frlengths[frindex] = len;
2072 }
2073 
2074 /*------------------------------------------------------------------------*
2075  *	usb_callback_proc - factored out code
2076  *
2077  * This function performs USB callbacks.
2078  *------------------------------------------------------------------------*/
2079 static void
2080 usb_callback_proc(struct usb_proc_msg *_pm)
2081 {
2082 	struct usb_done_msg *pm = (void *)_pm;
2083 	struct usb_xfer_root *info = pm->xroot;
2084 
2085 	/* Change locking order */
2086 	USB_BUS_UNLOCK(info->bus);
2087 
2088 	/*
2089 	 * We exploit the fact that the mutex is the same for all
2090 	 * callbacks that will be called from this thread:
2091 	 */
2092 	lockmgr(info->xfer_lock, LK_EXCLUSIVE);
2093 	USB_BUS_LOCK(info->bus);
2094 
2095 	/* Continue where we lost track */
2096 	usb_command_wrapper(&info->done_q,
2097 	    info->done_q.curr);
2098 
2099 	lockmgr(info->xfer_lock, LK_RELEASE);
2100 }
2101 
2102 /*------------------------------------------------------------------------*
2103  *	usbd_callback_ss_done_defer
2104  *
2105  * This function will defer the start, stop and done callback to the
2106  * correct thread.
2107  *------------------------------------------------------------------------*/
2108 static void
2109 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2110 {
2111 	struct usb_xfer_root *info = xfer->xroot;
2112 	struct usb_xfer_queue *pq = &info->done_q;
2113 
2114 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus);
2115 
2116 	if (pq->curr != xfer) {
2117 		usbd_transfer_enqueue(pq, xfer);
2118 	}
2119 	if (!pq->recurse_1) {
2120 
2121 		/*
2122 	         * We have to postpone the callback, because trying to
2123 	         * proceed here would result in a Lock Order Reversal,
2124 	         * LOR.
2125 	         */
2126 		if (usb_proc_msignal(info->done_p,
2127 		    &info->done_m[0], &info->done_m[1])) {
2128 			/* ignore */
2129 		}
2130 	} else {
2131 		/* clear second recurse flag */
2132 		pq->recurse_2 = 0;
2133 	}
2134 	return;
2135 
2136 }
2137 
2138 /*------------------------------------------------------------------------*
2139  *	usbd_callback_wrapper
2140  *
2141  * This is a wrapper for USB callbacks. This wrapper does some
2142  * auto-magic things like figuring out if we can call the callback
2143  * directly from the current context or if we need to wakeup the
2144  * interrupt process.
2145  *------------------------------------------------------------------------*/
2146 static void
2147 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2148 {
2149 	struct usb_xfer *xfer = pq->curr;
2150 	struct usb_xfer_root *info = xfer->xroot;
2151 
2152 	USB_BUS_LOCK_ASSERT(info->bus);
2153 	if (!lockowned(info->xfer_lock)) {
2154 		/*
2155 	       	 * Cases that end up here:
2156 		 *
2157 		 * 5) HW interrupt done callback or other source.
2158 		 */
2159 		DPRINTFN(3, "case 5\n");
2160 
2161 		/*
2162 	         * We have to postpone the callback, because trying to
2163 	         * proceed here would result in a Lock Order Reversal,
2164 	         * LOR.
2165 	         */
2166 		if (usb_proc_msignal(info->done_p,
2167 		    &info->done_m[0], &info->done_m[1])) {
2168 			/* ignore */
2169 		}
2170 		return;
2171 	}
2172 	/*
2173 	 * Cases that end up here:
2174 	 *
2175 	 * 1) We are starting a transfer
2176 	 * 2) We are prematurely calling back a transfer
2177 	 * 3) We are stopping a transfer
2178 	 * 4) We are doing an ordinary callback
2179 	 */
2180 	DPRINTFN(3, "case 1-4\n");
2181 	/* get next USB transfer in the queue */
2182 	info->done_q.curr = NULL;
2183 
2184 	/* set flag in case of drain */
2185 	xfer->flags_int.doing_callback = 1;
2186 
2187 	USB_BUS_UNLOCK(info->bus);
2188 	USB_BUS_LOCK_ASSERT_NOTOWNED(info->bus);
2189 
2190 	/* set correct USB state for callback */
2191 	if (!xfer->flags_int.transferring) {
2192 		xfer->usb_state = USB_ST_SETUP;
2193 		if (!xfer->flags_int.started) {
2194 			/* we got stopped before we even got started */
2195 			USB_BUS_LOCK(info->bus);
2196 			goto done;
2197 		}
2198 	} else {
2199 
2200 		if (usbd_callback_wrapper_sub(xfer)) {
2201 			/* the callback has been deferred */
2202 			USB_BUS_LOCK(info->bus);
2203 			goto done;
2204 		}
2205 #if USB_HAVE_POWERD
2206 		/* decrement power reference */
2207 		usbd_transfer_power_ref(xfer, -1);
2208 #endif
2209 		xfer->flags_int.transferring = 0;
2210 
2211 		if (xfer->error) {
2212 			xfer->usb_state = USB_ST_ERROR;
2213 		} else {
2214 			/* set transferred state */
2215 			xfer->usb_state = USB_ST_TRANSFERRED;
2216 #if USB_HAVE_BUSDMA
2217 			/* sync DMA memory, if any */
2218 			if (xfer->flags_int.bdma_enable &&
2219 			    (!xfer->flags_int.bdma_no_post_sync)) {
2220 				usb_bdma_post_sync(xfer);
2221 			}
2222 #endif
2223 		}
2224 	}
2225 
2226 #if USB_HAVE_PF
2227 	if (xfer->usb_state != USB_ST_SETUP)
2228 		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2229 #endif
2230 	USB_XFER_LOCK_ASSERT(xfer);
2231 	/* call processing routine */
2232 	(xfer->callback) (xfer, xfer->error);
2233 
2234 	/* pickup the USB mutex again */
2235 	USB_BUS_LOCK(info->bus);
2236 
2237 	/*
2238 	 * Check if we got started after we got cancelled, but
2239 	 * before we managed to do the callback.
2240 	 */
2241 	if ((!xfer->flags_int.open) &&
2242 	    (xfer->flags_int.started) &&
2243 	    (xfer->usb_state == USB_ST_ERROR)) {
2244 		/* clear flag in case of drain */
2245 		xfer->flags_int.doing_callback = 0;
2246 		/* try to loop, but not recursively */
2247 		usb_command_wrapper(&info->done_q, xfer);
2248 		return;
2249 	}
2250 
2251 done:
2252 	/* clear flag in case of drain */
2253 	xfer->flags_int.doing_callback = 0;
2254 
2255 	/*
2256 	 * Check if we are draining.
2257 	 */
2258 	if (xfer->flags_int.draining &&
2259 	    (!xfer->flags_int.transferring)) {
2260 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2261 		xfer->flags_int.draining = 0;
2262 		cv_broadcast(&info->cv_drain);
2263 	}
2264 
2265 	/* do the next callback, if any */
2266 	usb_command_wrapper(&info->done_q,
2267 	    info->done_q.curr);
2268 }
2269 
2270 /*------------------------------------------------------------------------*
2271  *	usb_dma_delay_done_cb
2272  *
2273  * This function is called when the DMA delay has been executed, and
2274  * will make sure that the callback is called to complete the USB
2275  * transfer. This code path is usually only used when there is an USB
2276  * error like USB_ERR_CANCELLED.
2277  *------------------------------------------------------------------------*/
2278 void
2279 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2280 {
2281 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus);
2282 
2283 	DPRINTFN(3, "Completed %p\n", xfer);
2284 
2285 	/* queue callback for execution, again */
2286 	usbd_transfer_done(xfer, 0);
2287 }
2288 
2289 /*------------------------------------------------------------------------*
2290  *	usbd_transfer_dequeue
2291  *
2292  *  - This function is used to remove an USB transfer from a USB
2293  *  transfer queue.
2294  *
2295  *  - This function can be called multiple times in a row.
2296  *------------------------------------------------------------------------*/
2297 void
2298 usbd_transfer_dequeue(struct usb_xfer *xfer)
2299 {
2300 	struct usb_xfer_queue *pq;
2301 
2302 	pq = xfer->wait_queue;
2303 	if (pq) {
2304 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2305 		xfer->wait_queue = NULL;
2306 	}
2307 }
2308 
2309 /*------------------------------------------------------------------------*
2310  *	usbd_transfer_enqueue
2311  *
2312  *  - This function is used to insert an USB transfer into a USB
2313  *  transfer queue.
2314  *
2315  *  - This function can be called multiple times in a row.
2316  *------------------------------------------------------------------------*/
2317 void
2318 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2319 {
2320 	/*
2321 	 * Insert the USB transfer into the queue, if it is not
2322 	 * already on a USB transfer queue:
2323 	 */
2324 	if (xfer->wait_queue == NULL) {
2325 		xfer->wait_queue = pq;
2326 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2327 	}
2328 }
2329 
2330 /*------------------------------------------------------------------------*
2331  *	usbd_transfer_done
2332  *
2333  *  - This function is used to remove an USB transfer from the busdma,
2334  *  pipe or interrupt queue.
2335  *
2336  *  - This function is used to queue the USB transfer on the done
2337  *  queue.
2338  *
2339  *  - This function is used to stop any USB transfer timeouts.
2340  *------------------------------------------------------------------------*/
2341 void
2342 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2343 {
2344 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus);
2345 
2346 	DPRINTF("err=%s\n", usbd_errstr(error));
2347 
2348 	/*
2349 	 * If we are not transferring then just return.
2350 	 * This can happen during transfer cancel.
2351 	 */
2352 	if (!xfer->flags_int.transferring) {
2353 		DPRINTF("not transferring\n");
2354 		/* end of control transfer, if any */
2355 		xfer->flags_int.control_act = 0;
2356 		return;
2357 	}
2358 	/* only set transfer error if not already set */
2359 	if (!xfer->error) {
2360 		xfer->error = error;
2361 	}
2362 	/* stop any callouts */
2363 	usb_callout_stop(&xfer->timeout_handle);
2364 
2365 	/*
2366 	 * If we are waiting on a queue, just remove the USB transfer
2367 	 * from the queue, if any. We should have the required locks
2368 	 * locked to do the remove when this function is called.
2369 	 */
2370 	usbd_transfer_dequeue(xfer);
2371 
2372 #if USB_HAVE_BUSDMA
2373 	if (lockowned(xfer->xroot->xfer_lock)) {
2374 		struct usb_xfer_queue *pq;
2375 
2376 		/*
2377 		 * If the private USB lock is not locked, then we assume
2378 		 * that the BUS-DMA load stage has been passed:
2379 		 */
2380 		pq = &xfer->xroot->dma_q;
2381 
2382 		if (pq->curr == xfer) {
2383 			/* start the next BUS-DMA load, if any */
2384 			usb_command_wrapper(pq, NULL);
2385 		}
2386 	}
2387 #endif
2388 	/* keep some statistics */
2389 	if (xfer->error) {
2390 		xfer->xroot->bus->stats_err.uds_requests
2391 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2392 	} else {
2393 		xfer->xroot->bus->stats_ok.uds_requests
2394 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2395 	}
2396 
2397 	/* call the USB transfer callback */
2398 	usbd_callback_ss_done_defer(xfer);
2399 }
2400 
2401 /*------------------------------------------------------------------------*
2402  *	usbd_transfer_start_cb
2403  *
2404  * This function is called to start the USB transfer when
2405  * "xfer->interval" is greater than zero, and the endpoint type is
2406  * BULK or CONTROL.
2407  *------------------------------------------------------------------------*/
2408 static void
2409 usbd_transfer_start_cb(void *arg)
2410 {
2411 	struct usb_xfer *xfer = arg;
2412 	struct usb_endpoint *ep = xfer->endpoint;
2413 
2414 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus);
2415 
2416 	DPRINTF("start\n");
2417 
2418 #if USB_HAVE_PF
2419 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2420 #endif
2421 	/* start USB transfer, if no error */
2422 	if (xfer->error == 0)
2423 		(ep->methods->start) (xfer);
2424 
2425 	xfer->flags_int.can_cancel_immed = 1;
2426 
2427 	/* check for error */
2428 	if (xfer->error) {
2429 		/* some error has happened */
2430 		usbd_transfer_done(xfer, 0);
2431 	}
2432 }
2433 
2434 /*------------------------------------------------------------------------*
2435  *	usbd_xfer_set_stall
2436  *
2437  * This function is used to set the stall flag outside the
2438  * callback. This function is NULL safe.
2439  *------------------------------------------------------------------------*/
2440 void
2441 usbd_xfer_set_stall(struct usb_xfer *xfer)
2442 {
2443 	if (xfer == NULL) {
2444 		/* tearing down */
2445 		return;
2446 	}
2447 	USB_XFER_LOCK_ASSERT(xfer);
2448 
2449 	/* avoid any races by locking the USB mutex */
2450 	USB_BUS_LOCK(xfer->xroot->bus);
2451 	xfer->flags.stall_pipe = 1;
2452 	USB_BUS_UNLOCK(xfer->xroot->bus);
2453 }
2454 
2455 int
2456 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2457 {
2458 	return (xfer->endpoint->is_stalled);
2459 }
2460 
2461 /*------------------------------------------------------------------------*
2462  *	usbd_transfer_clear_stall
2463  *
2464  * This function is used to clear the stall flag outside the
2465  * callback. This function is NULL safe.
2466  *------------------------------------------------------------------------*/
2467 void
2468 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2469 {
2470 	if (xfer == NULL) {
2471 		/* tearing down */
2472 		return;
2473 	}
2474 	USB_XFER_LOCK_ASSERT(xfer);
2475 
2476 	/* avoid any races by locking the USB mutex */
2477 	USB_BUS_LOCK(xfer->xroot->bus);
2478 
2479 	xfer->flags.stall_pipe = 0;
2480 
2481 	USB_BUS_UNLOCK(xfer->xroot->bus);
2482 }
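
/*
 * Hypothetical usage sketch: the common host-side error handling
 * pattern where a failed bulk transfer requests a stall clear from its
 * callback before being restarted; "tr_setup" is an illustrative label
 * for the USB_ST_SETUP code in the same callback.
 *
 *	default:			// error state
 *		if (error != USB_ERR_CANCELLED) {
 *			// request a stall clear on the next start
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 */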
2483 
2484 /*------------------------------------------------------------------------*
2485  *	usbd_pipe_start
2486  *
2487  * This function is used to add an USB transfer to the pipe transfer list.
2488  *------------------------------------------------------------------------*/
2489 void
2490 usbd_pipe_start(struct usb_xfer_queue *pq)
2491 {
2492 	struct usb_endpoint *ep;
2493 	struct usb_xfer *xfer;
2494 	uint8_t type;
2495 
2496 	xfer = pq->curr;
2497 	ep = xfer->endpoint;
2498 
2499 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus);
2500 
2501 	/*
2502 	 * If the endpoint is already stalled we do nothing !
2503 	 */
2504 	if (ep->is_stalled) {
2505 		return;
2506 	}
2507 	/*
2508 	 * Check if we are supposed to stall the endpoint:
2509 	 */
2510 	if (xfer->flags.stall_pipe) {
2511 		struct usb_device *udev;
2512 		struct usb_xfer_root *info;
2513 
2514 		/* clear stall command */
2515 		xfer->flags.stall_pipe = 0;
2516 
2517 		/* get pointer to USB device */
2518 		info = xfer->xroot;
2519 		udev = info->udev;
2520 
2521 		/*
2522 		 * Only stall BULK and INTERRUPT endpoints.
2523 		 */
2524 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2525 		if ((type == UE_BULK) ||
2526 		    (type == UE_INTERRUPT)) {
2527 			uint8_t did_stall;
2528 
2529 			did_stall = 1;
2530 
2531 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2532 				(udev->bus->methods->set_stall) (
2533 				    udev, NULL, ep, &did_stall);
2534 			} else if (udev->ctrl_xfer[1]) {
2535 				info = udev->ctrl_xfer[1]->xroot;
2536 				usb_proc_msignal(
2537 				    &info->bus->non_giant_callback_proc,
2538 				    &udev->cs_msg[0], &udev->cs_msg[1]);
2539 			} else {
2540 				/* should not happen */
2541 				DPRINTFN(0, "No stall handler\n");
2542 			}
2543 			/*
2544 			 * Check if we should stall. Some USB hardware
2545 			 * handles set- and clear-stall in hardware.
2546 			 */
2547 			if (did_stall) {
2548 				/*
2549 				 * The transfer will be continued when
2550 				 * the clear-stall control endpoint
2551 				 * message is received.
2552 				 */
2553 				ep->is_stalled = 1;
2554 				return;
2555 			}
2556 		} else if (type == UE_ISOCHRONOUS) {
2557 
2558 			/*
2559 			 * Make sure any FIFO overflow or other FIFO
2560 			 * error conditions go away by resetting the
2561 			 * endpoint FIFO through the clear stall
2562 			 * method.
2563 			 */
2564 			if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2565 				(udev->bus->methods->clear_stall) (udev, ep);
2566 			}
2567 		}
2568 	}
2569 	/* Set or clear stall complete - special case */
2570 	if (xfer->nframes == 0) {
2571 		/* we are complete */
2572 		xfer->aframes = 0;
2573 		usbd_transfer_done(xfer, 0);
2574 		return;
2575 	}
2576 	/*
2577 	 * Handled cases:
2578 	 *
2579 	 * 1) Start the first transfer queued.
2580 	 *
2581 	 * 2) Re-start the current USB transfer.
2582 	 */
2583 	/*
2584 	 * Check if there should be any
2585 	 * pre transfer start delay:
2586 	 */
2587 	if (xfer->interval > 0) {
2588 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2589 		if ((type == UE_BULK) ||
2590 		    (type == UE_CONTROL)) {
2591 			usbd_transfer_timeout_ms(xfer,
2592 			    &usbd_transfer_start_cb,
2593 			    xfer->interval);
2594 			return;
2595 		}
2596 	}
2597 	DPRINTF("start\n");
2598 
2599 #if USB_HAVE_PF
2600 	usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2601 #endif
2602 	/* start USB transfer, if no error */
2603 	if (xfer->error == 0)
2604 		(ep->methods->start) (xfer);
2605 
2606 	xfer->flags_int.can_cancel_immed = 1;
2607 
2608 	/* check for error */
2609 	if (xfer->error) {
2610 		/* some error has happened */
2611 		usbd_transfer_done(xfer, 0);
2612 	}
2613 }
2614 
2615 /*------------------------------------------------------------------------*
2616  *	usbd_transfer_timeout_ms
2617  *
2618  * This function is used to setup a timeout on the given USB
2619  * transfer. If the timeout has been deferred the callback given by
2620  * "cb" will get called after "ms" milliseconds.
2621  *------------------------------------------------------------------------*/
2622 void
2623 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2624     void (*cb) (void *arg), usb_timeout_t ms)
2625 {
2626 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus);
2627 
2628 	/* defer delay */
2629 	usb_callout_reset(&xfer->timeout_handle,
2630 	    USB_MS_TO_TICKS(ms), cb, xfer);
2631 }
2632 
2633 /*------------------------------------------------------------------------*
2634  *	usbd_callback_wrapper_sub
2635  *
2636  *  - This function will update variables in an USB transfer after
2637  *  the USB transfer is complete.
2638  *
2639  *  - This function is used to start the next USB transfer on the
2640  *  ep transfer queue, if any.
2641  *
2642  * NOTE: In some special cases the USB transfer will not be removed from
2643  * the pipe queue, but remain first. To enforce USB transfer removal call
2644  * this function passing the error code "USB_ERR_CANCELLED".
2645  *
2646  * Return values:
2647  * 0: Success.
2648  * Else: The callback has been deferred.
2649  *------------------------------------------------------------------------*/
2650 static uint8_t
2651 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2652 {
2653 	struct usb_endpoint *ep;
2654 	struct usb_bus *bus;
2655 	usb_frcount_t x;
2656 
2657 	bus = xfer->xroot->bus;
2658 
2659 	if ((!xfer->flags_int.open) &&
2660 	    (!xfer->flags_int.did_close)) {
2661 		DPRINTF("close\n");
2662 		USB_BUS_LOCK(bus);
2663 		(xfer->endpoint->methods->close) (xfer);
2664 		USB_BUS_UNLOCK(bus);
2665 		/* only close once */
2666 		xfer->flags_int.did_close = 1;
2667 		return (1);		/* wait for new callback */
2668 	}
2669 	/*
2670 	 * If we have a non-hardware induced error we
2671 	 * need to do the DMA delay!
2672 	 */
2673 	if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2674 	    (xfer->error == USB_ERR_CANCELLED ||
2675 	    xfer->error == USB_ERR_TIMEOUT ||
2676 	    bus->methods->start_dma_delay != NULL)) {
2677 
2678 		usb_timeout_t temp;
2679 
2680 		/* only delay once */
2681 		xfer->flags_int.did_dma_delay = 1;
2682 
2683 		/* we can not cancel this delay */
2684 		xfer->flags_int.can_cancel_immed = 0;
2685 
2686 		temp = usbd_get_dma_delay(xfer->xroot->udev);
2687 
2688 		DPRINTFN(3, "DMA delay, %u ms, "
2689 		    "on %p\n", temp, xfer);
2690 
2691 		if (temp != 0) {
2692 			USB_BUS_LOCK(bus);
2693 			/*
2694 			 * Some hardware solutions have dedicated
2695 			 * events when it is safe to free DMA'ed
2696 			 * memory. For the other hardware platforms we
2697 			 * use a static delay.
2698 			 */
2699 			if (bus->methods->start_dma_delay != NULL) {
2700 				(bus->methods->start_dma_delay) (xfer);
2701 			} else {
2702 				usbd_transfer_timeout_ms(xfer,
2703 				    (void *)&usb_dma_delay_done_cb, temp);
2704 			}
2705 			USB_BUS_UNLOCK(bus);
2706 			return (1);	/* wait for new callback */
2707 		}
2708 	}
2709 	/* check actual number of frames */
2710 	if (xfer->aframes > xfer->nframes) {
2711 		if (xfer->error == 0) {
2712 			panic("%s: actual number of frames, %d, is "
2713 			    "greater than initial number of frames, %d\n",
2714 			    __func__, xfer->aframes, xfer->nframes);
2715 		} else {
2716 			/* just set some valid value */
2717 			xfer->aframes = xfer->nframes;
2718 		}
2719 	}
2720 	/* compute actual length */
2721 	xfer->actlen = 0;
2722 
2723 	for (x = 0; x != xfer->aframes; x++) {
2724 		xfer->actlen += xfer->frlengths[x];
2725 	}
2726 
2727 	/*
2728 	 * Frames that were not transferred get zero actual length in
2729 	 * case the USB device driver does not check the actual number
2730 	 * of frames transferred, "xfer->aframes":
2731 	 */
2732 	for (; x < xfer->nframes; x++) {
2733 		usbd_xfer_set_frame_len(xfer, x, 0);
2734 	}
2735 
2736 	/* check actual length */
2737 	if (xfer->actlen > xfer->sumlen) {
2738 		if (xfer->error == 0) {
2739 			panic("%s: actual length, %d, is greater than "
2740 			    "initial length, %d\n",
2741 			    __func__, xfer->actlen, xfer->sumlen);
2742 		} else {
2743 			/* just set some valid value */
2744 			xfer->actlen = xfer->sumlen;
2745 		}
2746 	}
2747 	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2748 	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2749 	    xfer->aframes, xfer->nframes);
2750 
2751 	if (xfer->error) {
2752 		/* end of control transfer, if any */
2753 		xfer->flags_int.control_act = 0;
2754 
2755 		/* check if we should block the execution queue */
2756 		if ((xfer->error != USB_ERR_CANCELLED) &&
2757 		    (xfer->flags.pipe_bof)) {
2758 			DPRINTFN(2, "xfer=%p: Block On Failure "
2759 			    "on endpoint=%p\n", xfer, xfer->endpoint);
2760 			goto done;
2761 		}
2762 	} else {
2763 		/* check for short transfers */
2764 		if (xfer->actlen < xfer->sumlen) {
2765 
2766 			/* end of control transfer, if any */
2767 			xfer->flags_int.control_act = 0;
2768 
2769 			if (!xfer->flags_int.short_xfer_ok) {
2770 				xfer->error = USB_ERR_SHORT_XFER;
2771 				if (xfer->flags.pipe_bof) {
2772 					DPRINTFN(2, "xfer=%p: Block On Failure on "
2773 					    "Short Transfer on endpoint %p.\n",
2774 					    xfer, xfer->endpoint);
2775 					goto done;
2776 				}
2777 			}
2778 		} else {
2779 			/*
2780 			 * Check if we are in the middle of a
2781 			 * control transfer:
2782 			 */
2783 			if (xfer->flags_int.control_act) {
2784 				DPRINTFN(5, "xfer=%p: Control transfer "
2785 				    "active on endpoint=%p\n", xfer, xfer->endpoint);
2786 				goto done;
2787 			}
2788 		}
2789 	}
2790 
2791 	ep = xfer->endpoint;
2792 
2793 	/*
2794 	 * If the current USB transfer is completing we need to start the
2795 	 * next one:
2796 	 */
2797 	USB_BUS_LOCK(bus);
2798 	if (ep->endpoint_q.curr == xfer) {
2799 		usb_command_wrapper(&ep->endpoint_q, NULL);
2800 
2801 		if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) {
2802 			/* there is another USB transfer waiting */
2803 		} else {
2804 			/* this is the last USB transfer */
2805 			/* clear isochronous sync flag */
2806 			xfer->endpoint->is_synced = 0;
2807 		}
2808 	}
2809 	USB_BUS_UNLOCK(bus);
2810 done:
2811 	return (0);
2812 }
2813 
2814 /*------------------------------------------------------------------------*
2815  *	usb_command_wrapper
2816  *
2817  * This function is used to execute commands non-recursively on an USB
2818  * transfer.
2819  *------------------------------------------------------------------------*/
2820 void
2821 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2822 {
2823 	if (xfer) {
2824 		/*
2825 		 * If the transfer is not already processing,
2826 		 * queue it!
2827 		 */
2828 		if (pq->curr != xfer) {
2829 			usbd_transfer_enqueue(pq, xfer);
2830 			if (pq->curr != NULL) {
2831 				/* something is already processing */
2832 				DPRINTFN(6, "busy %p\n", pq->curr);
2833 				return;
2834 			}
2835 		}
2836 	} else {
2837 		/* Get next element in queue */
2838 		pq->curr = NULL;
2839 	}
2840 
2841 	if (!pq->recurse_1) {
2842 
2843 		do {
2844 
2845 			/* set both recurse flags */
2846 			pq->recurse_1 = 1;
2847 			pq->recurse_2 = 1;
2848 
2849 			if (pq->curr == NULL) {
2850 				xfer = TAILQ_FIRST(&pq->head);
2851 				if (xfer) {
2852 					TAILQ_REMOVE(&pq->head, xfer,
2853 					    wait_entry);
2854 					xfer->wait_queue = NULL;
2855 					pq->curr = xfer;
2856 				} else {
2857 					break;
2858 				}
2859 			}
2860 			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2861 			(pq->command) (pq);
2862 			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2863 
2864 		} while (!pq->recurse_2);
2865 
2866 		/* clear first recurse flag */
2867 		pq->recurse_1 = 0;
2868 
2869 	} else {
2870 		/* clear second recurse flag */
2871 		pq->recurse_2 = 0;
2872 	}
2873 }
2874 
2875 /*------------------------------------------------------------------------*
2876  *	usbd_ctrl_transfer_setup
2877  *
2878  * This function is used to setup the default USB control endpoint
2879  * transfer.
2880  *------------------------------------------------------------------------*/
2881 void
2882 usbd_ctrl_transfer_setup(struct usb_device *udev)
2883 {
2884 	struct usb_xfer *xfer;
2885 	uint8_t no_resetup;
2886 	uint8_t iface_index;
2887 
2888 	/* check for root HUB */
2889 	if (udev->parent_hub == NULL)
2890 		return;
2891 repeat:
2892 
2893 	xfer = udev->ctrl_xfer[0];
2894 	if (xfer) {
2895 		USB_XFER_LOCK(xfer);
2896 		no_resetup =
2897 		    ((xfer->address == udev->address) &&
2898 		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2899 		    udev->ddesc.bMaxPacketSize));
2900 		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2901 			if (no_resetup) {
2902 				/*
2903 				 * NOTE: checking "xfer->address" and
2904 				 * starting the USB transfer must be
2905 				 * atomic!
2906 				 */
2907 				usbd_transfer_start(xfer);
2908 			}
2909 		}
2910 		USB_XFER_UNLOCK(xfer);
2911 	} else {
2912 		no_resetup = 0;
2913 	}
2914 
2915 	if (no_resetup) {
2916 		/*
2917 	         * All parameters are exactly the same as before.
2918 	         * Just return.
2919 	         */
2920 		return;
2921 	}
2922 	/*
2923 	 * Update wMaxPacketSize for the default control endpoint:
2924 	 */
2925 	udev->ctrl_ep_desc.wMaxPacketSize[0] =
2926 	    udev->ddesc.bMaxPacketSize;
2927 
2928 	/*
2929 	 * Unsetup any existing USB transfer:
2930 	 */
2931 	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
2932 
2933 	/*
2934 	 * Reset clear stall error counter.
2935 	 */
2936 	udev->clear_stall_errors = 0;
2937 
2938 	/*
2939 	 * Try to setup a new USB transfer for the
2940 	 * default control endpoint:
2941 	 */
2942 	iface_index = 0;
2943 	if (usbd_transfer_setup(udev, &iface_index,
2944 	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
2945 	    &udev->device_lock)) {
2946 		DPRINTFN(0, "could not setup default "
2947 		    "USB transfer\n");
2948 	} else {
2949 		goto repeat;
2950 	}
2951 }
2952 
2953 /*------------------------------------------------------------------------*
2954  *	usbd_clear_stall_locked - factored out code
2955  *
2956  * NOTE: the intention of this function is not to reset the hardware
2957  * data toggle.
2958  *------------------------------------------------------------------------*/
2959 void
2960 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
2961 {
2962 	USB_BUS_LOCK_ASSERT(udev->bus);
2963 
2964 	/* check that we have a valid case */
2965 	if (udev->flags.usb_mode == USB_MODE_HOST &&
2966 	    udev->parent_hub != NULL &&
2967 	    udev->bus->methods->clear_stall != NULL &&
2968 	    ep->methods != NULL) {
2969 		(udev->bus->methods->clear_stall) (udev, ep);
2970 	}
2971 }
2972 
2973 /*------------------------------------------------------------------------*
2974  *	usbd_clear_data_toggle - factored out code
2975  *
2976  * NOTE: the intention of this function is not to reset the hardware
2977  * data toggle on the USB device side.
2978  *------------------------------------------------------------------------*/
2979 void
2980 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
2981 {
2982 	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
2983 
2984 	USB_BUS_LOCK(udev->bus);
2985 	ep->toggle_next = 0;
2986 	/* some hardware needs a callback to clear the data toggle */
2987 	usbd_clear_stall_locked(udev, ep);
2988 	USB_BUS_UNLOCK(udev->bus);
2989 }
2990 
2991 /*------------------------------------------------------------------------*
2992  *	usbd_clear_stall_callback - factored out clear stall callback
2993  *
2994  * Input parameters:
2995  *  xfer1: Clear Stall Control Transfer
2996  *  xfer2: Stalled USB Transfer
2997  *
2998  * This function is NULL safe.
2999  *
3000  * Return values:
3001  *   0: In progress
3002  *   Else: Finished
3003  *
3004  * Clear stall config example:
3005  *
3006  * static const struct usb_config my_clearstall =  {
3007  *	.type = UE_CONTROL,
3008  *	.endpoint = 0,
3009  *	.direction = UE_DIR_ANY,
3010  *	.interval = 50, //50 milliseconds
3011  *	.bufsize = sizeof(struct usb_device_request),
3012  *	.timeout = 1000, //1.000 seconds
3013  *	.callback = &my_clear_stall_callback, // **
3014  *	.usb_mode = USB_MODE_HOST,
3015  * };
3016  *
3017  * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3018  * passing the correct parameters.
3019  *------------------------------------------------------------------------*/
3020 uint8_t
3021 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3022     struct usb_xfer *xfer2)
3023 {
3024 	struct usb_device_request req;
3025 
3026 	if (xfer2 == NULL) {
3027 		/* looks like we are tearing down */
3028 		DPRINTF("NULL input parameter\n");
3029 		return (0);
3030 	}
3031 	USB_XFER_LOCK_ASSERT(xfer1);
3032 	USB_XFER_LOCK_ASSERT(xfer2);
3033 
3034 	switch (USB_GET_STATE(xfer1)) {
3035 	case USB_ST_SETUP:
3036 
3037 		/*
3038 		 * pre-clear the data toggle to DATA0 ("umass.c" and
3039 		 * "ata-usb.c" depend on this)
3040 		 */
3041 
3042 		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3043 
3044 		/* setup a clear-stall packet */
3045 
3046 		req.bmRequestType = UT_WRITE_ENDPOINT;
3047 		req.bRequest = UR_CLEAR_FEATURE;
3048 		USETW(req.wValue, UF_ENDPOINT_HALT);
3049 		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3050 		req.wIndex[1] = 0;
3051 		USETW(req.wLength, 0);
3052 
3053 		/*
3054 		 * "usbd_transfer_setup_sub()" will ensure that
3055 		 * we have sufficient room in the buffer for
3056 		 * the request structure!
3057 		 */
3058 
3059 		/* copy in the transfer */
3060 
3061 		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3062 
3063 		/* set length */
3064 		xfer1->frlengths[0] = sizeof(req);
3065 		xfer1->nframes = 1;
3066 
3067 		usbd_transfer_submit(xfer1);
3068 		return (0);
3069 
3070 	case USB_ST_TRANSFERRED:
3071 		break;
3072 
3073 	default:			/* Error */
3074 		if (xfer1->error == USB_ERR_CANCELLED) {
3075 			return (0);
3076 		}
3077 		break;
3078 	}
3079 	return (1);			/* Clear Stall Finished */
3080 }
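
/*
 * Hypothetical usage sketch: the callback referenced as
 * "my_clear_stall_callback" in the configuration example above;
 * "my_softc" and "sc_stalled_xfer" are illustrative names.
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *		if (usbd_clear_stall_callback(xfer, sc->sc_stalled_xfer)) {
 *			// finished - restart the previously stalled transfer
 *			usbd_transfer_start(sc->sc_stalled_xfer);
 *		}
 *	}
 */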
3081 
3082 /*------------------------------------------------------------------------*
3083  *	usbd_transfer_poll
3084  *
3085  * The following function gets called from the USB keyboard driver and
3086  * UMASS when the system has panicked.
3087  *
3088  * NOTE: It is currently not possible to resume normal operation on
3089  * the USB controller which has been polled, due to clearing of the
3090  * "up_dsleep" and "up_msleep" flags.
3091  *------------------------------------------------------------------------*/
3092 void
3093 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3094 {
3095 	struct usb_xfer *xfer;
3096 	struct usb_xfer_root *xroot;
3097 	struct usb_device *udev;
3098 	struct usb_proc_msg *pm;
3099 	uint16_t n;
3100 	uint16_t drop_bus;
3101 	uint16_t drop_xfer;
3102 
3103 	for (n = 0; n != max; n++) {
3104 		/* Extra checks to avoid panic */
3105 		xfer = ppxfer[n];
3106 		if (xfer == NULL)
3107 			continue;	/* no USB transfer */
3108 		xroot = xfer->xroot;
3109 		if (xroot == NULL)
3110 			continue;	/* no USB root */
3111 		udev = xroot->udev;
3112 		if (udev == NULL)
3113 			continue;	/* no USB device */
3114 		if (udev->bus == NULL)
3115 			continue;	/* no BUS structure */
3116 		if (udev->bus->methods == NULL)
3117 			continue;	/* no BUS methods */
3118 		if (udev->bus->methods->xfer_poll == NULL)
3119 			continue;	/* no poll method */
3120 
3121 		/* make sure that the BUS mutex is not locked */
3122 		drop_bus = 0;
3123 		while (lockowned(&xroot->udev->bus->bus_lock)) {
3124 			lockmgr(&xroot->udev->bus->bus_lock, LK_RELEASE);
3125 			drop_bus++;
3126 		}
3127 
3128 		/* make sure that the transfer mutex is not locked */
3129 		drop_xfer = 0;
3130 		while (lockowned(xroot->xfer_lock)) {
3131 			lockmgr(xroot->xfer_lock, LK_RELEASE);
3132 			drop_xfer++;
3133 		}
3134 
3135 		/* Make sure cv_signal() and cv_broadcast() are not called */
3136 		udev->bus->control_xfer_proc.up_msleep = 0;
3137 		udev->bus->explore_proc.up_msleep = 0;
3138 		udev->bus->giant_callback_proc.up_msleep = 0;
3139 		udev->bus->non_giant_callback_proc.up_msleep = 0;
3140 
3141 		/* poll USB hardware */
3142 		(udev->bus->methods->xfer_poll) (udev->bus);
3143 
3144 		USB_BUS_LOCK(xroot->bus);
3145 
3146 		/* check for clear stall */
3147 		if (udev->ctrl_xfer[1] != NULL) {
3148 
3149 			/* poll clear stall start */
3150 			pm = &udev->cs_msg[0].hdr;
3151 			(pm->pm_callback) (pm);
3152 			/* poll clear stall done thread */
3153 			pm = &udev->ctrl_xfer[1]->
3154 			    xroot->done_m[0].hdr;
3155 			(pm->pm_callback) (pm);
3156 		}
3157 
3158 		/* poll done thread */
3159 		pm = &xroot->done_m[0].hdr;
3160 		(pm->pm_callback) (pm);
3161 
3162 		USB_BUS_UNLOCK(xroot->bus);
3163 
3164 		/* restore transfer mutex */
3165 		while (drop_xfer--)
3166 			lockmgr(xroot->xfer_lock, LK_EXCLUSIVE);
3167 
3168 		/* restore BUS mutex */
3169 		while (drop_bus--)
3170 			lockmgr(&xroot->udev->bus->bus_lock, LK_EXCLUSIVE);
3171 	}
3172 }
3173 
3174 static void
3175 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3176     uint8_t type, enum usb_dev_speed speed)
3177 {
3178 	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3179 		[USB_SPEED_LOW] = 8,
3180 		[USB_SPEED_FULL] = 64,
3181 		[USB_SPEED_HIGH] = 1024,
3182 		[USB_SPEED_VARIABLE] = 1024,
3183 		[USB_SPEED_SUPER] = 1024,
3184 	};
3185 
3186 	static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3187 		[USB_SPEED_LOW] = 0,	/* invalid */
3188 		[USB_SPEED_FULL] = 1023,
3189 		[USB_SPEED_HIGH] = 1024,
3190 		[USB_SPEED_VARIABLE] = 3584,
3191 		[USB_SPEED_SUPER] = 1024,
3192 	};
3193 
3194 	static const uint16_t control_min[USB_SPEED_MAX] = {
3195 		[USB_SPEED_LOW] = 8,
3196 		[USB_SPEED_FULL] = 8,
3197 		[USB_SPEED_HIGH] = 64,
3198 		[USB_SPEED_VARIABLE] = 512,
3199 		[USB_SPEED_SUPER] = 512,
3200 	};
3201 
3202 	static const uint16_t bulk_min[USB_SPEED_MAX] = {
3203 		[USB_SPEED_LOW] = 8,
3204 		[USB_SPEED_FULL] = 8,
3205 		[USB_SPEED_HIGH] = 512,
3206 		[USB_SPEED_VARIABLE] = 512,
3207 		[USB_SPEED_SUPER] = 1024,
3208 	};
3209 
3210 	uint16_t temp;
3211 
3212 	memset(ptr, 0, sizeof(*ptr));
3213 
3214 	switch (type) {
3215 	case UE_INTERRUPT:
3216 		ptr->range.max = intr_range_max[speed];
3217 		break;
3218 	case UE_ISOCHRONOUS:
3219 		ptr->range.max = isoc_range_max[speed];
3220 		break;
3221 	default:
3222 		if (type == UE_BULK)
3223 			temp = bulk_min[speed];
3224 		else /* UE_CONTROL */
3225 			temp = control_min[speed];
3226 
3227 		/* default is fixed */
3228 		ptr->fixed[0] = temp;
3229 		ptr->fixed[1] = temp;
3230 		ptr->fixed[2] = temp;
3231 		ptr->fixed[3] = temp;
3232 
3233 		if (speed == USB_SPEED_FULL) {
3234 			/* multiple sizes */
3235 			ptr->fixed[1] = 16;
3236 			ptr->fixed[2] = 32;
3237 			ptr->fixed[3] = 64;
3238 		}
3239 		if ((speed == USB_SPEED_VARIABLE) &&
3240 		    (type == UE_BULK)) {
3241 			/* multiple sizes */
3242 			ptr->fixed[2] = 1024;
3243 			ptr->fixed[3] = 1536;
3244 		}
3245 		break;
3246 	}
3247 }
3248 
3249 void	*
3250 usbd_xfer_softc(struct usb_xfer *xfer)
3251 {
3252 	return (xfer->priv_sc);
3253 }
3254 
3255 void *
3256 usbd_xfer_get_priv(struct usb_xfer *xfer)
3257 {
3258 	return (xfer->priv_fifo);
3259 }
3260 
3261 void
3262 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3263 {
3264 	xfer->priv_fifo = ptr;
3265 }
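
/*
 * Hypothetical usage sketch: attach a per-transfer context, such as
 * the mbuf currently being transmitted, and recover it in the
 * completion callback; "m" is an illustrative variable.
 *
 *	// before submitting:
 *	usbd_xfer_set_priv(xfer, m);
 *	usbd_transfer_submit(xfer);
 *
 *	// in the USB_ST_TRANSFERRED state of the callback:
 *	m = usbd_xfer_get_priv(xfer);
 *	usbd_xfer_set_priv(xfer, NULL);
 */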
3266 
3267 uint8_t
3268 usbd_xfer_state(struct usb_xfer *xfer)
3269 {
3270 	return (xfer->usb_state);
3271 }
3272 
3273 void
3274 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3275 {
3276 	switch (flag) {
3277 		case USB_FORCE_SHORT_XFER:
3278 			xfer->flags.force_short_xfer = 1;
3279 			break;
3280 		case USB_SHORT_XFER_OK:
3281 			xfer->flags.short_xfer_ok = 1;
3282 			break;
3283 		case USB_MULTI_SHORT_OK:
3284 			xfer->flags.short_frames_ok = 1;
3285 			break;
3286 		case USB_MANUAL_STATUS:
3287 			xfer->flags.manual_status = 1;
3288 			break;
3289 	}
3290 }
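
/*
 * Hypothetical usage sketch: adjust transfer flags at run-time instead
 * of through the usb_config structure.
 *
 *	// accept reads that are shorter than the requested length
 *	usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 *	// terminate writes with a short (possibly zero-length) packet
 *	usbd_xfer_set_flag(xfer, USB_FORCE_SHORT_XFER);
 */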
3291 
3292 void
3293 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3294 {
3295 	switch (flag) {
3296 		case USB_FORCE_SHORT_XFER:
3297 			xfer->flags.force_short_xfer = 0;
3298 			break;
3299 		case USB_SHORT_XFER_OK:
3300 			xfer->flags.short_xfer_ok = 0;
3301 			break;
3302 		case USB_MULTI_SHORT_OK:
3303 			xfer->flags.short_frames_ok = 0;
3304 			break;
3305 		case USB_MANUAL_STATUS:
3306 			xfer->flags.manual_status = 0;
3307 			break;
3308 	}
3309 }
3310 
3311 /*
3312  * The following function returns in milliseconds when the isochronous
3313  * transfer was completed by the hardware. The returned value wraps
3314  * around 65536 milliseconds.
3315  */
3316 uint16_t
3317 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3318 {
3319 	return (xfer->isoc_time_complete);
3320 }
3321