xref: /netbsd/sys/dev/usb/ugen.c (revision 6550d01e)
1 /*	$NetBSD: ugen.c,v 1.111 2010/11/03 22:34:23 dyoung Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Lennart Augustsson (lennart@augustsson.net) at
9  * Carlstedt Research & Technology.
10  *
11  * Copyright (c) 2006 BBN Technologies Corp.  All rights reserved.
12  * Effort sponsored in part by the Defense Advanced Research Projects
13  * Agency (DARPA) and the Department of the Interior National Business
14  * Center under agreement number NBCHC050166.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: ugen.c,v 1.111 2010/11/03 22:34:23 dyoung Exp $");
41 
42 #include "opt_compat_netbsd.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 #if defined(__NetBSD__) || defined(__OpenBSD__)
49 #include <sys/device.h>
50 #include <sys/ioctl.h>
51 #elif defined(__FreeBSD__)
52 #include <sys/module.h>
53 #include <sys/bus.h>
54 #include <sys/ioccom.h>
55 #include <sys/conf.h>
56 #include <sys/fcntl.h>
57 #include <sys/filio.h>
58 #endif
59 #include <sys/conf.h>
60 #include <sys/tty.h>
61 #include <sys/file.h>
62 #include <sys/select.h>
63 #include <sys/proc.h>
64 #include <sys/vnode.h>
65 #include <sys/poll.h>
66 
67 #include <dev/usb/usb.h>
68 #include <dev/usb/usbdi.h>
69 #include <dev/usb/usbdi_util.h>
70 
71 #ifdef UGEN_DEBUG
72 #define DPRINTF(x)	if (ugendebug) printf x
73 #define DPRINTFN(n,x)	if (ugendebug>(n)) printf x
74 int	ugendebug = 0;
75 #else
76 #define DPRINTF(x)
77 #define DPRINTFN(n,x)
78 #endif
79 
80 #define	UGEN_CHUNK	128	/* chunk size for read */
81 #define	UGEN_IBSIZE	1020	/* buffer size */
82 #define	UGEN_BBSIZE	1024
83 
84 #define UGEN_NISOREQS	4	/* number of outstanding xfer requests */
85 #define UGEN_NISORFRMS	8	/* number of transactions per req */
86 #define UGEN_NISOFRAMES	(UGEN_NISORFRMS * UGEN_NISOREQS)
87 
88 #define UGEN_BULK_RA_WB_BUFSIZE	16384		/* default buffer size */
89 #define UGEN_BULK_RA_WB_BUFMAX	(1 << 20)	/* maximum allowed buffer */
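/*
 * With the defaults above, an isochronous-in endpoint keeps
 * UGEN_NISOREQS (4) transfers outstanding, each carrying
 * UGEN_NISORFRMS (8) frames, so up to UGEN_NISOFRAMES (32) frames of
 * wMaxPacketSize bytes are buffered ahead of the reader.  Bulk
 * read-ahead/write-behind starts with a 16 KB ring buffer and may be
 * resized up to 1 MB with the USB_SET_BULK_RA_OPT/USB_SET_BULK_WB_OPT
 * ioctls handled in ugen_do_ioctl() below.
 */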
90 
91 struct ugen_endpoint {
92 	struct ugen_softc *sc;
93 	usb_endpoint_descriptor_t *edesc;
94 	usbd_interface_handle iface;
95 	int state;
96 #define	UGEN_ASLP	0x02	/* waiting for data */
97 #define UGEN_SHORT_OK	0x04	/* short xfers are OK */
98 #define UGEN_BULK_RA	0x08	/* in bulk read-ahead mode */
99 #define UGEN_BULK_WB	0x10	/* in bulk write-behind mode */
100 #define UGEN_RA_WB_STOP	0x20	/* RA/WB xfer is stopped (buffer full/empty) */
101 	usbd_pipe_handle pipeh;
102 	struct clist q;
103 	struct selinfo rsel;
104 	u_char *ibuf;		/* start of buffer (circular for isoc) */
105 	u_char *fill;		/* location for input (isoc) */
106 	u_char *limit;		/* end of circular buffer (isoc) */
107 	u_char *cur;		/* current read location (isoc) */
108 	u_int32_t timeout;
109 	u_int32_t ra_wb_bufsize; /* requested size for RA/WB buffer */
110 	u_int32_t ra_wb_reqsize; /* requested xfer length for RA/WB */
111 	u_int32_t ra_wb_used;	 /* how much is in buffer */
112 	u_int32_t ra_wb_xferlen; /* current xfer length for RA/WB */
113 	usbd_xfer_handle ra_wb_xfer;
114 	struct isoreq {
115 		struct ugen_endpoint *sce;
116 		usbd_xfer_handle xfer;
117 		void *dmabuf;
118 		u_int16_t sizes[UGEN_NISORFRMS];
119 	} isoreqs[UGEN_NISOREQS];
120 };
121 
122 struct ugen_softc {
123 	device_t sc_dev;		/* base device */
124 	usbd_device_handle sc_udev;
125 
126 	char sc_is_open[USB_MAX_ENDPOINTS];
127 	struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
128 #define OUT 0
129 #define IN  1
130 
131 	int sc_refcnt;
132 	char sc_buffer[UGEN_BBSIZE];
133 	u_char sc_dying;
134 };
135 
136 #if defined(__NetBSD__)
137 dev_type_open(ugenopen);
138 dev_type_close(ugenclose);
139 dev_type_read(ugenread);
140 dev_type_write(ugenwrite);
141 dev_type_ioctl(ugenioctl);
142 dev_type_poll(ugenpoll);
143 dev_type_kqfilter(ugenkqfilter);
144 
145 const struct cdevsw ugen_cdevsw = {
146 	ugenopen, ugenclose, ugenread, ugenwrite, ugenioctl,
147 	nostop, notty, ugenpoll, nommap, ugenkqfilter, D_OTHER,
148 };
149 #elif defined(__OpenBSD__)
150 cdev_decl(ugen);
151 #elif defined(__FreeBSD__)
152 d_open_t  ugenopen;
153 d_close_t ugenclose;
154 d_read_t  ugenread;
155 d_write_t ugenwrite;
156 d_ioctl_t ugenioctl;
157 d_poll_t  ugenpoll;
158 
159 #define UGEN_CDEV_MAJOR	114
160 
161 Static struct cdevsw ugen_cdevsw = {
162 	/* open */	ugenopen,
163 	/* close */	ugenclose,
164 	/* read */	ugenread,
165 	/* write */	ugenwrite,
166 	/* ioctl */	ugenioctl,
167 	/* poll */	ugenpoll,
168 	/* mmap */	nommap,
169 	/* strategy */	nostrategy,
170 	/* name */	"ugen",
171 	/* maj */	UGEN_CDEV_MAJOR,
172 	/* dump */	nodump,
173 	/* psize */	nopsize,
174 	/* flags */	0,
175 	/* bmaj */	-1
176 };
177 #endif
178 
179 Static void ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr,
180 		     usbd_status status);
181 Static void ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
182 			    usbd_status status);
183 Static void ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
184 			     usbd_status status);
185 Static void ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
186 			     usbd_status status);
187 Static int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
188 Static int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
189 Static int ugen_do_ioctl(struct ugen_softc *, int, u_long,
190 			 void *, int, struct lwp *);
191 Static int ugen_set_config(struct ugen_softc *sc, int configno);
192 Static usb_config_descriptor_t *ugen_get_cdesc(struct ugen_softc *sc,
193 					       int index, int *lenp);
194 Static usbd_status ugen_set_interface(struct ugen_softc *, int, int);
195 Static int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
196 
197 #define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
198 #define UGENENDPOINT(n) (minor(n) & 0xf)
199 #define UGENDEV(u, e) (makedev(0, ((u) << 4) | (e)))
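/*
 * The character device minor number packs the unit in bits 4-7 and the
 * endpoint address in bits 0-3; e.g. minor 0x12 selects endpoint 2 of
 * ugen1 (conventionally the node named /dev/ugen1.02).  Endpoint 0 is
 * the control endpoint.
 */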
200 
201 int             ugen_match(device_t, cfdata_t, void *);
202 void            ugen_attach(device_t, device_t, void *);
203 int             ugen_detach(device_t, int);
204 int             ugen_activate(device_t, enum devact);
205 extern struct cfdriver ugen_cd;
206 CFATTACH_DECL_NEW(ugen, sizeof(struct ugen_softc), ugen_match, ugen_attach, ugen_detach, ugen_activate);
207 
208 /* Attach-priority override: 0 or 1 forces the match decision; -1 defers to the device's cf_flags. */
209 int ugen_override = -1;
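/*
 * Illustrative sketch (not from the original source): with the default
 * ugen_override of -1 the decision comes from the device's cf_flags, so a
 * kernel configuration line along the lines of
 *
 *	ugen*	at uhub? port ? flags 1
 *
 * would make ugen claim the device ahead of more specific drivers, while
 * flags 0 keeps the usual UMATCH_GENERIC behaviour.
 */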
210 
211 int
212 ugen_match(device_t parent, cfdata_t match, void *aux)
213 {
214 	struct usb_attach_arg *uaa = aux;
215 	int override;
216 
217 	if (ugen_override != -1)
218 		override = ugen_override;
219 	else
220 		override = match->cf_flags & 1;
221 
222 	if (override)
223 		return (UMATCH_HIGHEST);
224 	else if (uaa->usegeneric)
225 		return (UMATCH_GENERIC);
226 	else
227 		return (UMATCH_NONE);
228 }
229 
230 void
231 ugen_attach(device_t parent, device_t self, void *aux)
232 {
233 	struct ugen_softc *sc = device_private(self);
234 	struct usb_attach_arg *uaa = aux;
235 	usbd_device_handle udev;
236 	char *devinfop;
237 	usbd_status err;
238 	int i, dir, conf;
239 
240 	aprint_naive("\n");
241 	aprint_normal("\n");
242 
243 	devinfop = usbd_devinfo_alloc(uaa->device, 0);
244 	aprint_normal_dev(self, "%s\n", devinfop);
245 	usbd_devinfo_free(devinfop);
246 
247 	sc->sc_dev = self;
248 	sc->sc_udev = udev = uaa->device;
249 
250 	/* First set configuration index 0, the default one for ugen. */
251 	err = usbd_set_config_index(udev, 0, 0);
252 	if (err) {
253 		aprint_error_dev(self,
254 		    "setting configuration index 0 failed\n");
255 		sc->sc_dying = 1;
256 		return;
257 	}
258 	conf = usbd_get_config_descriptor(udev)->bConfigurationValue;
259 
260 	/* Set up all the local state for this configuration. */
261 	err = ugen_set_config(sc, conf);
262 	if (err) {
263 		aprint_error_dev(self, "setting configuration %d failed\n",
264 		    conf);
265 		sc->sc_dying = 1;
266 		return;
267 	}
268 
269 #ifdef __FreeBSD__
270 	{
271 		static int global_init_done = 0;
272 		if (!global_init_done) {
273 			cdevsw_add(&ugen_cdevsw);
274 			global_init_done = 1;
275 		}
276 	}
277 #endif
278 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
279 		for (dir = OUT; dir <= IN; dir++) {
280 			struct ugen_endpoint *sce;
281 
282 			sce = &sc->sc_endpoints[i][dir];
283 			selinit(&sce->rsel);
284 		}
285 	}
286 
287 	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
288 			   sc->sc_dev);
289 
290 	if (!pmf_device_register(self, NULL, NULL))
291 		aprint_error_dev(self, "couldn't establish power handler\n");
292 
293 	return;
294 }
295 
296 Static int
297 ugen_set_config(struct ugen_softc *sc, int configno)
298 {
299 	usbd_device_handle dev = sc->sc_udev;
300 	usb_config_descriptor_t *cdesc;
301 	usbd_interface_handle iface;
302 	usb_endpoint_descriptor_t *ed;
303 	struct ugen_endpoint *sce;
304 	u_int8_t niface, nendpt;
305 	int ifaceno, endptno, endpt;
306 	usbd_status err;
307 	int dir;
308 
309 	DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
310 		    device_xname(sc->sc_dev), configno, sc));
311 
312 	/*
313 	 * We start at 1, not 0, because we don't care whether the
314 	 * control endpoint is open or not. It is always present.
315 	 */
316 	for (endptno = 1; endptno < USB_MAX_ENDPOINTS; endptno++)
317 		if (sc->sc_is_open[endptno]) {
318 			DPRINTFN(1,
319 			     ("ugen_set_config: %s - endpoint %d is open\n",
320 			      device_xname(sc->sc_dev), endptno));
321 			return (USBD_IN_USE);
322 		}
323 
324 	/* Avoid setting the current value. */
325 	cdesc = usbd_get_config_descriptor(dev);
326 	if (!cdesc || cdesc->bConfigurationValue != configno) {
327 		err = usbd_set_config_no(dev, configno, 1);
328 		if (err)
329 			return (err);
330 	}
331 
332 	err = usbd_interface_count(dev, &niface);
333 	if (err)
334 		return (err);
335 	memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
336 	for (ifaceno = 0; ifaceno < niface; ifaceno++) {
337 		DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
338 		err = usbd_device2interface_handle(dev, ifaceno, &iface);
339 		if (err)
340 			return (err);
341 		err = usbd_endpoint_count(iface, &nendpt);
342 		if (err)
343 			return (err);
344 		for (endptno = 0; endptno < nendpt; endptno++) {
345 			ed = usbd_interface2endpoint_descriptor(iface,endptno);
346 			KASSERT(ed != NULL);
347 			endpt = ed->bEndpointAddress;
348 			dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
349 			sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
350 			DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
351 				    "(%d,%d), sce=%p\n",
352 				    endptno, endpt, UE_GET_ADDR(endpt),
353 				    UE_GET_DIR(endpt), sce));
354 			sce->sc = sc;
355 			sce->edesc = ed;
356 			sce->iface = iface;
357 		}
358 	}
359 	return (USBD_NORMAL_COMPLETION);
360 }
361 
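/*
 * Open an endpoint node.  The control endpoint (0) may be opened any
 * number of times; all other endpoints are exclusive-open.  For each
 * direction requested by the open flags the pipe is opened and, depending
 * on the transfer type, per-endpoint state is set up: interrupt-in gets a
 * clist fed by ugenintr(), isochronous-in gets a circular buffer kept
 * filled by UGEN_NISOREQS queued transfers, and bulk endpoints record the
 * default read-ahead/write-behind sizes.
 */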
362 int
363 ugenopen(dev_t dev, int flag, int mode, struct lwp *l)
364 {
365 	struct ugen_softc *sc;
366 	int unit = UGENUNIT(dev);
367 	int endpt = UGENENDPOINT(dev);
368 	usb_endpoint_descriptor_t *edesc;
369 	struct ugen_endpoint *sce;
370 	int dir, isize;
371 	usbd_status err;
372 	usbd_xfer_handle xfer;
373 	void *tbuf;
374 	int i, j;
375 
376 	sc = device_lookup_private(&ugen_cd, unit);
377 	if (sc == NULL)
378 		return ENXIO;
379 
380 	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
381 		     flag, mode, unit, endpt));
382 
383 	if (sc->sc_dying)
384 		return (ENXIO);
385 
386 	/* The control endpoint allows multiple opens. */
387 	if (endpt == USB_CONTROL_ENDPOINT) {
388 		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
389 		return (0);
390 	}
391 
392 	if (sc->sc_is_open[endpt])
393 		return (EBUSY);
394 
395 	/* Make sure there are pipes for all directions. */
396 	for (dir = OUT; dir <= IN; dir++) {
397 		if (flag & (dir == OUT ? FWRITE : FREAD)) {
398 			sce = &sc->sc_endpoints[endpt][dir];
399 			if (sce == 0 || sce->edesc == 0)
400 				return (ENXIO);
401 		}
402 	}
403 
404 	/* Actually open the pipes. */
405 	/* XXX Should back out properly if it fails. */
406 	for (dir = OUT; dir <= IN; dir++) {
407 		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
408 			continue;
409 		sce = &sc->sc_endpoints[endpt][dir];
410 		sce->state = 0;
411 		sce->timeout = USBD_NO_TIMEOUT;
412 		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
413 			     sc, endpt, dir, sce));
414 		edesc = sce->edesc;
415 		switch (edesc->bmAttributes & UE_XFERTYPE) {
416 		case UE_INTERRUPT:
417 			if (dir == OUT) {
418 				err = usbd_open_pipe(sce->iface,
419 				    edesc->bEndpointAddress, 0, &sce->pipeh);
420 				if (err)
421 					return (EIO);
422 				break;
423 			}
424 			isize = UGETW(edesc->wMaxPacketSize);
425 			if (isize == 0)	/* shouldn't happen */
426 				return (EINVAL);
427 			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
428 			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
429 				     endpt, isize));
430 			if (clalloc(&sce->q, UGEN_IBSIZE, 0) == -1)
431 				return (ENOMEM);
432 			err = usbd_open_pipe_intr(sce->iface,
433 				  edesc->bEndpointAddress,
434 				  USBD_SHORT_XFER_OK, &sce->pipeh, sce,
435 				  sce->ibuf, isize, ugenintr,
436 				  USBD_DEFAULT_INTERVAL);
437 			if (err) {
438 				free(sce->ibuf, M_USBDEV);
439 				clfree(&sce->q);
440 				return (EIO);
441 			}
442 			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
443 			break;
444 		case UE_BULK:
445 			err = usbd_open_pipe(sce->iface,
446 				  edesc->bEndpointAddress, 0, &sce->pipeh);
447 			if (err)
448 				return (EIO);
449 			sce->ra_wb_bufsize = UGEN_BULK_RA_WB_BUFSIZE;
450 			/*
451 			 * Use request size for non-RA/WB transfers
452 			 * as the default.
453 			 */
454 			sce->ra_wb_reqsize = UGEN_BBSIZE;
455 			break;
456 		case UE_ISOCHRONOUS:
457 			if (dir == OUT)
458 				return (EINVAL);
459 			isize = UGETW(edesc->wMaxPacketSize);
460 			if (isize == 0)	/* shouldn't happen */
461 				return (EINVAL);
462 			sce->ibuf = malloc(isize * UGEN_NISOFRAMES,
463 				M_USBDEV, M_WAITOK);
464 			sce->cur = sce->fill = sce->ibuf;
465 			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
466 			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
467 				     endpt, isize));
468 			err = usbd_open_pipe(sce->iface,
469 				  edesc->bEndpointAddress, 0, &sce->pipeh);
470 			if (err) {
471 				free(sce->ibuf, M_USBDEV);
472 				return (EIO);
473 			}
474 			for(i = 0; i < UGEN_NISOREQS; ++i) {
475 				sce->isoreqs[i].sce = sce;
476 				xfer = usbd_alloc_xfer(sc->sc_udev);
477 				if (xfer == 0)
478 					goto bad;
479 				sce->isoreqs[i].xfer = xfer;
480 				tbuf = usbd_alloc_buffer
481 					(xfer, isize * UGEN_NISORFRMS);
482 				if (tbuf == 0) {
483 					i++;
484 					goto bad;
485 				}
486 				sce->isoreqs[i].dmabuf = tbuf;
487 				for(j = 0; j < UGEN_NISORFRMS; ++j)
488 					sce->isoreqs[i].sizes[j] = isize;
489 				usbd_setup_isoc_xfer
490 					(xfer, sce->pipeh, &sce->isoreqs[i],
491 					 sce->isoreqs[i].sizes,
492 					 UGEN_NISORFRMS, USBD_NO_COPY,
493 					 ugen_isoc_rintr);
494 				(void)usbd_transfer(xfer);
495 			}
496 			DPRINTFN(5, ("ugenopen: isoc open done\n"));
497 			break;
498 		bad:
499 			while (--i >= 0) /* implicit buffer free */
500 				usbd_free_xfer(sce->isoreqs[i].xfer);
501 			return (ENOMEM);
502 		case UE_CONTROL:
503 			sce->timeout = USBD_DEFAULT_TIMEOUT;
504 			return (EINVAL);
505 		}
506 	}
507 	sc->sc_is_open[endpt] = 1;
508 	return (0);
509 }
510 
511 int
512 ugenclose(dev_t dev, int flag, int mode, struct lwp *l)
513 {
514 	int endpt = UGENENDPOINT(dev);
515 	struct ugen_softc *sc;
516 	struct ugen_endpoint *sce;
517 	int dir;
518 	int i;
519 
520 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
521 	if (sc == NULL)
522 		return ENXIO;
523 
524 	DPRINTFN(5, ("ugenclose: flag=%d, mode=%d, unit=%d, endpt=%d\n",
525 		     flag, mode, UGENUNIT(dev), endpt));
526 
527 #ifdef DIAGNOSTIC
528 	if (!sc->sc_is_open[endpt]) {
529 		printf("ugenclose: not open\n");
530 		return (EINVAL);
531 	}
532 #endif
533 
534 	if (endpt == USB_CONTROL_ENDPOINT) {
535 		DPRINTFN(5, ("ugenclose: close control\n"));
536 		sc->sc_is_open[endpt] = 0;
537 		return (0);
538 	}
539 
540 	for (dir = OUT; dir <= IN; dir++) {
541 		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
542 			continue;
543 		sce = &sc->sc_endpoints[endpt][dir];
544 		if (sce == NULL || sce->pipeh == NULL)
545 			continue;
546 		DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
547 			     endpt, dir, sce));
548 
549 		usbd_abort_pipe(sce->pipeh);
550 		usbd_close_pipe(sce->pipeh);
551 		sce->pipeh = NULL;
552 
553 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
554 		case UE_INTERRUPT:
555 			ndflush(&sce->q, sce->q.c_cc);
556 			clfree(&sce->q);
557 			break;
558 		case UE_ISOCHRONOUS:
559 			for (i = 0; i < UGEN_NISOREQS; ++i)
560 				usbd_free_xfer(sce->isoreqs[i].xfer);
561 			break;
562 		case UE_BULK:
563 			if (sce->state & (UGEN_BULK_RA | UGEN_BULK_WB))
564 				/* ibuf freed below */
565 				usbd_free_xfer(sce->ra_wb_xfer);
566 			break;
567 		default:
568 			break;
569 		}
570 
571 		if (sce->ibuf != NULL) {
572 			free(sce->ibuf, M_USBDEV);
573 			sce->ibuf = NULL;
574 			clfree(&sce->q);
575 		}
576 	}
577 	sc->sc_is_open[endpt] = 0;
578 
579 	return (0);
580 }
581 
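/*
 * Common read path.  Interrupt-in endpoints drain the clist that
 * ugenintr() fills; bulk-in endpoints either drain the read-ahead ring
 * (UGEN_BULK_RA) or issue synchronous transfers of up to UGEN_BBSIZE
 * bytes; isochronous-in endpoints drain the circular buffer kept full by
 * ugen_isoc_rintr().  Reads with IO_NDELAY return EWOULDBLOCK when
 * nothing is buffered.
 */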
582 Static int
583 ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
584 {
585 	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
586 	u_int32_t n, tn;
587 	usbd_xfer_handle xfer;
588 	usbd_status err;
589 	int s;
590 	int error = 0;
591 
592 	DPRINTFN(5, ("%s: ugenread: %d\n", device_xname(sc->sc_dev), endpt));
593 
594 	if (sc->sc_dying)
595 		return (EIO);
596 
597 	if (endpt == USB_CONTROL_ENDPOINT)
598 		return (ENODEV);
599 
600 #ifdef DIAGNOSTIC
601 	if (sce->edesc == NULL) {
602 		printf("ugenread: no edesc\n");
603 		return (EIO);
604 	}
605 	if (sce->pipeh == NULL) {
606 		printf("ugenread: no pipe\n");
607 		return (EIO);
608 	}
609 #endif
610 
611 	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
612 	case UE_INTERRUPT:
613 		/* Block until activity occurs. */
614 		s = splusb();
615 		while (sce->q.c_cc == 0) {
616 			if (flag & IO_NDELAY) {
617 				splx(s);
618 				return (EWOULDBLOCK);
619 			}
620 			sce->state |= UGEN_ASLP;
621 			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
622 			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
623 			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
624 			if (sc->sc_dying)
625 				error = EIO;
626 			if (error) {
627 				sce->state &= ~UGEN_ASLP;
628 				break;
629 			}
630 		}
631 		splx(s);
632 
633 		/* Transfer as many chunks as possible. */
634 		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
635 			n = min(sce->q.c_cc, uio->uio_resid);
636 			if (n > sizeof(sc->sc_buffer))
637 				n = sizeof(sc->sc_buffer);
638 
639 			/* Remove a small chunk from the input queue. */
640 			q_to_b(&sce->q, sc->sc_buffer, n);
641 			DPRINTFN(5, ("ugenread: got %d chars\n", n));
642 
643 			/* Copy the data to the user process. */
644 			error = uiomove(sc->sc_buffer, n, uio);
645 			if (error)
646 				break;
647 		}
648 		break;
649 	case UE_BULK:
650 		if (sce->state & UGEN_BULK_RA) {
651 			DPRINTFN(5, ("ugenread: BULK_RA req: %zd used: %d\n",
652 				     uio->uio_resid, sce->ra_wb_used));
653 			xfer = sce->ra_wb_xfer;
654 
655 			s = splusb();
656 			if (sce->ra_wb_used == 0 && flag & IO_NDELAY) {
657 				splx(s);
658 				return (EWOULDBLOCK);
659 			}
660 			while (uio->uio_resid > 0 && !error) {
661 				while (sce->ra_wb_used == 0) {
662 					sce->state |= UGEN_ASLP;
663 					DPRINTFN(5,
664 						 ("ugenread: sleep on %p\n",
665 						  sce));
666 					error = tsleep(sce, PZERO | PCATCH,
667 						       "ugenrb", 0);
668 					DPRINTFN(5,
669 						 ("ugenread: woke, error=%d\n",
670 						  error));
671 					if (sc->sc_dying)
672 						error = EIO;
673 					if (error) {
674 						sce->state &= ~UGEN_ASLP;
675 						break;
676 					}
677 				}
678 
679 				/* Copy data to the process. */
680 				while (uio->uio_resid > 0
681 				       && sce->ra_wb_used > 0) {
682 					n = min(uio->uio_resid,
683 						sce->ra_wb_used);
684 					n = min(n, sce->limit - sce->cur);
685 					error = uiomove(sce->cur, n, uio);
686 					if (error)
687 						break;
688 					sce->cur += n;
689 					sce->ra_wb_used -= n;
690 					if (sce->cur == sce->limit)
691 						sce->cur = sce->ibuf;
692 				}
693 
694 				/*
695 				 * If the transfers stopped because the
696 				 * buffer was full, restart them.
697 				 */
698 				if (sce->state & UGEN_RA_WB_STOP &&
699 				    sce->ra_wb_used < sce->limit - sce->ibuf) {
700 					n = (sce->limit - sce->ibuf)
701 					    - sce->ra_wb_used;
702 					usbd_setup_xfer(xfer,
703 					    sce->pipeh, sce, NULL,
704 					    min(n, sce->ra_wb_xferlen),
705 					    USBD_NO_COPY, USBD_NO_TIMEOUT,
706 					    ugen_bulkra_intr);
707 					sce->state &= ~UGEN_RA_WB_STOP;
708 					err = usbd_transfer(xfer);
709 					if (err != USBD_IN_PROGRESS)
710 						/*
711 						 * The transfer has not been
712 						 * queued.  Setting STOP
713 						 * will make us try
714 						 * again at the next read.
715 						 */
716 						sce->state |= UGEN_RA_WB_STOP;
717 				}
718 			}
719 			splx(s);
720 			break;
721 		}
722 		xfer = usbd_alloc_xfer(sc->sc_udev);
723 		if (xfer == 0)
724 			return (ENOMEM);
725 		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
726 			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
727 			tn = n;
728 			err = usbd_bulk_transfer(
729 				  xfer, sce->pipeh,
730 				  sce->state & UGEN_SHORT_OK ?
731 				      USBD_SHORT_XFER_OK : 0,
732 				  sce->timeout, sc->sc_buffer, &tn, "ugenrb");
733 			if (err) {
734 				if (err == USBD_INTERRUPTED)
735 					error = EINTR;
736 				else if (err == USBD_TIMEOUT)
737 					error = ETIMEDOUT;
738 				else
739 					error = EIO;
740 				break;
741 			}
742 			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
743 			error = uiomove(sc->sc_buffer, tn, uio);
744 			if (error || tn < n)
745 				break;
746 		}
747 		usbd_free_xfer(xfer);
748 		break;
749 	case UE_ISOCHRONOUS:
750 		s = splusb();
751 		while (sce->cur == sce->fill) {
752 			if (flag & IO_NDELAY) {
753 				splx(s);
754 				return (EWOULDBLOCK);
755 			}
756 			sce->state |= UGEN_ASLP;
757 			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
758 			error = tsleep(sce, PZERO | PCATCH, "ugenri", 0);
759 			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
760 			if (sc->sc_dying)
761 				error = EIO;
762 			if (error) {
763 				sce->state &= ~UGEN_ASLP;
764 				break;
765 			}
766 		}
767 
768 		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
769 			if(sce->fill > sce->cur)
770 				n = min(sce->fill - sce->cur, uio->uio_resid);
771 			else
772 				n = min(sce->limit - sce->cur, uio->uio_resid);
773 
774 			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));
775 
776 			/* Copy the data to the user process. */
777 			error = uiomove(sce->cur, n, uio);
778 			if (error)
779 				break;
780 			sce->cur += n;
781 			if(sce->cur >= sce->limit)
782 				sce->cur = sce->ibuf;
783 		}
784 		splx(s);
785 		break;
786 
787 
788 	default:
789 		return (ENXIO);
790 	}
791 	return (error);
792 }
793 
794 int
795 ugenread(dev_t dev, struct uio *uio, int flag)
796 {
797 	int endpt = UGENENDPOINT(dev);
798 	struct ugen_softc *sc;
799 	int error;
800 
801 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
802 	if (sc == NULL)
803 		return ENXIO;
804 
805 	sc->sc_refcnt++;
806 	error = ugen_do_read(sc, endpt, uio, flag);
807 	if (--sc->sc_refcnt < 0)
808 		usb_detach_wakeup(sc->sc_dev);
809 	return (error);
810 }
811 
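/*
 * Common write path.  In write-behind mode (UGEN_BULK_WB) data is copied
 * into the ring buffer and pushed out by ugen_bulkwb_intr(); otherwise
 * bulk writes go out synchronously in UGEN_BBSIZE chunks and interrupt
 * writes in wMaxPacketSize chunks.
 */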
812 Static int
813 ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio,
814 	int flag)
815 {
816 	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
817 	u_int32_t n;
818 	int error = 0;
819 	int s;
820 	u_int32_t tn;
821 	char *dbuf;
822 	usbd_xfer_handle xfer;
823 	usbd_status err;
824 
825 	DPRINTFN(5, ("%s: ugenwrite: %d\n", device_xname(sc->sc_dev), endpt));
826 
827 	if (sc->sc_dying)
828 		return (EIO);
829 
830 	if (endpt == USB_CONTROL_ENDPOINT)
831 		return (ENODEV);
832 
833 #ifdef DIAGNOSTIC
834 	if (sce->edesc == NULL) {
835 		printf("ugenwrite: no edesc\n");
836 		return (EIO);
837 	}
838 	if (sce->pipeh == NULL) {
839 		printf("ugenwrite: no pipe\n");
840 		return (EIO);
841 	}
842 #endif
843 
844 	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
845 	case UE_BULK:
846 		if (sce->state & UGEN_BULK_WB) {
847 			DPRINTFN(5, ("ugenwrite: BULK_WB req: %zd used: %d\n",
848 				     uio->uio_resid, sce->ra_wb_used));
849 			xfer = sce->ra_wb_xfer;
850 
851 			s = splusb();
852 			if (sce->ra_wb_used == sce->limit - sce->ibuf &&
853 			    flag & IO_NDELAY) {
854 				splx(s);
855 				return (EWOULDBLOCK);
856 			}
857 			while (uio->uio_resid > 0 && !error) {
858 				while (sce->ra_wb_used ==
859 				       sce->limit - sce->ibuf) {
860 					sce->state |= UGEN_ASLP;
861 					DPRINTFN(5,
862 						 ("ugenwrite: sleep on %p\n",
863 						  sce));
864 					error = tsleep(sce, PZERO | PCATCH,
865 						       "ugenwb", 0);
866 					DPRINTFN(5,
867 						 ("ugenwrite: woke, error=%d\n",
868 						  error));
869 					if (sc->sc_dying)
870 						error = EIO;
871 					if (error) {
872 						sce->state &= ~UGEN_ASLP;
873 						break;
874 					}
875 				}
876 
877 				/* Copy data from the process. */
878 				while (uio->uio_resid > 0 &&
879 				    sce->ra_wb_used < sce->limit - sce->ibuf) {
880 					n = min(uio->uio_resid,
881 						(sce->limit - sce->ibuf)
882 						 - sce->ra_wb_used);
883 					n = min(n, sce->limit - sce->fill);
884 					error = uiomove(sce->fill, n, uio);
885 					if (error)
886 						break;
887 					sce->fill += n;
888 					sce->ra_wb_used += n;
889 					if (sce->fill == sce->limit)
890 						sce->fill = sce->ibuf;
891 				}
892 
893 				/*
894 				 * If the transfers stopped because the
895 				 * buffer was empty, restart them.
896 				 */
897 				if (sce->state & UGEN_RA_WB_STOP &&
898 				    sce->ra_wb_used > 0) {
899 					dbuf = (char *)usbd_get_buffer(xfer);
900 					n = min(sce->ra_wb_used,
901 						sce->ra_wb_xferlen);
902 					tn = min(n, sce->limit - sce->cur);
903 					memcpy(dbuf, sce->cur, tn);
904 					dbuf += tn;
905 					if (n - tn > 0)
906 						memcpy(dbuf, sce->ibuf,
907 						       n - tn);
908 					usbd_setup_xfer(xfer,
909 					    sce->pipeh, sce, NULL, n,
910 					    USBD_NO_COPY, USBD_NO_TIMEOUT,
911 					    ugen_bulkwb_intr);
912 					sce->state &= ~UGEN_RA_WB_STOP;
913 					err = usbd_transfer(xfer);
914 					if (err != USBD_IN_PROGRESS)
915 						/*
916 						 * The transfer has not been
917 						 * queued.  Setting STOP
918 						 * will make us try again
919 						 * at the next write.
920 						 */
921 						sce->state |= UGEN_RA_WB_STOP;
922 				}
923 			}
924 			splx(s);
925 			break;
926 		}
927 		xfer = usbd_alloc_xfer(sc->sc_udev);
928 		if (xfer == 0)
929 			return (EIO);
930 		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
931 			error = uiomove(sc->sc_buffer, n, uio);
932 			if (error)
933 				break;
934 			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
935 			err = usbd_bulk_transfer(xfer, sce->pipeh, 0,
936 				  sce->timeout, sc->sc_buffer, &n,"ugenwb");
937 			if (err) {
938 				if (err == USBD_INTERRUPTED)
939 					error = EINTR;
940 				else if (err == USBD_TIMEOUT)
941 					error = ETIMEDOUT;
942 				else
943 					error = EIO;
944 				break;
945 			}
946 		}
947 		usbd_free_xfer(xfer);
948 		break;
949 	case UE_INTERRUPT:
950 		xfer = usbd_alloc_xfer(sc->sc_udev);
951 		if (xfer == 0)
952 			return (EIO);
953 		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
954 		    uio->uio_resid)) != 0) {
955 			error = uiomove(sc->sc_buffer, n, uio);
956 			if (error)
957 				break;
958 			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
959 			err = usbd_intr_transfer(xfer, sce->pipeh, 0,
960 			    sce->timeout, sc->sc_buffer, &n, "ugenwi");
961 			if (err) {
962 				if (err == USBD_INTERRUPTED)
963 					error = EINTR;
964 				else if (err == USBD_TIMEOUT)
965 					error = ETIMEDOUT;
966 				else
967 					error = EIO;
968 				break;
969 			}
970 		}
971 		usbd_free_xfer(xfer);
972 		break;
973 	default:
974 		return (ENXIO);
975 	}
976 	return (error);
977 }
978 
979 int
980 ugenwrite(dev_t dev, struct uio *uio, int flag)
981 {
982 	int endpt = UGENENDPOINT(dev);
983 	struct ugen_softc *sc;
984 	int error;
985 
986 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
987 	if (sc == NULL)
988 		return ENXIO;
989 
990 	sc->sc_refcnt++;
991 	error = ugen_do_write(sc, endpt, uio, flag);
992 	if (--sc->sc_refcnt < 0)
993 		usb_detach_wakeup(sc->sc_dev);
994 	return (error);
995 }
996 
997 #if defined(__NetBSD__) || defined(__OpenBSD__)
998 int
999 ugen_activate(device_t self, enum devact act)
1000 {
1001 	struct ugen_softc *sc = device_private(self);
1002 
1003 	switch (act) {
1004 	case DVACT_DEACTIVATE:
1005 		sc->sc_dying = 1;
1006 		return 0;
1007 	default:
1008 		return EOPNOTSUPP;
1009 	}
1010 }
1011 #endif
1012 
1013 int
1014 ugen_detach(device_t self, int flags)
1015 {
1016 	struct ugen_softc *sc = device_private(self);
1017 	struct ugen_endpoint *sce;
1018 	int i, dir;
1019 	int s;
1020 #if defined(__NetBSD__) || defined(__OpenBSD__)
1021 	int maj, mn;
1022 
1023 	DPRINTF(("ugen_detach: sc=%p flags=%d\n", sc, flags));
1024 #elif defined(__FreeBSD__)
1025 	DPRINTF(("ugen_detach: sc=%p\n", sc));
1026 #endif
1027 
1028 	sc->sc_dying = 1;
1029 	pmf_device_deregister(self);
1030 	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
1031 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1032 		for (dir = OUT; dir <= IN; dir++) {
1033 			sce = &sc->sc_endpoints[i][dir];
1034 			if (sce && sce->pipeh)
1035 				usbd_abort_pipe(sce->pipeh);
1036 		}
1037 	}
1038 
1039 	s = splusb();
1040 	if (--sc->sc_refcnt >= 0) {
1041 		/* Wake everyone */
1042 		for (i = 0; i < USB_MAX_ENDPOINTS; i++)
1043 			wakeup(&sc->sc_endpoints[i][IN]);
1044 		/* Wait for processes to go away. */
1045 		usb_detach_wait(sc->sc_dev);
1046 	}
1047 	splx(s);
1048 
1049 #if defined(__NetBSD__) || defined(__OpenBSD__)
1050 	/* locate the major number */
1051 #if defined(__NetBSD__)
1052 	maj = cdevsw_lookup_major(&ugen_cdevsw);
1053 #elif defined(__OpenBSD__)
1054 	for (maj = 0; maj < nchrdev; maj++)
1055 		if (cdevsw[maj].d_open == ugenopen)
1056 			break;
1057 #endif
1058 
1059 	/* Nuke the vnodes for any open instances (calls close). */
1060 	mn = device_unit(self) * USB_MAX_ENDPOINTS;
1061 	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);
1062 #elif defined(__FreeBSD__)
1063 	/* XXX not implemented yet */
1064 #endif
1065 
1066 	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
1067 			   sc->sc_dev);
1068 
1069 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
1070 		for (dir = OUT; dir <= IN; dir++) {
1071 			sce = &sc->sc_endpoints[i][dir];
1072 			seldestroy(&sce->rsel);
1073 		}
1074 	}
1075 
1076 	return (0);
1077 }
1078 
1079 Static void
1080 ugenintr(usbd_xfer_handle xfer, usbd_private_handle addr, usbd_status status)
1081 {
1082 	struct ugen_endpoint *sce = addr;
1083 	/*struct ugen_softc *sc = sce->sc;*/
1084 	u_int32_t count;
1085 	u_char *ibuf;
1086 
1087 	if (status == USBD_CANCELLED)
1088 		return;
1089 
1090 	if (status != USBD_NORMAL_COMPLETION) {
1091 		DPRINTF(("ugenintr: status=%d\n", status));
1092 		if (status == USBD_STALLED)
1093 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1094 		return;
1095 	}
1096 
1097 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1098 	ibuf = sce->ibuf;
1099 
1100 	DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
1101 		     xfer, status, count));
1102 	DPRINTFN(5, ("          data = %02x %02x %02x\n",
1103 		     ibuf[0], ibuf[1], ibuf[2]));
1104 
1105 	(void)b_to_q(ibuf, count, &sce->q);
1106 
1107 	if (sce->state & UGEN_ASLP) {
1108 		sce->state &= ~UGEN_ASLP;
1109 		DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
1110 		wakeup(sce);
1111 	}
1112 	selnotify(&sce->rsel, 0, 0);
1113 }
1114 
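/*
 * Isochronous-in completion.  Each completed frame is appended to the
 * circular buffer between sce->ibuf and sce->limit; when the reader falls
 * behind, the oldest data is discarded.  The transfer is then requeued so
 * the pipe keeps streaming.
 */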
1115 Static void
1116 ugen_isoc_rintr(usbd_xfer_handle xfer, usbd_private_handle addr,
1117 		usbd_status status)
1118 {
1119 	struct isoreq *req = addr;
1120 	struct ugen_endpoint *sce = req->sce;
1121 	u_int32_t count, n;
1122 	int i, isize;
1123 
1124 	/* Return if we are aborting. */
1125 	if (status == USBD_CANCELLED)
1126 		return;
1127 
1128 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1129 	DPRINTFN(5,("ugen_isoc_rintr: xfer %ld, count=%d\n",
1130 	    (long)(req - sce->isoreqs), count));
1131 
1132 	/* throw away oldest input if the buffer is full */
1133 	if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
1134 		sce->cur += count;
1135 		if(sce->cur >= sce->limit)
1136 			sce->cur = sce->ibuf + (sce->cur - sce->limit);
1137 		DPRINTFN(5, ("ugen_isoc_rintr: throwing away %d bytes\n",
1138 			     count));
1139 	}
1140 
1141 	isize = UGETW(sce->edesc->wMaxPacketSize);
1142 	for (i = 0; i < UGEN_NISORFRMS; i++) {
1143 		u_int32_t actlen = req->sizes[i];
1144 		char const *tbuf = (char const *)req->dmabuf + isize * i;
1145 
1146 		/* copy data to buffer */
1147 		while (actlen > 0) {
1148 			n = min(actlen, sce->limit - sce->fill);
1149 			memcpy(sce->fill, tbuf, n);
1150 
1151 			tbuf += n;
1152 			actlen -= n;
1153 			sce->fill += n;
1154 			if(sce->fill == sce->limit)
1155 				sce->fill = sce->ibuf;
1156 		}
1157 
1158 		/* setup size for next transfer */
1159 		req->sizes[i] = isize;
1160 	}
1161 
1162 	usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
1163 			     USBD_NO_COPY, ugen_isoc_rintr);
1164 	(void)usbd_transfer(xfer);
1165 
1166 	if (sce->state & UGEN_ASLP) {
1167 		sce->state &= ~UGEN_ASLP;
1168 		DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
1169 		wakeup(sce);
1170 	}
1171 	selnotify(&sce->rsel, 0, 0);
1172 }
1173 
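/*
 * Bulk read-ahead completion.  Received data is appended to the ring
 * buffer and a new transfer is queued for however much free space
 * remains; when the ring is full the endpoint is marked UGEN_RA_WB_STOP
 * and the next read restarts it.
 */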
1174 Static void
1175 ugen_bulkra_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1176 		 usbd_status status)
1177 {
1178 	struct ugen_endpoint *sce = addr;
1179 	u_int32_t count, n;
1180 	char const *tbuf;
1181 	usbd_status err;
1182 
1183 	/* Return if we are aborting. */
1184 	if (status == USBD_CANCELLED)
1185 		return;
1186 
1187 	if (status != USBD_NORMAL_COMPLETION) {
1188 		DPRINTF(("ugen_bulkra_intr: status=%d\n", status));
1189 		sce->state |= UGEN_RA_WB_STOP;
1190 		if (status == USBD_STALLED)
1191 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1192 		return;
1193 	}
1194 
1195 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1196 
1197 	/* Keep track of how much is in the buffer. */
1198 	sce->ra_wb_used += count;
1199 
1200 	/* Copy data to buffer. */
1201 	tbuf = (char const *)usbd_get_buffer(sce->ra_wb_xfer);
1202 	n = min(count, sce->limit - sce->fill);
1203 	memcpy(sce->fill, tbuf, n);
1204 	tbuf += n;
1205 	count -= n;
1206 	sce->fill += n;
1207 	if (sce->fill == sce->limit)
1208 		sce->fill = sce->ibuf;
1209 	if (count > 0) {
1210 		memcpy(sce->fill, tbuf, count);
1211 		sce->fill += count;
1212 	}
1213 
1214 	/* Set up the next request if necessary. */
1215 	n = (sce->limit - sce->ibuf) - sce->ra_wb_used;
1216 	if (n > 0) {
1217 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1218 		    min(n, sce->ra_wb_xferlen), USBD_NO_COPY,
1219 		    USBD_NO_TIMEOUT, ugen_bulkra_intr);
1220 		err = usbd_transfer(xfer);
1221 		if (err != USBD_IN_PROGRESS) {
1222 			printf("usbd_bulkra_intr: error=%d\n", err);
1223 			/*
1224 			 * The transfer has not been queued.  Setting STOP
1225 			 * will make us try again at the next read.
1226 			 */
1227 			sce->state |= UGEN_RA_WB_STOP;
1228 		}
1229 	}
1230 	else
1231 		sce->state |= UGEN_RA_WB_STOP;
1232 
1233 	if (sce->state & UGEN_ASLP) {
1234 		sce->state &= ~UGEN_ASLP;
1235 		DPRINTFN(5, ("ugen_bulkra_intr: waking %p\n", sce));
1236 		wakeup(sce);
1237 	}
1238 	selnotify(&sce->rsel, 0, 0);
1239 }
1240 
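/*
 * Bulk write-behind completion.  The bytes just sent are removed from the
 * ring buffer and the next chunk, if any, is copied into the DMA buffer
 * and queued; when the ring empties the endpoint is marked
 * UGEN_RA_WB_STOP and the next write restarts it.
 */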
1241 Static void
1242 ugen_bulkwb_intr(usbd_xfer_handle xfer, usbd_private_handle addr,
1243 		 usbd_status status)
1244 {
1245 	struct ugen_endpoint *sce = addr;
1246 	u_int32_t count, n;
1247 	char *tbuf;
1248 	usbd_status err;
1249 
1250 	/* Return if we are aborting. */
1251 	if (status == USBD_CANCELLED)
1252 		return;
1253 
1254 	if (status != USBD_NORMAL_COMPLETION) {
1255 		DPRINTF(("ugen_bulkwb_intr: status=%d\n", status));
1256 		sce->state |= UGEN_RA_WB_STOP;
1257 		if (status == USBD_STALLED)
1258 		    usbd_clear_endpoint_stall_async(sce->pipeh);
1259 		return;
1260 	}
1261 
1262 	usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
1263 
1264 	/* Keep track of how much is in the buffer. */
1265 	sce->ra_wb_used -= count;
1266 
1267 	/* Update buffer pointers. */
1268 	sce->cur += count;
1269 	if (sce->cur >= sce->limit)
1270 		sce->cur = sce->ibuf + (sce->cur - sce->limit);
1271 
1272 	/* Set up next request if necessary. */
1273 	if (sce->ra_wb_used > 0) {
1274 		/* copy data from buffer */
1275 		tbuf = (char *)usbd_get_buffer(sce->ra_wb_xfer);
1276 		count = min(sce->ra_wb_used, sce->ra_wb_xferlen);
1277 		n = min(count, sce->limit - sce->cur);
1278 		memcpy(tbuf, sce->cur, n);
1279 		tbuf += n;
1280 		if (count - n > 0)
1281 			memcpy(tbuf, sce->ibuf, count - n);
1282 
1283 		usbd_setup_xfer(xfer, sce->pipeh, sce, NULL,
1284 		    count, USBD_NO_COPY, USBD_NO_TIMEOUT, ugen_bulkwb_intr);
1285 		err = usbd_transfer(xfer);
1286 		if (err != USBD_IN_PROGRESS) {
1287 			printf("usbd_bulkwb_intr: error=%d\n", err);
1288 			/*
1289 			 * The transfer has not been queued.  Setting STOP
1290 			 * will make us try again at the next write.
1291 			 */
1292 			sce->state |= UGEN_RA_WB_STOP;
1293 		}
1294 	}
1295 	else
1296 		sce->state |= UGEN_RA_WB_STOP;
1297 
1298 	if (sce->state & UGEN_ASLP) {
1299 		sce->state &= ~UGEN_ASLP;
1300 		DPRINTFN(5, ("ugen_bulkwb_intr: waking %p\n", sce));
1301 		wakeup(sce);
1302 	}
1303 	selnotify(&sce->rsel, 0, 0);
1304 }
1305 
1306 Static usbd_status
1307 ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
1308 {
1309 	usbd_interface_handle iface;
1310 	usb_endpoint_descriptor_t *ed;
1311 	usbd_status err;
1312 	struct ugen_endpoint *sce;
1313 	u_int8_t niface, nendpt, endptno, endpt;
1314 	int dir;
1315 
1316 	DPRINTFN(15, ("ugen_set_interface %d %d\n", ifaceidx, altno));
1317 
1318 	err = usbd_interface_count(sc->sc_udev, &niface);
1319 	if (err)
1320 		return (err);
1321 	if (ifaceidx < 0 || ifaceidx >= niface)
1322 		return (USBD_INVAL);
1323 
1324 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1325 	if (err)
1326 		return (err);
1327 	err = usbd_endpoint_count(iface, &nendpt);
1328 	if (err)
1329 		return (err);
1330 	/* XXX should only do this after setting new altno has succeeded */
1331 	for (endptno = 0; endptno < nendpt; endptno++) {
1332 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1333 		endpt = ed->bEndpointAddress;
1334 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1335 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1336 		sce->sc = 0;
1337 		sce->edesc = 0;
1338 		sce->iface = 0;
1339 	}
1340 
1341 	/* change setting */
1342 	err = usbd_set_interface(iface, altno);
1343 	if (err)
1344 		return (err);
1345 
1346 	err = usbd_endpoint_count(iface, &nendpt);
1347 	if (err)
1348 		return (err);
1349 	for (endptno = 0; endptno < nendpt; endptno++) {
1350 		ed = usbd_interface2endpoint_descriptor(iface,endptno);
1351 		KASSERT(ed != NULL);
1352 		endpt = ed->bEndpointAddress;
1353 		dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
1354 		sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
1355 		sce->sc = sc;
1356 		sce->edesc = ed;
1357 		sce->iface = iface;
1358 	}
1359 	return (0);
1360 }
1361 
1362 /* Retrieve a complete descriptor for a certain device and index. */
1363 Static usb_config_descriptor_t *
1364 ugen_get_cdesc(struct ugen_softc *sc, int index, int *lenp)
1365 {
1366 	usb_config_descriptor_t *cdesc, *tdesc, cdescr;
1367 	int len;
1368 	usbd_status err;
1369 
1370 	if (index == USB_CURRENT_CONFIG_INDEX) {
1371 		tdesc = usbd_get_config_descriptor(sc->sc_udev);
1372 		len = UGETW(tdesc->wTotalLength);
1373 		if (lenp)
1374 			*lenp = len;
1375 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1376 		memcpy(cdesc, tdesc, len);
1377 		DPRINTFN(5,("ugen_get_cdesc: current, len=%d\n", len));
1378 	} else {
1379 		err = usbd_get_config_desc(sc->sc_udev, index, &cdescr);
1380 		if (err)
1381 			return (0);
1382 		len = UGETW(cdescr.wTotalLength);
1383 		DPRINTFN(5,("ugen_get_cdesc: index=%d, len=%d\n", index, len));
1384 		if (lenp)
1385 			*lenp = len;
1386 		cdesc = malloc(len, M_TEMP, M_WAITOK);
1387 		err = usbd_get_config_desc_full(sc->sc_udev, index, cdesc, len);
1388 		if (err) {
1389 			free(cdesc, M_TEMP);
1390 			return (0);
1391 		}
1392 	}
1393 	return (cdesc);
1394 }
1395 
1396 Static int
1397 ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx)
1398 {
1399 	usbd_interface_handle iface;
1400 	usbd_status err;
1401 
1402 	err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
1403 	if (err)
1404 		return (-1);
1405 	return (usbd_get_interface_altindex(iface));
1406 }
1407 
1408 Static int
1409 ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
1410 	      void *addr, int flag, struct lwp *l)
1411 {
1412 	struct ugen_endpoint *sce;
1413 	usbd_status err;
1414 	usbd_interface_handle iface;
1415 	struct usb_config_desc *cd;
1416 	usb_config_descriptor_t *cdesc;
1417 	struct usb_interface_desc *id;
1418 	usb_interface_descriptor_t *idesc;
1419 	struct usb_endpoint_desc *ed;
1420 	usb_endpoint_descriptor_t *edesc;
1421 	struct usb_alt_interface *ai;
1422 	struct usb_string_desc *si;
1423 	u_int8_t conf, alt;
1424 
1425 	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
1426 	if (sc->sc_dying)
1427 		return (EIO);
1428 
1429 	switch (cmd) {
1430 	case FIONBIO:
1431 		/* All handled in the upper FS layer. */
1432 		return (0);
1433 	case USB_SET_SHORT_XFER:
1434 		if (endpt == USB_CONTROL_ENDPOINT)
1435 			return (EINVAL);
1436 		/* This flag only affects read */
1437 		sce = &sc->sc_endpoints[endpt][IN];
1438 		if (sce == NULL || sce->pipeh == NULL)
1439 			return (EINVAL);
1440 		if (*(int *)addr)
1441 			sce->state |= UGEN_SHORT_OK;
1442 		else
1443 			sce->state &= ~UGEN_SHORT_OK;
1444 		return (0);
1445 	case USB_SET_TIMEOUT:
1446 		sce = &sc->sc_endpoints[endpt][IN];
1447 		if (sce == NULL
1448 		    /* XXX this shouldn't happen, but the distinction between
1449 		       input and output pipes isn't clear enough.
1450 		       || sce->pipeh == NULL */
1451 			)
1452 			return (EINVAL);
1453 		sce->timeout = *(int *)addr;
1454 		return (0);
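	/*
	 * Illustrative userland sketch (not part of the driver): a process
	 * reading a bulk-in endpoint might enable read-ahead roughly like
	 * this, sizing the ring before turning the mode on (option changes
	 * only take effect when RA/WB is next enabled):
	 *
	 *	struct usb_bulk_ra_wb_opt opt;
	 *	int on = 1;
	 *
	 *	opt.ra_wb_buffer_size = 32768;
	 *	opt.ra_wb_request_size = 4096;
	 *	ioctl(fd, USB_SET_BULK_RA_OPT, &opt);
	 *	ioctl(fd, USB_SET_BULK_RA, &on);
	 *
	 * after which read(2) drains the kernel ring filled by
	 * ugen_bulkra_intr().
	 */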
1455 	case USB_SET_BULK_RA:
1456 		if (endpt == USB_CONTROL_ENDPOINT)
1457 			return (EINVAL);
1458 		sce = &sc->sc_endpoints[endpt][IN];
1459 		if (sce == NULL || sce->pipeh == NULL)
1460 			return (EINVAL);
1461 		edesc = sce->edesc;
1462 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1463 			return (EINVAL);
1464 
1465 		if (*(int *)addr) {
1466 			/* Only turn RA on if it's currently off. */
1467 			if (sce->state & UGEN_BULK_RA)
1468 				return (0);
1469 
1470 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1471 				/* shouldn't happen */
1472 				return (EINVAL);
1473 			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1474 			if (sce->ra_wb_xfer == NULL)
1475 				return (ENOMEM);
1476 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1477 			/*
1478 			 * Set up a dmabuf because we reuse the xfer with
1479 			 * the same (max) request length like isoc.
1480 			 */
1481 			if (usbd_alloc_buffer(sce->ra_wb_xfer,
1482 					      sce->ra_wb_xferlen) == 0) {
1483 				usbd_free_xfer(sce->ra_wb_xfer);
1484 				return (ENOMEM);
1485 			}
1486 			sce->ibuf = malloc(sce->ra_wb_bufsize,
1487 					   M_USBDEV, M_WAITOK);
1488 			sce->fill = sce->cur = sce->ibuf;
1489 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1490 			sce->ra_wb_used = 0;
1491 			sce->state |= UGEN_BULK_RA;
1492 			sce->state &= ~UGEN_RA_WB_STOP;
1493 			/* Now start reading. */
1494 			usbd_setup_xfer(sce->ra_wb_xfer, sce->pipeh, sce,
1495 			    NULL,
1496 			    min(sce->ra_wb_xferlen, sce->ra_wb_bufsize),
1497 			    USBD_NO_COPY, USBD_NO_TIMEOUT,
1498 			    ugen_bulkra_intr);
1499 			err = usbd_transfer(sce->ra_wb_xfer);
1500 			if (err != USBD_IN_PROGRESS) {
1501 				sce->state &= ~UGEN_BULK_RA;
1502 				free(sce->ibuf, M_USBDEV);
1503 				sce->ibuf = NULL;
1504 				usbd_free_xfer(sce->ra_wb_xfer);
1505 				return (EIO);
1506 			}
1507 		} else {
1508 			/* Only turn RA off if it's currently on. */
1509 			if (!(sce->state & UGEN_BULK_RA))
1510 				return (0);
1511 
1512 			sce->state &= ~UGEN_BULK_RA;
1513 			usbd_abort_pipe(sce->pipeh);
1514 			usbd_free_xfer(sce->ra_wb_xfer);
1515 			/*
1516 			 * XXX Discard whatever's in the buffer, but we
1517 			 * should keep it around and drain the buffer
1518 			 * instead.
1519 			 */
1520 			free(sce->ibuf, M_USBDEV);
1521 			sce->ibuf = NULL;
1522 		}
1523 		return (0);
1524 	case USB_SET_BULK_WB:
1525 		if (endpt == USB_CONTROL_ENDPOINT)
1526 			return (EINVAL);
1527 		sce = &sc->sc_endpoints[endpt][OUT];
1528 		if (sce == NULL || sce->pipeh == NULL)
1529 			return (EINVAL);
1530 		edesc = sce->edesc;
1531 		if ((edesc->bmAttributes & UE_XFERTYPE) != UE_BULK)
1532 			return (EINVAL);
1533 
1534 		if (*(int *)addr) {
1535 			/* Only turn WB on if it's currently off. */
1536 			if (sce->state & UGEN_BULK_WB)
1537 				return (0);
1538 
1539 			if (sce->ra_wb_bufsize == 0 || sce->ra_wb_reqsize == 0)
1540 				/* shouldn't happen */
1541 				return (EINVAL);
1542 			sce->ra_wb_xfer = usbd_alloc_xfer(sc->sc_udev);
1543 			if (sce->ra_wb_xfer == NULL)
1544 				return (ENOMEM);
1545 			sce->ra_wb_xferlen = sce->ra_wb_reqsize;
1546 			/*
1547 			 * Set up a dmabuf because we reuse the xfer with
1548 			 * the same (max) request length like isoc.
1549 			 */
1550 			if (usbd_alloc_buffer(sce->ra_wb_xfer,
1551 					      sce->ra_wb_xferlen) == 0) {
1552 				usbd_free_xfer(sce->ra_wb_xfer);
1553 				return (ENOMEM);
1554 			}
1555 			sce->ibuf = malloc(sce->ra_wb_bufsize,
1556 					   M_USBDEV, M_WAITOK);
1557 			sce->fill = sce->cur = sce->ibuf;
1558 			sce->limit = sce->ibuf + sce->ra_wb_bufsize;
1559 			sce->ra_wb_used = 0;
1560 			sce->state |= UGEN_BULK_WB | UGEN_RA_WB_STOP;
1561 		} else {
1562 			/* Only turn WB off if it's currently on. */
1563 			if (!(sce->state & UGEN_BULK_WB))
1564 				return (0);
1565 
1566 			sce->state &= ~UGEN_BULK_WB;
1567 			/*
1568 			 * XXX Discard whatever's in the buffer, but we
1569 			 * should keep it around and keep writing to
1570 			 * drain the buffer instead.
1571 			 */
1572 			usbd_abort_pipe(sce->pipeh);
1573 			usbd_free_xfer(sce->ra_wb_xfer);
1574 			free(sce->ibuf, M_USBDEV);
1575 			sce->ibuf = NULL;
1576 		}
1577 		return (0);
1578 	case USB_SET_BULK_RA_OPT:
1579 	case USB_SET_BULK_WB_OPT:
1580 	{
1581 		struct usb_bulk_ra_wb_opt *opt;
1582 
1583 		if (endpt == USB_CONTROL_ENDPOINT)
1584 			return (EINVAL);
1585 		opt = (struct usb_bulk_ra_wb_opt *)addr;
1586 		if (cmd == USB_SET_BULK_RA_OPT)
1587 			sce = &sc->sc_endpoints[endpt][IN];
1588 		else
1589 			sce = &sc->sc_endpoints[endpt][OUT];
1590 		if (sce == NULL || sce->pipeh == NULL)
1591 			return (EINVAL);
1592 		if (opt->ra_wb_buffer_size < 1 ||
1593 		    opt->ra_wb_buffer_size > UGEN_BULK_RA_WB_BUFMAX ||
1594 		    opt->ra_wb_request_size < 1 ||
1595 		    opt->ra_wb_request_size > opt->ra_wb_buffer_size)
1596 			return (EINVAL);
1597 		/*
1598 		 * XXX These changes do not take effect until the
1599 		 * next time RA/WB mode is enabled but they ought to
1600 		 * take effect immediately.
1601 		 */
1602 		sce->ra_wb_bufsize = opt->ra_wb_buffer_size;
1603 		sce->ra_wb_reqsize = opt->ra_wb_request_size;
1604 		return (0);
1605 	}
1606 	default:
1607 		break;
1608 	}
1609 
1610 	if (endpt != USB_CONTROL_ENDPOINT)
1611 		return (EINVAL);
1612 
1613 	switch (cmd) {
1614 #ifdef UGEN_DEBUG
1615 	case USB_SETDEBUG:
1616 		ugendebug = *(int *)addr;
1617 		break;
1618 #endif
1619 	case USB_GET_CONFIG:
1620 		err = usbd_get_config(sc->sc_udev, &conf);
1621 		if (err)
1622 			return (EIO);
1623 		*(int *)addr = conf;
1624 		break;
1625 	case USB_SET_CONFIG:
1626 		if (!(flag & FWRITE))
1627 			return (EPERM);
1628 		err = ugen_set_config(sc, *(int *)addr);
1629 		switch (err) {
1630 		case USBD_NORMAL_COMPLETION:
1631 			break;
1632 		case USBD_IN_USE:
1633 			return (EBUSY);
1634 		default:
1635 			return (EIO);
1636 		}
1637 		break;
1638 	case USB_GET_ALTINTERFACE:
1639 		ai = (struct usb_alt_interface *)addr;
1640 		err = usbd_device2interface_handle(sc->sc_udev,
1641 			  ai->uai_interface_index, &iface);
1642 		if (err)
1643 			return (EINVAL);
1644 		idesc = usbd_get_interface_descriptor(iface);
1645 		if (idesc == NULL)
1646 			return (EIO);
1647 		ai->uai_alt_no = idesc->bAlternateSetting;
1648 		break;
1649 	case USB_SET_ALTINTERFACE:
1650 		if (!(flag & FWRITE))
1651 			return (EPERM);
1652 		ai = (struct usb_alt_interface *)addr;
1653 		err = usbd_device2interface_handle(sc->sc_udev,
1654 			  ai->uai_interface_index, &iface);
1655 		if (err)
1656 			return (EINVAL);
1657 		err = ugen_set_interface(sc, ai->uai_interface_index,
1658 		    ai->uai_alt_no);
1659 		if (err)
1660 			return (EINVAL);
1661 		break;
1662 	case USB_GET_NO_ALT:
1663 		ai = (struct usb_alt_interface *)addr;
1664 		cdesc = ugen_get_cdesc(sc, ai->uai_config_index, 0);
1665 		if (cdesc == NULL)
1666 			return (EINVAL);
1667 		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
1668 		if (idesc == NULL) {
1669 			free(cdesc, M_TEMP);
1670 			return (EINVAL);
1671 		}
1672 		ai->uai_alt_no = usbd_get_no_alts(cdesc,
1673 		    idesc->bInterfaceNumber);
1674 		free(cdesc, M_TEMP);
1675 		break;
1676 	case USB_GET_DEVICE_DESC:
1677 		*(usb_device_descriptor_t *)addr =
1678 			*usbd_get_device_descriptor(sc->sc_udev);
1679 		break;
1680 	case USB_GET_CONFIG_DESC:
1681 		cd = (struct usb_config_desc *)addr;
1682 		cdesc = ugen_get_cdesc(sc, cd->ucd_config_index, 0);
1683 		if (cdesc == NULL)
1684 			return (EINVAL);
1685 		cd->ucd_desc = *cdesc;
1686 		free(cdesc, M_TEMP);
1687 		break;
1688 	case USB_GET_INTERFACE_DESC:
1689 		id = (struct usb_interface_desc *)addr;
1690 		cdesc = ugen_get_cdesc(sc, id->uid_config_index, 0);
1691 		if (cdesc == NULL)
1692 			return (EINVAL);
1693 		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
1694 		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
1695 			alt = ugen_get_alt_index(sc, id->uid_interface_index);
1696 		else
1697 			alt = id->uid_alt_index;
1698 		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
1699 		if (idesc == NULL) {
1700 			free(cdesc, M_TEMP);
1701 			return (EINVAL);
1702 		}
1703 		id->uid_desc = *idesc;
1704 		free(cdesc, M_TEMP);
1705 		break;
1706 	case USB_GET_ENDPOINT_DESC:
1707 		ed = (struct usb_endpoint_desc *)addr;
1708 		cdesc = ugen_get_cdesc(sc, ed->ued_config_index, 0);
1709 		if (cdesc == NULL)
1710 			return (EINVAL);
1711 		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
1712 		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
1713 			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
1714 		else
1715 			alt = ed->ued_alt_index;
1716 		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
1717 					alt, ed->ued_endpoint_index);
1718 		if (edesc == NULL) {
1719 			free(cdesc, M_TEMP);
1720 			return (EINVAL);
1721 		}
1722 		ed->ued_desc = *edesc;
1723 		free(cdesc, M_TEMP);
1724 		break;
1725 	case USB_GET_FULL_DESC:
1726 	{
1727 		int len;
1728 		struct iovec iov;
1729 		struct uio uio;
1730 		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
1731 		int error;
1732 
1733 		cdesc = ugen_get_cdesc(sc, fd->ufd_config_index, &len);
1734 		if (cdesc == NULL)
1735 			return (EINVAL);
1736 		if (len > fd->ufd_size)
1737 			len = fd->ufd_size;
1738 		iov.iov_base = (void *)fd->ufd_data;
1739 		iov.iov_len = len;
1740 		uio.uio_iov = &iov;
1741 		uio.uio_iovcnt = 1;
1742 		uio.uio_resid = len;
1743 		uio.uio_offset = 0;
1744 		uio.uio_rw = UIO_READ;
1745 		uio.uio_vmspace = l->l_proc->p_vmspace;
1746 		error = uiomove((void *)cdesc, len, &uio);
1747 		free(cdesc, M_TEMP);
1748 		return (error);
1749 	}
1750 	case USB_GET_STRING_DESC: {
1751 		int len;
1752 		si = (struct usb_string_desc *)addr;
1753 		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
1754 			  si->usd_language_id, &si->usd_desc, &len);
1755 		if (err)
1756 			return (EINVAL);
1757 		break;
1758 	}
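	/*
	 * Illustrative userland sketch (not part of the driver): a control
	 * request issued through the endpoint-0 node (opened read-write,
	 * since FWRITE is required) might be filled in roughly like this to
	 * read the device descriptor:
	 *
	 *	usb_device_descriptor_t ddesc;
	 *	struct usb_ctl_request req;
	 *
	 *	memset(&req, 0, sizeof(req));
	 *	req.ucr_request.bmRequestType = UT_READ_DEVICE;
	 *	req.ucr_request.bRequest = UR_GET_DESCRIPTOR;
	 *	USETW2(req.ucr_request.wValue, UDESC_DEVICE, 0);
	 *	USETW(req.ucr_request.wIndex, 0);
	 *	USETW(req.ucr_request.wLength, USB_DEVICE_DESCRIPTOR_SIZE);
	 *	req.ucr_data = &ddesc;
	 *	req.ucr_flags = 0;
	 *	ioctl(fd, USB_DO_REQUEST, &req);
	 *
	 * with ucr_actlen reporting how many bytes were transferred.
	 */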
1759 	case USB_DO_REQUEST:
1760 	{
1761 		struct usb_ctl_request *ur = (void *)addr;
1762 		int len = UGETW(ur->ucr_request.wLength);
1763 		struct iovec iov;
1764 		struct uio uio;
1765 		void *ptr = 0;
1766 		usbd_status xerr;
1767 		int error = 0;
1768 
1769 		if (!(flag & FWRITE))
1770 			return (EPERM);
1771 		/* Avoid requests that would damage the bus integrity. */
1772 		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1773 		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
1774 		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
1775 		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
1776 		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
1777 		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
1778 			return (EINVAL);
1779 
1780 		if (len < 0 || len > 32767)
1781 			return (EINVAL);
1782 		if (len != 0) {
1783 			iov.iov_base = (void *)ur->ucr_data;
1784 			iov.iov_len = len;
1785 			uio.uio_iov = &iov;
1786 			uio.uio_iovcnt = 1;
1787 			uio.uio_resid = len;
1788 			uio.uio_offset = 0;
1789 			uio.uio_rw =
1790 				ur->ucr_request.bmRequestType & UT_READ ?
1791 				UIO_READ : UIO_WRITE;
1792 			uio.uio_vmspace = l->l_proc->p_vmspace;
1793 			ptr = malloc(len, M_TEMP, M_WAITOK);
1794 			if (uio.uio_rw == UIO_WRITE) {
1795 				error = uiomove(ptr, len, &uio);
1796 				if (error)
1797 					goto ret;
1798 			}
1799 		}
1800 		sce = &sc->sc_endpoints[endpt][IN];
1801 		xerr = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
1802 			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
1803 		if (xerr) {
1804 			error = EIO;
1805 			goto ret;
1806 		}
1807 		if (len != 0) {
1808 			if (uio.uio_rw == UIO_READ) {
1809 				error = uiomove(ptr, len, &uio);
1810 				if (error)
1811 					goto ret;
1812 			}
1813 		}
1814 	ret:
1815 		if (ptr)
1816 			free(ptr, M_TEMP);
1817 		return (error);
1818 	}
1819 	case USB_GET_DEVICEINFO:
1820 		usbd_fill_deviceinfo(sc->sc_udev,
1821 				     (struct usb_device_info *)addr, 0);
1822 		break;
1823 #ifdef COMPAT_30
1824 	case USB_GET_DEVICEINFO_OLD:
1825 		usbd_fill_deviceinfo_old(sc->sc_udev,
1826 					 (struct usb_device_info_old *)addr, 0);
1827 
1828 		break;
1829 #endif
1830 	default:
1831 		return (EINVAL);
1832 	}
1833 	return (0);
1834 }
1835 
1836 int
1837 ugenioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1838 {
1839 	int endpt = UGENENDPOINT(dev);
1840 	struct ugen_softc *sc;
1841 	int error;
1842 
1843 	sc = device_lookup_private(& ugen_cd, UGENUNIT(dev));
1844 	if (sc == NULL)
1845 		return ENXIO;
1846 
1847 	sc->sc_refcnt++;
1848 	error = ugen_do_ioctl(sc, endpt, cmd, addr, flag, l);
1849 	if (--sc->sc_refcnt < 0)
1850 		usb_detach_wakeup(sc->sc_dev);
1851 	return (error);
1852 }
1853 
1854 int
1855 ugenpoll(dev_t dev, int events, struct lwp *l)
1856 {
1857 	struct ugen_softc *sc;
1858 	struct ugen_endpoint *sce_in, *sce_out;
1859 	int revents = 0;
1860 	int s;
1861 
1862 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
1863 	if (sc == NULL)
1864 		return ENXIO;
1865 
1866 	if (sc->sc_dying)
1867 		return (POLLHUP);
1868 
1869 	sce_in = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
1870 	sce_out = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
1871 	if (sce_in == NULL && sce_out == NULL)
1872 		return (POLLERR);
1873 #ifdef DIAGNOSTIC
1874 	if (!sce_in->edesc && !sce_out->edesc) {
1875 		printf("ugenpoll: no edesc\n");
1876 		return (POLLERR);
1877 	}
1878 	/* It's possible to have only one pipe open. */
1879 	if (!sce_in->pipeh && !sce_out->pipeh) {
1880 		printf("ugenpoll: no pipe\n");
1881 		return (POLLERR);
1882 	}
1883 #endif
1884 	s = splusb();
1885 	if (sce_in && sce_in->pipeh && (events & (POLLIN | POLLRDNORM)))
1886 		switch (sce_in->edesc->bmAttributes & UE_XFERTYPE) {
1887 		case UE_INTERRUPT:
1888 			if (sce_in->q.c_cc > 0)
1889 				revents |= events & (POLLIN | POLLRDNORM);
1890 			else
1891 				selrecord(l, &sce_in->rsel);
1892 			break;
1893 		case UE_ISOCHRONOUS:
1894 			if (sce_in->cur != sce_in->fill)
1895 				revents |= events & (POLLIN | POLLRDNORM);
1896 			else
1897 				selrecord(l, &sce_in->rsel);
1898 			break;
1899 		case UE_BULK:
1900 			if (sce_in->state & UGEN_BULK_RA) {
1901 				if (sce_in->ra_wb_used > 0)
1902 					revents |= events &
1903 					    (POLLIN | POLLRDNORM);
1904 				else
1905 					selrecord(l, &sce_in->rsel);
1906 				break;
1907 			}
1908 			/*
1909 			 * We have no easy way of determining if a read will
1910 			 * yield any data or a write will happen.
1911 			 * Pretend they will.
1912 			 */
1913 			 revents |= events & (POLLIN | POLLRDNORM);
1914 			 break;
1915 		default:
1916 			break;
1917 		}
1918 	if (sce_out && sce_out->pipeh && (events & (POLLOUT | POLLWRNORM)))
1919 		switch (sce_out->edesc->bmAttributes & UE_XFERTYPE) {
1920 		case UE_INTERRUPT:
1921 		case UE_ISOCHRONOUS:
1922 			/* XXX unimplemented */
1923 			break;
1924 		case UE_BULK:
1925 			if (sce_out->state & UGEN_BULK_WB) {
1926 				if (sce_out->ra_wb_used <
1927 				    sce_out->limit - sce_out->ibuf)
1928 					revents |= events &
1929 					    (POLLOUT | POLLWRNORM);
1930 				else
1931 					selrecord(l, &sce_out->rsel);
1932 				break;
1933 			}
1934 			/*
1935 			 * We have no easy way of determining if a read will
1936 			 * yield any data or a write will happen.
1937 			 * Pretend they will.
1938 			 */
1939 			 revents |= events & (POLLOUT | POLLWRNORM);
1940 			 break;
1941 		default:
1942 			break;
1943 		}
1944 
1945 
1946 	splx(s);
1947 	return (revents);
1948 }
1949 
1950 static void
1951 filt_ugenrdetach(struct knote *kn)
1952 {
1953 	struct ugen_endpoint *sce = kn->kn_hook;
1954 	int s;
1955 
1956 	s = splusb();
1957 	SLIST_REMOVE(&sce->rsel.sel_klist, kn, knote, kn_selnext);
1958 	splx(s);
1959 }
1960 
1961 static int
1962 filt_ugenread_intr(struct knote *kn, long hint)
1963 {
1964 	struct ugen_endpoint *sce = kn->kn_hook;
1965 
1966 	kn->kn_data = sce->q.c_cc;
1967 	return (kn->kn_data > 0);
1968 }
1969 
1970 static int
1971 filt_ugenread_isoc(struct knote *kn, long hint)
1972 {
1973 	struct ugen_endpoint *sce = kn->kn_hook;
1974 
1975 	if (sce->cur == sce->fill)
1976 		return (0);
1977 
1978 	if (sce->cur < sce->fill)
1979 		kn->kn_data = sce->fill - sce->cur;
1980 	else
1981 		kn->kn_data = (sce->limit - sce->cur) +
1982 		    (sce->fill - sce->ibuf);
1983 
1984 	return (1);
1985 }
1986 
1987 static int
1988 filt_ugenread_bulk(struct knote *kn, long hint)
1989 {
1990 	struct ugen_endpoint *sce = kn->kn_hook;
1991 
1992 	if (!(sce->state & UGEN_BULK_RA))
1993 		/*
1994 		 * We have no easy way of determining if a read will
1995 		 * yield any data or a write will happen.
1996 		 * So, emulate "seltrue".
1997 		 */
1998 		return (filt_seltrue(kn, hint));
1999 
2000 	if (sce->ra_wb_used == 0)
2001 		return (0);
2002 
2003 	kn->kn_data = sce->ra_wb_used;
2004 
2005 	return (1);
2006 }
2007 
2008 static int
2009 filt_ugenwrite_bulk(struct knote *kn, long hint)
2010 {
2011 	struct ugen_endpoint *sce = kn->kn_hook;
2012 
2013 	if (!(sce->state & UGEN_BULK_WB))
2014 		/*
2015 		 * We have no easy way of determining if a read will
2016 		 * yield any data or a write will happen.
2017 		 * So, emulate "seltrue".
2018 		 */
2019 		return (filt_seltrue(kn, hint));
2020 
2021 	if (sce->ra_wb_used == sce->limit - sce->ibuf)
2022 		return (0);
2023 
2024 	kn->kn_data = (sce->limit - sce->ibuf) - sce->ra_wb_used;
2025 
2026 	return (1);
2027 }
2028 
2029 static const struct filterops ugenread_intr_filtops =
2030 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_intr };
2031 
2032 static const struct filterops ugenread_isoc_filtops =
2033 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };
2034 
2035 static const struct filterops ugenread_bulk_filtops =
2036 	{ 1, NULL, filt_ugenrdetach, filt_ugenread_bulk };
2037 
2038 static const struct filterops ugenwrite_bulk_filtops =
2039 	{ 1, NULL, filt_ugenrdetach, filt_ugenwrite_bulk };
2040 
2041 int
2042 ugenkqfilter(dev_t dev, struct knote *kn)
2043 {
2044 	struct ugen_softc *sc;
2045 	struct ugen_endpoint *sce;
2046 	struct klist *klist;
2047 	int s;
2048 
2049 	sc = device_lookup_private(&ugen_cd, UGENUNIT(dev));
2050 	if (sc == NULL)
2051 		return ENXIO;
2052 
2053 	if (sc->sc_dying)
2054 		return (ENXIO);
2055 
2056 	switch (kn->kn_filter) {
2057 	case EVFILT_READ:
2058 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
2059 		if (sce == NULL)
2060 			return (EINVAL);
2061 
2062 		klist = &sce->rsel.sel_klist;
2063 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2064 		case UE_INTERRUPT:
2065 			kn->kn_fop = &ugenread_intr_filtops;
2066 			break;
2067 		case UE_ISOCHRONOUS:
2068 			kn->kn_fop = &ugenread_isoc_filtops;
2069 			break;
2070 		case UE_BULK:
2071 			kn->kn_fop = &ugenread_bulk_filtops;
2072 			break;
2074 		default:
2075 			return (EINVAL);
2076 		}
2077 		break;
2078 
2079 	case EVFILT_WRITE:
2080 		sce = &sc->sc_endpoints[UGENENDPOINT(dev)][OUT];
2081 		if (sce == NULL)
2082 			return (EINVAL);
2083 
2084 		klist = &sce->rsel.sel_klist;
2085 		switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
2086 		case UE_INTERRUPT:
2087 		case UE_ISOCHRONOUS:
2088 			/* XXX poll doesn't support this */
2089 			return (EINVAL);
2090 
2091 		case UE_BULK:
2092 			kn->kn_fop = &ugenwrite_bulk_filtops;
2093 			break;
2094 		default:
2095 			return (EINVAL);
2096 		}
2097 		break;
2098 
2099 	default:
2100 		return (EINVAL);
2101 	}
2102 
2103 	kn->kn_hook = sce;
2104 
2105 	s = splusb();
2106 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
2107 	splx(s);
2108 
2109 	return (0);
2110 }
2111 
2112 #if defined(__FreeBSD__)
2113 DRIVER_MODULE(ugen, uhub, ugen_driver, ugen_devclass, usbd_driver_load, 0);
2114 #endif
2115