xref: /dragonfly/sys/bus/u4b/net/usb_ethernet.c (revision 066b6da2)
/* $FreeBSD$ */
/*-
 * Copyright (c) 2009 Andrew Thompson (thompsa@FreeBSD.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>

#include <sys/devfs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/u4b/usb.h>
#include <bus/u4b/usbdi.h>

#include <bus/u4b/usb_process.h>
#include <bus/u4b/net/usb_ethernet.h>

static SYSCTL_NODE(_net, OID_AUTO, ue, CTLFLAG_RD, 0,
    "USB Ethernet parameters");

#define	UE_LOCK(_ue)		lockmgr((_ue)->ue_lock, LK_EXCLUSIVE)
#define	UE_UNLOCK(_ue)		lockmgr((_ue)->ue_lock, LK_RELEASE)
#define	UE_LOCK_ASSERT(_ue)	KKASSERT(lockowned((_ue)->ue_lock))

MODULE_DEPEND(uether, usb, 1, 1, 1);
MODULE_DEPEND(uether, miibus, 1, 1, 1);

/*
static struct unrhdr *ueunit;
*/
DEVFS_DECLARE_CLONE_BITMAP(ue);

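/*
 * All ethernet configuration work (init/stop, promiscuous and multicast
 * changes, media changes and the periodic tick) is deferred to the
 * per-device USB process via the callbacks below, where it is safe to
 * sleep on USB transfers.
 */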
static usb_proc_callback_t ue_attach_post_task;
static usb_proc_callback_t ue_promisc_task;
static usb_proc_callback_t ue_setmulti_task;
static usb_proc_callback_t ue_ifmedia_task;
static usb_proc_callback_t ue_tick_task;
static usb_proc_callback_t ue_start_task;
static usb_proc_callback_t ue_stop_task;

static void	ue_init(void *);
static void	ue_start(struct ifnet *, struct ifaltq_subque *);
static int	ue_ifmedia_upd(struct ifnet *);
static void	ue_watchdog(void *);

/*
 * Return values:
 *    0: success
 * Else: device has been detached
 */
uint8_t
uether_pause(struct usb_ether *ue, unsigned int _ticks)
{
	if (usb_proc_is_gone(&ue->ue_tq)) {
		/* nothing to do */
		return (1);
	}
	usb_pause_mtx(ue->ue_lock, _ticks);
	return (0);
}

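/*
 * Queue a configuration command for execution by the USB process.
 * Two message buffers are passed so that a command can be queued again
 * while a previous instance is still executing; "start" and "stop"
 * commands additionally wait for completion.
 */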
static void
ue_queue_command(struct usb_ether *ue,
    usb_proc_callback_t *fn,
    struct usb_proc_msg *t0, struct usb_proc_msg *t1)
{
	struct usb_ether_cfg_task *task;

	UE_LOCK_ASSERT(ue);

	if (usb_proc_is_gone(&ue->ue_tq)) {
		return;		/* nothing to do */
	}
	/*
	 * NOTE: The task cannot be executed before we drop the lock
	 * (ue_lock). It is safe to update fields in the message
	 * structure after the message has been queued.
	 */
	task = (struct usb_ether_cfg_task *)
	  usb_proc_msignal(&ue->ue_tq, t0, t1);

	/* Setup callback and self pointers */
	task->hdr.pm_callback = fn;
	task->ue = ue;

	/*
	 * Start and stop must be synchronous!
	 */
	if ((fn == ue_start_task) || (fn == ue_stop_task))
		usb_proc_mwait(&ue->ue_tq, t0, t1);
}

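/*
 * Trivial accessors used by the individual USB ethernet drivers.
 */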
struct ifnet *
uether_getifp(struct usb_ether *ue)
{
	return (ue->ue_ifp);
}

struct mii_data *
uether_getmii(struct usb_ether *ue)
{
	return (device_get_softc(ue->ue_miibus));
}

void *
uether_getsc(struct usb_ether *ue)
{
	return (ue->ue_sc);
}

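/*
 * Sysctl handler backing the per-interface "%parent" node; it reports
 * the name of the USB device the interface is attached to.
 */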
static int
ue_sysctl_parent(SYSCTL_HANDLER_ARGS)
{
	struct usb_ether *ue = arg1;
	const char *name;

	name = device_get_nameunit(ue->ue_dev);
	return SYSCTL_OUT(req, name, strlen(name));
}

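/*
 * uether_ifattach() is called from a driver's attach routine after the
 * usb_ether structure has been filled in.  It validates the critical
 * fields, creates the per-device USB process and defers the remainder
 * of the attach (unit/ifnet allocation, PHY probe, ether_ifattach) to
 * ue_attach_post_task() in process context.
 *
 * Rough usage sketch from a hypothetical driver attach (all names other
 * than the ue_* fields are illustrative only):
 *
 *	sc->sc_ue.ue_sc = sc;
 *	sc->sc_ue.ue_dev = dev;
 *	sc->sc_ue.ue_udev = uaa->device;
 *	sc->sc_ue.ue_lock = &sc->sc_lock;
 *	sc->sc_ue.ue_methods = &xxx_ue_methods;
 *	error = uether_ifattach(&sc->sc_ue);
 */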
int
uether_ifattach(struct usb_ether *ue)
{
	int error;

	/* check some critical parameters */
	if ((ue->ue_dev == NULL) ||
	    (ue->ue_udev == NULL) ||
	    (ue->ue_lock == NULL) ||
	    (ue->ue_methods == NULL))
		return (EINVAL);

	error = usb_proc_create(&ue->ue_tq, ue->ue_lock,
	    device_get_nameunit(ue->ue_dev), USB_PRI_MED);
	if (error) {
		device_printf(ue->ue_dev, "could not setup taskqueue\n");
		goto error;
	}

	/* fork rest of the attach code */
	UE_LOCK(ue);
	ue_queue_command(ue, ue_attach_post_task,
	    &ue->ue_sync_task[0].hdr,
	    &ue->ue_sync_task[1].hdr);
	UE_UNLOCK(ue);

error:
	return (error);
}

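/*
 * Second half of the attach, run from the USB process: allocate a unit
 * number and an ifnet, let the driver finish its setup, probe the PHY
 * (if the driver provides MII callbacks), attach the ethernet interface
 * and create the per-interface sysctl tree.
 */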
static void
ue_attach_post_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp;
	int error;
	char num[14];			/* sufficient for 32 bits */

	/* first call driver's post attach routine */
	ue->ue_methods->ue_attach_post(ue);

	UE_UNLOCK(ue);

	KKASSERT(!lockowned(ue->ue_lock));
	/* XXX
	ue->ue_unit = alloc_unr(ueunit);
	*/
	ue->ue_unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ue), 0);
	usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_lock, 0);
	sysctl_ctx_init(&ue->ue_sysctl_ctx);

	KKASSERT(!lockowned(ue->ue_lock));
	error = 0;
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(ue->ue_dev, "could not allocate ifnet\n");
		goto fail;
	}

	KKASSERT(!lockowned(ue->ue_lock));
	ifp->if_softc = ue;
	if_initname(ifp, "ue", ue->ue_unit);
	if (ue->ue_methods->ue_attach_post_sub != NULL) {
		ue->ue_ifp = ifp;
		error = ue->ue_methods->ue_attach_post_sub(ue);
		KKASSERT(!lockowned(ue->ue_lock));
	} else {
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		if (ue->ue_methods->ue_ioctl != NULL)
			ifp->if_ioctl = ue->ue_methods->ue_ioctl;
		else
			ifp->if_ioctl = uether_ioctl;
		ifp->if_start = ue_start;
		ifp->if_init = ue_init;
		ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	/* XXX
		ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	*/
		ifq_set_ready(&ifp->if_snd);
		ue->ue_ifp = ifp;

		if (ue->ue_methods->ue_mii_upd != NULL &&
		    ue->ue_methods->ue_mii_sts != NULL) {
			/* device_xxx() depends on this */
/*			mtx_lock(&Giant);*/
			error = mii_phy_probe(ue->ue_dev, &ue->ue_miibus,
				ue_ifmedia_upd, ue->ue_methods->ue_mii_sts);
			/*
			error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
			    ue_ifmedia_upd, ue->ue_methods->ue_mii_sts,
			    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
			*/
/*			mtx_unlock(&Giant);*/
		}
	}

	if (error) {
		device_printf(ue->ue_dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_printf(ifp, "<USB Ethernet> on %s\n", device_get_nameunit(ue->ue_dev));
	ether_ifattach(ifp, ue->ue_eaddr, NULL);
	/* Tell upper layer we support VLAN oversized frames. */
	if (ifp->if_capabilities & IFCAP_VLAN_MTU)
		ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ksnprintf(num, sizeof(num), "%u", ue->ue_unit);
	ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx,
	    &SYSCTL_NODE_CHILDREN(_net, ue),
	    OID_AUTO, num, CTLFLAG_RD, NULL, "");
	SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx,
	    SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO,
	    "%parent", CTLTYPE_STRING | CTLFLAG_RD, ue, 0,
	    ue_sysctl_parent, "A", "parent device");

	KKASSERT(!lockowned(ue->ue_lock));
	UE_LOCK(ue);
	return;

fail:
	/* XXX
	free_unr(ueunit, ue->ue_unit);
	*/
	devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ue), ue->ue_unit);
	if (ue->ue_ifp != NULL) {
		if_free(ue->ue_ifp);
		ue->ue_ifp = NULL;
	}
	UE_LOCK(ue);
	return;
}

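/*
 * Tear-down counterpart of uether_ifattach(): drain the USB process,
 * mark the interface down, stop the watchdog, detach miibus and the
 * ethernet interface, and release the sysctl tree and unit number.
 */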
void
uether_ifdetach(struct usb_ether *ue)
{
	struct ifnet *ifp;

	/* wait for any post attach or other command to complete */
	usb_proc_drain(&ue->ue_tq);

	/* read "ifnet" pointer after taskqueue drain */
	ifp = ue->ue_ifp;

	if (ifp != NULL) {

		/* we are not running any more */
		UE_LOCK(ue);
		ifp->if_flags &= ~IFF_RUNNING;
		UE_UNLOCK(ue);

		/* drain any callouts */
		usb_callout_drain(&ue->ue_watchdog);

		/* detach miibus */
		if (ue->ue_miibus != NULL) {
			/*mtx_lock(&Giant);*/	/* device_xxx() depends on this */
			device_delete_child(ue->ue_dev, ue->ue_miibus);
			/*mtx_unlock(&Giant);*/
		}

		/* detach ethernet */
		ether_ifdetach(ifp);

		/* free interface instance */
		if_free(ifp);

		/* free sysctl */
		sysctl_ctx_free(&ue->ue_sysctl_ctx);

		/* free unit */
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ue), ue->ue_unit);
		/*
		free_unr(ueunit, ue->ue_unit);
		*/
	}

	/* free taskqueue, if any */
	usb_proc_free(&ue->ue_tq);
}

uint8_t
uether_is_gone(struct usb_ether *ue)
{
	return (usb_proc_is_gone(&ue->ue_tq));
}

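/*
 * ifnet if_init entry point: queue a synchronous "start" command so the
 * driver's ue_init() method runs in USB process context.
 */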
void
uether_init(void *arg)
{
	ue_init(arg);
}

static void
ue_init(void *arg)
{
	struct usb_ether *ue = arg;

	UE_LOCK(ue);
	ue_queue_command(ue, ue_start_task,
	    &ue->ue_sync_task[0].hdr,
	    &ue->ue_sync_task[1].hdr);
	UE_UNLOCK(ue);
}

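/*
 * Executed from the USB process: call the driver's init method and, if
 * the interface came up and the driver has a tick method, arm the one
 * second watchdog callout.
 */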
static void
ue_start_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp = ue->ue_ifp;

	UE_LOCK_ASSERT(ue);

	ue->ue_methods->ue_init(ue);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (ue->ue_methods->ue_tick != NULL)
		usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue);
}

static void
ue_stop_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	UE_LOCK_ASSERT(ue);

	usb_callout_stop(&ue->ue_watchdog);

	ue->ue_methods->ue_stop(ue);
}

void
uether_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{

	ue_start(ifp, ifsq);
}

static void
ue_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct usb_ether *ue = ifp->if_softc;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	UE_LOCK(ue);
	ue->ue_methods->ue_start(ue);
	UE_UNLOCK(ue);
}

static void
ue_promisc_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	ue->ue_methods->ue_setpromisc(ue);
}

static void
ue_setmulti_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;

	ue->ue_methods->ue_setmulti(ue);
}

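/*
 * Media changes are deferred to the USB process, where it is safe to
 * perform the USB transfers needed to access the PHY.
 */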
int
uether_ifmedia_upd(struct ifnet *ifp)
{

	return (ue_ifmedia_upd(ifp));
}

static int
ue_ifmedia_upd(struct ifnet *ifp)
{
	struct usb_ether *ue = ifp->if_softc;

	/* Defer to process context */
	UE_LOCK(ue);
	ue_queue_command(ue, ue_ifmedia_task,
	    &ue->ue_media_task[0].hdr,
	    &ue->ue_media_task[1].hdr);
	UE_UNLOCK(ue);

	return (0);
}

static void
ue_ifmedia_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp = ue->ue_ifp;

	ue->ue_methods->ue_mii_upd(ifp);
}

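/*
 * Watchdog callout: while the interface is running, queue the driver's
 * tick task and re-arm the callout so it fires once per second.
 */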
static void
ue_watchdog(void *arg)
{
	struct usb_ether *ue = arg;
	struct ifnet *ifp = ue->ue_ifp;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	ue_queue_command(ue, ue_tick_task,
	    &ue->ue_tick_task[0].hdr,
	    &ue->ue_tick_task[1].hdr);

	usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue);
}

static void
ue_tick_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp = ue->ue_ifp;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	ue->ue_methods->ue_tick(ue);
}

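/*
 * Default ioctl handler: interface flag and multicast filter changes are
 * queued to the USB process, media ioctls are handed to miibus (if
 * present), everything else goes to ether_ioctl().
 */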
int
uether_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *uc)
{
	struct usb_ether *ue = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		UE_LOCK(ue);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				ue_queue_command(ue, ue_promisc_task,
				    &ue->ue_promisc_task[0].hdr,
				    &ue->ue_promisc_task[1].hdr);
			else
				ue_queue_command(ue, ue_start_task,
				    &ue->ue_sync_task[0].hdr,
				    &ue->ue_sync_task[1].hdr);
		} else {
			ue_queue_command(ue, ue_stop_task,
			    &ue->ue_sync_task[0].hdr,
			    &ue->ue_sync_task[1].hdr);
		}
		UE_UNLOCK(ue);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		UE_LOCK(ue);
		ue_queue_command(ue, ue_setmulti_task,
		    &ue->ue_multi_task[0].hdr,
		    &ue->ue_multi_task[1].hdr);
		UE_UNLOCK(ue);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (ue->ue_miibus != NULL) {
			mii = device_get_softc(ue->ue_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static int
uether_modevent(module_t mod, int type, void *data)
{
	static int attached = 0;

	switch (type) {
	case MOD_LOAD:
		if (attached)
			return (EEXIST);

		devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ue));

		attached = 1;
		break;
	case MOD_UNLOAD:
		devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ue));
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t uether_mod = {
	"uether",
	uether_modevent,
	0
};

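/*
 * Allocate a receive mbuf cluster and trim ETHER_ALIGN bytes so the IP
 * header following the ethernet header ends up naturally aligned.
 */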
struct mbuf *
uether_newbuf(void)
{
	struct mbuf *m_new;

	m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return (NULL);
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	m_adj(m_new, ETHER_ALIGN);
	return (m_new);
}

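/*
 * Receive helpers.  A driver's bulk-in completion callback, which runs
 * with ue_lock held, hands completed frames to uether_rxmbuf() (for a
 * driver-supplied mbuf) or uether_rxbuf() (copied out of the transfer's
 * page cache); both only enqueue onto ue_rxq.  Once the transfer has
 * been resubmitted, the driver calls uether_rxflush(), which drops the
 * lock around ifp->if_input() for each queued mbuf.
 *
 * Rough sketch of a hypothetical driver callback (names illustrative):
 *
 *	case USB_ST_TRANSFERRED:
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *		uether_rxbuf(ue, usbd_xfer_get_frame(xfer, 0), 0, actlen);
 *		// FALLTHROUGH
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *		usbd_transfer_submit(xfer);
 *		uether_rxflush(ue);
 *		break;
 */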
int
uether_rxmbuf(struct usb_ether *ue, struct mbuf *m,
    unsigned int len)
{
	struct ifnet *ifp = ue->ue_ifp;

	UE_LOCK_ASSERT(ue);

	/* finalize mbuf */
	IFNET_STAT_INC(ifp, ipackets, 1);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len;

	/* enqueue for later when the lock can be released */
	IF_ENQUEUE(&ue->ue_rxq, m);
	return (0);
}

int
uether_rxbuf(struct usb_ether *ue, struct usb_page_cache *pc,
    unsigned int offset, unsigned int len)
{
	struct ifnet *ifp = ue->ue_ifp;
	struct mbuf *m;

	UE_LOCK_ASSERT(ue);

	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN)
		return (1);

	m = uether_newbuf();
	if (m == NULL) {
		IFNET_STAT_INC(ifp, iqdrops, 1);
		return (ENOMEM);
	}

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	/* finalize mbuf */
	IFNET_STAT_INC(ifp, ipackets, 1);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len;

	/* enqueue for later when the lock can be released */
	IF_ENQUEUE(&ue->ue_rxq, m);
	return (0);
}

void
uether_rxflush(struct usb_ether *ue)
{
	struct ifnet *ifp = ue->ue_ifp;
	struct mbuf *m;

	UE_LOCK_ASSERT(ue);

	for (;;) {
		IF_DEQUEUE(&ue->ue_rxq, m);
		if (m == NULL)
			break;

		/*
		 * The USB xfer has been resubmitted so it's safe to unlock now.
		 */
		UE_UNLOCK(ue);
		ifp->if_input(ifp, m, NULL, -1);
		UE_LOCK(ue);
	}
}

DECLARE_MODULE(uether, uether_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(uether, 1);
667