/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2004
 *	Doug Rabson
 * Copyright (c) 2002-2003
 * 	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/firewire.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/if_fwipvar.h>

/*
 * We really need a mechanism for allocating regions in the FIFO
 * address space. We pick an address in the OHCI controller's 'middle'
 * address space. This means that the controller will automatically
 * send responses for us, which is fine since we don't have any
 * important information to put in the response anyway.
 */
#define INET_FIFO	0xfffe00000000LL

#define FWIPDEBUG	if (fwipdebug) if_printf
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwip_start (if_t);
static int fwip_ioctl (if_t, u_long, caddr_t);
static void fwip_init (void *);

static void fwip_post_busreset (void *);
static void fwip_output_callback (struct fw_xfer *);
static void fwip_async_output (struct fwip_softc *, if_t);
static void fwip_start_send (void *, int);
static void fwip_stream_input (struct fw_xferq *);
static void fwip_unicast_input(struct fw_xfer *);

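/*
 * Tunables: broadcast_channel packs the GASP tag and the broadcast
 * channel number into the single chtag byte, tx_speed < 0 means "use
 * the controller's own speed", and rx_queue_len sizes both the
 * broadcast DMA ring and the pool of unicast receive xfers.
 */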
static int fwipdebug = 0;
static int broadcast_channel = 0xc0 | 0x1f; /*  tag | channel(XXX) */
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	"Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
	0, "Length of the receive queue");

#ifdef DEVICE_POLLING
static poll_handler_t fwip_poll;

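/*
 * DEVICE_POLLING handler. There is no per-interface work to do here;
 * simply pass the poll on to the underlying firewire controller.
 */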
static int
fwip_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct fwip_softc *fwip;
	struct firewire_comm *fc;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return (0);

	fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	fc = fwip->fd.fc;
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS) ? 0 : 1, count);
	return (0);
}
#endif /* DEVICE_POLLING */

static void
fwip_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent));
}

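/*
 * One fwip instance is attached per firewire bus: the identify hook
 * adds a child with the same unit number as its parent controller and
 * the probe routine rejects any other pairing.
 */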
static int
fwip_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(pa)) {
		return (ENXIO);
	}

	device_set_desc(dev, "IP over FireWire");
	return (0);
}

static int
fwip_attach(device_t dev)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int unit, s;
	struct fw_hwaddr *hwaddr;

	fwip = ((struct fwip_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);
	ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);
	if (ifp == NULL)
		return (ENOSPC);

	mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
	/* XXX */
	fwip->dma_ch = -1;

	fwip->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwip->fd.fc->speed;

	fwip->fd.dev = dev;
	fwip->fd.post_explore = NULL;
	fwip->fd.post_busreset = fwip_post_busreset;
	fwip->fw_softc.fwip = fwip;
	TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);

	/*
	 * Encode our hardware address the way that arp likes it.
	 */
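	/*
	 * The EUI-64 and the unicast FIFO address are kept in network
	 * byte order so that firewire_output and ARP/neighbour discovery
	 * can copy them into packets without further conversion.
	 */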
	hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr;
	hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
	hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
	hwaddr->sspd = fwip->fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */
	if_setsoftc(ifp, &fwip->fw_softc);

	if_initname(ifp, device_get_name(dev), unit);
	if_setinitfn(ifp, fwip_init);
	if_setstartfn(ifp, fwip_start);
	if_setioctlfn(ifp, fwip_ioctl);
	if_setflags(ifp, (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST));
	if_setsendqlen(ifp, TX_MAX_QUEUE);
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	s = splimp();
	firewire_ifattach(ifp, hwaddr);
	splx(s);

	FWIPDEBUG(ifp, "interface created\n");
	return 0;
}

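/*
 * Tear down the receive machinery: stop the broadcast DMA channel and
 * free its buffers, drop the unicast FIFO binding, and release the
 * pre-allocated receive and transmit xfers.
 */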
static void
fwip_stop(struct fwip_softc *fwip)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	if_t ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwip->fd.fc;

	if (fwip->dma_ch >= 0) {
		xferq = fc->ir[fwip->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwip->dma_ch);
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWIP);

		fw_bindremove(fc, &fwip->fwb);
		for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwip->xferlist);

		xferq->bulkxfer = NULL;
		fwip->dma_ch = -1;
	}

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}

static int
fwip_detach(device_t dev)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int s;

	fwip = (struct fwip_softc *)device_get_softc(dev);
	ifp = fwip->fw_softc.fwip_ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	s = splimp();

	fwip_stop(fwip);
	firewire_ifdetach(ifp);
	if_free(ifp);
	mtx_destroy(&fwip->mtx);

	splx(s);
	return 0;
}

static void
fwip_init(void *arg)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip;
	struct firewire_comm *fc;
	if_t ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWIPDEBUG(ifp, "initializing\n");

	fc = fwip->fd.fc;
#define START 0
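	/*
	 * On the first call (or after a full stop) open an isochronous
	 * receive DMA channel for the broadcast/GASP stream and bind our
	 * unicast FIFO address range for incoming block write requests.
	 */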
	if (fwip->dma_ch < 0) {
		fwip->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwip->dma_ch < 0)
			return;
		xferq = fc->ir[fwip->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
				FWXFERQ_HANDLER | FWXFERQ_STREAM;
		xferq->flag &= ~0xff;
		xferq->flag |= broadcast_channel & 0xff;
		/* register fwip_stream_input handler */
		xferq->sc = (caddr_t) fwip;
		xferq->hand = fwip_stream_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
							M_FWIP, M_WAITOK);
		if (xferq->bulkxfer == NULL) {
			printf("if_fwip: malloc failed\n");
			return;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
					&xferq->bulkxfer[i], link);
		}

		fwip->fwb.start = INET_FIFO;
		fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */

		/* pre-allocate xfer */
		STAILQ_INIT(&fwip->fwb.xferlist);
		for (i = 0; i < rx_queue_len; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xfer->recv.payload = mtod(m, uint32_t *);
			xfer->recv.pay_len = MCLBYTES;
			xfer->hand = fwip_unicast_input;
			xfer->fc = fc;
			xfer->sc = (caddr_t)fwip;
			xfer->mbuf = m;
			STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
		}
		fw_bindadd(fc, &fwip->fwb);

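		/* Pre-allocate transmit xfers; mbufs are attached at send time. */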
		STAILQ_INIT(&fwip->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwip->fd.fc;
			xfer->sc = (caddr_t)fwip;
			xfer->hand = fwip_output_callback;
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwip->dma_ch];

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwip->dma_ch);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

#if 0
	/* attempt to start output */
	fwip_start(ifp);
#endif
}

static int
fwip_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (if_getflags(ifp) & IFF_UP) {
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				fwip_init(&fwip->fw_softc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				fwip_stop(fwip);
		}
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwip->fd.fc;

		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
			error = ether_poll_register(fwip_poll, ifp);
			if (error)
				return (error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    if_getcapenable(ifp) & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
	default:
		s = splimp();
		error = firewire_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
	}

	return (0);
}

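/*
 * Bus reset hook. The configuration ROM is rebuilt after every bus
 * reset, so re-advertise the RFC 2734 (IPv4) and RFC 3146 (IPv6) unit
 * directories and forget the cached unicast destination, since node
 * IDs may have changed.
 */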
static void
fwip_post_busreset(void *arg)
{
	struct fwip_softc *fwip = arg;
	struct crom_src *src;
	struct crom_chunk *root;

	src = fwip->fd.fc->crom_src;
	root = fwip->fd.fc->crom_root;

	/* RFC2734 IPv4 over IEEE1394 */
	bzero(&fwip->unit4, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR);
	crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA");
	crom_add_entry(&fwip->unit4, CSRKEY_VER, 1);
	crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4");

	/* RFC3146 IPv6 over IEEE1394 */
	bzero(&fwip->unit6, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR);
	crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA");
	crom_add_entry(&fwip->unit6, CSRKEY_VER, 2);
	crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6");

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;
	firewire_busreset(fwip->fw_softc.fwip_ifp);
}

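/*
 * Transmit completion handler: count errors, free the mbuf, recycle
 * the xfer onto the free list and restart output in case packets were
 * waiting for a free xfer.
 */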
static void
fwip_output_callback(struct fw_xfer *xfer)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int s;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	/* XXX error check */
	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWIP_LOCK(fwip);
	STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
	FWIP_UNLOCK(fwip);
	splx(s);

	/* Restart output in case packets were held back while the queue was full. */
	if (!if_sendq_empty(ifp)) {
		fwip_start(ifp);
	}
}

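/*
 * if_start handler: drain the send queue once the receive DMA channel
 * has been opened; until then the interface is not ready and anything
 * queued is dropped.
 */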
static void
fwip_start(if_t ifp)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	int s;

	FWIPDEBUG(ifp, "starting\n");

	if (fwip->dma_ch < 0) {
		struct mbuf	*m = NULL;

		FWIPDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			m = if_dequeue(ifp);
			if (m != NULL)
				m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);

	if (!if_sendq_empty(ifp))
		fwip_async_output(fwip, ifp);

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	splx(s);
}

/* Async. stream output */
static void
fwip_async_output(struct fwip_softc *fwip, if_t ifp)
{
	struct firewire_comm *fc = fwip->fd.fc;
	struct mbuf *m;
	struct m_tag *mtag;
	struct fw_hwaddr *destfw;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	uint16_t nodeid;
	int error;
	int i = 0;

	xfer = NULL;
	xferq = fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
			!if_sendq_empty(ifp)) {
		FWIP_LOCK(fwip);
		xfer = STAILQ_FIRST(&fwip->xferlist);
		if (xfer == NULL) {
			FWIP_UNLOCK(fwip);
#if 0
			printf("if_fwip: lack of xfer\n");
#endif
			break;
		}
		STAILQ_REMOVE_HEAD(&fwip->xferlist, link);
		FWIP_UNLOCK(fwip);

		m = if_dequeue(ifp);
		if (m == NULL) {
			FWIP_LOCK(fwip);
			STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			break;
		}

		/*
		 * Dig out the link-level address which
		 * firewire_output got via arp or neighbour
		 * discovery. If we don't have a link-level address,
		 * just stick the thing on the broadcast channel.
		 */
		mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0);
		if (mtag == NULL)
			destfw = NULL;
		else
			destfw = (struct fw_hwaddr *) (mtag + 1);

		/*
		 * We don't do any bpf stuff here - the generic code
		 * in firewire_output gives the packet to bpf before
		 * it adds the link-level encapsulation.
		 */

		/*
		 * Put the mbuf in the xfer early in case we hit an
		 * error case below - fwip_output_callback will free
		 * the mbuf.
		 */
		xfer->mbuf = m;

		/*
		 * We use the arp result (if any) to add a suitable firewire
		 * packet header before handing off to the bus.
		 */
		fp = &xfer->send.hdr;
		nodeid = FWLOCALBUS | fc->nodeid;
		if ((m->m_flags & M_BCAST) || !destfw) {
			/*
			 * Broadcast packets are sent as GASP packets with
			 * specifier ID 0x00005e, version 1 on the broadcast
			 * channel. To be conservative, we send at the
			 * slowest possible speed.
			 */
			uint32_t *p;

			M_PREPEND(m, 2*sizeof(uint32_t), M_NOWAIT);
			if (m == NULL) {
				/* No room for the GASP header - drop the packet. */
				xfer->mbuf = NULL;
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				FWIP_LOCK(fwip);
				STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
				FWIP_UNLOCK(fwip);
				continue;
			}
			p = mtod(m, uint32_t *);
			fp->mode.stream.len = m->m_pkthdr.len;
			fp->mode.stream.chtag = broadcast_channel;
			fp->mode.stream.tcode = FWTCODE_STREAM;
			fp->mode.stream.sy = 0;
			xfer->send.spd = 0;
			p[0] = htonl(nodeid << 16);
			p[1] = htonl((0x5e << 24) | 1);
		} else {
			/*
			 * Unicast packets are sent as block writes to the
			 * target's unicast fifo address. If we can't
			 * find the node address, we just give up. We
			 * could broadcast it but that might overflow
			 * the packet size limitations due to the
			 * extra GASP header. Note: the hardware
			 * address is stored in network byte order to
			 * make life easier for ARP.
			 */
			struct fw_device *fd;
			struct fw_eui64 eui;

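			/*
			 * Rebuild the write-request header only when the
			 * destination EUI-64 changes; otherwise reuse the
			 * cached copy in fwip->last_hdr.
			 */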
			eui.hi = ntohl(destfw->sender_unique_ID_hi);
			eui.lo = ntohl(destfw->sender_unique_ID_lo);
			if (fwip->last_dest.hi != eui.hi ||
			    fwip->last_dest.lo != eui.lo) {
				fd = fw_noderesolve_eui64(fc, &eui);
				if (!fd) {
					/* error */
					if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
					/* XXX set error code */
					fwip_output_callback(xfer);
					continue;
				}
				fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst;
				fwip->last_hdr.mode.wreqb.tlrt = 0;
				fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB;
				fwip->last_hdr.mode.wreqb.pri = 0;
				fwip->last_hdr.mode.wreqb.src = nodeid;
				fwip->last_hdr.mode.wreqb.dest_hi =
					ntohs(destfw->sender_unicast_FIFO_hi);
				fwip->last_hdr.mode.wreqb.dest_lo =
					ntohl(destfw->sender_unicast_FIFO_lo);
				fwip->last_hdr.mode.wreqb.extcode = 0;
				fwip->last_dest = eui;
			}

			fp->mode.wreqb = fwip->last_hdr.mode.wreqb;
			fp->mode.wreqb.len = m->m_pkthdr.len;
			xfer->send.spd = min(destfw->sspd, fc->speed);
		}

		xfer->send.pay_len = m->m_pkthdr.len;

		error = fw_asyreq(fc, -1, xfer);
		if (error == EAGAIN) {
			/*
			 * We ran out of tlabels - requeue the packet
			 * for later transmission.
			 */
			xfer->mbuf = NULL;
			FWIP_LOCK(fwip);
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			if_sendq_prepend(ifp, m);
			break;
		}
		if (error) {
			/* error */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* XXX set error code */
			fwip_output_callback(xfer);
			continue;
		} else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fc);
}

static void
fwip_start_send (void *arg, int count)
{
	struct fwip_softc *fwip = arg;

	fwip->fd.fc->atq->start(fwip->fd.fc);
}

/* Async. stream input: receive handler for the broadcast/GASP channel */
static void
fwip_stream_input(struct fw_xferq *xferq)
{
	struct epoch_tracker et;
	struct mbuf *m, *m0;
	struct m_tag *mtag;
	if_t ifp;
	struct fwip_softc *fwip;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	uint16_t src;
	uint32_t *p;

	fwip = (struct fwip_softc *)xferq->sc;
	ifp = fwip->fw_softc.fwip_ifp;

	NET_EPOCH_ENTER(et);
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwip->fd.fc->irx_post != NULL)
			fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("fwip_stream_input: m_getcl failed\n");

		/*
		 * We must have a GASP header - leave the
		 * encapsulation sanity checks to the generic
		 * code. Remember that we also have the firewire async
		 * stream header even though that isn't accounted for
		 * in mode.stream.len.
		 */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    2*sizeof(uint32_t)) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
			+ sizeof(fp->mode.stream);

		/*
		 * If we received the packet on the broadcast channel,
		 * mark it as broadcast, otherwise we assume it must
		 * be multicast.
		 */
		if (fp->mode.stream.chtag == broadcast_channel)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;

		/*
		 * Make sure we recognise the GASP specifier and
		 * version.
		 */
		p = mtod(m, uint32_t *);
		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e
		    || (ntohl(p[2]) & 0xffffff) != 1) {
			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
			    ntohl(p[1]), ntohl(p[2]));
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/*
		 * Record the sender ID for possible BPF usage.
		 */
		src = ntohl(p[1]) >> 16;
		if (bpf_peers_present_if(ifp)) {
			mtag = m_tag_alloc(MTAG_FIREWIRE,
			    MTAG_FIREWIRE_SENDER_EUID,
			    2*sizeof(uint32_t), M_NOWAIT);
			if (mtag) {
				/* bpf wants it in network byte order */
				struct fw_device *fd;
				uint32_t *p = (uint32_t *) (mtag + 1);
				fd = fw_noderesolve_nodeid(fwip->fd.fc,
				    src & 0x3f);
				if (fd) {
					p[0] = htonl(fd->eui.hi);
					p[1] = htonl(fd->eui.lo);
				} else {
					p[0] = 0;
					p[1] = 0;
				}
				m_tag_prepend(m, mtag);
			}
		}

		/*
		 * Trim off the GASP header
		 */
		m_adj(m, 3*sizeof(uint32_t));
		m->m_pkthdr.rcvif = ifp;
		firewire_input(ifp, m, src);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	NET_EPOCH_EXIT(et);
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch);
}

static __inline void
fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
{
	struct mbuf *m;

	/*
	 * We have finished with a unicast xfer. Allocate a new
	 * cluster and stick it on the back of the input queue.
	 */
	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
	xfer->mbuf = m;
	xfer->recv.payload = mtod(m, uint32_t *);
	xfer->recv.pay_len = MCLBYTES;
	STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
}

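/*
 * Handler for block write requests arriving at our unicast FIFO
 * address. Per the INET_FIFO comment above, the controller sends the
 * write response itself; the rtcode computed here only decides whether
 * we accept the payload.
 */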
static void
fwip_unicast_input(struct fw_xfer *xfer)
{
	uint64_t address;
	struct mbuf *m;
	struct m_tag *mtag;
	struct epoch_tracker et;
	if_t ifp;
	struct fwip_softc *fwip;
	struct fw_pkt *fp;
	//struct fw_pkt *sfp;
	int rtcode;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	m = xfer->mbuf;
	xfer->mbuf = NULL;
	fp = &xfer->recv.hdr;

	/*
	 * Check the fifo address - we only accept addresses of
	 * exactly INET_FIFO.
	 */
	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
		| fp->mode.wreqb.dest_lo;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		rtcode = FWRCODE_ER_TYPE;
	} else if (address != INET_FIFO) {
		rtcode = FWRCODE_ER_ADDR;
	} else {
		rtcode = FWRCODE_COMPLETE;
	}
	NET_EPOCH_ENTER(et);

	/*
	 * Pick up a new mbuf and stick it on the back of the receive
	 * queue.
	 */
	fwip_unicast_input_recycle(fwip, xfer);

	/*
	 * If we've already rejected the packet, give up now.
	 */
	if (rtcode != FWRCODE_COMPLETE) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		goto done;
	}

	if (bpf_peers_present_if(ifp)) {
		/*
		 * Record the sender ID for possible BPF usage.
		 */
		mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID,
		    2*sizeof(uint32_t), M_NOWAIT);
		if (mtag) {
			/* bpf wants it in network byte order */
			struct fw_device *fd;
			uint32_t *p = (uint32_t *) (mtag + 1);
			fd = fw_noderesolve_nodeid(fwip->fd.fc,
			    fp->mode.wreqb.src & 0x3f);
			if (fd) {
				p[0] = htonl(fd->eui.hi);
				p[1] = htonl(fd->eui.lo);
			} else {
				p[0] = 0;
				p[1] = 0;
			}
			m_tag_prepend(m, mtag);
		}
	}

	/*
	 * Hand off to the generic encapsulation code. We don't use
	 * ifp->if_input so that we can pass the source nodeid as an
	 * argument to facilitate link-level fragment reassembly.
	 */
	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
	m->m_pkthdr.rcvif = ifp;
	firewire_input(ifp, m, fp->mode.wreqb.src);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
done:
	NET_EPOCH_EXIT(et);
}

static device_method_t fwip_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	fwip_identify),
	DEVMETHOD(device_probe,		fwip_probe),
	DEVMETHOD(device_attach,	fwip_attach),
	DEVMETHOD(device_detach,	fwip_detach),
	{ 0, 0 }
};

static driver_t fwip_driver = {
	"fwip",
	fwip_methods,
	sizeof(struct fwip_softc),
};

DRIVER_MODULE(fwip, firewire, fwip_driver, 0, 0);
MODULE_VERSION(fwip, 1);
MODULE_DEPEND(fwip, firewire, 1, 1, 1);