/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2004
 *	Doug Rabson
 * Copyright (c) 2002-2003
 * 	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/firewire.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/if_fwipvar.h>

/*
 * We really need a mechanism for allocating regions in the FIFO
 * address space. We pick an address in the OHCI controller's 'middle'
 * address space. This means that the controller will automatically
 * send responses for us, which is fine since we don't have any
 * important information to put in the response anyway.
 */
#define INET_FIFO	0xfffe00000000LL

#define FWIPDEBUG	if (fwipdebug) if_printf
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwip_start (struct ifnet *);
static int fwip_ioctl (struct ifnet *, u_long, caddr_t);
static void fwip_init (void *);

static void fwip_post_busreset (void *);
static void fwip_output_callback (struct fw_xfer *);
static void fwip_async_output (struct fwip_softc *, struct ifnet *);
static void fwip_start_send (void *, int);
static void fwip_stream_input (struct fw_xferq *);
static void fwip_unicast_input(struct fw_xfer *);

static int fwipdebug = 0;
static int broadcast_channel = 0xc0 | 0x1f; /*  tag | channel(XXX) */
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	"Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
	0, "Length of the receive queue");

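/*
 * Optional DEVICE_POLLING support: poll requests are simply forwarded
 * to the FireWire controller's poll method.
 */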
#ifdef DEVICE_POLLING
static poll_handler_t fwip_poll;

static int
fwip_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fwip_softc *fwip;
	struct firewire_comm *fc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (0);

	fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	fc = fwip->fd.fc;
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
	return (0);
}
#endif /* DEVICE_POLLING */

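/*
 * Newbus identify/probe: add one fwip child per FireWire bus and
 * accept only the instance whose unit number matches its parent's.
 */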
static void
fwip_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent));
}

static int
fwip_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(pa)) {
		return (ENXIO);
	}

	device_set_desc(dev, "IP over FireWire");
	return (0);
}

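/*
 * Attach: allocate the IEEE 1394 ifnet, publish our EUI-64 and unicast
 * FIFO address as the link-level address and register the interface
 * with the firewire layer.
 */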
static int
fwip_attach(device_t dev)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int unit, s;
	struct fw_hwaddr *hwaddr;

	fwip = ((struct fwip_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);
	ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);
	if (ifp == NULL)
		return (ENOSPC);

	mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
	/* XXX */
	fwip->dma_ch = -1;

	fwip->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwip->fd.fc->speed;

	fwip->fd.dev = dev;
	fwip->fd.post_explore = NULL;
	fwip->fd.post_busreset = fwip_post_busreset;
	fwip->fw_softc.fwip = fwip;
	TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);

	/*
	 * Encode our hardware address the way that arp likes it.
	 */
	hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr;
	hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
	hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
	hwaddr->sspd = fwip->fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */
	ifp->if_softc = &fwip->fw_softc;

	if_initname(ifp, device_get_name(dev), unit);
	ifp->if_init = fwip_init;
	ifp->if_start = fwip_start;
	ifp->if_ioctl = fwip_ioctl;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	s = splimp();
	firewire_ifattach(ifp, hwaddr);
	splx(s);

	FWIPDEBUG(ifp, "interface created\n");
	return 0;
}

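/*
 * Stop: shut down the broadcast receive DMA channel, release the
 * receive buffers and all pre-allocated xfers, and mark the interface
 * as no longer running.
 */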
static void
fwip_stop(struct fwip_softc *fwip)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwip->fd.fc;

	if (fwip->dma_ch >= 0) {
		xferq = fc->ir[fwip->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwip->dma_ch);
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand =  NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWIP);

		fw_bindremove(fc, &fwip->fwb);
		for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwip->xferlist);

		xferq->bulkxfer =  NULL;
		fwip->dma_ch = -1;
	}

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
fwip_detach(device_t dev)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int s;

	fwip = (struct fwip_softc *)device_get_softc(dev);
	ifp = fwip->fw_softc.fwip_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	s = splimp();

	fwip_stop(fwip);
	firewire_ifdetach(ifp);
	if_free(ifp);
	mtx_destroy(&fwip->mtx);

	splx(s);
	return 0;
}

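/*
 * Init: open an isochronous receive DMA channel for the broadcast
 * channel, allocate receive buffers, bind the unicast FIFO address
 * range and pre-allocate transmit xfers, then enable reception.
 */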
static void
fwip_init(void *arg)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip;
	struct firewire_comm *fc;
	struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWIPDEBUG(ifp, "initializing\n");

	fc = fwip->fd.fc;
#define START 0
	if (fwip->dma_ch < 0) {
		fwip->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwip->dma_ch < 0)
			return;
		xferq = fc->ir[fwip->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
				FWXFERQ_HANDLER | FWXFERQ_STREAM;
		xferq->flag &= ~0xff;
		xferq->flag |= broadcast_channel & 0xff;
		/* register fwip_input handler */
		xferq->sc = (caddr_t) fwip;
		xferq->hand = fwip_stream_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
							M_FWIP, M_WAITOK);
		if (xferq->bulkxfer == NULL) {
			printf("if_fwip: malloc failed\n");
			return;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
					&xferq->bulkxfer[i], link);
		}

		fwip->fwb.start = INET_FIFO;
		fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */

		/* pre-allocate xfer */
		STAILQ_INIT(&fwip->fwb.xferlist);
		for (i = 0; i < rx_queue_len; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xfer->recv.payload = mtod(m, uint32_t *);
			xfer->recv.pay_len = MCLBYTES;
			xfer->hand = fwip_unicast_input;
			xfer->fc = fc;
			xfer->sc = (caddr_t)fwip;
			xfer->mbuf = m;
			STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
		}
		fw_bindadd(fc, &fwip->fwb);

		STAILQ_INIT(&fwip->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwip->fd.fc;
			xfer->sc = (caddr_t)fwip;
			xfer->hand = fwip_output_callback;
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwip->dma_ch];

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwip->dma_ch);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

#if 0
	/* attempt to start output */
	fwip_start(ifp);
#endif
}

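/*
 * Ioctl: bring the interface up or down, accept multicast list changes
 * without any filter reprogramming, toggle DEVICE_POLLING when built
 * with it, and pass everything else to firewire_ioctl().
 */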
static int
fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				fwip_init(&fwip->fw_softc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				fwip_stop(fwip);
		}
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwip->fd.fc;

		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(fwip_poll, ifp);
			if (error)
				return (error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			ifp->if_capenable &= ~IFCAP_POLLING;
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
	default:
		s = splimp();
		error = firewire_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
	}

	return (0);
}

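/*
 * Bus reset handler: re-register the RFC 2734 (IPv4) and RFC 3146
 * (IPv6) unit directories in the configuration ROM, forget the cached
 * unicast destination header and notify the firewire layer.
 */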
static void
fwip_post_busreset(void *arg)
{
	struct fwip_softc *fwip = arg;
	struct crom_src *src;
	struct crom_chunk *root;

	src = fwip->fd.fc->crom_src;
	root = fwip->fd.fc->crom_root;

	/* RFC2734 IPv4 over IEEE1394 */
	bzero(&fwip->unit4, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR);
	crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA");
	crom_add_entry(&fwip->unit4, CSRKEY_VER, 1);
	crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4");

	/* RFC3146 IPv6 over IEEE1394 */
	bzero(&fwip->unit6, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR);
	crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA");
	crom_add_entry(&fwip->unit6, CSRKEY_VER, 2);
	crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6");

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;
	firewire_busreset(fwip->fw_softc.fwip_ifp);
}

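/*
 * Transmit completion: count errors, free the mbuf, return the xfer to
 * the free list and restart the send queue if packets are waiting.
 */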
static void
fwip_output_callback(struct fw_xfer *xfer)
{
	struct fwip_softc *fwip;
	struct ifnet *ifp;
	int s;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	/* XXX error check */
	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWIP_LOCK(fwip);
	STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
	FWIP_UNLOCK(fwip);
	splx(s);

	/* for queue full */
	if (ifp->if_snd.ifq_head != NULL) {
		fwip_start(ifp);
	}
}

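/*
 * if_start handler: drop everything if the interface has not been
 * initialized (no DMA channel), otherwise push the send queue through
 * fwip_async_output().
 */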
static void
fwip_start(struct ifnet *ifp)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	int s;

	FWIPDEBUG(ifp, "starting\n");

	if (fwip->dma_ch < 0) {
		struct mbuf	*m = NULL;

		FWIPDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			}
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	if (ifp->if_snd.ifq_len != 0)
		fwip_async_output(fwip, ifp);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	splx(s);
}

/* Async. output: GASP stream for broadcast, block write for unicast */
static void
fwip_async_output(struct fwip_softc *fwip, struct ifnet *ifp)
{
	struct firewire_comm *fc = fwip->fd.fc;
	struct mbuf *m;
	struct m_tag *mtag;
	struct fw_hwaddr *destfw;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	uint16_t nodeid;
	int error;
	int i = 0;

	xfer = NULL;
	xferq = fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
			(ifp->if_snd.ifq_head != NULL)) {
		FWIP_LOCK(fwip);
		xfer = STAILQ_FIRST(&fwip->xferlist);
		if (xfer == NULL) {
			FWIP_UNLOCK(fwip);
#if 0
			printf("if_fwip: lack of xfer\n");
#endif
			break;
		}
		STAILQ_REMOVE_HEAD(&fwip->xferlist, link);
		FWIP_UNLOCK(fwip);

		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			FWIP_LOCK(fwip);
			STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			break;
		}

		/*
		 * Dig out the link-level address which
		 * firewire_output got via arp or neighbour
		 * discovery. If we don't have a link-level address,
		 * just stick the thing on the broadcast channel.
		 */
		mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0);
		if (mtag == NULL)
			destfw = NULL;
		else
			destfw = (struct fw_hwaddr *) (mtag + 1);

		/*
		 * We don't do any bpf stuff here - the generic code
		 * in firewire_output gives the packet to bpf before
		 * it adds the link-level encapsulation.
		 */

		/*
		 * Put the mbuf in the xfer early in case we hit an
		 * error case below - fwip_output_callback will free
		 * the mbuf.
		 */
		xfer->mbuf = m;

		/*
		 * We use the arp result (if any) to add a suitable firewire
		 * packet header before handing off to the bus.
		 */
		fp = &xfer->send.hdr;
		nodeid = FWLOCALBUS | fc->nodeid;
		if ((m->m_flags & M_BCAST) || !destfw) {
			/*
			 * Broadcast packets are sent as GASP packets with
			 * specifier ID 0x00005e, version 1 on the broadcast
			 * channel. To be conservative, we send at the
			 * slowest possible speed.
			 */
			uint32_t *p;

			M_PREPEND(m, 2*sizeof(uint32_t), M_NOWAIT);
			if (m == NULL) {
				/* GASP header allocation failed; drop. */
				xfer->mbuf = NULL;
				FWIP_LOCK(fwip);
				STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
				FWIP_UNLOCK(fwip);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
			p = mtod(m, uint32_t *);
			fp->mode.stream.len = m->m_pkthdr.len;
			fp->mode.stream.chtag = broadcast_channel;
			fp->mode.stream.tcode = FWTCODE_STREAM;
			fp->mode.stream.sy = 0;
			xfer->send.spd = 0;
			p[0] = htonl(nodeid << 16);
			p[1] = htonl((0x5e << 24) | 1);
		} else {
			/*
			 * Unicast packets are sent as block writes to the
			 * target's unicast fifo address. If we can't
			 * find the node address, we just give up. We
			 * could broadcast it but that might overflow
			 * the packet size limitations due to the
			 * extra GASP header. Note: the hardware
			 * address is stored in network byte order to
			 * make life easier for ARP.
			 */
			struct fw_device *fd;
			struct fw_eui64 eui;

			eui.hi = ntohl(destfw->sender_unique_ID_hi);
			eui.lo = ntohl(destfw->sender_unique_ID_lo);
			if (fwip->last_dest.hi != eui.hi ||
			    fwip->last_dest.lo != eui.lo) {
				fd = fw_noderesolve_eui64(fc, &eui);
				if (!fd) {
					/* error */
					if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
					/* XXX set error code */
					fwip_output_callback(xfer);
					continue;

				}
				fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst;
				fwip->last_hdr.mode.wreqb.tlrt = 0;
				fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB;
				fwip->last_hdr.mode.wreqb.pri = 0;
				fwip->last_hdr.mode.wreqb.src = nodeid;
				fwip->last_hdr.mode.wreqb.dest_hi =
					ntohs(destfw->sender_unicast_FIFO_hi);
				fwip->last_hdr.mode.wreqb.dest_lo =
					ntohl(destfw->sender_unicast_FIFO_lo);
				fwip->last_hdr.mode.wreqb.extcode = 0;
				fwip->last_dest = eui;
			}

			fp->mode.wreqb = fwip->last_hdr.mode.wreqb;
			fp->mode.wreqb.len = m->m_pkthdr.len;
			xfer->send.spd = min(destfw->sspd, fc->speed);
		}

		xfer->send.pay_len = m->m_pkthdr.len;

		error = fw_asyreq(fc, -1, xfer);
		if (error == EAGAIN) {
			/*
			 * We ran out of tlabels - requeue the packet
			 * for later transmission.
			 */
			xfer->mbuf = 0;
			FWIP_LOCK(fwip);
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			IF_PREPEND(&ifp->if_snd, m);
			break;
		}
		if (error) {
			/* error */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* XXX set error code */
			fwip_output_callback(xfer);
			continue;
		} else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fc);
}

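/*
 * Deferred start: taskqueue handler that kicks the controller's
 * asynchronous transmit queue.
 */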
static void
fwip_start_send (void *arg, int count)
{
	struct fwip_softc *fwip = arg;

	fwip->fd.fc->atq->start(fwip->fd.fc);
}

/* Async. stream input */
static void
fwip_stream_input(struct fw_xferq *xferq)
{
	struct epoch_tracker et;
	struct mbuf *m, *m0;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *fwip;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	uint16_t src;
	uint32_t *p;

	fwip = (struct fwip_softc *)xferq->sc;
	ifp = fwip->fw_softc.fwip_ifp;

	NET_EPOCH_ENTER(et);
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwip->fd.fc->irx_post != NULL)
			fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("fwip_as_input: m_getcl failed\n");

		/*
		 * We must have a GASP header - leave the
		 * encapsulation sanity checks to the generic
		 * code. Remember that we also have the firewire async
		 * stream header even though that isn't accounted for
		 * in mode.stream.len.
		 */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    2*sizeof(uint32_t)) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
			+ sizeof(fp->mode.stream);

		/*
		 * If we received the packet on the broadcast channel,
		 * mark it as broadcast, otherwise we assume it must
		 * be multicast.
		 */
		if (fp->mode.stream.chtag == broadcast_channel)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;

		/*
		 * Make sure we recognise the GASP specifier and
		 * version.
		 */
		p = mtod(m, uint32_t *);
		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e
		    || (ntohl(p[2]) & 0xffffff) != 1) {
			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
			    ntohl(p[1]), ntohl(p[2]));
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/*
		 * Record the sender ID for possible BPF usage.
		 */
		src = ntohl(p[1]) >> 16;
		if (bpf_peers_present(ifp->if_bpf)) {
			mtag = m_tag_alloc(MTAG_FIREWIRE,
			    MTAG_FIREWIRE_SENDER_EUID,
			    2*sizeof(uint32_t), M_NOWAIT);
			if (mtag) {
				/* bpf wants it in network byte order */
				struct fw_device *fd;
				uint32_t *p = (uint32_t *) (mtag + 1);
				fd = fw_noderesolve_nodeid(fwip->fd.fc,
				    src & 0x3f);
				if (fd) {
					p[0] = htonl(fd->eui.hi);
					p[1] = htonl(fd->eui.lo);
				} else {
					p[0] = 0;
					p[1] = 0;
				}
				m_tag_prepend(m, mtag);
			}
		}

		/*
		 * Trim off the GASP header
		 */
		m_adj(m, 3*sizeof(uint32_t));
		m->m_pkthdr.rcvif = ifp;
		firewire_input(ifp, m, src);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	NET_EPOCH_EXIT(et);
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch);
}

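/* Replenish the unicast receive queue with a fresh cluster mbuf. */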
static __inline void
fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
{
	struct mbuf *m;

	/*
	 * We have finished with a unicast xfer. Allocate a new
	 * cluster and stick it on the back of the input queue.
	 */
	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
	xfer->mbuf = m;
	xfer->recv.payload = mtod(m, uint32_t *);
	xfer->recv.pay_len = MCLBYTES;
	STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
}

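/*
 * Unicast input: handle block write requests arriving at INET_FIFO,
 * recycle the receive buffer and pass accepted packets to
 * firewire_input() for reassembly and delivery.
 */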
static void
fwip_unicast_input(struct fw_xfer *xfer)
{
	uint64_t address;
	struct mbuf *m;
	struct m_tag *mtag;
	struct epoch_tracker et;
	struct ifnet *ifp;
	struct fwip_softc *fwip;
	struct fw_pkt *fp;
	//struct fw_pkt *sfp;
	int rtcode;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	m = xfer->mbuf;
	xfer->mbuf = 0;
	fp = &xfer->recv.hdr;

	/*
	 * Check the fifo address - we only accept addresses of
	 * exactly INET_FIFO.
	 */
	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
		| fp->mode.wreqb.dest_lo;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		rtcode = FWRCODE_ER_TYPE;
	} else if (address != INET_FIFO) {
		rtcode = FWRCODE_ER_ADDR;
	} else {
		rtcode = FWRCODE_COMPLETE;
	}
	NET_EPOCH_ENTER(et);

	/*
	 * Pick up a new mbuf and stick it on the back of the receive
	 * queue.
	 */
	fwip_unicast_input_recycle(fwip, xfer);

	/*
	 * If we've already rejected the packet, give up now.
	 */
	if (rtcode != FWRCODE_COMPLETE) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		goto done;
	}

	if (bpf_peers_present(ifp->if_bpf)) {
		/*
		 * Record the sender ID for possible BPF usage.
		 */
		mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID,
		    2*sizeof(uint32_t), M_NOWAIT);
		if (mtag) {
			/* bpf wants it in network byte order */
			struct fw_device *fd;
			uint32_t *p = (uint32_t *) (mtag + 1);
			fd = fw_noderesolve_nodeid(fwip->fd.fc,
			    fp->mode.wreqb.src & 0x3f);
			if (fd) {
				p[0] = htonl(fd->eui.hi);
				p[1] = htonl(fd->eui.lo);
			} else {
				p[0] = 0;
				p[1] = 0;
			}
			m_tag_prepend(m, mtag);
		}
	}

	/*
	 * Hand off to the generic encapsulation code. We don't use
	 * ifp->if_input so that we can pass the source nodeid as an
	 * argument to facilitate link-level fragment reassembly.
	 */
	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
	m->m_pkthdr.rcvif = ifp;
	firewire_input(ifp, m, fp->mode.wreqb.src);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
done:
	NET_EPOCH_EXIT(et);
}

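/* Newbus plumbing. */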
static devclass_t fwip_devclass;

static device_method_t fwip_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	fwip_identify),
	DEVMETHOD(device_probe,		fwip_probe),
	DEVMETHOD(device_attach,	fwip_attach),
	DEVMETHOD(device_detach,	fwip_detach),
	{ 0, 0 }
};

static driver_t fwip_driver = {
	"fwip",
	fwip_methods,
	sizeof(struct fwip_softc),
};

DRIVER_MODULE(fwip, firewire, fwip_driver, fwip_devclass, 0, 0);
MODULE_VERSION(fwip, 1);
MODULE_DEPEND(fwip, firewire, 1, 1, 1);