/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2004
 *	Doug Rabson
 * Copyright (c) 2002-2003
 * 	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/firewire.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <dev/firewire/firewire.h>
#include <dev/firewire/firewirereg.h>
#include <dev/firewire/iec13213.h>
#include <dev/firewire/if_fwipvar.h>

/*
 * We really need a mechanism for allocating regions in the FIFO
 * address space. We pick an address in the OHCI controller's 'middle'
 * address space. This means that the controller will automatically
 * send responses for us, which is fine since we don't have any
 * important information to put in the response anyway.
 */
#define INET_FIFO	0xfffe00000000LL

#define FWIPDEBUG	if (fwipdebug) if_printf
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)

/* network interface */
static void fwip_start (if_t);
static int fwip_ioctl (if_t, u_long, caddr_t);
static void fwip_init (void *);

static void fwip_post_busreset (void *);
static void fwip_output_callback (struct fw_xfer *);
static void fwip_async_output (struct fwip_softc *, if_t);
static void fwip_start_send (void *, int);
static void fwip_stream_input (struct fw_xferq *);
static void fwip_unicast_input(struct fw_xfer *);

static int fwipdebug = 0;
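/*
 * Channel/tag byte used for async stream (GASP) traffic: 0xc0 puts
 * GASP tag 3 in the top two bits and 0x1f selects channel 31, used
 * here as the broadcast channel.  The XXX in the comment below flags
 * that the channel is hard-coded rather than negotiated.
 */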
static int broadcast_channel = 0xc0 | 0x1f; /*  tag | channel(XXX) */
static int tx_speed = 2;
static int rx_queue_len = FWMAXQUEUE;

static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
SYSCTL_DECL(_hw_firewire);
static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	"Firewire ip subsystem");
SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
	0, "Length of the receive queue");

#ifdef DEVICE_POLLING
static poll_handler_t fwip_poll;

static int
fwip_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct fwip_softc *fwip;
	struct firewire_comm *fc;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
		return (0);

	fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	fc = fwip->fd.fc;
	fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
	return (0);
}
#endif /* DEVICE_POLLING */

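/*
 * Bus glue: identify adds one fwip child per FireWire bus and probe
 * accepts only the instance whose unit number matches its parent's.
 */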
static void
fwip_identify(driver_t *driver, device_t parent)
{
	BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent));
}

static int
fwip_probe(device_t dev)
{
	device_t pa;

	pa = device_get_parent(dev);
	if (device_get_unit(dev) != device_get_unit(pa)) {
		return (ENXIO);
	}

	device_set_desc(dev, "IP over FireWire");
	return (0);
}

static int
fwip_attach(device_t dev)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int unit, s;
	struct fw_hwaddr *hwaddr;

	fwip = ((struct fwip_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);
	ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);

	mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
	/* XXX */
	fwip->dma_ch = -1;

	fwip->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwip->fd.fc->speed;

	fwip->fd.dev = dev;
	fwip->fd.post_explore = NULL;
	fwip->fd.post_busreset = fwip_post_busreset;
	fwip->fw_softc.fwip = fwip;
	TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);

	/*
	 * Encode our hardware the way that arp likes it.
	 */
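	/*
	 * The fw_hwaddr filled in below is the interface's link-level
	 * address: our EUI-64, max_rec and speed, plus the unicast FIFO
	 * address we listen on.  The multi-byte fields are kept in
	 * network byte order for the benefit of ARP.
	 */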
	hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr;
	hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
	hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
	hwaddr->sspd = fwip->fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */
	if_setsoftc(ifp, &fwip->fw_softc);

	if_initname(ifp, device_get_name(dev), unit);
	if_setinitfn(ifp, fwip_init);
	if_setstartfn(ifp, fwip_start);
	if_setioctlfn(ifp, fwip_ioctl);
	if_setflags(ifp, (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST));
	if_setsendqlen(ifp, TX_MAX_QUEUE);
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	s = splimp();
	firewire_ifattach(ifp, hwaddr);
	splx(s);

	FWIPDEBUG(ifp, "interface created\n");
	return (0);
}

static void
fwip_stop(struct fwip_softc *fwip)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	if_t ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwip->fd.fc;

	if (fwip->dma_ch >= 0) {
		xferq = fc->ir[fwip->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwip->dma_ch);
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand =  NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWIP);

		fw_bindremove(fc, &fwip->fwb);
		for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwip->xferlist);

		xferq->bulkxfer =  NULL;
		fwip->dma_ch = -1;
	}

	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
}

static int
fwip_detach(device_t dev)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int s;

	fwip = (struct fwip_softc *)device_get_softc(dev);
	ifp = fwip->fw_softc.fwip_ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	s = splimp();

	fwip_stop(fwip);
	firewire_ifdetach(ifp);
	if_free(ifp);
	mtx_destroy(&fwip->mtx);

	splx(s);
	return 0;
}

static void
fwip_init(void *arg)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip;
	struct firewire_comm *fc;
	if_t ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWIPDEBUG(ifp, "initializing\n");

	fc = fwip->fd.fc;
#define START 0
	if (fwip->dma_ch < 0) {
		fwip->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwip->dma_ch < 0)
			return;
		xferq = fc->ir[fwip->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
				FWXFERQ_HANDLER | FWXFERQ_STREAM;
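		/*
		 * The low byte of the flags selects the channel and tag to
		 * receive on (cleared via FWXFERQ_CHTAGMASK in fwip_stop()).
		 */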
		xferq->flag &= ~0xff;
		xferq->flag |= broadcast_channel & 0xff;
		/* register fwip_input handler */
		xferq->sc = (caddr_t) fwip;
		xferq->hand = fwip_stream_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
							M_FWIP, M_WAITOK);
		if (xferq->bulkxfer == NULL) {
			printf("if_fwip: malloc failed\n");
			return;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
					&xferq->bulkxfer[i], link);
		}

		fwip->fwb.start = INET_FIFO;
		fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */

		/* pre-allocate xfer */
		STAILQ_INIT(&fwip->fwb.xferlist);
		for (i = 0; i < rx_queue_len; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xfer->recv.payload = mtod(m, uint32_t *);
			xfer->recv.pay_len = MCLBYTES;
			xfer->hand = fwip_unicast_input;
			xfer->fc = fc;
			xfer->sc = (caddr_t)fwip;
			xfer->mbuf = m;
			STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
		}
		fw_bindadd(fc, &fwip->fwb);

		STAILQ_INIT(&fwip->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWIP);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwip->fd.fc;
			xfer->sc = (caddr_t)fwip;
			xfer->hand = fwip_output_callback;
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwip->dma_ch];

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwip->dma_ch);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

#if 0
	/* attempt to start output */
	fwip_start(ifp);
#endif
}

static int
fwip_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	int s, error;

	switch (cmd) {
	case SIOCSIFFLAGS:
		s = splimp();
		if (if_getflags(ifp) & IFF_UP) {
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				fwip_init(&fwip->fw_softc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				fwip_stop(fwip);
		}
		splx(s);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
	    {
		struct ifreq *ifr = (struct ifreq *) data;
		struct firewire_comm *fc = fwip->fd.fc;

		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
			error = ether_poll_register(fwip_poll, ifp);
			if (error)
				return (error);
			/* Disable interrupts */
			fc->set_intr(fc, 0);
			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    if_getcapenable(ifp) & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			fc->set_intr(fc, 1);
			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			return (error);
		}
	    }
#endif /* DEVICE_POLLING */
		break;
	default:
		s = splimp();
		error = firewire_ioctl(ifp, cmd, data);
		splx(s);
		return (error);
	}

	return (0);
}

static void
fwip_post_busreset(void *arg)
{
	struct fwip_softc *fwip = arg;
	struct crom_src *src;
	struct crom_chunk *root;

	src = fwip->fd.fc->crom_src;
	root = fwip->fd.fc->crom_root;

	/* RFC2734 IPv4 over IEEE1394 */
	bzero(&fwip->unit4, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR);
	crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA");
	crom_add_entry(&fwip->unit4, CSRKEY_VER, 1);
	crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4");

	/* RFC3146 IPv6 over IEEE1394 */
	bzero(&fwip->unit6, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR);
	crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA");
	crom_add_entry(&fwip->unit6, CSRKEY_VER, 2);
	crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6");

	fwip->last_dest.hi = 0;
	fwip->last_dest.lo = 0;
	firewire_busreset(fwip->fw_softc.fwip_ifp);
}

static void
fwip_output_callback(struct fw_xfer *xfer)
{
	struct fwip_softc *fwip;
	if_t ifp;
	int s;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	/* XXX error check */
	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	s = splimp();
	FWIP_LOCK(fwip);
	STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
	FWIP_UNLOCK(fwip);
	splx(s);

	/* for queue full */
	if (!if_sendq_empty(ifp)) {
		fwip_start(ifp);
	}
}

static void
fwip_start(if_t ifp)
{
	struct fwip_softc *fwip = ((struct fwip_eth_softc *)if_getsoftc(ifp))->fwip;
	int s;

	FWIPDEBUG(ifp, "starting\n");

	if (fwip->dma_ch < 0) {
		struct mbuf	*m = NULL;

		FWIPDEBUG(ifp, "not ready\n");

		s = splimp();
		do {
			m = if_dequeue(ifp);
			if (m != NULL)
				m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		} while (m != NULL);
		splx(s);

		return;
	}

	s = splimp();
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);

	if (!if_sendq_empty(ifp))
		fwip_async_output(fwip, ifp);

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	splx(s);
}

/* Async. stream output */
static void
fwip_async_output(struct fwip_softc *fwip, if_t ifp)
{
	struct firewire_comm *fc = fwip->fd.fc;
	struct mbuf *m;
	struct m_tag *mtag;
	struct fw_hwaddr *destfw;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	uint16_t nodeid;
	int error;
	int i = 0;

	xfer = NULL;
	xferq = fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
			!if_sendq_empty(ifp)) {
		FWIP_LOCK(fwip);
		xfer = STAILQ_FIRST(&fwip->xferlist);
		if (xfer == NULL) {
			FWIP_UNLOCK(fwip);
#if 0
			printf("if_fwip: lack of xfer\n");
#endif
			break;
		}
		STAILQ_REMOVE_HEAD(&fwip->xferlist, link);
		FWIP_UNLOCK(fwip);

		m = if_dequeue(ifp);
		if (m == NULL) {
			FWIP_LOCK(fwip);
			STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			break;
		}

		/*
		 * Dig out the link-level address which
		 * firewire_output got via arp or neighbour
		 * discovery. If we don't have a link-level address,
		 * just stick the thing on the broadcast channel.
		 */
		mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0);
		if (mtag == NULL)
			destfw = NULL;
		else
			destfw = (struct fw_hwaddr *) (mtag + 1);

		/*
		 * We don't do any bpf stuff here - the generic code
		 * in firewire_output gives the packet to bpf before
		 * it adds the link-level encapsulation.
		 */

		/*
		 * Put the mbuf in the xfer early in case we hit an
		 * error case below - fwip_output_callback will free
		 * the mbuf.
		 */
		xfer->mbuf = m;

		/*
		 * We use the arp result (if any) to add a suitable firewire
		 * packet header before handing off to the bus.
		 */
		fp = &xfer->send.hdr;
		nodeid = FWLOCALBUS | fc->nodeid;
		if ((m->m_flags & M_BCAST) || !destfw) {
			/*
			 * Broadcast packets are sent as GASP packets with
			 * specifier ID 0x00005e, version 1 on the broadcast
			 * channel. To be conservative, we send at the
			 * slowest possible speed.
			 */
			uint32_t *p;

			M_PREPEND(m, 2*sizeof(uint32_t), M_NOWAIT);
			p = mtod(m, uint32_t *);
			fp->mode.stream.len = m->m_pkthdr.len;
			fp->mode.stream.chtag = broadcast_channel;
			fp->mode.stream.tcode = FWTCODE_STREAM;
			fp->mode.stream.sy = 0;
			xfer->send.spd = 0;
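			/*
			 * GASP header: the first quadlet carries our node ID
			 * in its upper 16 bits (specifier_ID_hi is zero), the
			 * second carries specifier_ID_lo (0x5e) and version 1.
			 */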
			p[0] = htonl(nodeid << 16);
			p[1] = htonl((0x5e << 24) | 1);
		} else {
			/*
			 * Unicast packets are sent as block writes to the
			 * target's unicast fifo address. If we can't
			 * find the node address, we just give up. We
			 * could broadcast it but that might overflow
			 * the packet size limitations due to the
			 * extra GASP header. Note: the hardware
			 * address is stored in network byte order to
			 * make life easier for ARP.
			 */
			struct fw_device *fd;
			struct fw_eui64 eui;

			eui.hi = ntohl(destfw->sender_unique_ID_hi);
			eui.lo = ntohl(destfw->sender_unique_ID_lo);
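			/*
			 * Rebuild the write-request header only when the
			 * destination has changed; otherwise reuse the copy
			 * cached in last_hdr below.
			 */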
			if (fwip->last_dest.hi != eui.hi ||
			    fwip->last_dest.lo != eui.lo) {
				fd = fw_noderesolve_eui64(fc, &eui);
				if (!fd) {
					/* error */
					if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
					/* XXX set error code */
					fwip_output_callback(xfer);
					continue;
				}
				fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst;
				fwip->last_hdr.mode.wreqb.tlrt = 0;
				fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB;
				fwip->last_hdr.mode.wreqb.pri = 0;
				fwip->last_hdr.mode.wreqb.src = nodeid;
				fwip->last_hdr.mode.wreqb.dest_hi =
					ntohs(destfw->sender_unicast_FIFO_hi);
				fwip->last_hdr.mode.wreqb.dest_lo =
					ntohl(destfw->sender_unicast_FIFO_lo);
				fwip->last_hdr.mode.wreqb.extcode = 0;
				fwip->last_dest = eui;
			}

			fp->mode.wreqb = fwip->last_hdr.mode.wreqb;
			fp->mode.wreqb.len = m->m_pkthdr.len;
			xfer->send.spd = min(destfw->sspd, fc->speed);
		}

		xfer->send.pay_len = m->m_pkthdr.len;

		error = fw_asyreq(fc, -1, xfer);
		if (error == EAGAIN) {
			/*
			 * We ran out of tlabels - requeue the packet
			 * for later transmission.
			 */
			xfer->mbuf = 0;
			FWIP_LOCK(fwip);
			STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
			FWIP_UNLOCK(fwip);
			if_sendq_prepend(ifp, m);
			break;
		}
		if (error) {
			/* error */
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* XXX set error code */
			fwip_output_callback(xfer);
			continue;
		} else {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			i++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fc);
}

static void
fwip_start_send (void *arg, int count)
{
	struct fwip_softc *fwip = arg;

	fwip->fd.fc->atq->start(fwip->fd.fc);
}

/* Async. stream input */
static void
fwip_stream_input(struct fw_xferq *xferq)
{
	struct epoch_tracker et;
	struct mbuf *m, *m0;
	struct m_tag *mtag;
	if_t ifp;
	struct fwip_softc *fwip;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	uint16_t src;
	uint32_t *p;

	fwip = (struct fwip_softc *)xferq->sc;
	ifp = fwip->fw_softc.fwip_ifp;

	NET_EPOCH_ENTER(et);
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwip->fd.fc->irx_post != NULL)
			fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("fwip_as_input: m_getcl failed\n");

		/*
		 * We must have a GASP header - leave the
		 * encapsulation sanity checks to the generic
		 * code. Remember that we also have the firewire async
		 * stream header even though that isn't accounted for
		 * in mode.stream.len.
		 */
		if (sxfer->resp != 0 || fp->mode.stream.len <
		    2*sizeof(uint32_t)) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
			+ sizeof(fp->mode.stream);

		/*
		 * If we received the packet on the broadcast channel,
		 * mark it as broadcast, otherwise we assume it must
		 * be multicast.
		 */
		if (fp->mode.stream.chtag == broadcast_channel)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;

		/*
		 * Make sure we recognise the GASP specifier and
		 * version.
		 */
		p = mtod(m, uint32_t *);
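		/*
		 * p[0] is the async stream header; p[1] and p[2] are the
		 * GASP header: source_ID | specifier_ID_hi, followed by
		 * specifier_ID_lo | version.
		 */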
		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e
		    || (ntohl(p[2]) & 0xffffff) != 1) {
			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
			    ntohl(p[1]), ntohl(p[2]));
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		/*
		 * Record the sender ID for possible BPF usage.
		 */
		src = ntohl(p[1]) >> 16;
		if (bpf_peers_present_if(ifp)) {
			mtag = m_tag_alloc(MTAG_FIREWIRE,
			    MTAG_FIREWIRE_SENDER_EUID,
			    2*sizeof(uint32_t), M_NOWAIT);
			if (mtag) {
				/* bpf wants it in network byte order */
				struct fw_device *fd;
				uint32_t *p = (uint32_t *) (mtag + 1);
				fd = fw_noderesolve_nodeid(fwip->fd.fc,
				    src & 0x3f);
				if (fd) {
					p[0] = htonl(fd->eui.hi);
					p[1] = htonl(fd->eui.lo);
				} else {
					p[0] = 0;
					p[1] = 0;
				}
				m_tag_prepend(m, mtag);
			}
		}

		/*
		 * Trim off the GASP header
		 */
		m_adj(m, 3*sizeof(uint32_t));
		m->m_pkthdr.rcvif = ifp;
		firewire_input(ifp, m, src);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	}
	NET_EPOCH_EXIT(et);
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch);
}

static __inline void
fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
{
	struct mbuf *m;

	/*
	 * We have finished with a unicast xfer. Allocate a new
	 * cluster and stick it on the back of the input queue.
	 */
	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
	xfer->mbuf = m;
	xfer->recv.payload = mtod(m, uint32_t *);
	xfer->recv.pay_len = MCLBYTES;
	STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
}

static void
fwip_unicast_input(struct fw_xfer *xfer)
{
	uint64_t address;
	struct mbuf *m;
	struct m_tag *mtag;
	struct epoch_tracker et;
	if_t ifp;
	struct fwip_softc *fwip;
	struct fw_pkt *fp;
	//struct fw_pkt *sfp;
	int rtcode;

	fwip = (struct fwip_softc *)xfer->sc;
	ifp = fwip->fw_softc.fwip_ifp;
	m = xfer->mbuf;
	xfer->mbuf = 0;
	fp = &xfer->recv.hdr;

	/*
	 * Check the fifo address - we only accept addresses of
	 * exactly INET_FIFO.
	 */
	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
		| fp->mode.wreqb.dest_lo;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		rtcode = FWRCODE_ER_TYPE;
	} else if (address != INET_FIFO) {
		rtcode = FWRCODE_ER_ADDR;
	} else {
		rtcode = FWRCODE_COMPLETE;
	}
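	/*
	 * The controller has already sent the write response for us (see
	 * the INET_FIFO comment above), so rtcode is only used here to
	 * decide whether to keep or drop the packet.
	 */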
	NET_EPOCH_ENTER(et);

	/*
	 * Pick up a new mbuf and stick it on the back of the receive
	 * queue.
	 */
	fwip_unicast_input_recycle(fwip, xfer);

	/*
	 * If we've already rejected the packet, give up now.
	 */
	if (rtcode != FWRCODE_COMPLETE) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		goto done;
	}

	if (bpf_peers_present_if(ifp)) {
		/*
		 * Record the sender ID for possible BPF usage.
		 */
		mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID,
		    2*sizeof(uint32_t), M_NOWAIT);
		if (mtag) {
			/* bpf wants it in network byte order */
			struct fw_device *fd;
			uint32_t *p = (uint32_t *) (mtag + 1);
			fd = fw_noderesolve_nodeid(fwip->fd.fc,
			    fp->mode.wreqb.src & 0x3f);
			if (fd) {
				p[0] = htonl(fd->eui.hi);
				p[1] = htonl(fd->eui.lo);
			} else {
				p[0] = 0;
				p[1] = 0;
			}
			m_tag_prepend(m, mtag);
		}
	}

	/*
	 * Hand off to the generic encapsulation code. We don't use
	 * ifp->if_input so that we can pass the source nodeid as an
	 * argument to facilitate link-level fragment reassembly.
	 */
	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
	m->m_pkthdr.rcvif = ifp;
	firewire_input(ifp, m, fp->mode.wreqb.src);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
done:
	NET_EPOCH_EXIT(et);
}

static device_method_t fwip_methods[] = {
	/* device interface */
	DEVMETHOD(device_identify,	fwip_identify),
	DEVMETHOD(device_probe,		fwip_probe),
	DEVMETHOD(device_attach,	fwip_attach),
	DEVMETHOD(device_detach,	fwip_detach),
	{ 0, 0 }
};

static driver_t fwip_driver = {
	"fwip",
	fwip_methods,
	sizeof(struct fwip_softc),
};

DRIVER_MODULE(fwip, firewire, fwip_driver, 0, 0);
MODULE_VERSION(fwip, 1);
MODULE_DEPEND(fwip, firewire, 1, 1, 1);