xref: /netbsd/sys/dev/qbus/if_dmc.c (revision c4a72b64)
1 /*	$NetBSD: if_dmc.c,v 1.6 2002/10/02 16:52:27 thorpej Exp $	*/
2 /*
3  * Copyright (c) 1982, 1986 Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by the University of
17  *	California, Berkeley and its contributors.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)if_dmc.c	7.10 (Berkeley) 12/16/90
35  */
36 
37 /*
38  * DMC11 device driver, internet version
39  *
40  *	Bill Nesheim
41  *	Cornell University
42  *
43  *	Lou Salkind
44  *	New York University
45  */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: if_dmc.c,v 1.6 2002/10/02 16:52:27 thorpej Exp $");
49 
50 #undef DMCDEBUG	/* for base table dump on fatal error */
51 
52 #include "opt_inet.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/mbuf.h>
57 #include <sys/ioctl.h>
58 #include <sys/socket.h>
59 #include <sys/syslog.h>
60 #include <sys/device.h>
61 
62 #include <net/if.h>
63 #include <net/netisr.h>
64 
65 #ifdef	INET
66 #include <netinet/in.h>
67 #include <netinet/in_var.h>
68 #endif
69 
70 #include <machine/bus.h>
71 
72 #include <dev/qbus/ubareg.h>
73 #include <dev/qbus/ubavar.h>
74 #include <dev/qbus/if_uba.h>
75 
76 #include <dev/qbus/if_dmcreg.h>
77 
78 
/*
 * output timeout value, sec.; should depend on line speed.
 */
static int dmc_timeout = 20;

#define NRCV 7			/* number of receive buffers */
#define NXMT 3			/* number of transmit buffers */
#define NCMDS	(NRCV+NXMT+4)	/* size of command queue */

/*
 * CSR access shorthands.  These expand using the bus_space tag/handle
 * cached in the softc, so a variable named 'sc' must be in scope at
 * every use site.
 */
#define DMC_WBYTE(csr, val) \
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
#define DMC_WWORD(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define DMC_RBYTE(csr) \
	bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
#define DMC_RWORD(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)


#ifdef DMCDEBUG
#define printd if(dmcdebug)printf
int dmcdebug = 0;
#endif

/* error reporting intervals: log every Nth occurrence of each soft error */
#define DMC_RPNBFS	50	/* no receive buffers */
#define DMC_RPDSC	1	/* disconnect */
#define DMC_RPTMO	10	/* timeout */
#define DMC_RPDCK	10	/* data check */
108 
/*
 * One queued request for the DMC's input command port: the command
 * byte written to BSEL0 plus the two 16-bit arguments that go into
 * SEL4/SEL6 when the device raises RDYI.
 */
struct  dmc_command {
	char	qp_cmd;		/* command */
	short	qp_ubaddr;	/* buffer address */
	short	qp_cc;		/* character count || XMEM */
	struct	dmc_command *qp_next;	/* next command on queue */
};

/* Per-buffer bookkeeping for the receive and transmit buffer pools. */
struct dmcbufs {
	int	ubinfo;		/* from uballoc */
	short	cc;		/* buffer size */
	short	flags;		/* access control */
};
/* values for dmcbufs flags */
#define	DBUF_OURS	0	/* buffer is available */
#define	DBUF_DMCS	1	/* buffer claimed by somebody */
#define	DBUF_XMIT	4	/* transmit buffer */
#define	DBUF_RCV	8	/* receive buffer */
125 
126 
/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a  set of 7 UBA interface structures
 * for each, which
 * contain information about the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
struct dmc_softc {
	struct	device sc_dev;		/* Configuration common part */
	struct	ifnet sc_if;		/* network-visible interface */
	short	sc_oused;		/* output buffers currently in use */
	short	sc_iused;		/* input buffers given to DMC */
	short	sc_flag;		/* flags */
	struct	ubinfo sc_ui;		/* UBA mapping info for base table */
	int	sc_errors[4];		/* non-fatal error counters */
	bus_space_tag_t sc_iot;		/* CSR bus space tag */
	bus_addr_t sc_ioh;		/* CSR bus space handle */
	bus_dma_tag_t sc_dmat;		/* DMA tag from autoconfiguration */
	struct	evcnt sc_rintrcnt;	/* Interrupt counting */
	struct	evcnt sc_tintrcnt;	/* Interrupt counting */
/* shorthands into sc_errors[]; indices match the DMC_RP* report intervals */
#define sc_datck sc_errors[0]
#define sc_timeo sc_errors[1]
#define sc_nobuf sc_errors[2]
#define sc_disc  sc_errors[3]
	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
	struct	ifubinfo sc_ifuba;	/* UNIBUS resources */
	struct	ifrw sc_ifr[NRCV];	/* UNIBUS receive buffer maps */
	struct	ifxmt sc_ifw[NXMT];	/* UNIBUS transmit buffer maps */
	/* command queue stuff */
	struct	dmc_command sc_cmdbuf[NCMDS];
	struct	dmc_command *sc_qhead;	/* head of command queue */
	struct	dmc_command *sc_qtail;	/* tail of command queue */
	struct	dmc_command *sc_qactive;	/* command in progress */
	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
	/* end command queue stuff */
	struct dmc_base {
		short	d_base[128];		/* DMC base table */
	} dmc_base;
};
174 
/* autoconfiguration, interrupt, and ifnet entry points (definitions below) */
static  int dmcmatch(struct device *, struct cfdata *, void *);
static  void dmcattach(struct device *, struct device *, void *);
static  int dmcinit(struct ifnet *);
static  void dmcrint(void *);
static  void dmcxint(void *);
static  void dmcdown(struct dmc_softc *sc);
static  void dmcrestart(struct dmc_softc *);
static  void dmcload(struct dmc_softc *, int, u_short, u_short);
static  void dmcstart(struct ifnet *);
static  void dmctimeout(struct ifnet *);
static  int dmcioctl(struct ifnet *, u_long, caddr_t);
static  int dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
	struct rtentry *);
static  void dmcreset(struct device *);

CFATTACH_DECL(dmc, sizeof(struct dmc_softc),
    dmcmatch, dmcattach, NULL, NULL);

/* flags (values for sc_flag) */
#define DMC_RUNNING	0x01		/* device initialized */
#define DMC_BMAPPED	0x02		/* base table mapped */
#define DMC_RESTART	0x04		/* software restart in progress */
#define DMC_ONLINE	0x08		/* device running (had a RDYO) */
198 
199 
/*
 * Queue manipulation macros for the singly-linked command lists.
 * Each is wrapped in do { } while (0) so it expands to exactly one
 * statement and is safe in unbraced if/else bodies.  Arguments are
 * parenthesized but may be evaluated more than once — do not pass
 * expressions with side effects.
 */

/* Push qp on the front of the list; fix up tail if the list was empty. */
#define	QUEUE_AT_HEAD(qp, head, tail) \
	do { \
		(qp)->qp_next = (head); \
		(head) = (qp); \
		if ((tail) == (struct dmc_command *) 0) \
			(tail) = (head); \
	} while (0)

/* Append qp at the end of the list. */
#define QUEUE_AT_TAIL(qp, head, tail) \
	do { \
		if ((tail)) \
			(tail)->qp_next = (qp); \
		else \
			(head) = (qp); \
		(qp)->qp_next = (struct dmc_command *) 0; \
		(tail) = (qp); \
	} while (0)

/* Drop the first element; clear tail when the list becomes empty. */
#define DEQUEUE(head, tail) \
	do { \
		(head) = (head)->qp_next; \
		if ((head) == (struct dmc_command *) 0) \
			(tail) = (head); \
	} while (0)
219 
/*
 * Autoconfiguration probe: master-clear the device and verify that
 * its microcode starts running (RUN asserted in BSEL1).  Uses a stack
 * "softc" only so the DMC_* CSR macros have an 'sc' to expand against.
 */
int
dmcmatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc ssc;
	struct dmc_softc *sc = &ssc;	/* fake softc for the CSR macros */
	int i;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;

	/* master clear, then busy-wait (bounded) for RUN to come up */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n" );
		return (0);
	}
	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
	/* let's be paranoid */
	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
	DELAY(1000000);
	/* clear again; second wait result is intentionally not checked */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	return (1);
}
247 
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
dmcattach(struct device *parent, struct device *self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc *sc = (struct dmc_softc *)self;

	/* cache bus handles for the CSR macros and DMA setup */
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/* fill in the ifnet; point-to-point link, no broadcast/multicast */
	strcpy(sc->sc_if.if_xname, sc->sc_dev.dv_xname);
	sc->sc_if.if_mtu = DMCMTU;
	sc->sc_if.if_init = dmcinit;
	sc->sc_if.if_output = dmcoutput;
	sc->sc_if.if_ioctl = dmcioctl;
	sc->sc_if.if_watchdog = dmctimeout;
	sc->sc_if.if_flags = IFF_POINTOPOINT;
	sc->sc_if.if_softc = sc;
	IFQ_SET_READY(&sc->sc_if.if_snd);

	/* two vectors: receive (RDYI) at cvec, transmit (RDYO) at cvec+4 */
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
	    &sc->sc_rintrcnt);
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
	    &sc->sc_tintrcnt);
	uba_reset_establish(dmcreset, &sc->sc_dev);
	/*
	 * NOTE(review): both counters register under the same name "intr";
	 * distinct names (e.g. "rintr"/"xintr") would make the event
	 * counters distinguishable — confirm against evcnt conventions.
	 */
	evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    sc->sc_dev.dv_xname, "intr");
	evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    sc->sc_dev.dv_xname, "intr");

	if_attach(&sc->sc_if);
}
285 
/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified UBA, reset its state.
 * Clearing sc_flag forces dmcinit() to remap the base table and
 * reallocate UNIBUS resources.
 */
void
dmcreset(struct device *dev)
{
	struct dmc_softc *sc = (struct dmc_softc *)dev;

	sc->sc_flag = 0;
	sc->sc_if.if_flags &= ~IFF_RUNNING;
	dmcinit(&sc->sc_if);
}
299 
/*
 * Initialization of interface; reinitialize UNIBUS usage.
 *
 * Maps the base table, allocates UNIBUS buffer maps (first time only),
 * resets the buffer pools and command queue, programs the device mode
 * from the config-file flags, and hands all receive buffers to the DMC.
 * Returns 0 in all cases (errors are reported by clearing IFF_UP).
 */
int
dmcinit(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	struct ifrw *ifrw;
	struct ifxmt *ifxp;
	struct dmcbufs *rp;
	struct dmc_command *qp;
	struct ifaddr *ifa;
	struct cfdata *ui = sc->sc_dev.dv_cfdata;
	int base;
	int s;

	/*
	 * Check to see that an address has been set
	 * (both local and destination for an address family).
	 */
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)
		if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
			break;
	if (ifa == (struct ifaddr *) 0)
		return 0;

	/* microcode must be running (see dmcmatch/dmcrestart) */
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~IFF_UP;
		return 0;
	}
	/* map base table */
	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
		sc->sc_ui.ui_size = sizeof(struct dmc_base);
		sc->sc_ui.ui_vaddr = (caddr_t)&sc->dmc_base;
		uballoc((void *)sc->sc_dev.dv_parent, &sc->sc_ui, 0);
		sc->sc_flag |= DMC_BMAPPED;
	}
	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (if_ubaminit(&sc->sc_ifuba, (void *)sc->sc_dev.dv_parent,
		    sizeof(struct dmc_header) + DMCMTU,
		    sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
			printf("%s: can't allocate uba resources\n",
			    sc->sc_dev.dv_xname);
			ifp->if_flags &= ~IFF_UP;
			return 0;
		}
		ifp->if_flags |= IFF_RUNNING;
	}
	sc->sc_flag &= ~DMC_ONLINE;
	sc->sc_flag |= DMC_RUNNING;
	/*
	 * Limit packets enqueued until we see if we're on the air.
	 * (dmcxint reopens the queue to ifqmaxlen on the first RDYO.)
	 */
	ifp->if_snd.ifq_maxlen = 3;

	/* initialize buffer pool */
	/* receives */
	ifrw = &sc->sc_ifr[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->ubinfo = ifrw->ifrw_info;
		rp->cc = DMCMTU + sizeof (struct dmc_header);
		rp->flags = DBUF_OURS|DBUF_RCV;
		ifrw++;
	}
	/* transmits */
	ifxp = &sc->sc_ifw[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->ifw_info;
		rp->cc = 0;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		ifxp++;
	}

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet
		 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
		(struct dmc_command *)0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
	}

	/* base in: tell the DMC where its base table lives */
	base = sc->sc_ui.ui_baddr;
	dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);
	/* specify half duplex operation, flags tell if primary */
	/* or secondary station */
	if (ui->cf_flags == 0)
		/* use DDCMP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->cf_flags == 1)
		/* use MAINTENENCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
	else if (ui->cf_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->cf_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* enable operation done interrupts */
	while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);
	s = splnet();
	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
			(((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
		sc->sc_iused++;
	}
	splx(s);
	return 0;
}
417 
/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
void
dmcstart(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	struct mbuf *m;
	struct dmcbufs *rp;
	int n;		/* index into sc_ifw, kept in step with rp */

	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 */
	n = 0;
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
		/* find an available buffer */
		if ((rp->flags & DBUF_DMCS) == 0) {
			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == 0)
				return;
			/* mark it dmcs */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
			rp->cc &= DMC_CCOUNT;
			/* arm the watchdog on the first outstanding buffer */
			if (++sc->sc_oused == 1)
				sc->sc_if.if_timer = dmc_timeout;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		n++;
	}
}
460 
/*
 * Utility routine to load the DMC device registers.
 *
 * Takes a command buffer off the free list, fills it in, and either
 * queues it (command port busy) or writes it to the device and lets
 * dmcrint() feed the arguments.  Reads are queued at the head so
 * receive buffers get to the device ahead of other commands.
 * Panics if all NCMDS command buffers are in flight.
 */
void
dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
{
	struct dmc_command *qp;
	int sps;

	sps = splnet();

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {	/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {	/* command port free */
		sc->sc_qactive = qp;
		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
		dmcrint(sc);
	}
	splx(sps);
}
495 
/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 *
 * For each RDYI, feeds the active command's two argument words into
 * SEL4/SEL6, recycles the command buffer, and starts the next queued
 * command if any.  Also called directly from dmcload() when the
 * command port is idle.
 */
void
dmcrint(void *arg)
{
	struct dmc_softc *sc = arg;
	struct dmc_command *qp;
	int n;

	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
		printf("%s: dmcrint no command\n", sc->sc_dev.dv_xname);
		return;
	}
	while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
		/* device is ready for the argument words of the command */
		DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
		DMC_WWORD(DMC_SEL6, qp->qp_cc);
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
		/* free command buffer */
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
		while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
			/*
			 * Can't check for RDYO here 'cause
			 * this routine isn't reentrant!
			 */
			DELAY(5);
		}
		/* move on to next command */
		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
			break;		/* all done */
		/* more commands to do, start the next one */
		qp = sc->sc_qactive;
		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
		/*
		 * Bounded wait for the device to respond with either RDYI
		 * or RDYO (RDYSCAN comes from if_dmcreg.h).
		 */
		n = RDYSCAN;
		while (n-- > 0)
			if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
			    (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
				break;
	}
	if (sc->sc_qactive) {
		/* command still pending: re-enable input interrupts */
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
		/* VMS does it twice !*$%@# */
		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
	}

}
545 
/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 *
 * Drains every completion (RDYO) posted by the device: received
 * packets are handed to the protocol input queue, finished transmits
 * release their buffer, and control completions update the soft-error
 * counters or trigger a restart on fatal errors.
 */
void
dmcxint(void *a)
{
	struct dmc_softc *sc = a;

	struct ifnet *ifp;
	struct mbuf *m;
	struct ifqueue *inq;
	int arg, pkaddr, cmd, len, s;
	struct ifrw *ifrw;
	struct dmcbufs *rp;
	struct ifxmt *ifxp;
	struct dmc_header *dh;
	char buf[64];		/* scratch for bitmask_snprintf() */

	ifp = &sc->sc_if;

	while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {

		cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
		arg = DMC_RWORD(DMC_SEL6) & 0xffff;
		/* reconstruct UNIBUS address of buffer returned to us */
		pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);
		/* release port */
		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);
		switch (cmd & 07) {

		case DMC_OUR:
			/*
			 * A read has completed.
			 * Pass packet to type specific
			 * higher-level input routine.
			 */
			ifp->if_ipackets++;
			/* find location in dmcuba struct */
			ifrw= &sc->sc_ifr[0];
			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifrw++;
			}
			if (rp >= &sc->sc_rbufs[NRCV])
				panic("dmc rcv");
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("%s: done unalloc rbuf\n",
				    sc->sc_dev.dv_xname);

			/* low bits of arg carry the byte count */
			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
			if (len < 0 || len > DMCMTU) {
				ifp->if_ierrors++;
#ifdef DMCDEBUG
				printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
				    sc->sc_dev.dv_xname, pkaddr, len);
#endif
				goto setup;
			}
			/*
			 * Deal with trailer protocol: if type is trailer
			 * get true type from first 16-bit word past data.
			 * Remember that type was trailer by setting off.
			 */
			dh = (struct dmc_header *)ifrw->ifrw_addr;
			dh->dmc_type = ntohs((u_short)dh->dmc_type);
			if (len == 0)
				goto setup;

			/*
			 * Pull packet off interface.  Off is nonzero if
			 * packet has trailing header; dmc_get will then
			 * force this header information to be at the front,
			 * but we still have to drop the type and length
			 * which are at the front of any trailer data.
			 */
			m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
			if (m == 0)
				goto setup;
			/* Shave off dmc_header */
			m_adj(m, sizeof(struct dmc_header));
			switch (dh->dmc_type) {

#ifdef INET
			case DMC_IPTYPE:
				schednetisr(NETISR_IP);
				inq = &ipintrq;
				break;
#endif
			default:
				/* unsupported protocol type: drop */
				m_freem(m);
				goto setup;
			}

			s = splnet();
			if (IF_QFULL(inq)) {
				IF_DROP(inq);
				m_freem(m);
			} else
				IF_ENQUEUE(inq, m);
			splx(s);

	setup:
			/* is this needed? */
			rp->ubinfo = ifrw->ifrw_info;

			/* re-queue this buffer so the DMC can fill it again */
			dmcload(sc, DMC_READ, rp->ubinfo,
			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
			break;

		case DMC_OUX:
			/*
			 * A write has completed, start another
			 * transfer if there is more data to send.
			 */
			ifp->if_opackets++;
			/* find associated dmcbuf structure */
			ifxp = &sc->sc_ifw[0];
			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifxp++;
			}
			if (rp >= &sc->sc_xbufs[NXMT]) {
				printf("%s: bad packet address 0x%x\n",
				    sc->sc_dev.dv_xname, pkaddr);
				break;
			}
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("%s: unallocated packet 0x%x\n",
				    sc->sc_dev.dv_xname, pkaddr);
			/* mark buffer free */
			if_ubaend(&sc->sc_ifuba, ifxp);
			rp->flags &= ~DBUF_DMCS;
			/* rearm or cancel the watchdog timer */
			if (--sc->sc_oused == 0)
				sc->sc_if.if_timer = 0;
			else
				sc->sc_if.if_timer = dmc_timeout;
			if ((sc->sc_flag & DMC_ONLINE) == 0) {
				extern int ifqmaxlen;

				/*
				 * We're on the air.
				 * Open the queue to the usual value.
				 */
				sc->sc_flag |= DMC_ONLINE;
				ifp->if_snd.ifq_maxlen = ifqmaxlen;
			}
			break;

		case DMC_CNTLO:
			/* control-out completion: error reporting */
			arg &= DMC_CNTMASK;
			if (arg & DMC_FATAL) {
				if (arg != DMC_START) {
					bitmask_snprintf(arg, CNTLO_BITS,
					    buf, sizeof(buf));
					log(LOG_ERR,
					    "%s: fatal error, flags=%s\n",
					    sc->sc_dev.dv_xname, buf);
				}
				dmcrestart(sc);
				break;
			}
			/* ACCUMULATE STATISTICS */
			switch(arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
					goto report;
				break;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
					goto report;
				break;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
					goto report;
				break;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
					goto report;
				break;
			default:
				goto report;
			}
			break;
		report:
#ifdef DMCDEBUG
			bitmask_snprintf(arg, CNTLO_BITS, buf, sizeof(buf));
			printd("%s: soft error, flags=%s\n",
			    sc->sc_dev.dv_xname, buf);
#endif
			if ((sc->sc_flag & DMC_RESTART) == 0) {
				/*
				 * kill off the dmc to get things
				 * going again by generating a
				 * procedure error
				 */
				sc->sc_flag |= DMC_RESTART;
				arg = sc->sc_ui.ui_baddr;
				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
			}
			break;

		default:
			printf("%s: bad control %o\n",
			    sc->sc_dev.dv_xname, cmd);
			break;
		}
	}
	/* completions may have freed transmit buffers: kick output */
	dmcstart(ifp);
}
762 
/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 *
 * Prepends a dmc_header carrying the protocol type, enqueues the
 * packet on if_snd, and kicks dmcstart().  Returns 0 on success or
 * an errno; on error the mbuf chain has been freed.
 */
int
dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
    struct rtentry *rt)
{
	int type, error, s;
	struct mbuf *m = m0;
	struct dmc_header *dh;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	if ((ifp->if_flags & IFF_UP) == 0) {
		error = ENETDOWN;
		goto bad;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/* map the destination address family to a dmc_header type */
	switch (dst->sa_family) {
#ifdef	INET
	case AF_INET:
		type = DMC_IPTYPE;
		break;
#endif

	case AF_UNSPEC:
		/* raw access: caller supplies a prebuilt header in sa_data */
		dh = (struct dmc_header *)dst->sa_data;
		type = dh->dmc_type;
		break;

	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
			dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local network header
	 * (there is space for a uba on a vax to step on)
	 */
	M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
	if (m == 0) {
		/* M_PREPEND freed the chain on failure */
		error = ENOBUFS;
		goto bad;
	}
	dh = mtod(m, struct dmc_header *);
	dh->dmc_type = htons((u_short)type);

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splnet();
	IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error) {
		/* mbuf is already freed */
		splx(s);
		return (error);
	}
	dmcstart(ifp);
	splx(s);
	return (0);

bad:
	m_freem(m0);
	return (error);
}
835 
836 
837 /*
838  * Process an ioctl request.
839  */
840 /* ARGSUSED */
841 int
842 dmcioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
843 {
844 	int s = splnet(), error = 0;
845 	register struct dmc_softc *sc = ifp->if_softc;
846 
847 	switch (cmd) {
848 
849 	case SIOCSIFADDR:
850 		ifp->if_flags |= IFF_UP;
851 		if ((ifp->if_flags & IFF_RUNNING) == 0)
852 			dmcinit(ifp);
853 		break;
854 
855 	case SIOCSIFDSTADDR:
856 		if ((ifp->if_flags & IFF_RUNNING) == 0)
857 			dmcinit(ifp);
858 		break;
859 
860 	case SIOCSIFFLAGS:
861 		if ((ifp->if_flags & IFF_UP) == 0 &&
862 		    sc->sc_flag & DMC_RUNNING)
863 			dmcdown(sc);
864 		else if (ifp->if_flags & IFF_UP &&
865 		    (sc->sc_flag & DMC_RUNNING) == 0)
866 			dmcrestart(sc);
867 		break;
868 
869 	default:
870 		error = EINVAL;
871 	}
872 	splx(s);
873 	return (error);
874 }
875 
876 /*
877  * Restart after a fatal error.
878  * Clear device and reinitialize.
879  */
880 void
881 dmcrestart(struct dmc_softc *sc)
882 {
883 	int s, i;
884 
885 #ifdef DMCDEBUG
886 	/* dump base table */
887 	printf("%s base table:\n", sc->sc_dev.dv_xname);
888 	for (i = 0; i < sizeof (struct dmc_base); i++)
889 		printf("%o\n" ,dmc_base[unit].d_base[i]);
890 #endif
891 
892 	dmcdown(sc);
893 
894 	/*
895 	 * Let the DMR finish the MCLR.	 At 1 Mbit, it should do so
896 	 * in about a max of 6.4 milliseconds with diagnostics enabled.
897 	 */
898 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
899 		;
900 	/* Did the timer expire or did the DMR finish? */
901 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
902 		log(LOG_ERR, "%s: M820 Test Failed\n", sc->sc_dev.dv_xname);
903 		return;
904 	}
905 
906 	/* restart DMC */
907 	dmcinit(&sc->sc_if);
908 	sc->sc_flag &= ~DMC_RESTART;
909 	s = splnet();
910 	dmcstart(&sc->sc_if);
911 	splx(s);
912 	sc->sc_if.if_collisions++;	/* why not? */
913 }
914 
/*
 * Reset a device and mark down.
 * Flush output queue and drop queue limit.
 */
void
dmcdown(struct dmc_softc *sc)
{
	struct ifxmt *ifxp;

	/* master clear stops the microcode */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);

	/* release any mbufs still held by in-flight transmits */
	for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
#ifdef notyet
		if (ifxp->ifw_xtofree) {
			(void) m_freem(ifxp->ifw_xtofree);
			ifxp->ifw_xtofree = 0;
		}
#endif
	}
	IF_PURGE(&sc->sc_if.if_snd);
}
937 
/*
 * Watchdog timeout to see that transmitted packets don't
 * lose interrupts.  The device has to be online (the first
 * transmission may block until the other side comes up).
 */
void
dmctimeout(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	char buf1[64], buf2[64];	/* scratch for bitmask_snprintf() */

	if (sc->sc_flag & DMC_ONLINE) {
		/* log decoded CSR state, then reset and reinitialize */
		bitmask_snprintf(DMC_RBYTE(DMC_BSEL0) & 0xff, DMC0BITS,
		    buf1, sizeof(buf1));
		bitmask_snprintf(DMC_RBYTE(DMC_BSEL2) & 0xff, DMC2BITS,
		    buf2, sizeof(buf2));
		log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
		    sc->sc_dev.dv_xname, buf1, buf2);
		dmcrestart(sc);
	}
}
959