1 /*	$NetBSD: if_dmc.c,v 1.29 2022/04/04 19:33:45 andvar Exp $	*/
2 /*
3  * Copyright (c) 1982, 1986 Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of the University nor the names of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	@(#)if_dmc.c	7.10 (Berkeley) 12/16/90
31  */
32 
33 /*
34  * DMC11 device driver, internet version
35  *
36  *	Bill Nesheim
37  *	Cornell University
38  *
39  *	Lou Salkind
40  *	New York University
41  */
42 
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_dmc.c,v 1.29 2022/04/04 19:33:45 andvar Exp $");
45 
46 #undef DMCDEBUG	/* for base table dump on fatal error */
47 
48 #include "opt_inet.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/ioctl.h>
54 #include <sys/socket.h>
55 #include <sys/syslog.h>
56 #include <sys/device.h>
57 
58 #include <net/if.h>
59 
60 #ifdef	INET
61 #include <netinet/in.h>
62 #include <netinet/in_var.h>
63 #endif
64 
65 #include <sys/bus.h>
66 
67 #include <dev/qbus/ubareg.h>
68 #include <dev/qbus/ubavar.h>
69 #include <dev/qbus/if_uba.h>
70 
71 #include <dev/qbus/if_dmcreg.h>
72 
73 
74 /*
75  * output timeout value, sec.; should depend on line speed.
76  */
77 static int dmc_timeout = 20;
78 
79 #define NRCV 7
80 #define NXMT 3
81 #define NCMDS	(NRCV+NXMT+4)	/* size of command queue */
#define RDYSCAN	16	/* loop delay for RDYI after RQI */
82 
83 #define DMC_WBYTE(csr, val) \
84 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
85 #define DMC_WWORD(csr, val) \
86 	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
87 #define DMC_RBYTE(csr) \
88 	bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
89 #define DMC_RWORD(csr) \
90 	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
91 
92 
93 #ifdef DMCDEBUG
94 #define printd if(dmcdebug)printf
95 int dmcdebug = 0;
96 #endif
97 
98 /* error reporting intervals: each soft error is reported only every Nth occurrence */
99 #define DMC_RPNBFS	50
100 #define DMC_RPDSC	1
101 #define DMC_RPTMO	10
102 #define DMC_RPDCK	10
103 
104 struct  dmc_command {
105 	char	qp_cmd;		/* command */
106 	short	qp_ubaddr;	/* buffer address */
107 	short	qp_cc;		/* character count || XMEM */
108 	struct	dmc_command *qp_next;	/* next command on queue */
109 };
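/*
 * Command buffers cycle between the free list (sc_qfreeh/sc_qfreet),
 * the pending queue (sc_qhead/sc_qtail) and sc_qactive: dmcload()
 * takes one off the free list and queues it, dmcrint() hands it to
 * the device and eventually returns it to the free list.
 */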
110 
111 struct dmcbufs {
112 	int	ubinfo;		/* from uballoc */
113 	short	cc;		/* buffer size */
114 	short	flags;		/* access control */
115 };
116 #define	DBUF_OURS	0	/* buffer is available */
117 #define	DBUF_DMCS	1	/* buffer claimed by the DMC */
118 #define	DBUF_XMIT	4	/* transmit buffer */
119 #define	DBUF_RCV	8	/* receive buffer */
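/*
 * A buffer starts out DBUF_OURS; DBUF_DMCS is set while it is owned by
 * the DMC (a READ or WRITE request is outstanding).  Transmit buffers
 * get the flag cleared in dmcxint() when the write completes; receive
 * buffers are handed straight back to the DMC after each packet.
 */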
120 
121 
122 /*
123  * DMC software status per interface.
124  *
125  * Each interface is referenced by a network interface structure,
126  * sc_if, which the routing code uses to locate the interface.
127  * This structure contains the output queue for the interface, its address, ...
128  * We also have, for each interface, a set of UBA interface structures
129  * (one per receive and transmit buffer) which
130  * contain information about the UNIBUS resources held by the interface:
131  * map registers, buffered data paths, etc.  Information is cached in this
132  * structure for use by the if_uba.c routines in running the interface
133  * efficiently.
134  */
135 struct dmc_softc {
136 	device_t sc_dev;		/* Configuration common part */
137 	struct	ifnet sc_if;		/* network-visible interface */
138 	short	sc_oused;		/* output buffers currently in use */
139 	short	sc_iused;		/* input buffers given to DMC */
140 	short	sc_flag;		/* flags */
141 	struct	ubinfo sc_ui;		/* UBA mapping info for base table */
142 	int	sc_errors[4];		/* non-fatal error counters */
143 	bus_space_tag_t sc_iot;
144 	bus_space_handle_t sc_ioh;
145 	bus_dma_tag_t sc_dmat;
146 	struct	evcnt sc_rintrcnt;	/* Interrupt counting */
147 	struct	evcnt sc_tintrcnt;	/* Interrupt counting */
148 #define sc_datck sc_errors[0]
149 #define sc_timeo sc_errors[1]
150 #define sc_nobuf sc_errors[2]
151 #define sc_disc  sc_errors[3]
152 	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
153 	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
154 	struct	ifubinfo sc_ifuba;	/* UNIBUS resources */
155 	struct	ifrw sc_ifr[NRCV];	/* UNIBUS receive buffer maps */
156 	struct	ifxmt sc_ifw[NXMT];	/* UNIBUS transmit buffer maps */
157 	/* command queue stuff */
158 	struct	dmc_command sc_cmdbuf[NCMDS];
159 	struct	dmc_command *sc_qhead;	/* head of command queue */
160 	struct	dmc_command *sc_qtail;	/* tail of command queue */
161 	struct	dmc_command *sc_qactive;	/* command in progress */
162 	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
163 	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
164 	/* end command queue stuff */
165 	struct dmc_base {
166 		short	d_base[128];		/* DMC base table */
167 	} dmc_base;
168 };
169 
170 static  int dmcmatch(device_t, cfdata_t, void *);
171 static  void dmcattach(device_t, device_t, void *);
172 static  int dmcinit(struct ifnet *);
173 static  void dmcrint(void *);
174 static  void dmcxint(void *);
175 static  void dmcdown(struct dmc_softc *sc);
176 static  void dmcrestart(struct dmc_softc *);
177 static  void dmcload(struct dmc_softc *, int, u_short, u_short);
178 static  void dmcstart(struct ifnet *);
179 static  void dmctimeout(struct ifnet *);
180 static  int dmcioctl(struct ifnet *, u_long, void *);
181 static  int dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
182 	struct rtentry *);
183 static  void dmcreset(device_t);
184 
185 CFATTACH_DECL_NEW(dmc, sizeof(struct dmc_softc),
186     dmcmatch, dmcattach, NULL, NULL);
187 
188 /* flags */
189 #define DMC_RUNNING	0x01		/* device initialized */
190 #define DMC_BMAPPED	0x02		/* base table mapped */
191 #define DMC_RESTART	0x04		/* software restart in progress */
192 #define DMC_ONLINE	0x08		/* device running (had a RDYO) */
193 
194 
195 /* queue manipulation macros */
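/*
 * Note: these expand to multiple statements and are not wrapped in
 * do { } while (0), so they must only be used as complete statements
 * (as they are below), never as the unbraced body of an if or else.
 */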
196 #define	QUEUE_AT_HEAD(qp, head, tail) \
197 	(qp)->qp_next = (head); \
198 	(head) = (qp); \
199 	if ((tail) == (struct dmc_command *) 0) \
200 		(tail) = (head)
201 
202 #define QUEUE_AT_TAIL(qp, head, tail) \
203 	if ((tail)) \
204 		(tail)->qp_next = (qp); \
205 	else \
206 		(head) = (qp); \
207 	(qp)->qp_next = (struct dmc_command *) 0; \
208 	(tail) = (qp)
209 
210 #define DEQUEUE(head, tail) \
211 	(head) = (head)->qp_next;\
212 	if ((head) == (struct dmc_command *) 0)\
213 		(tail) = (head)
214 
215 int
216 dmcmatch(device_t parent, cfdata_t cf, void *aux)
217 {
218 	struct uba_attach_args *ua = aux;
219 	struct dmc_softc ssc;
220 	struct dmc_softc *sc = &ssc;
221 	int i;
222 
223 	sc->sc_iot = ua->ua_iot;
224 	sc->sc_ioh = ua->ua_ioh;
225 
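	/*
	 * Probe sequence: master-clear the device and wait for the RUN
	 * bit to come back, then set RQI/IEI so the device posts an
	 * input interrupt (presumably so the bus autoconfiguration code
	 * can latch the interrupt vector), and finally clear it again.
	 */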
226 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
227 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
228 		;
229 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
230 		printf("dmcprobe: can't start device\n" );
231 		return (0);
232 	}
233 	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
234 	/* let's be paranoid */
235 	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
236 	DELAY(1000000);
237 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
238 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
239 		;
240 	return (1);
241 }
242 
243 /*
244  * Interface exists: make available by filling in network interface
245  * record.  System will initialize the interface when it is ready
246  * to accept packets.
247  */
248 void
249 dmcattach(device_t parent, device_t self, void *aux)
250 {
251 	struct uba_attach_args *ua = aux;
252 	struct dmc_softc *sc = device_private(self);
253 
254 	sc->sc_dev = self;
255 	sc->sc_iot = ua->ua_iot;
256 	sc->sc_ioh = ua->ua_ioh;
257 	sc->sc_dmat = ua->ua_dmat;
258 
259 	strlcpy(sc->sc_if.if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
260 	sc->sc_if.if_mtu = DMCMTU;
261 	sc->sc_if.if_init = dmcinit;
262 	sc->sc_if.if_output = dmcoutput;
263 	sc->sc_if.if_ioctl = dmcioctl;
264 	sc->sc_if.if_watchdog = dmctimeout;
265 	sc->sc_if.if_flags = IFF_POINTOPOINT;
266 	sc->sc_if.if_softc = sc;
267 	IFQ_SET_READY(&sc->sc_if.if_snd);
268 
269 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
270 	    &sc->sc_rintrcnt);
271 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
272 	    &sc->sc_tintrcnt);
273 	uba_reset_establish(dmcreset, sc->sc_dev);
274 	evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
275 	    device_xname(sc->sc_dev), "intr");
276 	evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
277 	    device_xname(sc->sc_dev), "intr");
278 
279 	if_attach(&sc->sc_if);
280 }
281 
282 /*
283  * Reset of interface after UNIBUS reset.
284  * If interface is on specified UBA, reset its state.
285  */
286 void
287 dmcreset(device_t dev)
288 {
289 	struct dmc_softc *sc = device_private(dev);
290 
291 	sc->sc_flag = 0;
292 	sc->sc_if.if_flags &= ~IFF_RUNNING;
293 	dmcinit(&sc->sc_if);
294 }
295 
296 /*
297  * Initialization of interface; reinitialize UNIBUS usage.
298  */
299 int
300 dmcinit(struct ifnet *ifp)
301 {
302 	struct dmc_softc *sc = ifp->if_softc;
303 	struct ifrw *ifrw;
304 	struct ifxmt *ifxp;
305 	struct dmcbufs *rp;
306 	struct dmc_command *qp;
307 	struct ifaddr *ifa;
308 	cfdata_t ui = device_cfdata(sc->sc_dev);
309 	int base;
310 	int s;
311 
312 	/*
313 	 * Check to see that an address has been set
314 	 * (both local and destination for an address family).
315 	 */
316 	s = pserialize_read_enter();
317 	IFADDR_READER_FOREACH(ifa, ifp) {
318 		if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
319 			break;
320 	}
321 	pserialize_read_exit(s);
322 	if (ifa == NULL)
323 		return 0;
324 
325 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
326 		printf("dmcinit: DMC not running\n");
327 		ifp->if_flags &= ~IFF_UP;
328 		return 0;
329 	}
330 	/* map base table */
331 	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
332 		sc->sc_ui.ui_size = sizeof(struct dmc_base);
333 		sc->sc_ui.ui_vaddr = (void *)&sc->dmc_base;
334 		uballoc(device_private(device_parent(sc->sc_dev)), &sc->sc_ui, 0);
335 		sc->sc_flag |= DMC_BMAPPED;
336 	}
337 	/* initialize UNIBUS resources */
338 	sc->sc_iused = sc->sc_oused = 0;
339 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
340 		if (if_ubaminit(&sc->sc_ifuba,
341 		    device_private(device_parent(sc->sc_dev)),
342 		    sizeof(struct dmc_header) + DMCMTU,
343 		    sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
344 			aprint_error_dev(sc->sc_dev, "can't allocate uba resources\n");
345 			ifp->if_flags &= ~IFF_UP;
346 			return 0;
347 		}
348 		ifp->if_flags |= IFF_RUNNING;
349 	}
350 	sc->sc_flag &= ~DMC_ONLINE;
351 	sc->sc_flag |= DMC_RUNNING;
352 	/*
353 	 * Limit packets enqueued until we see if we're on the air.
354 	 */
355 	ifp->if_snd.ifq_maxlen = 3;
356 
357 	/* initialize buffer pool */
358 	/* receives */
359 	ifrw = &sc->sc_ifr[0];
360 	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
361 		rp->ubinfo = ifrw->ifrw_info;
362 		rp->cc = DMCMTU + sizeof (struct dmc_header);
363 		rp->flags = DBUF_OURS|DBUF_RCV;
364 		ifrw++;
365 	}
366 	/* transmits */
367 	ifxp = &sc->sc_ifw[0];
368 	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
369 		rp->ubinfo = ifxp->ifw_info;
370 		rp->cc = 0;
371 		rp->flags = DBUF_OURS|DBUF_XMIT;
372 		ifxp++;
373 	}
374 
375 	/* set up command queues */
376 	sc->sc_qfreeh = sc->sc_qfreet
377 		 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
378 		(struct dmc_command *)0;
379 	/* set up free command buffer list */
380 	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
381 		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
382 	}
383 
384 	/* base in */
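	/*
	 * The 18-bit UNIBUS address of the base table is split: the low
	 * 16 bits are passed in the address word, the top two bits are
	 * shifted down into the XMEM field of the count word.
	 */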
385 	base = sc->sc_ui.ui_baddr;
386 	dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);
387 	/* cf_flags select the line mode: DDCMP full duplex (0),	*/
388 	/* maintenance (1), or half duplex as primary (2) / secondary (3) */
389 	if (ui->cf_flags == 0)
390 		/* use DDCMP mode in full duplex */
391 		dmcload(sc, DMC_CNTLI, 0, 0);
392 	else if (ui->cf_flags == 1)
393 		/* use MAINTENANCE mode */
394 		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
395 	else if (ui->cf_flags == 2)
396 		/* use DDCMP half duplex as primary station */
397 		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
398 	else if (ui->cf_flags == 3)
399 		/* use DDCMP half duplex as secondary station */
400 		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);
401 
402 	/* enable operation done interrupts */
403 	while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
404 		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);
405 	s = splnet();
406 	/* queue first NRCV buffers for DMC to fill */
407 	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
408 		rp->flags |= DBUF_DMCS;
409 		dmcload(sc, DMC_READ, rp->ubinfo,
410 			(((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
411 		sc->sc_iused++;
412 	}
413 	splx(s);
414 	return 0;
415 }
416 
417 /*
418  * Start output on interface.  Get another datagram
419  * to send from the interface queue and map it to
420  * the interface before starting output.
421  *
422  * Must be called at splnet or higher.
423  */
424 void
425 dmcstart(struct ifnet *ifp)
426 {
427 	struct dmc_softc *sc = ifp->if_softc;
428 	struct mbuf *m;
429 	struct dmcbufs *rp;
430 	int n;
431 
432 	/*
433 	 * Dequeue up to NXMT requests and map them to the UNIBUS.
434 	 * If no more requests, or no dmc buffers available, just return.
435 	 */
436 	n = 0;
437 	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
438 		/* find an available buffer */
439 		if ((rp->flags & DBUF_DMCS) == 0) {
440 			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
441 			if (m == 0)
442 				return;
443 			/* mark it dmcs */
444 			rp->flags |= (DBUF_DMCS);
445 			/*
446 			 * Have request mapped to UNIBUS for transmission
447 			 * and start the output.
448 			 */
449 			rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
450 			rp->cc &= DMC_CCOUNT;
451 			if (++sc->sc_oused == 1)
452 				sc->sc_if.if_timer = dmc_timeout;
453 			dmcload(sc, DMC_WRITE, rp->ubinfo,
454 				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
455 		}
456 		n++;
457 	}
458 }
459 
460 /*
461  * Utility routine to load the DMC device registers.
462  */
463 void
464 dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
465 {
466 	struct dmc_command *qp;
467 	int sps;
468 
469 	sps = splnet();
470 
471 	/* grab a command buffer from the free list */
472 	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
473 		panic("dmc command queue overflow");
474 	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);
475 
476 	/* fill in requested info */
477 	qp->qp_cmd = (type | DMC_RQI);
478 	qp->qp_ubaddr = w0;
479 	qp->qp_cc = w1;
480 
481 	if (sc->sc_qactive) {	/* command in progress */
482 		if (type == DMC_READ) {
483 			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
484 		} else {
485 			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
486 		}
487 	} else {	/* command port free */
488 		sc->sc_qactive = qp;
489 		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
490 		dmcrint(sc);
491 	}
492 	splx(sps);
493 }
494 
495 /*
496  * DMC interface receiver interrupt.
497  * Ready to accept another command,
498  * pull one off the command queue.
499  */
500 void
501 dmcrint(void *arg)
502 {
503 	struct dmc_softc *sc = arg;
504 	struct dmc_command *qp;
505 	int n;
506 
507 	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
508 		printf("%s: dmcrint no command\n", device_xname(sc->sc_dev));
509 		return;
510 	}
511 	while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
512 		DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
513 		DMC_WWORD(DMC_SEL6, qp->qp_cc);
514 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
515 		/* free command buffer */
516 		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
517 		while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
518 			/*
519 			 * Can't check for RDYO here 'cause
520 			 * this routine isn't reentrant!
521 			 */
522 			DELAY(5);
523 		}
524 		/* move on to next command */
525 		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
526 			break;		/* all done */
527 		/* more commands to do, start the next one */
528 		qp = sc->sc_qactive;
529 		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
530 		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
531 		n = RDYSCAN;
532 		while (n-- > 0)
533 			if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
534 			    (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
535 				break;
536 	}
537 	if (sc->sc_qactive) {
538 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | (DMC_IEI|DMC_RQI));
539 		/* VMS does it twice !*$%@# */
540 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | (DMC_IEI|DMC_RQI));
541 	}
542 
543 }
544 
545 /*
546  * DMC interface transmitter interrupt.
547  * A transfer may have completed, check for errors.
548  * If it was a read, notify appropriate protocol.
549  * If it was a write, pull the next one off the queue.
550  */
551 void
552 dmcxint(void *a)
553 {
554 	struct dmc_softc *sc = a;
555 
556 	struct ifnet *ifp;
557 	struct mbuf *m;
558 	int arg, pkaddr, cmd, len, s;
559 	struct ifrw *ifrw;
560 	struct dmcbufs *rp;
561 	struct ifxmt *ifxp;
562 	struct dmc_header *dh;
563 	char buf[64];
564 
565 	ifp = &sc->sc_if;
566 
567 	while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {
568 
569 		cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
570 		arg = DMC_RWORD(DMC_SEL6) & 0xffff;
571 		/* reconstruct UNIBUS address of buffer returned to us */
572 		pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);
573 		/* release port */
574 		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);
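		/*
		 * The low three bits of BSEL2 give the completion type:
		 * receive done (DMC_OUR), transmit done (DMC_OUX) or a
		 * control-out/error report (DMC_CNTLO).
		 */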
575 		switch (cmd & 07) {
576 
577 		case DMC_OUR:
578 			/*
579 			 * A read has completed.
580 			 * Pass packet to type specific
581 			 * higher-level input routine.
582 			 */
583 			if_statinc(ifp, if_ipackets);
584 			/* find location in dmcuba struct */
585 			ifrw= &sc->sc_ifr[0];
586 			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
587 				if(rp->ubinfo == pkaddr)
588 					break;
589 				ifrw++;
590 			}
591 			if (rp >= &sc->sc_rbufs[NRCV])
592 				panic("dmc rcv");
593 			if ((rp->flags & DBUF_DMCS) == 0)
594 				aprint_error_dev(sc->sc_dev, "done unalloc rbuf\n");
595 
596 			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
597 			if (len < 0 || len > DMCMTU) {
598 				if_statinc(ifp, if_ierrors);
599 #ifdef DMCDEBUG
600 				printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
601 				    device_xname(sc->sc_dev), pkaddr, len);
602 #endif
603 				goto setup;
604 			}
605 			/*
606 			 * Convert the packet type in the local network
607 			 * header to host byte order; a zero-length packet
608 			 * just gets its buffer handed back to the DMC.
609 			 */
610 			dh = (struct dmc_header *)ifrw->ifrw_addr;
611 			dh->dmc_type = ntohs((u_short)dh->dmc_type);
612 			if (len == 0)
613 				goto setup;
614 
615 			/*
616 			 * Pull the packet off the interface: if_ubaget
617 			 * copies (or remaps) the received data into an
618 			 * mbuf chain of the given length.  The local
619 			 * network header is still at the front and is
620 			 * trimmed off below before the packet is queued.
621 			 */
622 			m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
623 			if (m == 0)
624 				goto setup;
625 			/* Shave off dmc_header */
626 			m_adj(m, sizeof(struct dmc_header));
627 			switch (dh->dmc_type) {
628 #ifdef INET
629 			case DMC_IPTYPE:
630 				break;
631 #endif
632 			default:
633 				m_freem(m);
634 				goto setup;
635 			}
636 
637 			s = splnet();
638 			if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
639 				m_freem(m);
640 			}
641 			splx(s);
642 
643 	setup:
644 			/* is this needed? */
645 			rp->ubinfo = ifrw->ifrw_info;
646 
647 			dmcload(sc, DMC_READ, rp->ubinfo,
648 			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
649 			break;
650 
651 		case DMC_OUX:
652 			/*
653 			 * A write has completed, start another
654 			 * transfer if there is more data to send.
655 			 */
656 			if_statinc(ifp, if_opackets);
657 			/* find associated dmcbuf structure */
658 			ifxp = &sc->sc_ifw[0];
659 			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
660 				if(rp->ubinfo == pkaddr)
661 					break;
662 				ifxp++;
663 			}
664 			if (rp >= &sc->sc_xbufs[NXMT]) {
665 				aprint_error_dev(sc->sc_dev, "bad packet address 0x%x\n",
666 				    pkaddr);
667 				break;
668 			}
669 			if ((rp->flags & DBUF_DMCS) == 0)
670 				aprint_error_dev(sc->sc_dev, "unallocated packet 0x%x\n",
671 				    pkaddr);
672 			/* mark buffer free */
673 			if_ubaend(&sc->sc_ifuba, ifxp);
674 			rp->flags &= ~DBUF_DMCS;
675 			if (--sc->sc_oused == 0)
676 				sc->sc_if.if_timer = 0;
677 			else
678 				sc->sc_if.if_timer = dmc_timeout;
679 			if ((sc->sc_flag & DMC_ONLINE) == 0) {
680 				extern int ifqmaxlen;
681 
682 				/*
683 				 * We're on the air.
684 				 * Open the queue to the usual value.
685 				 */
686 				sc->sc_flag |= DMC_ONLINE;
687 				ifp->if_snd.ifq_maxlen = ifqmaxlen;
688 			}
689 			break;
690 
691 		case DMC_CNTLO:
692 			arg &= DMC_CNTMASK;
693 			if (arg & DMC_FATAL) {
694 				if (arg != DMC_START) {
695 					snprintb(buf, sizeof(buf), CNTLO_BITS,
696 					    arg);
697 					log(LOG_ERR,
698 					    "%s: fatal error, flags=%s\n",
699 					    device_xname(sc->sc_dev), buf);
700 				}
701 				dmcrestart(sc);
702 				break;
703 			}
704 			/* ACCUMULATE STATISTICS */
705 			switch(arg) {
706 			case DMC_NOBUFS:
707 				if_statinc(ifp, if_ierrors);
708 				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
709 					goto report;
710 				break;
711 			case DMC_DISCONN:
712 				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
713 					goto report;
714 				break;
715 			case DMC_TIMEOUT:
716 				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
717 					goto report;
718 				break;
719 			case DMC_DATACK:
720 				if_statinc(ifp, if_oerrors);
721 				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
722 					goto report;
723 				break;
724 			default:
725 				goto report;
726 			}
727 			break;
728 		report:
729 #ifdef DMCDEBUG
730 			snprintb(buf, sizeof(buf), CNTLO_BITS, arg);
731 			printd("%s: soft error, flags=%s\n",
732 			    device_xname(sc->sc_dev), buf);
733 #endif
734 			if ((sc->sc_flag & DMC_RESTART) == 0) {
735 				/*
736 				 * kill off the dmc to get things
737 				 * going again by generating a
738 				 * procedure error
739 				 */
740 				sc->sc_flag |= DMC_RESTART;
741 				arg = sc->sc_ui.ui_baddr;
742 				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
743 			}
744 			break;
745 
746 		default:
747 			printf("%s: bad control %o\n",
748 			    device_xname(sc->sc_dev), cmd);
749 			break;
750 		}
751 	}
752 	dmcstart(ifp);
753 }
754 
755 /*
756  * DMC output routine.
757  * Encapsulate a packet of type family for the dmc.
758  * Just prepend the local network header (dmc_header) carrying the
759  * packet type and hand the packet to the interface output queue.
760  */
761 int
762 dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
763     struct rtentry *rt)
764 {
765 	int type, error, s;
766 	struct mbuf *m = m0;
767 	struct dmc_header *dh;
768 
769 	if ((ifp->if_flags & IFF_UP) == 0) {
770 		error = ENETDOWN;
771 		goto bad;
772 	}
773 
774 	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
775 
776 	switch (dst->sa_family) {
777 #ifdef	INET
778 	case AF_INET:
779 		type = DMC_IPTYPE;
780 		break;
781 #endif
782 
783 	case AF_UNSPEC:
784 		dh = (struct dmc_header *)dst->sa_data;
785 		type = dh->dmc_type;
786 		break;
787 
788 	default:
789 		printf("%s: can't handle af%d\n", ifp->if_xname,
790 			dst->sa_family);
791 		error = EAFNOSUPPORT;
792 		goto bad;
793 	}
794 
795 	/*
796 	 * Add local network header
797 	 * (there is space for a uba on a vax to step on)
798 	 */
799 	M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
800 	if (m == 0) {
801 		error = ENOBUFS;
802 		goto bad;
803 	}
804 	dh = mtod(m, struct dmc_header *);
805 	dh->dmc_type = htons((u_short)type);
806 
807 	/*
808 	 * Queue message on interface, and start output if interface
809 	 * not yet active.
810 	 */
811 	s = splnet();
812 	IFQ_ENQUEUE(&ifp->if_snd, m, error);
813 	if (error) {
814 		/* mbuf is already freed */
815 		splx(s);
816 		return (error);
817 	}
818 	dmcstart(ifp);
819 	splx(s);
820 	return (0);
821 
822 bad:
823 	m_freem(m0);
824 	return (error);
825 }
826 
827 
828 /*
829  * Process an ioctl request.
830  */
831 /* ARGSUSED */
832 int
833 dmcioctl(struct ifnet *ifp, u_long cmd, void *data)
834 {
835 	int s = splnet(), error = 0;
836 	register struct dmc_softc *sc = ifp->if_softc;
837 
838 	switch (cmd) {
839 
840 	case SIOCINITIFADDR:
841 		ifp->if_flags |= IFF_UP;
842 		if ((ifp->if_flags & IFF_RUNNING) == 0)
843 			dmcinit(ifp);
844 		break;
845 
846 	case SIOCSIFDSTADDR:
847 		if ((ifp->if_flags & IFF_RUNNING) == 0)
848 			dmcinit(ifp);
849 		break;
850 
851 	case SIOCSIFFLAGS:
852 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
853 			break;
854 		if ((ifp->if_flags & IFF_UP) == 0 &&
855 		    sc->sc_flag & DMC_RUNNING)
856 			dmcdown(sc);
857 		else if (ifp->if_flags & IFF_UP &&
858 		    (sc->sc_flag & DMC_RUNNING) == 0)
859 			dmcrestart(sc);
860 		break;
861 
862 	default:
863 		error = ifioctl_common(ifp, cmd, data);
864 	}
865 	splx(s);
866 	return (error);
867 }
868 
869 /*
870  * Restart after a fatal error.
871  * Clear device and reinitialize.
872  */
873 void
874 dmcrestart(struct dmc_softc *sc)
875 {
876 	int s, i;
877 
878 #ifdef DMCDEBUG
879 	/* dump base table */
880 	printf("%s base table:\n", device_xname(sc->sc_dev));
881 	for (i = 0; i < (int)__arraycount(sc->dmc_base.d_base); i++)
882 		printf("%o\n", sc->dmc_base.d_base[i]);
883 #endif
884 
885 	dmcdown(sc);
886 
887 	/*
888 	 * Let the DMR finish the MCLR.	 At 1 Mbit, it should do so
889 	 * in about a max of 6.4 milliseconds with diagnostics enabled.
890 	 */
891 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
892 		;
893 	/* Did the timer expire or did the DMR finish? */
894 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
895 		log(LOG_ERR, "%s: M820 Test Failed\n", device_xname(sc->sc_dev));
896 		return;
897 	}
898 
899 	/* restart DMC */
900 	dmcinit(&sc->sc_if);
901 	sc->sc_flag &= ~DMC_RESTART;
902 	s = splnet();
903 	dmcstart(&sc->sc_if);
904 	splx(s);
905 	if_statinc(&sc->sc_if, if_collisions);	/* why not? */
906 }
907 
908 /*
909  * Reset a device and mark down.
910  * Flush output queue and drop queue limit.
911  */
912 void
913 dmcdown(struct dmc_softc *sc)
914 {
915 	struct ifxmt *ifxp;
916 
917 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
918 	sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);
919 
920 	for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
921 #ifdef notyet
922 		if (ifxp->ifw_xtofree) {
923 			(void) m_freem(ifxp->ifw_xtofree);
924 			ifxp->ifw_xtofree = 0;
925 		}
926 #endif
927 	}
928 	IF_PURGE(&sc->sc_if.if_snd);
929 }
930 
931 /*
932  * Watchdog timeout to see that transmitted packets don't
933  * lose interrupts.  The device has to be online (the first
934  * transmission may block until the other side comes up).
935  */
936 void
937 dmctimeout(struct ifnet *ifp)
938 {
939 	struct dmc_softc *sc = ifp->if_softc;
940 	char buf1[64], buf2[64];
941 
942 	if (sc->sc_flag & DMC_ONLINE) {
943 		snprintb(buf1, sizeof(buf1), DMC0BITS,
944 		    DMC_RBYTE(DMC_BSEL0) & 0xff);
945 		snprintb(buf2, sizeof(buf2), DMC2BITS,
946 		    DMC_RBYTE(DMC_BSEL2) & 0xff);
947 		log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
948 		    device_xname(sc->sc_dev), buf1, buf2);
949 		dmcrestart(sc);
950 	}
951 }
952