xref: /netbsd/sys/dev/ic/hd64570.c (revision bf9ec67e)
1 /*	$NetBSD: hd64570.c,v 1.21 2002/03/05 04:12:57 itojun Exp $	*/
2 
3 /*
4  * Copyright (c) 1999 Christian E. Hopps
5  * Copyright (c) 1998 Vixie Enterprises
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of Vixie Enterprises nor the names
18  *    of its contributors may be used to endorse or promote products derived
19  *    from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
22  * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
23  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25  * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
26  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * This software has been written for Vixie Enterprises by Michael Graff
36  * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
37  * ``http://www.vix.com''.
38  */
39 
40 /*
41  * TODO:
42  *
43  *	o  teach the receive logic about errors, and about long frames that
44  *	   span more than one input buffer.  (Right now, receive/transmit is
45  *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
46  *	   This is currently 1504, which is large enough to hold the HDLC
47  *	   header and the packet itself.  Packets which are too long are
48  *	   silently dropped on both transmit and receive.)
49  *	o  write code to handle the msci interrupts, needed only for CD
50  *	   and CTS changes.
51  *	o  consider switching back to a "queue tx with DMA active" model which
52  *	   should help sustain outgoing traffic
53  *	o  through clever use of bus_dma*() functions, it should be possible
54  *	   to map the mbuf's data area directly into a descriptor transmit
55  *	   buffer, removing the need to allocate extra memory.  If, however,
56  *	   we run out of descriptors for this, we will need to then allocate
57  *	   one large mbuf, copy the fragmented chain into it, and put it onto
58  *	   a single descriptor.
59  *	o  use bus_dmamap_sync() with the right offset and lengths, rather
60  *	   than cheating and always sync'ing the whole region.
61  *
62  *	o  perhaps allow rx and tx to be in more than one page
63  *	   if not using dma.  currently the assumption is that
64  *	   rx uses a page and tx uses a page.
65  */
66 
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.21 2002/03/05 04:12:57 itojun Exp $");
69 
70 #include "bpfilter.h"
71 #include "opt_inet.h"
72 #include "opt_iso.h"
73 
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/device.h>
77 #include <sys/mbuf.h>
78 #include <sys/socket.h>
79 #include <sys/sockio.h>
80 #include <sys/kernel.h>
81 
82 #include <net/if.h>
83 #include <net/if_types.h>
84 #include <net/netisr.h>
85 
86 #if defined(INET) || defined(INET6)
87 #include <netinet/in.h>
88 #include <netinet/in_systm.h>
89 #include <netinet/in_var.h>
90 #include <netinet/ip.h>
91 #ifdef INET6
92 #include <netinet6/in6_var.h>
93 #endif
94 #endif
95 
96 #ifdef ISO
97 #include <net/if_llc.h>
98 #include <netiso/iso.h>
99 #include <netiso/iso_var.h>
100 #endif
101 
102 #if NBPFILTER > 0
103 #include <net/bpf.h>
104 #endif
105 
106 #include <machine/cpu.h>
107 #include <machine/bus.h>
108 #include <machine/intr.h>
109 
110 #include <dev/pci/pcivar.h>
111 #include <dev/pci/pcireg.h>
112 #include <dev/pci/pcidevs.h>
113 
114 #include <dev/ic/hd64570reg.h>
115 #include <dev/ic/hd64570var.h>
116 
117 #define SCA_DEBUG_RX		0x0001
118 #define SCA_DEBUG_TX		0x0002
119 #define SCA_DEBUG_CISCO		0x0004
120 #define SCA_DEBUG_DMA		0x0008
121 #define SCA_DEBUG_RXPKT		0x0010
122 #define SCA_DEBUG_TXPKT		0x0020
123 #define SCA_DEBUG_INTR		0x0040
124 #define SCA_DEBUG_CLOCK		0x0080
125 
126 #if 0
127 #define SCA_DEBUG_LEVEL	( 0xFFFF )
128 #else
129 #define SCA_DEBUG_LEVEL 0
130 #endif
131 
132 u_int32_t sca_debug = SCA_DEBUG_LEVEL;
133 
134 #if SCA_DEBUG_LEVEL > 0
135 #define SCA_DPRINTF(l, x) do { \
136 	if ((l) & sca_debug) \
137 		printf x;\
138 	} while (0)
139 #else
140 #define SCA_DPRINTF(l, x)
141 #endif
142 
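/*
 * Illustrative usage of the macro above: the second argument is a fully
 * parenthesized printf() argument list, e.g.
 *
 *	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: got %d bytes\n", len));
 *
 * which compiles away entirely when SCA_DEBUG_LEVEL is 0.  To trace only
 * receive and transmit activity, build with SCA_DEBUG_LEVEL set to
 * (SCA_DEBUG_RX | SCA_DEBUG_TX).
 */
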
143 #if 0
144 #define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
145 #endif
146 
147 static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
148 static inline u_int8_t msci_read_1(sca_port_t *, u_int);
149 
150 static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
151 static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
152 static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
153 static inline u_int16_t dmac_read_2(sca_port_t *, u_int);
154 
155 static	void sca_msci_init(struct sca_softc *, sca_port_t *);
156 static	void sca_dmac_init(struct sca_softc *, sca_port_t *);
157 static void sca_dmac_rxinit(sca_port_t *);
158 
159 static	int sca_dmac_intr(sca_port_t *, u_int8_t);
160 static	int sca_msci_intr(sca_port_t *, u_int8_t);
161 
162 static	void sca_get_packets(sca_port_t *);
163 static	int sca_frame_avail(sca_port_t *);
164 static	void sca_frame_process(sca_port_t *);
165 static	void sca_frame_read_done(sca_port_t *);
166 
167 static	void sca_port_starttx(sca_port_t *);
168 
169 static	void sca_port_up(sca_port_t *);
170 static	void sca_port_down(sca_port_t *);
171 
172 static	int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
173 			    struct rtentry *));
174 static	int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
175 static	void sca_start __P((struct ifnet *));
176 static	void sca_watchdog __P((struct ifnet *));
177 
178 static struct mbuf *sca_mbuf_alloc(struct sca_softc *, caddr_t, u_int);
179 
180 #if SCA_DEBUG_LEVEL > 0
181 static	void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
182 #endif
183 
184 
185 #define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
186 #define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
187 #define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
188 #define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)
189 
190 #define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
191 
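/*
 * A sketch of how the paging hooks and sca_page_addr() combine when the
 * card's memory is a windowed region rather than DMA-mapped (sc_usedma
 * clear).  sca_page_addr() masks an address down to its offset within
 * the currently selected page, so accesses look roughly like:
 *
 *	sc->scu_page_on(sc);
 *	sc->scu_set_page(sc, scp->sp_rxdesc_p);
 *	stat = bus_space_read_1(sc->scu_memt, sc->scu_memh,
 *	    sca_page_addr(sc, desc_p));
 *	sc->scu_page_off(sc);
 */
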
192 static inline void
193 msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
194 {
195 	sca_write_1(scp->sca, scp->msci_off + reg, val);
196 }
197 
198 static inline u_int8_t
199 msci_read_1(sca_port_t *scp, u_int reg)
200 {
201 	return sca_read_1(scp->sca, scp->msci_off + reg);
202 }
203 
204 static inline void
205 dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
206 {
207 	sca_write_1(scp->sca, scp->dmac_off + reg, val);
208 }
209 
210 static inline void
211 dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
212 {
213 	sca_write_2(scp->sca, scp->dmac_off + reg, val);
214 }
215 
216 static inline u_int8_t
217 dmac_read_1(sca_port_t *scp, u_int reg)
218 {
219 	return sca_read_1(scp->sca, scp->dmac_off + reg);
220 }
221 
222 static inline u_int16_t
223 dmac_read_2(sca_port_t *scp, u_int reg)
224 {
225 	return sca_read_2(scp->sca, scp->dmac_off + reg);
226 }
227 
228 /*
229  * read the chain pointer
230  */
231 static inline u_int16_t
232 sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
233 {
234 	if (sc->sc_usedma)
235 		return ((dp)->sd_chainp);
236 	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
237 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
238 }
239 
240 /*
241  * write the chain pointer
242  */
243 static inline void
244 sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
245 {
246 	if (sc->sc_usedma)
247 		(dp)->sd_chainp = cp;
248 	else
249 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
250 		    sca_page_addr(sc, dp)
251 		    + offsetof(struct sca_desc, sd_chainp), cp);
252 }
253 
254 /*
255  * read the buffer pointer
256  */
257 static inline u_int32_t
258 sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
259 {
260 	u_int32_t address;
261 
262 	if (sc->sc_usedma)
263 		address = dp->sd_bufp | dp->sd_hbufp << 16;
264 	else {
265 		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
266 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
267 		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
268 		    sca_page_addr(sc, dp)
269 		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
270 	}
271 	return (address);
272 }
273 
274 /*
275  * write the buffer pointer
276  */
277 static inline void
278 sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
279 {
280 	if (sc->sc_usedma) {
281 		dp->sd_bufp = bufp & 0xFFFF;
282 		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
283 	} else {
284 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
285 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
286 		    bufp & 0xFFFF);
287 		bus_space_write_1(sc->scu_memt, sc->scu_memh,
288 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
289 		    (bufp & 0x00FF0000) >> 16);
290 	}
291 }
292 
293 /*
294  * read the buffer length
295  */
296 static inline u_int16_t
297 sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
298 {
299 	if (sc->sc_usedma)
300 		return ((dp)->sd_buflen);
301 	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
302 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
303 }
304 
305 /*
306  * write the buffer length
307  */
308 static inline void
309 sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
310 {
311 	if (sc->sc_usedma)
312 		(dp)->sd_buflen = len;
313 	else
314 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
315 		    sca_page_addr(sc, dp)
316 		    + offsetof(struct sca_desc, sd_buflen), len);
317 }
318 
319 /*
320  * read the descriptor status
321  */
322 static inline u_int8_t
323 sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
324 {
325 	if (sc->sc_usedma)
326 		return ((dp)->sd_stat);
327 	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
328 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
329 }
330 
331 /*
332  * write the descriptor status
333  */
334 static inline void
335 sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
336 {
337 	if (sc->sc_usedma)
338 		(dp)->sd_stat = stat;
339 	else
340 		bus_space_write_1(sc->scu_memt, sc->scu_memh,
341 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
342 		    stat);
343 }
344 
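#if 0
/*
 * Unused illustrative helper (a sketch, not compiled in): dump one
 * descriptor through the accessors above, which hide whether the
 * descriptor lives in host memory (sc_usedma) or behind a bus_space
 * window on the card.
 */
static void
sca_desc_dump(struct sca_softc *sc, struct sca_desc *dp)
{
	printf("desc %p: chainp 0x%04x bufp 0x%06lx len %d stat 0x%02x\n",
	       dp, sca_desc_read_chainp(sc, dp),
	       (u_long)sca_desc_read_bufp(sc, dp),
	       sca_desc_read_buflen(sc, dp),
	       sca_desc_read_stat(sc, dp));
}
#endif
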
345 void
346 sca_init(struct sca_softc *sc)
347 {
348 	/*
349 	 * Do a little sanity check:  check number of ports.
350 	 */
351 	if (sc->sc_numports < 1 || sc->sc_numports > 2)
352 		panic("sca can't handle fewer than 1 or more than 2 ports");
353 
354 	/*
355 	 * disable DMA and MSCI interrupts
356 	 */
357 	sca_write_1(sc, SCA_DMER, 0);
358 	sca_write_1(sc, SCA_IER0, 0);
359 	sca_write_1(sc, SCA_IER1, 0);
360 	sca_write_1(sc, SCA_IER2, 0);
361 
362 	/*
363 	 * configure interrupt system
364 	 */
365 	sca_write_1(sc, SCA_ITCR,
366 	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
367 #if 0
368 	/* these are for the interrupt ack cycle, which we don't use */
369 	sca_write_1(sc, SCA_IVR, 0x40);
370 	sca_write_1(sc, SCA_IMVR, 0x40);
371 #endif
372 
373 	/*
374 	 * set wait control register to zero wait states
375 	 */
376 	sca_write_1(sc, SCA_PABR0, 0);
377 	sca_write_1(sc, SCA_PABR1, 0);
378 	sca_write_1(sc, SCA_WCRL, 0);
379 	sca_write_1(sc, SCA_WCRM, 0);
380 	sca_write_1(sc, SCA_WCRH, 0);
381 
382 	/*
383 	 * disable DMA and reset status
384 	 */
385 	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);
386 
387 	/*
388 	 * disable transmit DMA for all channels
389 	 */
390 	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
391 	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
392 	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
393 	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
394 	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
395 	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
396 	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
397 	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
398 
399 	/*
400 	 * enable DMA based on channel enable flags for each channel
401 	 */
402 	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);
403 
404 	/*
405 	 * Should check to see if the chip is responding, but for now
406 	 * assume it is.
407 	 */
408 }
409 
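/*
 * The bus front end (for instance the PCI ntwo attachment) is assumed
 * to have filled in the register accessors and either the DMA map or
 * the paging hooks in the softc before getting here; attachment then
 * looks roughly like:
 *
 *	sca_init(sc);
 *	for (i = 0; i < sc->sc_numports; i++)
 *		sca_port_attach(sc, i);
 */
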
410 /*
411  * initialize the port and attach it to the networking layer
412  */
413 void
414 sca_port_attach(struct sca_softc *sc, u_int port)
415 {
416 	sca_port_t *scp = &sc->sc_ports[port];
417 	struct ifnet *ifp;
418 	static u_int ntwo_unit = 0;
419 
420 	scp->sca = sc;  /* point back to the parent */
421 
422 	scp->sp_port = port;
423 
424 	if (port == 0) {
425 		scp->msci_off = SCA_MSCI_OFF_0;
426 		scp->dmac_off = SCA_DMAC_OFF_0;
427 		if (sc->sc_parent != NULL)
428 			ntwo_unit = sc->sc_parent->dv_unit * 2 + 0;
429 		else
430 			ntwo_unit = 0;	/* XXX */
431 	} else {
432 		scp->msci_off = SCA_MSCI_OFF_1;
433 		scp->dmac_off = SCA_DMAC_OFF_1;
434 		if (sc->sc_parent != NULL)
435 			ntwo_unit = sc->sc_parent->dv_unit * 2 + 1;
436 		else
437 			ntwo_unit = 1;	/* XXX */
438 	}
439 
440 	sca_msci_init(sc, scp);
441 	sca_dmac_init(sc, scp);
442 
443 	/*
444 	 * attach to the network layer
445 	 */
446 	ifp = &scp->sp_if;
447 	sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
448 	ifp->if_softc = scp;
449 	ifp->if_mtu = SCA_MTU;
450 	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
451 	ifp->if_type = IFT_PTPSERIAL;
452 	ifp->if_hdrlen = HDLC_HDRLEN;
453 	ifp->if_ioctl = sca_ioctl;
454 	ifp->if_output = sca_output;
455 	ifp->if_watchdog = sca_watchdog;
456 	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
457 	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
458 #ifdef SCA_USE_FASTQ
459 	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
460 #endif
461 	IFQ_SET_READY(&ifp->if_snd);
462 	if_attach(ifp);
463 	if_alloc_sadl(ifp);
464 
465 #if NBPFILTER > 0
466 	bpfattach(ifp, DLT_HDLC, HDLC_HDRLEN);
467 #endif
468 
469 	if (sc->sc_parent == NULL)
470 		printf("%s: port %d\n", ifp->if_xname, port);
471 	else
472 		printf("%s at %s port %d\n",
473 		       ifp->if_xname, sc->sc_parent->dv_xname, port);
474 
475 	/*
476 	 * reset the last seen times on the cisco keepalive protocol
477 	 */
478 	scp->cka_lasttx = time.tv_usec;
479 	scp->cka_lastrx = 0;
480 }
481 
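/*
 * With the unit numbering above, a two-port card attached as parent
 * device unit 0 would announce its interfaces as (illustrative output):
 *
 *	ntwo0 at ntwo0 port 0
 *	ntwo1 at ntwo0 port 1
 */
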
482 #if 0
483 /*
484  * returns log2(div), sets 'tmc' for the required freq 'hz'
485  */
486 static u_int8_t
487 sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
488 {
489 	u_int32_t tmc, div;
490 	u_int32_t clock;
491 
492 	/* clock hz = (chipclock / tmc) / 2^(div); */
493 	/*
494 	 * TD == tmc * 2^(n)
495 	 *
496 	 * note:
497 	 * 1 <= TD <= 256		TD is inc of 1
498 	 * 2 <= TD <= 512		TD is inc of 2
499 	 * 4 <= TD <= 1024		TD is inc of 4
500 	 * ...
501 	 * 512 <= TD <= 256*512		TD is inc of 512
502 	 *
503 	 * so note there are overlaps.  We lose precision
504 	 * as div increases, so we wish to minimize div.
505 	 *
506 	 * basically we want to do
507 	 *
508 	 * tmc = chip / hz, but have tmc <= 256
509 	 */
510 
511 	/* assume system clock is 9.8304MHz or 9830400Hz */
512 	clock = 9830400 >> 1;
513 
514 	/* round down */
515 	div = 0;
516 	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
517 		clock >>= 1;
518 		div++;
519 	}
520 	if (clock / tmc > hz)
521 		tmc++;
522 	if (!tmc)
523 		tmc = 1;
524 
525 	if (div > SCA_RXS_DIV_512) {
526 		/* set to maximums */
527 		div = SCA_RXS_DIV_512;
528 		tmc = 0;
529 	}
530 
531 	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
532 	return (div & 0xFF);
533 }
534 #endif
535 
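/*
 * Worked example with assumed numbers: asking for hz = 64000 against the
 * 9830400 >> 1 = 4915200Hz clock gives tmc = 4915200 / 64000 = 76, which
 * the round-up bumps to 77 because 4915200 / 76 still exceeds 64000;
 * div stays 0.  TD = 77 * 2^0, for an actual rate of 4915200 / 77, or
 * about 63834Hz.
 */
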
536 /*
537  * initialize the port's MSCI
538  */
539 static void
540 sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
541 {
542 	/* reset the channel */
543 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);
544 
545 	msci_write_1(scp, SCA_MD00,
546 		     (  SCA_MD0_CRC_1
547 		      | SCA_MD0_CRC_CCITT
548 		      | SCA_MD0_CRC_ENABLE
549 		      | SCA_MD0_MODE_HDLC));
550 #if 0
551 	/* immediately send receive reset so the above takes */
552 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
553 #endif
554 
555 	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
556 	msci_write_1(scp, SCA_MD20,
557 		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));
558 
559 	/* be safe and do it again */
560 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
561 
562 	/* setup underrun and idle control, and initial RTS state */
563 	msci_write_1(scp, SCA_CTL0,
564 	     (SCA_CTL_IDLC_PATTERN
565 	     | SCA_CTL_UDRNC_AFTER_FCS
566 	     | SCA_CTL_RTS_LOW));
567 
568 	/* reset the transmitter */
569 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
570 
571 	/*
572 	 * set the clock sources
573 	 */
574 	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
575 	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
576 	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);
577 
578 	/* set external clock generate as requested */
579 	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);
580 
581 	/*
582 	 * XXX don't pay attention to CTS or CD changes right now.  I can't
583 	 * simulate one, and the transmitter will try to transmit even if
584 	 * CD isn't there anyway, so nothing bad SHOULD happen.
585 	 */
586 #if 0
587 	msci_write_1(scp, SCA_IE00, 0);
588 	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
589 #else
590 	/* this would deliver transmitter underrun to ST1/ISR1 */
591 	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
592 	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
593 #endif
594 	msci_write_1(scp, SCA_IE20, 0);
595 
596 	msci_write_1(scp, SCA_FIE0, 0);
597 
598 	msci_write_1(scp, SCA_SA00, 0);
599 	msci_write_1(scp, SCA_SA10, 0);
600 
601 	msci_write_1(scp, SCA_IDL0, 0x7e);
602 
603 	msci_write_1(scp, SCA_RRC0, 0x0e);
604 	/* msci_write_1(scp, SCA_TRC00, 0x10); */
605 	/*
606 	 * The correct values here are important for avoiding underruns:
607 	 * for any FIFO level less than or equal to TRC0, txrdy is asserted,
608 	 * which will start the dmac transfer to the fifo.  Once the FIFO
609 	 * holds TRC1 + 1 or more bytes, txrdy is cleared, which will stop dma.
610 	 *
611 	 * Thus, if we are using a very fast clock that empties the fifo
612 	 * quickly, delays in the dmac starting to fill the fifo can
613 	 * lead to underruns, so we want a fairly full fifo to still
614 	 * cause the dmac to start.  For cards with on-board ram this
615 	 * has no effect on system performance.  For cards that dma
616 	 * to/from system memory it will cause more, shorter
617 	 * bus accesses rather than fewer, longer ones.
618 	 */
619 	msci_write_1(scp, SCA_TRC00, 0x00);
620 	msci_write_1(scp, SCA_TRC10, 0x1f);
621 }
622 
623 /*
624  * Take the memory for the port and construct two circular linked lists of
625  * descriptors (one tx, one rx) and set the pointers in these descriptors
626  * to point to the buffer space for this port.
627  */
628 static void
629 sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
630 {
631 	sca_desc_t *desc;
632 	u_int32_t desc_p;
633 	u_int32_t buf_p;
634 	int i;
635 
636 	if (sc->sc_usedma)
637 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
638 		    BUS_DMASYNC_PREWRITE);
639 	else {
640 		/*
641 		 * XXX assumes that all tx desc and bufs in same page
642 		 */
643 		sc->scu_page_on(sc);
644 		sc->scu_set_page(sc, scp->sp_txdesc_p);
645 	}
646 
647 	desc = scp->sp_txdesc;
648 	desc_p = scp->sp_txdesc_p;
649 	buf_p = scp->sp_txbuf_p;
650 	scp->sp_txcur = 0;
651 	scp->sp_txinuse = 0;
652 
653 #ifdef DEBUG
654 	/* make sure that we won't wrap */
655 	if ((desc_p & 0xffff0000) !=
656 	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
657 		panic("sca: tx descriptors cross architectural boundary");
658 	if ((buf_p & 0xff000000) !=
659 	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
660 		panic("sca: tx buffers cross architectural boundary");
661 #endif
662 
663 	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
664 		/*
665 		 * desc_p points to the physical address of the NEXT desc
666 		 */
667 		desc_p += sizeof(sca_desc_t);
668 
669 		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
670 		sca_desc_write_bufp(sc, desc, buf_p);
671 		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
672 		sca_desc_write_stat(sc, desc, 0);
673 
674 		desc++;  /* point to the next descriptor */
675 		buf_p += SCA_BSIZE;
676 	}
677 
678 	/*
679 	 * "heal" the circular list by making the last entry point to the
680 	 * first.
681 	 */
682 	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);
683 
684 	/*
685 	 * Now, initialize the transmit DMA logic
686 	 *
687 	 * CPB == chain pointer base address
688 	 */
689 	dmac_write_1(scp, SCA_DSR1, 0);
690 	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
691 	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
692 	/* XXX1
693 	dmac_write_1(scp, SCA_DIR1,
694 		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
695 	 */
696 	dmac_write_1(scp, SCA_DIR1,
697 		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
698 	dmac_write_1(scp, SCA_CPB1,
699 		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));
700 
701 	/*
702 	 * now, do the same thing for receive descriptors
703 	 *
704 	 * XXX assumes that all rx desc and bufs in same page
705 	 */
706 	if (!sc->sc_usedma)
707 		sc->scu_set_page(sc, scp->sp_rxdesc_p);
708 
709 	desc = scp->sp_rxdesc;
710 	desc_p = scp->sp_rxdesc_p;
711 	buf_p = scp->sp_rxbuf_p;
712 
713 #ifdef DEBUG
714 	/* make sure that we won't wrap */
715 	if ((desc_p & 0xffff0000) !=
716 	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
717 		panic("sca: rx descriptors cross architectural boundary");
718 	if ((buf_p & 0xff000000) !=
719 	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
720 		panic("sca: rx buffers cross architectural boundary");
721 #endif
722 
723 	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
724 		/*
725 		 * desc_p points to the physical address of the NEXT desc
726 		 */
727 		desc_p += sizeof(sca_desc_t);
728 
729 		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
730 		sca_desc_write_bufp(sc, desc, buf_p);
731 		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
732 		sca_desc_write_buflen(sc, desc, 0);
733 		sca_desc_write_stat(sc, desc, 0);
734 
735 		desc++;  /* point to the next descriptor */
736 		buf_p += SCA_BSIZE;
737 	}
738 
739 	/*
740 	 * "heal" the circular list by making the last entry point to the
741 	 * first.
742 	 */
743 	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);
744 
745 	sca_dmac_rxinit(scp);
746 
747 	if (sc->sc_usedma)
748 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
749 		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
750 	else
751 		sc->scu_page_off(sc);
752 }
753 
754 /*
755  * reset and reinitialize the receive DMA logic
756  */
757 static void
758 sca_dmac_rxinit(sca_port_t *scp)
759 {
760 	/*
761 	 * ... and the receive DMA logic ...
762 	 */
763 	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
764 	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);
765 
766 	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
767 	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);
768 
769 	/* reset descriptors to initial state */
770 	scp->sp_rxstart = 0;
771 	scp->sp_rxend = scp->sp_nrxdesc - 1;
772 
773 	/*
774 	 * CPB == chain pointer base
775 	 * CDA == current descriptor address
776 	 * EDA == error descriptor address (overwrite position)
777 	 *	because cda can't equal eda when starting, we always
778 	 *	keep a single-buffer gap between cda and eda
779 	 */
780 	dmac_write_1(scp, SCA_CPB0,
781 	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
782 	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
783 	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
784 	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));
785 
786 	/*
787 	 * enable receiver DMA
788 	 */
789 	dmac_write_1(scp, SCA_DIR0,
790 		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
791 	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
792 }
793 
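/*
 * Illustrative ring state right after sca_dmac_rxinit(), assuming
 * sp_nrxdesc == 4:
 *
 *	desc[0]  <- CDA: the next descriptor the DMAC will fill
 *	desc[1]
 *	desc[2]
 *	desc[3]  <- EDA: the DMAC must stop before overwriting this one
 *
 * As frames are consumed, sca_frame_read_done() advances EDA one
 * descriptor at a time, preserving the single-buffer gap noted above.
 */
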
794 /*
795  * Queue the packet for our start routine to transmit
796  */
797 static int
798 sca_output(ifp, m, dst, rt0)
799 	struct ifnet *ifp;
800 	struct mbuf *m;
801 	struct sockaddr *dst;
802 	struct rtentry *rt0;
803 {
804 #ifdef ISO
805 	struct hdlc_llc_header *llc;
806 #endif
807 	struct hdlc_header *hdlc;
808 	struct ifqueue *ifq = NULL;
809 	int s, error, len;
810 	short mflags;
811 	ALTQ_DECL(struct altq_pktattr pktattr;)
812 
813 	error = 0;
814 
815 	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
816 		error = ENETDOWN;
817 		goto bad;
818 	}
819 
820 	/*
821 	 * If the queueing discipline needs packet classification,
822 	 * do it before prepending link headers.
823 	 */
824 	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);
825 
826 	/*
827 	 * determine address family, and priority for this packet
828 	 */
829 	switch (dst->sa_family) {
830 #ifdef INET
831 	case AF_INET:
832 #ifdef SCA_USE_FASTQ
833 		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
834 		    == IPTOS_LOWDELAY)
835 			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
836 #endif
837 		/*
838 		 * Add cisco serial line header. If there is no
839 		 * space in the first mbuf, allocate another.
840 		 */
841 		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
842 		if (m == 0)
843 			return (ENOBUFS);
844 		hdlc = mtod(m, struct hdlc_header *);
845 		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
846 		break;
847 #endif
848 #ifdef INET6
849 	case AF_INET6:
850 		/*
851 		 * Add cisco serial line header. If there is no
852 		 * space in the first mbuf, allocate another.
853 		 */
854 		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
855 		if (m == 0)
856 			return (ENOBUFS);
857 		hdlc = mtod(m, struct hdlc_header *);
858 		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
859 		break;
860 #endif
861 #ifdef ISO
862 	case AF_ISO:
863 		/*
864 		 * Add cisco llc serial line header. If there is no
865 		 * space in the first mbuf, allocate another.
866 		 */
867 		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
868 		if (m == 0)
869 			return (ENOBUFS);
870 		hdlc = mtod(m, struct hdlc_header *);
871 		llc = mtod(m, struct hdlc_llc_header *);
872 		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
873 		llc->hl_ffb = 0;
874 		break;
875 #endif
876 	default:
877 		printf("%s: address family %d unsupported\n",
878 		       ifp->if_xname, dst->sa_family);
879 		error = EAFNOSUPPORT;
880 		goto bad;
881 	}
882 
883 	/* finish */
884 	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
885 		hdlc->h_addr = CISCO_MULTICAST;
886 	else
887 		hdlc->h_addr = CISCO_UNICAST;
888 	hdlc->h_resv = 0;
889 
890 	/*
891 	 * queue the packet.  If interactive, use the fast queue.
892 	 */
893 	mflags = m->m_flags;
894 	len = m->m_pkthdr.len;
895 	s = splnet();
896 	if (ifq != NULL) {
897 		if (IF_QFULL(ifq)) {
898 			IF_DROP(ifq);
899 			m_freem(m);
900 			error = ENOBUFS;
901 		} else
902 			IF_ENQUEUE(ifq, m);
903 	} else
904 		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
905 	if (error != 0) {
906 		splx(s);
907 		ifp->if_oerrors++;
908 		ifp->if_collisions++;
909 		return (error);
910 	}
911 	ifp->if_obytes += len;
912 	if (mflags & M_MCAST)
913 		ifp->if_omcasts++;
914 
915 	sca_start(ifp);
916 	splx(s);
917 
918 	return (error);
919 
920  bad:
921 	if (m)
922 		m_freem(m);
923 	return (error);
924 }
925 
926 static int
927 sca_ioctl(ifp, cmd, addr)
928      struct ifnet *ifp;
929      u_long cmd;
930      caddr_t addr;
931 {
932 	struct ifreq *ifr;
933 	struct ifaddr *ifa;
934 	int error;
935 	int s;
936 
937 	s = splnet();
938 
939 	ifr = (struct ifreq *)addr;
940 	ifa = (struct ifaddr *)addr;
941 	error = 0;
942 
943 	switch (cmd) {
944 	case SIOCSIFADDR:
945 		switch(ifa->ifa_addr->sa_family) {
946 #ifdef INET
947 		case AF_INET:
948 #endif
949 #ifdef INET6
950 		case AF_INET6:
951 #endif
952 #if defined(INET) || defined(INET6)
953 			ifp->if_flags |= IFF_UP;
954 			sca_port_up(ifp->if_softc);
955 			break;
956 #endif
957 		default:
958 			error = EAFNOSUPPORT;
959 			break;
960 		}
961 		break;
962 
963 	case SIOCSIFDSTADDR:
964 #ifdef INET
965 		if (ifa->ifa_addr->sa_family == AF_INET)
966 			break;
967 #endif
968 #ifdef INET6
969 		if (ifa->ifa_addr->sa_family == AF_INET6)
970 			break;
971 #endif
972 		error = EAFNOSUPPORT;
973 		break;
974 
975 	case SIOCADDMULTI:
976 	case SIOCDELMULTI:
977 		/* XXX need multicast group management code */
978 		if (ifr == 0) {
979 			error = EAFNOSUPPORT;		/* XXX */
980 			break;
981 		}
982 		switch (ifr->ifr_addr.sa_family) {
983 #ifdef INET
984 		case AF_INET:
985 			break;
986 #endif
987 #ifdef INET6
988 		case AF_INET6:
989 			break;
990 #endif
991 		default:
992 			error = EAFNOSUPPORT;
993 			break;
994 		}
995 		break;
996 
997 	case SIOCSIFFLAGS:
998 		if (ifr->ifr_flags & IFF_UP) {
999 			ifp->if_flags |= IFF_UP;
1000 			sca_port_up(ifp->if_softc);
1001 		} else {
1002 			ifp->if_flags &= ~IFF_UP;
1003 			sca_port_down(ifp->if_softc);
1004 		}
1005 
1006 		break;
1007 
1008 	default:
1009 		error = EINVAL;
1010 	}
1011 
1012 	splx(s);
1013 	return error;
1014 }
1015 
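/*
 * For illustration (hypothetical interface and addresses): bringing a
 * port up with
 *
 *	ifconfig ntwo0 10.0.0.1 10.0.0.2 up
 *
 * arrives here as SIOCSIFADDR, which marks the interface IFF_UP and
 * calls sca_port_up() to raise DTR/RTS and enable the port's interrupts.
 */
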
1016 /*
1017  * start packet transmission on the interface
1018  *
1019  * MUST BE CALLED AT splnet()
1020  */
1021 static void
1022 sca_start(ifp)
1023 	struct ifnet *ifp;
1024 {
1025 	sca_port_t *scp = ifp->if_softc;
1026 	struct sca_softc *sc = scp->sca;
1027 	struct mbuf *m, *mb_head;
1028 	sca_desc_t *desc;
1029 	u_int8_t *buf, stat;
1030 	u_int32_t buf_p;
1031 	int nexttx;
1032 	int trigger_xmit;
1033 	u_int len;
1034 
1035 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));
1036 
1037 	/*
1038 	 * can't queue when we are full or transmitter is busy
1039 	 */
1040 #ifdef oldcode
1041 	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
1042 	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1043 		return;
1044 #else
1045 	if (scp->sp_txinuse
1046 	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1047 		return;
1048 #endif
1049 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));
1050 
1051 	/*
1052 	 * XXX assume that all tx desc and bufs in same page
1053 	 */
1054 	if (sc->sc_usedma)
1055 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1056 		    0, sc->scu_allocsize,
1057 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1058 	else {
1059 		sc->scu_page_on(sc);
1060 		sc->scu_set_page(sc, scp->sp_txdesc_p);
1061 	}
1062 
1063 	trigger_xmit = 0;
1064 
1065  txloop:
1066 	IF_DEQUEUE(&scp->linkq, mb_head);
1067 	if (mb_head == NULL)
1068 #ifdef SCA_USE_FASTQ
1069 		IF_DEQUEUE(&scp->fastq, mb_head);
1070 	if (mb_head == NULL)
1071 #endif
1072 		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
1073 	if (mb_head == NULL)
1074 		goto start_xmit;
1075 
1076 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
1077 #ifdef oldcode
1078 	if (scp->txinuse != 0) {
1079 		/* Kill EOT interrupts on the previous descriptor. */
1080 		desc = &scp->sp_txdesc[scp->txcur];
1081 		stat = sca_desc_read_stat(sc, desc);
1082 		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);
1083 
1084 		/* Figure out what the next free descriptor is. */
1085 		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1086 	} else
1087 		nexttx = 0;
1088 #endif	/* oldcode */
1089 
1090 	if (scp->sp_txinuse)
1091 		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1092 	else
1093 		nexttx = 0;
1094 
1095 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));
1096 
1097 	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
1098 	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;
1099 
1100 	/* XXX hoping we can delay the desc write until we know we won't drop. */
1101 	desc = &scp->sp_txdesc[nexttx];
1102 
1103 	/* XXX isn't this set already?? */
1104 	sca_desc_write_bufp(sc, desc, buf_p);
1105 	len = 0;
1106 
1107 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));
1108 
1109 #if 0	/* uncomment this for a core in cc1 */
1110 X
1111 #endif
1112 	/*
1113 	 * Run through the chain, copying data into the descriptor as we
1114 	 * go.  If it won't fit in one transmission block, drop the packet.
1115 	 * No, this isn't nice, but most of the time it _will_ fit.
1116 	 */
1117 	for (m = mb_head ; m != NULL ; m = m->m_next) {
1118 		if (m->m_len != 0) {
1119 			len += m->m_len;
1120 			if (len > SCA_BSIZE) {
1121 				m_freem(mb_head);
1122 				goto txloop;
1123 			}
1124 			SCA_DPRINTF(SCA_DEBUG_TX,
1125 			    ("TX: about to mbuf len %d\n", m->m_len));
1126 
1127 			if (sc->sc_usedma)
1128 				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
1129 			else
1130 				bus_space_write_region_1(sc->scu_memt,
1131 				    sc->scu_memh, sca_page_addr(sc, buf_p),
1132 				    mtod(m, u_int8_t *), m->m_len);
1133 			buf += m->m_len;
1134 			buf_p += m->m_len;
1135 		}
1136 	}
1137 
1138 	/* set the buffer, the length, and mark end of frame and end of xfer */
1139 	sca_desc_write_buflen(sc, desc, len);
1140 	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);
1141 
1142 	ifp->if_opackets++;
1143 
1144 #if NBPFILTER > 0
1145 	/*
1146 	 * Pass packet to bpf if there is a listener.
1147 	 */
1148 	if (ifp->if_bpf)
1149 		bpf_mtap(ifp->if_bpf, mb_head);
1150 #endif
1151 
1152 	m_freem(mb_head);
1153 
1154 	scp->sp_txcur = nexttx;
1155 	scp->sp_txinuse++;
1156 	trigger_xmit = 1;
1157 
1158 	SCA_DPRINTF(SCA_DEBUG_TX,
1159 	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));
1160 
1161 	/*
1162 	 * XXX didn't this used to limit us to 1?  Multi-descriptor use may
1163 	 * be untested; sp_ntxdesc used to be hard-coded to 2, with the
1164 	 * claim of a too-hard-to-find bug.
1165 	 */
1166 #ifdef oldcode
1167 	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
1168 #endif
1169 	if (scp->sp_txinuse < scp->sp_ntxdesc)
1170 		goto txloop;
1171 
1172  start_xmit:
1173 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));
1174 
1175 	if (trigger_xmit != 0) {
1176 		/* set EOT on final descriptor */
1177 		desc = &scp->sp_txdesc[scp->sp_txcur];
1178 		stat = sca_desc_read_stat(sc, desc);
1179 		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
1180 	}
1181 
1182 	if (sc->sc_usedma)
1183 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
1184 		    sc->scu_allocsize,
1185 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1186 
1187 	if (trigger_xmit != 0)
1188 		sca_port_starttx(scp);
1189 
1190 	if (!sc->sc_usedma)
1191 		sc->scu_page_off(sc);
1192 }
1193 
1194 static void
1195 sca_watchdog(ifp)
1196 	struct ifnet *ifp;
1197 {
1198 }
1199 
1200 int
1201 sca_hardintr(struct sca_softc *sc)
1202 {
1203 	u_int8_t isr0, isr1, isr2;
1204 	int	ret;
1205 
1206 	ret = 0;  /* non-zero means we processed at least one interrupt */
1207 
1208 	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));
1209 
1210 	while (1) {
1211 		/*
1212 		 * read SCA interrupts
1213 		 */
1214 		isr0 = sca_read_1(sc, SCA_ISR0);
1215 		isr1 = sca_read_1(sc, SCA_ISR1);
1216 		isr2 = sca_read_1(sc, SCA_ISR2);
1217 
1218 		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
1219 			break;
1220 
1221 		SCA_DPRINTF(SCA_DEBUG_INTR,
1222 			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
1223 			     isr0, isr1, isr2));
1224 
1225 		/*
1226 		 * check DMAC interrupt
1227 		 */
1228 		if (isr1 & 0x0f)
1229 			ret += sca_dmac_intr(&sc->sc_ports[0],
1230 					     isr1 & 0x0f);
1231 
1232 		if (isr1 & 0xf0)
1233 			ret += sca_dmac_intr(&sc->sc_ports[1],
1234 			     (isr1 & 0xf0) >> 4);
1235 
1236 		/*
1237 		 * msci interrupts
1238 		 */
1239 		if (isr0 & 0x0f)
1240 			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);
1241 
1242 		if (isr0 & 0xf0)
1243 			ret += sca_msci_intr(&sc->sc_ports[1],
1244 			    (isr0 & 0xf0) >> 4);
1245 
1246 #if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
1247 		if (isr2)
1248 			ret += sca_timer_intr(sc, isr2);
1249 #endif
1250 	}
1251 
1252 	return (ret);
1253 }
1254 
1255 static int
1256 sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
1257 {
1258 	u_int8_t	 dsr;
1259 	int		 ret;
1260 
1261 	ret = 0;
1262 
1263 	/*
1264 	 * Check transmit channel
1265 	 */
1266 	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
1267 		SCA_DPRINTF(SCA_DEBUG_INTR,
1268 		    ("TX INTERRUPT port %d\n", scp->sp_port));
1269 
1270 		dsr = 1;
1271 		while (dsr != 0) {
1272 			ret++;
1273 			/*
1274 			 * reset interrupt
1275 			 */
1276 			dsr = dmac_read_1(scp, SCA_DSR1);
1277 			dmac_write_1(scp, SCA_DSR1,
1278 				     dsr | SCA_DSR_DEWD);
1279 
1280 			/*
1281 			 * filter out the bits we don't care about
1282 			 */
1283 			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
1284 			if (dsr == 0)
1285 				break;
1286 
1287 			/*
1288 			 * check for counter overflow
1289 			 */
1290 			if (dsr & SCA_DSR_COF) {
1291 				printf("%s: TXDMA counter overflow\n",
1292 				       scp->sp_if.if_xname);
1293 
1294 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1295 				scp->sp_txcur = 0;
1296 				scp->sp_txinuse = 0;
1297 			}
1298 
1299 			/*
1300 			 * check for buffer overflow
1301 			 */
1302 			if (dsr & SCA_DSR_BOF) {
1303 				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
1304 				       scp->sp_if.if_xname,
1305 				       dmac_read_2(scp, SCA_CDAL1),
1306 				       dmac_read_2(scp, SCA_EDAL1),
1307 				       dmac_read_1(scp, SCA_CPB1));
1308 
1309 				/*
1310 				 * Yikes.  Arrange for a full
1311 				 * transmitter restart.
1312 				 */
1313 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1314 				scp->sp_txcur = 0;
1315 				scp->sp_txinuse = 0;
1316 			}
1317 
1318 			/*
1319 			 * check for end of transfer, which is not
1320 			 * an error. It means that all data queued
1321 			 * was transmitted, and we mark ourself as
1322 			 * not in use and stop the watchdog timer.
1323 			 */
1324 			if (dsr & SCA_DSR_EOT) {
1325 				SCA_DPRINTF(SCA_DEBUG_TX,
1326 			    ("Transmit completed. cda %x eda %x dsr %x\n",
1327 				    dmac_read_2(scp, SCA_CDAL1),
1328 				    dmac_read_2(scp, SCA_EDAL1),
1329 				    dsr));
1330 
1331 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1332 				scp->sp_txcur = 0;
1333 				scp->sp_txinuse = 0;
1334 
1335 				/*
1336 				 * check for more packets
1337 				 */
1338 				sca_start(&scp->sp_if);
1339 			}
1340 		}
1341 	}
1342 	/*
1343 	 * receive channel check
1344 	 */
1345 	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
1346 		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
1347 		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));
1348 
1349 		dsr = 1;
1350 		while (dsr != 0) {
1351 			ret++;
1352 
1353 			dsr = dmac_read_1(scp, SCA_DSR0);
1354 			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);
1355 
1356 			/*
1357 			 * filter out the bits we don't care about
1358 			 */
1359 			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
1360 				| SCA_DSR_BOF | SCA_DSR_EOT);
1361 			if (dsr == 0)
1362 				break;
1363 
1364 			/*
1365 			 * End of frame
1366 			 */
1367 			if (dsr & SCA_DSR_EOM) {
1368 				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));
1369 
1370 				sca_get_packets(scp);
1371 			}
1372 
1373 			/*
1374 			 * check for counter overflow
1375 			 */
1376 			if (dsr & SCA_DSR_COF) {
1377 				printf("%s: RXDMA counter overflow\n",
1378 				       scp->sp_if.if_xname);
1379 
1380 				sca_dmac_rxinit(scp);
1381 			}
1382 
1383 			/*
1384 			 * check for end of transfer, which means we
1385 			 * ran out of descriptors to receive into.
1386 			 * This means the line is much faster than
1387 			 * we can handle.
1388 			 */
1389 			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
1390 				printf("%s: RXDMA buffer overflow\n",
1391 				       scp->sp_if.if_xname);
1392 
1393 				sca_dmac_rxinit(scp);
1394 			}
1395 		}
1396 	}
1397 
1398 	return ret;
1399 }
1400 
1401 static int
1402 sca_msci_intr(sca_port_t *scp, u_int8_t isr)
1403 {
1404 	u_int8_t st1, trc0;
1405 
1406 	/* get and clear the specific interrupt -- should act on it :) */
1407 	if ((st1 = msci_read_1(scp, SCA_ST10))) {
1408 		/* clear the interrupt */
1409 		msci_write_1(scp, SCA_ST10, st1);
1410 
1411 		if (st1 & SCA_ST1_UDRN) {
1412 			/* underrun -- try to increase ready control */
1413 			trc0 = msci_read_1(scp, SCA_TRC00);
1414 			if (trc0 == 0x1f)
1415 				printf("TX: underrun - fifo depth maxed\n");
1416 			else {
1417 				if ((trc0 += 2) > 0x1f)
1418 					trc0 = 0x1f;
1419 				SCA_DPRINTF(SCA_DEBUG_TX,
1420 				   ("TX: udrn - incr fifo to %d\n", trc0));
1421 				msci_write_1(scp, SCA_TRC00, trc0);
1422 			}
1423 		}
1424 	}
1425 	return (0);
1426 }
1427 
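/*
 * A worked example of the escalation above: TRC0 is initialized to 0x00
 * by sca_msci_init(), so successive underruns walk it through 0x02,
 * 0x04, ... 0x1e and finally 0x1f, asserting txrdy at progressively
 * higher FIFO levels so the DMAC starts refilling earlier.
 */
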
1428 static void
1429 sca_get_packets(sca_port_t *scp)
1430 {
1431 	struct sca_softc *sc;
1432 
1433 	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));
1434 
1435 	sc = scp->sca;
1436 	if (sc->sc_usedma)
1437 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1438 		    0, sc->scu_allocsize,
1439 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1440 	else {
1441 		/*
1442 		 * XXX this code is unable to deal with rx stuff
1443 		 * in more than 1 page
1444 		 */
1445 		sc->scu_page_on(sc);
1446 		sc->scu_set_page(sc, scp->sp_rxdesc_p);
1447 	}
1448 
1449 	/* process as many frames as are available */
1450 	while (sca_frame_avail(scp)) {
1451 		sca_frame_process(scp);
1452 		sca_frame_read_done(scp);
1453 	}
1454 
1455 	if (sc->sc_usedma)
1456 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1457 		    0, sc->scu_allocsize,
1458 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1459 	else
1460 		sc->scu_page_off(sc);
1461 }
1462 
1463 /*
1464  * Starting with the first descriptor we wanted to read into, up to but
1465  * not including the current SCA read descriptor, look for a packet.
1466  *
1467  * must be called at splnet()
1468  */
1469 static int
1470 sca_frame_avail(sca_port_t *scp)
1471 {
1472 	struct sca_softc *sc;
1473 	u_int16_t cda;
1474 	u_int32_t desc_p;	/* physical address (lower 16 bits) */
1475 	sca_desc_t *desc;
1476 	u_int8_t rxstat;
1477 	int cdaidx, toolong;
1478 
1479 	/*
1480 	 * Read the current descriptor from the SCA.
1481 	 */
1482 	sc = scp->sca;
1483 	cda = dmac_read_2(scp, SCA_CDAL0);
1484 
1485 	/*
1486 	 * calculate the index of the current descriptor
1487 	 */
1488 	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
1489 	desc_p = cda - desc_p;
1490 	cdaidx = desc_p / sizeof(sca_desc_t);
1491 
1492 	SCA_DPRINTF(SCA_DEBUG_RX,
1493 	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
1494 	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));
1495 
1496 	/* note confusion */
1497 	if (cdaidx >= scp->sp_nrxdesc)
1498 		panic("current descriptor index out of range");
1499 
1500 	/* see if we have a valid frame available */
1501 	toolong = 0;
1502 	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
1503 		/*
1504 		 * We might have a valid descriptor.  Set up a pointer
1505 		 * to the kva address for it so we can more easily examine
1506 		 * the contents.
1507 		 */
1508 		desc = &scp->sp_rxdesc[scp->sp_rxstart];
1509 		rxstat = sca_desc_read_stat(scp->sca, desc);
1510 
1511 		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
1512 		    scp->sp_port, scp->sp_rxstart, rxstat));
1513 
1514 		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
1515 		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));
1516 
1517 		/*
1518 		 * check for errors
1519 		 */
1520 		if (rxstat & SCA_DESC_ERRORS) {
1521 			/*
1522 			 * consider an error condition the end
1523 			 * of a frame
1524 			 */
1525 			scp->sp_if.if_ierrors++;
1526 			toolong = 0;
1527 			continue;
1528 		}
1529 
1530 		/*
1531 		 * if we aren't skipping overlong frames
1532 		 * we are done, otherwise reset and look for
1533 		 * another good frame
1534 		 */
1535 		if (rxstat & SCA_DESC_EOM) {
1536 			if (!toolong)
1537 				return (1);
1538 			toolong = 0;
1539 		} else if (!toolong) {
1540 			/*
1541 			 * we currently don't deal with frames
1542 			 * larger than a single buffer (fixed MTU)
1543 			 */
1544 			scp->sp_if.if_ierrors++;
1545 			toolong = 1;
1546 		}
1547 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
1548 		    scp->sp_rxstart));
1549 	}
1550 
1551 	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
1552 	return 0;
1553 }
1554 
1555 /*
1556  * Pass the packet up to the kernel if it is a packet we want to pay
1557  * attention to.
1558  *
1559  * MUST BE CALLED AT splnet()
1560  */
1561 static void
1562 sca_frame_process(sca_port_t *scp)
1563 {
1564 	struct ifqueue *ifq;
1565 	struct hdlc_header *hdlc;
1566 	struct cisco_pkt *cisco;
1567 	sca_desc_t *desc;
1568 	struct mbuf *m;
1569 	u_int8_t *bufp;
1570 	u_int16_t len;
1571 	u_int32_t t;
1572 
1573 	t = (time.tv_sec - boottime.tv_sec) * 1000;
1574 	desc = &scp->sp_rxdesc[scp->sp_rxstart];
1575 	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
1576 	len = sca_desc_read_buflen(scp->sca, desc);
1577 
1578 	SCA_DPRINTF(SCA_DEBUG_RX,
1579 	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
1580 	    (bus_addr_t)bufp, len));
1581 
1582 #if SCA_DEBUG_LEVEL > 0
1583 	if (sca_debug & SCA_DEBUG_RXPKT)
1584 		sca_frame_print(scp, desc, bufp);
1585 #endif
1586 	/*
1587 	 * skip packets that are too short
1588 	 */
1589 	if (len < sizeof(struct hdlc_header)) {
1590 		scp->sp_if.if_ierrors++;
1591 		return;
1592 	}
1593 
1594 	m = sca_mbuf_alloc(scp->sca, bufp, len);
1595 	if (m == NULL) {
1596 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
1597 		return;
1598 	}
1599 
1600 	/*
1601 	 * read and then strip off the HDLC information
1602 	 */
1603 	m = m_pullup(m, sizeof(struct hdlc_header));
1604 	if (m == NULL) {
1605 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1606 		return;
1607 	}
1608 
1609 #if NBPFILTER > 0
1610 	if (scp->sp_if.if_bpf)
1611 		bpf_mtap(scp->sp_if.if_bpf, m);
1612 #endif
1613 
1614 	scp->sp_if.if_ipackets++;
1615 
1616 	hdlc = mtod(m, struct hdlc_header *);
1617 	switch (ntohs(hdlc->h_proto)) {
1618 #ifdef INET
1619 	case HDLC_PROTOCOL_IP:
1620 		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
1621 		m->m_pkthdr.rcvif = &scp->sp_if;
1622 		m->m_pkthdr.len -= sizeof(struct hdlc_header);
1623 		m->m_data += sizeof(struct hdlc_header);
1624 		m->m_len -= sizeof(struct hdlc_header);
1625 		ifq = &ipintrq;
1626 		schednetisr(NETISR_IP);
1627 		break;
1628 #endif	/* INET */
1629 #ifdef INET6
1630 	case HDLC_PROTOCOL_IPV6:
1631 		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
1632 		m->m_pkthdr.rcvif = &scp->sp_if;
1633 		m->m_pkthdr.len -= sizeof(struct hdlc_header);
1634 		m->m_data += sizeof(struct hdlc_header);
1635 		m->m_len -= sizeof(struct hdlc_header);
1636 		ifq = &ip6intrq;
1637 		schednetisr(NETISR_IPV6);
1638 		break;
1639 #endif	/* INET6 */
1640 #ifdef ISO
1641 	case HDLC_PROTOCOL_ISO:
1642 		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
1643 			goto dropit;
1644 		m->m_pkthdr.rcvif = &scp->sp_if;
1645 		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
1646 		m->m_data += sizeof(struct hdlc_llc_header);
1647 		m->m_len -= sizeof(struct hdlc_llc_header);
1648 		ifq = &clnlintrq;
1649 		schednetisr(NETISR_ISO);
1650 		break;
1651 #endif	/* ISO */
1652 	case CISCO_KEEPALIVE:
1653 		SCA_DPRINTF(SCA_DEBUG_CISCO,
1654 			    ("Received CISCO keepalive packet\n"));
1655 
1656 		if (len < CISCO_PKT_LEN) {
1657 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1658 				    ("short CISCO packet %d, wanted %d\n",
1659 				     len, CISCO_PKT_LEN));
1660 			scp->sp_if.if_ierrors++;
1661 			goto dropit;
1662 		}
1663 
1664 		m = m_pullup(m, sizeof(struct cisco_pkt));
1665 		if (m == NULL) {
1666 			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1667 			return;
1668 		}
1669 
1670 		cisco = (struct cisco_pkt *)
1671 		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
1672 		m->m_pkthdr.rcvif = &scp->sp_if;
1673 
1674 		switch (ntohl(cisco->type)) {
1675 		case CISCO_ADDR_REQ:
1676 			printf("Got CISCO addr_req, ignoring\n");
1677 			scp->sp_if.if_ierrors++;
1678 			goto dropit;
1679 
1680 		case CISCO_ADDR_REPLY:
1681 			printf("Got CISCO addr_reply, ignoring\n");
1682 			scp->sp_if.if_ierrors++;
1683 			goto dropit;
1684 
1685 		case CISCO_KEEPALIVE_REQ:
1686 
1687 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1688 				    ("Received KA, mseq %d,"
1689 				     " yseq %d, rel 0x%04x, t0"
1690 				     " %04x, t1 %04x\n",
1691 				     ntohl(cisco->par1), ntohl(cisco->par2),
1692 				     ntohs(cisco->rel), ntohs(cisco->time0),
1693 				     ntohs(cisco->time1)));
1694 
1695 			scp->cka_lastrx = ntohl(cisco->par1);
1696 			scp->cka_lasttx++;
1697 
1698 			/*
1699 			 * schedule the transmit right here.
1700 			 */
1701 			cisco->par2 = cisco->par1;
1702 			cisco->par1 = htonl(scp->cka_lasttx);
1703 			cisco->time0 = htons((u_int16_t)(t >> 16));
1704 			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));
1705 
1706 			ifq = &scp->linkq;
1707 			if (IF_QFULL(ifq)) {
1708 				IF_DROP(ifq);
1709 				goto dropit;
1710 			}
1711 			IF_ENQUEUE(ifq, m);
1712 
1713 			sca_start(&scp->sp_if);
1714 
1715 			/* since sca_start() may have reset the page, fix it */
1716 			if (!scp->sca->sc_usedma) {
1717 				scp->sca->scu_set_page(scp->sca,
1718 				    scp->sp_rxdesc_p);
1719 				scp->sca->scu_page_on(scp->sca);
1720 			}
1721 			return;
1722 		default:
1723 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1724 				    ("Unknown CISCO keepalive protocol 0x%04x\n",
1725 				     ntohl(cisco->type)));
1726 
1727 			scp->sp_if.if_noproto++;
1728 			goto dropit;
1729 		}
1730 		return;
1731 	default:
1732 		SCA_DPRINTF(SCA_DEBUG_RX,
1733 			    ("Unknown/unexpected ethertype 0x%04x\n",
1734 			     ntohs(hdlc->h_proto)));
1735 		scp->sp_if.if_noproto++;
1736 		goto dropit;
1737 	}
1738 
1739 	/* queue the packet */
1740 	if (!IF_QFULL(ifq)) {
1741 		IF_ENQUEUE(ifq, m);
1742 	} else {
1743 		IF_DROP(ifq);
1744 		scp->sp_if.if_iqdrops++;
1745 		goto dropit;
1746 	}
1747 	return;
1748 dropit:
1749 	if (m)
1750 		m_freem(m);
1751 	return;
1752 }
1753 
1754 #if SCA_DEBUG_LEVEL > 0
1755 /*
1756  * do a hex dump of the packet received into descriptor "desc" with
1757  * data buffer "p"
1758  */
1759 static void
1760 sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
1761 {
1762 	int i;
1763 	int nothing_yet = 1;
1764 	struct sca_softc *sc;
1765 	u_int len;
1766 
1767 	sc = scp->sca;
1768 	printf("desc va %p: chainp 0x%x bufp 0x%x stat 0x%x len %d\n",
1769 	       desc,
1770 	       sca_desc_read_chainp(sc, desc),
1771 	       sca_desc_read_bufp(sc, desc),
1772 	       sca_desc_read_stat(sc, desc),
1773 	       (len = sca_desc_read_buflen(sc, desc)));
1774 
1775 	for (i = 0 ; i < len && i < 256; i++) {
1776 		if (nothing_yet == 1 &&
1777 		    (sc->sc_usedma ? *p
1778 			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
1779 		    sca_page_addr(sc, p))) == 0) {
1780 			p++;
1781 			continue;
1782 		}
1783 		nothing_yet = 0;
1784 		if (i % 16 == 0)
1785 			printf("\n");
1786 		printf("%02x ",
1787 		    (sc->sc_usedma ? *p
1788 		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
1789 		    sca_page_addr(sc, p))));
1790 		p++;
1791 	}
1792 
1793 	if (i % 16 != 1)
1794 		printf("\n");
1795 }
1796 #endif
1797 
1798 /*
1799  * adjust things because we have just read the current starting
1800  * frame
1801  *
1802  * must be called at splnet()
1803  */
1804 static void
1805 sca_frame_read_done(sca_port_t *scp)
1806 {
1807 	u_int16_t edesc_p;
1808 
1809 	/* update where our indices are */
1810 	scp->sp_rxend = scp->sp_rxstart;
1811 	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;
1812 
1813 	/* update the error [end] descriptor */
1814 	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
1815 	    (sizeof(sca_desc_t) * scp->sp_rxend);
1816 	dmac_write_2(scp, SCA_EDAL0, edesc_p);
1817 }
1818 
1819 /*
1820  * set a port to the "up" state
1821  */
1822 static void
1823 sca_port_up(sca_port_t *scp)
1824 {
1825 	struct sca_softc *sc = scp->sca;
1826 #if 0
1827 	u_int8_t ier0, ier1;
1828 #endif
1829 
1830 	/*
1831 	 * reset things
1832 	 */
1833 #if 0
1834 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
1835 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
1836 #endif
1837 	/*
1838 	 * clear in-use flag
1839 	 */
1840 	scp->sp_if.if_flags &= ~IFF_OACTIVE;
1841 	scp->sp_if.if_flags |= IFF_RUNNING;
1842 
1843 	/*
1844 	 * raise DTR
1845 	 */
1846 	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);
1847 
1848 	/*
1849 	 * raise RTS
1850 	 */
1851 	msci_write_1(scp, SCA_CTL0,
1852 	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1853 	     | SCA_CTL_RTS_HIGH);
1854 
1855 #if 0
1856 	/*
1857 	 * enable interrupts (no timer IER2)
1858 	 */
1859 	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1860 	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1861 	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1862 	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1863 	if (scp->sp_port == 1) {
1864 		ier0 <<= 4;
1865 		ier1 <<= 4;
1866 	}
1867 	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
1868 	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
1869 #else
1870 	if (scp->sp_port == 0) {
1871 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
1872 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
1873 	} else {
1874 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
1875 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
1876 	}
1877 #endif
1878 
1879 	/*
1880 	 * enable transmit and receive
1881 	 */
1882 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
1883 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);
1884 
1885 	/*
1886 	 * reset internal state
1887 	 */
1888 	scp->sp_txinuse = 0;
1889 	scp->sp_txcur = 0;
1890 	scp->cka_lasttx = time.tv_usec;
1891 	scp->cka_lastrx = 0;
1892 }
1893 
1894 /*
1895  * set a port to the "down" state
1896  */
1897 static void
1898 sca_port_down(sca_port_t *scp)
1899 {
1900 	struct sca_softc *sc = scp->sca;
1901 #if 0
1902 	u_int8_t ier0, ier1;
1903 #endif
1904 
1905 	/*
1906 	 * lower DTR
1907 	 */
1908 	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);
1909 
1910 	/*
1911 	 * lower RTS
1912 	 */
1913 	msci_write_1(scp, SCA_CTL0,
1914 	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1915 	     | SCA_CTL_RTS_LOW);
1916 
1917 	/*
1918 	 * disable interrupts
1919 	 */
1920 #if 0
1921 	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1922 	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1923 	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1924 	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1925 	if (scp->sp_port == 1) {
1926 		ier0 <<= 4;
1927 		ier1 <<= 4;
1928 	}
1929 	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
1930 	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
1931 #else
1932 	if (scp->sp_port == 0) {
1933 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
1934 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
1935 	} else {
1936 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
1937 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
1938 	}
1939 #endif
1940 
1941 	/*
1942 	 * disable transmit and receive
1943 	 */
1944 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
1945 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);
1946 
1947 	/*
1948 	 * no, we're not in use anymore
1949 	 */
1950 	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
1951 }
1952 
1953 /*
1954  * disable all DMA and interrupts for all ports at once.
1955  */
1956 void
1957 sca_shutdown(struct sca_softc *sca)
1958 {
1959 	/*
1960 	 * disable DMA and interrupts
1961 	 */
1962 	sca_write_1(sca, SCA_DMER, 0);
1963 	sca_write_1(sca, SCA_IER0, 0);
1964 	sca_write_1(sca, SCA_IER1, 0);
1965 }
1966 
1967 /*
1968  * If there are packets to transmit, start the transmit DMA logic.
1969  */
1970 static void
1971 sca_port_starttx(sca_port_t *scp)
1972 {
1973 	struct sca_softc *sc;
1974 	u_int32_t	startdesc_p, enddesc_p;
1975 	int enddesc;
1976 
1977 	sc = scp->sca;
1978 
1979 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));
1980 
1981 	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
1982 	    || scp->sp_txinuse == 0)
1983 		return;
1984 
1985 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));
1986 
1987 	scp->sp_if.if_flags |= IFF_OACTIVE;
1988 
1989 	/*
1990 	 * We have something to do, since we have at least one packet
1991 	 * waiting, and we are not already marked as active.
1992 	 */
1993 	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1994 	startdesc_p = scp->sp_txdesc_p;
1995 	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;
1996 
1997 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
1998 	    startdesc_p, enddesc_p));
1999 
2000 	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
2001 	dmac_write_2(scp, SCA_CDAL1,
2002 		     (u_int16_t)(startdesc_p & 0x0000ffff));
2003 
2004 	/*
2005 	 * enable the DMA
2006 	 */
2007 	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
2008 }
2009 
2010 /*
2011  * allocate an mbuf at least long enough to hold "len" bytes.
2012  * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
2013  * otherwise let the caller handle copying the data in.
2014  */
2015 static struct mbuf *
2016 sca_mbuf_alloc(struct sca_softc *sc, caddr_t p, u_int len)
2017 {
2018 	struct mbuf *m;
2019 
2020 	/*
2021 	 * allocate an mbuf and copy the important bits of data
2022 	 * into it.  If the packet won't fit in the header,
2023 	 * allocate a cluster for it and store it there.
2024 	 */
2025 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2026 	if (m == NULL)
2027 		return NULL;
2028 	if (len > MHLEN) {
2029 		if (len > MCLBYTES) {
2030 			m_freem(m);
2031 			return NULL;
2032 		}
2033 		MCLGET(m, M_DONTWAIT);
2034 		if ((m->m_flags & M_EXT) == 0) {
2035 			m_freem(m);
2036 			return NULL;
2037 		}
2038 	}
2039 	if (p != NULL) {
2040 		/* XXX do we need to sync here? */
2041 		if (sc->sc_usedma)
2042 			memcpy(mtod(m, caddr_t), p, len);
2043 		else
2044 			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
2045 			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
2046 	}
2047 	m->m_len = len;
2048 	m->m_pkthdr.len = len;
2049 
2050 	return (m);
2051 }
2052 
2053 /*
2054  * get the base clock
2055  */
2056 void
2057 sca_get_base_clock(struct sca_softc *sc)
2058 {
2059 	struct timeval btv, ctv, dtv;
2060 	u_int64_t bcnt;
2061 	u_int32_t cnt;
2062 	u_int16_t subcnt;
2063 
2064 	/* disable the timer, set prescale to 0 */
2065 	sca_write_1(sc, SCA_TCSR0, 0);
2066 	sca_write_1(sc, SCA_TEPR0, 0);
2067 
2068 	/* reset the counter */
2069 	(void)sca_read_1(sc, SCA_TCSR0);
2070 	subcnt = sca_read_2(sc, SCA_TCNTL0);
2071 
2072 	/* count to max */
2073 	sca_write_2(sc, SCA_TCONRL0, 0xffff);
2074 
2075 	cnt = 0;
2076 	microtime(&btv);
2077 	/* start the timer -- no interrupt enable */
2078 	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
2079 	for (;;) {
2080 		microtime(&ctv);
2081 
2082 		/* end around 3/4 of a second */
2083 		timersub(&ctv, &btv, &dtv);
2084 		if (dtv.tv_usec >= 750000)
2085 			break;
2086 
2087 		/* spin */
2088 		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
2089 			;
2090 		/* reset the timer */
2091 		(void)sca_read_2(sc, SCA_TCNTL0);
2092 		cnt++;
2093 	}
2094 
2095 	/* stop the timer */
2096 	sca_write_1(sc, SCA_TCSR0, 0);
2097 
2098 	subcnt = sca_read_2(sc, SCA_TCNTL0);
2099 	/* add the slop in and get the total timer ticks */
2100 	cnt = (cnt << 16) | subcnt;
2101 
2102 	/* cnt is 1/8 the actual time */
2103 	bcnt = cnt * 8;
2104 	/* make it proportional to 3/4 of a second */
2105 	bcnt *= (u_int64_t)750000;
2106 	bcnt /= (u_int64_t)dtv.tv_usec;
2107 	cnt = bcnt;
2108 
2109 	/* make it Hz */
2110 	cnt *= 4;
2111 	cnt /= 3;
2112 
2113 	SCA_DPRINTF(SCA_DEBUG_CLOCK,
2114 	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));
2115 
2116 	/*
2117 	 * round to the nearest 200 -- this allows for +-3 ticks error
2118 	 */
2119 	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
2120 }
2121 
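/*
 * Worked example with assumed numbers: a 9.8304MHz base clock makes the
 * timer (which counts at baseclock / 8) accumulate about 921600 ticks
 * in exactly 3/4 of a second; 921600 * 8 * 4 / 3 = 9830400Hz, and the
 * final rounding to the nearest 200 leaves that value unchanged.
 */
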
2122 /*
2123  * print the information about the clock on the ports
2124  */
2125 void
2126 sca_print_clock_info(struct sca_softc *sc)
2127 {
2128 	struct sca_port *scp;
2129 	u_int32_t mhz, div;
2130 	int i;
2131 
2132 	printf("%s: base clock %d Hz\n", sc->sc_parent->dv_xname,
2133 	    sc->sc_baseclock);
2134 
2135 	/* print the information about the port clock selection */
2136 	for (i = 0; i < sc->sc_numports; i++) {
2137 		scp = &sc->sc_ports[i];
2138 		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
2139 		div = scp->sp_rxs & SCA_RXS_DIV_MASK;
2140 
2141 		printf("%s: rx clock: ", scp->sp_if.if_xname);
2142 		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
2143 		case SCA_RXS_CLK_LINE:
2144 			printf("line");
2145 			break;
2146 		case SCA_RXS_CLK_LINE_SN:
2147 			printf("line with noise suppression");
2148 			break;
2149 		case SCA_RXS_CLK_INTERNAL:
2150 			printf("internal %d Hz", (mhz >> div));
2151 			break;
2152 		case SCA_RXS_CLK_ADPLL_OUT:
2153 			printf("adpll using internal %d Hz", (mhz >> div));
2154 			break;
2155 		case SCA_RXS_CLK_ADPLL_IN:
2156 			printf("adpll using line clock");
2157 			break;
2158 		}
2159 		printf("  tx clock: ");
2160 		div = scp->sp_txs & SCA_TXS_DIV_MASK;
2161 		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
2162 		case SCA_TXS_CLK_LINE:
2163 			printf("line\n");
2164 			break;
2165 		case SCA_TXS_CLK_INTERNAL:
2166 			printf("internal %d Hz\n", (mhz >> div));
2167 			break;
2168 		case SCA_TXS_CLK_RXCLK:
2169 			printf("rxclock\n");
2170 			break;
2171 		}
2172 		if (scp->sp_eclock)
2173 			printf("%s: outputting line clock\n",
2174 			    scp->sp_if.if_xname);
2175 	}
2176 }
2177 
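/*
 * Example console output, with illustrative values:
 *
 *	ntwo0: base clock 9830400 Hz
 *	ntwo0: rx clock: line  tx clock: line
 */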
2178