1 /*	$NetBSD: hd64570.c,v 1.57 2022/09/03 02:48:00 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 1999 Christian E. Hopps
5  * Copyright (c) 1998 Vixie Enterprises
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of Vixie Enterprises nor the names
18  *    of its contributors may be used to endorse or promote products derived
19  *    from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
22  * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
23  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25  * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
26  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * This software has been written for Vixie Enterprises by Michael Graff
36  * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
37  * ``http://www.vix.com''.
38  */
39 
40 /*
41  * TODO:
42  *
43  *	o  teach the receive logic about errors, and about long frames that
44  *         span more than one input buffer.  (Right now, receive/transmit is
45  *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
46  *	   This is currently 1504, which is large enough to hold the HDLC
47  *	   header and the packet itself.)  Packets which are too long are
48  *	   silently dropped on both transmit and receive.
49  *	o  write code to handle the msci interrupts, needed only for CD
50  *	   and CTS changes.
51  *	o  consider switching back to a "queue tx with DMA active" model which
52  *	   should help sustain outgoing traffic
53  *	o  through clever use of bus_dma*() functions, it should be possible
54  *	   to map the mbuf's data area directly into a descriptor transmit
55  *	   buffer, removing the need to allocate extra memory.  If, however,
56  *	   we run out of descriptors for this, we will need to then allocate
57  *	   one large mbuf, copy the fragmented chain into it, and put it onto
58  *	   a single descriptor.
59  *	o  use bus_dmamap_sync() with the right offset and lengths, rather
60  *	   than cheating and always sync'ing the whole region.
61  *
62  *	o  perhaps allow rx and tx to be in more than one page
63  *	   if not using DMA.  currently the assumption is that
64  *	   rx uses a page and tx uses a page.
65  */
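
/*
 * A rough sketch (illustration only, not part of this driver) of the
 * bus_dma*() idea from the TODO above: load a per-descriptor DMA map
 * directly from the mbuf chain instead of copying, and fall back to a
 * copy only when the chain is too fragmented.  "txmap" is a
 * hypothetical per-descriptor bus_dmamap_t that this driver does not
 * currently allocate:
 *
 *	error = bus_dmamap_load_mbuf(sc->scu_dmat, txmap, mb_head,
 *	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		(coalesce the chain into a single cluster mbuf,
 *		 then retry the load)
 *	}
 */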
66 
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.57 2022/09/03 02:48:00 thorpej Exp $");
69 
70 #include "opt_inet.h"
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/device.h>
75 #include <sys/mbuf.h>
76 #include <sys/socket.h>
77 #include <sys/sockio.h>
78 #include <sys/kernel.h>
79 
80 #include <net/if.h>
81 #include <net/if_types.h>
82 
83 #if defined(INET) || defined(INET6)
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/in_var.h>
87 #include <netinet/ip.h>
88 #ifdef INET6
89 #include <netinet6/in6_var.h>
90 #endif
91 #endif
92 
93 #include <net/bpf.h>
94 
95 #include <sys/cpu.h>
96 #include <sys/bus.h>
97 #include <sys/intr.h>
98 
99 #include <dev/pci/pcivar.h>
100 #include <dev/pci/pcireg.h>
101 #include <dev/pci/pcidevs.h>
102 
103 #include <dev/ic/hd64570reg.h>
104 #include <dev/ic/hd64570var.h>
105 
106 #define SCA_DEBUG_RX		0x0001
107 #define SCA_DEBUG_TX		0x0002
108 #define SCA_DEBUG_CISCO		0x0004
109 #define SCA_DEBUG_DMA		0x0008
110 #define SCA_DEBUG_RXPKT		0x0010
111 #define SCA_DEBUG_TXPKT		0x0020
112 #define SCA_DEBUG_INTR		0x0040
113 #define SCA_DEBUG_CLOCK		0x0080
114 
115 #if 0
116 #define SCA_DEBUG_LEVEL	( 0xFFFF )
117 #else
118 #define SCA_DEBUG_LEVEL 0
119 #endif
120 
121 u_int32_t sca_debug = SCA_DEBUG_LEVEL;
122 
123 #if SCA_DEBUG_LEVEL > 0
124 #define SCA_DPRINTF(l, x) do { \
125 	if ((l) & sca_debug) \
126 		printf x;\
127 	} while (0)
128 #else
129 #define SCA_DPRINTF(l, x)
130 #endif
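
/*
 * Note the extra parentheses in the second SCA_DPRINTF argument: the
 * whole printf() argument list travels as a single macro parameter,
 * e.g.
 *
 *	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));
 */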
131 
132 #if 0
133 #define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
134 #endif
135 
136 static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
137 static inline u_int8_t msci_read_1(sca_port_t *, u_int);
138 
139 static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
140 static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
141 static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
142 static inline u_int16_t dmac_read_2(sca_port_t *, u_int);
143 
144 static	void sca_msci_init(struct sca_softc *, sca_port_t *);
145 static	void sca_dmac_init(struct sca_softc *, sca_port_t *);
146 static void sca_dmac_rxinit(sca_port_t *);
147 
148 static	int sca_dmac_intr(sca_port_t *, u_int8_t);
149 static	int sca_msci_intr(sca_port_t *, u_int8_t);
150 
151 static	void sca_get_packets(sca_port_t *);
152 static	int sca_frame_avail(sca_port_t *);
153 static	void sca_frame_process(sca_port_t *);
154 static	void sca_frame_read_done(sca_port_t *);
155 
156 static	void sca_port_starttx(sca_port_t *);
157 
158 static	void sca_port_up(sca_port_t *);
159 static	void sca_port_down(sca_port_t *);
160 
161 static	int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
162 			    const struct rtentry *);
163 static	int sca_ioctl(struct ifnet *, u_long, void *);
164 static	void sca_start(struct ifnet *);
165 static	void sca_watchdog(struct ifnet *);
166 
167 static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);
168 
169 #if SCA_DEBUG_LEVEL > 0
170 static	void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
171 #endif
172 
173 
174 #define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
175 #define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
176 #define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
177 #define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)
178 
179 #define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
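
/*
 * sca_page_addr() reduces an address to its offset within the current
 * memory window.  For example, with a (hypothetical) scu_pagemask of
 * 0x3fff (a 16KB window), address 0x12345 yields in-window offset
 * 0x2345; scu_set_page() must already have selected the page that
 * contains the address.
 */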
180 
181 static inline void
182 msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
183 {
184 	sca_write_1(scp->sca, scp->msci_off + reg, val);
185 }
186 
187 static inline u_int8_t
188 msci_read_1(sca_port_t *scp, u_int reg)
189 {
190 	return sca_read_1(scp->sca, scp->msci_off + reg);
191 }
192 
193 static inline void
194 dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
195 {
196 	sca_write_1(scp->sca, scp->dmac_off + reg, val);
197 }
198 
199 static inline void
200 dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
201 {
202 	sca_write_2(scp->sca, scp->dmac_off + reg, val);
203 }
204 
205 static inline u_int8_t
206 dmac_read_1(sca_port_t *scp, u_int reg)
207 {
208 	return sca_read_1(scp->sca, scp->dmac_off + reg);
209 }
210 
211 static inline u_int16_t
212 dmac_read_2(sca_port_t *scp, u_int reg)
213 {
214 	return sca_read_2(scp->sca, scp->dmac_off + reg);
215 }
216 
217 #if SCA_DEBUG_LEVEL > 0
218 /*
219  * read the chain pointer
220  */
221 static inline u_int16_t
222 sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
223 {
224 	if (sc->sc_usedma)
225 		return ((dp)->sd_chainp);
226 	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
227 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
228 }
229 #endif
230 
231 /*
232  * write the chain pointer
233  */
234 static inline void
235 sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
236 {
237 	if (sc->sc_usedma)
238 		(dp)->sd_chainp = cp;
239 	else
240 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
241 		    sca_page_addr(sc, dp)
242 		    + offsetof(struct sca_desc, sd_chainp), cp);
243 }
244 
245 #if SCA_DEBUG_LEVEL > 0
246 /*
247  * read the buffer pointer
248  */
249 static inline u_int32_t
250 sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
251 {
252 	u_int32_t address;
253 
254 	if (sc->sc_usedma)
255 		address = dp->sd_bufp | dp->sd_hbufp << 16;
256 	else {
257 		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
258 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
259 		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
260 		    sca_page_addr(sc, dp)
261 		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
262 	}
263 	return (address);
264 }
265 #endif
266 
267 /*
268  * write the buffer pointer
269  */
270 static inline void
271 sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
272 {
273 	if (sc->sc_usedma) {
274 		dp->sd_bufp = bufp & 0xFFFF;
275 		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
276 	} else {
277 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
278 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
279 		    bufp & 0xFFFF);
280 		bus_space_write_1(sc->scu_memt, sc->scu_memh,
281 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
282 		    (bufp & 0x00FF0000) >> 16);
283 	}
284 }
285 
286 /*
287  * read the buffer length
288  */
289 static inline u_int16_t
290 sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
291 {
292 	if (sc->sc_usedma)
293 		return ((dp)->sd_buflen);
294 	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
295 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
296 }
297 
298 /*
299  * write the buffer length
300  */
301 static inline void
302 sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
303 {
304 	if (sc->sc_usedma)
305 		(dp)->sd_buflen = len;
306 	else
307 		bus_space_write_2(sc->scu_memt, sc->scu_memh,
308 		    sca_page_addr(sc, dp)
309 		    + offsetof(struct sca_desc, sd_buflen), len);
310 }
311 
312 /*
313  * read the descriptor status
314  */
315 static inline u_int8_t
316 sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
317 {
318 	if (sc->sc_usedma)
319 		return ((dp)->sd_stat);
320 	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
321 	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
322 }
323 
324 /*
325  * write the descriptor status
326  */
327 static inline void
328 sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
329 {
330 	if (sc->sc_usedma)
331 		(dp)->sd_stat = stat;
332 	else
333 		bus_space_write_1(sc->scu_memt, sc->scu_memh,
334 		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
335 		    stat);
336 }
337 
338 void
339 sca_init(struct sca_softc *sc)
340 {
341 	/*
342 	 * Do a little sanity check:  check number of ports.
343 	 */
344 	if (sc->sc_numports < 1 || sc->sc_numports > 2)
345 		panic("sca can't handle more than 2 or less than 1 ports");
346 
347 	/*
348 	 * disable DMA and MSCI interrupts
349 	 */
350 	sca_write_1(sc, SCA_DMER, 0);
351 	sca_write_1(sc, SCA_IER0, 0);
352 	sca_write_1(sc, SCA_IER1, 0);
353 	sca_write_1(sc, SCA_IER2, 0);
354 
355 	/*
356 	 * configure interrupt system
357 	 */
358 	sca_write_1(sc, SCA_ITCR,
359 	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
360 #if 0
361 	/* these are for the interrupt ack cycle which we don't use */
362 	sca_write_1(sc, SCA_IVR, 0x40);
363 	sca_write_1(sc, SCA_IMVR, 0x40);
364 #endif
365 
366 	/*
367 	 * set wait control register to zero wait states
368 	 */
369 	sca_write_1(sc, SCA_PABR0, 0);
370 	sca_write_1(sc, SCA_PABR1, 0);
371 	sca_write_1(sc, SCA_WCRL, 0);
372 	sca_write_1(sc, SCA_WCRM, 0);
373 	sca_write_1(sc, SCA_WCRH, 0);
374 
375 	/*
376 	 * disable DMA and reset status
377 	 */
378 	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);
379 
380 	/*
381 	 * disable transmit DMA for all channels
382 	 */
383 	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
384 	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
385 	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
386 	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
387 	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
388 	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
389 	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
390 	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
391 
392 	/*
393 	 * enable DMA based on channel enable flags for each channel
394 	 */
395 	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);
396 
397 	/*
398 	 * Should check to see if the chip is responding, but for now
399 	 * assume it is.
400 	 */
401 }
402 
403 /*
404  * initialize the port and attach it to the networking layer
405  */
406 void
407 sca_port_attach(struct sca_softc *sc, u_int port)
408 {
409 	struct timeval now;
410 	sca_port_t *scp = &sc->sc_ports[port];
411 	struct ifnet *ifp;
412 	static u_int ntwo_unit = 0;
413 
414 	scp->sca = sc;  /* point back to the parent */
415 
416 	scp->sp_port = port;
417 
418 	if (port == 0) {
419 		scp->msci_off = SCA_MSCI_OFF_0;
420 		scp->dmac_off = SCA_DMAC_OFF_0;
421 		if(sc->sc_parent != NULL)
422 			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
423 		else
424 			ntwo_unit = 0;	/* XXX */
425 	} else {
426 		scp->msci_off = SCA_MSCI_OFF_1;
427 		scp->dmac_off = SCA_DMAC_OFF_1;
428 		if(sc->sc_parent != NULL)
429 			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
430 		else
431 			ntwo_unit = 1;	/* XXX */
432 	}
433 
434 	sca_msci_init(sc, scp);
435 	sca_dmac_init(sc, scp);
436 
437 	/*
438 	 * attach to the network layer
439 	 */
440 	ifp = &scp->sp_if;
441 	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
442 	ifp->if_softc = scp;
443 	ifp->if_mtu = SCA_MTU;
444 	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
445 	ifp->if_type = IFT_PTPSERIAL;
446 	ifp->if_hdrlen = HDLC_HDRLEN;
447 	ifp->if_ioctl = sca_ioctl;
448 	ifp->if_output = sca_output;
449 	ifp->if_watchdog = sca_watchdog;
450 	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
451 	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
452 #ifdef SCA_USE_FASTQ
453 	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
454 #endif
455 	IFQ_SET_READY(&ifp->if_snd);
456 	if_attach(ifp);
457 	if_deferred_start_init(ifp, NULL);
458 	if_alloc_sadl(ifp);
459 	bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);
460 	bpf_mtap_softint_init(ifp);
461 
462 	if (sc->sc_parent == NULL)
463 		printf("%s: port %d\n", ifp->if_xname, port);
464 	else
465 		printf("%s at %s port %d\n",
466 		       ifp->if_xname, device_xname(sc->sc_parent), port);
467 
468 	/*
469 	 * reset the last seen times on the cisco keepalive protocol
470 	 */
471 	getmicrotime(&now);
472 	scp->cka_lasttx = now.tv_usec;
473 	scp->cka_lastrx = 0;
474 }
475 
476 #if 0
477 /*
478  * returns log2(div), sets 'tmc' for the required freq 'hz'
479  */
480 static u_int8_t
481 sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
482 {
483 	u_int32_t tmc, div;
484 	u_int32_t clock;
485 
486 	/* clock hz = (chipclock / tmc) / 2^(div); */
487 	/*
488 	 * TD == tmc * 2^(n)
489 	 *
490 	 * note:
491 	 * 1 <= TD <= 256		TD is inc of 1
492 	 * 2 <= TD <= 512		TD is inc of 2
493 	 * 4 <= TD <= 1024		TD is inc of 4
494 	 * ...
495 	 * 512 <= TD <= 256*512		TD is inc of 512
496 	 *
497 	 * so note there are overlaps.  We lose precision
498 	 * as div increases, so we wish to minimize div.
499 	 *
500 	 * basically we want to do
501 	 *
502 	 * tmc = chip / hz, but have tmc <= 256
503 	 */
504 
505 	/* assume system clock is 9.8304MHz or 9830400Hz */
506 	clock = 9830400 >> 1;
507 
508 	/* round down */
509 	div = 0;
510 	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
511 		clock >>= 1;
512 		div++;
513 	}
514 	if (clock / tmc > hz)
515 		tmc++;
516 	if (!tmc)
517 		tmc = 1;
518 
519 	if (div > SCA_RXS_DIV_512) {
520 		/* set to maximums */
521 		div = SCA_RXS_DIV_512;
522 		tmc = 0;
523 	}
524 
525 	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
526 	return (div & 0xFF);
527 }
528 #endif
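
/*
 * Worked example for the tmc/div search above: with the assumed
 * 9.8304MHz oscillator, the starting clock is 9830400 >> 1 = 4915200Hz.
 * For hz = 9600, tmc = 4915200 / 9600 = 512 exceeds 256, so the clock
 * is halved once (div = 1), giving tmc = 2457600 / 9600 = 256, encoded
 * as TMC = 0.  Check: (4915200 / 256) / 2^1 = 9600Hz.
 */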
529 
530 /*
531  * initialize the port's MSCI
532  */
533 static void
534 sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
535 {
536 	/* reset the channel */
537 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);
538 
539 	msci_write_1(scp, SCA_MD00,
540 		     (  SCA_MD0_CRC_1
541 		      | SCA_MD0_CRC_CCITT
542 		      | SCA_MD0_CRC_ENABLE
543 		      | SCA_MD0_MODE_HDLC));
544 #if 0
545 	/* immediately send receive reset so the above takes */
546 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
547 #endif
548 
549 	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
550 	msci_write_1(scp, SCA_MD20,
551 		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));
552 
553 	/* be safe and do it again */
554 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
555 
556 	/* setup underrun and idle control, and initial RTS state */
557 	msci_write_1(scp, SCA_CTL0,
558 	     (SCA_CTL_IDLC_PATTERN
559 	     | SCA_CTL_UDRNC_AFTER_FCS
560 	     | SCA_CTL_RTS_LOW));
561 
562 	/* reset the transmitter */
563 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
564 
565 	/*
566 	 * set the clock sources
567 	 */
568 	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
569 	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
570 	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);
571 
572 	/* set external clock generate as requested */
573 	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);
574 
575 	/*
576 	 * XXX don't pay attention to CTS or CD changes right now.  I can't
577 	 * simulate one, and the transmitter will try to transmit even if
578 	 * CD isn't there anyway, so nothing bad SHOULD happen.
579 	 */
580 #if 0
581 	msci_write_1(scp, SCA_IE00, 0);
582 	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
583 #else
584 	/* this would deliver transmitter underrun to ST1/ISR1 */
585 	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
586 	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
587 #endif
588 	msci_write_1(scp, SCA_IE20, 0);
589 
590 	msci_write_1(scp, SCA_FIE0, 0);
591 
592 	msci_write_1(scp, SCA_SA00, 0);
593 	msci_write_1(scp, SCA_SA10, 0);
594 
595 	msci_write_1(scp, SCA_IDL0, 0x7e);
596 
597 	msci_write_1(scp, SCA_RRC0, 0x0e);
598 	/* msci_write_1(scp, SCA_TRC00, 0x10); */
599 	/*
600 	 * The correct values here are important for avoiding underruns.
601 	 * For any FIFO depth less than or equal to TRC0, txrdy is
602 	 * activated, which will start the dmac transfer to the fifo.
603 	 * For FIFO depth >= TRC1 + 1, txrdy is cleared, which will stop DMA.
604 	 *
605 	 * Thus, if we are using a very fast clock that empties the fifo
606 	 * quickly, delays in the dmac starting to fill the fifo can
607 	 * lead to underruns, so we want a fairly full fifo to still
608 	 * cause the dmac to start.  For cards with on-board RAM this
609 	 * has no effect on system performance.  For cards that DMA
610 	 * to/from system memory it will cause more, shorter,
611 	 * bus accesses rather than fewer longer ones.
612 	 */
613 	msci_write_1(scp, SCA_TRC00, 0x00);
614 	msci_write_1(scp, SCA_TRC10, 0x1f);
615 }
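
/*
 * With the TRC values written above (TRC0 = 0x00, TRC1 = 0x1f), txrdy
 * asserts only once the transmit fifo has drained empty and clears
 * again once 0x1f + 1 = 32 bytes are queued, so the DMAC refills in
 * bursts.  If that proves too lazy for a fast line clock,
 * sca_msci_intr() raises TRC0 after each underrun so the refill starts
 * while the fifo is fuller.
 */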
616 
617 /*
618  * Take the memory for the port and construct two circular linked lists of
619  * descriptors (one tx, one rx) and set the pointers in these descriptors
620  * to point to the buffer space for this port.
621  */
622 static void
623 sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
624 {
625 	sca_desc_t *desc;
626 	u_int32_t desc_p;
627 	u_int32_t buf_p;
628 	int i;
629 
630 	if (sc->sc_usedma)
631 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
632 		    BUS_DMASYNC_PREWRITE);
633 	else {
634 		/*
635 		 * XXX assumes that all tx desc and bufs in same page
636 		 */
637 		sc->scu_page_on(sc);
638 		sc->scu_set_page(sc, scp->sp_txdesc_p);
639 	}
640 
641 	desc = scp->sp_txdesc;
642 	desc_p = scp->sp_txdesc_p;
643 	buf_p = scp->sp_txbuf_p;
644 	scp->sp_txcur = 0;
645 	scp->sp_txinuse = 0;
646 
647 #ifdef DEBUG
648 	/* make sure that we won't wrap */
649 	if ((desc_p & 0xffff0000) !=
650 	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
651 		panic("sca: tx descriptors cross architectural boundary");
652 	if ((buf_p & 0xff000000) !=
653 	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
654 		panic("sca: tx buffers cross architectural boundary");
655 #endif
656 
657 	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
658 		/*
659 		 * desc_p points to the physical address of the NEXT desc
660 		 */
661 		desc_p += sizeof(sca_desc_t);
662 
663 		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
664 		sca_desc_write_bufp(sc, desc, buf_p);
665 		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
666 		sca_desc_write_stat(sc, desc, 0);
667 
668 		desc++;  /* point to the next descriptor */
669 		buf_p += SCA_BSIZE;
670 	}
671 
672 	/*
673 	 * "heal" the circular list by making the last entry point to the
674 	 * first.
675 	 */
676 	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);
677 
678 	/*
679 	 * Now, initialize the transmit DMA logic
680 	 *
681 	 * CPB == chain pointer base address
682 	 */
683 	dmac_write_1(scp, SCA_DSR1, 0);
684 	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
685 	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
686 	/* XXX1
687 	dmac_write_1(scp, SCA_DIR1,
688 		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
689 	 */
690 	dmac_write_1(scp, SCA_DIR1,
691 		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
692 	dmac_write_1(scp, SCA_CPB1,
693 		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));
694 
695 	/*
696 	 * now, do the same thing for receive descriptors
697 	 *
698 	 * XXX assumes that all rx desc and bufs in same page
699 	 */
700 	if (!sc->sc_usedma)
701 		sc->scu_set_page(sc, scp->sp_rxdesc_p);
702 
703 	desc = scp->sp_rxdesc;
704 	desc_p = scp->sp_rxdesc_p;
705 	buf_p = scp->sp_rxbuf_p;
706 
707 #ifdef DEBUG
708 	/* make sure that we won't wrap */
709 	if ((desc_p & 0xffff0000) !=
710 	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
711 		panic("sca: rx descriptors cross architectural boundary");
712 	if ((buf_p & 0xff000000) !=
713 	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
714 		panic("sca: rx buffers cross architectural boundary");
715 #endif
716 
717 	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
718 		/*
719 		 * desc_p points to the physical address of the NEXT desc
720 		 */
721 		desc_p += sizeof(sca_desc_t);
722 
723 		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
724 		sca_desc_write_bufp(sc, desc, buf_p);
725 		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
726 		sca_desc_write_buflen(sc, desc, 0);
727 		sca_desc_write_stat(sc, desc, 0);
728 
729 		desc++;  /* point to the next descriptor */
730 		buf_p += SCA_BSIZE;
731 	}
732 
733 	/*
734 	 * "heal" the circular list by making the last entry point to the
735 	 * first.
736 	 */
737 	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);
738 
739 	sca_dmac_rxinit(scp);
740 
741 	if (sc->sc_usedma)
742 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
743 		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
744 	else
745 		sc->scu_page_off(sc);
746 }
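
/*
 * The result of sca_dmac_init() for each direction is a ring of
 * descriptors, each owning one SCA_BSIZE buffer.  Only the low 16 bits
 * of a physical address are stored in sd_chainp; the shared bits 16-23
 * live in the CPB register.  Schematically:
 *
 *	desc[0] -chainp-> desc[1] -chainp-> ... -chainp-> desc[0] (wraps)
 *	   |                 |
 *	 buf[0]            buf[1]	(each buffer SCA_BSIZE bytes)
 */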
747 
748 /*
749  * reset and reinitialize the receive DMA logic
750  */
751 static void
752 sca_dmac_rxinit(sca_port_t *scp)
753 {
754 	/*
755 	 * ... and the receive DMA logic ...
756 	 */
757 	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
758 	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);
759 
760 	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
761 	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);
762 
763 	/* reset descriptors to initial state */
764 	scp->sp_rxstart = 0;
765 	scp->sp_rxend = scp->sp_nrxdesc - 1;
766 
767 	/*
768 	 * CPB == chain pointer base
769 	 * CDA == current descriptor address
770 	 * EDA == error descriptor address (overwrite position)
771 	 *	because cda can't be eda when starting, we always
772 	 *	have a single buffer gap between cda and eda
773 	 */
774 	dmac_write_1(scp, SCA_CPB0,
775 	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
776 	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
777 	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
778 	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));
779 
780 	/*
781 	 * enable receiver DMA
782 	 */
783 	dmac_write_1(scp, SCA_DIR0,
784 		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
785 	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
786 }
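
/*
 * Example of the cda/eda gap described above, with a (hypothetical)
 * sp_nrxdesc of 8: after sca_dmac_rxinit() CDA points at descriptor 0
 * and EDA at descriptor 7, so the DMAC may fill descriptors 0..6 but
 * must stop before 7.  As sca_frame_read_done() consumes frames it
 * advances EDA along behind the hardware, always preserving that
 * one-descriptor gap.
 */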
787 
788 /*
789  * Queue the packet for our start routine to transmit
790  */
791 static int
792 sca_output(
793     struct ifnet *ifp,
794     struct mbuf *m,
795     const struct sockaddr *dst,
796     const struct rtentry *rt0)
797 {
798 	struct hdlc_header *hdlc;
799 	struct ifqueue *ifq = NULL;
800 	int s, error, len;
801 	short mflags;
802 
803 	error = 0;
804 
805 	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
806 		error = ENETDOWN;
807 		goto bad;
808 	}
809 
810 	/*
811 	 * If the queueing discipline needs packet classification,
812 	 * do it before prepending link headers.
813 	 */
814 	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);
815 
816 	/*
817 	 * determine address family, and priority for this packet
818 	 */
819 	switch (dst->sa_family) {
820 #ifdef INET
821 	case AF_INET:
822 #ifdef SCA_USE_FASTQ
823 		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
824 		    == IPTOS_LOWDELAY)
825 			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
826 #endif
827 		/*
828 		 * Add cisco serial line header. If there is no
829 		 * space in the first mbuf, allocate another.
830 		 */
831 		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
832 		if (m == NULL)
833 			return (ENOBUFS);
834 		hdlc = mtod(m, struct hdlc_header *);
835 		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
836 		break;
837 #endif
838 #ifdef INET6
839 	case AF_INET6:
840 		/*
841 		 * Add cisco serial line header. If there is no
842 		 * space in the first mbuf, allocate another.
843 		 */
844 		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
845 		if (m == NULL)
846 			return (ENOBUFS);
847 		hdlc = mtod(m, struct hdlc_header *);
848 		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
849 		break;
850 #endif
851 	default:
852 		printf("%s: address family %d unsupported\n",
853 		       ifp->if_xname, dst->sa_family);
854 		error = EAFNOSUPPORT;
855 		goto bad;
856 	}
857 
858 	/* finish */
859 	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
860 		hdlc->h_addr = CISCO_MULTICAST;
861 	else
862 		hdlc->h_addr = CISCO_UNICAST;
863 	hdlc->h_resv = 0;
864 
865 	/*
866 	 * queue the packet.  If interactive, use the fast queue.
867 	 */
868 	mflags = m->m_flags;
869 	len = m->m_pkthdr.len;
870 	s = splnet();
871 	if (ifq != NULL) {
872 		if (IF_QFULL(ifq)) {
873 			IF_DROP(ifq);
874 			m_freem(m);
875 			error = ENOBUFS;
876 		} else
877 			IF_ENQUEUE(ifq, m);
878 	} else
879 		IFQ_ENQUEUE(&ifp->if_snd, m, error);
880 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
881 	if (error != 0) {
882 		if_statinc_ref(nsr, if_oerrors);
883 		if_statinc_ref(nsr, if_collisions);
884 		IF_STAT_PUTREF(ifp);
885 		splx(s);
886 		return (error);
887 	}
888 	if_statadd_ref(nsr, if_obytes, len);
889 	if (mflags & M_MCAST)
890 		if_statinc_ref(nsr, if_omcasts);
891 	IF_STAT_PUTREF(ifp);
892 
893 	sca_start(ifp);
894 	splx(s);
895 
896 	return (error);
897 
898  bad:
899 	if (m)
900 		m_freem(m);
901 	return (error);
902 }
903 
904 static int
905 sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
906 {
907 	struct ifreq *ifr;
908 	struct ifaddr *ifa;
909 	int error;
910 	int s;
911 
912 	s = splnet();
913 
914 	ifr = (struct ifreq *)data;
915 	ifa = (struct ifaddr *)data;
916 	error = 0;
917 
918 	switch (cmd) {
919 	case SIOCINITIFADDR:
920 		switch(ifa->ifa_addr->sa_family) {
921 #ifdef INET
922 		case AF_INET:
923 #endif
924 #ifdef INET6
925 		case AF_INET6:
926 #endif
927 #if defined(INET) || defined(INET6)
928 			ifp->if_flags |= IFF_UP;
929 			sca_port_up(ifp->if_softc);
930 			break;
931 #endif
932 		default:
933 			error = EAFNOSUPPORT;
934 			break;
935 		}
936 		break;
937 
938 	case SIOCSIFDSTADDR:
939 #ifdef INET
940 		if (ifa->ifa_addr->sa_family == AF_INET)
941 			break;
942 #endif
943 #ifdef INET6
944 		if (ifa->ifa_addr->sa_family == AF_INET6)
945 			break;
946 #endif
947 		error = EAFNOSUPPORT;
948 		break;
949 
950 	case SIOCADDMULTI:
951 	case SIOCDELMULTI:
952 		/* XXX need multicast group management code */
953 		if (ifr == NULL) {
954 			error = EAFNOSUPPORT;		/* XXX */
955 			break;
956 		}
957 		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
958 #ifdef INET
959 		case AF_INET:
960 			break;
961 #endif
962 #ifdef INET6
963 		case AF_INET6:
964 			break;
965 #endif
966 		default:
967 			error = EAFNOSUPPORT;
968 			break;
969 		}
970 		break;
971 
972 	case SIOCSIFFLAGS:
973 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
974 			break;
975 		if (ifr->ifr_flags & IFF_UP) {
976 			ifp->if_flags |= IFF_UP;
977 			sca_port_up(ifp->if_softc);
978 		} else {
979 			ifp->if_flags &= ~IFF_UP;
980 			sca_port_down(ifp->if_softc);
981 		}
982 
983 		break;
984 
985 	default:
986 		error = ifioctl_common(ifp, cmd, data);
987 	}
988 
989 	splx(s);
990 	return error;
991 }
992 
993 /*
994  * start packet transmission on the interface
995  *
996  * MUST BE CALLED AT splnet()
997  */
998 static void
999 sca_start(struct ifnet *ifp)
1000 {
1001 	sca_port_t *scp = ifp->if_softc;
1002 	struct sca_softc *sc = scp->sca;
1003 	struct mbuf *m, *mb_head;
1004 	sca_desc_t *desc;
1005 	u_int8_t *buf, stat;
1006 	u_int32_t buf_p;
1007 	int nexttx;
1008 	int trigger_xmit;
1009 	u_int len;
1010 
1011 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));
1012 
1013 	/*
1014 	 * can't queue when we are full or transmitter is busy
1015 	 */
1016 #ifdef oldcode
1017 	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
1018 	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1019 		return;
1020 #else
1021 	if (scp->sp_txinuse
1022 	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
1023 		return;
1024 #endif
1025 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));
1026 
1027 	/*
1028 	 * XXX assume that all tx desc and bufs in same page
1029 	 */
1030 	if (sc->sc_usedma)
1031 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1032 		    0, sc->scu_allocsize,
1033 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1034 	else {
1035 		sc->scu_page_on(sc);
1036 		sc->scu_set_page(sc, scp->sp_txdesc_p);
1037 	}
1038 
1039 	trigger_xmit = 0;
1040 
1041  txloop:
1042 	IF_DEQUEUE(&scp->linkq, mb_head);
1043 	if (mb_head == NULL)
1044 #ifdef SCA_USE_FASTQ
1045 		IF_DEQUEUE(&scp->fastq, mb_head);
1046 	if (mb_head == NULL)
1047 #endif
1048 		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
1049 	if (mb_head == NULL)
1050 		goto start_xmit;
1051 
1052 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
1053 #ifdef oldcode
1054 	if (scp->txinuse != 0) {
1055 		/* Kill EOT interrupts on the previous descriptor. */
1056 		desc = &scp->sp_txdesc[scp->txcur];
1057 		stat = sca_desc_read_stat(sc, desc);
1058 		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);
1059 
1060 		/* Figure out what the next free descriptor is. */
1061 		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1062 	} else
1063 		nexttx = 0;
1064 #endif	/* oldcode */
1065 
1066 	if (scp->sp_txinuse)
1067 		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1068 	else
1069 		nexttx = 0;
1070 
1071 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));
1072 
1073 	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
1074 	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;
1075 
1076 	/* XXX hoping we can delay the desc write till after we don't drop. */
1077 	desc = &scp->sp_txdesc[nexttx];
1078 
1079 	/* XXX isn't this set already?? */
1080 	sca_desc_write_bufp(sc, desc, buf_p);
1081 	len = 0;
1082 
1083 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));
1084 
1085 #if 0	/* uncomment this for a core in cc1 */
1086 X
1087 #endif
1088 	/*
1089 	 * Run through the chain, copying data into the descriptor as we
1090 	 * go.  If it won't fit in one transmission block, drop the packet.
1091 	 * No, this isn't nice, but most of the time it _will_ fit.
1092 	 */
1093 	for (m = mb_head ; m != NULL ; m = m->m_next) {
1094 		if (m->m_len != 0) {
1095 			len += m->m_len;
1096 			if (len > SCA_BSIZE) {
1097 				m_freem(mb_head);
1098 				goto txloop;
1099 			}
1100 			SCA_DPRINTF(SCA_DEBUG_TX,
1101 			    ("TX: about to mbuf len %d\n", m->m_len));
1102 
1103 			if (sc->sc_usedma)
1104 				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
1105 			else
1106 				bus_space_write_region_1(sc->scu_memt,
1107 				    sc->scu_memh, sca_page_addr(sc, buf_p),
1108 				    mtod(m, u_int8_t *), m->m_len);
1109 			buf += m->m_len;
1110 			buf_p += m->m_len;
1111 		}
1112 	}
1113 
1114 	/* set the buffer, the length, and mark end of frame and end of xfer */
1115 	sca_desc_write_buflen(sc, desc, len);
1116 	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);
1117 
1118 	if_statinc(ifp, if_opackets);
1119 
1120 	/*
1121 	 * Pass packet to bpf if there is a listener.
1122 	 */
1123 	bpf_mtap(ifp, mb_head, BPF_D_OUT);
1124 
1125 	m_freem(mb_head);
1126 
1127 	scp->sp_txcur = nexttx;
1128 	scp->sp_txinuse++;
1129 	trigger_xmit = 1;
1130 
1131 	SCA_DPRINTF(SCA_DEBUG_TX,
1132 	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));
1133 
1134 	/*
1135 	 * XXX didn't this use to limit us to 1?  Multi-descriptor operation
1136 	 * may be untested; sp_ntxdesc used to be hard-coded to 2, with the
1137 	 * claim of a too-hard-to-find bug.
1138 	 */
1139 #ifdef oldcode
1140 	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
1141 #endif
1142 	if (scp->sp_txinuse < scp->sp_ntxdesc)
1143 		goto txloop;
1144 
1145  start_xmit:
1146 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));
1147 
1148 	if (trigger_xmit != 0) {
1149 		/* set EOT on final descriptor */
1150 		desc = &scp->sp_txdesc[scp->sp_txcur];
1151 		stat = sca_desc_read_stat(sc, desc);
1152 		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
1153 	}
1154 
1155 	if (sc->sc_usedma)
1156 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
1157 		    sc->scu_allocsize,
1158 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1159 
1160 	if (trigger_xmit != 0)
1161 		sca_port_starttx(scp);
1162 
1163 	if (!sc->sc_usedma)
1164 		sc->scu_page_off(sc);
1165 }
1166 
1167 static void
1168 sca_watchdog(struct ifnet *ifp)
1169 {
1170 }
1171 
1172 int
1173 sca_hardintr(struct sca_softc *sc)
1174 {
1175 	u_int8_t isr0, isr1, isr2;
1176 	int	ret;
1177 
1178 	ret = 0;  /* non-zero means we processed at least one interrupt */
1179 
1180 	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));
1181 
1182 	while (1) {
1183 		/*
1184 		 * read SCA interrupts
1185 		 */
1186 		isr0 = sca_read_1(sc, SCA_ISR0);
1187 		isr1 = sca_read_1(sc, SCA_ISR1);
1188 		isr2 = sca_read_1(sc, SCA_ISR2);
1189 
1190 		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
1191 			break;
1192 
1193 		SCA_DPRINTF(SCA_DEBUG_INTR,
1194 			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
1195 			     isr0, isr1, isr2));
1196 
1197 		/*
1198 		 * check DMAC interrupt
1199 		 */
1200 		if (isr1 & 0x0f)
1201 			ret += sca_dmac_intr(&sc->sc_ports[0],
1202 					     isr1 & 0x0f);
1203 
1204 		if (isr1 & 0xf0)
1205 			ret += sca_dmac_intr(&sc->sc_ports[1],
1206 			     (isr1 & 0xf0) >> 4);
1207 
1208 		/*
1209 		 * msci interrupts
1210 		 */
1211 		if (isr0 & 0x0f)
1212 			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);
1213 
1214 		if (isr0 & 0xf0)
1215 			ret += sca_msci_intr(&sc->sc_ports[1],
1216 			    (isr0 & 0xf0) >> 4);
1217 
1218 #if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
1219 		if (isr2)
1220 			ret += sca_timer_intr(sc, isr2);
1221 #endif
1222 	}
1223 
1224 	return (ret);
1225 }
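
/*
 * ISR0 and ISR1 pack both ports into nibbles: the low nibble carries
 * port 0 and the high nibble port 1.  sca_hardintr() therefore hands
 * the port 1 status to the per-port handlers pre-shifted down by 4,
 * so the same single-port bit definitions serve either port.
 */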
1226 
1227 static int
1228 sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
1229 {
1230 	u_int8_t	 dsr;
1231 	int		 ret;
1232 
1233 	ret = 0;
1234 
1235 	/*
1236 	 * Check transmit channel
1237 	 */
1238 	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
1239 		SCA_DPRINTF(SCA_DEBUG_INTR,
1240 		    ("TX INTERRUPT port %d\n", scp->sp_port));
1241 
1242 		dsr = 1;
1243 		while (dsr != 0) {
1244 			ret++;
1245 			/*
1246 			 * reset interrupt
1247 			 */
1248 			dsr = dmac_read_1(scp, SCA_DSR1);
1249 			dmac_write_1(scp, SCA_DSR1,
1250 				     dsr | SCA_DSR_DEWD);
1251 
1252 			/*
1253 			 * filter out the bits we don't care about
1254 			 */
1255 			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
1256 			if (dsr == 0)
1257 				break;
1258 
1259 			/*
1260 			 * check for counter overflow
1261 			 */
1262 			if (dsr & SCA_DSR_COF) {
1263 				printf("%s: TXDMA counter overflow\n",
1264 				       scp->sp_if.if_xname);
1265 
1266 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1267 				scp->sp_txcur = 0;
1268 				scp->sp_txinuse = 0;
1269 			}
1270 
1271 			/*
1272 			 * check for buffer overflow
1273 			 */
1274 			if (dsr & SCA_DSR_BOF) {
1275 				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
1276 				       scp->sp_if.if_xname,
1277 				       dmac_read_2(scp, SCA_CDAL1),
1278 				       dmac_read_2(scp, SCA_EDAL1),
1279 				       dmac_read_1(scp, SCA_CPB1));
1280 
1281 				/*
1282 				 * Yikes.  Arrange for a full
1283 				 * transmitter restart.
1284 				 */
1285 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1286 				scp->sp_txcur = 0;
1287 				scp->sp_txinuse = 0;
1288 			}
1289 
1290 			/*
1291 			 * check for end of transfer, which is not
1292 			 * an error. It means that all data queued
1293 			 * was transmitted, and we mark ourself as
1294 			 * not in use and stop the watchdog timer.
1295 			 */
1296 			if (dsr & SCA_DSR_EOT) {
1297 				SCA_DPRINTF(SCA_DEBUG_TX,
1298 			    ("Transmit completed. cda %x eda %x dsr %x\n",
1299 				    dmac_read_2(scp, SCA_CDAL1),
1300 				    dmac_read_2(scp, SCA_EDAL1),
1301 				    dsr));
1302 
1303 				scp->sp_if.if_flags &= ~IFF_OACTIVE;
1304 				scp->sp_txcur = 0;
1305 				scp->sp_txinuse = 0;
1306 
1307 				/*
1308 				 * check for more packets
1309 				 */
1310 				if_schedule_deferred_start(&scp->sp_if);
1311 			}
1312 		}
1313 	}
1314 	/*
1315 	 * receive channel check
1316 	 */
1317 	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
1318 		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
1319 		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));
1320 
1321 		dsr = 1;
1322 		while (dsr != 0) {
1323 			ret++;
1324 
1325 			dsr = dmac_read_1(scp, SCA_DSR0);
1326 			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);
1327 
1328 			/*
1329 			 * filter out the bits we don't care about
1330 			 */
1331 			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
1332 				| SCA_DSR_BOF | SCA_DSR_EOT);
1333 			if (dsr == 0)
1334 				break;
1335 
1336 			/*
1337 			 * End of frame
1338 			 */
1339 			if (dsr & SCA_DSR_EOM) {
1340 				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));
1341 
1342 				sca_get_packets(scp);
1343 			}
1344 
1345 			/*
1346 			 * check for counter overflow
1347 			 */
1348 			if (dsr & SCA_DSR_COF) {
1349 				printf("%s: RXDMA counter overflow\n",
1350 				       scp->sp_if.if_xname);
1351 
1352 				sca_dmac_rxinit(scp);
1353 			}
1354 
1355 			/*
1356 			 * check for end of transfer, which means we
1357 			 * ran out of descriptors to receive into.
1358 			 * This means the line is much faster than
1359 			 * we can handle.
1360 			 */
1361 			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
1362 				printf("%s: RXDMA buffer overflow\n",
1363 				       scp->sp_if.if_xname);
1364 
1365 				sca_dmac_rxinit(scp);
1366 			}
1367 		}
1368 	}
1369 
1370 	return ret;
1371 }
1372 
1373 static int
1374 sca_msci_intr(sca_port_t *scp, u_int8_t isr)
1375 {
1376 	u_int8_t st1, trc0;
1377 
1378 	/* get and clear the specific interrupt -- should act on it :)*/
1379 	if ((st1 = msci_read_1(scp, SCA_ST10))) {
1380 		/* clear the interrupt */
1381 		msci_write_1(scp, SCA_ST10, st1);
1382 
1383 		if (st1 & SCA_ST1_UDRN) {
1384 			/* underrun -- try to increase ready control */
1385 			trc0 = msci_read_1(scp, SCA_TRC00);
1386 			if (trc0 == 0x1f)
1387 				printf("TX: underrun - fifo depth maxed\n");
1388 			else {
1389 				if ((trc0 += 2) > 0x1f)
1390 					trc0 = 0x1f;
1391 				SCA_DPRINTF(SCA_DEBUG_TX,
1392 				   ("TX: udrn - incr fifo to %d\n", trc0));
1393 				msci_write_1(scp, SCA_TRC00, trc0);
1394 			}
1395 		}
1396 	}
1397 	return (0);
1398 }
1399 
1400 static void
1401 sca_get_packets(sca_port_t *scp)
1402 {
1403 	struct sca_softc *sc;
1404 
1405 	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));
1406 
1407 	sc = scp->sca;
1408 	if (sc->sc_usedma)
1409 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1410 		    0, sc->scu_allocsize,
1411 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1412 	else {
1413 		/*
1414 		 * XXX this code is unable to deal with rx stuff
1415 		 * in more than 1 page
1416 		 */
1417 		sc->scu_page_on(sc);
1418 		sc->scu_set_page(sc, scp->sp_rxdesc_p);
1419 	}
1420 
1421 	/* process as many frames as are available */
1422 	while (sca_frame_avail(scp)) {
1423 		sca_frame_process(scp);
1424 		sca_frame_read_done(scp);
1425 	}
1426 
1427 	if (sc->sc_usedma)
1428 		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
1429 		    0, sc->scu_allocsize,
1430 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1431 	else
1432 		sc->scu_page_off(sc);
1433 }
1434 
1435 /*
1436  * Starting with the first descriptor we wanted to read into, up to but
1437  * not including the current SCA read descriptor, look for a packet.
1438  *
1439  * must be called at splnet()
1440  */
1441 static int
1442 sca_frame_avail(sca_port_t *scp)
1443 {
1444 	u_int16_t cda;
1445 	u_int32_t desc_p;	/* physical address (lower 16 bits) */
1446 	sca_desc_t *desc;
1447 	u_int8_t rxstat;
1448 	int cdaidx, toolong;
1449 
1450 	/*
1451 	 * Read the current descriptor from the SCA.
1452 	 */
1453 	cda = dmac_read_2(scp, SCA_CDAL0);
1454 
1455 	/*
1456 	 * calculate the index of the current descriptor
1457 	 */
1458 	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
1459 	desc_p = cda - desc_p;
1460 	cdaidx = desc_p / sizeof(sca_desc_t);
1461 
1462 	SCA_DPRINTF(SCA_DEBUG_RX,
1463 	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
1464 	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));
1465 
1466 	/* note confusion */
1467 	if (cdaidx >= scp->sp_nrxdesc)
1468 		panic("current descriptor index out of range");
1469 
1470 	/* see if we have a valid frame available */
1471 	toolong = 0;
1472 	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
1473 		/*
1474 		 * We might have a valid descriptor.  Set up a pointer
1475 		 * to the kva address for it so we can more easily examine
1476 		 * the contents.
1477 		 */
1478 		desc = &scp->sp_rxdesc[scp->sp_rxstart];
1479 		rxstat = sca_desc_read_stat(scp->sca, desc);
1480 
1481 		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
1482 		    scp->sp_port, scp->sp_rxstart, rxstat));
1483 
1484 		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
1485 		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));
1486 
1487 		/*
1488 		 * check for errors
1489 		 */
1490 		if (rxstat & SCA_DESC_ERRORS) {
1491 			/*
1492 			 * consider an error condition the end
1493 			 * of a frame
1494 			 */
1495 			if_statinc(&scp->sp_if, if_ierrors);
1496 			toolong = 0;
1497 			continue;
1498 		}
1499 
1500 		/*
1501 		 * if we aren't skipping overlong frames
1502 		 * we are done, otherwise reset and look for
1503 		 * another good frame
1504 		 */
1505 		if (rxstat & SCA_DESC_EOM) {
1506 			if (!toolong)
1507 				return (1);
1508 			toolong = 0;
1509 		} else if (!toolong) {
1510 			/*
1511 			 * we currently don't deal with frames
1512 			 * larger than a single buffer (fixed MTU)
1513 			 */
1514 			if_statinc(&scp->sp_if, if_ierrors);
1515 			toolong = 1;
1516 		}
1517 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
1518 		    scp->sp_rxstart));
1519 	}
1520 
1521 	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
1522 	return 0;
1523 }
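
/*
 * The index math in sca_frame_avail(): CDAL holds only the low 16 bits
 * of the current descriptor's physical address, so subtracting the low
 * 16 bits of the ring base and dividing by the descriptor size
 * recovers the ring index.  With (hypothetical) numbers:
 *
 *	cda = 0x8050, base = 0x8000  =>  cdaidx = 0x50 / sizeof(sca_desc_t)
 */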
1524 
1525 /*
1526  * Pass the packet up to the kernel if it is a packet we want to pay
1527  * attention to.
1528  *
1529  * MUST BE CALLED AT splnet()
1530  */
1531 static void
1532 sca_frame_process(sca_port_t *scp)
1533 {
1534 	pktqueue_t *pktq = NULL;
1535 	struct hdlc_header *hdlc;
1536 	struct cisco_pkt *cisco;
1537 	sca_desc_t *desc;
1538 	struct mbuf *m;
1539 	u_int8_t *bufp;
1540 	u_int16_t len;
1541 	u_int32_t t;
1542 
1543 	t = time_uptime * 1000;
1544 	desc = &scp->sp_rxdesc[scp->sp_rxstart];
1545 	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
1546 	len = sca_desc_read_buflen(scp->sca, desc);
1547 
1548 	SCA_DPRINTF(SCA_DEBUG_RX,
1549 	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
1550 	    (bus_addr_t)bufp, len));
1551 
1552 #if SCA_DEBUG_LEVEL > 0
1553 	if (sca_debug & SCA_DEBUG_RXPKT)
1554 		sca_frame_print(scp, desc, bufp);
1555 #endif
1556 	/*
1557 	 * skip packets that are too short
1558 	 */
1559 	if (len < sizeof(struct hdlc_header)) {
1560 		if_statinc(&scp->sp_if, if_ierrors);
1561 		return;
1562 	}
1563 
1564 	m = sca_mbuf_alloc(scp->sca, bufp, len);
1565 	if (m == NULL) {
1566 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
1567 		return;
1568 	}
1569 
1570 	/*
1571 	 * read and then strip off the HDLC information
1572 	 */
1573 	m = m_pullup(m, sizeof(struct hdlc_header));
1574 	if (m == NULL) {
1575 		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1576 		return;
1577 	}
1578 
1579 	bpf_mtap_softint(&scp->sp_if, m);
1580 
1581 	if_statinc(&scp->sp_if, if_ipackets);
1582 
1583 	hdlc = mtod(m, struct hdlc_header *);
1584 	switch (ntohs(hdlc->h_proto)) {
1585 #ifdef INET
1586 	case HDLC_PROTOCOL_IP:
1587 		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
1588 		m_set_rcvif(m, &scp->sp_if);
1589 		m->m_pkthdr.len -= sizeof(struct hdlc_header);
1590 		m->m_data += sizeof(struct hdlc_header);
1591 		m->m_len -= sizeof(struct hdlc_header);
1592 		pktq = ip_pktq;
1593 		break;
1594 #endif	/* INET */
1595 #ifdef INET6
1596 	case HDLC_PROTOCOL_IPV6:
1597 		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
1598 		m_set_rcvif(m, &scp->sp_if);
1599 		m->m_pkthdr.len -= sizeof(struct hdlc_header);
1600 		m->m_data += sizeof(struct hdlc_header);
1601 		m->m_len -= sizeof(struct hdlc_header);
1602 		pktq = ip6_pktq;
1603 		break;
1604 #endif	/* INET6 */
1605 	case CISCO_KEEPALIVE:
1606 		SCA_DPRINTF(SCA_DEBUG_CISCO,
1607 			    ("Received CISCO keepalive packet\n"));
1608 
1609 		if (len < CISCO_PKT_LEN) {
1610 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1611 				    ("short CISCO packet %d, wanted %d\n",
1612 				     len, CISCO_PKT_LEN));
1613 			if_statinc(&scp->sp_if, if_ierrors);
1614 			goto dropit;
1615 		}
1616 
1617 		m = m_pullup(m, sizeof(struct cisco_pkt));
1618 		if (m == NULL) {
1619 			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
1620 			return;
1621 		}
1622 
1623 		cisco = (struct cisco_pkt *)
1624 		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
1625 		m_set_rcvif(m, &scp->sp_if);
1626 
1627 		switch (ntohl(cisco->type)) {
1628 		case CISCO_ADDR_REQ:
1629 			printf("Got CISCO addr_req, ignoring\n");
1630 			if_statinc(&scp->sp_if, if_ierrors);
1631 			goto dropit;
1632 
1633 		case CISCO_ADDR_REPLY:
1634 			printf("Got CISCO addr_reply, ignoring\n");
1635 			if_statinc(&scp->sp_if, if_ierrors);
1636 			goto dropit;
1637 
1638 		case CISCO_KEEPALIVE_REQ:
1639 
1640 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1641 				    ("Received KA, mseq %d,"
1642 				     " yseq %d, rel 0x%04x, t0"
1643 				     " %04x, t1 %04x\n",
1644 				     ntohl(cisco->par1), ntohl(cisco->par2),
1645 				     ntohs(cisco->rel), ntohs(cisco->time0),
1646 				     ntohs(cisco->time1)));
1647 
1648 			scp->cka_lastrx = ntohl(cisco->par1);
1649 			scp->cka_lasttx++;
1650 
1651 			/*
1652 			 * schedule the transmit right here.
1653 			 */
1654 			cisco->par2 = cisco->par1;
1655 			cisco->par1 = htonl(scp->cka_lasttx);
1656 			cisco->time0 = htons((u_int16_t)(t >> 16));
1657 			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));
1658 
1659 			if (IF_QFULL(&scp->linkq)) {
1660 				IF_DROP(&scp->linkq);
1661 				goto dropit;
1662 			}
1663 			IF_ENQUEUE(&scp->linkq, m);
1664 
1665 			sca_start(&scp->sp_if);
1666 
1667 			/* since start may have reset this fix */
1668 			if (!scp->sca->sc_usedma) {
1669 				scp->sca->scu_set_page(scp->sca,
1670 				    scp->sp_rxdesc_p);
1671 				scp->sca->scu_page_on(scp->sca);
1672 			}
1673 			return;
1674 		default:
1675 			SCA_DPRINTF(SCA_DEBUG_CISCO,
1676 				    ("Unknown CISCO keepalive protocol 0x%04x\n",
1677 				     ntohl(cisco->type)));
1678 
1679 			if_statinc(&scp->sp_if, if_noproto);
1680 			goto dropit;
1681 		}
1682 		return;
1683 	default:
1684 		SCA_DPRINTF(SCA_DEBUG_RX,
1685 			    ("Unknown/unexpected ethertype 0x%04x\n",
1686 			     ntohs(hdlc->h_proto)));
1687 		if_statinc(&scp->sp_if, if_noproto);
1688 		goto dropit;
1689 	}
1690 
1691 	/* Queue the packet */
1692 	KASSERT(pktq != NULL);
1693 	if (__predict_false(!pktq_enqueue(pktq, m, 0))) {
1694 		if_statinc(&scp->sp_if, if_iqdrops);
1695 		goto dropit;
1696 	}
1697 	return;
1698 dropit:
1699 	if (m)
1700 		m_freem(m);
1701 	return;
1702 }
1703 
1704 #if SCA_DEBUG_LEVEL > 0
1705 /*
1706  * do a hex dump of the packet received into descriptor "desc" with
1707  * data buffer "p"
1708  */
1709 static void
1710 sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
1711 {
1712 	int i;
1713 	int nothing_yet = 1;
1714 	struct sca_softc *sc;
1715 	u_int len;
1716 
1717 	sc = scp->sca;
1718 	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
1719 	       desc,
1720 	       sca_desc_read_chainp(sc, desc),
1721 	       sca_desc_read_bufp(sc, desc),
1722 	       sca_desc_read_stat(sc, desc),
1723 	       (len = sca_desc_read_buflen(sc, desc)));
1724 
1725 	for (i = 0 ; i < len && i < 256; i++) {
1726 		if (nothing_yet == 1 &&
1727 		    (sc->sc_usedma ? *p
1728 			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
1729 		    sca_page_addr(sc, p))) == 0) {
1730 			p++;
1731 			continue;
1732 		}
1733 		nothing_yet = 0;
1734 		if (i % 16 == 0)
1735 			printf("\n");
1736 		printf("%02x ",
1737 		    (sc->sc_usedma ? *p
1738 		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
1739 		    sca_page_addr(sc, p))));
1740 		p++;
1741 	}
1742 
1743 	if (i % 16 != 1)
1744 		printf("\n");
1745 }
1746 #endif
1747 
1748 /*
1749  * adjust things because we have just read the current starting
1750  * frame
1751  *
1752  * must be called at splnet()
1753  */
1754 static void
1755 sca_frame_read_done(sca_port_t *scp)
1756 {
1757 	u_int16_t edesc_p;
1758 
1759 	/* update where our indices are */
1760 	scp->sp_rxend = scp->sp_rxstart;
1761 	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;
1762 
1763 	/* update the error [end] descriptor */
1764 	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
1765 	    (sizeof(sca_desc_t) * scp->sp_rxend);
1766 	dmac_write_2(scp, SCA_EDAL0, edesc_p);
1767 }
1768 
1769 /*
1770  * set a port to the "up" state
1771  */
1772 static void
1773 sca_port_up(sca_port_t *scp)
1774 {
1775 	struct sca_softc *sc = scp->sca;
1776 	struct timeval now;
1777 #if 0
1778 	u_int8_t ier0, ier1;
1779 #endif
1780 
1781 	/*
1782 	 * reset things
1783 	 */
1784 #if 0
1785 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
1786 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
1787 #endif
1788 	/*
1789 	 * clear in-use flag
1790 	 */
1791 	scp->sp_if.if_flags &= ~IFF_OACTIVE;
1792 	scp->sp_if.if_flags |= IFF_RUNNING;
1793 
1794 	/*
1795 	 * raise DTR
1796 	 */
1797 	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);
1798 
1799 	/*
1800 	 * raise RTS
1801 	 */
1802 	msci_write_1(scp, SCA_CTL0,
1803 	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1804 	     | SCA_CTL_RTS_HIGH);
1805 
1806 #if 0
1807 	/*
1808 	 * enable interrupts (no timer IER2)
1809 	 */
1810 	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1811 	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1812 	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1813 	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1814 	if (scp->sp_port == 1) {
1815 		ier0 <<= 4;
1816 		ier1 <<= 4;
1817 	}
1818 	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
1819 	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
1820 #else
1821 	if (scp->sp_port == 0) {
1822 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
1823 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
1824 	} else {
1825 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
1826 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
1827 	}
1828 #endif
1829 
1830 	/*
1831 	 * enable transmit and receive
1832 	 */
1833 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
1834 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);
1835 
1836 	/*
1837 	 * reset internal state
1838 	 */
1839 	scp->sp_txinuse = 0;
1840 	scp->sp_txcur = 0;
1841 	getmicrotime(&now);
1842 	scp->cka_lasttx = now.tv_usec;
1843 	scp->cka_lastrx = 0;
1844 }
1845 
1846 /*
1847  * set a port to the "down" state
1848  */
1849 static void
1850 sca_port_down(sca_port_t *scp)
1851 {
1852 	struct sca_softc *sc = scp->sca;
1853 #if 0
1854 	u_int8_t ier0, ier1;
1855 #endif
1856 
1857 	/*
1858 	 * lower DTR
1859 	 */
1860 	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);
1861 
1862 	/*
1863 	 * lower RTS
1864 	 */
1865 	msci_write_1(scp, SCA_CTL0,
1866 	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
1867 	     | SCA_CTL_RTS_LOW);
1868 
1869 	/*
1870 	 * disable interrupts
1871 	 */
1872 #if 0
1873 	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
1874 	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
1875 	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
1876 	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
1877 	if (scp->sp_port == 1) {
1878 		ier0 <<= 4;
1879 		ier1 <<= 4;
1880 	}
1881 	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
1882 	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
1883 #else
1884 	if (scp->sp_port == 0) {
1885 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
1886 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
1887 	} else {
1888 		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
1889 		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
1890 	}
1891 #endif
1892 
1893 	/*
1894 	 * disable transmit and receive
1895 	 */
1896 	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
1897 	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);
1898 
1899 	/*
1900 	 * no, we're not in use anymore
1901 	 */
1902 	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
1903 }
1904 
1905 /*
1906  * disable all DMA and interrupts for all ports at once.
1907  */
1908 void
1909 sca_shutdown(struct sca_softc *sca)
1910 {
1911 	/*
1912 	 * disable DMA and interrupts
1913 	 */
1914 	sca_write_1(sca, SCA_DMER, 0);
1915 	sca_write_1(sca, SCA_IER0, 0);
1916 	sca_write_1(sca, SCA_IER1, 0);
1917 }
1918 
1919 /*
1920  * If there are packets to transmit, start the transmit DMA logic.
1921  */
1922 static void
1923 sca_port_starttx(sca_port_t *scp)
1924 {
1925 	u_int32_t	startdesc_p, enddesc_p;
1926 	int enddesc;
1927 
1928 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));
1929 
1930 	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
1931 	    || scp->sp_txinuse == 0)
1932 		return;
1933 
1934 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));
1935 
1936 	scp->sp_if.if_flags |= IFF_OACTIVE;
1937 
1938 	/*
1939 	 * We have something to do, since we have at least one packet
1940 	 * waiting, and we are not already marked as active.
1941 	 */
1942 	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
1943 	startdesc_p = scp->sp_txdesc_p;
1944 	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;
1945 
1946 	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
1947 	    startdesc_p, enddesc_p));
1948 
1949 	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
1950 	dmac_write_2(scp, SCA_CDAL1,
1951 		     (u_int16_t)(startdesc_p & 0x0000ffff));
1952 
1953 	/*
1954 	 * enable the DMA
1955 	 */
1956 	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
1957 }
1958 
1959 /*
1960  * allocate an mbuf at least long enough to hold "len" bytes.
1961  * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
1962  * otherwise let the caller handle copying the data in.
1963  */
1964 static struct mbuf *
1965 sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
1966 {
1967 	struct mbuf *m;
1968 
1969 	/*
1970 	 * allocate an mbuf and copy the important bits of data
1971 	 * into it.  If the packet won't fit in the header,
1972 	 * allocate a cluster for it and store it there.
1973 	 */
1974 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1975 	if (m == NULL)
1976 		return NULL;
1977 	if (len > MHLEN) {
1978 		if (len > MCLBYTES) {
1979 			m_freem(m);
1980 			return NULL;
1981 		}
1982 		MCLGET(m, M_DONTWAIT);
1983 		if ((m->m_flags & M_EXT) == 0) {
1984 			m_freem(m);
1985 			return NULL;
1986 		}
1987 	}
1988 	if (p != NULL) {
1989 		/* XXX do we need to sync here? */
1990 		if (sc->sc_usedma)
1991 			memcpy(mtod(m, void *), p, len);
1992 		else
1993 			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
1994 			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
1995 	}
1996 	m->m_len = len;
1997 	m->m_pkthdr.len = len;
1998 
1999 	return (m);
2000 }
2001 
2002 /*
2003  * get the base clock
2004  */
2005 void
2006 sca_get_base_clock(struct sca_softc *sc)
2007 {
2008 	struct timeval btv, ctv, dtv;
2009 	u_int64_t bcnt;
2010 	u_int32_t cnt;
2011 	u_int16_t subcnt;
2012 
2013 	/* disable the timer, set prescale to 0 */
2014 	sca_write_1(sc, SCA_TCSR0, 0);
2015 	sca_write_1(sc, SCA_TEPR0, 0);
2016 
2017 	/* reset the counter */
2018 	(void)sca_read_1(sc, SCA_TCSR0);
2019 	subcnt = sca_read_2(sc, SCA_TCNTL0);
2020 
2021 	/* count to max */
2022 	sca_write_2(sc, SCA_TCONRL0, 0xffff);
2023 
2024 	cnt = 0;
2025 	microtime(&btv);
2026 	/* start the timer -- no interrupt enable */
2027 	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
2028 	for (;;) {
2029 		microtime(&ctv);
2030 
2031 		/* end around 3/4 of a second */
2032 		timersub(&ctv, &btv, &dtv);
2033 		if (dtv.tv_usec >= 750000)
2034 			break;
2035 
2036 		/* spin */
2037 		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
2038 			;
2039 		/* reset the timer */
2040 		(void)sca_read_2(sc, SCA_TCNTL0);
2041 		cnt++;
2042 	}
2043 
2044 	/* stop the timer */
2045 	sca_write_1(sc, SCA_TCSR0, 0);
2046 
2047 	subcnt = sca_read_2(sc, SCA_TCNTL0);
2048 	/* add the slop in and get the total timer ticks */
2049 	cnt = (cnt << 16) | subcnt;
2050 
2051 	/* cnt is 1/8 the actual time */
2052 	bcnt = cnt * 8;
2053 	/* make it proportional to 3/4 of a second */
2054 	bcnt *= (u_int64_t)750000;
2055 	bcnt /= (u_int64_t)dtv.tv_usec;
2056 	cnt = bcnt;
2057 
2058 	/* make it Hz */
2059 	cnt *= 4;
2060 	cnt /= 3;
2061 
2062 	SCA_DPRINTF(SCA_DEBUG_CLOCK,
2063 	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));
2064 
2065 	/*
2066 	 * round to the nearest 200 -- this allows for +-3 ticks error
2067 	 */
2068 	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
2069 }
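
/*
 * Worked example of the arithmetic above: the timer counts at 1/8 of
 * the base clock, so a (hypothetical) 9830400Hz base yields about
 * 9830400 / 8 * 0.75 = 921600 ticks during the 3/4-second window.
 * Then 921600 * 8 = 7372800 base-clock ticks, normalized to exactly
 * 750000us, and 7372800 * 4 / 3 = 9830400Hz, which the final rounding
 * to the nearest 200 leaves intact.
 */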
2070 
2071 /*
2072  * print the information about the clock on the ports
2073  */
2074 void
2075 sca_print_clock_info(struct sca_softc *sc)
2076 {
2077 	struct sca_port *scp;
2078 	u_int32_t mhz, div;
2079 	int i;
2080 
2081 	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
2082 	    sc->sc_baseclock);
2083 
2084 	/* print the information about the port clock selection */
2085 	for (i = 0; i < sc->sc_numports; i++) {
2086 		scp = &sc->sc_ports[i];
2087 		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
2088 		div = scp->sp_rxs & SCA_RXS_DIV_MASK;
2089 
2090 		printf("%s: rx clock: ", scp->sp_if.if_xname);
2091 		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
2092 		case SCA_RXS_CLK_LINE:
2093 			printf("line");
2094 			break;
2095 		case SCA_RXS_CLK_LINE_SN:
2096 			printf("line with noise suppression");
2097 			break;
2098 		case SCA_RXS_CLK_INTERNAL:
2099 			printf("internal %d Hz", (mhz >> div));
2100 			break;
2101 		case SCA_RXS_CLK_ADPLL_OUT:
2102 			printf("adpll using internal %d Hz", (mhz >> div));
2103 			break;
2104 		case SCA_RXS_CLK_ADPLL_IN:
2105 			printf("adpll using line clock");
2106 			break;
2107 		}
2108 		printf("  tx clock: ");
2109 		div = scp->sp_txs & SCA_TXS_DIV_MASK;
2110 		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
2111 		case SCA_TXS_CLK_LINE:
2112 			printf("line\n");
2113 			break;
2114 		case SCA_TXS_CLK_INTERNAL:
2115 			printf("internal %d Hz\n", (mhz >> div));
2116 			break;
2117 		case SCA_TXS_CLK_RXCLK:
2118 			printf("rxclock\n");
2119 			break;
2120 		}
2121 		if (scp->sp_eclock)
2122 			printf("%s: outputting line clock\n",
2123 			    scp->sp_if.if_xname);
2124 	}
2125 }
2126 
2127