1 /* $NetBSD: sbmac.c,v 1.65 2022/09/18 12:43:41 thorpej Exp $ */
2 
3 /*
4  * Copyright 2000, 2001, 2004
5  * Broadcom Corporation. All rights reserved.
6  *
7  * This software is furnished under license and may be used and copied only
8  * in accordance with the following terms and conditions.  Subject to these
9  * conditions, you may download, copy, install, use, modify and distribute
10  * modified or unmodified copies of this software in source and/or binary
11  * form. No title or ownership is transferred hereby.
12  *
13  * 1) Any source code used, modified or distributed must reproduce and
14  *    retain this copyright notice and list of conditions as they appear in
15  *    the source file.
16  *
17  * 2) No right is granted to use any trade name, trademark, or logo of
18  *    Broadcom Corporation.  The "Broadcom Corporation" name may not be
19  *    used to endorse or promote products derived from this software
20  *    without the prior written permission of Broadcom Corporation.
21  *
22  * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
23  *    WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
24  *    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
25  *    NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
26  *    FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
27  *    LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  *    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  *    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30  *    BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31  *    WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32  *    OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.65 2022/09/18 12:43:41 thorpej Exp $");
37 
38 #include "opt_inet.h"
39 #include "opt_ns.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/kmem.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/queue.h>
49 #include <sys/device.h>
50 
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/if_ether.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 
57 #include <net/bpf.h>
58 
59 #ifdef INET
60 #include <netinet/in.h>
61 #include <netinet/if_inarp.h>
62 #endif
63 
64 #include <mips/locore.h>
65 
66 #include "sbobiovar.h"
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 #include <dev/mii/mii_bitbang.h>
71 
72 #include <mips/sibyte/include/sb1250_defs.h>
73 #include <mips/sibyte/include/sb1250_regs.h>
74 #include <mips/sibyte/include/sb1250_mac.h>
75 #include <mips/sibyte/include/sb1250_dma.h>
76 #include <mips/sibyte/include/sb1250_scd.h>
77 
78 #include <evbmips/sbmips/systemsw.h>
79 
80 /* Simple types */
81 
82 typedef u_long sbmac_port_t;
83 typedef uint64_t sbmac_physaddr_t;
84 typedef uint64_t sbmac_enetaddr_t;
85 
86 typedef enum { sbmac_speed_auto, sbmac_speed_10,
87 	       sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
88 
89 typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
90 	       sbmac_duplex_full } sbmac_duplex_t;
91 
92 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
93 	       sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
94 
95 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
96 	       sbmac_state_broken } sbmac_state_t;
97 
98 
99 /* Macros */
100 
101 #define	SBMAC_EVENT_COUNTERS	/* Include counters for various events */
102 
103 #define	SBDMA_NEXTBUF(d, f)	((f + 1) & (d)->sbdma_dscr_mask)
104 
105 #define	CACHELINESIZE	32
106 #define	NUMCACHEBLKS(x)	(((x)+CACHELINESIZE-1)/CACHELINESIZE)
107 #define	KVTOPHYS(x)	kvtophys((vaddr_t)(x))
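/*
 * Illustrative example: with a 256-entry ring, sbdma_dscr_mask is 0xff,
 * so SBDMA_NEXTBUF(d, 255) wraps back to descriptor 0.  Similarly,
 * NUMCACHEBLKS(1518) = (1518 + 31) / 32 = 48 cache blocks for a
 * maximum-size Ethernet frame.
 */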
108 
109 #ifdef SBMACDEBUG
110 #define	dprintf(x)	printf x
111 #else
112 #define	dprintf(x)
113 #endif
114 
115 #define	SBMAC_READCSR(t) mips3_ld((register_t)(t))
116 #define	SBMAC_WRITECSR(t, v) mips3_sd((register_t)(t), (v))
117 
118 #define	PKSEG1(x) ((sbmac_port_t) MIPS_PHYS_TO_KSEG1(x))
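/*
 * Note: CSR accesses are 64-bit loads/stores (mips3_ld/mips3_sd) to
 * addresses that PKSEG1() maps through KSEG1, i.e. uncached access to
 * the MAC registers.
 */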
119 
120 /* These are limited to fit within one virtual page, and must be 2**N.  */
121 #define	SBMAC_MAX_TXDESCR	256		/* should be 1024 */
122 #define	SBMAC_MAX_RXDESCR	256		/* should be 512 */
123 
124 /* DMA Descriptor structure */
125 
126 typedef struct sbdmadscr_s {
127 	uint64_t dscr_a;
128 	uint64_t dscr_b;
129 } sbdmadscr_t;
130 
131 
132 /* DMA Controller structure */
133 
134 typedef struct sbmacdma_s {
135 
136 	/*
137 	 * This stuff is used to identify the channel and the registers
138 	 * associated with it.
139 	 */
140 
141 	struct sbmac_softc *sbdma_eth;	/* back pointer to associated MAC */
142 	int		sbdma_channel;	/* channel number */
143 	int		sbdma_txdir;	/* direction (1=transmit) */
144 	int		sbdma_maxdescr;	/* total # of descriptors in ring */
145 	sbmac_port_t	sbdma_config0;	/* DMA config register 0 */
146 	sbmac_port_t	sbdma_config1;	/* DMA config register 1 */
147 	sbmac_port_t	sbdma_dscrbase;	/* Descriptor base address */
148 	sbmac_port_t	sbdma_dscrcnt;	/* Descriptor count register */
149 	sbmac_port_t	sbdma_curdscr;	/* current descriptor address */
150 
151 	/*
152 	 * This stuff is for maintenance of the ring
153 	 */
154 	sbdmadscr_t	*sbdma_dscrtable;	/* base of descriptor table */
155 	struct mbuf	**sbdma_ctxtable;	/* context table, one per descr */
156 	unsigned int	sbdma_dscr_mask;	/* sbdma_maxdescr - 1 */
157 	paddr_t		sbdma_dscrtable_phys;	/* and also the phys addr */
158 	unsigned int	sbdma_add_index;	/* next dscr for sw to add */
159 	unsigned int	sbdma_rem_index;	/* next dscr for sw to remove */
160 } sbmacdma_t;
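/*
 * Ring-usage sketch: sbdma_add_index is where software queues the next
 * descriptor and sbdma_rem_index is where completed descriptors are
 * reclaimed; the ring is treated as full when
 * ((add_index + 1) & sbdma_dscr_mask) == rem_index, which is the check
 * made in sbdma_add_rcvbuffer() and sbdma_add_txbuffer() below.
 */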
161 
162 
163 /* Ethernet softc structure */
164 
165 struct sbmac_softc {
166 
167 	/*
168 	 * NetBSD-specific things
169 	 */
170 	struct ethercom	sc_ethercom;	/* Ethernet common part */
171 	struct mii_data	sc_mii;
172 	struct callout	sc_tick_ch;
173 
174 	device_t	sc_dev;		/* device */
175 	u_short		sbm_if_flags;
176 	void		*sbm_intrhand;
177 
178 	/*
179 	 * Controller-specific things
180 	 */
181 
182 	sbmac_port_t	sbm_base;	/* MAC's base address */
183 	sbmac_state_t	sbm_state;	/* current state */
184 
185 	sbmac_port_t	sbm_macenable;	/* MAC Enable Register */
186 	sbmac_port_t	sbm_maccfg;	/* MAC Configuration Register */
187 	sbmac_port_t	sbm_fifocfg;	/* FIFO configuration register */
188 	sbmac_port_t	sbm_framecfg;	/* Frame configuration register */
189 	sbmac_port_t	sbm_rxfilter;	/* receive filter register */
190 	sbmac_port_t	sbm_isr;	/* Interrupt status register */
191 	sbmac_port_t	sbm_imr;	/* Interrupt mask register */
192 
193 	sbmac_speed_t	sbm_speed;	/* current speed */
194 	sbmac_duplex_t	sbm_duplex;	/* current duplex */
195 	sbmac_fc_t	sbm_fc;		/* current flow control setting */
196 	int		sbm_rxflags;	/* received packet flags */
197 
198 	u_char		sbm_hwaddr[ETHER_ADDR_LEN];
199 
200 	sbmacdma_t	sbm_txdma;	/* for now, only use channel 0 */
201 	sbmacdma_t	sbm_rxdma;
202 
203 	int		sbm_pass3_dma;	/* chip has pass3 SOC DMA features */
204 
205 #ifdef SBMAC_EVENT_COUNTERS
206 	struct evcnt	sbm_ev_rxintr;	/* Rx interrupts */
207 	struct evcnt	sbm_ev_txintr;	/* Tx interrupts */
208 	struct evcnt	sbm_ev_txdrop;	/* Tx dropped because mbuf allocation failed */
209 	struct evcnt	sbm_ev_txstall;	/* Tx stalled due to no descriptors free */
210 
211 	struct evcnt	sbm_ev_txsplit;	/* pass3 Tx split mbuf */
212 	struct evcnt	sbm_ev_txkeep;	/* pass3 Tx didn't split mbuf */
213 #endif
214 };
215 
216 
217 #ifdef SBMAC_EVENT_COUNTERS
218 #define	SBMAC_EVCNT_INCR(ev)	(ev).ev_count++
219 #else
220 #define	SBMAC_EVCNT_INCR(ev)	do { /* nothing */ } while (0)
221 #endif
222 
223 /* Externs */
224 
225 extern paddr_t kvtophys(vaddr_t);
226 
227 /* Prototypes */
228 
229 static void sbdma_initctx(sbmacdma_t *, struct sbmac_softc *, int, int, int);
230 static void sbdma_channel_start(sbmacdma_t *);
231 static int sbdma_add_rcvbuffer(sbmacdma_t *, struct mbuf *);
232 static int sbdma_add_txbuffer(sbmacdma_t *, struct mbuf *);
233 static void sbdma_emptyring(sbmacdma_t *);
234 static void sbdma_fillring(sbmacdma_t *);
235 static void sbdma_rx_process(struct sbmac_softc *, sbmacdma_t *);
236 static void sbdma_tx_process(struct sbmac_softc *, sbmacdma_t *);
237 static void sbmac_initctx(struct sbmac_softc *);
238 static void sbmac_channel_start(struct sbmac_softc *);
239 static void sbmac_channel_stop(struct sbmac_softc *);
240 static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,
241     sbmac_state_t);
242 static void sbmac_promiscuous_mode(struct sbmac_softc *, bool);
243 static void sbmac_init_and_start(struct sbmac_softc *);
244 static uint64_t sbmac_addr2reg(u_char *);
245 static void sbmac_intr(void *, uint32_t, vaddr_t);
246 static void sbmac_start(struct ifnet *);
247 static void sbmac_setmulti(struct sbmac_softc *);
248 static int sbmac_ether_ioctl(struct ifnet *, u_long, void *);
249 static int sbmac_ioctl(struct ifnet *, u_long, void *);
250 static void sbmac_watchdog(struct ifnet *);
251 static int sbmac_match(device_t, cfdata_t, void *);
252 static void sbmac_attach(device_t, device_t, void *);
253 static bool sbmac_set_speed(struct sbmac_softc *, sbmac_speed_t);
254 static bool sbmac_set_duplex(struct sbmac_softc *, sbmac_duplex_t, sbmac_fc_t);
255 static void sbmac_tick(void *);
256 
257 
258 /* Globals */
259 
260 CFATTACH_DECL_NEW(sbmac, sizeof(struct sbmac_softc),
261     sbmac_match, sbmac_attach, NULL, NULL);
262 
263 static uint32_t sbmac_mii_bitbang_read(device_t self);
264 static void sbmac_mii_bitbang_write(device_t self, uint32_t val);
265 
266 static const struct mii_bitbang_ops sbmac_mii_bitbang_ops = {
267 	sbmac_mii_bitbang_read,
268 	sbmac_mii_bitbang_write,
269 	{
270 		(uint32_t)M_MAC_MDIO_OUT,	/* MII_BIT_MDO */
271 		(uint32_t)M_MAC_MDIO_IN,	/* MII_BIT_MDI */
272 		(uint32_t)M_MAC_MDC,		/* MII_BIT_MDC */
273 		0,				/* MII_BIT_DIR_HOST_PHY */
274 		(uint32_t)M_MAC_MDIO_DIR	/* MII_BIT_DIR_PHY_HOST */
275 	}
276 };
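/*
 * Note: mii_bitbang_readreg()/mii_bitbang_writereg() clock MDIO frames
 * one bit at a time through these ops, using sbmac_mii_bitbang_read()
 * and sbmac_mii_bitbang_write() below to touch the MAC's MDIO register.
 */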
277 
278 static uint32_t
279 sbmac_mii_bitbang_read(device_t self)
280 {
281 	struct sbmac_softc *sc = device_private(self);
282 	sbmac_port_t reg;
283 
284 	reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
285 	return (uint32_t) SBMAC_READCSR(reg);
286 }
287 
288 static void
289 sbmac_mii_bitbang_write(device_t self, uint32_t val)
290 {
291 	struct sbmac_softc *sc = device_private(self);
292 	sbmac_port_t reg;
293 
294 	reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
295 
296 	SBMAC_WRITECSR(reg, (val &
297 	    (M_MAC_MDC | M_MAC_MDIO_DIR | M_MAC_MDIO_OUT | M_MAC_MDIO_IN)));
298 }
299 
300 /*
301  * Read a PHY register through the MII.
302  */
303 static int
304 sbmac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
305 {
306 
307 	return mii_bitbang_readreg(self, &sbmac_mii_bitbang_ops, phy, reg,
308 	    val);
309 }
310 
311 /*
312  * Write to a PHY register through the MII.
313  */
314 static int
315 sbmac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
316 {
317 
318 	return mii_bitbang_writereg(self, &sbmac_mii_bitbang_ops, phy, reg,
319 	    val);
320 }
321 
322 static void
323 sbmac_mii_statchg(struct ifnet *ifp)
324 {
325 	struct sbmac_softc *sc = ifp->if_softc;
326 	sbmac_state_t oldstate;
327 
328 	/* Stop the MAC in preparation for changing all of the parameters. */
329 	oldstate = sbmac_set_channel_state(sc, sbmac_state_off);
330 
331 	switch (sc->sc_ethercom.ec_if.if_baudrate) {
332 	default:		/* if autonegotiation fails, assume 10Mbit */
333 	case IF_Mbps(10):
334 		sbmac_set_speed(sc, sbmac_speed_10);
335 		break;
336 
337 	case IF_Mbps(100):
338 		sbmac_set_speed(sc, sbmac_speed_100);
339 		break;
340 
341 	case IF_Mbps(1000):
342 		sbmac_set_speed(sc, sbmac_speed_1000);
343 		break;
344 	}
345 
346 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
347 		/* Configure for full-duplex */
348 		/* XXX: is flow control right for 10, 100? */
349 		sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame);
350 	} else {
351 		/* Configure for half-duplex */
352 		/* XXX: is flow control right? */
353 		sbmac_set_duplex(sc, sbmac_duplex_half, sbmac_fc_disabled);
354 	}
355 
356 	/* And put it back into its former state. */
357 	sbmac_set_channel_state(sc, oldstate);
358 }
359 
360 /*
361  *  SBDMA_INITCTX(d, sc, chan, txrx, maxdescr)
362  *
363  *  Initialize a DMA channel context.  Since there are potentially
364  *  eight DMA channels per MAC, it's nice to do this in a standard
365  *  way.
366  *
367  *  Input parameters:
368  *	d - sbmacdma_t structure (DMA channel context)
369  *	sc - sbmac_softc structure (pointer to a MAC)
370  *	chan - channel number (0..1 right now)
371  *	txrx - Identifies DMA_TX or DMA_RX for channel direction
372  *	maxdescr - number of descriptors
373  *
374  *  Return value:
375  *	nothing
376  */
377 
378 static void
379 sbdma_initctx(sbmacdma_t *d, struct sbmac_softc *sc, int chan, int txrx,
380     int maxdescr)
381 {
382 	uintptr_t ptr;
383 
384 	/*
385 	 * Save away interesting stuff in the structure
386 	 */
387 
388 	d->sbdma_eth = sc;
389 	d->sbdma_channel = chan;
390 	d->sbdma_txdir = txrx;
391 
392 	/*
393 	 * initialize register pointers
394 	 */
395 
396 	d->sbdma_config0 = PKSEG1(sc->sbm_base +
397 	    R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG0));
398 	d->sbdma_config1 = PKSEG1(sc->sbm_base +
399 	    R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG1));
400 	d->sbdma_dscrbase = PKSEG1(sc->sbm_base +
401 	    R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_BASE));
402 	d->sbdma_dscrcnt = PKSEG1(sc->sbm_base +
403 	    R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_CNT));
404 	d->sbdma_curdscr = PKSEG1(sc->sbm_base +
405 	    R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CUR_DSCRADDR));
406 
407 	/*
408 	 * Allocate memory for the ring.  This must be aligned to a
409 	 * 32-byte cache line boundary on pass1 or pass2 silicon.
410 	 */
411 
412 	d->sbdma_maxdescr = maxdescr;
413 	d->sbdma_dscr_mask = d->sbdma_maxdescr - 1;
414 	ptr = (uintptr_t)kmem_zalloc(d->sbdma_maxdescr * sizeof(sbdmadscr_t) +
415 	    CACHELINESIZE - 1, KM_SLEEP);
416 	d->sbdma_dscrtable = (sbdmadscr_t *)roundup2(ptr, CACHELINESIZE);
417 
418 	d->sbdma_dscrtable_phys = KVTOPHYS(d->sbdma_dscrtable);
419 
420 	/*
421 	 * And context table
422 	 */
423 
424 	d->sbdma_ctxtable = (struct mbuf **)
425 	    kmem_zalloc(d->sbdma_maxdescr * sizeof(struct mbuf *), KM_SLEEP);
426 }
427 
428 /*
429  *  SBDMA_CHANNEL_START(d)
430  *
431  *  Initialize the hardware registers for a DMA channel.
432  *
433  *  Input parameters:
434  *	d - DMA channel to init (context must be previously initialized)
435  *
436  *  Return value:
437  *	nothing
438  */
439 
440 static void
441 sbdma_channel_start(sbmacdma_t *d)
442 {
443 	/*
444 	 * Turn on the DMA channel
445 	 */
446 
447 	SBMAC_WRITECSR(d->sbdma_config1, 0);
448 
449 	SBMAC_WRITECSR(d->sbdma_dscrbase, d->sbdma_dscrtable_phys);
450 
451 	SBMAC_WRITECSR(d->sbdma_config0, V_DMA_RINGSZ(d->sbdma_maxdescr) | 0);
452 
453 	/*
454 	 * Initialize ring pointers
455 	 */
456 
457 	d->sbdma_add_index = 0;
458 	d->sbdma_rem_index = 0;
459 }
460 
461 /*
462  *  SBDMA_ADD_RCVBUFFER(d, m)
463  *
464  *  Add a buffer to the specified DMA channel.   For receive channels,
465  *  this queues a buffer for inbound packets.
466  *
467  *  Input parameters:
468  *	d - DMA channel descriptor
469  *	m - mbuf to add, or NULL if we should allocate one.
470  *
471  *  Return value:
472  *	0 if buffer added successfully
473  *	otherwise an error code (ring full or mbuf allocation failed)
474  */
475 
476 static int
477 sbdma_add_rcvbuffer(sbmacdma_t *d, struct mbuf *m)
478 {
479 	unsigned int dsc, nextdsc;
480 	struct mbuf *m_new = NULL;
481 
482 	/* get pointer to our current place in the ring */
483 
484 	dsc = d->sbdma_add_index;
485 	nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
486 
487 	/*
488 	 * figure out if the ring is full - if the next descriptor
489 	 * is the same as the one that we're going to remove from
490 	 * the ring, the ring is full
491 	 */
492 
493 	if (nextdsc == d->sbdma_rem_index)
494 		return ENOSPC;
495 
496 	/*
497 	 * Allocate an mbuf if we don't already have one.
498 	 * If we do have an mbuf, reset it so that it's empty.
499 	 */
500 
501 	if (m == NULL) {
502 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
503 		if (m_new == NULL) {
504 			aprint_error_dev(d->sbdma_eth->sc_dev,
505 			    "mbuf allocation failed\n");
506 			return ENOBUFS;
507 		}
508 
509 		MCLGET(m_new, M_DONTWAIT);
510 		if (!(m_new->m_flags & M_EXT)) {
511 			aprint_error_dev(d->sbdma_eth->sc_dev,
512 			    "mbuf cluster allocation failed\n");
513 			m_freem(m_new);
514 			return ENOBUFS;
515 		}
516 
517 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
518 		m_adj(m_new, ETHER_ALIGN);
519 	} else {
520 		m_new = m;
521 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
522 		m_new->m_data = m_new->m_ext.ext_buf;
523 		m_adj(m_new, ETHER_ALIGN);
524 	}
525 
526 	/*
527 	 * fill in the descriptor
528 	 */
529 
530 	d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new, void *)) |
531 	    V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(ETHER_ALIGN + m_new->m_len)) |
532 	    M_DMA_DSCRA_INTERRUPT;
533 
534 	/* receiving: no options */
535 	d->sbdma_dscrtable[dsc].dscr_b = 0;
536 
537 	/*
538 	 * fill in the context
539 	 */
540 
541 	d->sbdma_ctxtable[dsc] = m_new;
542 
543 	/*
544 	 * point at next packet
545 	 */
546 
547 	d->sbdma_add_index = nextdsc;
548 
549 	/*
550 	 * Give the buffer to the DMA engine.
551 	 */
552 
553 	SBMAC_WRITECSR(d->sbdma_dscrcnt, 1);
554 
555 	return 0;					/* we did it */
556 }
557 
558 /*
559  *  SBDMA_ADD_TXBUFFER(d, m)
560  *
561  *  Add a transmit buffer to the specified DMA channel, causing a
562  *  transmit to start.
563  *
564  *  Input parameters:
565  *	d - DMA channel descriptor
566  *	m - mbuf to add
567  *
568  *  Return value:
569  *	0 transmit queued successfully
570  *	otherwise error code
571  */
572 
573 static int
574 sbdma_add_txbuffer(sbmacdma_t *d, struct mbuf *m)
575 {
576 	unsigned int dsc, nextdsc, prevdsc, origdesc;
577 	int length;
578 	int num_mbufs = 0;
579 	struct sbmac_softc *sc = d->sbdma_eth;
580 
581 	/* get pointer to our current place in the ring */
582 
583 	dsc = d->sbdma_add_index;
584 	nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
585 
586 	/*
587 	 * figure out if the ring is full - if the next descriptor
588 	 * is the same as the one that we're going to remove from
589 	 * the ring, the ring is full
590 	 */
591 
592 	if (nextdsc == d->sbdma_rem_index) {
593 		SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
594 		return ENOSPC;
595 	}
596 
597 	/*
598 	 * PASS3 parts do not have buffer alignment restriction.
599 	 * No need to copy/coalesce to new mbuf.  Also has different
600 	 * descriptor format
601 	 */
602 	if (sc->sbm_pass3_dma) {
603 		struct mbuf *m_temp = NULL;
604 
605 		/*
606 		 * Loop thru this mbuf record.
607 		 * The head mbuf will have SOP set.
608 		 */
609 		d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m, void *)) |
610 		    M_DMA_ETHTX_SOP;
611 
612 		/*
613 		 * transmitting: set outbound options, buffer A size (+ low 5
614 		 * bits of start addr), and packet length.
615 		 */
616 		d->sbdma_dscrtable[dsc].dscr_b =
617 		    V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
618 		    V_DMA_DSCRB_A_SIZE((m->m_len +
619 		      (mtod(m, uintptr_t) & 0x0000001F))) |
620 		    V_DMA_DSCRB_PKT_SIZE_MSB((m->m_pkthdr.len & 0xc000) >> 14) |
621 		    V_DMA_DSCRB_PKT_SIZE(m->m_pkthdr.len & 0x3fff);
622 
623 		d->sbdma_add_index = nextdsc;
624 		origdesc = prevdsc = dsc;
625 		dsc = d->sbdma_add_index;
626 		num_mbufs++;
627 
628 		/* Start with first non-head mbuf */
629 		for(m_temp = m->m_next; m_temp != 0; m_temp = m_temp->m_next) {
630 			int len, next_len;
631 			uint64_t addr;
632 
633 			if (m_temp->m_len == 0)
634 				continue;	/* Skip 0-length mbufs */
635 
636 			len = m_temp->m_len;
637 			addr = KVTOPHYS(mtod(m_temp, void *));
638 
639 			/*
640 			 * Check to see if the mbuf spans a page boundary.  If
641 			 * it does, and the physical pages behind the virtual
642 			 * pages are not contiguous, split it so that each
643 			 * virtual page uses its own Tx descriptor.
644 			 */
645 			if (trunc_page(addr) != trunc_page(addr + len - 1)) {
646 				next_len = (addr + len) - trunc_page(addr + len);
647 
648 				len -= next_len;
649 
650 				if (addr + len ==
651 				    KVTOPHYS(mtod(m_temp, char *) + len)) {
652 					SBMAC_EVCNT_INCR(sc->sbm_ev_txkeep);
653 					len += next_len;
654 					next_len = 0;
655 				} else {
656 					SBMAC_EVCNT_INCR(sc->sbm_ev_txsplit);
657 				}
658 			} else {
659 				next_len = 0;
660 			}
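			/*
			 * Illustrative example (assuming 4 KiB pages): a
			 * 200-byte mbuf at physical address 0x0ff0 crosses a
			 * page boundary, so it is described by two
			 * descriptors (16 bytes, then 184 bytes) unless the
			 * two physical pages happen to be contiguous.
			 */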
661 
662 again:
663 			/*
664 			 * fill in the descriptor
665 			 */
666 			d->sbdma_dscrtable[dsc].dscr_a = addr;
667 
668 			/*
669 			 * transmitting: set outbound options, buffer A
670 			 * size (+ low 5 bits of start addr)
671 			 */
672 			d->sbdma_dscrtable[dsc].dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_NOTSOP) |
673 			    V_DMA_DSCRB_A_SIZE((len + (addr & 0x0000001F)));
674 
675 			d->sbdma_ctxtable[dsc] = NULL;
676 
677 			/*
678 			 * point at next descriptor
679 			 */
680 			nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);
681 			if (nextdsc == d->sbdma_rem_index) {
682 				d->sbdma_add_index = origdesc;
683 				SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
684 				return ENOSPC;
685 			}
686 			d->sbdma_add_index = nextdsc;
687 
688 			prevdsc = dsc;
689 			dsc = d->sbdma_add_index;
690 			num_mbufs++;
691 
692 			if (next_len != 0) {
693 				addr = KVTOPHYS(mtod(m_temp, char *) + len);
694 				len = next_len;
695 
696 				next_len = 0;
697 				goto again;
698 			}
699 
700 		}
701 		/* Set head mbuf to last context index */
702 		d->sbdma_ctxtable[prevdsc] = m;
703 
704 		/* Interrupt on last dscr of packet.  */
705 		d->sbdma_dscrtable[prevdsc].dscr_a |= M_DMA_DSCRA_INTERRUPT;
706 	} else {
707 		struct mbuf *m_new = NULL;
708 		/*
709 		 * [BEGIN XXX]
710 		 * XXX Copy/coalesce the mbufs into a single mbuf cluster (we
711 		 * assume it will fit).  This is a temporary hack to get us
712 		 * going.
713 		 */
714 
715 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
716 		if (m_new == NULL) {
717 			aprint_error_dev(d->sbdma_eth->sc_dev,
718 			    "mbuf allocation failed\n");
719 			SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
720 			return ENOBUFS;
721 		}
722 
723 		MCLGET(m_new, M_DONTWAIT);
724 		if (!(m_new->m_flags & M_EXT)) {
725 			aprint_error_dev(d->sbdma_eth->sc_dev,
726 			    "mbuf cluster allocation failed\n");
727 			m_freem(m_new);
728 			SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop);
729 			return ENOBUFS;
730 		}
731 
732 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
733 		/*m_adj(m_new, ETHER_ALIGN);*/
734 
735 		/*
736 		 * XXX Don't forget to include the offset portion in the
737 		 * XXX cache block calculation when this code is rewritten!
738 		 */
739 
740 		/*
741 		 * Copy data
742 		 */
743 
744 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
745 		m_new->m_len = m_new->m_pkthdr.len = m->m_pkthdr.len;
746 
747 		/* Free old mbuf 'm', actual mbuf is now 'm_new' */
748 
749 		// XXX: CALLERS WILL FREE, they might have to bpf_mtap() if this
750 		// XXX: function succeeds.
751 		// m_freem(m);
752 		length = m_new->m_len;
753 
754 		/* [END XXX] */
755 		/*
756 		 * fill in the descriptor
757 		 */
758 
759 		d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new,void *)) |
760 		    V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(m_new->m_len)) |
761 		    M_DMA_DSCRA_INTERRUPT |
762 		    M_DMA_ETHTX_SOP;
763 
764 		/* transmitting: set outbound options and length */
765 		d->sbdma_dscrtable[dsc].dscr_b =
766 		    V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
767 		    V_DMA_DSCRB_PKT_SIZE(length);
768 
769 		num_mbufs++;
770 
771 		/*
772 		 * fill in the context
773 		 */
774 
775 		d->sbdma_ctxtable[dsc] = m_new;
776 
777 		/*
778 		 * point at next packet
779 		 */
780 		d->sbdma_add_index = nextdsc;
781 	}
782 
783 	/*
784 	 * Give the buffer to the DMA engine.
785 	 */
786 
787 	SBMAC_WRITECSR(d->sbdma_dscrcnt, num_mbufs);
788 
789 	return 0;					/* we did it */
790 }
791 
792 /*
793  *  SBDMA_EMPTYRING(d)
794  *
795  *  Free all allocated mbufs on the specified DMA channel.
796  *
797  *  Input parameters:
798  *	d  - DMA channel
799  *
800  *  Return value:
801  *	nothing
802  */
803 
804 static void
805 sbdma_emptyring(sbmacdma_t *d)
806 {
807 	int idx;
808 	struct mbuf *m;
809 
810 	for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
811 		m = d->sbdma_ctxtable[idx];
812 		if (m) {
813 			m_freem(m);
814 			d->sbdma_ctxtable[idx] = NULL;
815 		}
816 	}
817 }
818 
819 /*
820  *  SBDMA_FILLRING(d)
821  *
822  *  Fill the specified DMA channel (must be receive channel)
823  *  with mbufs
824  *
825  *  Input parameters:
826  *	d - DMA channel
827  *
828  *  Return value:
829  *	nothing
830  */
831 
832 static void
833 sbdma_fillring(sbmacdma_t *d)
834 {
835 	int idx;
836 
837 	for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++)
838 		if (sbdma_add_rcvbuffer(d, NULL) != 0)
839 			break;
840 }
841 
842 /*
843  *  SBDMA_RX_PROCESS(sc, d)
844  *
845  *  Process "completed" receive buffers on the specified DMA channel.
846  *  Note that this isn't really ideal for priority channels, since
847  *  it processes all of the packets on a given channel before
848  *  returning.
849  *
850  *  Input parameters:
851  *	sc - softc structure
852  *	d - DMA channel context
853  *
854  *  Return value:
855  *	nothing
856  */
857 
858 static void
859 sbdma_rx_process(struct sbmac_softc *sc, sbmacdma_t *d)
860 {
861 	int curidx;
862 	int hwidx;
863 	sbdmadscr_t *dscp;
864 	struct mbuf *m;
865 	int len;
866 
867 	struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
868 
869 	for (;;) {
870 		/*
871 		 * figure out where we are (as an index) and where
872 		 * the hardware is (also as an index)
873 		 *
874 		 * This could be done faster if (for example) the
875 		 * descriptor table was page-aligned and contiguous in
876 		 * both virtual and physical memory -- you could then
877 		 * just compare the low-order bits of the virtual address
878 		 * (sbdma_rem_index) and the physical address
879 		 * (sbdma_curdscr CSR).
880 		 */
881 
882 		curidx = d->sbdma_rem_index;
883 		hwidx = (int)
884 		    (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
885 		    d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
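		/*
		 * Example: if the descriptor table starts at physical
		 * 0x1000 and the CUR_DSCRADDR CSR reads 0x1040, then with
		 * 16-byte descriptors hwidx = (0x1040 - 0x1000) / 16 = 4.
		 */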
886 
887 		/*
888 		 * If they're the same, that means we've processed all
889 		 * of the descriptors up to (but not including) the one that
890 		 * the hardware is working on right now.
891 		 */
892 
893 		if (curidx == hwidx)
894 			break;
895 
896 		/*
897 		 * Otherwise, get the packet's mbuf ptr back
898 		 */
899 
900 		dscp = &(d->sbdma_dscrtable[curidx]);
901 		m = d->sbdma_ctxtable[curidx];
902 		d->sbdma_ctxtable[curidx] = NULL;
903 
904 		len = (int)G_DMA_DSCRB_PKT_SIZE(dscp->dscr_b) - 4;
905 
906 		/*
907 		 * Check packet status.  If good, process it.
908 		 * If not, silently drop it and put it back on the
909 		 * receive ring.
910 		 */
911 
912 		if (! (dscp->dscr_a & M_DMA_ETHRX_BAD)) {
913 
914 			/*
915 			 * Set length into the packet
916 			 * XXX do we remove the CRC here?
917 			 */
918 			m->m_pkthdr.len = m->m_len = len;
919 
920 			m_set_rcvif(m, ifp);
921 
922 
923 			/*
924 			 * Add a new buffer to replace the old one.
925 			 */
926 			sbdma_add_rcvbuffer(d, NULL);
927 
928 			/*
929 			 * Handle BPF listeners. Let the BPF user see the
930 			 * packet, but don't pass it up to the ether_input()
931 			 * layer unless it's a broadcast packet, multicast
932 			 * packet, matches our ethernet address or the
933 			 * interface is in promiscuous mode.
934 			 */
935 
936 			/*
937 			 * Pass the buffer to the kernel
938 			 */
939 			if_percpuq_enqueue(ifp->if_percpuq, m);
940 		} else {
941 			/*
942 			 * Packet was mangled somehow.  Just drop it and
943 			 * put it back on the receive ring.
944 			 */
945 			sbdma_add_rcvbuffer(d, m);
946 		}
947 
948 		/*
949 		 * .. and advance to the next buffer.
950 		 */
951 
952 		d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
953 	}
954 }
955 
956 /*
957  *  SBDMA_TX_PROCESS(sc, d)
958  *
959  *  Process "completed" transmit buffers on the specified DMA channel.
960  *  This is normally called within the interrupt service routine.
961  *  Note that this isn't really ideal for priority channels, since
962  *  it processes all of the packets on a given channel before
963  *  returning.
964  *
965  *  Input parameters:
966  *	sc - softc structure
967  *	d - DMA channel context
968  *
969  *  Return value:
970  *	nothing
971  */
972 
973 static void
974 sbdma_tx_process(struct sbmac_softc *sc, sbmacdma_t *d)
975 {
976 	int curidx;
977 	int hwidx;
978 	struct mbuf *m;
979 
980 	struct ifnet *ifp = &(sc->sc_ethercom.ec_if);
981 
982 	for (;;) {
983 		/*
984 		 * figure out where we are (as an index) and where
985 		 * the hardware is (also as an index)
986 		 *
987 		 * This could be done faster if (for example) the
988 		 * descriptor table was page-aligned and contiguous in
989 		 * both virtual and physical memory -- you could then
990 		 * just compare the low-order bits of the virtual address
991 		 * (sbdma_rem_index) and the physical address
992 		 * (sbdma_curdscr CSR).
993 		 */
994 
995 		curidx = d->sbdma_rem_index;
996 		hwidx = (int)
997 		    (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
998 		    d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
999 
1000 		/*
1001 		 * If they're the same, that means we've processed all
1002 		 * of the descriptors up to (but not including) the one that
1003 		 * the hardware is working on right now.
1004 		 */
1005 
1006 		if (curidx == hwidx)
1007 			break;
1008 
1009 		/*
1010 		 * Otherwise, get the packet's mbuf ptr back
1011 		 */
1012 
1013 		m = d->sbdma_ctxtable[curidx];
1014 		d->sbdma_ctxtable[curidx] = NULL;
1015 
1016 		/*
1017 		 * for transmits we just free buffers and count packets.
1018 		 */
1019 		if_statinc(ifp, if_opackets);
1020 		m_freem(m);
1021 
1022 		/*
1023 		 * .. and advance to the next buffer.
1024 		 */
1025 
1026 		d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index);
1027 	}
1028 }
1029 
1030 /*
1031  *  SBMAC_INITCTX(s)
1032  *
1033  *  Initialize an Ethernet context structure - this is called
1034  *  once per MAC on the 1250.  Memory is allocated here, so don't
1035  *  call it again from inside the ioctl routines that bring the
1036  *  interface up/down.
1037  *
1038  *  Input parameters:
1039  *	sc - sbmac context structure
1040  *
1041  *  Return value:
1042  *	nothing
1043  */
1044 
1045 static void
1046 sbmac_initctx(struct sbmac_softc *sc)
1047 {
1048 	uint64_t sysrev;
1049 
1050 	/*
1051 	 * figure out the addresses of some ports
1052 	 */
1053 
1054 	sc->sbm_macenable = PKSEG1(sc->sbm_base + R_MAC_ENABLE);
1055 	sc->sbm_maccfg	  = PKSEG1(sc->sbm_base + R_MAC_CFG);
1056 	sc->sbm_fifocfg	  = PKSEG1(sc->sbm_base + R_MAC_THRSH_CFG);
1057 	sc->sbm_framecfg  = PKSEG1(sc->sbm_base + R_MAC_FRAMECFG);
1058 	sc->sbm_rxfilter  = PKSEG1(sc->sbm_base + R_MAC_ADFILTER_CFG);
1059 	sc->sbm_isr	  = PKSEG1(sc->sbm_base + R_MAC_STATUS);
1060 	sc->sbm_imr	  = PKSEG1(sc->sbm_base + R_MAC_INT_MASK);
1061 
1062 	/*
1063 	 * Initialize the DMA channels.  Right now, only one per MAC is used
1064 	 * Note: Only do this _once_, as it allocates memory from the kernel!
1065 	 */
1066 
1067 	sbdma_initctx(&(sc->sbm_txdma), sc, 0, DMA_TX, SBMAC_MAX_TXDESCR);
1068 	sbdma_initctx(&(sc->sbm_rxdma), sc, 0, DMA_RX, SBMAC_MAX_RXDESCR);
1069 
1070 	/*
1071 	 * initial state is OFF
1072 	 */
1073 
1074 	sc->sbm_state = sbmac_state_off;
1075 
1076 	/*
1077 	 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1078 	 */
1079 
1080 	sc->sbm_speed = sbmac_speed_10;
1081 	sc->sbm_duplex = sbmac_duplex_half;
1082 	sc->sbm_fc = sbmac_fc_disabled;
1083 
1084 	/*
1085 	 * Determine SOC type.  112x has Pass3 SOC features.
1086 	 */
1087 	sysrev = SBMAC_READCSR( PKSEG1(A_SCD_SYSTEM_REVISION) );
1088 	sc->sbm_pass3_dma = (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1120 ||
1089 			    SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125 ||
1090 			    SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125H ||
1091 			    (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250 &&
1092 			     G_SYS_REVISION(sysrev) >= K_SYS_REVISION_BCM1250_PASS3));
1093 #ifdef SBMAC_EVENT_COUNTERS
1094 	const char * const xname = device_xname(sc->sc_dev);
1095 	evcnt_attach_dynamic(&sc->sbm_ev_rxintr, EVCNT_TYPE_INTR,
1096 	    NULL, xname, "rxintr");
1097 	evcnt_attach_dynamic(&sc->sbm_ev_txintr, EVCNT_TYPE_INTR,
1098 	    NULL, xname, "txintr");
1099 	evcnt_attach_dynamic(&sc->sbm_ev_txdrop, EVCNT_TYPE_MISC,
1100 	    NULL, xname, "txdrop");
1101 	evcnt_attach_dynamic(&sc->sbm_ev_txstall, EVCNT_TYPE_MISC,
1102 	    NULL, xname, "txstall");
1103 	if (sc->sbm_pass3_dma) {
1104 		evcnt_attach_dynamic(&sc->sbm_ev_txsplit, EVCNT_TYPE_MISC,
1105 		    NULL, xname, "pass3tx-split");
1106 		evcnt_attach_dynamic(&sc->sbm_ev_txkeep, EVCNT_TYPE_MISC,
1107 		    NULL, xname, "pass3tx-keep");
1108 	}
1109 #endif
1110 }
1111 
1112 /*
1113  *  SBMAC_CHANNEL_START(s)
1114  *
1115  *  Start packet processing on this MAC.
1116  *
1117  *  Input parameters:
1118  *	sc - sbmac structure
1119  *
1120  *  Return value:
1121  *	nothing
1122  */
1123 
1124 static void
1125 sbmac_channel_start(struct sbmac_softc *sc)
1126 {
1127 	uint64_t reg;
1128 	sbmac_port_t port;
1129 	uint64_t cfg, fifo, framecfg;
1130 	int idx;
1131 	uint64_t dma_cfg0, fifo_cfg;
1132 	sbmacdma_t *txdma;
1133 
1134 	/*
1135 	 * Don't do this if running
1136 	 */
1137 
1138 	if (sc->sbm_state == sbmac_state_on)
1139 		return;
1140 
1141 	/*
1142 	 * Bring the controller out of reset, but leave it off.
1143 	 */
1144 
1145 	SBMAC_WRITECSR(sc->sbm_macenable, 0);
1146 
1147 	/*
1148 	 * Ignore all received packets
1149 	 */
1150 
1151 	SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1152 
1153 	/*
1154 	 * Calculate values for various control registers.
1155 	 */
1156 
1157 	cfg = M_MAC_RETRY_EN |
1158 	      M_MAC_TX_HOLD_SOP_EN |
1159 	      V_MAC_TX_PAUSE_CNT_16K |
1160 	      M_MAC_AP_STAT_EN |
1161 	      M_MAC_SS_EN |
1162 	      0;
1163 
1164 	fifo = V_MAC_TX_WR_THRSH(4) |	/* Must be '4' or '8' */
1165 	       V_MAC_TX_RD_THRSH(4) |
1166 	       V_MAC_TX_RL_THRSH(4) |
1167 	       V_MAC_RX_PL_THRSH(4) |
1168 	       V_MAC_RX_RD_THRSH(4) |	/* Must be '4' */
1169 	       V_MAC_RX_PL_THRSH(4) |
1170 	       V_MAC_RX_RL_THRSH(8) |
1171 	       0;
1172 
1173 	framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1174 	    V_MAC_MAX_FRAMESZ_DEFAULT |
1175 	    V_MAC_BACKOFF_SEL(1);
1176 
1177 	/*
1178 	 * Clear out the hash address map
1179 	 */
1180 
1181 	port = PKSEG1(sc->sbm_base + R_MAC_HASH_BASE);
1182 	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1183 		SBMAC_WRITECSR(port, 0);
1184 		port += sizeof(uint64_t);
1185 	}
1186 
1187 	/*
1188 	 * Clear out the exact-match table
1189 	 */
1190 
1191 	port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1192 	for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1193 		SBMAC_WRITECSR(port, 0);
1194 		port += sizeof(uint64_t);
1195 	}
1196 
1197 	/*
1198 	 * Clear out the DMA Channel mapping table registers
1199 	 */
1200 
1201 	port = PKSEG1(sc->sbm_base + R_MAC_CHUP0_BASE);
1202 	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1203 		SBMAC_WRITECSR(port, 0);
1204 		port += sizeof(uint64_t);
1205 	}
1206 
1207 	port = PKSEG1(sc->sbm_base + R_MAC_CHLO0_BASE);
1208 	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1209 		SBMAC_WRITECSR(port, 0);
1210 		port += sizeof(uint64_t);
1211 	}
1212 
1213 	/*
1214 	 * Program the hardware address.  It goes into the hardware-address
1215 	 * register as well as the first filter register.
1216 	 */
1217 
1218 	reg = sbmac_addr2reg(sc->sbm_hwaddr);
1219 
1220 	port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE);
1221 	SBMAC_WRITECSR(port, reg);
1222 	port = PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR);
1223 	SBMAC_WRITECSR(port, 0);			// pass1 workaround
1224 
1225 	/*
1226 	 * Set the receive filter for no packets, and write values
1227 	 * to the various config registers
1228 	 */
1229 
1230 	SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1231 	SBMAC_WRITECSR(sc->sbm_imr, 0);
1232 	SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1233 	SBMAC_WRITECSR(sc->sbm_fifocfg, fifo);
1234 	SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1235 
1236 	/*
1237 	 * Initialize DMA channels (rings should be ok now)
1238 	 */
1239 
1240 	sbdma_channel_start(&(sc->sbm_rxdma));
1241 	sbdma_channel_start(&(sc->sbm_txdma));
1242 
1243 	/*
1244 	 * Configure the speed, duplex, and flow control
1245 	 */
1246 
1247 	sbmac_set_speed(sc, sc->sbm_speed);
1248 	sbmac_set_duplex(sc, sc->sbm_duplex, sc->sbm_fc);
1249 
1250 	/*
1251 	 * Fill the receive ring
1252 	 */
1253 
1254 	sbdma_fillring(&(sc->sbm_rxdma));
1255 
1256 	/*
1257 	 * Turn on the rest of the bits in the enable register
1258 	 */
1259 
1260 	SBMAC_WRITECSR(sc->sbm_macenable, M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 |
1261 	    M_MAC_RX_ENABLE | M_MAC_TX_ENABLE);
1262 
1263 
1264 	/*
1265 	 * Accept any kind of interrupt on TX and RX DMA channel 0
1266 	 */
1267 	SBMAC_WRITECSR(sc->sbm_imr,
1268 	    (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1269 	    (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
1270 
1271 	/*
1272 	 * Enable receiving unicasts and broadcasts
1273 	 */
1274 
1275 	SBMAC_WRITECSR(sc->sbm_rxfilter, M_MAC_UCAST_EN | M_MAC_BCAST_EN);
1276 
1277 	/*
1278 	 * On chips which support unaligned DMA features, set the descriptor
1279 	 * ring for transmit channels to use the unaligned buffer format.
1280 	 */
1281 	txdma = &(sc->sbm_txdma);
1282 
1283 	if (sc->sbm_pass3_dma) {
1284 		dma_cfg0 = SBMAC_READCSR(txdma->sbdma_config0);
1285 		dma_cfg0 |= V_DMA_DESC_TYPE(K_DMA_DESC_TYPE_RING_UAL_RMW) |
1286 		    M_DMA_TBX_EN | M_DMA_TDX_EN;
1287 		SBMAC_WRITECSR(txdma->sbdma_config0, dma_cfg0);
1288 
1289 		fifo_cfg =  SBMAC_READCSR(sc->sbm_fifocfg);
1290 		fifo_cfg |= V_MAC_TX_WR_THRSH(8) |
1291 		    V_MAC_TX_RD_THRSH(8) | V_MAC_TX_RL_THRSH(8);
1292 		SBMAC_WRITECSR(sc->sbm_fifocfg, fifo_cfg);
1293 	}
1294 
1295 	/*
1296 	 * we're running now.
1297 	 */
1298 
1299 	sc->sbm_state = sbmac_state_on;
1300 	sc->sc_ethercom.ec_if.if_flags |= IFF_RUNNING;
1301 
1302 	/*
1303 	 * Program multicast addresses
1304 	 */
1305 
1306 	sbmac_setmulti(sc);
1307 
1308 	/*
1309 	 * If channel was in promiscuous mode before, turn that on
1310 	 */
1311 
1312 	if (sc->sc_ethercom.ec_if.if_flags & IFF_PROMISC)
1313 		sbmac_promiscuous_mode(sc, true);
1314 
1315 	/*
1316 	 * Turn on the once-per-second timer
1317 	 */
1318 
1319 	callout_reset(&(sc->sc_tick_ch), hz, sbmac_tick, sc);
1320 }
1321 
1322 /*
1323  *  SBMAC_CHANNEL_STOP(s)
1324  *
1325  *  Stop packet processing on this MAC.
1326  *
1327  *  Input parameters:
1328  *	sc - sbmac structure
1329  *
1330  *  Return value:
1331  *	nothing
1332  */
1333 
1334 static void
1335 sbmac_channel_stop(struct sbmac_softc *sc)
1336 {
1337 	uint64_t ctl;
1338 
1339 	/* don't do this if already stopped */
1340 
1341 	if (sc->sbm_state == sbmac_state_off)
1342 		return;
1343 
1344 	/* don't accept any packets, disable all interrupts */
1345 
1346 	SBMAC_WRITECSR(sc->sbm_rxfilter, 0);
1347 	SBMAC_WRITECSR(sc->sbm_imr, 0);
1348 
1349 	/* Turn off ticker */
1350 
1351 	callout_stop(&(sc->sc_tick_ch));
1352 
1353 	/* turn off receiver and transmitter */
1354 
1355 	ctl = SBMAC_READCSR(sc->sbm_macenable);
1356 	ctl &= ~(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0);
1357 	SBMAC_WRITECSR(sc->sbm_macenable, ctl);
1358 
1359 	/* We're stopped now. */
1360 
1361 	sc->sbm_state = sbmac_state_off;
1362 	sc->sc_ethercom.ec_if.if_flags &= ~IFF_RUNNING;
1363 
1364 	/* Empty the receive and transmit rings */
1365 
1366 	sbdma_emptyring(&(sc->sbm_rxdma));
1367 	sbdma_emptyring(&(sc->sbm_txdma));
1368 }
1369 
1370 /*
1371  *  SBMAC_SET_CHANNEL_STATE(state)
1372  *
1373  *  Set the channel's state ON or OFF
1374  *
1375  *  Input parameters:
1376  *	state - new state
1377  *
1378  *  Return value:
1379  *	old state
1380  */
1381 
1382 static sbmac_state_t
1383 sbmac_set_channel_state(struct sbmac_softc *sc, sbmac_state_t state)
1384 {
1385 	sbmac_state_t oldstate = sc->sbm_state;
1386 
1387 	/*
1388 	 * If same as previous state, return
1389 	 */
1390 
1391 	if (state == oldstate)
1392 		return oldstate;
1393 
1394 	/*
1395 	 * If new state is ON, turn channel on
1396 	 */
1397 
1398 	if (state == sbmac_state_on)
1399 		sbmac_channel_start(sc);
1400 	else
1401 		sbmac_channel_stop(sc);
1402 
1403 	/*
1404 	 * Return previous state
1405 	 */
1406 
1407 	return oldstate;
1408 }
1409 
1410 /*
1411  *  SBMAC_PROMISCUOUS_MODE(sc, enabled)
1412  *
1413  *  Turn on or off promiscuous mode
1414  *
1415  *  Input parameters:
1416  *	sc - softc
1417  *	enabled - true to turn on, false to turn off
1418  *
1419  *  Return value:
1420  *	nothing
1421  */
1422 
1423 static void
1424 sbmac_promiscuous_mode(struct sbmac_softc *sc, bool enabled)
1425 {
1426 	uint64_t reg;
1427 
1428 	if (sc->sbm_state != sbmac_state_on)
1429 		return;
1430 
1431 	if (enabled) {
1432 		reg = SBMAC_READCSR(sc->sbm_rxfilter);
1433 		reg |= M_MAC_ALLPKT_EN;
1434 		SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1435 	} else {
1436 		reg = SBMAC_READCSR(sc->sbm_rxfilter);
1437 		reg &= ~M_MAC_ALLPKT_EN;
1438 		SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1439 	}
1440 }
1441 
1442 /*
1443  *  SBMAC_INIT_AND_START(sc)
1444  *
1445  *  Stop the channel and restart it.  This is generally used
1446  *  when we have to do something to the channel that requires
1447  *  a swift kick.
1448  *
1449  *  Input parameters:
1450  *	sc - softc
1451  */
1452 
1453 static void
1454 sbmac_init_and_start(struct sbmac_softc *sc)
1455 {
1456 	int s;
1457 
1458 	s = splnet();
1459 
1460 	mii_pollstat(&sc->sc_mii);		/* poll phy for current speed */
1461 	sbmac_mii_statchg(&sc->sc_ethercom.ec_if); /* set state to new speed */
1462 	sbmac_set_channel_state(sc, sbmac_state_on);
1463 
1464 	splx(s);
1465 }
1466 
1467 /*
1468  *  SBMAC_ADDR2REG(ptr)
1469  *
1470  *  Convert six bytes into the 64-bit register value that
1471  *  we typically write into the SBMAC's address/mcast registers
1472  *
1473  *  Input parameters:
1474  *	ptr - pointer to 6 bytes
1475  *
1476  *  Return value:
1477  *	register value
1478  */
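/*
 * Worked example: for the address 00:11:22:33:44:55 the bytes are packed
 * starting with ptr[5] in the most-significant used position, yielding
 * the 64-bit register value 0x0000554433221100.
 */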
1479 
1480 static uint64_t
1481 sbmac_addr2reg(u_char *ptr)
1482 {
1483 	uint64_t reg = 0;
1484 
1485 	ptr += 6;
1486 
1487 	reg |= (uint64_t) *(--ptr);
1488 	reg <<= 8;
1489 	reg |= (uint64_t) *(--ptr);
1490 	reg <<= 8;
1491 	reg |= (uint64_t) *(--ptr);
1492 	reg <<= 8;
1493 	reg |= (uint64_t) *(--ptr);
1494 	reg <<= 8;
1495 	reg |= (uint64_t) *(--ptr);
1496 	reg <<= 8;
1497 	reg |= (uint64_t) *(--ptr);
1498 
1499 	return reg;
1500 }
1501 
1502 /*
1503  *  SBMAC_SET_SPEED(sc, speed)
1504  *
1505  *  Configure LAN speed for the specified MAC.
1506  *  Warning: must be called when MAC is off!
1507  *
1508  *  Input parameters:
1509  *	sc - sbmac structure
1510  *	speed - speed to set MAC to (see sbmac_speed_t enum)
1511  *
1512  *  Return value:
1513  *	true if successful
1514  *	false indicates invalid parameters
1515  */
1516 
1517 static bool
1518 sbmac_set_speed(struct sbmac_softc *sc, sbmac_speed_t speed)
1519 {
1520 	uint64_t cfg;
1521 	uint64_t framecfg;
1522 
1523 	/*
1524 	 * Save new current values
1525 	 */
1526 
1527 	sc->sbm_speed = speed;
1528 
1529 	if (sc->sbm_state != sbmac_state_off)
1530 		panic("sbmac_set_speed while MAC not off");
1531 
1532 	/*
1533 	 * Read current register values
1534 	 */
1535 
1536 	cfg = SBMAC_READCSR(sc->sbm_maccfg);
1537 	framecfg = SBMAC_READCSR(sc->sbm_framecfg);
1538 
1539 	/*
1540 	 * Mask out the stuff we want to change
1541 	 */
1542 
1543 	cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1544 	framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1545 	    M_MAC_SLOT_SIZE);
1546 
1547 	/*
1548 	 * Now add in the new bits
1549 	 */
1550 
1551 	switch (speed) {
1552 	case sbmac_speed_10:
1553 		framecfg |= V_MAC_IFG_RX_10 |
1554 		    V_MAC_IFG_TX_10 |
1555 		    K_MAC_IFG_THRSH_10 |
1556 		    V_MAC_SLOT_SIZE_10;
1557 		cfg |= V_MAC_SPEED_SEL_10MBPS;
1558 		break;
1559 
1560 	case sbmac_speed_100:
1561 		framecfg |= V_MAC_IFG_RX_100 |
1562 		    V_MAC_IFG_TX_100 |
1563 		    V_MAC_IFG_THRSH_100 |
1564 		    V_MAC_SLOT_SIZE_100;
1565 		cfg |= V_MAC_SPEED_SEL_100MBPS;
1566 		break;
1567 
1568 	case sbmac_speed_1000:
1569 		framecfg |= V_MAC_IFG_RX_1000 |
1570 		    V_MAC_IFG_TX_1000 |
1571 		    V_MAC_IFG_THRSH_1000 |
1572 		    V_MAC_SLOT_SIZE_1000;
1573 		cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1574 		break;
1575 
1576 	case sbmac_speed_auto:		/* XXX not implemented */
1577 		/* fall through */
1578 	default:
1579 		return false;
1580 	}
1581 
1582 	/*
1583 	 * Send the bits back to the hardware
1584 	 */
1585 
1586 	SBMAC_WRITECSR(sc->sbm_framecfg, framecfg);
1587 	SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1588 
1589 	return true;
1590 }
1591 
1592 /*
1593  *  SBMAC_SET_DUPLEX(sc, duplex, fc)
1594  *
1595  *  Set Ethernet duplex and flow control options for this MAC
1596  *  Warning: must be called when MAC is off!
1597  *
1598  *  Input parameters:
1599  *	sc - sbmac structure
1600  *	duplex - duplex setting (see sbmac_duplex_t)
1601  *	fc - flow control setting (see sbmac_fc_t)
1602  *
1603  *  Return value:
1604  *	true if ok
1605  *	false if an invalid parameter combination was specified
1606  */
1607 
1608 static bool
1609 sbmac_set_duplex(struct sbmac_softc *sc, sbmac_duplex_t duplex, sbmac_fc_t fc)
1610 {
1611 	uint64_t cfg;
1612 
1613 	/*
1614 	 * Save new current values
1615 	 */
1616 
1617 	sc->sbm_duplex = duplex;
1618 	sc->sbm_fc = fc;
1619 
1620 	if (sc->sbm_state != sbmac_state_off)
1621 		panic("sbmac_set_duplex while MAC not off");
1622 
1623 	/*
1624 	 * Read current register values
1625 	 */
1626 
1627 	cfg = SBMAC_READCSR(sc->sbm_maccfg);
1628 
1629 	/*
1630 	 * Mask off the stuff we're about to change
1631 	 */
1632 
1633 	cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1634 
1635 	switch (duplex) {
1636 	case sbmac_duplex_half:
1637 		switch (fc) {
1638 		case sbmac_fc_disabled:
1639 			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1640 			break;
1641 
1642 		case sbmac_fc_collision:
1643 			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
1644 			break;
1645 
1646 		case sbmac_fc_carrier:
1647 			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
1648 			break;
1649 
1650 		case sbmac_fc_auto:		/* XXX not implemented */
1651 			/* fall through */
1652 		case sbmac_fc_frame:		/* not valid in half duplex */
1653 		default:			/* invalid selection */
1654 			panic("%s: invalid half duplex fc selection %d",
1655 			    device_xname(sc->sc_dev), fc);
1656 			return false;
1657 		}
1658 		break;
1659 
1660 	case sbmac_duplex_full:
1661 		switch (fc) {
1662 		case sbmac_fc_disabled:
1663 			cfg |= V_MAC_FC_CMD_DISABLED;
1664 			break;
1665 
1666 		case sbmac_fc_frame:
1667 			cfg |= V_MAC_FC_CMD_ENABLED;
1668 			break;
1669 
1670 		case sbmac_fc_collision:	/* not valid in full duplex */
1671 		case sbmac_fc_carrier:		/* not valid in full duplex */
1672 		case sbmac_fc_auto:		/* XXX not implemented */
1673 			/* fall through */
1674 		default:
1675 			panic("%s: invalid full duplex fc selection %d",
1676 			    device_xname(sc->sc_dev), fc);
1677 			return false;
1678 		}
1679 		break;
1680 
1681 	default:
1682 		/* fall through */
1683 	case sbmac_duplex_auto:
1684 		panic("%s: bad duplex %d", device_xname(sc->sc_dev), duplex);
1685 		/* XXX not implemented */
1686 		break;
1687 	}
1688 
1689 	/*
1690 	 * Send the bits back to the hardware
1691 	 */
1692 
1693 	SBMAC_WRITECSR(sc->sbm_maccfg, cfg);
1694 
1695 	return true;
1696 }
1697 
1698 /*
1699  *  SBMAC_INTR()
1700  *
1701  *  Interrupt handler for MAC interrupts
1702  *
1703  *  Input parameters:
1704  *	MAC structure
1705  *
1706  *  Return value:
1707  *	nothing
1708  */
1709 
1710 /* ARGSUSED */
1711 static void
1712 sbmac_intr(void *xsc, uint32_t status, vaddr_t pc)
1713 {
1714 	struct sbmac_softc *sc = xsc;
1715 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1716 	uint64_t isr;
1717 
1718 	for (;;) {
1719 
1720 		/*
1721 		 * Read the ISR (this clears the bits in the real register)
1722 		 */
1723 
1724 		isr = SBMAC_READCSR(sc->sbm_isr);
1725 
1726 		if (isr == 0)
1727 			break;
1728 
1729 		/*
1730 		 * Transmits on channel 0
1731 		 */
1732 
1733 		if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
1734 			sbdma_tx_process(sc, &(sc->sbm_txdma));
1735 			SBMAC_EVCNT_INCR(sc->sbm_ev_txintr);
1736 		}
1737 
1738 		/*
1739 		 * Receives on channel 0
1740 		 */
1741 
1742 		if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
1743 			sbdma_rx_process(sc, &(sc->sbm_rxdma));
1744 			SBMAC_EVCNT_INCR(sc->sbm_ev_rxintr);
1745 		}
1746 	}
1747 
1748 	/* try to get more packets going */
1749 	if_schedule_deferred_start(ifp);
1750 }
1751 
1752 
1753 /*
1754  *  SBMAC_START(ifp)
1755  *
1756  *  Start output on the specified interface.  Basically, we
1757  *  queue as many buffers as we can until the ring fills up, or
1758  *  we run off the end of the queue, whichever comes first.
1759  *
1760  *  Input parameters:
1761  *	ifp - interface
1762  *
1763  *  Return value:
1764  *	nothing
1765  */
1766 
1767 static void
1768 sbmac_start(struct ifnet *ifp)
1769 {
1770 	struct sbmac_softc	*sc;
1771 	struct mbuf		*m_head = NULL;
1772 	int			rv;
1773 
1774 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1775 		return;
1776 
1777 	sc = ifp->if_softc;
1778 
1779 	for (;;) {
1780 
1781 		IF_POLL(&ifp->if_snd, m_head);
1782 		if (m_head == NULL)
1783 		    break;
1784 
1785 		/*
1786 		 * Put the buffer on the transmit ring.  If we
1787 		 * don't have room, we'll try to get things going
1788 		 * again after a transmit interrupt.
1789 		 */
1790 
1791 		rv = sbdma_add_txbuffer(&(sc->sbm_txdma), m_head);
1792 
1793 		if (rv == 0) {
1794 			/*
1795 			 * If there's a BPF listener, bounce a copy of this
1796 			 * frame to it.
1797 			 */
1798 			IF_DEQUEUE(&ifp->if_snd, m_head);
1799 			bpf_mtap(ifp, m_head, BPF_D_OUT);
1800 			if (!sc->sbm_pass3_dma) {
1801 				/*
1802 				 * Don't free mbuf if we're not copying to new
1803 				 * mbuf in sbdma_add_txbuffer.  It will be
1804 				 * freed in sbdma_tx_process.
1805 				 */
1806 				m_freem(m_head);
1807 			}
1808 		}
1809 	}
1810 }
1811 
1812 /*
1813  *  SBMAC_SETMULTI(sc)
1814  *
1815  *  Reprogram the multicast table into the hardware, given
1816  *  the list of multicasts associated with the interface
1817  *  structure.
1818  *
1819  *  Input parameters:
1820  *	sc - softc
1821  *
1822  *  Return value:
1823  *	nothing
1824  */
1825 
1826 static void
1827 sbmac_setmulti(struct sbmac_softc *sc)
1828 {
1829 	struct ethercom *ec = &sc->sc_ethercom;
1830 	struct ifnet *ifp = &ec->ec_if;
1831 	uint64_t reg;
1832 	sbmac_port_t port;
1833 	int idx;
1834 	struct ether_multi *enm;
1835 	struct ether_multistep step;
1836 
1837 	/*
1838 	 * Clear out entire multicast table.  We do this by nuking
1839 	 * the entire hash table and all the direct matches except
1840 	 * the first one, which is used for our station address
1841 	 */
1842 
1843 	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
1844 		port = PKSEG1(sc->sbm_base +
1845 		    R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1846 		SBMAC_WRITECSR(port, 0);
1847 	}
1848 
1849 	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1850 		port = PKSEG1(sc->sbm_base +
1851 		    R_MAC_HASH_BASE+(idx*sizeof(uint64_t)));
1852 		SBMAC_WRITECSR(port, 0);
1853 	}
1854 
1855 	/*
1856 	 * Clear the filter to say we don't want any multicasts.
1857 	 */
1858 
1859 	reg = SBMAC_READCSR(sc->sbm_rxfilter);
1860 	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1861 	SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1862 
1863 	if (ifp->if_flags & IFF_ALLMULTI) {
1864 		/*
1865 		 * Enable ALL multicasts.  Do this by inverting the
1866 		 * multicast enable bit.
1867 		 */
1868 		reg = SBMAC_READCSR(sc->sbm_rxfilter);
1869 		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
1870 		SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1871 		return;
1872 	}
1873 
1874 	/*
1875 	 * Program new multicast entries.  For now, only use the
1876 	 * perfect filter.  In the future we'll need to use the
1877 	 * hash filter if the perfect filter overflows
1878 	 */
1879 
1880 	/*
1881 	 * XXX only using perfect filter for now, need to use hash
1882 	 * XXX if the table overflows
1883 	 */
1884 
1885 	idx = 1;		/* skip station address */
1886 	ETHER_LOCK(ec);
1887 	ETHER_FIRST_MULTI(step, ec, enm);
1888 	while ((enm != NULL) && (idx < MAC_ADDR_COUNT)) {
1889 		reg = sbmac_addr2reg(enm->enm_addrlo);
1890 		port = PKSEG1(sc->sbm_base +
1891 		    R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
1892 		SBMAC_WRITECSR(port, reg);
1893 		idx++;
1894 		ETHER_NEXT_MULTI(step, enm);
1895 	}
1896 	ETHER_UNLOCK(ec);
1897 
1898 	/*
1899 	 * Enable the "accept multicast bits" if we programmed at least one
1900 	 * multicast.
1901 	 */
1902 
1903 	if (idx > 1) {
1904 	    reg = SBMAC_READCSR(sc->sbm_rxfilter);
1905 	    reg |= M_MAC_MCAST_EN;
1906 	    SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
1907 	}
1908 }
1909 
1910 /*
1911  *  SBMAC_ETHER_IOCTL(ifp, cmd, data)
1912  *
1913  *  Generic IOCTL requests for this interface.  The basic
1914  *  stuff is handled here for bringing the interface up,
1915  *  handling multicasts, etc.
1916  *
1917  *  Input parameters:
1918  *	ifp - interface structure
1919  *	cmd - command code
1920  *	data - pointer to data
1921  *
1922  *  Return value:
1923  *	return value (0 is success)
1924  */
1925 
1926 static int
1927 sbmac_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1928 {
1929 	struct ifaddr *ifa = (struct ifaddr *) data;
1930 	struct sbmac_softc *sc = ifp->if_softc;
1931 
1932 	switch (cmd) {
1933 	case SIOCINITIFADDR:
1934 		ifp->if_flags |= IFF_UP;
1935 
1936 		switch (ifa->ifa_addr->sa_family) {
1937 #ifdef INET
1938 		case AF_INET:
1939 			sbmac_init_and_start(sc);
1940 			arp_ifinit(ifp, ifa);
1941 			break;
1942 #endif
1943 		default:
1944 			sbmac_init_and_start(sc);
1945 			break;
1946 		}
1947 		break;
1948 
1949 	default:
1950 		return ENOTTY;
1951 	}
1952 
1953 	return 0;
1954 }
1955 
1956 /*
1957  *  SBMAC_IOCTL(ifp, cmd, data)
1958  *
1959  *  Main IOCTL handler - dispatches to other IOCTLs for various
1960  *  types of requests.
1961  *
1962  *  Input parameters:
1963  *	ifp - interface pointer
1964  *	cmd - command code
1965  *	data - pointer to argument data
1966  *
1967  *  Return value:
1968  *	0 if ok
1969  *	else error code
1970  */
1971 
1972 static int
1973 sbmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1974 {
1975 	struct sbmac_softc *sc = ifp->if_softc;
1976 	struct ifreq *ifr = (struct ifreq *) data;
1977 	int s, error = 0;
1978 
1979 	s = splnet();
1980 
1981 	switch (cmd) {
1982 	case SIOCINITIFADDR:
1983 		error = sbmac_ether_ioctl(ifp, cmd, data);
1984 		break;
1985 	case SIOCSIFMTU:
1986 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
1987 			error = EINVAL;
1988 		else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
1989 			/* XXX Program new MTU here */
1990 			error = 0;
1991 		break;
1992 	case SIOCSIFFLAGS:
1993 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1994 			break;
1995 		if (ifp->if_flags & IFF_UP) {
1996 			/*
1997 			 * If only the state of the PROMISC flag changed,
1998 			 * just tweak the hardware registers.
1999 			 */
2000 			if ((ifp->if_flags & IFF_RUNNING) &&
2001 			    (ifp->if_flags & IFF_PROMISC)) {
2002 				/* turn on promiscuous mode */
2003 				sbmac_promiscuous_mode(sc, true);
2004 			} else if (ifp->if_flags & IFF_RUNNING &&
2005 			    !(ifp->if_flags & IFF_PROMISC)) {
2006 				/* turn off promiscuous mode */
2007 				sbmac_promiscuous_mode(sc, false);
2008 			} else
2009 				sbmac_set_channel_state(sc, sbmac_state_on);
2010 		} else {
2011 			if (ifp->if_flags & IFF_RUNNING)
2012 				sbmac_set_channel_state(sc, sbmac_state_off);
2013 		}
2014 
2015 		sc->sbm_if_flags = ifp->if_flags;
2016 		error = 0;
2017 		break;
2018 
2019 	default:
2020 		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
2021 			error = 0;
2022 			if (ifp->if_flags & IFF_RUNNING)
2023 				sbmac_setmulti(sc);
2024 		}
2025 		break;
2026 	}
2027 
2028 	(void)splx(s);
2029 
2030 	return error;
2031 }
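/*
 * Illustrative walk-through of the dispatch above (an example, not an
 * exhaustive description): assigning an IPv4 address, e.g. with
 * "ifconfig sbmac0 inet 10.0.0.1", reaches this handler as
 * SIOCINITIFADDR and is passed to sbmac_ether_ioctl(), which starts the
 * MAC and calls arp_ifinit().  A multicast join or leave falls into the
 * default case: ether_ioctl() updates the multicast list and returns
 * ENETRESET, and if the interface is running, sbmac_setmulti()
 * reprograms the hardware filter.
 */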
2032 
2033 /*
2034  *  SBMAC_IFMEDIA_UPD(ifp)
2035  *
2036  *  Configure an appropriate media type for this interface,
2037  *  given the data in the interface structure
2038  *
2039  *  Input parameters:
2040  *	ifp - interface
2041  *
2042  *  Return value:
2043  *	0 if ok
2044  *	else error code
2045  */
2046 
2047 /*
2048  *  SBMAC_IFMEDIA_STS(ifp, ifmr)
2049  *
2050  *  Report current media status (used by ifconfig, for example)
2051  *
2052  *  Input parameters:
2053  *	ifp - interface structure
2054  *	ifmr - media request structure
2055  *
2056  *  Return value:
2057  *	nothing
2058  */
2059 
2060 /*
2061  *  SBMAC_WATCHDOG(ifp)
2062  *
2063  *  Called periodically to make sure we're still happy.
2064  *
2065  *  Input parameters:
2066  *	ifp - interface structure
2067  *
2068  *  Return value:
2069  *	nothing
2070  */
2071 
2072 static void
2073 sbmac_watchdog(struct ifnet *ifp)
2074 {
2075 
2076 	/* XXX do something */
2077 }
2078 
2079 /*
2080  * One second timer, used to tick MII.
2081  */
2082 static void
2083 sbmac_tick(void *arg)
2084 {
2085 	struct sbmac_softc *sc = arg;
2086 	int s;
2087 
2088 	s = splnet();
2089 	mii_tick(&sc->sc_mii);
2090 	splx(s);
2091 
2092 	callout_reset(&sc->sc_tick_ch, hz, sbmac_tick, sc);
2093 }
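/*
 * Note (illustrative): callout_reset() re-arms the callout hz ticks
 * (about one second) in the future, so mii_tick() gets to poll the PHY
 * for link and autonegotiation changes roughly once per second for as
 * long as the callout keeps rescheduling itself.
 */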
2094 
2095 
2096 /*
2097  *  SBMAC_MATCH(parent, match, aux)
2098  *
2099  *  Part of the config process - see if this device matches the
2100  *  info about what we expect to find on the bus.
2101  *
2102  *  Input parameters:
2103  *	parent - parent bus structure
2104  *	match - config match data (not used)
2105  *	aux - bus-specific args
2106  *
2107  *  Return value:
2108  *	1 if we match
2109  *	0 if we don't match
2110  */
2111 
2112 static int
2113 sbmac_match(device_t parent, cfdata_t match, void *aux)
2114 {
2115 	struct sbobio_attach_args *sa = aux;
2116 
2117 	/*
2118 	 * Make sure it's a MAC
2119 	 */
2120 	if (sa->sa_locs.sa_type != SBOBIO_DEVTYPE_MAC)
2121 		return 0;
2122 
2123 	/*
2124 	 * Yup, it is.
2125 	 */
2126 
2127 	return 1;
2128 }
2129 
2130 /*
2131  *  SBMAC_PARSE_XDIGIT(str)
2132  *
2133  *  Parse a hex digit, returning its value
2134  *
2135  *  Input parameters:
2136  *	str - character
2137  *
2138  *  Return value:
2139  *	hex value, or -1 if invalid
2140  */
2141 
2142 static int
2143 sbmac_parse_xdigit(char str)
2144 {
2145 	int digit;
2146 
2147 	if ((str >= '0') && (str <= '9'))
2148 		digit = str - '0';
2149 	else if ((str >= 'a') && (str <= 'f'))
2150 		digit = str - 'a' + 10;
2151 	else if ((str >= 'A') && (str <= 'F'))
2152 		digit = str - 'A' + 10;
2153 	else
2154 		digit = -1;
2155 
2156 	return digit;
2157 }
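/*
 * Examples (illustrative): sbmac_parse_xdigit('7') returns 7,
 * sbmac_parse_xdigit('b') and sbmac_parse_xdigit('B') both return 11,
 * and a non-hex character such as 'g' or ':' yields -1.
 */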
2158 
2159 /*
2160  *  SBMAC_PARSE_HWADDR(str, hwaddr)
2161  *
2162  *  Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2163  *  Ethernet address.
2164  *
2165  *  Input parameters:
2166  *	str - string
2167  *	hwaddr - pointer to hardware address
2168  *
2169  *  Return value:
2170  *	0 if ok, else -1
2171  */
2172 
2173 static int
2174 sbmac_parse_hwaddr(const char *str, u_char *hwaddr)
2175 {
2176 	int digit1, digit2;
2177 	int idx = 6;
2178 
2179 	while (*str && (idx > 0)) {
2180 		digit1 = sbmac_parse_xdigit(*str);
2181 		if (digit1 < 0)
2182 			return -1;
2183 		str++;
2184 		if (!*str)
2185 			return -1;
2186 
2187 		if ((*str == ':') || (*str == '-')) {
2188 			digit2 = digit1;
2189 			digit1 = 0;
2190 		} else {
2191 			digit2 = sbmac_parse_xdigit(*str);
2192 			if (digit2 < 0)
2193 				return -1;
2194 			str++;
2195 		}
2196 
2197 		*hwaddr++ = (digit1 << 4) | digit2;
2198 		idx--;
2199 
2200 		if (*str == '-')
2201 			str++;
2202 		if (*str == ':')
2203 			str++;
2204 	}
2205 	return 0;
2206 }
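/*
 * Illustrative usage sketch for the parser above:
 *
 *	u_char ea[ETHER_ADDR_LEN];
 *
 *	sbmac_parse_hwaddr("40:00:00:00:01:00", ea);
 *
 * leaves ea[] holding { 0x40, 0x00, 0x00, 0x00, 0x01, 0x00 } and returns
 * 0.  Either ':' or '-' is accepted as a separator; a string containing
 * a non-hex character makes the function return -1.
 */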
2207 
2208 /*
2209  *  SBMAC_ATTACH(parent, self, aux)
2210  *
2211  *  Attach routine - init hardware and hook ourselves into NetBSD.
2212  *
2213  *  Input parameters:
2214  *	parent - parent bus device
2215  *	self - our device
2216  *	aux - attach data
2217  *
2218  *  Return value:
2219  *	nothing
2220  */
2221 
2222 static void
2223 sbmac_attach(device_t parent, device_t self, void *aux)
2224 {
2225 	struct sbmac_softc * const sc = device_private(self);
2226 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
2227 	struct mii_data * const mii = &sc->sc_mii;
2228 	struct sbobio_attach_args * const sa = aux;
2229 	u_char *eaddr;
2230 	static int unit = 0;	/* XXX */
2231 	uint64_t ea_reg;
2232 	int idx;
2233 
2234 	sc->sc_dev = self;
2235 
2236 	/* Determine controller base address */
2237 
2238 	sc->sbm_base = sa->sa_base + sa->sa_locs.sa_offset;
2239 
2240 	eaddr = sc->sbm_hwaddr;
2241 
2242 	/*
2243 	 * Initialize context (get pointers to registers and stuff), then
2244 	 * allocate the memory for the descriptor tables.
2245 	 */
2246 
2247 	sbmac_initctx(sc);
2248 
2249 	callout_init(&(sc->sc_tick_ch), 0);
2250 
2251 	/*
2252 	 * Read the Ethernet address.  The firmware left this programmed
2253 	 * for us in the Ethernet address register for each MAC.
2254 	 */
2255 
2256 	ea_reg = SBMAC_READCSR(PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR));
2257 	for (idx = 0; idx < 6; idx++) {
2258 		eaddr[idx] = (uint8_t) (ea_reg & 0xFF);
2259 		ea_reg >>= 8;
2260 	}
2261 
2262 #define	SBMAC_DEFAULT_HWADDR "40:00:00:00:01:00"
2263 	if (eaddr[0] == 0 && eaddr[1] == 0 && eaddr[2] == 0 &&
2264 	    eaddr[3] == 0 && eaddr[4] == 0 && eaddr[5] == 0) {
2265 		sbmac_parse_hwaddr(SBMAC_DEFAULT_HWADDR, eaddr);
2266 		eaddr[5] = unit;
2267 	}
2268 
2269 #ifdef SBMAC_ETH0_HWADDR
2270 	if (unit == 0)
2271 		sbmac_parse_hwaddr(SBMAC_ETH0_HWADDR, eaddr);
2272 #endif
2273 #ifdef SBMAC_ETH1_HWADDR
2274 	if (unit == 1)
2275 		sbmac_parse_hwaddr(SBMAC_ETH1_HWADDR, eaddr);
2276 #endif
2277 #ifdef SBMAC_ETH2_HWADDR
2278 	if (unit == 2)
2279 		sbmac_parse_hwaddr(SBMAC_ETH2_HWADDR, eaddr);
2280 #endif
2281 	unit++;
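	/*
	 * Assumed usage (not taken from this file): SBMAC_ETHx_HWADDR are
	 * compile-time options, so a kernel configuration would set one
	 * with something like
	 *
	 *	options SBMAC_ETH0_HWADDR="\"40:00:00:00:01:00\""
	 *
	 * The exact quoting follows the usual config(1) string-option
	 * convention and should be checked against the port documentation.
	 */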
2282 
2283 	/*
2284 	 * Display the Ethernet address.  (This is called during the config
2285 	 * process, so we need to finish the config message being displayed.)
2286 	 */
2287 	aprint_normal(": Ethernet%s\n",
2288 	    sc->sbm_pass3_dma ? ", using unaligned tx DMA" : "");
2289 	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
2290 
2291 
2292 	/*
2293 	 * Set up ifnet structure
2294 	 */
2295 
2296 	ifp->if_softc = sc;
2297 	memcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
2298 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2299 	ifp->if_ioctl = sbmac_ioctl;
2300 	ifp->if_start = sbmac_start;
2301 	ifp->if_watchdog = sbmac_watchdog;
2302 	ifp->if_snd.ifq_maxlen = SBMAC_MAX_TXDESCR - 1;
2303 
2304 	/*
2305 	 * Set up ifmedia support.
2306 	 */
2307 
2308 	/*
2309 	 * Initialize MII/media info.
2310 	 */
2311 	mii->mii_ifp	  = ifp;
2312 	mii->mii_readreg  = sbmac_mii_readreg;
2313 	mii->mii_writereg = sbmac_mii_writereg;
2314 	mii->mii_statchg  = sbmac_mii_statchg;
2315 	sc->sc_ethercom.ec_mii = mii;
2316 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
2317 	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
2318 	    MII_OFFSET_ANY, 0);
2319 
2320 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
2321 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
2322 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
2323 	} else
2324 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
2325 
2326 	/*
2327 	 * map/route interrupt
2328 	 */
2329 
2330 	sc->sbm_intrhand = cpu_intr_establish(sa->sa_locs.sa_intr[0], IPL_NET,
2331 	    sbmac_intr, sc);
2332 
2333 	/*
2334 	 * Call MI attach routines.
2335 	 */
2336 	if_attach(ifp);
2337 	if_deferred_start_init(ifp, NULL);
2338 	ether_ifattach(ifp, eaddr);
2339 }
2340