/*
 * Copyright (c) 2008 Stefan Hajnoczi <stefanha@gmail.com>
 * Copyright (c) 2008 Pantelis Koukousoulas <pktoss@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * This driver is a port of the b44 linux driver version 1.01
 *
 * Copyright (c) 2002 David S. Miller <davem@redhat.com>
 * Copyright (c) Pekka Pietikainen <pp@ee.oulu.fi>
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Some ssb bits copied from version 2.0 of the b44 driver
 * Copyright (c) Michael Buesch
 *
 * Copyright (c) a lot of people too. Please respect their work.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <mii.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include "b44.h"

/* Forward declarations for symbols used before they are defined */
static int b44_phy_reset(struct b44_private *bp);
static void b44_set_rx_mode(struct net_device *netdev);
static struct net_device_operations b44_operations;


static inline int ring_next(int index)
{
	/* B44_RING_SIZE is a power of 2 :) */
	return (index + 1) & (B44_RING_SIZE - 1);
}
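
/*
 * Masking with (B44_RING_SIZE - 1) only wraps correctly because the
 * ring size is a power of two. Assuming, say, B44_RING_SIZE == 16:
 *
 *	ring_next(3)  == (3 + 1)  & 15 == 4
 *	ring_next(15) == (15 + 1) & 15 == 0	(wraps to the start)
 */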


/* Memory-mapped I/O wrappers */

static inline u32 br32(const struct b44_private *bp, u32 reg)
{
	return readl(bp->regs + reg);
}


static inline void bw32(const struct b44_private *bp, u32 reg, u32 val)
{
	writel(val, bp->regs + reg);
}


/* Read a register back to force posted writes to complete, then delay */
static inline void bflush(const struct b44_private *bp, u32 reg, u32 timeout)
{
	readl(bp->regs + reg);
	udelay(timeout);
}


#define VIRT_TO_B44(addr)	( virt_to_bus(addr) + SB_PCI_DMA )
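
/*
 * Descriptor and buffer addresses handed to the card go through
 * VIRT_TO_B44(): the SSB backplane does not see host physical
 * addresses directly, but through a PCI DMA window offset by
 * SB_PCI_DMA (0x40000000 in the Linux b44 driver this is ported
 * from). The card can only reach buffers that also fit within the
 * 30-bit mask checked by b44_address_ok() below.
 */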


/**
 * Check if card can access address
 *
 * @v address		Virtual address
 * @ret address_ok	Card can access address
 */
static inline __attribute__ (( always_inline )) int
b44_address_ok ( void *address ) {

	/* Card can address anything with a 30-bit address */
	if ( ( virt_to_bus ( address ) & ~B44_30BIT_DMA_MASK ) == 0 )
		return 1;

	return 0;
}

/**
 * Ring cells waiting to be processed are between 'tx_dirty' and 'pending'
 * indexes in the ring.
 */
static u32 pending_tx_index(struct b44_private *bp)
{
	u32 pending = br32(bp, B44_DMATX_STAT);
	pending &= DMATX_STAT_CDMASK;

	pending /= sizeof(struct dma_desc);
	return pending & (B44_RING_SIZE - 1);
}
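
/*
 * Both pending_*_index() helpers work the same way: the DMA status
 * register reports the byte offset, within the ring, of the descriptor
 * the engine is currently at. Dividing by sizeof(struct dma_desc)
 * (two u32 words, 'ctrl' and 'addr') turns that offset into a ring
 * index; e.g. a reported offset of 24 maps to descriptor 24 / 8 == 3.
 */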


/**
 * Ring cells waiting to be processed are between 'rx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_rx_index(struct b44_private *bp)
{
	u32 pending = br32(bp, B44_DMARX_STAT);
	pending &= DMARX_STAT_CDMASK;

	pending /= sizeof(struct dma_desc);
	return pending & (B44_RING_SIZE - 1);
}


/**
 * Wait until the given bit is set/cleared.
 */
static int b44_wait_bit(struct b44_private *bp, unsigned long reg, u32 bit,
			unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;

		if (!clear && (val & bit))
			break;

		udelay(10);
	}
	if (i == timeout) {
		return -ENODEV;
	}
	return 0;
}
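
/*
 * Each loop iteration above polls the register once and then waits
 * 10us, so 'timeout' is expressed in 10us ticks. For example, the call
 * b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1) in
 * b44_chip_reset() below waits up to roughly 2ms for the bit to clear
 * before giving up with -ENODEV.
 */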


/*
 * Sonics Silicon Backplane support. SSB is a mini-bus interconnecting
 * so-called IP Cores. One of those cores implements the Fast Ethernet
 * functionality and another one the PCI engine.
 *
 * You need to switch to the core you want to talk to before actually
 * sending commands.
 *
 * See: http://bcm-v4.sipsolutions.net/Backplane for (reverse-engineered)
 * specs.
 */
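
/*
 * Core switching is done through the BAR0 window: writing a core's
 * backplane base address into the SSB_BAR0_WIN PCI configuration
 * register remaps subsequent MMIO accesses to that core. This is what
 * ssb_pci_setup() below does when it temporarily selects the PCI core
 * (BCM4400_PCI_CORE_ADDR) and then restores the original window.
 */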

static inline u32 ssb_get_core_rev(struct b44_private *bp)
{
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}


static inline int ssb_is_core_up(struct b44_private *bp)
{
	return ((br32(bp, B44_SBTMSLOW) & (SSB_CORE_DOWN | SBTMSLOW_CLOCK))
	                                                == SBTMSLOW_CLOCK);
}


static u32 ssb_pci_setup(struct b44_private *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pci, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pci, SSB_BAR0_WIN,
	                       BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pci, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}


static void ssb_core_disable(struct b44_private *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
	                                        SSB_CORE_DOWN));
	bflush(bp, B44_SBTMSLOW, 1);

	bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
	bflush(bp, B44_SBTMSLOW, 1);
}


static void ssb_core_reset(struct b44_private *bp)
{
	u32 val;
	const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

	ssb_core_disable(bp);

	bw32(bp, B44_SBTMSLOW, mask);
	bflush(bp, B44_SBTMSLOW, 1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_BAD)) {
		bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);
	}

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	bflush(bp, B44_SBTMSLOW, 1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	bflush(bp, B44_SBTMSLOW, 1);
}


/*
 * Driver helper functions
 */

/*
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access. We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid the chip being hung for an unnecessarily long time in
 * this case.
 *
 * Called-by: b44_close, b44_halt, b44_init_hw(b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);

		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);

		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

		bw32(bp, B44_DMATX_CTRL, 0);

		bp->tx_dirty = bp->tx_cur = 0;

		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
			                                          100, 0);

		bw32(bp, B44_DMARX_CTRL, 0);

		bp->rx_cur = 0;
	} else {
		ssb_pci_setup(bp, SBINTVEC_ENET0);
	}

	ssb_core_reset(bp);

	/* Don't enable PHY if we are only doing a partial reset. */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL,
	     (MDIO_CTRL_PREAMBLE | (0x0d & MDIO_CTRL_MAXF_MASK)));
	bflush(bp, B44_MDIO_CTRL, 1);

	/* Enable internal or external PHY */
	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		bflush(bp, B44_ENET_CTRL, 1);
	} else {
		u32 val = br32(bp, B44_DEVCTRL);
		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			bflush(bp, B44_DEVCTRL, 100);
		}
	}
}


/**
 * Called by b44_poll in the error path
 */
static void b44_halt(struct b44_private *bp)
{
	/* disable ints */
	bw32(bp, B44_IMASK, 0);
	bflush(bp, B44_IMASK, 1);

	DBG("b44: powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

	/*
	 * Now reset the chip, but without enabling
	 * the MAC&PHY part of it.
	 * This has to be done _after_ we shut down the PHY
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}


/*
 * Called at device open time to get the chip ready for
 * packet processing.
 *
 * Called-by: b44_open
 */
static void b44_init_hw(struct b44_private *bp, int reset_kind)
{
	u32 val;
#define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	b44_set_rx_mode(bp->netdev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_HIWMARK, TX_HIWMARK_DEFLT);
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, VIRT_TO_B44(bp->tx));

		bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
		bw32(bp, B44_DMARX_ADDR, VIRT_TO_B44(bp->rx));
		bw32(bp, B44_DMARX_PTR, B44_RX_RING_LEN_BYTES);

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
#undef CTRL_MASK
}


/***  Management of ring descriptors  ***/


static void b44_populate_rx_descriptor(struct b44_private *bp, u32 idx)
{
	struct rx_header *rh;
	u32 ctrl, addr;

	rh = bp->rx_iobuf[idx]->data;
	rh->len = 0;
	rh->flags = 0;
	ctrl = DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET);
	if (idx == B44_RING_LAST) {
		ctrl |= DESC_CTRL_EOT;
	}
	addr = VIRT_TO_B44(bp->rx_iobuf[idx]->data);

	bp->rx[idx].ctrl = cpu_to_le32(ctrl);
	bp->rx[idx].addr = cpu_to_le32(addr);
	bw32(bp, B44_DMARX_PTR, idx * sizeof(struct dma_desc));
}


/*
 * Refill RX ring descriptors with buffers. This is needed
 * because during rx we are passing ownership of descriptor
 * buffers to the network stack.
 */
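/*
 * Ring ownership at this point is roughly (indices wrap around):
 *
 *	[rx_cur, pending)	received frames not yet processed
 *	(pending, rx_cur)	software-owned slots, refilled below
 *
 * The slot at 'pending' itself is left untouched, as the DMA engine
 * may still be using it.
 */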
static void b44_rx_refill(struct b44_private *bp, u32 pending)
{
	struct io_buffer *iobuf;
	u32 i;

	/* Skip the 'pending' descriptor itself and refill the rest */
	for (i = pending + 1; i != bp->rx_cur; i = ring_next(i)) {
		if (bp->rx_iobuf[i] != NULL)
			continue;

		iobuf = alloc_iob(RX_PKT_BUF_SZ);
		if (!iobuf) {
			DBG("Refill rx ring failed!!\n");
			break;
		}
		if (!b44_address_ok(iobuf->data)) {
			DBG("Refill rx ring bad address!!\n");
			free_iob(iobuf);
			break;
		}
		bp->rx_iobuf[i] = iobuf;

		b44_populate_rx_descriptor(bp, i);
	}
}


static void b44_free_rx_ring(struct b44_private *bp)
{
	u32 i;

	if (bp->rx) {
		for (i = 0; i < B44_RING_SIZE; i++) {
			free_iob(bp->rx_iobuf[i]);
			bp->rx_iobuf[i] = NULL;
		}
		free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
		bp->rx = NULL;
	}
}


static int b44_init_rx_ring(struct b44_private *bp)
{
	b44_free_rx_ring(bp);

	bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
	if (!bp->rx)
		return -ENOMEM;
	if (!b44_address_ok(bp->rx)) {
		free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
		return -ENOTSUP;
	}

	memset(bp->rx_iobuf, 0, sizeof(bp->rx_iobuf));

	/* Guard against a failed allocation before dereferencing it in
	 * b44_populate_rx_descriptor() */
	bp->rx_iobuf[0] = alloc_iob(RX_PKT_BUF_SZ);
	if (!bp->rx_iobuf[0]) {
		free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
		bp->rx = NULL;
		return -ENOMEM;
	}
	b44_populate_rx_descriptor(bp, 0);
	b44_rx_refill(bp, 0);

	DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp->rx));
	return 0;
}


static void b44_free_tx_ring(struct b44_private *bp)
{
	if (bp->tx) {
		free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
		bp->tx = NULL;
	}
}


static int b44_init_tx_ring(struct b44_private *bp)
{
	b44_free_tx_ring(bp);

	bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
	if (!bp->tx)
		return -ENOMEM;
	if (!b44_address_ok(bp->tx)) {
		free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
		return -ENOTSUP;
	}

	memset(bp->tx, 0, B44_TX_RING_LEN_BYTES);
	memset(bp->tx_iobuf, 0, sizeof(bp->tx_iobuf));

	DBG("Init TX rings: tx=0x%08lx\n", VIRT_TO_B44(bp->tx));
	return 0;
}


/*** Interaction with the PHY ***/


static int b44_phy_read(struct b44_private *bp, int reg, u32 *val)
{
	int err;

	u32 arg1 = (MDIO_OP_READ << MDIO_DATA_OP_SHIFT);
	u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
	u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
	u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
	u32 argv = arg1 | arg2 | arg3 | arg4;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}


static int b44_phy_write(struct b44_private *bp, int reg, u32 val)
{
	u32 arg1 = (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT);
	u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
	u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
	u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
	u32 arg5 = (val & MDIO_DATA_DATA);
	u32 argv = arg1 | arg2 | arg3 | arg4 | arg5;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}


static int b44_phy_reset(struct b44_private *bp)
{
	u32 val;
	int err;

	err = b44_phy_write(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	udelay(100);
	err = b44_phy_read(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			return -ENODEV;
		}
	}

	return 0;
}


/*
 * The BCM44xx CAM (Content Addressable Memory) stores the MAC
 * addresses used for receive filtering.
 */
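/*
 * Each CAM entry is written through two data registers, assembled from
 * a 6-byte MAC address as done below:
 *
 *	B44_CAM_DATA_HI = CAM_DATA_HI_VALID | data[0] << 8 | data[1]
 *	B44_CAM_DATA_LO = data[2] << 24 | data[3] << 16
 *	                | data[4] << 8  | data[5]
 *
 * The entry is committed by writing the target index to B44_CAM_CTRL
 * and polling until CAM_CTRL_BUSY clears.
 */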
static void b44_cam_write(struct b44_private *bp, unsigned char *data,
			  int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);

	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) | (((u32) data[1]) << 0));

	bw32(bp, B44_CAM_DATA_HI, val);

	val = CAM_CTRL_WRITE | (index << CAM_CTRL_INDEX_SHIFT);
	bw32(bp, B44_CAM_CTRL, val);

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}


static void b44_set_mac_addr(struct b44_private *bp)
{
	u32 val;
	bw32(bp, B44_CAM_CTRL, 0);
	b44_cam_write(bp, bp->netdev->ll_addr, 0);
	val = br32(bp, B44_CAM_CTRL);
	bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}


/* Read 128 bytes of EEPROM. */
static void b44_read_eeprom(struct b44_private *bp, u8 *data)
{
	long i;
	u16 *ptr = (u16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
}


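/*
 * The EEPROM is read as little-endian 16-bit words, so within each
 * word the two bytes appear swapped relative to plain byte offsets.
 * This is why hw_addr[0] below comes from eeprom[79] while hw_addr[1]
 * comes from eeprom[78], and so on.
 */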
static void b44_load_mac_and_phy_addr(struct b44_private *bp)
{
	u8 eeprom[128];

	/* Load MAC address, note byteswapping */
	b44_read_eeprom(bp, &eeprom[0]);
	bp->netdev->hw_addr[0] = eeprom[79];
	bp->netdev->hw_addr[1] = eeprom[78];
	bp->netdev->hw_addr[2] = eeprom[81];
	bp->netdev->hw_addr[3] = eeprom[80];
	bp->netdev->hw_addr[4] = eeprom[83];
	bp->netdev->hw_addr[5] = eeprom[82];

	/* Load PHY address */
	bp->phy_addr = eeprom[90] & 0x1f;
}


static void b44_set_rx_mode(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };
	u32 val;
	int i;

	val = br32(bp, B44_RXCONFIG);
	val &= ~RXCONFIG_PROMISC;
	val |= RXCONFIG_ALLMULTI;

	b44_set_mac_addr(bp);

	for (i = 1; i < 64; i++)
		b44_cam_write(bp, zero, i);

	bw32(bp, B44_RXCONFIG, val);
	val = br32(bp, B44_CAM_CTRL);
	bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}


/*** Implementation of iPXE driver callbacks ***/

/**
 * Probe device
 *
 * @v pci	PCI device
 * @ret rc	Return status code
 */
static int b44_probe(struct pci_device *pci)
{
	struct net_device *netdev;
	struct b44_private *bp;
	int rc;

	/* Set up netdev */
	netdev = alloc_etherdev(sizeof(*bp));
	if (!netdev)
		return -ENOMEM;

	netdev_init(netdev, &b44_operations);
	pci_set_drvdata(pci, netdev);
	netdev->dev = &pci->dev;

	/* Set up private data */
	bp = netdev_priv(netdev);
	memset(bp, 0, sizeof(*bp));
	bp->netdev = netdev;
	bp->pci = pci;

	/* Map device registers */
	bp->regs = ioremap(pci->membase, B44_REGS_SIZE);
	if (!bp->regs) {
		netdev_put(netdev);
		return -ENOMEM;
	}

	/* Enable PCI bus mastering */
	adjust_pci_device(pci);

	b44_load_mac_and_phy_addr(bp);

	rc = register_netdev(netdev);
	if (rc != 0) {
		iounmap(bp->regs);
		netdev_put(netdev);
		return rc;
	}

	/* Link management currently not implemented */
	netdev_link_up(netdev);

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	DBG("b44 %s (%04x:%04x) regs=%p MAC=%s\n", pci->id->name,
	    pci->id->vendor, pci->id->device, bp->regs,
	    eth_ntoa(netdev->ll_addr));

	return 0;
}


/**
 * Remove device
 *
 * @v pci	PCI device
 */
static void b44_remove(struct pci_device *pci)
{
	struct net_device *netdev = pci_get_drvdata(pci);
	struct b44_private *bp = netdev_priv(netdev);

	ssb_core_disable(bp);
	unregister_netdev(netdev);
	iounmap(bp->regs);
	netdev_nullify(netdev);
	netdev_put(netdev);
}


/** Enable or disable interrupts
 *
 * @v netdev	Network device
 * @v enable	Interrupts should be enabled
 */
static void b44_irq(struct net_device *netdev, int enable)
{
	struct b44_private *bp = netdev_priv(netdev);

	/* Interrupt mask specifies which events generate interrupts */
	bw32(bp, B44_IMASK, enable ? IMASK_DEF : IMASK_DISABLE);
}


/** Open network device
 *
 * @v netdev	Network device
 * @ret rc	Return status code
 */
static int b44_open(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	int rc;

	rc = b44_init_tx_ring(bp);
	if (rc != 0)
		return rc;

	rc = b44_init_rx_ring(bp);
	if (rc != 0) {
		b44_free_tx_ring(bp);
		return rc;
	}

	b44_init_hw(bp, B44_FULL_RESET);

	/* Disable interrupts; iPXE polls the device instead */
	b44_irq(netdev, 0);

	return 0;
}


/** Close network device
 *
 * @v netdev	Network device
 */
static void b44_close(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);

	b44_chip_reset(bp, B44_FULL_RESET);
	b44_free_tx_ring(bp);
	b44_free_rx_ring(bp);
}


/** Transmit packet
 *
 * @v netdev	Network device
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int b44_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
	struct b44_private *bp = netdev_priv(netdev);
	u32 cur = bp->tx_cur;
	u32 ctrl;

	/* Check for TX ring overflow */
	if (bp->tx[cur].ctrl) {
		DBG("tx overflow\n");
		return -ENOBUFS;
	}

	/* Check for addressability */
	if (!b44_address_ok(iobuf->data))
		return -ENOTSUP;

	/* Will call netdev_tx_complete() on the iobuf later */
	bp->tx_iobuf[cur] = iobuf;

	/* Set up TX descriptor: each frame occupies exactly one
	 * descriptor (SOF and EOF set together), and IOC asks the card
	 * to report completion so b44_poll() can reclaim it */
	ctrl = (iob_len(iobuf) & DESC_CTRL_LEN) |
	    DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;

	if (cur == B44_RING_LAST)
		ctrl |= DESC_CTRL_EOT;

	bp->tx[cur].ctrl = cpu_to_le32(ctrl);
	bp->tx[cur].addr = cpu_to_le32(VIRT_TO_B44(iobuf->data));

	/* Update next available descriptor index */
	cur = ring_next(cur);
	bp->tx_cur = cur;
	wmb();

	/* Tell card that a new TX descriptor is ready */
	bw32(bp, B44_DMATX_PTR, cur * sizeof(struct dma_desc));
	return 0;
}


/** Recycles sent TX descriptors and notifies network stack
 *
 * @v bp	Driver state
 */
static void b44_tx_complete(struct b44_private *bp)
{
	u32 cur, i;

	cur = pending_tx_index(bp);

	for (i = bp->tx_dirty; i != cur; i = ring_next(i)) {
		/* Free finished frame */
		netdev_tx_complete(bp->netdev, bp->tx_iobuf[i]);
		bp->tx_iobuf[i] = NULL;

		/* Clear TX descriptor */
		bp->tx[i].ctrl = 0;
		bp->tx[i].addr = 0;
	}
	bp->tx_dirty = cur;
}


static void b44_process_rx_packets(struct b44_private *bp)
{
	struct io_buffer *iob;	/* received data */
	struct rx_header *rh;
	u32 pending, i;
	u16 len;

	pending = pending_rx_index(bp);

	for (i = bp->rx_cur; i != pending; i = ring_next(i)) {
		iob = bp->rx_iobuf[i];
		if (iob == NULL)
			break;

		rh = iob->data;
		len = le16_to_cpu(rh->len);

		/*
		 * Guard against incompletely written RX descriptors.
		 * Without this, things can get really slow!
		 */
		if (len == 0)
			break;

		/* Discard CRC that is generated by the card */
		len -= 4;

		/* Check for invalid packets and errors */
		if (len > RX_PKT_BUF_SZ - RX_PKT_OFFSET ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
			DBG("rx error len=%d flags=%04x\n", len,
			                 le16_to_cpu(rh->flags));
			rh->len = 0;
			rh->flags = 0;
			/* netdev_rx_err() consumes the buffer, so drop
			 * our reference; the slot is refilled later */
			bp->rx_iobuf[i] = NULL;
			netdev_rx_err(bp->netdev, iob, -EINVAL);
			continue;
		}

		/* Clear RX descriptor */
		rh->len = 0;
		rh->flags = 0;
		bp->rx_iobuf[i] = NULL;

		/* Hand off the IO buffer to the network stack */
		iob_reserve(iob, RX_PKT_OFFSET);
		iob_put(iob, len);
		netdev_rx(bp->netdev, iob);
	}
	bp->rx_cur = i;
	b44_rx_refill(bp, pending_rx_index(bp));
}


/** Poll for completed and received packets
 *
 * @v netdev	Network device
 */
static void b44_poll(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	u32 istat;

	/* Interrupt status */
	istat = br32(bp, B44_ISTAT);
	istat &= IMASK_DEF;	/* only the events we care about */

	if (!istat)
		return;
	if (istat & ISTAT_TX)
		b44_tx_complete(bp);
	if (istat & ISTAT_RX)
		b44_process_rx_packets(bp);
	if (istat & ISTAT_ERRORS) {
		DBG("b44 error istat=0x%08x\n", istat);

		/* Reset B44 core partially to avoid long waits */
		b44_irq(bp->netdev, 0);
		b44_halt(bp);
		b44_init_tx_ring(bp);
		b44_init_rx_ring(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
	}

	/* Acknowledge interrupt */
	bw32(bp, B44_ISTAT, 0);
	bflush(bp, B44_ISTAT, 1);
}


static struct net_device_operations b44_operations = {
	.open = b44_open,
	.close = b44_close,
	.transmit = b44_transmit,
	.poll = b44_poll,
	.irq = b44_irq,
};


static struct pci_device_id b44_nics[] = {
	PCI_ROM(0x14e4, 0x4401, "BCM4401", "BCM4401", 0),
	PCI_ROM(0x14e4, 0x170c, "BCM4401-B0", "BCM4401-B0", 0),
	PCI_ROM(0x14e4, 0x4402, "BCM4401-B1", "BCM4401-B1", 0),
};


struct pci_driver b44_driver __pci_driver = {
	.ids = b44_nics,
	.id_count = sizeof b44_nics / sizeof b44_nics[0],
	.probe = b44_probe,
	.remove = b44_remove,
};