
FILE_LICENCE ( GPL2_ONLY );

#include <mii.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/iobuf.h>
#include <ipxe/timer.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/netdevice.h>

#include "tg3.h"

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0

static void tg3_refill_prod_ring(struct tg3 *tp);

/* Do not place this n-ring-entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
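
/* For example, with a power-of-two ring size and unsigned indices,
 * GCC compiles both of the following (illustrative) forms down to the
 * same masked increment:
 *
 *	idx = (idx + 1) % TG3_TX_RING_SIZE;
 *	idx = (idx + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * The NEXT_TX() macro below relies on exactly this equivalence.
 */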

#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)

/* FIXME: does TG3_RX_RET_MAX_SIZE_5705 work for all cards? */
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (TG3_RX_RET_MAX_SIZE_5705))

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)

void tg3_rx_prodring_fini(struct tg3_rx_prodring_set *tpr)
{	DBGP("%s\n", __func__);

	if (tpr->rx_std) {
		free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
		tpr->rx_std = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	if (tp->tx_ring) {
		free_dma(tp->tx_ring, TG3_TX_RING_BYTES);
		tp->tx_ring = NULL;
	}

	free(tp->tx_buffers);
	tp->tx_buffers = NULL;

	if (tp->rx_rcb) {
		free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
		tp->rx_rcb_mapping = 0;
		tp->rx_rcb = NULL;
	}

	tg3_rx_prodring_fini(&tp->prodring);

	if (tp->hw_status) {
		free_dma(tp->hw_status, TG3_HW_STATUS_SIZE);
		tp->status_mapping = 0;
		tp->hw_status = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
int tg3_alloc_consistent(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	struct tg3_hw_status *sblk;
	struct tg3_rx_prodring_set *tpr = &tp->prodring;

	tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
	if (!tp->hw_status) {
		DBGC(tp->dev, "hw_status alloc failed\n");
		goto err_out;
	}
	tp->status_mapping = virt_to_bus(tp->hw_status);

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	sblk = tp->hw_status;

	tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
	if (!tpr->rx_std) {
		DBGC(tp->dev, "rx prodring alloc failed\n");
		goto err_out;
	}
	tpr->rx_std_mapping = virt_to_bus(tpr->rx_std);
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	tp->tx_buffers = zalloc(sizeof(struct ring_info) * TG3_TX_RING_SIZE);
	if (!tp->tx_buffers)
		goto err_out;

	tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
	if (!tp->tx_ring)
		goto err_out;
	tp->tx_desc_mapping = virt_to_bus(tp->tx_ring);

	/*
	 * When RSS is enabled, the status block format changes
	 * slightly.  The "rx_jumbo_consumer", "reserved",
	 * and "rx_mini_consumer" members get mapped to the
	 * other three rx return ring producer indexes.
	 */

	tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;

	tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
	if (!tp->rx_rcb)
		goto err_out;
	tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb);

	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

	return 0;

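	/* tg3_free_consistent() tolerates partially initialised state:
	 * every pointer it releases is NULL-checked (and free() is a
	 * no-op on NULL), so a single error path serves all of the
	 * allocations above.
	 */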
err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define TG3_RX_STD_BUFF_RING_BYTES(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_MAX_SIZE_5700)

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.
 */
static int tg3_rx_prodring_alloc(struct tg3 __unused *tp,
				 struct tg3_rx_prodring_set *tpr)
{	DBGP("%s\n", __func__);

	u32 i;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;

	/* Initialize invariants of the rings; we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	/* FIXME: does TG3_RX_STD_MAX_SIZE_5700 work on all cards? */
	for (i = 0; i < TG3_RX_STD_MAX_SIZE_5700; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = (TG3_RX_STD_DMA_SZ - 64 - 2) << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	return 0;
}

static void tg3_rx_iob_free(struct io_buffer *iobs[], int i)
{	DBGP("%s\n", __func__);

	if (iobs[i] == NULL)
		return;

	free_iob(iobs[i]);
	iobs[i] = NULL;
}

static void tg3_rx_prodring_free(struct tg3_rx_prodring_set *tpr)
{	DBGP("%s\n", __func__);

	unsigned int i;

	for (i = 0; i < TG3_DEF_RX_RING_PENDING; i++)
		tg3_rx_iob_free(tpr->rx_iobufs, i);
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.
 */
int tg3_init_rings(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	/* Free up all the SKBs. */
///	tg3_free_rings(tp);

	tp->last_tag = 0;
	tp->last_irq_tag = 0;
	tp->hw_status->status = 0;
	tp->hw_status->status_tag = 0;
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	tp->tx_prod = 0;
	tp->tx_cons = 0;
	if (tp->tx_ring)
		memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	tp->rx_rcb_ptr = 0;
	if (tp->rx_rcb)
		memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

	if (tg3_rx_prodring_alloc(tp, &tp->prodring)) {
		DBGC(tp->dev, "tg3_rx_prodring_alloc() failed\n");
		tg3_rx_prodring_free(&tp->prodring);
		return -ENOMEM;
	}

	return 0;
}

static int tg3_open(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	struct tg3_rx_prodring_set *tpr = &tp->prodring;
	int err = 0;

	tg3_set_power_state_0(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	tpr->rx_std_iob_cnt = 0;

	err = tg3_init_hw(tp, 1);
	if (err != 0)
		DBGC(tp->dev, "tg3_init_hw failed: %s\n", strerror(err));
	else
		tg3_refill_prod_ring(tp);

	return err;
}

static inline u32 tg3_tx_avail(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return TG3_DEF_TX_RING_PENDING -
	       ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
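
/* Example: with tx_prod == 5 and tx_cons == 2, three descriptors are
 * in flight and tg3_tx_avail() returns TG3_DEF_TX_RING_PENDING - 3.
 * The '& (TG3_TX_RING_SIZE - 1)' mask keeps the subtraction correct
 * once tx_prod has wrapped around the end of the ring before tx_cons.
 */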

#if 0
/**
 * Prints all registers that could cause a set ERR bit in hw_status->status
 */
static void tg3_dump_err_reg(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	printf("FLOW_ATTN: %#08x\n", tr32(HOSTCC_FLOW_ATTN));
	printf("MAC ATTN: %#08x\n", tr32(MAC_STATUS));
	printf("MSI STATUS: %#08x\n", tr32(MSGINT_STATUS));
	printf("DMA RD: %#08x\n", tr32(RDMAC_STATUS));
	printf("DMA WR: %#08x\n", tr32(WDMAC_STATUS));
	printf("TX CPU STATE: %#08x\n", tr32(TX_CPU_STATE));
	printf("RX CPU STATE: %#08x\n", tr32(RX_CPU_STATE));
}

static void __unused tw32_mailbox2(struct tg3 *tp, uint32_t reg, uint32_t val)
{	DBGP("%s\n", __func__);

	tw32_mailbox(reg, val);
	tr32(reg);
}
#endif

#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
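
/* NEXT_TX() is the '& (ring_size - 1)' trick described at the top of
 * this file; e.g. for a 512-entry ring, NEXT_TX(511) wraps back to 0.
 */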

/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
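/* Note that iPXE I/O buffers are physically contiguous, so each packet
 * can be posted as a single descriptor carrying TXD_FLAG_END, unlike
 * the Linux driver, which may have to chain one descriptor per
 * fragment.
 */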
static int tg3_transmit(struct net_device *dev, struct io_buffer *iob)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry;
	dma_addr_t mapping;

	if (tg3_tx_avail(tp) < 1) {
		DBGC(dev, "Transmit ring full\n");
		return -ENOBUFS;
	}

	entry = tp->tx_prod;

	iob_pad(iob, ETH_ZLEN);
	mapping = virt_to_bus(iob->data);
	len = iob_len(iob);

	tp->tx_buffers[entry].iob = iob;

	tg3_set_txd(tp, entry, mapping, len, TXD_FLAG_END);

	entry = NEXT_TX(entry);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tp->prodmbox, entry);

	tp->tx_prod = entry;

	mb();

	return 0;
}

static void tg3_tx_complete(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct io_buffer *iob = tp->tx_buffers[sw_idx].iob;

		DBGC2(dev, "Transmitted packet: %zd bytes\n", iob_len(iob));

		netdev_tx_complete(dev, iob);
		sw_idx = NEXT_TX(sw_idx);
	}

	tp->tx_cons = sw_idx;
}
/* Returns 0 or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_iob(struct tg3_rx_prodring_set *tpr, u32 dest_idx_unmasked)
{	DBGP("%s\n", __func__);

	struct tg3_rx_buffer_desc *desc;
	struct io_buffer *iob;
	dma_addr_t mapping;
	int dest_idx, iob_idx;

	dest_idx = dest_idx_unmasked & (TG3_RX_STD_MAX_SIZE_5700 - 1);
	desc = &tpr->rx_std[dest_idx];

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	iob = alloc_iob(TG3_RX_STD_DMA_SZ);
	if (iob == NULL)
		return -ENOMEM;

	iob_idx = dest_idx % TG3_DEF_RX_RING_PENDING;
	tpr->rx_iobufs[iob_idx] = iob;

	mapping = virt_to_bus(iob->data);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return 0;
}

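/* Top up the producer ring with fresh I/O buffers until
 * TG3_DEF_RX_RING_PENDING buffers are outstanding, then tell the card
 * about the new producer index.  An allocation failure is not fatal:
 * the ring is left partially filled and will be topped up again on
 * the next poll.
 */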
static void tg3_refill_prod_ring(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	struct tg3_rx_prodring_set *tpr = &tp->prodring;
	int idx = tpr->rx_std_prod_idx;

	DBGCP(tp->dev, "%s\n", __func__);

	while (tpr->rx_std_iob_cnt < TG3_DEF_RX_RING_PENDING) {
		if (tpr->rx_iobufs[idx % TG3_DEF_RX_RING_PENDING] == NULL) {
			if (tg3_alloc_rx_iob(tpr, idx) < 0) {
				DBGC(tp->dev, "alloc_iob() failed for descriptor %d\n", idx);
				break;
			}
			DBGC2(tp->dev, "allocated iob_buffer for descriptor %d\n", idx);
		}

		idx = (idx + 1) % TG3_RX_STD_MAX_SIZE_5700;
		tpr->rx_std_iob_cnt++;
	}

	if ((u32)idx != tpr->rx_std_prod_idx) {
		tpr->rx_std_prod_idx = idx;
		tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
	}
}

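/* Walk the RX return ring from our last position up to the hardware
 * producer index, handing good packets (minus the trailing FCS) to
 * the network stack and reporting corrupted ones via netdev_rx_err().
 * The consumed producer-ring slots are released and refilled
 * afterwards.
 */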
static void tg3_rx_complete(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	struct tg3_rx_prodring_set *tpr = &tp->prodring;

	hw_idx = *(tp->rx_rcb_prod_idx);

	while (sw_idx != hw_idx) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		u32 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		int iob_idx = desc_idx % TG3_DEF_RX_RING_PENDING;
		struct io_buffer *iob = tpr->rx_iobufs[iob_idx];
		unsigned int len;

		DBGC2(dev, "RX - desc_idx: %d sw_idx: %d hw_idx: %d\n", desc_idx, sw_idx, hw_idx);

		assert(iob != NULL);

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
			/* drop packet */
			DBGC(dev, "Corrupted packet received\n");
			netdev_rx_err(dev, iob, -EINVAL);
		} else {
			len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
			      ETH_FCS_LEN;
			iob_put(iob, len);
			netdev_rx(dev, iob);

			DBGC2(dev, "Received packet: %d bytes %d %d\n", len, sw_idx, hw_idx);
		}

		sw_idx++;
		sw_idx &= TG3_RX_RET_MAX_SIZE_5705 - 1;

		tpr->rx_iobufs[iob_idx] = NULL;
		tpr->rx_std_iob_cnt--;
	}

	if (tp->rx_rcb_ptr != sw_idx) {
		tw32_rx_mbox(tp->consmbox, sw_idx);
		tp->rx_rcb_ptr = sw_idx;
	}

	tg3_refill_prod_ring(tp);
}

static void tg3_poll(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	/* ACK interrupts */
	/*
	 *tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00);
	 */
	tp->hw_status->status &= ~SD_STATUS_UPDATED;

	mb();

	tg3_poll_link(tp);
	tg3_tx_complete(dev);
	tg3_rx_complete(dev);
}

static void tg3_close(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	tg3_halt(tp);
	tg3_rx_prodring_free(&tp->prodring);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_free_consistent(tp);
}

static void tg3_irq(struct net_device *dev, int enable)
{	DBGP("%s: %d\n", __func__, enable);

	struct tg3 *tp = netdev_priv(dev);

	if (enable)
		tg3_enable_ints(tp);
	else
		tg3_disable_ints(tp);
}

static struct net_device_operations tg3_netdev_ops = {
	.open = tg3_open,
	.close = tg3_close,
	.poll = tg3_poll,
	.transmit = tg3_transmit,
	.irq = tg3_irq,
};
#define TEST_BUFFER_SIZE	0x2000

int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device);
void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val);

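/* Program TG3PCI_DMA_RW_CTRL with per-chip DMA read/write watermarks
 * and, on 5700/5701 parts, run a write/read DMA loop against on-chip
 * SRAM (offset 0x2100) at maximum write burst size to expose the
 * known write-DMA bug, falling back to a 16-byte write boundary if
 * any corruption is observed.
 */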
static int tg3_test_dma(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	dma_addr_t buf_dma;
	u32 *buf;
	int ret = 0;

	buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	buf_dma = virt_to_bus(buf);
	DBGC2(tp->dev, "dma test buffer, virt: %p phys: %#016lx\n", buf, buf_dma);

	if (tg3_flag(tp, 57765_PLUS)) {
		tp->dma_rwctrl = DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			DBGC(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Validate that the data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				DBGC(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			DBGC(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				DBGC(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
		tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	free_dma(buf, TEST_BUFFER_SIZE);
out_nofree:
	return ret;
}

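/* PCI probe routine: map the register BAR, fetch the chip invariants
 * and MAC address, make sure any DMA left running by a previous
 * UNDI/EFI driver is halted, tune the DMA engine and register the net
 * device.
 */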
static int tg3_init_one(struct pci_device *pdev)
{	DBGP("%s\n", __func__);

	struct net_device *dev;
	struct tg3 *tp;
	int err = 0;
	unsigned long reg_base, reg_size;

	adjust_pci_device(pdev);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		DBGC(&pdev->dev, "Failed to allocate etherdev\n");
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}

	netdev_init(dev, &tg3_netdev_ops);
	pci_set_drvdata(pdev, dev);

	dev->dev = &pdev->dev;

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	/* Subsystem IDs are required later */
	pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->subsystem_vendor);
	pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_ID, &tp->subsystem_device);

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#if __BYTE_ORDER == __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif

	/* FIXME: how can we detect errors here? */
	reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
	reg_size = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);

	tp->regs = ioremap(reg_base, reg_size);
	if (!tp->regs) {
		DBGC(&pdev->dev, "Failed to remap device registers\n");
		err = -ENOENT;
		goto err_out_disable_pdev;
	}

	err = tg3_get_invariants(tp);
	if (err) {
		DBGC(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	tg3_init_bufmgr_config(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		DBGC(&pdev->dev, "Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut it
	 * down.  The DMA self test would otherwise enable the WDMAC
	 * and we would see (spurious) pending DMA on the PCI bus at
	 * that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		DBGC(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_iounmap;
	}

	tp->int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	tp->consmbox = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	tp->prodmbox = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;

	tp->coal_now = HOSTCC_MODE_NOW;

	err = register_netdev(dev);
	if (err) {
		DBGC(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	/* Call tg3_setup_phy() to start the autoneg process now, which
	 * saves time over starting autoneg in tg3_open().
	 */
	err = tg3_setup_phy(tp, 0);
	if (err) {
		DBGC(tp->dev, "tg3_setup_phy() call failed in %s\n", __func__);
		goto err_out_unregister;
	}

	return 0;

err_out_unregister:
	unregister_netdev(dev);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

	netdev_put(dev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void tg3_remove_one(struct pci_device *pci)
{	DBGP("%s\n", __func__);

	struct net_device *netdev = pci_get_drvdata(pci);

	unregister_netdev(netdev);
	netdev_nullify(netdev);
	netdev_put(netdev);
}
static struct pci_device_id tg3_nics[] = {
	PCI_ROM(0x14e4, 0x1644, "14e4-1644", "14e4-1644", 0),
	PCI_ROM(0x14e4, 0x1645, "14e4-1645", "14e4-1645", 0),
	PCI_ROM(0x14e4, 0x1646, "14e4-1646", "14e4-1646", 0),
	PCI_ROM(0x14e4, 0x1647, "14e4-1647", "14e4-1647", 0),
	PCI_ROM(0x14e4, 0x1648, "14e4-1648", "14e4-1648", 0),
	PCI_ROM(0x14e4, 0x164d, "14e4-164d", "14e4-164d", 0),
	PCI_ROM(0x14e4, 0x1653, "14e4-1653", "14e4-1653", 0),
	PCI_ROM(0x14e4, 0x1654, "14e4-1654", "14e4-1654", 0),
	PCI_ROM(0x14e4, 0x165d, "14e4-165d", "14e4-165d", 0),
	PCI_ROM(0x14e4, 0x165e, "14e4-165e", "14e4-165e", 0),
	PCI_ROM(0x14e4, 0x16a6, "14e4-16a6", "14e4-16a6", 0),
	PCI_ROM(0x14e4, 0x16a7, "14e4-16a7", "14e4-16a7", 0),
	PCI_ROM(0x14e4, 0x16a8, "14e4-16a8", "14e4-16a8", 0),
	PCI_ROM(0x14e4, 0x16c6, "14e4-16c6", "14e4-16c6", 0),
	PCI_ROM(0x14e4, 0x16c7, "14e4-16c7", "14e4-16c7", 0),
	PCI_ROM(0x14e4, 0x1696, "14e4-1696", "14e4-1696", 0),
	PCI_ROM(0x14e4, 0x169c, "14e4-169c", "14e4-169c", 0),
	PCI_ROM(0x14e4, 0x169d, "14e4-169d", "14e4-169d", 0),
	PCI_ROM(0x14e4, 0x170d, "14e4-170d", "14e4-170d", 0),
	PCI_ROM(0x14e4, 0x170e, "14e4-170e", "14e4-170e", 0),
	PCI_ROM(0x14e4, 0x1649, "14e4-1649", "14e4-1649", 0),
	PCI_ROM(0x14e4, 0x166e, "14e4-166e", "14e4-166e", 0),
	PCI_ROM(0x14e4, 0x1659, "14e4-1659", "14e4-1659", 0),
	PCI_ROM(0x14e4, 0x165a, "14e4-165a", "14e4-165a", 0),
	PCI_ROM(0x14e4, 0x1677, "14e4-1677", "14e4-1677", 0),
	PCI_ROM(0x14e4, 0x167d, "14e4-167d", "14e4-167d", 0),
	PCI_ROM(0x14e4, 0x167e, "14e4-167e", "14e4-167e", 0),
	PCI_ROM(0x14e4, 0x1600, "14e4-1600", "14e4-1600", 0),
	PCI_ROM(0x14e4, 0x1601, "14e4-1601", "14e4-1601", 0),
	PCI_ROM(0x14e4, 0x16f7, "14e4-16f7", "14e4-16f7", 0),
	PCI_ROM(0x14e4, 0x16fd, "14e4-16fd", "14e4-16fd", 0),
	PCI_ROM(0x14e4, 0x16fe, "14e4-16fe", "14e4-16fe", 0),
	PCI_ROM(0x14e4, 0x167a, "14e4-167a", "14e4-167a", 0),
	PCI_ROM(0x14e4, 0x1672, "14e4-1672", "14e4-1672", 0),
	PCI_ROM(0x14e4, 0x167b, "14e4-167b", "14e4-167b", 0),
	PCI_ROM(0x14e4, 0x1673, "14e4-1673", "14e4-1673", 0),
	PCI_ROM(0x14e4, 0x1674, "14e4-1674", "14e4-1674", 0),
	PCI_ROM(0x14e4, 0x169a, "14e4-169a", "14e4-169a", 0),
	PCI_ROM(0x14e4, 0x169b, "14e4-169b", "14e4-169b", 0),
	PCI_ROM(0x14e4, 0x1693, "14e4-1693", "14e4-1693", 0),
	PCI_ROM(0x14e4, 0x167f, "14e4-167f", "14e4-167f", 0),
	PCI_ROM(0x14e4, 0x1668, "14e4-1668", "14e4-1668", 0),
	PCI_ROM(0x14e4, 0x1669, "14e4-1669", "14e4-1669", 0),
	PCI_ROM(0x14e4, 0x1678, "14e4-1678", "14e4-1678", 0),
	PCI_ROM(0x14e4, 0x1679, "14e4-1679", "14e4-1679", 0),
	PCI_ROM(0x14e4, 0x166a, "14e4-166a", "14e4-166a", 0),
	PCI_ROM(0x14e4, 0x166b, "14e4-166b", "14e4-166b", 0),
	PCI_ROM(0x14e4, 0x16dd, "14e4-16dd", "14e4-16dd", 0),
	PCI_ROM(0x14e4, 0x1712, "14e4-1712", "14e4-1712", 0),
	PCI_ROM(0x14e4, 0x1713, "14e4-1713", "14e4-1713", 0),
	PCI_ROM(0x14e4, 0x1698, "14e4-1698", "14e4-1698", 0),
	PCI_ROM(0x14e4, 0x1684, "14e4-1684", "14e4-1684", 0),
	PCI_ROM(0x14e4, 0x165b, "14e4-165b", "14e4-165b", 0),
	PCI_ROM(0x14e4, 0x1681, "14e4-1681", "14e4-1681", 0),
	PCI_ROM(0x14e4, 0x1682, "14e4-1682", "14e4-1682", 0),
	PCI_ROM(0x14e4, 0x1680, "14e4-1680", "14e4-1680", 0),
	PCI_ROM(0x14e4, 0x1688, "14e4-1688", "14e4-1688", 0),
	PCI_ROM(0x14e4, 0x1689, "14e4-1689", "14e4-1689", 0),
	PCI_ROM(0x14e4, 0x1699, "14e4-1699", "14e4-1699", 0),
	PCI_ROM(0x14e4, 0x16a0, "14e4-16a0", "14e4-16a0", 0),
	PCI_ROM(0x14e4, 0x1692, "14e4-1692", "14e4-1692", 0),
	PCI_ROM(0x14e4, 0x1690, "14e4-1690", "14e4-1690", 0),
	PCI_ROM(0x14e4, 0x1694, "14e4-1694", "14e4-1694", 0),
	PCI_ROM(0x14e4, 0x1691, "14e4-1691", "14e4-1691", 0),
	PCI_ROM(0x14e4, 0x1655, "14e4-1655", "14e4-1655", 0),
	PCI_ROM(0x14e4, 0x1656, "14e4-1656", "14e4-1656", 0),
	PCI_ROM(0x14e4, 0x16b1, "14e4-16b1", "14e4-16b1", 0),
	PCI_ROM(0x14e4, 0x16b5, "14e4-16b5", "14e4-16b5", 0),
	PCI_ROM(0x14e4, 0x16b0, "14e4-16b0", "14e4-16b0", 0),
	PCI_ROM(0x14e4, 0x16b4, "14e4-16b4", "14e4-16b4", 0),
	PCI_ROM(0x14e4, 0x16b2, "14e4-16b2", "14e4-16b2", 0),
	PCI_ROM(0x14e4, 0x16b6, "14e4-16b6", "14e4-16b6", 0),
	PCI_ROM(0x14e4, 0x1657, "14e4-1657", "14e4-1657", 0),
	PCI_ROM(0x14e4, 0x165f, "14e4-165f", "14e4-165f", 0),
	PCI_ROM(0x14e4, 0x1686, "14e4-1686", "14e4-1686", 0),
	PCI_ROM(0x1148, 0x4400, "1148-4400", "1148-4400", 0),
	PCI_ROM(0x1148, 0x4500, "1148-4500", "1148-4500", 0),
	PCI_ROM(0x173b, 0x03e8, "173b-03e8", "173b-03e8", 0),
	PCI_ROM(0x173b, 0x03e9, "173b-03e9", "173b-03e9", 0),
	PCI_ROM(0x173b, 0x03eb, "173b-03eb", "173b-03eb", 0),
	PCI_ROM(0x173b, 0x03ea, "173b-03ea", "173b-03ea", 0),
	PCI_ROM(0x106b, 0x1645, "106b-1645", "106b-1645", 0),
};

struct pci_driver tg3_pci_driver __pci_driver = {
	.ids = tg3_nics,
	.id_count = ARRAY_SIZE(tg3_nics),
	.probe = tg3_init_one,
	.remove = tg3_remove_one,
};