/*
 * JMicron JMC2x0 series PCIe Ethernet gPXE Device Driver
 *
 * Copyright 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ipxe/io.h>
#include <errno.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/malloc.h>
#include <mii.h>
#include "jme.h"

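/**
 * Read a PHY register via the SMI/MDIO interface
 *
 * Issues a read request and busy-waits for the hardware to clear the
 * SMI_OP_REQ bit.  MII_BMSR is read twice, since its link-status bit
 * is latched-low and only the second read reflects the current state.
 *
 * @v netdev		Net device
 * @v phy		PHY address
 * @v reg		Register index
 * @ret val		Register value, or 0 on timeout
 */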
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev->priv;
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		DBG("phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

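/**
 * Write a PHY register via the SMI/MDIO interface
 *
 * The value and the PHY/register addresses are packed into a single
 * JME_SMI register write; completion is polled the same way as for
 * reads.
 *
 * @v netdev		Net device
 * @v phy		PHY address
 * @v reg		Register index
 * @v val		Value to write
 */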
static void
jme_mdio_write(struct net_device *netdev,
				int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev->priv;
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		DBG("phy(%d) write timeout : %d\n", phy, reg);

	return;
}

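/**
 * Reset the PHY
 *
 * Re-advertises all 10/100Mbps (and, on the JMC250, 1000Mbps)
 * abilities plus pause frames, then sets BMCR_RESET to restart the
 * PHY with the new advertisement.
 *
 * @v jme		JMicron device
 */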
static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	u32 val;

	jme_mdio_write(jme->mii_if.dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme_mdio_write(jme->mii_if.dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->mii_if.dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->mii_if.dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	return;
}

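/*
 * The helpers below poke JMicron vendor-specific PHY registers 26 and
 * 27.  Their bit meanings are not publicly documented; the values used
 * here (bit 0x1000 in register 26, the FIFO selects in register 27)
 * are carried over unchanged from the vendor driver.
 */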
static void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->mii_if.dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

static void
jme_set_phyfifoa(struct jme_adapter *jme)
{
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 27, 0x0004);
}

static void
jme_set_phyfifob(struct jme_adapter *jme)
{
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 27, 0x0000);
}

static void
jme_phy_off(struct jme_adapter *jme)
{
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	uint32_t bmcr;

	bmcr = jme_mdio_read(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
}

static void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
}

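/**
 * Program one wakeup frame slot
 *
 * Writes the CRC pattern and the per-dword byte masks for frame slot
 * @c fnr through the indirect WFOI/WFODP register pair.
 *
 * @v jme		JMicron device
 * @v mask		Wakeup frame mask dwords
 * @v crc		Wakeup frame CRC
 * @v fnr		Frame slot number
 */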
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		u32 *mask, u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}

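/**
 * Reset the MAC processor
 *
 * Pulses GHC_SWRST, then clears the RX/TX ring registers, the
 * multicast hash table and all wakeup frame slots, and restores the
 * general purpose registers to their defaults.
 *
 * @v jme		JMicron device
 */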
static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	u32 crc = 0xCDCDCDCD;
	int i;

	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);

	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
}

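/**
 * Complete all outstanding transmit buffers as failed
 *
 * Used when the ring is torn down or reinitialised; any buffer still
 * held by the ring is reported to the network stack with -ENOLINK.
 *
 * @v jme		JMicron device
 */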
static void
jme_free_tx_buffers(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;
	struct io_buffer *txbi;
	unsigned int i;

	for (i = 0; i < jme->tx_ring_size; ++i) {
		txbi = txring->bufinf[i];
		if (txbi) {
			netdev_tx_complete_err(jme->mii_if.dev,
					txbi, -ENOLINK);
			txring->bufinf[i] = NULL;
		}
	}
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;

	if (txring->desc) {
		if (txring->bufinf) {
			memset(txring->bufinf, 0,
				sizeof(struct io_buffer *) * jme->tx_ring_size);
			free(txring->bufinf);
		}
		free_dma(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
		txring->desc		= NULL;
		txring->dma		= 0;
		txring->bufinf		= NULL;
	}
	txring->next_to_use	= 0;
	txring->next_to_clean	= 0;
	txring->nr_free		= 0;
}

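/**
 * Allocate transmit descriptor ring and buffer table
 *
 * The descriptor ring is allocated from DMA-capable memory aligned to
 * RING_DESC_ALIGN; the buffer table only holds I/O buffer pointers and
 * can live in ordinary memory.
 *
 * @v jme		JMicron device
 * @ret rc		Return status code
 */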
static int
jme_alloc_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;

	txring->desc = malloc_dma(jme->tx_ring_size * TX_DESC_SIZE,
					RING_DESC_ALIGN);
	if (!txring->desc) {
		DBG("Cannot allocate transmit ring descriptors.\n");
		goto err_out;
	}

	/*
	 * Descriptors are 16-byte aligned
	 */
	txring->dma		= virt_to_bus(txring->desc);
	txring->bufinf		= malloc(sizeof(struct io_buffer *) *
					jme->tx_ring_size);
	if (!(txring->bufinf)) {
		DBG("Cannot allocate transmit buffer info.\n");
		goto err_out;
	}

	/*
	 * Initialize Transmit Buffer Pointers
	 */
	memset(txring->bufinf, 0,
		sizeof(struct io_buffer *) * jme->tx_ring_size);

	return 0;

err_out:
	jme_free_tx_resources(jme);
	return -ENOMEM;
}

static void
jme_init_tx_ring(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;

	txring->next_to_clean	= 0;
	txring->next_to_use	= 0;
	txring->nr_free		= jme->tx_ring_size;

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->desc, 0, jme->tx_ring_size * TX_DESC_SIZE);
	jme_free_tx_buffers(jme);
}

static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (uint64_t)jme->txring.dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (uint64_t)(jme->txring.dma) >> 32);
	jwrite32(jme, JME_TXNDA, (uint64_t)jme->txring.dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		DBG("Disable TX engine timeout.\n");
}

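/**
 * Reinitialise a receive descriptor
 *
 * Points descriptor @c i at its I/O buffer and hands it back to the
 * hardware.  The OWN bit is set only after a write barrier, so the
 * NIC never sees a half-written descriptor.
 *
 * @v jme		JMicron device
 * @v i			Descriptor index
 */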
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &jme->rxring;
	register struct rxdesc *rxdesc = rxring->desc;
	struct io_buffer *rxbi = rxring->bufinf[i];
	uint64_t mapping;

	rxdesc += i;
	mapping = virt_to_bus(rxbi->data);

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh	= cpu_to_le32(mapping >> 32);
	rxdesc->desc1.bufaddrl	= cpu_to_le32(mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen	= cpu_to_le16(RX_ALLOC_LEN);
	wmb();
	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct io_buffer **rxbip)
{
	struct io_buffer *inbuf;

	/*
	 * IOB_ALIGN == 2048
	 */
	inbuf = alloc_iob(RX_ALLOC_LEN);
	if (!inbuf) {
		DBG("Cannot allocate receive I/O buffer.\n");
		return -ENOMEM;
	}
	*rxbip = inbuf;

	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &jme->rxring;
	struct io_buffer *rxbi = rxring->bufinf[i];

	if (rxbi) {
		free_iob(rxbi);
		rxring->bufinf[i] = NULL;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	unsigned int i;
	struct jme_ring *rxring = &jme->rxring;

	if (rxring->desc) {
		if (rxring->bufinf) {
			for (i = 0 ; i < jme->rx_ring_size ; ++i)
				jme_free_rx_buf(jme, i);
			free(rxring->bufinf);
		}

		free_dma(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
		rxring->desc     = NULL;
		rxring->dma      = 0;
		rxring->bufinf   = NULL;
	}
	rxring->next_to_fill = 0;
	rxring->next_to_clean = 0;
}

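/**
 * Allocate receive descriptor ring and buffers
 *
 * Mirrors jme_alloc_tx_resources(), but additionally allocates one
 * I/O buffer per descriptor up front.
 *
 * @v jme		JMicron device
 * @ret rc		Return status code
 */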
static int
jme_alloc_rx_resources(struct jme_adapter *jme)
{
	unsigned int i;
	struct jme_ring *rxring = &jme->rxring;
	struct io_buffer **bufinf;

	rxring->desc = malloc_dma(jme->rx_ring_size * RX_DESC_SIZE,
			RING_DESC_ALIGN);
	if (!rxring->desc) {
		DBG("Cannot allocate receive ring descriptors.\n");
		goto err_out;
	}

	/*
	 * Descriptors are 16-byte aligned
	 */
	rxring->dma		= virt_to_bus(rxring->desc);
	rxring->bufinf		= malloc(sizeof(struct io_buffer *) *
					jme->rx_ring_size);
	if (!(rxring->bufinf)) {
		DBG("Cannot allocate receive buffer info.\n");
		goto err_out;
	}

	/*
	 * Initialize Receive Buffer Pointers
	 */
	bufinf = rxring->bufinf;
	memset(bufinf, 0, sizeof(struct io_buffer *) * jme->rx_ring_size);
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (jme_make_new_rx_buf(bufinf))
			goto err_out;
		++bufinf;
	}

	return 0;

err_out:
	jme_free_rx_resources(jme);
	return -ENOMEM;
}

static void
jme_init_rx_ring(struct jme_adapter *jme)
{
	unsigned int i;
	struct jme_ring *rxring = &jme->rxring;

	for (i = 0 ; i < jme->rx_ring_size ; ++i)
		jme_set_clean_rxdesc(jme, i);

	rxring->next_to_fill = 0;
	rxring->next_to_clean = 0;
}

static void
jme_set_multi(struct jme_adapter *jme)
{
	/*
	 * Just receive all kinds of packets for now.
	 */
	jme->reg_rxmcs |= RXMCS_ALLFRAME | RXMCS_BRDFRAME | RXMCS_UNIFRAME;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
}

static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (uint64_t)(jme->rxring.dma) & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (uint64_t)(jme->rxring.dma) >> 32);
	jwrite32(jme, JME_RXNDA, (uint64_t)(jme->rxring.dma) & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		DBG("Disable RX engine timeout.\n");
}

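/**
 * Refill empty receive ring slots
 *
 * Walks the ring from next_to_fill, allocating a fresh I/O buffer and
 * re-arming the descriptor for every empty slot, and stops once it
 * has passed @c curhole (the slot just emptied by the caller).
 *
 * @v jme		JMicron device
 * @v curhole		Index of the slot that was just consumed
 */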
static void
jme_refill_rx_ring(struct jme_adapter *jme, int curhole)
{
	struct jme_ring *rxring = &jme->rxring;
	int i = rxring->next_to_fill;
	struct io_buffer **bufinf = rxring->bufinf;
	int mask = jme->rx_ring_mask;
	int limit = jme->rx_ring_size;

	while (limit--) {
		if (!bufinf[i]) {
			if (jme_make_new_rx_buf(bufinf + i))
				break;
			jme_set_clean_rxdesc(jme, i);
		}
		if (i == curhole)
			limit = 0;
		i = (i + 1) & mask;
	}
	rxring->next_to_fill = i;
}

static void
jme_alloc_and_feed_iob(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &jme->rxring;
	struct rxdesc *rxdesc = rxring->desc;
	struct io_buffer *rxbi = rxring->bufinf[idx];
	struct net_device *netdev = jme->mii_if.dev;
	int framesize;

	rxdesc += idx;

	framesize = le16_to_cpu(rxdesc->descwb.framesize);
	iob_put(rxbi, framesize);
	netdev_rx(netdev, rxbi);

	rxring->bufinf[idx] = NULL;
	jme_refill_rx_ring(jme, idx);
}

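/**
 * Process received packets
 *
 * Walks the ring from next_to_clean, handing each completed frame to
 * the network stack.  Frames spanning more than one descriptor, or
 * flagged with any receive error, are dropped and their descriptors
 * recycled.
 *
 * @v jme		JMicron device
 */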
static void
jme_process_receive(struct jme_adapter *jme)
{
	struct jme_ring *rxring = &jme->rxring;
	struct rxdesc *rxdesc = rxring->desc;
	struct net_device *netdev = jme->mii_if.dev;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
	unsigned int limit = jme->rx_ring_size;

	i = rxring->next_to_clean;
	rxdesc += i;
	while (rxring->bufinf[i] &&
		!(rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) &&
		(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL) &&
		limit--) {

		rmb();
		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
		DBG2("Cleaning rx desc=%d, cnt=%d\n", i, desccnt);

		if (desccnt > 1 || rxdesc->descwb.errstat & RXWBERR_ALLERR) {
			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & mask;
			}
			DBG("Dropped packet due to ");
			if (desccnt > 1)
				DBG("long packet (%d descriptors).\n", desccnt);
			else
				DBG("packet error.\n");
			netdev_rx_err(netdev, NULL, -EINVAL);
		} else {
			jme_alloc_and_feed_iob(jme, i);
		}

		i = (i + desccnt) & mask;
		rxdesc = rxring->desc;
		rxdesc += i;
	}
	rxring->next_to_clean = i;

	return;
}

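/**
 * Program the unicast MAC address filter
 *
 * The six address bytes are packed little-endian into the RXUMA_LO /
 * RXUMA_HI register pair.
 *
 * @v netdev		Net device
 */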
static void
jme_set_custom_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;
	uint8_t *addr = netdev->ll_addr;
	u32 val;

	val = (addr[3] & 0xff) << 24 |
	      (addr[2] & 0xff) << 16 |
	      (addr[1] & 0xff) <<  8 |
	      (addr[0] & 0xff);
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (addr[5] & 0xff) << 8 |
	      (addr[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
}

/**
 * Open NIC
 *
 * @v netdev		Net device
 * @ret rc		Return status code
 */
static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;
	int rc;

	/*
	 * Allocate receive resources
	 */
	rc = jme_alloc_rx_resources(jme);
	if (rc) {
		DBG("Allocating receive resources failed.\n");
		goto nomem_out;
	}

	/*
	 * Allocate transmit resources
	 */
	rc = jme_alloc_tx_resources(jme);
	if (rc) {
		DBG("Allocating transmit resources failed.\n");
		goto free_rx_resources_out;
	}

	jme_set_custom_macaddr(netdev);
	jme_reset_phy_processor(jme);
	jme_restart_an(jme);

	return 0;

free_rx_resources_out:
	jme_free_rx_resources(jme);
nomem_out:
	return rc;
}

/**
 * Close NIC
 *
 * @v netdev		Net device
 */
static void
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;

	jme_free_tx_resources(jme);
	jme_free_rx_resources(jme);
	jme_reset_mac_processor(jme);
	jme->phylink = 0;
	jme_phy_off(jme);
	netdev_link_down(netdev);
}

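/**
 * Reserve a transmit descriptor
 *
 * @v jme		JMicron device
 * @ret idx		Descriptor index, or -1 if the ring is full
 */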
static int
jme_alloc_txdesc(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;
	int idx;

	idx = txring->next_to_use;
	if (txring->nr_free < 1)
		return -1;
	--(txring->nr_free);
	txring->next_to_use = (txring->next_to_use + 1) & jme->tx_ring_mask;

	return idx;
}

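/**
 * Fill a transmit descriptor
 *
 * Writes the buffer address and length, then sets the OWN bit last
 * (after a write barrier) so the running NIC cannot pick up a
 * partially filled descriptor.
 *
 * @v jme		JMicron device
 * @v iob		I/O buffer to transmit
 * @v idx		Descriptor index
 */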
static void
jme_fill_tx_desc(struct jme_adapter *jme, struct io_buffer *iob, int idx)
{
	struct jme_ring *txring = &jme->txring;
	struct txdesc *txdesc = txring->desc;
	uint16_t len = iob_len(iob);
	unsigned long int mapping;

	txdesc += idx;
	mapping = virt_to_bus(iob->data);
	DBG2("TX buffer address: %p(%08lx+%x)\n",
			iob->data, mapping, len);
	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.datalen	= cpu_to_le16(len);
	txdesc->desc1.pktsize	= cpu_to_le16(len);
	txdesc->desc1.bufaddr	= cpu_to_le32(mapping);
	/*
	 * Set the OWN bit last.  The TX engine may already be running,
	 * and must not pick up this descriptor before all other fields
	 * have been filled in.
	 */
	wmb();
	txdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Record the buffer only after handing the descriptor to the
	 * NIC, for better tx_clean timing.
	 */
	wmb();
	txring->bufinf[idx] = iob;
}

/**
 * Transmit packet
 *
 * @v netdev	Network device
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int
jme_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
	struct jme_adapter *jme = netdev->priv;
	int idx;

	idx = jme_alloc_txdesc(jme);
	if (idx < 0) {
		/*
		 * Ideally the transmit queue would be paused here.
		 */
		DBG("TX ring full!\n");
		return -EOVERFLOW;
	}

	jme_fill_tx_desc(jme, iobuf, idx);

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);
	DBG2("xmit: idx=%d\n", idx);

	return 0;
}

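/**
 * Check link state and reprogram the MAC speed/duplex to match
 *
 * @v netdev		Net device
 * @v testonly		Only test; do not reprogram the MAC
 * @ret rc		Non-zero if the link is up and its state unchanged
 */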
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev->priv;
	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, gpreg1;
	int rc = 0;

	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		/*
		 * Keep polling until speed/duplex resolution completes
		 */
		while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
			--cnt) {

			udelay(1);
			phylink = jread32(jme, JME_PHY_LINK);
		}
		if (!cnt)
			DBG("Timed out waiting for speed/duplex resolution.\n");

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc |= GHC_SPEED_10M |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
			break;
		case PHY_LINK_SPEED_100M:
			ghc |= GHC_SPEED_100M |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
			break;
		case PHY_LINK_SPEED_1000M:
			ghc |= GHC_SPEED_1000M |
				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
			break;
		default:
			break;
		}

		if (phylink & PHY_LINK_DUPLEX) {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
			ghc |= GHC_DPX;
		} else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		gpreg1 = GPREG1_DEFAULT;
		if (is_buggy250(jme->pdev->device, jme->chiprev)) {
			if (!(phylink & PHY_LINK_DUPLEX))
				gpreg1 |= GPREG1_HALFMODEPATCH;
			switch (phylink & PHY_LINK_SPEED_MASK) {
			case PHY_LINK_SPEED_10M:
				jme_set_phyfifoa(jme);
				gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_100M:
				jme_set_phyfifob(jme);
				gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_1000M:
				jme_set_phyfifoa(jme);
				break;
			default:
				break;
			}
		}

		jwrite32(jme, JME_GPREG1, gpreg1);
		jwrite32(jme, JME_GHC, ghc);
		jme->reg_ghc = ghc;

		DBG("Link is up at %d Mbps, %s-Duplex, MDI%s.\n",
		    ((phylink & PHY_LINK_SPEED_MASK)
			     == PHY_LINK_SPEED_1000M) ? 1000 :
		    ((phylink & PHY_LINK_SPEED_MASK)
			     == PHY_LINK_SPEED_100M)  ? 100  : 10,
		    (phylink & PHY_LINK_DUPLEX) ? "Full" : "Half",
		    (phylink & PHY_LINK_MDI_STAT) ? "-X" : "");
		netdev_link_up(netdev);
	} else {
		if (testonly)
			goto out;

		DBG("Link is down.\n");
		jme->phylink = 0;
		netdev_link_down(netdev);
	}

out:
	return rc;
}

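/**
 * Handle a link status change
 *
 * If the link state really changed, the MAC is torn down and, when
 * the new state is "up", the rings and engines are reinitialised for
 * the newly negotiated speed and duplex.
 *
 * @v netdev		Net device
 */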
static void
jme_link_change(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;

	/*
	 * Do nothing if the link status did not change.
	 */
	if (jme_check_link(netdev, 1))
		return;

	if (netdev_link_ok(netdev)) {
		netdev_link_down(netdev);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_ghc_speed(jme);
		jme_reset_mac_processor(jme);
	}

	jme_check_link(netdev, 0);
	if (netdev_link_ok(netdev)) {
		jme_init_rx_ring(jme);
		jme_enable_rx_engine(jme);
		jme_init_tx_ring(jme);
		jme_enable_tx_engine(jme);
	}

	return;
}

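/**
 * Complete transmitted packets
 *
 * Walks the ring from next_to_clean, reporting each descriptor the
 * hardware has released (OWN bit clear) as completed, with -EIO if
 * the write-back flags carry any error.
 *
 * @v jme		JMicron device
 */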
static void
jme_tx_clean(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;
	struct txdesc *txdesc = txring->desc;
	struct io_buffer *txbi;
	struct net_device *netdev = jme->mii_if.dev;
	int i, cnt = 0, max, err, mask;

	max = jme->tx_ring_size - txring->nr_free;
	mask = jme->tx_ring_mask;

	for (i = txring->next_to_clean ; cnt < max ; ++cnt) {

		txbi = txring->bufinf[i];

		if (txbi && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {
			DBG2("TX clean address: %08lx(%08lx+%zx)\n",
					(unsigned long)txbi->data,
					virt_to_bus(txbi->data),
					iob_len(txbi));
			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
			if (err)
				netdev_tx_complete_err(netdev, txbi, -EIO);
			else
				netdev_tx_complete(netdev, txbi);
			txring->bufinf[i] = NULL;
		} else {
			break;
		}

		i = (i + 1) & mask;
	}

	DBG2("txclean: next %d\n", i);
	txring->next_to_clean = i;
	txring->nr_free += cnt;
}

/**
 * Poll for received packets
 *
 * @v netdev	Network device
 */
static void
jme_poll(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if any action needs to be performed.
	 */
	if ((intrstat & INTR_ENABLE) == 0)
		return;

	/*
	 * Check if the device still exists
	 */
	if (intrstat == ~((typeof(intrstat))0))
		return;

	DBG2("intrstat 0x%08x\n", intrstat);
	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		DBG2("Link changed\n");
		jme_link_change(netdev);

		/*
		 * Clear all interrupt status
		 */
		jwrite32(jme, JME_IEVE, intrstat);

		/*
		 * The link change event is critical;
		 * all other events are ignored
		 */
		return;
	}

	/*
	 * Process transmit completions first to free more memory.
	 */
	if (intrstat & INTR_TX0) {
		DBG2("Packet transmit complete\n");
		jme_tx_clean(jme);
		jwrite32(jme, JME_IEVE, intrstat & INTR_TX0);
	}

	if (intrstat & (INTR_RX0 | INTR_RX0EMP)) {
		DBG2("Packet received\n");
		jme_process_receive(jme);
		jwrite32(jme, JME_IEVE,
			intrstat & (INTR_RX0 | INTR_RX0EMP));
		if (intrstat & INTR_RX0EMP)
			jme_restart_rx_engine(jme);
	}

	/*
	 * Clear all other interrupt status
	 */
	jwrite32(jme, JME_IEVE,
		intrstat & ~(INTR_RX0 | INTR_RX0EMP | INTR_TX0));
}

/**
 * Enable/disable interrupts
 *
 * @v netdev	Network device
 * @v enable	Interrupts should be enabled
 */
static void
jme_irq(struct net_device *netdev, int enable)
{
	struct jme_adapter *jme = netdev->priv;

	DBG("jme interrupts %s\n", (enable ? "enabled" : "disabled"));
	if (enable)
		jme_start_irq(jme);
	else
		jme_stop_irq(jme);
}

/** JME net device operations */
static struct net_device_operations jme_operations = {
	.open		= jme_open,
	.close		= jme_close,
	.transmit	= jme_transmit,
	.poll		= jme_poll,
	.irq		= jme_irq,
};

static void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
}

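/**
 * Ask the MAC to reload its EEPROM contents
 *
 * Only attempted if an EEPROM is detected (SMBCSR_EEPROMD); the
 * reload refreshes the station address registers that
 * jme_load_macaddr() reads.
 *
 * @v jme		JMicron device
 * @ret rc		Return status code
 */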
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			DBG("eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}

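/**
 * Load the MAC address from the station address registers
 *
 * @v netdev		Net device
 */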
static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	u32 val;

	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->hw_addr, macaddr, 6);
}

/**
 * Probe PCI device
 *
 * @v pci	PCI device
 * @ret rc	Return status code
 */
static int
jme_probe(struct pci_device *pci)
{
	struct net_device *netdev;
	struct jme_adapter *jme;
	int rc;
	uint8_t mrrs;

	/* Allocate net device */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev)
		return -ENOMEM;
	netdev_init(netdev, &jme_operations);
	jme = netdev->priv;
	pci_set_drvdata(pci, netdev);
	netdev->dev = &pci->dev;
	jme->regs = ioremap(pci->membase, JME_REGS_SIZE);
	if (!(jme->regs)) {
		DBG("Failed to map PCI resource region.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	jme->reg_ghc = 0;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->phylink = 0;
	jme->pdev = pci;
	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;
	jme->rx_ring_size = 1 << 4;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->tx_ring_size = 1 << 4;
	jme->tx_ring_mask = jme->tx_ring_size - 1;

	/* Fix up PCI device */
	adjust_pci_device(pci);

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pci, PCI_DCSR_MRRS, &mrrs);
	mrrs &= PCI_DCSR_MRRS_MASK;
	switch (mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}

	/*
	 * Get basic hardware info.
	 */
	jme_check_hw_ver(jme);
	if (pci->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = 1;
	else
		jme->mii_if.supports_gmii = 0;

	/*
	 * Initialize PHY
	 */
	jme_set_phyfifoa(jme);
	jme_phy_init(jme);

	/*
	 * Bring down the PHY before the interface is opened.
	 */
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		DBG("Reloading eeprom for MAC address failed.\n");
		goto err_unmap;
	}
	jme_load_macaddr(netdev);

	/* Register network device */
	if ((rc = register_netdev(netdev)) != 0) {
		DBG("Registering net_device failed.\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	iounmap(jme->regs);
err_out:
	netdev_nullify(netdev);
	netdev_put(netdev);
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci	PCI device
 */
static void
jme_remove(struct pci_device *pci)
{
	struct net_device *netdev = pci_get_drvdata(pci);
	struct jme_adapter *jme = netdev->priv;

	iounmap(jme->regs);
	unregister_netdev(netdev);
	netdev_nullify(netdev);
	netdev_put(netdev);
}

static struct pci_device_id jm_nics[] = {
	PCI_ROM(0x197b, 0x0250, "jme",  "JMicron Gigabit Ethernet", 0),
	PCI_ROM(0x197b, 0x0260, "jmfe", "JMicron Fast Ethernet",    0),
};

struct pci_driver jme_driver __pci_driver = {
	.ids = jm_nics,
	.id_count = ( sizeof ( jm_nics ) / sizeof ( jm_nics[0] ) ),
	.probe = jme_probe,
	.remove = jme_remove,
};