1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/mbuf.h>
37 #include <sys/rman.h>
38 #include <sys/module.h>
39 #include <sys/proc.h>
40 #include <sys/queue.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
44 #include <sys/taskqueue.h>
45
46 #include <net/bpf.h>
47 #include <net/if.h>
48 #include <net/if_var.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_vlan_var.h>
55
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
63
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66
67 #include <machine/bus.h>
68 #include <machine/in_cksum.h>
69
70 #include <dev/jme/if_jmereg.h>
71 #include <dev/jme/if_jmevar.h>
72
73 /* "device miibus" required. See GENERIC if you get errors here. */
74 #include "miibus_if.h"
75
76 /* Define the following to enable printing of Rx errors. */
77 #undef JME_SHOW_ERRORS
78
79 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
80
81 MODULE_DEPEND(jme, pci, 1, 1, 1);
82 MODULE_DEPEND(jme, ether, 1, 1, 1);
83 MODULE_DEPEND(jme, miibus, 1, 1, 1);
84
85 /* Tunables. */
86 static int msi_disable = 0;
87 static int msix_disable = 0;
88 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
89 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
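/*
 * Both are loader tunables; e.g. setting "hw.jme.msi_disable=1" in
 * /boot/loader.conf keeps the driver from allocating MSI messages.
 */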
90
91 /*
92 * Devices supported by this driver.
93 */
94 static struct jme_dev {
95 uint16_t jme_vendorid;
96 uint16_t jme_deviceid;
97 const char *jme_name;
98 } jme_devs[] = {
99 { VENDORID_JMICRON, DEVICEID_JMC250,
100 "JMicron Inc, JMC25x Gigabit Ethernet" },
101 { VENDORID_JMICRON, DEVICEID_JMC260,
102 "JMicron Inc, JMC26x Fast Ethernet" },
103 };
104
105 static int jme_miibus_readreg(device_t, int, int);
106 static int jme_miibus_writereg(device_t, int, int, int);
107 static void jme_miibus_statchg(device_t);
108 static void jme_mediastatus(if_t, struct ifmediareq *);
109 static int jme_mediachange(if_t);
110 static int jme_probe(device_t);
111 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
112 static int jme_eeprom_macaddr(struct jme_softc *);
113 static int jme_efuse_macaddr(struct jme_softc *);
114 static void jme_reg_macaddr(struct jme_softc *);
115 static void jme_set_macaddr(struct jme_softc *, uint8_t *);
116 static void jme_map_intr_vector(struct jme_softc *);
117 static int jme_attach(device_t);
118 static int jme_detach(device_t);
119 static void jme_sysctl_node(struct jme_softc *);
120 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
121 static int jme_dma_alloc(struct jme_softc *);
122 static void jme_dma_free(struct jme_softc *);
123 static int jme_shutdown(device_t);
124 static void jme_setlinkspeed(struct jme_softc *);
125 static void jme_setwol(struct jme_softc *);
126 static int jme_suspend(device_t);
127 static int jme_resume(device_t);
128 static int jme_encap(struct jme_softc *, struct mbuf **);
129 static void jme_start(if_t);
130 static void jme_start_locked(if_t);
131 static void jme_watchdog(struct jme_softc *);
132 static int jme_ioctl(if_t, u_long, caddr_t);
133 static void jme_mac_config(struct jme_softc *);
134 static void jme_link_task(void *, int);
135 static int jme_intr(void *);
136 static void jme_int_task(void *, int);
137 static void jme_txeof(struct jme_softc *);
138 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
139 static void jme_rxeof(struct jme_softc *);
140 static int jme_rxintr(struct jme_softc *, int);
141 static void jme_tick(void *);
142 static void jme_reset(struct jme_softc *);
143 static void jme_init(void *);
144 static void jme_init_locked(struct jme_softc *);
145 static void jme_stop(struct jme_softc *);
146 static void jme_stop_tx(struct jme_softc *);
147 static void jme_stop_rx(struct jme_softc *);
148 static int jme_init_rx_ring(struct jme_softc *);
149 static void jme_init_tx_ring(struct jme_softc *);
150 static void jme_init_ssb(struct jme_softc *);
151 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
152 static void jme_set_vlan(struct jme_softc *);
153 static void jme_set_filter(struct jme_softc *);
154 static void jme_stats_clear(struct jme_softc *);
155 static void jme_stats_save(struct jme_softc *);
156 static void jme_stats_update(struct jme_softc *);
157 static void jme_phy_down(struct jme_softc *);
158 static void jme_phy_up(struct jme_softc *);
159 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
160 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
161 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
162 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
163 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
164 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
165
166
167 static device_method_t jme_methods[] = {
168 /* Device interface. */
169 DEVMETHOD(device_probe, jme_probe),
170 DEVMETHOD(device_attach, jme_attach),
171 DEVMETHOD(device_detach, jme_detach),
172 DEVMETHOD(device_shutdown, jme_shutdown),
173 DEVMETHOD(device_suspend, jme_suspend),
174 DEVMETHOD(device_resume, jme_resume),
175
176 /* MII interface. */
177 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
178 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
179 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
180
181 { NULL, NULL }
182 };
183
184 static driver_t jme_driver = {
185 "jme",
186 jme_methods,
187 sizeof(struct jme_softc)
188 };
189
190 DRIVER_MODULE(jme, pci, jme_driver, 0, 0);
191 DRIVER_MODULE(miibus, jme, miibus_driver, 0, 0);
192
193 static struct resource_spec jme_res_spec_mem[] = {
194 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
195 { -1, 0, 0 }
196 };
197
198 static struct resource_spec jme_irq_spec_legacy[] = {
199 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
200 { -1, 0, 0 }
201 };
202
203 static struct resource_spec jme_irq_spec_msi[] = {
204 { SYS_RES_IRQ, 1, RF_ACTIVE },
205 { -1, 0, 0 }
206 };
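/*
 * Legacy INTx interrupts use rid 0 and may be shared, while MSI/MSI-X
 * vectors are allocated starting at rid 1, hence the two separate
 * interrupt resource specs above.
 */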
207
208 /*
209 * Read a PHY register on the MII of the JMC250.
210 */
211 static int
212 jme_miibus_readreg(device_t dev, int phy, int reg)
213 {
214 struct jme_softc *sc;
215 uint32_t val;
216 int i;
217
218 sc = device_get_softc(dev);
219
220 /* For FPGA version, PHY address 0 should be ignored. */
221 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
222 return (0);
223
224 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
225 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
226 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
227 DELAY(1);
228 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
229 break;
230 }
231
232 if (i == 0) {
233 device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
234 return (0);
235 }
236
237 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
238 }
239
240 /*
241 * Write a PHY register on the MII of the JMC250.
242 */
243 static int
244 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
245 {
246 struct jme_softc *sc;
247 int i;
248
249 sc = device_get_softc(dev);
250
251 /* For FPGA version, PHY address 0 should be ignored. */
252 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
253 return (0);
254
255 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
256 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
257 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
258 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
259 DELAY(1);
260 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
261 break;
262 }
263
264 if (i == 0)
265 device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
266
267 return (0);
268 }
269
270 /*
271 * Callback from MII layer when media changes.
272 */
273 static void
274 jme_miibus_statchg(device_t dev)
275 {
276 struct jme_softc *sc;
277
278 sc = device_get_softc(dev);
279 taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
280 }
281
282 /*
283 * Get the current interface media status.
284 */
285 static void
286 jme_mediastatus(if_t ifp, struct ifmediareq *ifmr)
287 {
288 struct jme_softc *sc;
289 struct mii_data *mii;
290
291 sc = if_getsoftc(ifp);
292 JME_LOCK(sc);
293 if ((if_getflags(ifp) & IFF_UP) == 0) {
294 JME_UNLOCK(sc);
295 return;
296 }
297 mii = device_get_softc(sc->jme_miibus);
298
299 mii_pollstat(mii);
300 ifmr->ifm_status = mii->mii_media_status;
301 ifmr->ifm_active = mii->mii_media_active;
302 JME_UNLOCK(sc);
303 }
304
305 /*
306 * Set hardware to newly-selected media.
307 */
308 static int
309 jme_mediachange(if_t ifp)
310 {
311 struct jme_softc *sc;
312 struct mii_data *mii;
313 struct mii_softc *miisc;
314 int error;
315
316 sc = if_getsoftc(ifp);
317 JME_LOCK(sc);
318 mii = device_get_softc(sc->jme_miibus);
319 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
320 PHY_RESET(miisc);
321 error = mii_mediachg(mii);
322 JME_UNLOCK(sc);
323
324 return (error);
325 }
326
327 static int
328 jme_probe(device_t dev)
329 {
330 struct jme_dev *sp;
331 int i;
332 uint16_t vendor, devid;
333
334 vendor = pci_get_vendor(dev);
335 devid = pci_get_device(dev);
336 sp = jme_devs;
337 for (i = 0; i < nitems(jme_devs); i++, sp++) {
338 if (vendor == sp->jme_vendorid &&
339 devid == sp->jme_deviceid) {
340 device_set_desc(dev, sp->jme_name);
341 return (BUS_PROBE_DEFAULT);
342 }
343 }
344
345 return (ENXIO);
346 }
347
348 static int
349 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
350 {
351 uint32_t reg;
352 int i;
353
354 *val = 0;
355 for (i = JME_TIMEOUT; i > 0; i--) {
356 reg = CSR_READ_4(sc, JME_SMBCSR);
357 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
358 break;
359 DELAY(1);
360 }
361
362 if (i == 0) {
363 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
364 return (ETIMEDOUT);
365 }
366
367 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
368 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
369 for (i = JME_TIMEOUT; i > 0; i--) {
370 DELAY(1);
371 reg = CSR_READ_4(sc, JME_SMBINTF);
372 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
373 break;
374 }
375
376 if (i == 0) {
377 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
378 return (ETIMEDOUT);
379 }
380
381 reg = CSR_READ_4(sc, JME_SMBINTF);
382 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
383
384 return (0);
385 }
386
387 static int
388 jme_eeprom_macaddr(struct jme_softc *sc)
389 {
390 uint8_t eaddr[ETHER_ADDR_LEN];
391 uint8_t fup, reg, val;
392 uint32_t offset;
393 int match;
394
395 offset = 0;
396 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
397 fup != JME_EEPROM_SIG0)
398 return (ENOENT);
399 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
400 fup != JME_EEPROM_SIG1)
401 return (ENOENT);
402 match = 0;
403 do {
404 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
405 break;
406 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
407 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
408 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
409 break;
410 if (reg >= JME_PAR0 &&
411 reg < JME_PAR0 + ETHER_ADDR_LEN) {
412 if (jme_eeprom_read_byte(sc, offset + 2,
413 &val) != 0)
414 break;
415 eaddr[reg - JME_PAR0] = val;
416 match++;
417 }
418 }
419 /* Check for the end of EEPROM descriptor. */
420 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
421 break;
422 /* Try the next EEPROM descriptor. */
423 offset += JME_EEPROM_DESC_BYTES;
424 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
425
426 if (match == ETHER_ADDR_LEN) {
427 bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
428 return (0);
429 }
430
431 return (ENOENT);
432 }
433
434 static int
435 jme_efuse_macaddr(struct jme_softc *sc)
436 {
437 uint32_t reg;
438 int i;
439
440 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
441 if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
442 EFUSE_CTL1_AUTOLAOD_DONE)
443 return (ENOENT);
444 /* Reset eFuse controller. */
445 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
446 reg |= EFUSE_CTL2_RESET;
447 pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
448 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
449 reg &= ~EFUSE_CTL2_RESET;
450 pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
451
452 /* Have eFuse reload station address to MAC controller. */
453 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
454 reg &= ~EFUSE_CTL1_CMD_MASK;
455 reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
456 pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);
457
458 /*
459 * Verify completion of the eFuse autoload command. It should be
460 * completed within 108us.
461 */
462 DELAY(110);
463 for (i = 10; i > 0; i--) {
464 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
465 if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
466 EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
467 DELAY(20);
468 continue;
469 }
470 if ((reg & EFUSE_CTL1_EXECUTE) == 0)
471 break;
472 /* Station address loading is still in progress. */
473 DELAY(20);
474 }
475 if (i == 0) {
476 device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
477 return (ETIMEDOUT);
478 }
479
480 return (0);
481 }
482
483 static void
484 jme_reg_macaddr(struct jme_softc *sc)
485 {
486 uint32_t par0, par1;
487
488 /* Read station address. */
489 par0 = CSR_READ_4(sc, JME_PAR0);
490 par1 = CSR_READ_4(sc, JME_PAR1);
491 par1 &= 0xFFFF;
492 if ((par0 == 0 && par1 == 0) ||
493 (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
494 device_printf(sc->jme_dev,
495 "Failed to retrieve Ethernet address.\n");
496 } else {
497 /*
498 * For controllers that use eFuse, the station address
499 * could also be extracted from JME_PCI_PAR0 and
500 * JME_PCI_PAR1 registers in PCI configuration space.
501 * Each register holds exactly half of the station address (24
502 * bits), so use the JME_PAR0 and JME_PAR1 registers instead.
503 */
504 sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
505 sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
506 sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
507 sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
508 sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
509 sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
510 }
511 }
512
513 static void
514 jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
515 {
516 uint32_t val;
517 int i;
518
519 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
520 /*
521 * Avoid reprogramming the station address if it is the
522 * same as the previous one. Note that a reprogrammed
523 * station address is permanent, as if it had been written
524 * to EEPROM. So if the station address was changed by the
525 * administrator, it is possible to lose the factory-configured
526 * address when the driver fails to restore it
527 * (e.g. after a reboot or system crash).
528 */
529 if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
530 for (i = 0; i < ETHER_ADDR_LEN; i++) {
531 val = JME_EFUSE_EEPROM_FUNC0 <<
532 JME_EFUSE_EEPROM_FUNC_SHIFT;
533 val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
534 JME_EFUSE_EEPROM_PAGE_SHIFT;
535 val |= (JME_PAR0 + i) <<
536 JME_EFUSE_EEPROM_ADDR_SHIFT;
537 val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
538 pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
539 val | JME_EFUSE_EEPROM_WRITE, 4);
540 }
541 }
542 } else {
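		/*
		 * Program the station address in little-endian byte order:
		 * PAR0 carries bytes 0-3 and PAR1 carries bytes 4-5,
		 * matching how jme_reg_macaddr() reads it back.
		 */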
543 CSR_WRITE_4(sc, JME_PAR0,
544 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
545 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
546 }
547 }
548
549 static void
550 jme_map_intr_vector(struct jme_softc *sc)
551 {
552 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
553
554 bzero(map, sizeof(map));
555
556 /* Map Tx interrupt sources to MSI/MSI-X vector 2. */
557 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
558 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
559 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
560 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
561 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
562 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
563 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
564 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
565 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
566 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
567 map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
568 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
569 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
570 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
571 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
572 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
573 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
574 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
575 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
576 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
577
578 /* Map Rx interrupt sources to MSI/MSI-X vector 1. */
579 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
580 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
581 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
582 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
583 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
584 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
585 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
586 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
587 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
588 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
589 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
590 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
591 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
592 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
593 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
594 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
595 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
596 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
597 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
598 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
599 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
600 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
601 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
602 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
603 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
604 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
605 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
606 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
607 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
608 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
609 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
610 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
611
612 /* All other interrupt sources stay on MSI/MSI-X vector 0; commit the map. */
613 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
614 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
615 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
616 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
617 }
618
619 static int
620 jme_attach(device_t dev)
621 {
622 struct jme_softc *sc;
623 if_t ifp;
624 struct mii_softc *miisc;
625 struct mii_data *mii;
626 uint32_t reg;
627 uint16_t burst;
628 int error, i, mii_flags, msic, msixc, pmc;
629
630 error = 0;
631 sc = device_get_softc(dev);
632 sc->jme_dev = dev;
633
634 mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
635 MTX_DEF);
636 callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
637 TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
638 TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
639
640 /*
641 * Map the device. The JMC250 supports both memory-mapped and
642 * I/O register space access. Because I/O access would require
643 * different BARs to reach the registers, it is a waste of time
644 * to use I/O register space access. The JMC250 uses 16K to map
645 * the entire memory space.
646 */
647 pci_enable_busmaster(dev);
648 sc->jme_res_spec = jme_res_spec_mem;
649 sc->jme_irq_spec = jme_irq_spec_legacy;
650 error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
651 if (error != 0) {
652 device_printf(dev, "cannot allocate memory resources.\n");
653 goto fail;
654 }
655
656 /* Allocate IRQ resources. */
657 msixc = pci_msix_count(dev);
658 msic = pci_msi_count(dev);
659 if (bootverbose) {
660 device_printf(dev, "MSIX count : %d\n", msixc);
661 device_printf(dev, "MSI count : %d\n", msic);
662 }
663
664 /* Use 1 MSI/MSI-X. */
665 if (msixc > 1)
666 msixc = 1;
667 if (msic > 1)
668 msic = 1;
669 /* Prefer MSIX over MSI. */
670 if (msix_disable == 0 || msi_disable == 0) {
671 if (msix_disable == 0 && msixc > 0 &&
672 pci_alloc_msix(dev, &msixc) == 0) {
673 if (msixc == 1) {
674 device_printf(dev, "Using %d MSIX messages.\n",
675 msixc);
676 sc->jme_flags |= JME_FLAG_MSIX;
677 sc->jme_irq_spec = jme_irq_spec_msi;
678 } else
679 pci_release_msi(dev);
680 }
681 if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
682 msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
683 if (msic == 1) {
684 device_printf(dev, "Using %d MSI messages.\n",
685 msic);
686 sc->jme_flags |= JME_FLAG_MSI;
687 sc->jme_irq_spec = jme_irq_spec_msi;
688 } else
689 pci_release_msi(dev);
690 }
691 /* Map interrupt vector 0, 1 and 2. */
692 if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
693 (sc->jme_flags & JME_FLAG_MSIX) != 0)
694 jme_map_intr_vector(sc);
695 }
696
697 error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
698 if (error != 0) {
699 device_printf(dev, "cannot allocate IRQ resources.\n");
700 goto fail;
701 }
702
703 sc->jme_rev = pci_get_device(dev);
704 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
705 sc->jme_flags |= JME_FLAG_FASTETH;
706 sc->jme_flags |= JME_FLAG_NOJUMBO;
707 }
708 reg = CSR_READ_4(sc, JME_CHIPMODE);
709 sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
710 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
711 CHIPMODE_NOT_FPGA)
712 sc->jme_flags |= JME_FLAG_FPGA;
713 if (bootverbose) {
714 device_printf(dev, "PCI device revision : 0x%04x\n",
715 sc->jme_rev);
716 device_printf(dev, "Chip revision : 0x%02x\n",
717 sc->jme_chip_rev);
718 if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
719 device_printf(dev, "FPGA revision : 0x%04x\n",
720 (reg & CHIPMODE_FPGA_REV_MASK) >>
721 CHIPMODE_FPGA_REV_SHIFT);
722 }
723 if (sc->jme_chip_rev == 0xFF) {
724 device_printf(dev, "Unknown chip revision : 0x%02x\n",
725 sc->jme_rev);
726 error = ENXIO;
727 goto fail;
728 }
729
730 /* Identify controller features and bugs. */
731 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
732 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
733 CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
734 sc->jme_flags |= JME_FLAG_DMA32BIT;
735 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
736 sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
737 sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
738 sc->jme_flags |= JME_FLAG_HWMIB;
739 }
740
741 /* Reset the ethernet controller. */
742 jme_reset(sc);
743
744 /* Get station address. */
745 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
746 error = jme_efuse_macaddr(sc);
747 if (error == 0)
748 jme_reg_macaddr(sc);
749 } else {
750 error = ENOENT;
751 reg = CSR_READ_4(sc, JME_SMBCSR);
752 if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
753 error = jme_eeprom_macaddr(sc);
754 if (error != 0 && bootverbose)
755 device_printf(sc->jme_dev,
756 "ethernet hardware address not found in EEPROM.\n");
757 if (error != 0)
758 jme_reg_macaddr(sc);
759 }
760
761 /*
762 * Save PHY address.
763 * The integrated JR0211 has a fixed PHY address whereas the FPGA
764 * version requires PHY probing to get the correct PHY address.
765 */
766 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
767 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
768 GPREG0_PHY_ADDR_MASK;
769 if (bootverbose)
770 device_printf(dev, "PHY is at address %d.\n",
771 sc->jme_phyaddr);
772 } else
773 sc->jme_phyaddr = 0;
774
775 /* Set max allowable DMA size. */
776 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
777 sc->jme_flags |= JME_FLAG_PCIE;
778 burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
779 if (bootverbose) {
780 device_printf(dev, "Read request size : %d bytes.\n",
781 128 << ((burst >> 12) & 0x07));
782 device_printf(dev, "TLP payload size : %d bytes.\n",
783 128 << ((burst >> 5) & 0x07));
784 }
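		/*
		 * Bits 14:12 of the PCIe device control register hold the
		 * Max Read Request Size, encoded as 128 << value; size the
		 * Tx DMA bursts to match.
		 */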
785 switch ((burst >> 12) & 0x07) {
786 case 0:
787 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
788 break;
789 case 1:
790 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
791 break;
792 default:
793 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
794 break;
795 }
796 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
797 } else {
798 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
799 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
800 }
801 /* Create coalescing sysctl node. */
802 jme_sysctl_node(sc);
803 if ((error = jme_dma_alloc(sc)) != 0)
804 goto fail;
805
806 ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
807 if (ifp == NULL) {
808 device_printf(dev, "cannot allocate ifnet structure.\n");
809 error = ENXIO;
810 goto fail;
811 }
812
813 if_setsoftc(ifp, sc);
814 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
815 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
816 if_setioctlfn(ifp, jme_ioctl);
817 if_setstartfn(ifp, jme_start);
818 if_setinitfn(ifp, jme_init);
819 if_setsendqlen(ifp, JME_TX_RING_CNT - 1);
820 if_setsendqready(ifp);
821 /* JMC250 supports Tx/Rx checksum offload as well as TSO. */
822 if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
823 if_sethwassist(ifp, JME_CSUM_FEATURES | CSUM_TSO);
824 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
825 sc->jme_flags |= JME_FLAG_PMCAP;
826 if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
827 }
828 if_setcapenable(ifp, if_getcapabilities(ifp));
829
830 /* Wakeup PHY. */
831 jme_phy_up(sc);
832 mii_flags = MIIF_DOPAUSE;
833 /* Ask the PHY driver to perform PHY calibration. */
834 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
835 mii_flags |= MIIF_MACPRIV0;
836 /* Set up MII bus. */
837 error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
838 jme_mediastatus, BMSR_DEFCAPMASK,
839 sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
840 MII_OFFSET_ANY, mii_flags);
841 if (error != 0) {
842 device_printf(dev, "attaching PHYs failed\n");
843 goto fail;
844 }
845
846 /*
847 * Force PHY to FPGA mode.
848 */
849 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
850 mii = device_get_softc(sc->jme_miibus);
851 if (mii->mii_instance != 0) {
852 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
853 if (miisc->mii_phy != 0) {
854 sc->jme_phyaddr = miisc->mii_phy;
855 break;
856 }
857 }
858 if (sc->jme_phyaddr != 0) {
859 device_printf(sc->jme_dev,
860 "FPGA PHY is at %d\n", sc->jme_phyaddr);
861 /* vendor magic. */
862 jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
863 0x0004);
864 }
865 }
866 }
867
868 ether_ifattach(ifp, sc->jme_eaddr);
869
870 /* VLAN capability setup */
871 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
872 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
873 if_setcapenable(ifp, if_getcapabilities(ifp));
874
875 /* Tell the upper layer(s) we support long frames. */
876 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
877
878 /* Create local taskq. */
879 sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
880 taskqueue_thread_enqueue, &sc->jme_tq);
881 if (sc->jme_tq == NULL) {
882 device_printf(dev, "could not create taskqueue.\n");
883 ether_ifdetach(ifp);
884 error = ENXIO;
885 goto fail;
886 }
887 taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
888 device_get_nameunit(sc->jme_dev));
889
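	/*
	 * Only one interrupt vector is serviced even in MSI/MSI-X mode;
	 * the loop form presumably leaves room for multi-vector support.
	 */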
890 for (i = 0; i < 1; i++) {
891 error = bus_setup_intr(dev, sc->jme_irq[i],
892 INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
893 &sc->jme_intrhand[i]);
894 if (error != 0)
895 break;
896 }
897
898 if (error != 0) {
899 device_printf(dev, "could not set up interrupt handler.\n");
900 taskqueue_free(sc->jme_tq);
901 sc->jme_tq = NULL;
902 ether_ifdetach(ifp);
903 goto fail;
904 }
905
906 fail:
907 if (error != 0)
908 jme_detach(dev);
909
910 return (error);
911 }
912
913 static int
914 jme_detach(device_t dev)
915 {
916 struct jme_softc *sc;
917 if_t ifp;
918 int i;
919
920 sc = device_get_softc(dev);
921
922 ifp = sc->jme_ifp;
923 if (device_is_attached(dev)) {
924 JME_LOCK(sc);
925 sc->jme_flags |= JME_FLAG_DETACH;
926 jme_stop(sc);
927 JME_UNLOCK(sc);
928 callout_drain(&sc->jme_tick_ch);
929 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
930 taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
931 /* Restore possibly modified station address. */
932 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
933 jme_set_macaddr(sc, sc->jme_eaddr);
934 ether_ifdetach(ifp);
935 }
936
937 if (sc->jme_tq != NULL) {
938 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
939 taskqueue_free(sc->jme_tq);
940 sc->jme_tq = NULL;
941 }
942
943 if (sc->jme_miibus != NULL) {
944 device_delete_child(dev, sc->jme_miibus);
945 sc->jme_miibus = NULL;
946 }
947 bus_generic_detach(dev);
948 jme_dma_free(sc);
949
950 if (ifp != NULL) {
951 if_free(ifp);
952 sc->jme_ifp = NULL;
953 }
954
955 for (i = 0; i < 1; i++) {
956 if (sc->jme_intrhand[i] != NULL) {
957 bus_teardown_intr(dev, sc->jme_irq[i],
958 sc->jme_intrhand[i]);
959 sc->jme_intrhand[i] = NULL;
960 }
961 }
962
963 if (sc->jme_irq[0] != NULL)
964 bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
965 if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
966 pci_release_msi(dev);
967 if (sc->jme_res[0] != NULL)
968 bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
969 mtx_destroy(&sc->jme_mtx);
970
971 return (0);
972 }
973
974 #define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \
975 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
976
977 static void
978 jme_sysctl_node(struct jme_softc *sc)
979 {
980 struct sysctl_ctx_list *ctx;
981 struct sysctl_oid_list *child, *parent;
982 struct sysctl_oid *tree;
983 struct jme_hw_stats *stats;
984 int error;
985
986 stats = &sc->jme_stats;
987 ctx = device_get_sysctl_ctx(sc->jme_dev);
988 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
989
990 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
991 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_to,
992 0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
993
994 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
995 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_pkt,
996 0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
997
998 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
999 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_to,
1000 0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
1001
1002 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
1003 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_pkt,
1004 0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
1005
1006 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
1007 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1008 &sc->jme_process_limit, 0, sysctl_hw_jme_proc_limit, "I",
1009 "max number of Rx events to process");
1010
1011 /* Pull in device tunables. */
1012 sc->jme_process_limit = JME_PROC_DEFAULT;
1013 error = resource_int_value(device_get_name(sc->jme_dev),
1014 device_get_unit(sc->jme_dev), "process_limit",
1015 &sc->jme_process_limit);
1016 if (error == 0) {
1017 if (sc->jme_process_limit < JME_PROC_MIN ||
1018 sc->jme_process_limit > JME_PROC_MAX) {
1019 device_printf(sc->jme_dev,
1020 "process_limit value out of range; "
1021 "using default: %d\n", JME_PROC_DEFAULT);
1022 sc->jme_process_limit = JME_PROC_DEFAULT;
1023 }
1024 }
1025
1026 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1027 error = resource_int_value(device_get_name(sc->jme_dev),
1028 device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
1029 if (error == 0) {
1030 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
1031 sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
1032 device_printf(sc->jme_dev,
1033 "tx_coal_to value out of range; "
1034 "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
1035 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1036 }
1037 }
1038
1039 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1040 error = resource_int_value(device_get_name(sc->jme_dev),
1041 device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
1042 if (error == 0) {
1043 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
1044 sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
1045 device_printf(sc->jme_dev,
1046 "tx_coal_pkt value out of range; "
1047 "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
1048 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1049 }
1050 }
1051
1052 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1053 error = resource_int_value(device_get_name(sc->jme_dev),
1054 device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
1055 if (error == 0) {
1056 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
1057 sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
1058 device_printf(sc->jme_dev,
1059 "rx_coal_to value out of range; "
1060 "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
1061 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1062 }
1063 }
1064
1065 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1066 error = resource_int_value(device_get_name(sc->jme_dev),
1067 device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
1068 if (error == 0) {
1069 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
1070 sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
1071 device_printf(sc->jme_dev,
1072 "rx_coal_pkt value out of range; "
1073 "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
1074 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1075 }
1076 }
1077
1078 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
1079 return;
1080
1081 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
1082 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "JME statistics");
1083 parent = SYSCTL_CHILDREN(tree);
1084
1085 /* Rx statistics. */
1086 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
1087 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
1088 child = SYSCTL_CHILDREN(tree);
1089 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1090 &stats->rx_good_frames, "Good frames");
1091 JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1092 &stats->rx_crc_errs, "CRC errors");
1093 JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
1094 &stats->rx_mii_errs, "MII errors");
1095 JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1096 &stats->rx_fifo_oflows, "FIFO overflows");
1097 JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
1098 &stats->rx_desc_empty, "Descriptor empty");
1099 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1100 &stats->rx_bad_frames, "Bad frames");
1101
1102 /* Tx statistics. */
1103 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
1104 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
1105 child = SYSCTL_CHILDREN(tree);
1106 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1107 &stats->tx_good_frames, "Good frames");
1108 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1109 &stats->tx_bad_frames, "Bad frames");
1110 }
1111
1112 #undef JME_SYSCTL_STAT_ADD32
1113
1114 struct jme_dmamap_arg {
1115 bus_addr_t jme_busaddr;
1116 };
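/*
 * bus_dmamap_load() reports the DMA segments only inside its callback,
 * so jme_dmamap_cb() copies the single segment's bus address out through
 * this argument structure for the ring and status-block loads below.
 */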
1117
1118 static void
1119 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1120 {
1121 struct jme_dmamap_arg *ctx;
1122
1123 if (error != 0)
1124 return;
1125
1126 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1127
1128 ctx = (struct jme_dmamap_arg *)arg;
1129 ctx->jme_busaddr = segs[0].ds_addr;
1130 }
1131
1132 static int
1133 jme_dma_alloc(struct jme_softc *sc)
1134 {
1135 struct jme_dmamap_arg ctx;
1136 struct jme_txdesc *txd;
1137 struct jme_rxdesc *rxd;
1138 bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
1139 int error, i;
1140
1141 lowaddr = BUS_SPACE_MAXADDR;
1142 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1143 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1144
1145 again:
1146 /* Create parent ring tag. */
1147 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1148 1, 0, /* algnmnt, boundary */
1149 lowaddr, /* lowaddr */
1150 BUS_SPACE_MAXADDR, /* highaddr */
1151 NULL, NULL, /* filter, filterarg */
1152 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1153 0, /* nsegments */
1154 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1155 0, /* flags */
1156 NULL, NULL, /* lockfunc, lockarg */
1157 &sc->jme_cdata.jme_ring_tag);
1158 if (error != 0) {
1159 device_printf(sc->jme_dev,
1160 "could not create parent ring DMA tag.\n");
1161 goto fail;
1162 }
1163 /* Create tag for Tx ring. */
1164 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1165 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */
1166 BUS_SPACE_MAXADDR, /* lowaddr */
1167 BUS_SPACE_MAXADDR, /* highaddr */
1168 NULL, NULL, /* filter, filterarg */
1169 JME_TX_RING_SIZE, /* maxsize */
1170 1, /* nsegments */
1171 JME_TX_RING_SIZE, /* maxsegsize */
1172 0, /* flags */
1173 NULL, NULL, /* lockfunc, lockarg */
1174 &sc->jme_cdata.jme_tx_ring_tag);
1175 if (error != 0) {
1176 device_printf(sc->jme_dev,
1177 "could not allocate Tx ring DMA tag.\n");
1178 goto fail;
1179 }
1180
1181 /* Create tag for Rx ring. */
1182 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1183 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */
1184 lowaddr, /* lowaddr */
1185 BUS_SPACE_MAXADDR, /* highaddr */
1186 NULL, NULL, /* filter, filterarg */
1187 JME_RX_RING_SIZE, /* maxsize */
1188 1, /* nsegments */
1189 JME_RX_RING_SIZE, /* maxsegsize */
1190 0, /* flags */
1191 NULL, NULL, /* lockfunc, lockarg */
1192 &sc->jme_cdata.jme_rx_ring_tag);
1193 if (error != 0) {
1194 device_printf(sc->jme_dev,
1195 "could not allocate Rx ring DMA tag.\n");
1196 goto fail;
1197 }
1198
1199 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1200 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1201 (void **)&sc->jme_rdata.jme_tx_ring,
1202 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1203 &sc->jme_cdata.jme_tx_ring_map);
1204 if (error != 0) {
1205 device_printf(sc->jme_dev,
1206 "could not allocate DMA'able memory for Tx ring.\n");
1207 goto fail;
1208 }
1209
1210 ctx.jme_busaddr = 0;
1211 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1212 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1213 JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1214 if (error != 0 || ctx.jme_busaddr == 0) {
1215 device_printf(sc->jme_dev,
1216 "could not load DMA'able memory for Tx ring.\n");
1217 goto fail;
1218 }
1219 sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
1220
1221 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1222 error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1223 (void **)&sc->jme_rdata.jme_rx_ring,
1224 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1225 &sc->jme_cdata.jme_rx_ring_map);
1226 if (error != 0) {
1227 device_printf(sc->jme_dev,
1228 "could not allocate DMA'able memory for Rx ring.\n");
1229 goto fail;
1230 }
1231
1232 ctx.jme_busaddr = 0;
1233 error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1234 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1235 JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1236 if (error != 0 || ctx.jme_busaddr == 0) {
1237 device_printf(sc->jme_dev,
1238 "could not load DMA'able memory for Rx ring.\n");
1239 goto fail;
1240 }
1241 sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
1242
1243 if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1244 /* Tx/Rx descriptor queue should reside within 4GB boundary. */
1245 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
1246 JME_TX_RING_SIZE;
1247 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
1248 JME_RX_RING_SIZE;
1249 if ((JME_ADDR_HI(tx_ring_end) !=
1250 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1251 (JME_ADDR_HI(rx_ring_end) !=
1252 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1253 device_printf(sc->jme_dev, "4GB boundary crossed, "
1254 "switching to 32bit DMA address mode.\n");
1255 jme_dma_free(sc);
1256 /* Limit DMA address space to 32bit and try again. */
1257 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1258 goto again;
1259 }
1260 }
1261
1262 lowaddr = BUS_SPACE_MAXADDR;
1263 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1264 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1265 /* Create parent buffer tag. */
1266 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1267 1, 0, /* algnmnt, boundary */
1268 lowaddr, /* lowaddr */
1269 BUS_SPACE_MAXADDR, /* highaddr */
1270 NULL, NULL, /* filter, filterarg */
1271 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1272 0, /* nsegments */
1273 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1274 0, /* flags */
1275 NULL, NULL, /* lockfunc, lockarg */
1276 &sc->jme_cdata.jme_buffer_tag);
1277 if (error != 0) {
1278 device_printf(sc->jme_dev,
1279 "could not create parent buffer DMA tag.\n");
1280 goto fail;
1281 }
1282
1283 /* Create shadow status block tag. */
1284 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1285 JME_SSB_ALIGN, 0, /* algnmnt, boundary */
1286 BUS_SPACE_MAXADDR, /* lowaddr */
1287 BUS_SPACE_MAXADDR, /* highaddr */
1288 NULL, NULL, /* filter, filterarg */
1289 JME_SSB_SIZE, /* maxsize */
1290 1, /* nsegments */
1291 JME_SSB_SIZE, /* maxsegsize */
1292 0, /* flags */
1293 NULL, NULL, /* lockfunc, lockarg */
1294 &sc->jme_cdata.jme_ssb_tag);
1295 if (error != 0) {
1296 device_printf(sc->jme_dev,
1297 "could not create shared status block DMA tag.\n");
1298 goto fail;
1299 }
1300
1301 /* Create tag for Tx buffers. */
1302 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1303 1, 0, /* algnmnt, boundary */
1304 BUS_SPACE_MAXADDR, /* lowaddr */
1305 BUS_SPACE_MAXADDR, /* highaddr */
1306 NULL, NULL, /* filter, filterarg */
1307 JME_TSO_MAXSIZE, /* maxsize */
1308 JME_MAXTXSEGS, /* nsegments */
1309 JME_TSO_MAXSEGSIZE, /* maxsegsize */
1310 0, /* flags */
1311 NULL, NULL, /* lockfunc, lockarg */
1312 &sc->jme_cdata.jme_tx_tag);
1313 if (error != 0) {
1314 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1315 goto fail;
1316 }
1317
1318 /* Create tag for Rx buffers. */
1319 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1320 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
1321 BUS_SPACE_MAXADDR, /* lowaddr */
1322 BUS_SPACE_MAXADDR, /* highaddr */
1323 NULL, NULL, /* filter, filterarg */
1324 MCLBYTES, /* maxsize */
1325 1, /* nsegments */
1326 MCLBYTES, /* maxsegsize */
1327 0, /* flags */
1328 NULL, NULL, /* lockfunc, lockarg */
1329 &sc->jme_cdata.jme_rx_tag);
1330 if (error != 0) {
1331 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1332 goto fail;
1333 }
1334
1335 /*
1336 * Allocate DMA'able memory and load the DMA map for shared
1337 * status block.
1338 */
1339 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1340 (void **)&sc->jme_rdata.jme_ssb_block,
1341 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1342 &sc->jme_cdata.jme_ssb_map);
1343 if (error != 0) {
1344 device_printf(sc->jme_dev, "could not allocate DMA'able "
1345 "memory for shared status block.\n");
1346 goto fail;
1347 }
1348
1349 ctx.jme_busaddr = 0;
1350 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1351 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1352 JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1353 if (error != 0 || ctx.jme_busaddr == 0) {
1354 device_printf(sc->jme_dev, "could not load DMA'able memory "
1355 "for shared status block.\n");
1356 goto fail;
1357 }
1358 sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
1359
1360 /* Create DMA maps for Tx buffers. */
1361 for (i = 0; i < JME_TX_RING_CNT; i++) {
1362 txd = &sc->jme_cdata.jme_txdesc[i];
1363 txd->tx_m = NULL;
1364 txd->tx_dmamap = NULL;
1365 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1366 &txd->tx_dmamap);
1367 if (error != 0) {
1368 device_printf(sc->jme_dev,
1369 "could not create Tx dmamap.\n");
1370 goto fail;
1371 }
1372 }
1373 /* Create DMA maps for Rx buffers. */
1374 if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1375 &sc->jme_cdata.jme_rx_sparemap)) != 0) {
1376 device_printf(sc->jme_dev,
1377 "could not create spare Rx dmamap.\n");
1378 goto fail;
1379 }
1380 for (i = 0; i < JME_RX_RING_CNT; i++) {
1381 rxd = &sc->jme_cdata.jme_rxdesc[i];
1382 rxd->rx_m = NULL;
1383 rxd->rx_dmamap = NULL;
1384 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1385 &rxd->rx_dmamap);
1386 if (error != 0) {
1387 device_printf(sc->jme_dev,
1388 "could not create Rx dmamap.\n");
1389 goto fail;
1390 }
1391 }
1392
1393 fail:
1394 return (error);
1395 }
1396
1397 static void
1398 jme_dma_free(struct jme_softc *sc)
1399 {
1400 struct jme_txdesc *txd;
1401 struct jme_rxdesc *rxd;
1402 int i;
1403
1404 /* Tx ring */
1405 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1406 if (sc->jme_rdata.jme_tx_ring_paddr)
1407 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1408 sc->jme_cdata.jme_tx_ring_map);
1409 if (sc->jme_rdata.jme_tx_ring)
1410 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1411 sc->jme_rdata.jme_tx_ring,
1412 sc->jme_cdata.jme_tx_ring_map);
1413 sc->jme_rdata.jme_tx_ring = NULL;
1414 sc->jme_rdata.jme_tx_ring_paddr = 0;
1415 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1416 sc->jme_cdata.jme_tx_ring_tag = NULL;
1417 }
1418 /* Rx ring */
1419 if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1420 if (sc->jme_rdata.jme_rx_ring_paddr)
1421 bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1422 sc->jme_cdata.jme_rx_ring_map);
1423 if (sc->jme_rdata.jme_rx_ring)
1424 bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1425 sc->jme_rdata.jme_rx_ring,
1426 sc->jme_cdata.jme_rx_ring_map);
1427 sc->jme_rdata.jme_rx_ring = NULL;
1428 sc->jme_rdata.jme_rx_ring_paddr = 0;
1429 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1430 sc->jme_cdata.jme_rx_ring_tag = NULL;
1431 }
1432 /* Tx buffers */
1433 if (sc->jme_cdata.jme_tx_tag != NULL) {
1434 for (i = 0; i < JME_TX_RING_CNT; i++) {
1435 txd = &sc->jme_cdata.jme_txdesc[i];
1436 if (txd->tx_dmamap != NULL) {
1437 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1438 txd->tx_dmamap);
1439 txd->tx_dmamap = NULL;
1440 }
1441 }
1442 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1443 sc->jme_cdata.jme_tx_tag = NULL;
1444 }
1445 /* Rx buffers */
1446 if (sc->jme_cdata.jme_rx_tag != NULL) {
1447 for (i = 0; i < JME_RX_RING_CNT; i++) {
1448 rxd = &sc->jme_cdata.jme_rxdesc[i];
1449 if (rxd->rx_dmamap != NULL) {
1450 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1451 rxd->rx_dmamap);
1452 rxd->rx_dmamap = NULL;
1453 }
1454 }
1455 if (sc->jme_cdata.jme_rx_sparemap != NULL) {
1456 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1457 sc->jme_cdata.jme_rx_sparemap);
1458 sc->jme_cdata.jme_rx_sparemap = NULL;
1459 }
1460 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1461 sc->jme_cdata.jme_rx_tag = NULL;
1462 }
1463
1464 /* Shared status block. */
1465 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1466 if (sc->jme_rdata.jme_ssb_block_paddr)
1467 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1468 sc->jme_cdata.jme_ssb_map);
1469 if (sc->jme_rdata.jme_ssb_block)
1470 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1471 sc->jme_rdata.jme_ssb_block,
1472 sc->jme_cdata.jme_ssb_map);
1473 sc->jme_rdata.jme_ssb_block = NULL;
1474 sc->jme_rdata.jme_ssb_block_paddr = 0;
1475 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1476 sc->jme_cdata.jme_ssb_tag = NULL;
1477 }
1478
1479 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1480 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1481 sc->jme_cdata.jme_buffer_tag = NULL;
1482 }
1483 if (sc->jme_cdata.jme_ring_tag != NULL) {
1484 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1485 sc->jme_cdata.jme_ring_tag = NULL;
1486 }
1487 }
1488
1489 /*
1490 * Make sure the interface is stopped at reboot time.
1491 */
1492 static int
1493 jme_shutdown(device_t dev)
1494 {
1495
1496 return (jme_suspend(dev));
1497 }
1498
1499 /*
1500 * Unlike other ethernet controllers, the JMC250 requires the
1501 * link speed to be explicitly reset to 10/100Mbps, as a gigabit
1502 * link would consume more than 375mA.
1503 * Note, we reset the link speed to 10/100Mbps via
1504 * auto-negotiation, but we don't know whether that operation
1505 * will succeed as we have no control after powering
1506 * off. If the renegotiation fails, WOL may not work. Running
1507 * at 1Gbps draws more than 375mA at 3.3V, the limit
1508 * specified in the PCI specification, and that would result in
1509 * power to the ethernet controller being shut down completely.
1510 *
1511 * TODO
1512 * Save the current negotiated media speed/duplex/flow-control
1513 * to the softc and restore the same link again after resuming.
1514 * PHY handling such as powering down/resetting to 100Mbps
1515 * may be better handled in the suspend method of the phy driver.
1516 */
1517 static void
1518 jme_setlinkspeed(struct jme_softc *sc)
1519 {
1520 struct mii_data *mii;
1521 int aneg, i;
1522
1523 JME_LOCK_ASSERT(sc);
1524
1525 mii = device_get_softc(sc->jme_miibus);
1526 mii_pollstat(mii);
1527 aneg = 0;
1528 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1529 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1530 case IFM_10_T:
1531 case IFM_100_TX:
1532 return;
1533 case IFM_1000_T:
1534 aneg++;
1535 default:
1536 break;
1537 }
1538 }
1539 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1540 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1541 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1542 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1543 BMCR_AUTOEN | BMCR_STARTNEG);
1544 DELAY(1000);
1545 if (aneg != 0) {
1546 /* Poll the link state until jme(4) gets a 10/100 link. */
1547 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1548 mii_pollstat(mii);
1549 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1550 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1551 case IFM_10_T:
1552 case IFM_100_TX:
1553 jme_mac_config(sc);
1554 return;
1555 default:
1556 break;
1557 }
1558 }
1559 JME_UNLOCK(sc);
1560 pause("jmelnk", hz);
1561 JME_LOCK(sc);
1562 }
1563 if (i == MII_ANEGTICKS_GIGE)
1564 device_printf(sc->jme_dev, "establishing link failed, "
1565 "WOL may not work!");
1566 }
1567 /*
1568 * No link, force MAC to have 100Mbps, full-duplex link.
1569 * This is the last resort and may/may not work.
1570 */
1571 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1572 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1573 jme_mac_config(sc);
1574 }
1575
1576 static void
1577 jme_setwol(struct jme_softc *sc)
1578 {
1579 if_t ifp;
1580 uint32_t gpr, pmcs;
1581 uint16_t pmstat;
1582 int pmc;
1583
1584 JME_LOCK_ASSERT(sc);
1585
1586 if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1587 /* Remove Tx MAC/offload clock to save more power. */
1588 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1589 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1590 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1591 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1592 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
1593 CSR_WRITE_4(sc, JME_GPREG1,
1594 CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
1595 /* No PME capability, PHY power down. */
1596 jme_phy_down(sc);
1597 return;
1598 }
1599
1600 ifp = sc->jme_ifp;
1601 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1602 pmcs = CSR_READ_4(sc, JME_PMCS);
1603 pmcs &= ~PMCS_WOL_ENB_MASK;
1604 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
1605 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1606 /* Enable PME message. */
1607 gpr |= GPREG0_PME_ENB;
1608 /* For gigabit controllers, reset link speed to 10/100. */
1609 if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
1610 jme_setlinkspeed(sc);
1611 }
1612
1613 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1614 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1615 /* Remove Tx MAC/offload clock to save more power. */
1616 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1617 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1618 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1619 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1620 /* Request PME. */
1621 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1622 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1623 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
1624 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1625 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1626 if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
1627 /* No WOL, PHY power down. */
1628 jme_phy_down(sc);
1629 }
1630 }
1631
1632 static int
1633 jme_suspend(device_t dev)
1634 {
1635 struct jme_softc *sc;
1636
1637 sc = device_get_softc(dev);
1638
1639 JME_LOCK(sc);
1640 jme_stop(sc);
1641 jme_setwol(sc);
1642 JME_UNLOCK(sc);
1643
1644 return (0);
1645 }
1646
1647 static int
1648 jme_resume(device_t dev)
1649 {
1650 struct jme_softc *sc;
1651 if_t ifp;
1652 uint16_t pmstat;
1653 int pmc;
1654
1655 sc = device_get_softc(dev);
1656
1657 JME_LOCK(sc);
1658 if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
1659 pmstat = pci_read_config(sc->jme_dev,
1660 pmc + PCIR_POWER_STATUS, 2);
1661 /* Disable PME and clear PME status. */
1662 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1663 pci_write_config(sc->jme_dev,
1664 pmc + PCIR_POWER_STATUS, pmstat, 2);
1665 }
1666 /* Wakeup PHY. */
1667 jme_phy_up(sc);
1668 ifp = sc->jme_ifp;
1669 if ((if_getflags(ifp) & IFF_UP) != 0) {
1670 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1671 jme_init_locked(sc);
1672 }
1673
1674 JME_UNLOCK(sc);
1675
1676 return (0);
1677 }
1678
1679 static int
1680 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1681 {
1682 struct jme_txdesc *txd;
1683 struct jme_desc *desc;
1684 struct mbuf *m;
1685 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1686 int error, i, nsegs, prod;
1687 uint32_t cflags, tsosegsz;
1688
1689 JME_LOCK_ASSERT(sc);
1690
1691 M_ASSERTPKTHDR((*m_head));
1692
1693 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1694 /*
1695 * Due to its adherence to the NDIS specification, the JMC250
1696 * assumes the upper stack computed the TCP pseudo checksum
1697 * without including the payload length. This breaks
1698 * checksum offload for the TSO case, so recompute the TCP
1699 * pseudo checksum for the JMC250. Hopefully this isn't
1700 * much of a burden on modern CPUs.
1701 */
1702 struct ether_header *eh;
1703 struct ip *ip;
1704 struct tcphdr *tcp;
1705 uint32_t ip_off, poff;
1706
1707 if (M_WRITABLE(*m_head) == 0) {
1708 /* Get a writable copy. */
1709 m = m_dup(*m_head, M_NOWAIT);
1710 m_freem(*m_head);
1711 if (m == NULL) {
1712 *m_head = NULL;
1713 return (ENOBUFS);
1714 }
1715 *m_head = m;
1716 }
1717 ip_off = sizeof(struct ether_header);
1718 m = m_pullup(*m_head, ip_off);
1719 if (m == NULL) {
1720 *m_head = NULL;
1721 return (ENOBUFS);
1722 }
1723 eh = mtod(m, struct ether_header *);
1724 /* Check the existence of VLAN tag. */
1725 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1726 ip_off = sizeof(struct ether_vlan_header);
1727 m = m_pullup(m, ip_off);
1728 if (m == NULL) {
1729 *m_head = NULL;
1730 return (ENOBUFS);
1731 }
1732 }
1733 m = m_pullup(m, ip_off + sizeof(struct ip));
1734 if (m == NULL) {
1735 *m_head = NULL;
1736 return (ENOBUFS);
1737 }
1738 ip = (struct ip *)(mtod(m, char *) + ip_off);
1739 poff = ip_off + (ip->ip_hl << 2);
1740 m = m_pullup(m, poff + sizeof(struct tcphdr));
1741 if (m == NULL) {
1742 *m_head = NULL;
1743 return (ENOBUFS);
1744 }
1745 /*
1746 * Reset IP checksum and recompute TCP pseudo
1747 * checksum that NDIS specification requires.
1748 */
1749 ip = (struct ip *)(mtod(m, char *) + ip_off);
1750 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1751 ip->ip_sum = 0;
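/*
 * Two cases follow: if the frame carries no TCP payload (header-only,
 * so there is nothing to segment), include the TCP header length in
 * the pseudo checksum and fall back to plain IP checksum offload
 * below; otherwise compute the length-less pseudo checksum the
 * controller expects for TSO, leaving the per-segment length to the
 * hardware.
 */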
1752 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
1753 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1754 ip->ip_dst.s_addr,
1755 htons((tcp->th_off << 2) + IPPROTO_TCP));
1756 /* No need to TSO, force IP checksum offload. */
1757 (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
1758 (*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
1759 } else
1760 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1761 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1762 *m_head = m;
1763 }
1764
1765 prod = sc->jme_cdata.jme_tx_prod;
1766 txd = &sc->jme_cdata.jme_txdesc[prod];
1767
1768 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1769 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1770 if (error == EFBIG) {
1771 m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS);
1772 if (m == NULL) {
1773 m_freem(*m_head);
1774 *m_head = NULL;
1775 return (ENOMEM);
1776 }
1777 *m_head = m;
1778 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1779 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1780 if (error != 0) {
1781 m_freem(*m_head);
1782 *m_head = NULL;
1783 return (error);
1784 }
1785 } else if (error != 0)
1786 return (error);
1787 if (nsegs == 0) {
1788 m_freem(*m_head);
1789 *m_head = NULL;
1790 return (EIO);
1791 }
1792
1793 /*
1794 * Check descriptor overrun. Leave one free descriptor.
1795 * Since we always use 64bit address mode for transmitting,
1796 * each Tx request requires one more dummy descriptor.
1797 */
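/*
 * That is, each frame consumes nsegs + 1 ring entries (one header
 * descriptor plus one per DMA segment), and at least one ring slot is
 * always kept free so the producer never catches up with the consumer.
 */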
1798 if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
1799 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1800 return (ENOBUFS);
1801 }
1802
1803 m = *m_head;
1804 cflags = 0;
1805 tsosegsz = 0;
1806 /* Configure checksum offload and TSO. */
1807 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1808 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1809 JME_TD_MSS_SHIFT;
1810 cflags |= JME_TD_TSO;
1811 } else {
1812 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1813 cflags |= JME_TD_IPCSUM;
1814 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1815 cflags |= JME_TD_TCPCSUM;
1816 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1817 cflags |= JME_TD_UDPCSUM;
1818 }
1819 /* Configure VLAN. */
1820 if ((m->m_flags & M_VLANTAG) != 0) {
1821 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1822 cflags |= JME_TD_VLAN_TAG;
1823 }
1824
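/*
 * The first descriptor carries the per-frame flags. In 64-bit
 * addressing mode it references no buffer; its addr_hi field is used
 * to pass the total frame length to the hardware.
 */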
1825 desc = &sc->jme_rdata.jme_tx_ring[prod];
1826 desc->flags = htole32(cflags);
1827 desc->buflen = htole32(tsosegsz);
1828 desc->addr_hi = htole32(m->m_pkthdr.len);
1829 desc->addr_lo = 0;
1830 sc->jme_cdata.jme_tx_cnt++;
1831 JME_DESC_INC(prod, JME_TX_RING_CNT);
1832 for (i = 0; i < nsegs; i++) {
1833 desc = &sc->jme_rdata.jme_tx_ring[prod];
1834 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1835 desc->buflen = htole32(txsegs[i].ds_len);
1836 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1837 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1838 sc->jme_cdata.jme_tx_cnt++;
1839 JME_DESC_INC(prod, JME_TX_RING_CNT);
1840 }
1841
1842 /* Update producer index. */
1843 sc->jme_cdata.jme_tx_prod = prod;
1844 /*
1845 * Finally, request an interrupt and give ownership of the
1846 * first descriptor to the hardware.
1847 */
1848 desc = txd->tx_desc;
1849 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1850
1851 txd->tx_m = m;
1852 txd->tx_ndesc = nsegs + 1;
1853
1854 /* Sync descriptors. */
1855 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1856 BUS_DMASYNC_PREWRITE);
1857 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1858 sc->jme_cdata.jme_tx_ring_map,
1859 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1860
1861 return (0);
1862 }
1863
1864 static void
1865 jme_start(if_t ifp)
1866 {
1867 struct jme_softc *sc;
1868
1869 sc = if_getsoftc(ifp);
1870 JME_LOCK(sc);
1871 jme_start_locked(ifp);
1872 JME_UNLOCK(sc);
1873 }
1874
1875 static void
1876 jme_start_locked(if_t ifp)
1877 {
1878 struct jme_softc *sc;
1879 struct mbuf *m_head;
1880 int enq;
1881
1882 sc = if_getsoftc(ifp);
1883
1884 JME_LOCK_ASSERT(sc);
1885
1886 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1887 jme_txeof(sc);
1888
1889 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1890 IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0)
1891 return;
1892
1893 for (enq = 0; !if_sendq_empty(ifp); ) {
1894 m_head = if_dequeue(ifp);
1895 if (m_head == NULL)
1896 break;
1897 /*
1898 * Pack the data into the transmit ring. If we
1899 * don't have room, set the OACTIVE flag and wait
1900 * for the NIC to drain the ring.
1901 */
1902 if (jme_encap(sc, &m_head)) {
1903 if (m_head == NULL)
1904 break;
1905 if_sendq_prepend(ifp, m_head);
1906 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1907 break;
1908 }
1909
1910 enq++;
1911 /*
1912 * If there's a BPF listener, bounce a copy of this frame
1913 * to him.
1914 */
1915 ETHER_BPF_MTAP(ifp, m_head);
1916 }
1917
1918 if (enq > 0) {
1919 /*
1920 * Reading TXCSR takes a very long time under heavy load, so
1921 * cache the TXCSR value and write the cached value ORed with
1922 * the kick command to TXCSR. This saves one register access
1923 * cycle.
1924 */
1925 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1926 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1927 /* Set a timeout in case the chip goes out to lunch. */
1928 sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1929 }
1930 }
1931
1932 static void
1933 jme_watchdog(struct jme_softc *sc)
1934 {
1935 if_t ifp;
1936
1937 JME_LOCK_ASSERT(sc);
1938
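/*
 * The watchdog timer is armed by jme_start_locked() and disarmed by
 * jme_txeof() once the Tx ring drains; do nothing unless it was armed
 * and has now counted down to zero.
 */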
1939 if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1940 return;
1941
1942 ifp = sc->jme_ifp;
1943 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1944 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1945 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1946 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1947 jme_init_locked(sc);
1948 return;
1949 }
1950 jme_txeof(sc);
1951 if (sc->jme_cdata.jme_tx_cnt == 0) {
1952 if_printf(sc->jme_ifp,
1953 "watchdog timeout (missed Tx interrupts) -- recovering\n");
1954 if (!if_sendq_empty(ifp))
1955 jme_start_locked(ifp);
1956 return;
1957 }
1958
1959 if_printf(sc->jme_ifp, "watchdog timeout\n");
1960 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1961 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1962 jme_init_locked(sc);
1963 if (!if_sendq_empty(ifp))
1964 jme_start_locked(ifp);
1965 }
1966
1967 static int
1968 jme_ioctl(if_t ifp, u_long cmd, caddr_t data)
1969 {
1970 struct jme_softc *sc;
1971 struct ifreq *ifr;
1972 struct mii_data *mii;
1973 uint32_t reg;
1974 int error, mask;
1975
1976 sc = if_getsoftc(ifp);
1977 ifr = (struct ifreq *)data;
1978 error = 0;
1979 switch (cmd) {
1980 case SIOCSIFMTU:
1981 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1982 ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1983 ifr->ifr_mtu > JME_MAX_MTU)) {
1984 error = EINVAL;
1985 break;
1986 }
1987
1988 if (if_getmtu(ifp) != ifr->ifr_mtu) {
1989 /*
1990 * No special configuration is required when the interface
1991 * MTU is changed, but the availability of TSO/Tx checksum
1992 * offload should be checked against the new MTU size as the
1993 * Tx FIFO size is just 2K.
1994 */
1995 JME_LOCK(sc);
1996 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1997 if_setcapenablebit(ifp, 0,
1998 IFCAP_TXCSUM | IFCAP_TSO4);
1999 if_sethwassistbits(ifp, 0,
2000 JME_CSUM_FEATURES | CSUM_TSO);
2001 VLAN_CAPABILITIES(ifp);
2002 }
2003 if_setmtu(ifp, ifr->ifr_mtu);
2004 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2005 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2006 jme_init_locked(sc);
2007 }
2008 JME_UNLOCK(sc);
2009 }
2010 break;
2011 case SIOCSIFFLAGS:
2012 JME_LOCK(sc);
2013 if ((if_getflags(ifp) & IFF_UP) != 0) {
2014 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2015 if (((if_getflags(ifp) ^ sc->jme_if_flags)
2016 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2017 jme_set_filter(sc);
2018 } else {
2019 if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
2020 jme_init_locked(sc);
2021 }
2022 } else {
2023 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2024 jme_stop(sc);
2025 }
2026 sc->jme_if_flags = if_getflags(ifp);
2027 JME_UNLOCK(sc);
2028 break;
2029 case SIOCADDMULTI:
2030 case SIOCDELMULTI:
2031 JME_LOCK(sc);
2032 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2033 jme_set_filter(sc);
2034 JME_UNLOCK(sc);
2035 break;
2036 case SIOCSIFMEDIA:
2037 case SIOCGIFMEDIA:
2038 mii = device_get_softc(sc->jme_miibus);
2039 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2040 break;
2041 case SIOCSIFCAP:
2042 JME_LOCK(sc);
2043 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2044 if ((mask & IFCAP_TXCSUM) != 0 &&
2045 if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
2046 if ((IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
2047 if_togglecapenable(ifp, IFCAP_TXCSUM);
2048 if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
2049 if_sethwassistbits(ifp, JME_CSUM_FEATURES, 0);
2050 else
2051 if_sethwassistbits(ifp, 0, JME_CSUM_FEATURES);
2052 }
2053 }
2054 if ((mask & IFCAP_RXCSUM) != 0 &&
2055 (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0) {
2056 if_togglecapenable(ifp, IFCAP_RXCSUM);
2057 reg = CSR_READ_4(sc, JME_RXMAC);
2058 reg &= ~RXMAC_CSUM_ENB;
2059 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2060 reg |= RXMAC_CSUM_ENB;
2061 CSR_WRITE_4(sc, JME_RXMAC, reg);
2062 }
2063 if ((mask & IFCAP_TSO4) != 0 &&
2064 if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
2065 if ((IFCAP_TSO4 & if_getcapabilities(ifp)) != 0) {
2066 if_togglecapenable(ifp, IFCAP_TSO4);
2067 if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
2068 if_sethwassistbits(ifp, CSUM_TSO, 0);
2069 else
2070 if_sethwassistbits(ifp, 0, CSUM_TSO);
2071 }
2072 }
2073 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2074 (IFCAP_WOL_MAGIC & if_getcapabilities(ifp)) != 0)
2075 if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
2076 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2077 (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
2078 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2079 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2080 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
2081 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2082 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2083 (IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) {
2084 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2085 jme_set_vlan(sc);
2086 }
2087 JME_UNLOCK(sc);
2088 VLAN_CAPABILITIES(ifp);
2089 break;
2090 default:
2091 error = ether_ioctl(ifp, cmd, data);
2092 break;
2093 }
2094
2095 return (error);
2096 }
2097
2098 static void
2099 jme_mac_config(struct jme_softc *sc)
2100 {
2101 struct mii_data *mii;
2102 uint32_t ghc, gpreg, rxmac, txmac, txpause;
2103 uint32_t txclk;
2104
2105 JME_LOCK_ASSERT(sc);
2106
2107 mii = device_get_softc(sc->jme_miibus);
2108
2109 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2110 DELAY(10);
2111 CSR_WRITE_4(sc, JME_GHC, 0);
2112 ghc = 0;
2113 txclk = 0;
2114 rxmac = CSR_READ_4(sc, JME_RXMAC);
2115 rxmac &= ~RXMAC_FC_ENB;
2116 txmac = CSR_READ_4(sc, JME_TXMAC);
2117 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2118 txpause = CSR_READ_4(sc, JME_TXPFC);
2119 txpause &= ~TXPFC_PAUSE_ENB;
2120 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2121 ghc |= GHC_FULL_DUPLEX;
2122 rxmac &= ~RXMAC_COLL_DET_ENB;
2123 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2124 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2125 TXMAC_FRAME_BURST);
2126 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2127 txpause |= TXPFC_PAUSE_ENB;
2128 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2129 rxmac |= RXMAC_FC_ENB;
2130 /* Disable retry transmit timer/retry limit. */
2131 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2132 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2133 } else {
2134 rxmac |= RXMAC_COLL_DET_ENB;
2135 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2136 /* Enable retry transmit timer/retry limit. */
2137 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2138 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2139 }
2140 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2141 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2142 case IFM_10_T:
2143 ghc |= GHC_SPEED_10;
2144 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2145 break;
2146 case IFM_100_TX:
2147 ghc |= GHC_SPEED_100;
2148 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2149 break;
2150 case IFM_1000_T:
2151 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2152 break;
2153 ghc |= GHC_SPEED_1000;
2154 txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2155 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2156 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2157 break;
2158 default:
2159 break;
2160 }
2161 if (sc->jme_rev == DEVICEID_JMC250 &&
2162 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2163 /*
2164 * Work around an occasional packet loss issue of the JMC250
2165 * A2 when it runs on half-duplex media.
2166 */
2167 gpreg = CSR_READ_4(sc, JME_GPREG1);
2168 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2169 gpreg &= ~GPREG1_HDPX_FIX;
2170 else
2171 gpreg |= GPREG1_HDPX_FIX;
2172 CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2173 /* Workaround CRC errors at 100Mbps on JMC250 A2. */
2174 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2175 /* Extend interface FIFO depth. */
2176 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2177 0x1B, 0x0000);
2178 } else {
2179 /* Select default interface FIFO depth. */
2180 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2181 0x1B, 0x0004);
2182 }
2183 }
2184 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2185 ghc |= txclk;
2186 CSR_WRITE_4(sc, JME_GHC, ghc);
2187 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2188 CSR_WRITE_4(sc, JME_TXMAC, txmac);
2189 CSR_WRITE_4(sc, JME_TXPFC, txpause);
2190 }
2191
2192 static void
2193 jme_link_task(void *arg, int pending)
2194 {
2195 struct jme_softc *sc;
2196 struct mii_data *mii;
2197 if_t ifp;
2198 struct jme_txdesc *txd;
2199 bus_addr_t paddr;
2200 int i;
2201
2202 sc = (struct jme_softc *)arg;
2203
2204 JME_LOCK(sc);
2205 mii = device_get_softc(sc->jme_miibus);
2206 ifp = sc->jme_ifp;
2207 if (mii == NULL || ifp == NULL ||
2208 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
2209 JME_UNLOCK(sc);
2210 return;
2211 }
2212
2213 sc->jme_flags &= ~JME_FLAG_LINK;
2214 if ((mii->mii_media_status & IFM_AVALID) != 0) {
2215 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2216 case IFM_10_T:
2217 case IFM_100_TX:
2218 sc->jme_flags |= JME_FLAG_LINK;
2219 break;
2220 case IFM_1000_T:
2221 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2222 break;
2223 sc->jme_flags |= JME_FLAG_LINK;
2224 break;
2225 default:
2226 break;
2227 }
2228 }
2229
2230 /*
2231 * Disabling the Rx/Tx MACs has a side effect of resetting the
2232 * JME_TXNDA/JME_RXNDA registers to the first address of the
2233 * Tx/Rx descriptor rings. So the driver should reset its
2234 * internal producer/consumer pointers and reclaim any
2235 * allocated resources. Note, just saving the values of the
2236 * JME_TXNDA and JME_RXNDA registers before stopping the MACs
2237 * and restoring the JME_TXNDA/JME_RXNDA registers afterwards
2238 * is not sufficient to ensure a correct MAC state, because
2239 * stopping MAC operation can take a while and the hardware
2240 * might have updated the JME_TXNDA/JME_RXNDA registers
2241 * during the stop operation.
2242 */
2243 /* Block execution of task. */
2244 taskqueue_block(sc->jme_tq);
2245 /* Disable interrupts and stop driver. */
2246 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2247 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2248 callout_stop(&sc->jme_tick_ch);
2249 sc->jme_watchdog_timer = 0;
2250
2251 /* Stop receiver/transmitter. */
2252 jme_stop_rx(sc);
2253 jme_stop_tx(sc);
2254
2255 /* XXX Drain all queued tasks. */
2256 JME_UNLOCK(sc);
2257 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2258 JME_LOCK(sc);
2259
2260 if (sc->jme_cdata.jme_rxhead != NULL)
2261 m_freem(sc->jme_cdata.jme_rxhead);
2262 JME_RXCHAIN_RESET(sc);
2263 jme_txeof(sc);
2264 if (sc->jme_cdata.jme_tx_cnt != 0) {
2265 /* Remove queued packets for transmit. */
2266 for (i = 0; i < JME_TX_RING_CNT; i++) {
2267 txd = &sc->jme_cdata.jme_txdesc[i];
2268 if (txd->tx_m != NULL) {
2269 bus_dmamap_sync(
2270 sc->jme_cdata.jme_tx_tag,
2271 txd->tx_dmamap,
2272 BUS_DMASYNC_POSTWRITE);
2273 bus_dmamap_unload(
2274 sc->jme_cdata.jme_tx_tag,
2275 txd->tx_dmamap);
2276 m_freem(txd->tx_m);
2277 txd->tx_m = NULL;
2278 txd->tx_ndesc = 0;
2279 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2280 }
2281 }
2282 }
2283
2284 /*
2285 * Reuse configured Rx descriptors and reset
2286 * producer/consumer index.
2287 */
2288 sc->jme_cdata.jme_rx_cons = 0;
2289 sc->jme_morework = 0;
2290 jme_init_tx_ring(sc);
2291 /* Initialize shadow status block. */
2292 jme_init_ssb(sc);
2293
2294 /* Program MAC with resolved speed/duplex/flow-control. */
2295 if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2296 jme_mac_config(sc);
2297 jme_stats_clear(sc);
2298
2299 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2300 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2301
2302 /* Set Tx ring address to the hardware. */
2303 paddr = JME_TX_RING_ADDR(sc, 0);
2304 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2305 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2306
2307 /* Set Rx ring address to the hardware. */
2308 paddr = JME_RX_RING_ADDR(sc, 0);
2309 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2310 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2311
2312 /* Restart receiver/transmitter. */
2313 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2314 RXCSR_RXQ_START);
2315 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2316 /* Lastly enable TX/RX clock. */
2317 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2318 CSR_WRITE_4(sc, JME_GHC,
2319 CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
2320 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
2321 CSR_WRITE_4(sc, JME_GPREG1,
2322 CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
2323 }
2324
2325 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2326 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2327 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2328 /* Unblock execution of task. */
2329 taskqueue_unblock(sc->jme_tq);
2330 /* Reenable interrupts. */
2331 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2332
2333 JME_UNLOCK(sc);
2334 }
2335
2336 static int
2337 jme_intr(void *arg)
2338 {
2339 struct jme_softc *sc;
2340 uint32_t status;
2341
2342 sc = (struct jme_softc *)arg;
2343
2344 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
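/*
 * A status of 0 means the interrupt was not ours (e.g. a shared
 * line); all-ones usually means the hardware is no longer responding,
 * so treat both as stray.
 */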
2345 if (status == 0 || status == 0xFFFFFFFF)
2346 return (FILTER_STRAY);
2347 /* Disable interrupts. */
2348 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2349 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2350
2351 return (FILTER_HANDLED);
2352 }
2353
2354 static void
2355 jme_int_task(void *arg, int pending)
2356 {
2357 struct jme_softc *sc;
2358 if_t ifp;
2359 uint32_t status;
2360 int more;
2361
2362 sc = (struct jme_softc *)arg;
2363 ifp = sc->jme_ifp;
2364
2365 JME_LOCK(sc);
2366 status = CSR_READ_4(sc, JME_INTR_STATUS);
2367 if (sc->jme_morework != 0) {
2368 sc->jme_morework = 0;
2369 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2370 }
2371 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2372 goto done;
2373 /* Reset PCC counter/timer and Ack interrupts. */
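/*
 * The TXQ/RXQ completion bits are stripped here and acknowledged only
 * together with their coalescing interrupts below, so completions are
 * not acknowledged before the corresponding coalescing event fires.
 */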
2374 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2375 if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2376 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2377 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2378 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2379 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2380 more = 0;
2381 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2382 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2383 more = jme_rxintr(sc, sc->jme_process_limit);
2384 if (more != 0)
2385 sc->jme_morework = 1;
2386 }
2387 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2388 /*
2389 * Notify the hardware of the availability of new
2390 * Rx buffers.
2391 * Reading RXCSR takes a very long time under
2392 * heavy load, so cache the RXCSR value and
2393 * write the cached value ORed with the kick
2394 * command to RXCSR. This saves one register
2395 * access cycle.
2396 */
2397 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2398 RXCSR_RX_ENB | RXCSR_RXQ_START);
2399 }
2400 if (!if_sendq_empty(ifp))
2401 jme_start_locked(ifp);
2402 }
2403
2404 if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2405 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2406 JME_UNLOCK(sc);
2407 return;
2408 }
2409 done:
2410 JME_UNLOCK(sc);
2411
2412 /* Reenable interrupts. */
2413 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2414 }
2415
2416 static void
2417 jme_txeof(struct jme_softc *sc)
2418 {
2419 if_t ifp;
2420 struct jme_txdesc *txd;
2421 uint32_t status;
2422 int cons, nsegs;
2423
2424 JME_LOCK_ASSERT(sc);
2425
2426 ifp = sc->jme_ifp;
2427
2428 cons = sc->jme_cdata.jme_tx_cons;
2429 if (cons == sc->jme_cdata.jme_tx_prod)
2430 return;
2431
2432 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2433 sc->jme_cdata.jme_tx_ring_map,
2434 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2435
2436 /*
2437 * Go through our Tx list and free mbufs for those
2438 * frames which have been transmitted.
2439 */
2440 for (; cons != sc->jme_cdata.jme_tx_prod;) {
2441 txd = &sc->jme_cdata.jme_txdesc[cons];
2442 status = le32toh(txd->tx_desc->flags);
2443 if ((status & JME_TD_OWN) == JME_TD_OWN)
2444 break;
2445
2446 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2447 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2448 else {
2449 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2450 if ((status & JME_TD_COLLISION) != 0)
2451 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2452 le32toh(txd->tx_desc->buflen) &
2453 JME_TD_BUF_LEN_MASK);
2454 }
2455 /*
2456 * Only the first descriptor of a multi-descriptor
2457 * transmission is updated, so the driver has to skip the
2458 * entire chain of buffers for the transmitted frame. In
2459 * other words, the JME_TD_OWN bit is valid only in the
2460 * first descriptor of a multi-descriptor transmission.
2461 */
2462 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2463 sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2464 JME_DESC_INC(cons, JME_TX_RING_CNT);
2465 }
2466
2467 /* Reclaim transferred mbufs. */
2468 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2469 BUS_DMASYNC_POSTWRITE);
2470 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2471
2472 KASSERT(txd->tx_m != NULL,
2473 ("%s: freeing NULL mbuf!\n", __func__));
2474 m_freem(txd->tx_m);
2475 txd->tx_m = NULL;
2476 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2477 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2478 ("%s: Active Tx desc counter was garbled\n", __func__));
2479 txd->tx_ndesc = 0;
2480 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2481 }
2482 sc->jme_cdata.jme_tx_cons = cons;
2483 /* Unarm the watchdog timer when there are no pending descriptors in the queue. */
2484 if (sc->jme_cdata.jme_tx_cnt == 0)
2485 sc->jme_watchdog_timer = 0;
2486
2487 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2488 sc->jme_cdata.jme_tx_ring_map,
2489 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2490 }
2491
2492 static __inline void
2493 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2494 {
2495 struct jme_desc *desc;
2496
2497 desc = &sc->jme_rdata.jme_rx_ring[cons];
2498 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2499 desc->buflen = htole32(MCLBYTES);
2500 }
2501
2502 /* Receive a frame. */
2503 static void
2504 jme_rxeof(struct jme_softc *sc)
2505 {
2506 if_t ifp;
2507 struct jme_desc *desc;
2508 struct jme_rxdesc *rxd;
2509 struct mbuf *mp, *m;
2510 uint32_t flags, status;
2511 int cons, count, nsegs;
2512
2513 JME_LOCK_ASSERT(sc);
2514
2515 ifp = sc->jme_ifp;
2516
2517 cons = sc->jme_cdata.jme_rx_cons;
2518 desc = &sc->jme_rdata.jme_rx_ring[cons];
2519 flags = le32toh(desc->flags);
2520 status = le32toh(desc->buflen);
2521 nsegs = JME_RX_NSEGS(status);
2522 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2523 if ((status & JME_RX_ERR_STAT) != 0) {
2524 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2525 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2526 #ifdef JME_SHOW_ERRORS
2527 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2528 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2529 #endif
2530 sc->jme_cdata.jme_rx_cons += nsegs;
2531 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2532 return;
2533 }
2534
2535 for (count = 0; count < nsegs; count++,
2536 JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2537 rxd = &sc->jme_cdata.jme_rxdesc[cons];
2538 mp = rxd->rx_m;
2539 /* Add a new receive buffer to the ring. */
2540 if (jme_newbuf(sc, rxd) != 0) {
2541 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2542 /* Reuse buffer. */
2543 for (; count < nsegs; count++) {
2544 jme_discard_rxbuf(sc, cons);
2545 JME_DESC_INC(cons, JME_RX_RING_CNT);
2546 }
2547 if (sc->jme_cdata.jme_rxhead != NULL) {
2548 m_freem(sc->jme_cdata.jme_rxhead);
2549 JME_RXCHAIN_RESET(sc);
2550 }
2551 break;
2552 }
2553
2554 /*
2555 * Assume we've received a full sized frame.
2556 * Actual size is fixed when we encounter the end of
2557 * a multi-segmented frame.
2558 */
2559 mp->m_len = MCLBYTES;
2560
2561 /* Chain received mbufs. */
2562 if (sc->jme_cdata.jme_rxhead == NULL) {
2563 sc->jme_cdata.jme_rxhead = mp;
2564 sc->jme_cdata.jme_rxtail = mp;
2565 } else {
2566 /*
2567 * Receive processor can receive a maximum frame
2568 * size of 65535 bytes.
2569 */
2570 mp->m_flags &= ~M_PKTHDR;
2571 sc->jme_cdata.jme_rxtail->m_next = mp;
2572 sc->jme_cdata.jme_rxtail = mp;
2573 }
2574
2575 if (count == nsegs - 1) {
2576 /* Last desc. for this frame. */
2577 m = sc->jme_cdata.jme_rxhead;
2578 m->m_flags |= M_PKTHDR;
2579 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
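/*
 * Distribute the frame length over the chain: the first mbuf
 * holds MCLBYTES minus the 10-byte pad, middle mbufs hold
 * MCLBYTES each, and the last mbuf holds the remainder of
 * jme_rxlen.
 */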
2580 if (nsegs > 1) {
2581 /* Set first mbuf size. */
2582 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2583 /* Set last mbuf size. */
2584 mp->m_len = sc->jme_cdata.jme_rxlen -
2585 ((MCLBYTES - JME_RX_PAD_BYTES) +
2586 (MCLBYTES * (nsegs - 2)));
2587 } else
2588 m->m_len = sc->jme_cdata.jme_rxlen;
2589 m->m_pkthdr.rcvif = ifp;
2590
2591 /*
2592 * Account for the 10 bytes of auto padding used
2593 * to align the IP header on a 32-bit boundary.
2594 * Also note that the CRC bytes are automatically
2595 * removed by the hardware.
2596 */
2597 m->m_data += JME_RX_PAD_BYTES;
2598
2599 /* Set checksum information. */
2600 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
2601 (flags & JME_RD_IPV4) != 0) {
2602 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2603 if ((flags & JME_RD_IPCSUM) != 0)
2604 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2605 if (((flags & JME_RD_MORE_FRAG) == 0) &&
2606 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2607 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2608 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2609 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2610 m->m_pkthdr.csum_flags |=
2611 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2612 m->m_pkthdr.csum_data = 0xffff;
2613 }
2614 }
2615
2616 /* Check for VLAN tagged packets. */
2617 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
2618 (flags & JME_RD_VLAN_TAG) != 0) {
2619 m->m_pkthdr.ether_vtag =
2620 flags & JME_RD_VLAN_MASK;
2621 m->m_flags |= M_VLANTAG;
2622 }
2623
2624 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2625 /* Pass it on. */
2626 JME_UNLOCK(sc);
2627 if_input(ifp, m);
2628 JME_LOCK(sc);
2629
2630 /* Reset mbuf chains. */
2631 JME_RXCHAIN_RESET(sc);
2632 }
2633 }
2634
2635 sc->jme_cdata.jme_rx_cons += nsegs;
2636 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2637 }
2638
2639 static int
2640 jme_rxintr(struct jme_softc *sc, int count)
2641 {
2642 struct jme_desc *desc;
2643 int nsegs, prog, pktlen;
2644
2645 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2646 sc->jme_cdata.jme_rx_ring_map,
2647 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2648
2649 for (prog = 0; count > 0; prog++) {
2650 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2651 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2652 break;
2653 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2654 break;
2655 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2656 /*
2657 * Check the number of segments against the received bytes.
2658 * A non-matching value would indicate that the hardware is
2659 * still updating the Rx descriptors. I'm not sure whether
2660 * this check is needed.
2661 */
2662 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2663 if (nsegs != howmany(pktlen, MCLBYTES))
2664 break;
2665 prog++;
2666 /* Received a frame. */
2667 jme_rxeof(sc);
2668 count -= nsegs;
2669 }
2670
2671 if (prog > 0)
2672 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2673 sc->jme_cdata.jme_rx_ring_map,
2674 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2675
2676 return (count > 0 ? 0 : EAGAIN);
2677 }
2678
2679 static void
2680 jme_tick(void *arg)
2681 {
2682 struct jme_softc *sc;
2683 struct mii_data *mii;
2684
2685 sc = (struct jme_softc *)arg;
2686
2687 JME_LOCK_ASSERT(sc);
2688
2689 mii = device_get_softc(sc->jme_miibus);
2690 mii_tick(mii);
2691 /*
2692 * Reclaim Tx buffers that have been completed. It's not
2693 * needed here but it would release allocated mbuf chains
2694 * faster and limit the maximum delay to one hz tick.
2695 */
2696 jme_txeof(sc);
2697 jme_stats_update(sc);
2698 jme_watchdog(sc);
2699 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2700 }
2701
2702 static void
2703 jme_reset(struct jme_softc *sc)
2704 {
2705 uint32_t ghc, gpreg;
2706
2707 /* Stop receiver, transmitter. */
2708 jme_stop_rx(sc);
2709 jme_stop_tx(sc);
2710
2711 /* Reset controller. */
2712 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2713 CSR_READ_4(sc, JME_GHC);
2714 DELAY(10);
2715 /*
2716 * Work around Rx FIFO overruns seen under certain conditions.
2717 * Explicitly synchronize the TX/RX clocks. The TX/RX clocks
2718 * should be enabled only after the TX/RX MACs are enabled.
2719 */
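/*
 * The sequence below toggles the TX/RX MAC clocks once around the
 * reset and leaves both disabled; they are re-enabled in
 * jme_link_task() only after the Tx/Rx MACs have been started.
 */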
2720 if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
2721 /* Disable TX clock. */
2722 CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
2723 /* Disable RX clock. */
2724 gpreg = CSR_READ_4(sc, JME_GPREG1);
2725 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2726 gpreg = CSR_READ_4(sc, JME_GPREG1);
2727 /* De-assert RESET but still disable TX clock. */
2728 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2729 ghc = CSR_READ_4(sc, JME_GHC);
2730
2731 /* Enable TX clock. */
2732 CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
2733 /* Enable RX clock. */
2734 CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
2735 CSR_READ_4(sc, JME_GPREG1);
2736
2737 /* Disable TX/RX clock again. */
2738 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2739 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2740 } else
2741 CSR_WRITE_4(sc, JME_GHC, 0);
2742 CSR_READ_4(sc, JME_GHC);
2743 DELAY(10);
2744 }
2745
2746 static void
2747 jme_init(void *xsc)
2748 {
2749 struct jme_softc *sc;
2750
2751 sc = (struct jme_softc *)xsc;
2752 JME_LOCK(sc);
2753 jme_init_locked(sc);
2754 JME_UNLOCK(sc);
2755 }
2756
2757 static void
2758 jme_init_locked(struct jme_softc *sc)
2759 {
2760 if_t ifp;
2761 struct mii_data *mii;
2762 bus_addr_t paddr;
2763 uint32_t reg;
2764 int error;
2765
2766 JME_LOCK_ASSERT(sc);
2767
2768 ifp = sc->jme_ifp;
2769 mii = device_get_softc(sc->jme_miibus);
2770
2771 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2772 return;
2773 /*
2774 * Cancel any pending I/O.
2775 */
2776 jme_stop(sc);
2777
2778 /*
2779 * Reset the chip to a known state.
2780 */
2781 jme_reset(sc);
2782
2783 /* Init descriptors. */
2784 error = jme_init_rx_ring(sc);
2785 if (error != 0) {
2786 device_printf(sc->jme_dev,
2787 "%s: initialization failed: no memory for Rx buffers.\n",
2788 __func__);
2789 jme_stop(sc);
2790 return;
2791 }
2792 jme_init_tx_ring(sc);
2793 /* Initialize shadow status block. */
2794 jme_init_ssb(sc);
2795
2796 /* Reprogram the station address. */
2797 jme_set_macaddr(sc, if_getlladdr(sc->jme_ifp));
2798
2799 /*
2800 * Configure Tx queue.
2801 * Tx priority queue weight value : 0
2802 * Tx FIFO threshold for processing next packet : 16QW
2803 * Maximum Tx DMA length : 512
2804 * Allow Tx DMA burst.
2805 */
2806 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2807 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2808 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2809 sc->jme_txcsr |= sc->jme_tx_dma_size;
2810 sc->jme_txcsr |= TXCSR_DMA_BURST;
2811 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2812
2813 /* Set Tx descriptor counter. */
2814 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2815
2816 /* Set Tx ring address to the hardware. */
2817 paddr = JME_TX_RING_ADDR(sc, 0);
2818 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2819 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2820
2821 /* Configure TxMAC parameters. */
2822 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2823 reg |= TXMAC_THRESH_1_PKT;
2824 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2825 CSR_WRITE_4(sc, JME_TXMAC, reg);
2826
2827 /*
2828 * Configure Rx queue.
2829 * FIFO full threshold for transmitting Tx pause packet : 128T
2830 * FIFO threshold for processing next packet : 128QW
2831 * Rx queue 0 select
2832 * Max Rx DMA length : 128
2833 * Rx descriptor retry : 32
2834 * Rx descriptor retry time gap : 256ns
2835 * Don't receive runt/bad frame.
2836 */
2837 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2838 /*
2839 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2840 * than 4K bytes will suffer from Rx FIFO overruns. So
2841 * decrease the FIFO threshold to reduce FIFO overruns for
2842 * frames larger than 4000 bytes.
2843 * For best performance with standard MTU sized frames use the
2844 * maximum allowable FIFO threshold, 128QW. Note these do not
2845 * hold for chips with full mask revision >= 2; for these
2846 * controllers 64QW and 128QW are not valid values.
2847 */
2848 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2849 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2850 else {
2851 if ((if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2852 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2853 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2854 else
2855 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2856 }
2857 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2858 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2859 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2860 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2861
2862 /* Set Rx descriptor counter. */
2863 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2864
2865 /* Set Rx ring address to the hardware. */
2866 paddr = JME_RX_RING_ADDR(sc, 0);
2867 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2868 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2869
2870 /* Clear receive filter. */
2871 CSR_WRITE_4(sc, JME_RXMAC, 0);
2872 /* Set up the receive filter. */
2873 jme_set_filter(sc);
2874 jme_set_vlan(sc);
2875
2876 /*
2877 * Disable all WOL bits as WOL can interfere with normal Rx
2878 * operation. Also clear the WOL detection status bits.
2879 */
2880 reg = CSR_READ_4(sc, JME_PMCS);
2881 reg &= ~PMCS_WOL_ENB_MASK;
2882 CSR_WRITE_4(sc, JME_PMCS, reg);
2883
2884 reg = CSR_READ_4(sc, JME_RXMAC);
2885 /*
2886 * Pad 10 bytes right before the received frame. This greatly
2887 * helps Rx performance on strict-alignment architectures as
2888 * the driver does not need to copy the frame to align the payload.
2889 */
2890 reg |= RXMAC_PAD_10BYTES;
2891 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2892 reg |= RXMAC_CSUM_ENB;
2893 CSR_WRITE_4(sc, JME_RXMAC, reg);
2894
2895 /* Configure general purpose reg0 */
2896 reg = CSR_READ_4(sc, JME_GPREG0);
2897 reg &= ~GPREG0_PCC_UNIT_MASK;
2898 /* Set PCC timer resolution to micro-seconds unit. */
2899 reg |= GPREG0_PCC_UNIT_US;
2900 /*
2901 * Disable all shadow register posting as we have to read
2902 * JME_INTR_STATUS register in jme_int_task. Also it seems
2903 * that it's hard to synchronize interrupt status between
2904 * hardware and software with shadow posting due to
2905 * requirements of bus_dmamap_sync(9).
2906 */
2907 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2908 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2909 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2910 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2911 /* Disable posting of DW0. */
2912 reg &= ~GPREG0_POST_DW0_ENB;
2913 /* Clear PME message. */
2914 reg &= ~GPREG0_PME_ENB;
2915 /* Set PHY address. */
2916 reg &= ~GPREG0_PHY_ADDR_MASK;
2917 reg |= sc->jme_phyaddr;
2918 CSR_WRITE_4(sc, JME_GPREG0, reg);
2919
2920 /* Configure Tx queue 0 packet completion coalescing. */
2921 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2922 PCCTX_COAL_TO_MASK;
2923 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2924 PCCTX_COAL_PKT_MASK;
2925 reg |= PCCTX_COAL_TXQ0;
2926 CSR_WRITE_4(sc, JME_PCCTX, reg);
2927
2928 /* Configure Rx queue 0 packet completion coalescing. */
2929 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2930 PCCRX_COAL_TO_MASK;
2931 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2932 PCCRX_COAL_PKT_MASK;
2933 CSR_WRITE_4(sc, JME_PCCRX0, reg);
2934
2935 /*
2936 * Configure PCD (Packet Completion Deferring). It seems PCD
2937 * generates an interrupt when the time interval between two
2938 * back-to-back incoming/outgoing packets is long enough for
2939 * its timer to count down to 0. The arrival of new packets
2940 * after the timer has started causes the PCD timer to restart.
2941 * Unfortunately, it's not clear how PCD is useful at the
2942 * moment, so just use the same values as the PCC parameters.
2943 */
2944 if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
2945 sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
2946 if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
2947 sc->jme_rx_pcd_to = PCDRX_TO_MAX;
2948 sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
2949 if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
2950 sc->jme_tx_pcd_to = PCDTX_TO_MAX;
2951 reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
2952 reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
2953 CSR_WRITE_4(sc, PCDRX_REG(0), reg);
2954 reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
2955 reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
2956 CSR_WRITE_4(sc, JME_PCDTX, reg);
2957 }
2958
2959 /* Configure shadow status block but don't enable posting. */
2960 paddr = sc->jme_rdata.jme_ssb_block_paddr;
2961 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2962 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2963
2964 /* Disable Timer 1 and Timer 2. */
2965 CSR_WRITE_4(sc, JME_TIMER1, 0);
2966 CSR_WRITE_4(sc, JME_TIMER2, 0);
2967
2968 /* Configure retry transmit period, retry limit value. */
2969 CSR_WRITE_4(sc, JME_TXTRHD,
2970 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2971 TXTRHD_RT_PERIOD_MASK) |
2972 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2973 TXTRHD_RT_LIMIT_MASK));
2974
2975 /* Disable RSS. */
2976 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2977
2978 /* Initialize the interrupt mask. */
2979 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2980 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2981
2982 /*
2983 * Enabling Tx/Rx DMA engines and Rx queue processing is
2984 * done after detection of valid link in jme_link_task.
2985 */
2986
2987 sc->jme_flags &= ~JME_FLAG_LINK;
2988 /* Set the current media. */
2989 mii_mediachg(mii);
2990
2991 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2992
2993 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2994 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2995 }
2996
2997 static void
2998 jme_stop(struct jme_softc *sc)
2999 {
3000 if_t ifp;
3001 struct jme_txdesc *txd;
3002 struct jme_rxdesc *rxd;
3003 int i;
3004
3005 JME_LOCK_ASSERT(sc);
3006 /*
3007 * Mark the interface down and cancel the watchdog timer.
3008 */
3009 ifp = sc->jme_ifp;
3010 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
3011 sc->jme_flags &= ~JME_FLAG_LINK;
3012 callout_stop(&sc->jme_tick_ch);
3013 sc->jme_watchdog_timer = 0;
3014
3015 /*
3016 * Disable interrupts.
3017 */
3018 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3019 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
3020
3021 /* Disable updating shadow status block. */
3022 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
3023 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
3024
3025 /* Stop receiver, transmitter. */
3026 jme_stop_rx(sc);
3027 jme_stop_tx(sc);
3028
3029 /* Reclaim Rx/Tx buffers that have been completed. */
3030 jme_rxintr(sc, JME_RX_RING_CNT);
3031 if (sc->jme_cdata.jme_rxhead != NULL)
3032 m_freem(sc->jme_cdata.jme_rxhead);
3033 JME_RXCHAIN_RESET(sc);
3034 jme_txeof(sc);
3035 /*
3036 * Free RX and TX mbufs still in the queues.
3037 */
3038 for (i = 0; i < JME_RX_RING_CNT; i++) {
3039 rxd = &sc->jme_cdata.jme_rxdesc[i];
3040 if (rxd->rx_m != NULL) {
3041 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
3042 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3043 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
3044 rxd->rx_dmamap);
3045 m_freem(rxd->rx_m);
3046 rxd->rx_m = NULL;
3047 }
3048 }
3049 for (i = 0; i < JME_TX_RING_CNT; i++) {
3050 txd = &sc->jme_cdata.jme_txdesc[i];
3051 if (txd->tx_m != NULL) {
3052 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
3053 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3054 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
3055 txd->tx_dmamap);
3056 m_freem(txd->tx_m);
3057 txd->tx_m = NULL;
3058 txd->tx_ndesc = 0;
3059 }
3060 }
3061 jme_stats_update(sc);
3062 jme_stats_save(sc);
3063 }
3064
3065 static void
3066 jme_stop_tx(struct jme_softc *sc)
3067 {
3068 uint32_t reg;
3069 int i;
3070
3071 reg = CSR_READ_4(sc, JME_TXCSR);
3072 if ((reg & TXCSR_TX_ENB) == 0)
3073 return;
3074 reg &= ~TXCSR_TX_ENB;
3075 CSR_WRITE_4(sc, JME_TXCSR, reg);
3076 for (i = JME_TIMEOUT; i > 0; i--) {
3077 DELAY(1);
3078 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3079 break;
3080 }
3081 if (i == 0)
3082 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3083 }
3084
3085 static void
3086 jme_stop_rx(struct jme_softc *sc)
3087 {
3088 uint32_t reg;
3089 int i;
3090
3091 reg = CSR_READ_4(sc, JME_RXCSR);
3092 if ((reg & RXCSR_RX_ENB) == 0)
3093 return;
3094 reg &= ~RXCSR_RX_ENB;
3095 CSR_WRITE_4(sc, JME_RXCSR, reg);
3096 for (i = JME_TIMEOUT; i > 0; i--) {
3097 DELAY(1);
3098 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3099 break;
3100 }
3101 if (i == 0)
3102 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3103 }
3104
3105 static void
3106 jme_init_tx_ring(struct jme_softc *sc)
3107 {
3108 struct jme_ring_data *rd;
3109 struct jme_txdesc *txd;
3110 int i;
3111
3112 sc->jme_cdata.jme_tx_prod = 0;
3113 sc->jme_cdata.jme_tx_cons = 0;
3114 sc->jme_cdata.jme_tx_cnt = 0;
3115
3116 rd = &sc->jme_rdata;
3117 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
3118 for (i = 0; i < JME_TX_RING_CNT; i++) {
3119 txd = &sc->jme_cdata.jme_txdesc[i];
3120 txd->tx_m = NULL;
3121 txd->tx_desc = &rd->jme_tx_ring[i];
3122 txd->tx_ndesc = 0;
3123 }
3124
3125 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
3126 sc->jme_cdata.jme_tx_ring_map,
3127 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3128 }
3129
3130 static void
3131 jme_init_ssb(struct jme_softc *sc)
3132 {
3133 struct jme_ring_data *rd;
3134
3135 rd = &sc->jme_rdata;
3136 bzero(rd->jme_ssb_block, JME_SSB_SIZE);
3137 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
3138 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3139 }
3140
3141 static int
3142 jme_init_rx_ring(struct jme_softc *sc)
3143 {
3144 struct jme_ring_data *rd;
3145 struct jme_rxdesc *rxd;
3146 int i;
3147
3148 sc->jme_cdata.jme_rx_cons = 0;
3149 JME_RXCHAIN_RESET(sc);
3150 sc->jme_morework = 0;
3151
3152 rd = &sc->jme_rdata;
3153 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3154 for (i = 0; i < JME_RX_RING_CNT; i++) {
3155 rxd = &sc->jme_cdata.jme_rxdesc[i];
3156 rxd->rx_m = NULL;
3157 rxd->rx_desc = &rd->jme_rx_ring[i];
3158 if (jme_newbuf(sc, rxd) != 0)
3159 return (ENOBUFS);
3160 }
3161
3162 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3163 sc->jme_cdata.jme_rx_ring_map,
3164 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3165
3166 return (0);
3167 }
3168
3169 static int
3170 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3171 {
3172 struct jme_desc *desc;
3173 struct mbuf *m;
3174 bus_dma_segment_t segs[1];
3175 bus_dmamap_t map;
3176 int nsegs;
3177
3178 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3179 if (m == NULL)
3180 return (ENOBUFS);
3181 /*
3182 * The JMC250 has a 64-bit boundary alignment limitation, so
3183 * jme(4) takes advantage of the 10-byte padding feature of the
3184 * hardware in order not to copy the entire frame to align the
3185 * IP header on a 32-bit boundary.
3186 */
3187 m->m_len = m->m_pkthdr.len = MCLBYTES;
3188
3189 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3190 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3191 m_freem(m);
3192 return (ENOBUFS);
3193 }
3194 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3195
3196 if (rxd->rx_m != NULL) {
3197 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3198 BUS_DMASYNC_POSTREAD);
3199 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
3200 }
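/*
 * The new mbuf was loaded into the spare DMA map above, so swap the
 * spare map with the descriptor's map; on a load failure the old
 * buffer and its mapping were left untouched.
 */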
3201 map = rxd->rx_dmamap;
3202 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3203 sc->jme_cdata.jme_rx_sparemap = map;
3204 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3205 BUS_DMASYNC_PREREAD);
3206 rxd->rx_m = m;
3207
3208 desc = rxd->rx_desc;
3209 desc->buflen = htole32(segs[0].ds_len);
3210 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3211 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3212 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
3213
3214 return (0);
3215 }
3216
3217 static void
3218 jme_set_vlan(struct jme_softc *sc)
3219 {
3220 if_t ifp;
3221 uint32_t reg;
3222
3223 JME_LOCK_ASSERT(sc);
3224
3225 ifp = sc->jme_ifp;
3226 reg = CSR_READ_4(sc, JME_RXMAC);
3227 reg &= ~RXMAC_VLAN_ENB;
3228 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
3229 reg |= RXMAC_VLAN_ENB;
3230 CSR_WRITE_4(sc, JME_RXMAC, reg);
3231 }
3232
3233 static u_int
3234 jme_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3235 {
3236 uint32_t crc, *mchash = arg;
3237
3238 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
3239
3240 /* Just want the 6 least significant bits. */
3241 crc &= 0x3f;
3242
3243 /* Set the corresponding bit in the hash table. */
3244 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3245
3246 return (1);
3247 }
3248
3249 static void
3250 jme_set_filter(struct jme_softc *sc)
3251 {
3252 if_t ifp;
3253 uint32_t mchash[2];
3254 uint32_t rxcfg;
3255
3256 JME_LOCK_ASSERT(sc);
3257
3258 ifp = sc->jme_ifp;
3259
3260 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3261 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3262 RXMAC_ALLMULTI);
3263 /* Always accept frames destined to our station address. */
3264 rxcfg |= RXMAC_UNICAST;
3265 if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
3266 rxcfg |= RXMAC_BROADCAST;
3267 if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3268 if ((if_getflags(ifp) & IFF_PROMISC) != 0)
3269 rxcfg |= RXMAC_PROMISC;
3270 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
3271 rxcfg |= RXMAC_ALLMULTI;
3272 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3273 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3274 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3275 return;
3276 }
3277
3278 /*
3279 * Set up the multicast address filter by passing all multicast
3280 * addresses through a CRC generator, and then using the low-order
3281 * 6 bits as an index into the 64 bit multicast hash table. The
3282 * high order bits select the register, while the rest of the bits
3283 * select the bit within the register.
3284 */
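/*
 * For example, for a hypothetical address whose big-endian CRC ends
 * in 0x23: the low 6 bits are 0x23, so bit 5 selects JME_MAR1 and
 * bits 0-4 set bit 3 within that register.
 */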
3285 rxcfg |= RXMAC_MULTICAST;
3286 bzero(mchash, sizeof(mchash));
3287 if_foreach_llmaddr(ifp, jme_hash_maddr, &mchash);
3288
3289 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3290 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3291 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3292 }
3293
3294 static void
3295 jme_stats_clear(struct jme_softc *sc)
3296 {
3297
3298 JME_LOCK_ASSERT(sc);
3299
3300 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3301 return;
3302
3303 /* Disable and clear counters. */
3304 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3305 /* Activate hw counters. */
3306 CSR_WRITE_4(sc, JME_STATCSR, 0);
3307 CSR_READ_4(sc, JME_STATCSR);
3308 bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3309 }
3310
3311 static void
3312 jme_stats_save(struct jme_softc *sc)
3313 {
3314
3315 JME_LOCK_ASSERT(sc);
3316
3317 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3318 return;
3319 /* Save current counters. */
3320 bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3321 /* Disable and clear counters. */
3322 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3323 }
3324
3325 static void
3326 jme_stats_update(struct jme_softc *sc)
3327 {
3328 struct jme_hw_stats *stat, *ostat;
3329 uint32_t reg;
3330
3331 JME_LOCK_ASSERT(sc);
3332
3333 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3334 return;
3335 stat = &sc->jme_stats;
3336 ostat = &sc->jme_ostats;
3337 stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3338 stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3339 reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3340 stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3341 STAT_RX_CRC_ERR_SHIFT;
3342 stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3343 STAT_RX_MII_ERR_SHIFT;
3344 reg = CSR_READ_4(sc, JME_STAT_RXERR);
3345 stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3346 STAT_RXERR_OFLOW_SHIFT;
3347 stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3348 STAT_RXERR_MPTY_SHIFT;
3349 reg = CSR_READ_4(sc, JME_STAT_FAIL);
3350 stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3351 stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3352
3353 /* Account for previous counters. */
3354 stat->rx_good_frames += ostat->rx_good_frames;
3355 stat->rx_crc_errs += ostat->rx_crc_errs;
3356 stat->rx_mii_errs += ostat->rx_mii_errs;
3357 stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3358 stat->rx_desc_empty += ostat->rx_desc_empty;
3359 stat->rx_bad_frames += ostat->rx_bad_frames;
3360 stat->tx_good_frames += ostat->tx_good_frames;
3361 stat->tx_bad_frames += ostat->tx_bad_frames;
3362 }
3363
3364 static void
3365 jme_phy_down(struct jme_softc *sc)
3366 {
3367 uint32_t reg;
3368
3369 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
3370 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3371 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3372 reg |= 0x0000000F;
3373 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3374 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3375 reg &= ~PE1_GIGA_PDOWN_MASK;
3376 reg |= PE1_GIGA_PDOWN_D3;
3377 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3378 }
3379 }
3380
3381 static void
3382 jme_phy_up(struct jme_softc *sc)
3383 {
3384 uint32_t reg;
3385 uint16_t bmcr;
3386
3387 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
3388 bmcr &= ~BMCR_PDOWN;
3389 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
3390 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3391 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3392 reg &= ~0x0000000F;
3393 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3394 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3395 reg &= ~PE1_GIGA_PDOWN_MASK;
3396 reg |= PE1_GIGA_PDOWN_DIS;
3397 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3398 }
3399 }
3400
3401 static int
3402 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3403 {
3404 int error, value;
3405
3406 if (arg1 == NULL)
3407 return (EINVAL);
3408 value = *(int *)arg1;
3409 error = sysctl_handle_int(oidp, &value, 0, req);
3410 if (error || req->newptr == NULL)
3411 return (error);
3412 if (value < low || value > high)
3413 return (EINVAL);
3414 *(int *)arg1 = value;
3415
3416 return (0);
3417 }
3418
3419 static int
3420 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3421 {
3422 return (sysctl_int_range(oidp, arg1, arg2, req,
3423 PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3424 }
3425
3426 static int
3427 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3428 {
3429 return (sysctl_int_range(oidp, arg1, arg2, req,
3430 PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3431 }
3432
3433 static int
3434 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3435 {
3436 return (sysctl_int_range(oidp, arg1, arg2, req,
3437 PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3438 }
3439
3440 static int
3441 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3442 {
3443 return (sysctl_int_range(oidp, arg1, arg2, req,
3444 PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3445 }
3446
3447 static int
3448 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3449 {
3450 return (sysctl_int_range(oidp, arg1, arg2, req,
3451 JME_PROC_MIN, JME_PROC_MAX));
3452 }
3453