/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef IFPOLL_ENABLE
static void	nfe_npoll(struct ifnet *, struct ifpoll_info *);
static void	nfe_npoll_compat(struct ifnet *, void *, int);
static void	nfe_disable_intrs(struct nfe_softc *);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	nfe_rxeof(struct nfe_softc *);
static int	nfe_txeof(struct nfe_softc *, int);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *, struct ifaltq_subque *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);

#define NFE_DEBUG
#ifdef NFE_DEBUG

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
/*
 * hw timer simulated interrupt moderation @4000Hz.  Negative values
 * disable the timer when the discrete interrupt rate falls below
 * the moderation rate.
 *
 * XXX 8000Hz might be better but if the interrupt is shared it can
 * blow out the cpu.
 */
static int	nfe_imtime = -250;	/* uS */

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);
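/*
 * Example /boot/loader.conf settings using the tunables registered
 * above (the values are illustrative, not recommendations):
 *
 *	hw.nfe.imtimer="-125"	# dynamic interrupt moderation @8000Hz
 *	hw.nfe.imtimer="125"	# always-on interrupt moderation @8000Hz
 *	hw.nfe.imtimer="0"	# interrupt moderation disabled
 *	hw.nfe.debug="1"	# enable DPRINTF() output
 */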

#define DPRINTF(sc, fmt, ...) do {			\
	if ((sc)->sc_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if ((sc)->sc_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it may be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	DEVMETHOD_END
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, NULL, NULL);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, NULL, NULL);

/*
 * NOTE: NFE_WORDALIGN support is guesswork right now.
 */
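/*
 * Match the PCI vendor/device pair against nfe_devices[] and, on a hit,
 * record the chip's capabilities (jumbo frames, 40-bit DMA addressing,
 * hardware checksum/VLAN, power-control quirks) in sc_caps for use at
 * attach time.
 */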
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
				/* FALL THROUGH */
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				sc->sc_caps |= NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_HW_VLAN |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_WORDALIGN;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t lowaddr;
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
	if (nfe_imtime < 0) {
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

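	/*
	 * If the BIOS left the chip in a low-power state, bring it back
	 * to D0.  The BAR and interrupt line registers are saved and
	 * restored around the transition because they are lost when the
	 * power state changes.
	 */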
#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate top level DMA tag
	 */
	if (sc->sc_caps & NFE_40BIT_ADDR)
		lowaddr = NFE_BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			lowaddr,		/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			&sc->sc_dtag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		goto fail;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = nfe_npoll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_nmbclusters = sc->sc_rx_ring_count;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_caps &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));

#ifdef IFPOLL_ENABLE
	ifpoll_compat_setup(&sc->sc_npoll, ctx, (struct sysctl_oid *)tree,
	    device_get_unit(dev), ifp->if_serializer);
#endif

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

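/*
 * MII status change callback: reprogram the MAC's PHY interface, random
 * seed and link speed registers to match the media the PHY negotiated.
 */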
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef IFPOLL_ENABLE

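/*
 * Polling-mode support: while IFF_NPOLLING is active the chip's
 * interrupts stay masked and the RX/TX rings are reaped from the
 * netisr polling loop via nfe_npoll_compat().
 */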
static void
nfe_npoll_compat(struct ifnet *ifp, void *arg __unused, int count __unused)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_rxeof(sc);
	nfe_txeof(sc, 1);
}

static void
nfe_disable_intrs(struct nfe_softc *sc)
{
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
	sc->sc_npoll.ifpc_stcount = 0;
}

static void
nfe_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (info != NULL) {
		int cpuid = sc->sc_npoll.ifpc_cpuid;

		info->ifpi_rx[cpuid].poll_func = nfe_npoll_compat;
		info->ifpi_rx[cpuid].arg = NULL;
		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

		if (ifp->if_flags & IFF_RUNNING)
			nfe_disable_intrs(sc);
		ifq_set_cpuid(&ifp->if_snd, cpuid);
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_enable_intrs(sc);
		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));
	}
}

#endif	/* IFPOLL_ENABLE */

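/*
 * Interrupt handler.  Acknowledges NFE_IRQ_STATUS, maintains a decaying
 * average of the interrupt rate and, in dynamic moderation mode
 * (NFE_F_DYN_IM), switches between the hardware IM timer and discrete
 * TX/RX interrupts depending on whether that average exceeds
 * 1000000 / sc_imtime.
 */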
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (sc->sc_rate_second != time_uptime) {
		/*
		 * Calculate sc_rate_avg - interrupts per second.
		 */
		sc->sc_rate_second = time_uptime;
		if (sc->sc_rate_avg < sc->sc_rate_acc)
			sc->sc_rate_avg = sc->sc_rate_acc;
		else
			sc->sc_rate_avg = (sc->sc_rate_avg * 3 +
					   sc->sc_rate_acc) / 4;
		sc->sc_rate_acc = 0;
	} else if (sc->sc_rate_avg < sc->sc_rate_acc) {
		/*
		 * Don't wait for a tick to roll over if we are taking
		 * a lot of interrupts.
		 */
		sc->sc_rate_avg = sc->sc_rate_acc;
	}

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;
		int rate;

		/* check Rx ring */
		ret = nfe_rxeof(sc);

		/* check Tx ring */
		ret |= nfe_txeof(sc, 1);

		/* update the rate accumulator */
		if (ret)
			++sc->sc_rate_acc;

		if (sc->sc_flags & NFE_F_DYN_IM) {
			rate = 1000000 / sc->sc_imtime;
			if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 &&
			    sc->sc_rate_avg > rate) {
				/*
				 * Use the hardware timer to reduce the
				 * interrupt rate if the discrete interrupt
				 * rate has exceeded our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if ((sc->sc_flags & NFE_F_IRQ_TIMER) &&
				   sc->sc_rate_avg <= rate) {
				/*
				 * Use discrete TX/RX interrupts if the rate
				 * has fallen below our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;

				/*
				 * Recollect, mainly to avoid the possible race
				 * introduced by changing interrupt masks.
				 */
				nfe_rxeof(sc);
				nfe_txeof(sc, 1);
			}
		}
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			return EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

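/*
 * Reap completed RX descriptors: replace each filled buffer with a
 * fresh one, fix up checksum flags and hand the mbuf to the stack.
 * Returns nonzero if at least one descriptor was processed.
 */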
static int
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;

	reap = 0;
	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		IFNET_STAT_INC(ifp, ipackets, 1);
		ifp->if_input(ifp, m, NULL, -1);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}
	return reap;
}

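/*
 * Reap transmitted descriptors between ring->next and ring->cur,
 * freeing the associated mbufs and accounting errors.  If 'start' is
 * set and the send queue is non-empty, restart transmission.  Returns
 * nonzero if any descriptor was reclaimed.
 */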
static int
nfe_txeof(struct nfe_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%pb%i\n",
					  NFE_V1_TXERR, flags);
				IFNET_STAT_INC(ifp, oerrors, 1);
			} else {
				IFNET_STAT_INC(ifp, opackets, 1);
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%pb%i\n",
					  NFE_V2_TXERR, flags);
				IFNET_STAT_INC(ifp, oerrors, 1);
			} else {
				IFNET_STAT_INC(ifp, opackets, 1);
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}

	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifq_clr_oactive(&ifp->if_snd);

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);

	if (data != NULL)
		return 1;
	else
		return 0;
}

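/*
 * Map an mbuf chain into the TX ring.  The chain is DMA-loaded (with
 * defragmentation if needed), descriptors are filled front to back,
 * and NFE_TX_VALID is then set back to front so the hardware never
 * sees a partially built chain.
 */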
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j, maxsegs, nsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("not enough segments %d,%d", maxsegs, sc->sc_tx_spare));

	error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	error = 0;

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < nsegs; i++) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		data = &ring->data[j];

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= sc->sc_tx_ring_count);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_caps & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;
back:
	if (error)
		m_freem(m0);
	return error;
}

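/*
 * if_start handler: drain the send queue into the TX ring, reclaiming
 * finished descriptors on demand, then kick the transmitter once if
 * anything was queued.
 */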
static void
nfe_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0, oactive = 0;
	struct mbuf *m0;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		int error;

		if (sc->sc_tx_ring_count - ring->queued <
		    sc->sc_tx_spare + NFE_NSEG_RSVD) {
			if (oactive) {
				ifq_set_oactive(&ifp->if_snd);
				break;
			}

			nfe_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m0);

		error = nfe_encap(sc, ring, m0);
		if (error) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			if (error == EFBIG) {
				if (oactive) {
					ifq_set_oactive(&ifp->if_snd);
					break;
				}
				nfe_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}

	if (count == 0)	/* nothing sent */
		return;

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	IFNET_STAT_INC(ifp, oerrors, 1);
}

static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_stop(sc);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_mac_reset(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_F_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_F_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_POLL, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_caps & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_caps & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr));

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_TX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->txq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (sc->sc_rx_ring_count - 1) << 16 |
		  (sc->sc_tx_ring_count - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		nfe_disable_intrs(sc);
	else
#endif
	nfe_enable_intrs(sc);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * If we had stuff in the tx ring before, it has all been cleaned
	 * out now, so we are not going to get an interrupt; jump-start
	 * any pending output.
	 */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;

#define WAITMAX	50000

	/*
	 * Abort Tx
	 */
	NFE_WRITE(sc, NFE_TX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop TX\n");
	DELAY(100);

	/*
	 * Disable Rx
	 */
	NFE_WRITE(sc, NFE_RX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop RX\n");
	DELAY(100);

#undef WAITMAX

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

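/*
 * Allocate the RX descriptor ring, the per-descriptor mbuf DMA maps
 * and, when the chip supports it, the jumbo buffer pool.  Jumbo pool
 * allocation failure is non-fatal; the driver simply falls back to
 * standard-MTU operation.
 */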
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void *)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void *)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_rx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	if (sc->sc_caps & NFE_JUMBO_SUP) {
		ring->jbuf =
		    kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
			    M_DEVBUF, M_WAITOK | M_ZERO);

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			kfree(ring->jbuf, M_DEVBUF);
			ring->jbuf = NULL;
			/* Allow jumbo frame allocation to fail */
		}
	}

	ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
				  &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
1718 "could not create %dth RX mbuf DMA mapn", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}
		nfe_set_ready_rxdesc(sc, ring, i);
	}
	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

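/*
 * Jumbo buffer pool: one contiguous DMA chunk carved into NFE_JBYTES
 * slices.  Buffers are handed out from jfreelist by nfe_jalloc() and
 * reference-counted via nfe_jref()/nfe_jfree(), which are presumably
 * installed as the mbuf external-storage ref/free callbacks when a
 * jumbo buffer is attached (see nfe_newbuf_jumbo()); a buffer returns
 * to the freelist when its last reference is dropped.
 */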
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_dmamem_t dmem;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    NFE_JPOOL_SIZE(sc),
				    BUS_DMA_WAITOK, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo buffer\n");
		return error;
	}
	ring->jtag = dmem.dmem_tag;
	ring->jmap = dmem.dmem_map;
	ring->jpool = dmem.dmem_addr;
	physaddr = dmem.dmem_busaddr;

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}

static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void *)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void *)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_tx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES,
			BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
			&ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
1987 for (j = 0; j < i; ++j)
1988 bus_dmamap_destroy(ring->data_tag, ring->data[i].map);
1989 bus_dma_tag_destroy(ring->data_tag);
1990 ring->data_tag = NULL;
1991 return error;
1992 }
1993
1994 static void
nfe_reset_tx_ring(struct nfe_softc * sc,struct nfe_tx_ring * ring)1995 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1996 {
1997 int i;
1998
1999 for (i = 0; i < sc->sc_tx_ring_count; i++) {
2000 struct nfe_tx_data *data = &ring->data[i];
2001
2002 if (sc->sc_caps & NFE_40BIT_ADDR)
2003 ring->desc64[i].flags = 0;
2004 else
2005 ring->desc32[i].flags = 0;
2006
2007 if (data->m != NULL) {
2008 bus_dmamap_unload(ring->data_tag, data->map);
2009 m_freem(data->m);
2010 data->m = NULL;
2011 }
2012 }
2013
2014 ring->queued = 0;
2015 ring->cur = ring->next = 0;
2016 }
2017
2018 static int
nfe_init_tx_ring(struct nfe_softc * sc __unused,struct nfe_tx_ring * ring __unused)2019 nfe_init_tx_ring(struct nfe_softc *sc __unused,
2020 struct nfe_tx_ring *ring __unused)
2021 {
2022 return 0;
2023 }
2024
2025 static void
nfe_free_tx_ring(struct nfe_softc * sc,struct nfe_tx_ring * ring)2026 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
2027 {
2028 if (ring->data_tag != NULL) {
2029 struct nfe_tx_data *data;
2030 int i;
2031
2032 for (i = 0; i < sc->sc_tx_ring_count; ++i) {
2033 data = &ring->data[i];
2034
2035 if (data->m != NULL) {
2036 bus_dmamap_unload(ring->data_tag, data->map);
2037 m_freem(data->m);
2038 }
2039 bus_dmamap_destroy(ring->data_tag, data->map);
2040 }
2041
2042 bus_dma_tag_destroy(ring->data_tag);
2043 }
2044
2045 if (ring->data != NULL)
2046 kfree(ring->data, M_DEVBUF);
2047
2048 if (ring->tag != NULL) {
2049 void *desc;
2050
2051 if (sc->sc_caps & NFE_40BIT_ADDR)
2052 desc = ring->desc64;
2053 else
2054 desc = ring->desc32;
2055
2056 bus_dmamap_unload(ring->tag, ring->map);
2057 bus_dmamem_free(ring->tag, desc, ring->map);
2058 bus_dma_tag_destroy(ring->tag);
2059 }
2060 }
2061
2062 static int
nfe_ifmedia_upd(struct ifnet * ifp)2063 nfe_ifmedia_upd(struct ifnet *ifp)
2064 {
2065 struct nfe_softc *sc = ifp->if_softc;
2066 struct mii_data *mii = device_get_softc(sc->sc_miibus);
2067
2068 ASSERT_SERIALIZED(ifp->if_serializer);
2069
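	/*
	 * If more than one PHY instance is attached, reset every PHY
	 * before changing the media setting.
	 */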
	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

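	/*
	 * Compute a single address/mask pair that matches every
	 * subscribed group: "addr" keeps the bits common to all
	 * multicast addresses, while "mask" ends up with a 1 in each
	 * bit position where all of the addresses agree.
	 */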
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

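	/*
	 * Depending on the chip (NFE_FIX_EADDR), the MAC address is
	 * stored in the MACADDR registers either in corrected or in
	 * byte-reversed order.
	 */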
	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int nsegs, error;

	m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

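	/*
	 * Load the new mbuf into the spare DMA map first.  If the load
	 * fails, the old mbuf stays mapped in the ring slot and the RX
	 * descriptor can simply be reused.
	 */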
	error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&sc->arpcom.ac_if,
			    "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	if (data->m != NULL) {
		/* Sync and unload the originally mapped mbuf */
		bus_dmamap_sync(ring->data_tag, data->map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_tag, data->map);
	}

	/* Swap this DMA map with the tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? M_WAITOK : M_NOWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
		    "-- packet dropped!\n");
		return ENOBUFS;
	}

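	/*
	 * Attach the jumbo buffer to the mbuf as external storage;
	 * nfe_jfree()/nfe_jref() manage its reference count and return
	 * it to the free list when the last reference goes away.
	 */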
	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
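	/*
	 * 64-bit descriptors split the bus address into two 32-bit
	 * words, high word first; 32-bit descriptors store it whole.
	 */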
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

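/*
 * Sysctl handler for the interrupt moderation time (sc_imtime,
 * apparently in microseconds; cf. the 128uS note in
 * nfe_enable_intrs()).  A negative value additionally selects
 * dynamic interrupt moderation (NFE_F_DYN_IM); its magnitude is
 * used as the moderation time.
 */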
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		if (NFE_IMTIME(v) == 0)
			v = 0;
		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_NPOLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			nfe_enable_intrs(sc);
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */

	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

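	/* MCP51 revisions A3 and newer need an extra power-up bit. */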
	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}

static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}