/*	$NetBSD: if_nfe.c,v 1.61 2016/06/10 13:27:14 ozaki-r Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.61 2016/06/10 13:27:14 ozaki-r Exp $");

#include "opt_inet.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#endif

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

static int nfe_ifflags_cb(struct ethercom *);

int	nfe_match(device_t, cfdata_t, void *);
void	nfe_attach(device_t, device_t, void *);
int	nfe_detach(device_t, int);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct ifnet *);
int	nfe_miibus_readreg(device_t, int, int);
void	nfe_miibus_writereg(device_t, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, void *);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct nfe_jbuf *nfe_jalloc(struct nfe_softc *, int);
void	nfe_jfree(struct mbuf *, void *, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
void	nfe_poweron(device_t);
bool	nfe_resume(device_t, const pmf_qual_t *);

CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc),
    nfe_match, nfe_attach, nfe_detach, NULL);

/* #define NFE_NO_JUMBO */

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* deal with naming differences */

#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

#ifdef _LP64
#define __LP64__ 1
#endif

const struct nfe_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < __arraycount(nfe_devices); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype, csr;
	int mii_flags = 0;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_pc = pa->pa_pc;
	pci_aprint_devinfo(pa, NULL);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &sc->sc_mems) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_flags = 0;

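	/*
	 * Pick optional features per chip model.  Later MCP variants add
	 * 40-bit DMA addressing, hardware checksum offload, hardware VLAN
	 * tagging and power management; NFE_CORRECT_MACADDR marks models
	 * whose MAC address registers use the alternate byte order handled
	 * in nfe_get_macaddr().
	 */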
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		mii_flags = MIIF_DOPAUSE;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (pci_dma64_available(pa) && (sc->sc_flags & NFE_40BIT_ADDR) != 0)
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	nfe_poweron(self);

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
		goto fail;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	if (sc->sc_flags & NFE_USE_JUMBO)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, mii_flags);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (pmf_device_register(self, NULL, nfe_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}
}

int
nfe_detach(device_t self, int flags)
{
	struct nfe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	nfe_stop(ifp, 1);

	pmf_device_deregister(self);
	callout_destroy(&sc->sc_tick_ch);
	ether_ifdetach(ifp);
	if_detach(ifp);
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	nfe_free_rx_ring(sc, &sc->rxq);
	mutex_destroy(&sc->rxq.mtx);
	nfe_free_tx_ring(sc, &sc->txq);

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		nfe_set_macaddr(sc, sc->sc_enaddr);
	} else {
		NFE_WRITE(sc, NFE_MACADDR_LO,
		    sc->sc_enaddr[0] << 8 | sc->sc_enaddr[1]);
		NFE_WRITE(sc, NFE_MACADDR_HI,
		    sc->sc_enaddr[2] << 24 | sc->sc_enaddr[3] << 16 |
		    sc->sc_enaddr[4] << 8 | sc->sc_enaddr[5]);
	}

	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}

	splx(s);

	return 0;
}

void
nfe_miibus_statchg(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

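/*
 * MII access goes through the NFE_PHY_CTL mailbox: post the PHY and
 * register numbers (plus the data, for a write), then poll until the
 * chip clears NFE_PHY_BUSY.  Both the read and write paths below give
 * up after 1000 * 100us.
 */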
int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

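/*
 * Interrupt handler: read and acknowledge NFE_IRQ_STATUS until no wanted
 * bits remain, servicing the Rx and Tx rings and link state changes as
 * flagged, then restart transmission if packets are still queued.
 */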
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR|NFE_IRQ_RX_NOBUF|NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	if (handled && !IF_IS_EMPTY(&ifp->if_snd))
		nfe_start(ifp);

	return handled;
}

static int
nfe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct nfe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	/*
	 * If only the PROMISC flag changes, then
	 * don't do a full re-init of the chip, just update
	 * the Rx filter.
	 */
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		nfe_setmulti(sc);

	return 0;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

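/*
 * Sync a range [start, end) of Tx descriptors.  If the range wraps past
 * the end of the ring (end <= start), the sync is done in two pieces:
 * from 'start' to the last descriptor, then from the first descriptor
 * to 'end'.
 */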
void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
		    (char *)&sc->txq.desc64[end] -
		    (char *)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
	    (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc32 - (char *)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc64 - (char *)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

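/*
 * Rx completion: walk the ring from rxq.cur and stop at the first
 * descriptor that still has NFE_RX_READY set, i.e. is still owned by
 * the chip.  Each completed frame is passed up with a freshly loaded
 * buffer left in its ring slot; if no replacement buffer can be
 * obtained, the frame is dropped and the old buffer reused.
 */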
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len, i;

	desc32 = NULL;
	desc64 = NULL;
	for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) {
		data = &sc->rxq.data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if ((flags & NFE_RX_READY) != 0)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				if (len > MCLBYTES) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);
				m = mnew;
				goto mbufcopied;
			} else {
				MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0,
				    nfe_jfree, sc);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
				    mtod(data->m, char *) -
				    (char *)sc->rxq.jpool,
				    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

				physaddr = jbuf->physaddr;
			}
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

mbufcopied:
		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);

		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * XXX
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * XXX
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * other protocols?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}
		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		if_percpuq_enqueue(ifp->if_percpuq, m);

skip1:
		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	/* update current RX pointer */
	sc->rxq.cur = i;
}

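/*
 * Tx completion: reclaim descriptors from txq.next up to the first one
 * that still has NFE_TX_VALID set (not yet processed by the chip),
 * unloading the DMA map and freeing the mbuf chain once its last
 * fragment has been transmitted.
 */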
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];

	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}

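/*
 * Map an outgoing mbuf chain and queue it for transmission, one
 * descriptor per DMA segment.  The checksum flags, the VLAN tag and the
 * valid bit all go into the first descriptor, but only after the rest
 * of the chain is set up, so the chip never sees a partially built
 * chain.
 */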
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
#if NVLAN > 0
	struct m_tag *mtag;
	uint32_t vtag = 0;
#endif
	int error, i, first;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;

	flags = 0;
	csumflags = 0;
	first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL)
		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);
#endif
	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = 0;
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
#if NVLAN > 0
		sc->txq.desc64[first].vtag = htole32(vtag);
#endif
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		bpf_mtap(ifp, m0);
	}

	if (sc->txq.queued != old) {
		/* packets are queued */
		if (sc->sc_flags & NFE_40BIT_ADDR)
			nfe_txdesc64_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else
			nfe_txdesc32_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timeout\n");

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;
	int rc = 0, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	s = splnet();
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	nfe_intr(sc); /* XXX clear IRQ status registers */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
	splx(s);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	nfe_tick(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	return rc;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

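/*
 * Allocate the Rx ring.  This follows the usual bus_dma sequence:
 * create a DMA map, allocate and map the descriptor memory, load it,
 * then populate every slot with a pre-allocated receive buffer (a
 * buffer from the private jumbo pool, or an ordinary mbuf cluster).
 */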
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
				data->map = NULL;
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map\n");
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}

	nfe_jpool_free(sc);
}

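/*
 * Take a jumbo buffer off the free list and record which pool slot now
 * backs Rx ring entry 'i', so that nfe_rxeof() can recover the slot's
 * physical address before replacing the buffer.
 */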
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc, int i)
{
	struct nfe_jbuf *jbuf;

	mutex_enter(&sc->rxq.mtx);
	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL)
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	mutex_exit(&sc->rxq.mtx);
	if (jbuf == NULL)
		return NULL;
	sc->rxq.jbufmap[i] =
	    ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		aprint_error_dev(sc->sc_dev,
		    "request to free a buffer (%p) not managed by us\n", buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	mutex_enter(&sc->rxq.mtx);
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
	mutex_exit(&sc->rxq.mtx);

	if (m != NULL)
		pool_cache_put(mb_cache, m);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	char *buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create jumbo DMA map\n");
		ring->jmap = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load jumbo DMA map\n");
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
		ring->jmap = NULL;
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
		ring->jpool = NULL;
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			ring->data[i].map = NULL;
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

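/*
 * Set up the hardware Rx filter.  The chip filters on a single
 * address/mask register pair (NFE_MULTIADDR/NFE_MULTIMASK), so reduce
 * the multicast list to the bits all enabled addresses have in common:
 * 'addr' accumulates the AND of the addresses, and after the loop
 * 'mask' holds the bit positions on which they all agree.  A range
 * entry forces IFF_ALLMULTI.
 */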
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		memset(addr, 0, ETHER_ADDR_LEN);
		memset(mask, 0, ETHER_ADDR_LEN);
		goto done;
	}

	memcpy(addr, etherbroadcastaddr, ETHER_ADDR_LEN);
	memcpy(mask, etherbroadcastaddr, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(addr, 0, ETHER_ADDR_LEN);
			memset(mask, 0, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

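/*
 * Read the factory MAC address from the chip.  The address registers
 * are laid out in opposite byte orders depending on the model;
 * NFE_CORRECT_MACADDR (set per chip model, or from the
 * NFE_MAC_ADDR_INORDER probe in nfe_attach()) selects which of the two
 * layouts applies.
 */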
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

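/*
 * Bring the MAC out of its power-saving state on chips with power
 * management (NFE_PWR_MGMT); without this the interface can stay asleep
 * after a cold boot or resume.  Called from nfe_attach() and
 * nfe_resume().
 */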
void
nfe_poweron(device_t self)
{
	struct nfe_softc *sc = device_private(self);

	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
}

bool
nfe_resume(device_t dv, const pmf_qual_t *qual)
{
	nfe_poweron(dv);

	return true;
}