1 /*
2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
3 *
4 * Copyright (c) 2001-2008, Intel Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 *
34 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
35 *
36 * This code is derived from software contributed to The DragonFly Project
37 * by Matthew Dillon <dillon@backplane.com>
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * 3. Neither the name of The DragonFly Project nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific, prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 */
66
67 #include "opt_ifpoll.h"
68 #include "opt_emx.h"
69
70 #include <sys/param.h>
71 #include <sys/bus.h>
72 #include <sys/endian.h>
73 #include <sys/interrupt.h>
74 #include <sys/kernel.h>
75 #include <sys/ktr.h>
76 #include <sys/malloc.h>
77 #include <sys/mbuf.h>
78 #include <sys/proc.h>
79 #include <sys/rman.h>
80 #include <sys/serialize.h>
81 #include <sys/serialize2.h>
82 #include <sys/socket.h>
83 #include <sys/sockio.h>
84 #include <sys/sysctl.h>
85 #include <sys/systm.h>
86
87 #include <net/bpf.h>
88 #include <net/ethernet.h>
89 #include <net/if.h>
90 #include <net/if_arp.h>
91 #include <net/if_dl.h>
92 #include <net/if_media.h>
93 #include <net/ifq_var.h>
94 #include <net/if_ringmap.h>
95 #include <net/toeplitz.h>
96 #include <net/toeplitz2.h>
97 #include <net/vlan/if_vlan_var.h>
98 #include <net/vlan/if_vlan_ether.h>
99 #include <net/if_poll.h>
100
101 #include <netinet/in_systm.h>
102 #include <netinet/in.h>
103 #include <netinet/ip.h>
104 #include <netinet/tcp.h>
105 #include <netinet/udp.h>
106
107 #include <bus/pci/pcivar.h>
108 #include <bus/pci/pcireg.h>
109
110 #include <dev/netif/ig_hal/e1000_api.h>
111 #include <dev/netif/ig_hal/e1000_82571.h>
112 #include <dev/netif/ig_hal/e1000_dragonfly.h>
113 #include <dev/netif/emx/if_emx.h>
114
115 #define DEBUG_HW 0
116
117 #ifdef EMX_RSS_DEBUG
118 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
119 do { \
120 if (sc->rss_debug >= lvl) \
121 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
122 } while (0)
123 #else /* !EMX_RSS_DEBUG */
124 #define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
125 #endif /* EMX_RSS_DEBUG */
126
127 #define EMX_NAME "Intel(R) PRO/1000 "
128
129 #define EMX_DEVICE(id) \
130 { EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
131 #define EMX_DEVICE_NULL { 0, 0, NULL }
132
133 static const struct emx_device {
134 uint16_t vid;
135 uint16_t did;
136 const char *desc;
137 } emx_devices[] = {
138 EMX_DEVICE(82571EB_COPPER),
139 EMX_DEVICE(82571EB_FIBER),
140 EMX_DEVICE(82571EB_SERDES),
141 EMX_DEVICE(82571EB_SERDES_DUAL),
142 EMX_DEVICE(82571EB_SERDES_QUAD),
143 EMX_DEVICE(82571EB_QUAD_COPPER),
144 EMX_DEVICE(82571EB_QUAD_COPPER_BP),
145 EMX_DEVICE(82571EB_QUAD_COPPER_LP),
146 EMX_DEVICE(82571EB_QUAD_FIBER),
147 EMX_DEVICE(82571PT_QUAD_COPPER),
148
149 EMX_DEVICE(82572EI_COPPER),
150 EMX_DEVICE(82572EI_FIBER),
151 EMX_DEVICE(82572EI_SERDES),
152 EMX_DEVICE(82572EI),
153
154 EMX_DEVICE(82573E),
155 EMX_DEVICE(82573E_IAMT),
156 EMX_DEVICE(82573L),
157
158 EMX_DEVICE(80003ES2LAN_COPPER_SPT),
159 EMX_DEVICE(80003ES2LAN_SERDES_SPT),
160 EMX_DEVICE(80003ES2LAN_COPPER_DPT),
161 EMX_DEVICE(80003ES2LAN_SERDES_DPT),
162
163 EMX_DEVICE(82574L),
164 EMX_DEVICE(82574LA),
165
166 EMX_DEVICE(PCH_LPT_I217_LM),
167 EMX_DEVICE(PCH_LPT_I217_V),
168 EMX_DEVICE(PCH_LPTLP_I218_LM),
169 EMX_DEVICE(PCH_LPTLP_I218_V),
170 EMX_DEVICE(PCH_I218_LM2),
171 EMX_DEVICE(PCH_I218_V2),
172 EMX_DEVICE(PCH_I218_LM3),
173 EMX_DEVICE(PCH_I218_V3),
174 EMX_DEVICE(PCH_SPT_I219_LM),
175 EMX_DEVICE(PCH_SPT_I219_V),
176 EMX_DEVICE(PCH_SPT_I219_LM2),
177 EMX_DEVICE(PCH_SPT_I219_V2),
178 EMX_DEVICE(PCH_LBG_I219_LM3),
179 EMX_DEVICE(PCH_SPT_I219_LM4),
180 EMX_DEVICE(PCH_SPT_I219_V4),
181 EMX_DEVICE(PCH_SPT_I219_LM5),
182 EMX_DEVICE(PCH_SPT_I219_V5),
183 EMX_DEVICE(PCH_CNP_I219_LM6),
184 EMX_DEVICE(PCH_CNP_I219_V6),
185 EMX_DEVICE(PCH_CNP_I219_LM7),
186 EMX_DEVICE(PCH_CNP_I219_V7),
187 EMX_DEVICE(PCH_ICP_I219_LM8),
188 EMX_DEVICE(PCH_ICP_I219_V8),
189 EMX_DEVICE(PCH_ICP_I219_LM9),
190 EMX_DEVICE(PCH_ICP_I219_V9),
191 EMX_DEVICE(PCH_CMP_I219_LM10),
192 EMX_DEVICE(PCH_CMP_I219_V10),
193 EMX_DEVICE(PCH_CMP_I219_LM11),
194 EMX_DEVICE(PCH_CMP_I219_V11),
195 EMX_DEVICE(PCH_CMP_I219_LM12),
196 EMX_DEVICE(PCH_CMP_I219_V12),
197 EMX_DEVICE(PCH_TGP_I219_LM13),
198 EMX_DEVICE(PCH_TGP_I219_V13),
199 EMX_DEVICE(PCH_TGP_I219_LM14),
200 EMX_DEVICE(PCH_TGP_I219_V14),
201 EMX_DEVICE(PCH_TGP_I219_LM15),
202 EMX_DEVICE(PCH_TGP_I219_V15),
203 EMX_DEVICE(PCH_ADP_I219_LM16),
204 EMX_DEVICE(PCH_ADP_I219_V16),
205 EMX_DEVICE(PCH_ADP_I219_LM17),
206 EMX_DEVICE(PCH_ADP_I219_V17),
207 EMX_DEVICE(PCH_MTP_I219_LM18),
208 EMX_DEVICE(PCH_MTP_I219_V18),
209 EMX_DEVICE(PCH_MTP_I219_LM19),
210 EMX_DEVICE(PCH_MTP_I219_V19),
211
212 /* required last entry */
213 EMX_DEVICE_NULL
214 };
215
216 static int emx_probe(device_t);
217 static int emx_attach(device_t);
218 static int emx_detach(device_t);
219 static int emx_shutdown(device_t);
220 static int emx_suspend(device_t);
221 static int emx_resume(device_t);
222
223 static void emx_init(void *);
224 static void emx_stop(struct emx_softc *);
225 static int emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
226 static void emx_start(struct ifnet *, struct ifaltq_subque *);
227 #ifdef IFPOLL_ENABLE
228 static void emx_npoll(struct ifnet *, struct ifpoll_info *);
229 static void emx_npoll_status(struct ifnet *);
230 static void emx_npoll_tx(struct ifnet *, void *, int);
231 static void emx_npoll_rx(struct ifnet *, void *, int);
232 #endif
233 static void emx_watchdog(struct ifaltq_subque *);
234 static void emx_media_status(struct ifnet *, struct ifmediareq *);
235 static int emx_media_change(struct ifnet *);
236 static void emx_timer(void *);
237 static void emx_serialize(struct ifnet *, enum ifnet_serialize);
238 static void emx_deserialize(struct ifnet *, enum ifnet_serialize);
239 static int emx_tryserialize(struct ifnet *, enum ifnet_serialize);
240 #ifdef INVARIANTS
241 static void emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
242 boolean_t);
243 #endif
244
245 static void emx_intr(void *);
246 static void emx_intr_mask(void *);
247 static void emx_intr_body(struct emx_softc *, boolean_t);
248 static void emx_rxeof(struct emx_rxdata *, int);
249 static void emx_txeof(struct emx_txdata *);
250 static void emx_tx_collect(struct emx_txdata *, boolean_t);
251 static void emx_txgc_timer(void *);
252 static void emx_tx_purge(struct emx_softc *);
253 static void emx_enable_intr(struct emx_softc *);
254 static void emx_disable_intr(struct emx_softc *);
255
256 static int emx_dma_alloc(struct emx_softc *);
257 static void emx_dma_free(struct emx_softc *);
258 static void emx_init_tx_ring(struct emx_txdata *);
259 static int emx_init_rx_ring(struct emx_rxdata *);
260 static void emx_free_tx_ring(struct emx_txdata *);
261 static void emx_free_rx_ring(struct emx_rxdata *);
262 static int emx_create_tx_ring(struct emx_txdata *);
263 static int emx_create_rx_ring(struct emx_rxdata *);
264 static void emx_destroy_tx_ring(struct emx_txdata *, int);
265 static void emx_destroy_rx_ring(struct emx_rxdata *, int);
266 static int emx_newbuf(struct emx_rxdata *, int, int);
267 static int emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
268 static int emx_txcsum(struct emx_txdata *, struct mbuf *,
269 uint32_t *, uint32_t *);
270 static int emx_tso_pullup(struct emx_txdata *, struct mbuf **);
271 static int emx_tso_setup(struct emx_txdata *, struct mbuf *,
272 uint32_t *, uint32_t *);
273 static int emx_get_txring_inuse(const struct emx_softc *, boolean_t);
274
275 static int emx_is_valid_eaddr(const uint8_t *);
276 static int emx_reset(struct emx_softc *);
277 static void emx_setup_ifp(struct emx_softc *);
278 static void emx_init_tx_unit(struct emx_softc *);
279 static void emx_init_rx_unit(struct emx_softc *);
280 static void emx_update_stats(struct emx_softc *);
281 static void emx_set_promisc(struct emx_softc *);
282 static void emx_disable_promisc(struct emx_softc *);
283 static void emx_set_multi(struct emx_softc *);
284 static void emx_update_link_status(struct emx_softc *);
285 static void emx_smartspeed(struct emx_softc *);
286 static void emx_set_itr(struct emx_softc *, uint32_t);
287 static void emx_disable_aspm(struct emx_softc *);
288 static void emx_flush_tx_ring(struct emx_softc *);
289 static void emx_flush_rx_ring(struct emx_softc *);
290 static void emx_flush_txrx_ring(struct emx_softc *);
291
292 static void emx_print_debug_info(struct emx_softc *);
293 static void emx_print_nvm_info(struct emx_softc *);
294 static void emx_print_hw_stats(struct emx_softc *);
295
296 static int emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
297 static int emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
298 static int emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
299 static int emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
300 static int emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
301 static void emx_add_sysctl(struct emx_softc *);
302
303 static void emx_serialize_skipmain(struct emx_softc *);
304 static void emx_deserialize_skipmain(struct emx_softc *);
305
306 /* Management and WOL Support */
307 static void emx_get_mgmt(struct emx_softc *);
308 static void emx_rel_mgmt(struct emx_softc *);
309 static void emx_get_hw_control(struct emx_softc *);
310 static void emx_rel_hw_control(struct emx_softc *);
311 static void emx_enable_wol(device_t);
312
313 static device_method_t emx_methods[] = {
314 /* Device interface */
315 DEVMETHOD(device_probe, emx_probe),
316 DEVMETHOD(device_attach, emx_attach),
317 DEVMETHOD(device_detach, emx_detach),
318 DEVMETHOD(device_shutdown, emx_shutdown),
319 DEVMETHOD(device_suspend, emx_suspend),
320 DEVMETHOD(device_resume, emx_resume),
321 DEVMETHOD_END
322 };
323
324 static driver_t emx_driver = {
325 "emx",
326 emx_methods,
327 sizeof(struct emx_softc),
328 };
329
330 static devclass_t emx_devclass;
331
332 DECLARE_DUMMY_MODULE(if_emx);
333 MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
334 DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);
335
336 /*
337 * Tunables
338 */
339 static int emx_int_throttle_ceil = EMX_DEFAULT_ITR;
340 static int emx_rxd = EMX_DEFAULT_RXD;
341 static int emx_txd = EMX_DEFAULT_TXD;
342 static int emx_smart_pwr_down = 0;
343 static int emx_rxr = 0;
344 static int emx_txr = 1;
345
346 /* Controls whether promiscuous also shows bad packets */
347 static int emx_debug_sbp = 0;
348
349 static int emx_82573_workaround = 1;
350 static int emx_msi_enable = 1;
351
352 static char emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;
353
354 TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
355 TUNABLE_INT("hw.emx.rxd", &emx_rxd);
356 TUNABLE_INT("hw.emx.rxr", &emx_rxr);
357 TUNABLE_INT("hw.emx.txd", &emx_txd);
358 TUNABLE_INT("hw.emx.txr", &emx_txr);
359 TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
360 TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
361 TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
362 TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
363 TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));
364
365 /* Global used in WOL setup with multiport cards */
366 static int emx_global_quad_port_a = 0;
367
368 /* Set this to one to display debug statistics */
369 static int emx_display_debug_stats = 0;
370
371 #if !defined(KTR_IF_EMX)
372 #define KTR_IF_EMX KTR_ALL
373 #endif
374 KTR_INFO_MASTER(if_emx);
375 KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
376 KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
377 KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
378 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
379 KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
380 #define logif(name) KTR_LOG(if_emx_ ## name)
381
382 static __inline void
383 emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
384 {
385 rxd->rxd_bufaddr = htole64(rxbuf->paddr);
386 /* DD bit must be cleared */
387 rxd->rxd_staterr = 0;
388 }
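/*
 * Annotation: the MAC sets the Descriptor Done (DD) bit in rxd_staterr
 * when it writes a received frame back into this slot, and emx_rxeof()
 * keys off that bit.  A recycled descriptor must therefore present DD
 * as zero, or a stale write-back would be mistaken for a fresh packet;
 * clearing the whole status/error word is the simplest safe way.
 */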
389
390 static __inline void
391 emx_free_txbuf(struct emx_txdata *tdata, struct emx_txbuf *tx_buffer)
392 {
393
394 KKASSERT(tx_buffer->m_head != NULL);
395 KKASSERT(tdata->tx_nmbuf > 0);
396 tdata->tx_nmbuf--;
397
398 bus_dmamap_unload(tdata->txtag, tx_buffer->map);
399 m_freem(tx_buffer->m_head);
400 tx_buffer->m_head = NULL;
401 }
402
403 static __inline void
404 emx_tx_intr(struct emx_txdata *tdata)
405 {
406
407 emx_txeof(tdata);
408 if (!ifsq_is_empty(tdata->ifsq))
409 ifsq_devstart(tdata->ifsq);
410 }
411
412 static __inline void
413 emx_try_txgc(struct emx_txdata *tdata, int16_t dec)
414 {
415
416 if (tdata->tx_running > 0) {
417 tdata->tx_running -= dec;
418 if (tdata->tx_running <= 0 && tdata->tx_nmbuf &&
419 tdata->num_tx_desc_avail < tdata->num_tx_desc &&
420 tdata->num_tx_desc_avail + tdata->tx_intr_nsegs >
421 tdata->num_tx_desc)
422 emx_tx_collect(tdata, TRUE);
423 }
424 }
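/*
 * Annotation (a reading of the heuristic above, not authoritative):
 * tx_running is a countdown that emx_start() re-arms whenever packets
 * are queued and that this helper decrements once per timer tick.
 * When it reaches zero while mbufs are still held (tx_nmbuf != 0) and
 * the number of outstanding descriptors is nonzero but smaller than
 * tx_intr_nsegs -- i.e. too few to have requested a status write-back,
 * so no TX interrupt is expected to clean them -- the ring is garbage
 * collected by force.
 */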
425
426 static void
427 emx_txgc_timer(void *xtdata)
428 {
429 struct emx_txdata *tdata = xtdata;
430 struct ifnet *ifp = &tdata->sc->arpcom.ac_if;
431
432 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
433 (IFF_RUNNING | IFF_UP))
434 return;
435
436 if (!lwkt_serialize_try(&tdata->tx_serialize))
437 goto done;
438
439 if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
440 (IFF_RUNNING | IFF_UP)) {
441 lwkt_serialize_exit(&tdata->tx_serialize);
442 return;
443 }
444 emx_try_txgc(tdata, EMX_TX_RUNNING_DEC);
445
446 lwkt_serialize_exit(&tdata->tx_serialize);
447 done:
448 callout_reset(&tdata->tx_gc_timer, 1, emx_txgc_timer, tdata);
449 }
450
451 static __inline void
452 emx_rxcsum(uint32_t staterr, struct mbuf *mp)
453 {
454 /* If the Ignore Checksum Indication (IXSM) bit is set, do nothing */
455 if (staterr & E1000_RXD_STAT_IXSM)
456 return;
457
458 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
459 E1000_RXD_STAT_IPCS)
460 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
461
462 if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
463 E1000_RXD_STAT_TCPCS) {
464 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
465 CSUM_PSEUDO_HDR |
466 CSUM_FRAG_NOT_CHECKED;
467 mp->m_pkthdr.csum_data = htons(0xffff);
468 }
469 }
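/*
 * Annotation: these are the standard BSD mbuf checksum conventions.
 * CSUM_IP_CHECKED | CSUM_IP_VALID means the MAC verified the IPv4
 * header checksum; CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data
 * set to 0xffff means the full TCP/UDP checksum, pseudo header
 * included, verified correct, so the stack can skip the software
 * check entirely.
 */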
470
471 static __inline struct pktinfo *
472 emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
473 uint32_t mrq, uint32_t hash, uint32_t staterr)
474 {
475 switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
476 case EMX_RXDMRQ_IPV4_TCP:
477 pi->pi_netisr = NETISR_IP;
478 pi->pi_flags = 0;
479 pi->pi_l3proto = IPPROTO_TCP;
480 break;
481
482 case EMX_RXDMRQ_IPV6_TCP:
483 pi->pi_netisr = NETISR_IPV6;
484 pi->pi_flags = 0;
485 pi->pi_l3proto = IPPROTO_TCP;
486 break;
487
488 case EMX_RXDMRQ_IPV4:
489 if (staterr & E1000_RXD_STAT_IXSM)
490 return NULL;
491
492 if ((staterr &
493 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
494 E1000_RXD_STAT_TCPCS) {
495 pi->pi_netisr = NETISR_IP;
496 pi->pi_flags = 0;
497 pi->pi_l3proto = IPPROTO_UDP;
498 break;
499 }
500 /* FALL THROUGH */
501 default:
502 return NULL;
503 }
504
505 m_sethash(m, toeplitz_hash(hash));
506 return pi;
507 }
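/*
 * Annotation: m_sethash() records the Toeplitz hash so the packet can
 * be dispatched to the netisr that owns its RSS bucket, keeping each
 * flow on one CPU.  A NULL return indicates the hardware could not
 * classify the packet, in which case the caller presumably falls back
 * to the ordinary, unhashed input path.
 */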
508
509 static int
510 emx_probe(device_t dev)
511 {
512 const struct emx_device *d;
513 uint16_t vid, did;
514
515 vid = pci_get_vendor(dev);
516 did = pci_get_device(dev);
517
518 for (d = emx_devices; d->desc != NULL; ++d) {
519 if (vid == d->vid && did == d->did) {
520 device_set_desc(dev, d->desc);
521 device_set_async_attach(dev, TRUE);
522 return 0;
523 }
524 }
525 return ENXIO;
526 }
527
528 static int
529 emx_attach(device_t dev)
530 {
531 struct emx_softc *sc = device_get_softc(dev);
532 int error = 0, i, throttle, msi_enable;
533 int tx_ring_max, ring_cnt;
534 u_int intr_flags;
535 uint16_t eeprom_data, device_id, apme_mask;
536 driver_intr_t *intr_func;
537 char flowctrl[IFM_ETH_FC_STRLEN];
538
539 /*
540 * Setup RX rings
541 */
542 for (i = 0; i < EMX_NRX_RING; ++i) {
543 sc->rx_data[i].sc = sc;
544 sc->rx_data[i].idx = i;
545 }
546
547 /*
548 * Setup TX ring
549 */
550 for (i = 0; i < EMX_NTX_RING; ++i) {
551 sc->tx_data[i].sc = sc;
552 sc->tx_data[i].idx = i;
553 callout_init_mp(&sc->tx_data[i].tx_gc_timer);
554 }
555
556 /*
557 * Initialize serializers
558 */
559 lwkt_serialize_init(&sc->main_serialize);
560 for (i = 0; i < EMX_NTX_RING; ++i)
561 lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
562 for (i = 0; i < EMX_NRX_RING; ++i)
563 lwkt_serialize_init(&sc->rx_data[i].rx_serialize);
564
565 /*
566 * Initialize serializer array
567 */
568 i = 0;
569
570 KKASSERT(i < EMX_NSERIALIZE);
571 sc->serializes[i++] = &sc->main_serialize;
572
573 KKASSERT(i < EMX_NSERIALIZE);
574 sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
575 KKASSERT(i < EMX_NSERIALIZE);
576 sc->serializes[i++] = &sc->tx_data[1].tx_serialize;
577
578 KKASSERT(i < EMX_NSERIALIZE);
579 sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
580 KKASSERT(i < EMX_NSERIALIZE);
581 sc->serializes[i++] = &sc->rx_data[1].rx_serialize;
582
583 KKASSERT(i == EMX_NSERIALIZE);
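/*
 * Annotation: the array order -- main serializer first, then all TX,
 * then all RX -- appears deliberate, giving every path that holds
 * multiple serializers (e.g. ifnet_serialize_all()) one consistent
 * acquisition order and thereby avoiding lock-order deadlocks.  The
 * hard-coded [0]/[1] indexing simply mirrors EMX_NTX_RING and
 * EMX_NRX_RING both being 2, as the KKASSERTs verify.
 */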
584
585 ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
586 emx_media_change, emx_media_status);
587 callout_init_mp(&sc->timer);
588
589 sc->dev = sc->osdep.dev = dev;
590
591 /*
592 * Determine hardware and mac type
593 */
594 sc->hw.vendor_id = pci_get_vendor(dev);
595 sc->hw.device_id = pci_get_device(dev);
596 sc->hw.revision_id = pci_get_revid(dev);
597 sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
598 sc->hw.subsystem_device_id = pci_get_subdevice(dev);
599
600 if (e1000_set_mac_type(&sc->hw))
601 return ENXIO;
602
603 /* Enable bus mastering */
604 pci_enable_busmaster(dev);
605
606 /*
607 * Allocate IO memory
608 */
609 sc->memory_rid = EMX_BAR_MEM;
610 sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
611 &sc->memory_rid, RF_ACTIVE);
612 if (sc->memory == NULL) {
613 device_printf(dev, "Unable to allocate bus resource: memory\n");
614 error = ENXIO;
615 goto fail;
616 }
617 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
618 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);
619
620 /* XXX This is quite goofy, it is not actually used */
621 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;
622
623 /*
624 * Don't enable MSI-X on 82574, see:
625 * 82574 specification update errata #15
626 *
627 * Don't enable MSI on 82571/82572, see:
628 * 82571/82572 specification update errata #63
629 */
630 msi_enable = emx_msi_enable;
631 if (msi_enable &&
632 (sc->hw.mac.type == e1000_82571 ||
633 sc->hw.mac.type == e1000_82572))
634 msi_enable = 0;
635 again:
636 /*
637 * Allocate interrupt
638 */
639 sc->intr_type = pci_alloc_1intr(dev, msi_enable,
640 &sc->intr_rid, &intr_flags);
641
642 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
643 int unshared;
644
645 unshared = device_getenv_int(dev, "irq.unshared", 0);
646 if (!unshared) {
647 sc->flags |= EMX_FLAG_SHARED_INTR;
648 if (bootverbose)
649 device_printf(dev, "IRQ shared\n");
650 } else {
651 intr_flags &= ~RF_SHAREABLE;
652 if (bootverbose)
653 device_printf(dev, "IRQ unshared\n");
654 }
655 }
656
657 sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
658 intr_flags);
659 if (sc->intr_res == NULL) {
660 device_printf(dev, "Unable to allocate bus resource: %s\n",
661 sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr");
662 if (!msi_enable) {
663 /* Retry with MSI. */
664 msi_enable = 1;
665 sc->flags &= ~EMX_FLAG_SHARED_INTR;
666 goto again;
667 }
668 error = ENXIO;
669 goto fail;
670 }
671
672 /* Save PCI command register for Shared Code */
673 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
674 sc->hw.back = &sc->osdep;
675
676 /*
677 * For I217/I218, we need to map the flash memory and this
678 * must happen after the MAC is identified.
679 */
680 if (sc->hw.mac.type == e1000_pch_lpt) {
681 sc->flash_rid = EMX_BAR_FLASH;
682
683 sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
684 &sc->flash_rid, RF_ACTIVE);
685 if (sc->flash == NULL) {
686 device_printf(dev, "Mapping of Flash failed\n");
687 error = ENXIO;
688 goto fail;
689 }
690 sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
691 sc->osdep.flash_bus_space_handle =
692 rman_get_bushandle(sc->flash);
693
694 /*
695 * This is used in the shared code
696 * XXX this goof is actually not used.
697 */
698 sc->hw.flash_address = (uint8_t *)sc->flash;
699 } else if (sc->hw.mac.type >= e1000_pch_spt) {
700 /*
701 * On the new SPT device the flash is not a separate BAR;
702 * rather it is also in BAR0, so we use the same tag and
703 * an offset handle for the FLASH read/write macros
704 * in the shared code.
705 */
706 sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag;
707 sc->osdep.flash_bus_space_handle =
708 sc->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR;
709 }
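/*
 * Annotation (sketch): with the tag/handle set up this way, a flash
 * register access in the shared code such as
 *
 *	bus_space_read_4(osdep->flash_bus_space_tag,
 *	    osdep->flash_bus_space_handle, reg);
 *
 * effectively reads BAR0 at offset E1000_FLASH_BASE_ADDR + reg, which
 * is where SPT-class devices expose the flash registers.  Folding the
 * offset into the bus handle works for the flat memory-mapped spaces
 * used here, though it is not a portable bus_space idiom everywhere.
 */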
710
711 /* Do Shared Code initialization */
712 if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
713 device_printf(dev, "Setup of Shared code failed\n");
714 error = ENXIO;
715 goto fail;
716 }
717 e1000_get_bus_info(&sc->hw);
718
719 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
720 sc->hw.phy.autoneg_wait_to_complete = FALSE;
721 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
722
723 /*
724 * Interrupt throttle rate
725 */
726 throttle = device_getenv_int(dev, "int_throttle_ceil",
727 emx_int_throttle_ceil);
728 if (throttle == 0) {
729 sc->int_throttle_ceil = 0;
730 } else {
731 if (throttle < 0)
732 throttle = EMX_DEFAULT_ITR;
733
734 /* Recalculate the tunable value to get the exact frequency. */
735 throttle = 1000000000 / 256 / throttle;
736
737 /* Upper 16 bits of ITR are reserved and should be zero */
738 if (throttle & 0xffff0000)
739 throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;
740
741 sc->int_throttle_ceil = 1000000000 / 256 / throttle;
742 }
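/*
 * Annotation: the ITR register counts in 256ns units, so
 * 1000000000 / 256 / rate converts interrupts-per-second into a
 * register interval and, being its own inverse, converts back.  For
 * example, a requested ceiling of 6000 ints/s gives an interval of
 * 3906250 / 6000 = 651 units (~166.7us), and int_throttle_ceil is then
 * recomputed as 3906250 / 651 = 6000, the exact rate the hardware will
 * honor.  The 6000 here is illustrative; the default is EMX_DEFAULT_ITR.
 */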
743
744 e1000_init_script_state_82541(&sc->hw, TRUE);
745 e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);
746
747 /* Copper options */
748 if (sc->hw.phy.media_type == e1000_media_type_copper) {
749 sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
750 sc->hw.phy.disable_polarity_correction = FALSE;
751 sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
752 }
753
754 /* Set the frame limits assuming standard ethernet sized frames. */
755 sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
756
757 /* This controls when hardware reports transmit completion status. */
758 sc->hw.mac.report_tx_early = 1;
759
760 /*
761 * Calculate # of RX/TX rings
762 */
763 ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
764 sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, EMX_NRX_RING);
765
766 tx_ring_max = 1;
767 if (sc->hw.mac.type == e1000_82571 ||
768 sc->hw.mac.type == e1000_82572 ||
769 sc->hw.mac.type == e1000_80003es2lan ||
770 sc->hw.mac.type == e1000_pch_lpt ||
771 sc->hw.mac.type == e1000_pch_spt ||
772 sc->hw.mac.type == e1000_pch_cnp ||
773 sc->hw.mac.type == e1000_82574)
774 tx_ring_max = EMX_NTX_RING;
775 ring_cnt = device_getenv_int(dev, "txr", emx_txr);
776 sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, tx_ring_max);
777
778 if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);
779 sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
780 sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);
781
782 /* Allocate RX/TX rings' busdma(9) stuffs */
783 error = emx_dma_alloc(sc);
784 if (error)
785 goto fail;
786
787 /* Allocate multicast array memory. */
788 sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
789 M_DEVBUF, M_WAITOK);
790
791 /* Indicate SOL/IDER usage */
792 if (e1000_check_reset_block(&sc->hw)) {
793 device_printf(dev,
794 "PHY reset is blocked due to SOL/IDER session.\n");
795 }
796
797 /* Disable EEE on I217/I218 */
798 sc->hw.dev_spec.ich8lan.eee_disable = 1;
799
800 /*
801 * Start from a known state; this is important for reading
802 * the NVM and MAC address.
803 */
804 e1000_reset_hw(&sc->hw);
805
806 /* Make sure we have a good EEPROM before we read from it */
807 if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
808 /*
809 * Some PCI-E parts fail the first check due to
810 * the link being in sleep state; call it again and,
811 * if it fails a second time, it is a real issue.
812 */
813 if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
814 device_printf(dev,
815 "The EEPROM Checksum Is Not Valid\n");
816 error = EIO;
817 goto fail;
818 }
819 }
820
821 /* Copy the permanent MAC address out of the EEPROM */
822 if (e1000_read_mac_addr(&sc->hw) < 0) {
823 device_printf(dev, "EEPROM read error while reading MAC"
824 " address\n");
825 error = EIO;
826 goto fail;
827 }
828 if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
829 device_printf(dev, "Invalid MAC address\n");
830 error = EIO;
831 goto fail;
832 }
833
834 /* Disable ULP support */
835 e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);
836
837 /* Determine if we have to control management hardware */
838 if (e1000_enable_mng_pass_thru(&sc->hw))
839 sc->flags |= EMX_FLAG_HAS_MGMT;
840
841 /*
842 * Setup Wake-on-Lan
843 */
844 apme_mask = EMX_EEPROM_APME;
845 eeprom_data = 0;
846 switch (sc->hw.mac.type) {
847 case e1000_82573:
848 sc->flags |= EMX_FLAG_HAS_AMT;
849 /* FALL THROUGH */
850
851 case e1000_82571:
852 case e1000_82572:
853 case e1000_80003es2lan:
854 if (sc->hw.bus.func == 1) {
855 e1000_read_nvm(&sc->hw,
856 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
857 } else {
858 e1000_read_nvm(&sc->hw,
859 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
860 }
861 break;
862
863 case e1000_pch_lpt:
864 case e1000_pch_spt:
865 case e1000_pch_cnp:
866 apme_mask = E1000_WUC_APME;
867 sc->flags |= EMX_FLAG_HAS_AMT;
868 eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC);
869 break;
870
871 default:
872 e1000_read_nvm(&sc->hw,
873 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
874 break;
875 }
876 if (eeprom_data & apme_mask)
877 sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;
878
879 /*
880 * We have the EEPROM settings; now apply the special cases
881 * where the EEPROM may be wrong or the board won't support
882 * wake on LAN on a particular port.
883 */
884 device_id = pci_get_device(dev);
885 switch (device_id) {
886 case E1000_DEV_ID_82571EB_FIBER:
887 /*
888 * Wake events only supported on port A for dual fiber
889 * regardless of eeprom setting
890 */
891 if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
892 E1000_STATUS_FUNC_1)
893 sc->wol = 0;
894 break;
895
896 case E1000_DEV_ID_82571EB_QUAD_COPPER:
897 case E1000_DEV_ID_82571EB_QUAD_FIBER:
898 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
899 /* if quad port sc, disable WoL on all but port A */
900 if (emx_global_quad_port_a != 0)
901 sc->wol = 0;
902 /* Reset for multiple quad port adapters */
903 if (++emx_global_quad_port_a == 4)
904 emx_global_quad_port_a = 0;
905 break;
906 }
907
908 /* XXX disable wol */
909 sc->wol = 0;
910
911 /* Initialize the # of TX rings to use. */
912 sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);
913
914 /* Setup flow control. */
915 device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
916 emx_flowctrl);
917 sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);
918
919 /* Setup OS specific network interface */
920 emx_setup_ifp(sc);
921
922 /* Add sysctl tree; must be called after emx_setup_ifp() */
923 emx_add_sysctl(sc);
924
925 /* Reset the hardware */
926 error = emx_reset(sc);
927 if (error) {
928 /*
929 * Some 82573 parts fail the first reset; call it again and,
930 * if it fails a second time, it is a real issue.
931 */
932 error = emx_reset(sc);
933 if (error) {
934 device_printf(dev, "Unable to reset the hardware\n");
935 ether_ifdetach(&sc->arpcom.ac_if);
936 goto fail;
937 }
938 }
939
940 /* Initialize statistics */
941 emx_update_stats(sc);
942
943 sc->hw.mac.get_link_status = 1;
944 emx_update_link_status(sc);
945
946 /* Non-AMT based hardware can now take control from firmware */
947 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
948 EMX_FLAG_HAS_MGMT)
949 emx_get_hw_control(sc);
950
951 /*
952 * Missing Interrupt Following ICR read:
953 *
954 * 82571/82572 specification update errata #76
955 * 82573 specification update errata #31
956 * 82574 specification update errata #12
957 */
958 intr_func = emx_intr;
959 if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
960 (sc->hw.mac.type == e1000_82571 ||
961 sc->hw.mac.type == e1000_82572 ||
962 sc->hw.mac.type == e1000_82573 ||
963 sc->hw.mac.type == e1000_82574))
964 intr_func = emx_intr_mask;
965
966 error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
967 &sc->intr_tag, &sc->main_serialize);
968 if (error) {
969 device_printf(dev, "Failed to register interrupt handler");
970 ether_ifdetach(&sc->arpcom.ac_if);
971 goto fail;
972 }
973 return (0);
974 fail:
975 emx_detach(dev);
976 return (error);
977 }
978
979 static int
980 emx_detach(device_t dev)
981 {
982 struct emx_softc *sc = device_get_softc(dev);
983
984 if (device_is_attached(dev)) {
985 struct ifnet *ifp = &sc->arpcom.ac_if;
986
987 ifnet_serialize_all(ifp);
988
989 emx_stop(sc);
990
991 e1000_phy_hw_reset(&sc->hw);
992
993 emx_rel_mgmt(sc);
994 emx_rel_hw_control(sc);
995
996 if (sc->wol) {
997 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
998 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
999 emx_enable_wol(dev);
1000 }
1001
1002 bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);
1003
1004 ifnet_deserialize_all(ifp);
1005
1006 ether_ifdetach(ifp);
1007 } else if (sc->memory != NULL) {
1008 emx_rel_hw_control(sc);
1009 }
1010
1011 ifmedia_removeall(&sc->media);
1012 bus_generic_detach(dev);
1013
1014 if (sc->intr_res != NULL) {
1015 bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
1016 sc->intr_res);
1017 }
1018
1019 if (sc->intr_type == PCI_INTR_TYPE_MSI)
1020 pci_release_msi(dev);
1021
1022 if (sc->memory != NULL) {
1023 bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
1024 sc->memory);
1025 }
1026
1027 if (sc->flash != NULL) {
1028 bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
1029 sc->flash);
1030 }
1031
1032 emx_dma_free(sc);
1033
1034 if (sc->mta != NULL)
1035 kfree(sc->mta, M_DEVBUF);
1036
1037 if (sc->rx_rmap != NULL)
1038 if_ringmap_free(sc->rx_rmap);
1039 if (sc->tx_rmap != NULL)
1040 if_ringmap_free(sc->tx_rmap);
1041
1042 return (0);
1043 }
1044
1045 static int
1046 emx_shutdown(device_t dev)
1047 {
1048 return emx_suspend(dev);
1049 }
1050
1051 static int
1052 emx_suspend(device_t dev)
1053 {
1054 struct emx_softc *sc = device_get_softc(dev);
1055 struct ifnet *ifp = &sc->arpcom.ac_if;
1056
1057 ifnet_serialize_all(ifp);
1058
1059 emx_stop(sc);
1060
1061 emx_rel_mgmt(sc);
1062 emx_rel_hw_control(sc);
1063
1064 if (sc->wol) {
1065 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
1066 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
1067 emx_enable_wol(dev);
1068 }
1069
1070 ifnet_deserialize_all(ifp);
1071
1072 return bus_generic_suspend(dev);
1073 }
1074
1075 static int
1076 emx_resume(device_t dev)
1077 {
1078 struct emx_softc *sc = device_get_softc(dev);
1079 struct ifnet *ifp = &sc->arpcom.ac_if;
1080 int i;
1081
1082 ifnet_serialize_all(ifp);
1083
1084 emx_init(sc);
1085 emx_get_mgmt(sc);
1086 for (i = 0; i < sc->tx_ring_inuse; ++i)
1087 ifsq_devstart_sched(sc->tx_data[i].ifsq);
1088
1089 ifnet_deserialize_all(ifp);
1090
1091 return bus_generic_resume(dev);
1092 }
1093
1094 static void
1095 emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1096 {
1097 struct emx_softc *sc = ifp->if_softc;
1098 struct emx_txdata *tdata = ifsq_get_priv(ifsq);
1099 struct mbuf *m_head;
1100 int idx = -1, nsegs = 0;
1101
1102 KKASSERT(tdata->ifsq == ifsq);
1103 ASSERT_SERIALIZED(&tdata->tx_serialize);
1104
1105 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
1106 return;
1107
1108 if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
1109 ifsq_purge(ifsq);
1110 return;
1111 }
1112
1113 while (!ifsq_is_empty(ifsq)) {
1114 /* Do we at least have the minimal number of free descriptors? */
1115 if (EMX_IS_OACTIVE(tdata)) {
1116 emx_tx_collect(tdata, FALSE);
1117 if (EMX_IS_OACTIVE(tdata)) {
1118 ifsq_set_oactive(ifsq);
1119 break;
1120 }
1121 }
1122
1123 logif(pkt_txqueue);
1124 m_head = ifsq_dequeue(ifsq);
1125 if (m_head == NULL)
1126 break;
1127
1128 if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
1129 IFNET_STAT_INC(ifp, oerrors, 1);
1130 emx_tx_collect(tdata, FALSE);
1131 continue;
1132 }
1133
1134 /*
1135 * TX interrupts are aggressively aggregated, so increasing
1136 * opackets at TX interrupt time would make the opackets
1137 * statistics vastly inaccurate; we do the opackets increment
1138 * now.
1139 */
1140 IFNET_STAT_INC(ifp, opackets, 1);
1141
1142 if (nsegs >= tdata->tx_wreg_nsegs) {
1143 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
1144 nsegs = 0;
1145 idx = -1;
1146 }
1147
1148 /* Send a copy of the frame to the BPF listener */
1149 ETHER_BPF_MTAP(ifp, m_head);
1150
1151 /* Set timeout in case hardware has problems transmitting. */
1152 ifsq_watchdog_set_count(&tdata->tx_watchdog, EMX_TX_TIMEOUT);
1153 }
1154 if (idx >= 0)
1155 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
1156 tdata->tx_running = EMX_TX_RUNNING;
1157 }
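/*
 * Annotation: TDT, the TX tail register written above, is the doorbell
 * that hands descriptors to the MAC, and every write is a costly MMIO
 * access.  The loop batches doorbells: TDT is only written once at
 * least tx_wreg_nsegs segments have accumulated, and any remainder is
 * flushed in one final write after the loop (the idx >= 0 check).
 */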
1158
1159 static int
1160 emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
1161 {
1162 struct emx_softc *sc = ifp->if_softc;
1163 struct ifreq *ifr = (struct ifreq *)data;
1164 uint16_t eeprom_data = 0;
1165 int max_frame_size, mask, reinit;
1166 int error = 0;
1167
1168 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1169
1170 switch (command) {
1171 case SIOCSIFMTU:
1172 switch (sc->hw.mac.type) {
1173 case e1000_82573:
1174 /*
1175 * 82573 only supports jumbo frames
1176 * if ASPM is disabled.
1177 */
1178 e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
1179 &eeprom_data);
1180 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1181 max_frame_size = ETHER_MAX_LEN;
1182 break;
1183 }
1184 /* FALL THROUGH */
1185
1186 /* Limit Jumbo Frame size */
1187 case e1000_82571:
1188 case e1000_82572:
1189 case e1000_82574:
1190 case e1000_pch_lpt:
1191 case e1000_pch_spt:
1192 case e1000_pch_cnp:
1193 case e1000_80003es2lan:
1194 max_frame_size = 9234;
1195 break;
1196
1197 default:
1198 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1199 break;
1200 }
1201 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1202 ETHER_CRC_LEN) {
1203 error = EINVAL;
1204 break;
1205 }
1206
1207 ifp->if_mtu = ifr->ifr_mtu;
1208 sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
1209 ETHER_CRC_LEN;
1210
1211 if (ifp->if_flags & IFF_RUNNING)
1212 emx_init(sc);
1213 break;
1214
1215 case SIOCSIFFLAGS:
1216 if (ifp->if_flags & IFF_UP) {
1217 if ((ifp->if_flags & IFF_RUNNING)) {
1218 if ((ifp->if_flags ^ sc->if_flags) &
1219 (IFF_PROMISC | IFF_ALLMULTI)) {
1220 emx_disable_promisc(sc);
1221 emx_set_promisc(sc);
1222 }
1223 } else {
1224 emx_init(sc);
1225 }
1226 } else if (ifp->if_flags & IFF_RUNNING) {
1227 emx_stop(sc);
1228 }
1229 sc->if_flags = ifp->if_flags;
1230 break;
1231
1232 case SIOCADDMULTI:
1233 case SIOCDELMULTI:
1234 if (ifp->if_flags & IFF_RUNNING) {
1235 emx_disable_intr(sc);
1236 emx_set_multi(sc);
1237 #ifdef IFPOLL_ENABLE
1238 if (!(ifp->if_flags & IFF_NPOLLING))
1239 #endif
1240 emx_enable_intr(sc);
1241 }
1242 break;
1243
1244 case SIOCSIFMEDIA:
1245 /* Check SOL/IDER usage */
1246 if (e1000_check_reset_block(&sc->hw)) {
1247 device_printf(sc->dev, "Media change is"
1248 " blocked due to SOL/IDER session.\n");
1249 break;
1250 }
1251 /* FALL THROUGH */
1252
1253 case SIOCGIFMEDIA:
1254 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
1255 break;
1256
1257 case SIOCSIFCAP:
1258 reinit = 0;
1259 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1260 if (mask & IFCAP_RXCSUM) {
1261 ifp->if_capenable ^= IFCAP_RXCSUM;
1262 reinit = 1;
1263 }
1264 if (mask & IFCAP_VLAN_HWTAGGING) {
1265 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1266 reinit = 1;
1267 }
1268 if (mask & IFCAP_TXCSUM) {
1269 ifp->if_capenable ^= IFCAP_TXCSUM;
1270 if (ifp->if_capenable & IFCAP_TXCSUM)
1271 ifp->if_hwassist |= EMX_CSUM_FEATURES;
1272 else
1273 ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
1274 }
1275 if (mask & IFCAP_TSO) {
1276 ifp->if_capenable ^= IFCAP_TSO;
1277 if (ifp->if_capenable & IFCAP_TSO)
1278 ifp->if_hwassist |= CSUM_TSO;
1279 else
1280 ifp->if_hwassist &= ~CSUM_TSO;
1281 }
1282 if (mask & IFCAP_RSS)
1283 ifp->if_capenable ^= IFCAP_RSS;
1284 if (reinit && (ifp->if_flags & IFF_RUNNING))
1285 emx_init(sc);
1286 break;
1287
1288 default:
1289 error = ether_ioctl(ifp, command, data);
1290 break;
1291 }
1292 return (error);
1293 }
1294
1295 static void
1296 emx_watchdog(struct ifaltq_subque *ifsq)
1297 {
1298 struct emx_txdata *tdata = ifsq_get_priv(ifsq);
1299 struct ifnet *ifp = ifsq_get_ifp(ifsq);
1300 struct emx_softc *sc = ifp->if_softc;
1301 int i;
1302
1303 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1304
1305 /*
1306 * The timer is set to 5 every time start queues a packet.
1307 * Then txeof keeps resetting it as long as it cleans at
1308 * least one descriptor.
1309 * Finally, anytime all descriptors are clean the timer is
1310 * set to 0.
1311 */
1312
1313 if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
1314 E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
1315 /*
1316 * If we reach here, all TX jobs are completed and
1317 * the TX engine should have been idled for some time.
1318 * We don't need to call ifsq_devstart_sched() here.
1319 */
1320 ifsq_clr_oactive(ifsq);
1321 ifsq_watchdog_set_count(&tdata->tx_watchdog, 0);
1322 return;
1323 }
1324
1325 /*
1326 * If we are in this routine because of pause frames, then
1327 * don't reset the hardware.
1328 */
1329 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
1330 ifsq_watchdog_set_count(&tdata->tx_watchdog, EMX_TX_TIMEOUT);
1331 return;
1332 }
1333
1334 if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);
1335
1336 IFNET_STAT_INC(ifp, oerrors, 1);
1337
1338 emx_init(sc);
1339 for (i = 0; i < sc->tx_ring_inuse; ++i)
1340 ifsq_devstart_sched(sc->tx_data[i].ifsq);
1341 }
1342
1343 static void
1344 emx_init(void *xsc)
1345 {
1346 struct emx_softc *sc = xsc;
1347 struct ifnet *ifp = &sc->arpcom.ac_if;
1348 device_t dev = sc->dev;
1349 boolean_t polling;
1350 int i;
1351
1352 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1353
1354 emx_stop(sc);
1355
1356 /* Get the latest mac address, User can use a LAA */
1357 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);
1358
1359 /* Put the address into the Receive Address Array */
1360 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);
1361
1362 /*
1363 * With the 82571, RAR[0] may be overwritten
1364 * when the other port is reset; we make a duplicate
1365 * in RAR[14] for that eventuality, which assures
1366 * the interface continues to function.
1367 */
1368 if (sc->hw.mac.type == e1000_82571) {
1369 e1000_set_laa_state_82571(&sc->hw, TRUE);
1370 e1000_rar_set(&sc->hw, sc->hw.mac.addr,
1371 E1000_RAR_ENTRIES - 1);
1372 }
1373
1374 /* Initialize the hardware */
1375 if (emx_reset(sc)) {
1376 device_printf(dev, "Unable to reset the hardware\n");
1377 /* XXX emx_stop()? */
1378 return;
1379 }
1380 emx_update_link_status(sc);
1381
1382 /* Setup VLAN support, basic and offload if available */
1383 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
1384
1385 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1386 uint32_t ctrl;
1387
1388 ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
1389 ctrl |= E1000_CTRL_VME;
1390 E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
1391 }
1392
1393 /* Configure for OS presence */
1394 emx_get_mgmt(sc);
1395
1396 polling = FALSE;
1397 #ifdef IFPOLL_ENABLE
1398 if (ifp->if_flags & IFF_NPOLLING)
1399 polling = TRUE;
1400 #endif
1401 sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
1402 ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);
1403
1404 /* Prepare transmit descriptors and buffers */
1405 for (i = 0; i < sc->tx_ring_inuse; ++i)
1406 emx_init_tx_ring(&sc->tx_data[i]);
1407 emx_init_tx_unit(sc);
1408
1409 /* Setup Multicast table */
1410 emx_set_multi(sc);
1411
1412 /* Prepare receive descriptors and buffers */
1413 for (i = 0; i < sc->rx_ring_cnt; ++i) {
1414 if (emx_init_rx_ring(&sc->rx_data[i])) {
1415 device_printf(dev,
1416 "Could not setup receive structures\n");
1417 emx_stop(sc);
1418 return;
1419 }
1420 }
1421 emx_init_rx_unit(sc);
1422
1423 /* Don't lose promiscuous settings */
1424 emx_set_promisc(sc);
1425
1426 /* Reset hardware counters */
1427 e1000_clear_hw_cntrs_base_generic(&sc->hw);
1428
1429 /* MSI/X configuration for 82574 */
1430 if (sc->hw.mac.type == e1000_82574) {
1431 int tmp;
1432
1433 tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
1434 tmp |= E1000_CTRL_EXT_PBA_CLR;
1435 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
1436 /*
1437 * XXX MSIX
1438 * Set the IVAR - interrupt vector routing.
1439 * Each nibble represents a vector, high bit
1440 * is enable, other 3 bits are the MSIX table
1441 * entry, we map RXQ0 to 0, TXQ0 to 1, and
1442 * Link (other) to 2, hence the magic number.
1443 */
1444 E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
1445 }
1446
1447 /*
1448 * Only enable interrupts if we are not polling; make sure
1449 * they are off otherwise.
1450 */
1451 if (polling)
1452 emx_disable_intr(sc);
1453 else
1454 emx_enable_intr(sc);
1455
1456 /* AMT based hardware can now take control from firmware */
1457 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
1458 (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
1459 emx_get_hw_control(sc);
1460
1461 ifp->if_flags |= IFF_RUNNING;
1462 for (i = 0; i < sc->tx_ring_inuse; ++i) {
1463 struct emx_txdata *tdata = &sc->tx_data[i];
1464
1465 ifsq_clr_oactive(tdata->ifsq);
1466 ifsq_watchdog_start(&tdata->tx_watchdog);
1467 if (!polling) {
1468 callout_reset_bycpu(&tdata->tx_gc_timer, 1,
1469 emx_txgc_timer, tdata, ifsq_get_cpuid(tdata->ifsq));
1470 }
1471 }
1472 callout_reset(&sc->timer, hz, emx_timer, sc);
1473 }
1474
1475 static void
1476 emx_intr(void *xsc)
1477 {
1478 emx_intr_body(xsc, TRUE);
1479 }
1480
1481 static void
1482 emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
1483 {
1484 struct ifnet *ifp = &sc->arpcom.ac_if;
1485 uint32_t reg_icr;
1486
1487 logif(intr_beg);
1488 ASSERT_SERIALIZED(&sc->main_serialize);
1489
1490 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
1491
1492 if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
1493 logif(intr_end);
1494 return;
1495 }
1496
1497 /*
1498 * XXX: some laptops trigger several spurious interrupts
1499 * on emx(4) when in the resume cycle. The ICR register
1500 * reports all-ones value in this case. Processing such
1501 * interrupts would lead to a freeze. I don't know why.
1502 */
1503 if (reg_icr == 0xffffffff) {
1504 logif(intr_end);
1505 return;
1506 }
1507
1508 if (ifp->if_flags & IFF_RUNNING) {
1509 if (reg_icr &
1510 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
1511 int i;
1512
1513 for (i = 0; i < sc->rx_ring_cnt; ++i) {
1514 lwkt_serialize_enter(
1515 &sc->rx_data[i].rx_serialize);
1516 emx_rxeof(&sc->rx_data[i], -1);
1517 lwkt_serialize_exit(
1518 &sc->rx_data[i].rx_serialize);
1519 }
1520 }
1521 if (reg_icr & E1000_ICR_TXDW) {
1522 struct emx_txdata *tdata = &sc->tx_data[0];
1523
1524 lwkt_serialize_enter(&tdata->tx_serialize);
1525 emx_tx_intr(tdata);
1526 lwkt_serialize_exit(&tdata->tx_serialize);
1527 }
1528 }
1529
1530 /* Link status change */
1531 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1532 emx_serialize_skipmain(sc);
1533
1534 callout_stop(&sc->timer);
1535 sc->hw.mac.get_link_status = 1;
1536 emx_update_link_status(sc);
1537
1538 /* Deal with TX cruft when link lost */
1539 emx_tx_purge(sc);
1540
1541 callout_reset(&sc->timer, hz, emx_timer, sc);
1542
1543 emx_deserialize_skipmain(sc);
1544 }
1545
1546 if (reg_icr & E1000_ICR_RXO)
1547 sc->rx_overruns++;
1548
1549 logif(intr_end);
1550 }
1551
1552 static void
1553 emx_intr_mask(void *xsc)
1554 {
1555 struct emx_softc *sc = xsc;
1556
1557 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
1558 /*
1559 * NOTE:
1560 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
1561 * so don't check it.
1562 */
1563 emx_intr_body(sc, FALSE);
1564 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
1565 }
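/*
 * Annotation: this wrapper exists for the "missing interrupt following
 * ICR read" errata noted in emx_attach().  On a shared legacy
 * interrupt line the handler cannot trust ICR.INT_ASSERTED to tell its
 * own device's interrupts apart, so it masks every source via IMC,
 * processes events unconditionally (chk_asserted == FALSE), and only
 * then re-arms the sources via IMS.
 */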
1566
1567 static void
1568 emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1569 {
1570 struct emx_softc *sc = ifp->if_softc;
1571
1572 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1573
1574 emx_update_link_status(sc);
1575
1576 ifmr->ifm_status = IFM_AVALID;
1577 ifmr->ifm_active = IFM_ETHER;
1578
1579 if (!sc->link_active) {
1580 if (sc->hw.mac.autoneg)
1581 ifmr->ifm_active |= IFM_NONE;
1582 else
1583 ifmr->ifm_active |= sc->media.ifm_media;
1584 return;
1585 }
1586
1587 ifmr->ifm_status |= IFM_ACTIVE;
1588 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
1589 ifmr->ifm_active |= sc->ifm_flowctrl;
1590
1591 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
1592 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
1593 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1594 } else {
1595 switch (sc->link_speed) {
1596 case 10:
1597 ifmr->ifm_active |= IFM_10_T;
1598 break;
1599 case 100:
1600 ifmr->ifm_active |= IFM_100_TX;
1601 break;
1602
1603 case 1000:
1604 ifmr->ifm_active |= IFM_1000_T;
1605 break;
1606 }
1607 if (sc->link_duplex == FULL_DUPLEX)
1608 ifmr->ifm_active |= IFM_FDX;
1609 else
1610 ifmr->ifm_active |= IFM_HDX;
1611 }
1612 if (ifmr->ifm_active & IFM_FDX)
1613 ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
1614 }
1615
1616 static int
1617 emx_media_change(struct ifnet *ifp)
1618 {
1619 struct emx_softc *sc = ifp->if_softc;
1620 struct ifmedia *ifm = &sc->media;
1621
1622 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1623
1624 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1625 return (EINVAL);
1626
1627 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1628 case IFM_AUTO:
1629 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
1630 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
1631 break;
1632
1633 case IFM_1000_SX:
1634 case IFM_1000_T:
1635 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
1636 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1637 break;
1638
1639 case IFM_100_TX:
1640 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
1641 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1642 } else {
1643 if (IFM_OPTIONS(ifm->ifm_media) &
1644 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
1645 if (bootverbose) {
1646 if_printf(ifp, "Flow control is not "
1647 "allowed for half-duplex\n");
1648 }
1649 return EINVAL;
1650 }
1651 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1652 }
1653 sc->hw.mac.autoneg = FALSE;
1654 sc->hw.phy.autoneg_advertised = 0;
1655 break;
1656
1657 case IFM_10_T:
1658 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
1659 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1660 } else {
1661 if (IFM_OPTIONS(ifm->ifm_media) &
1662 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
1663 if (bootverbose) {
1664 if_printf(ifp, "Flow control is not "
1665 "allowed for half-duplex\n");
1666 }
1667 return EINVAL;
1668 }
1669 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1670 }
1671 sc->hw.mac.autoneg = FALSE;
1672 sc->hw.phy.autoneg_advertised = 0;
1673 break;
1674
1675 default:
1676 if (bootverbose) {
1677 if_printf(ifp, "Unsupported media type %d\n",
1678 IFM_SUBTYPE(ifm->ifm_media));
1679 }
1680 return EINVAL;
1681 }
1682 sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;
1683
1684 if (ifp->if_flags & IFF_RUNNING)
1685 emx_init(sc);
1686
1687 return (0);
1688 }
1689
1690 static int
1691 emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
1692 int *segs_used, int *idx)
1693 {
1694 bus_dma_segment_t segs[EMX_MAX_SCATTER];
1695 bus_dmamap_t map;
1696 struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
1697 struct e1000_tx_desc *ctxd = NULL;
1698 struct mbuf *m_head = *m_headp;
1699 uint32_t txd_upper, txd_lower, cmd = 0;
1700 int maxsegs, nsegs, i, j, first, last = 0, error;
1701
1702 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1703 error = emx_tso_pullup(tdata, m_headp);
1704 if (error)
1705 return error;
1706 m_head = *m_headp;
1707 }
1708
1709 txd_upper = txd_lower = 0;
1710
1711 /*
1712 * Capture the first descriptor index; this descriptor
1713 * will have the index of the EOP, which is the only one
1714 * that now gets a DONE bit writeback.
1715 */
1716 first = tdata->next_avail_tx_desc;
1717 tx_buffer = &tdata->tx_buf[first];
1718 tx_buffer_mapped = tx_buffer;
1719 map = tx_buffer->map;
1720
1721 maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
1722 KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
1723 if (maxsegs > EMX_MAX_SCATTER)
1724 maxsegs = EMX_MAX_SCATTER;
1725
1726 error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
1727 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1728 if (error) {
1729 m_freem(*m_headp);
1730 *m_headp = NULL;
1731 return error;
1732 }
1733 bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);
1734
1735 m_head = *m_headp;
1736 tdata->tx_nsegs += nsegs;
1737 *segs_used += nsegs;
1738
1739 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1740 /* TSO will consume one TX desc */
1741 i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
1742 tdata->tx_nsegs += i;
1743 *segs_used += i;
1744 } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
1745 /* TX csum offloading will consume one TX desc */
1746 i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
1747 tdata->tx_nsegs += i;
1748 *segs_used += i;
1749 }
1750
1751 /* Handle VLAN tag */
1752 if (m_head->m_flags & M_VLANTAG) {
1753 /* Set the vlan id. */
1754 txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
1755 /* Tell hardware to add tag */
1756 txd_lower |= htole32(E1000_TXD_CMD_VLE);
1757 }
1758
1759 i = tdata->next_avail_tx_desc;
1760
1761 /* Set up our transmit descriptors */
1762 for (j = 0; j < nsegs; j++) {
1763 tx_buffer = &tdata->tx_buf[i];
1764 ctxd = &tdata->tx_desc_base[i];
1765
1766 ctxd->buffer_addr = htole64(segs[j].ds_addr);
1767 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
1768 txd_lower | segs[j].ds_len);
1769 ctxd->upper.data = htole32(txd_upper);
1770
1771 last = i;
1772 if (++i == tdata->num_tx_desc)
1773 i = 0;
1774 }
1775
1776 tdata->next_avail_tx_desc = i;
1777
1778 KKASSERT(tdata->num_tx_desc_avail > nsegs);
1779 tdata->num_tx_desc_avail -= nsegs;
1780 tdata->tx_nmbuf++;
1781
1782 tx_buffer->m_head = m_head;
1783 tx_buffer_mapped->map = tx_buffer->map;
1784 tx_buffer->map = map;
1785
1786 if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
1787 tdata->tx_nsegs = 0;
1788
1789 /*
1790 * Report Status (RS) is turned on
1791 * every tx_intr_nsegs descriptors.
1792 */
1793 cmd = E1000_TXD_CMD_RS;
1794
1795 /*
1796 * Keep track of the descriptor, which will
1797 * be written back by hardware.
1798 */
1799 tdata->tx_dd[tdata->tx_dd_tail] = last;
1800 EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
1801 KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
1802 }
1803
1804 /*
1805 * Last Descriptor of Packet needs End Of Packet (EOP)
1806 */
1807 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);
1808
1809 /*
1810 * Defer TDT updating, until enough descriptors are setup
1811 */
1812 *idx = i;
1813
1814 #ifdef EMX_TSS_DEBUG
1815 tdata->tx_pkts++;
1816 #endif
1817
1818 return (0);
1819 }
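/*
 * Annotation: rather than requesting a status write-back per packet,
 * emx_encap() sets the Report Status (RS) bit only once every
 * tx_intr_nsegs descriptors and records in the tx_dd[] ring which
 * descriptor index will carry the DD bit.  emx_txeof() then needs to
 * inspect just those recorded indexes to learn how far the hardware
 * has progressed, keeping TX completion processing cheap at high
 * packet rates.
 */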
1820
1821 static void
1822 emx_set_promisc(struct emx_softc *sc)
1823 {
1824 struct ifnet *ifp = &sc->arpcom.ac_if;
1825 uint32_t reg_rctl;
1826
1827 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1828
1829 if (ifp->if_flags & IFF_PROMISC) {
1830 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1831 /* Turn this on if you want to see bad packets */
1832 if (emx_debug_sbp)
1833 reg_rctl |= E1000_RCTL_SBP;
1834 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1835 } else if (ifp->if_flags & IFF_ALLMULTI) {
1836 reg_rctl |= E1000_RCTL_MPE;
1837 reg_rctl &= ~E1000_RCTL_UPE;
1838 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1839 }
1840 }
1841
1842 static void
1843 emx_disable_promisc(struct emx_softc *sc)
1844 {
1845 struct ifnet *ifp = &sc->arpcom.ac_if;
1846 uint32_t reg_rctl;
1847 int mcnt = 0;
1848
1849 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1850 reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
1851
1852 if (ifp->if_flags & IFF_ALLMULTI) {
1853 mcnt = EMX_MCAST_ADDR_MAX;
1854 } else {
1855 const struct ifmultiaddr *ifma;
1856
1857 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1858 if (ifma->ifma_addr->sa_family != AF_LINK)
1859 continue;
1860 if (mcnt == EMX_MCAST_ADDR_MAX)
1861 break;
1862 mcnt++;
1863 }
1864 }
1865 /* Don't disable if in MAX groups */
1866 if (mcnt < EMX_MCAST_ADDR_MAX)
1867 reg_rctl &= ~E1000_RCTL_MPE;
1868
1869 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1870 }
1871
1872 static void
1873 emx_set_multi(struct emx_softc *sc)
1874 {
1875 struct ifnet *ifp = &sc->arpcom.ac_if;
1876 struct ifmultiaddr *ifma;
1877 uint32_t reg_rctl = 0;
1878 uint8_t *mta;
1879 int mcnt = 0;
1880
1881 mta = sc->mta;
1882 bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);
1883
1884 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1885 if (ifma->ifma_addr->sa_family != AF_LINK)
1886 continue;
1887
1888 if (mcnt == EMX_MCAST_ADDR_MAX)
1889 break;
1890
1891 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1892 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1893 mcnt++;
1894 }
1895
1896 if (mcnt >= EMX_MCAST_ADDR_MAX) {
1897 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1898 reg_rctl |= E1000_RCTL_MPE;
1899 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1900 } else {
1901 e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
1902 }
1903 }
1904
1905 /*
1906 * This routine checks for link status and updates statistics.
1907 */
1908 static void
1909 emx_timer(void *xsc)
1910 {
1911 struct emx_softc *sc = xsc;
1912 struct ifnet *ifp = &sc->arpcom.ac_if;
1913
1914 lwkt_serialize_enter(&sc->main_serialize);
1915
1916 emx_update_link_status(sc);
1917 emx_update_stats(sc);
1918
1919 /* Reset LAA into RAR[0] on 82571 */
1920 if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
1921 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);
1922
1923 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1924 emx_print_hw_stats(sc);
1925
1926 emx_smartspeed(sc);
1927
1928 callout_reset(&sc->timer, hz, emx_timer, sc);
1929
1930 lwkt_serialize_exit(&sc->main_serialize);
1931 }
1932
1933 static void
1934 emx_update_link_status(struct emx_softc *sc)
1935 {
1936 struct e1000_hw *hw = &sc->hw;
1937 struct ifnet *ifp = &sc->arpcom.ac_if;
1938 device_t dev = sc->dev;
1939 uint32_t link_check = 0;
1940
1941 /* Get the cached link value or read phy for real */
1942 switch (hw->phy.media_type) {
1943 case e1000_media_type_copper:
1944 if (hw->mac.get_link_status) {
1945 if (hw->mac.type >= e1000_pch_spt)
1946 msec_delay(50);
1947 /* Do the work to read phy */
1948 e1000_check_for_link(hw);
1949 link_check = !hw->mac.get_link_status;
1950 if (link_check) /* ESB2 fix */
1951 e1000_cfg_on_link_up(hw);
1952 } else {
1953 link_check = TRUE;
1954 }
1955 break;
1956
1957 case e1000_media_type_fiber:
1958 e1000_check_for_link(hw);
1959 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
1960 break;
1961
1962 case e1000_media_type_internal_serdes:
1963 e1000_check_for_link(hw);
1964 link_check = sc->hw.mac.serdes_has_link;
1965 break;
1966
1967 case e1000_media_type_unknown:
1968 default:
1969 break;
1970 }
1971
1972 /* Now check for a transition */
1973 if (link_check && sc->link_active == 0) {
1974 e1000_get_speed_and_duplex(hw, &sc->link_speed,
1975 &sc->link_duplex);
1976
1977 /*
1978 * Check if we should enable/disable SPEED_MODE bit on
1979 * 82571EB/82572EI
1980 */
1981 if (sc->link_speed != SPEED_1000 &&
1982 (hw->mac.type == e1000_82571 ||
1983 hw->mac.type == e1000_82572)) {
1984 int tarc0;
1985
1986 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
1987 tarc0 &= ~EMX_TARC_SPEED_MODE;
1988 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
1989 }
1990 if (bootverbose) {
1991 char flowctrl[IFM_ETH_FC_STRLEN];
1992
1993 e1000_fc2str(hw->fc.current_mode, flowctrl,
1994 sizeof(flowctrl));
1995 device_printf(dev, "Link is up %d Mbps %s, "
1996 "Flow control: %s\n",
1997 sc->link_speed,
1998 (sc->link_duplex == FULL_DUPLEX) ?
1999 "Full Duplex" : "Half Duplex",
2000 flowctrl);
2001 }
2002 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
2003 e1000_force_flowctrl(hw, sc->ifm_flowctrl);
2004 sc->link_active = 1;
2005 sc->smartspeed = 0;
2006 ifp->if_baudrate = sc->link_speed * 1000000;
2007 ifp->if_link_state = LINK_STATE_UP;
2008 if_link_state_change(ifp);
2009 } else if (!link_check && sc->link_active == 1) {
2010 ifp->if_baudrate = sc->link_speed = 0;
2011 sc->link_duplex = 0;
2012 if (bootverbose)
2013 device_printf(dev, "Link is Down\n");
2014 sc->link_active = 0;
2015 ifp->if_link_state = LINK_STATE_DOWN;
2016 if_link_state_change(ifp);
2017 }
2018 }
2019
2020 static void
2021 emx_stop(struct emx_softc *sc)
2022 {
2023 struct ifnet *ifp = &sc->arpcom.ac_if;
2024 int i;
2025
2026 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2027
2028 emx_disable_intr(sc);
2029
2030 callout_stop(&sc->timer);
2031
2032 ifp->if_flags &= ~IFF_RUNNING;
2033 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2034 struct emx_txdata *tdata = &sc->tx_data[i];
2035
2036 ifsq_clr_oactive(tdata->ifsq);
2037 ifsq_watchdog_stop(&tdata->tx_watchdog);
2038 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;
2039
2040 tdata->tx_running = 0;
2041 callout_stop(&tdata->tx_gc_timer);
2042 }
2043
2044 /* I219 needs some special flushing to avoid hangs */
2045 if (sc->hw.mac.type >= e1000_pch_spt)
2046 emx_flush_txrx_ring(sc);
2047
2048 /*
2049 * Disable multiple receive queues.
2050 *
2051 * NOTE:
2052 * We should disable multiple receive queues before
2053 * resetting the hardware.
2054 */
2055 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);
2056
2057 e1000_reset_hw(&sc->hw);
2058 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
2059
2060 for (i = 0; i < sc->tx_ring_cnt; ++i)
2061 emx_free_tx_ring(&sc->tx_data[i]);
2062 for (i = 0; i < sc->rx_ring_cnt; ++i)
2063 emx_free_rx_ring(&sc->rx_data[i]);
2064 }
2065
2066 static int
2067 emx_reset(struct emx_softc *sc)
2068 {
2069 device_t dev = sc->dev;
2070 uint16_t rx_buffer_size;
2071 uint32_t pba;
2072
2073 /* Set up smart power down as default off on newer adapters. */
2074 if (!emx_smart_pwr_down &&
2075 (sc->hw.mac.type == e1000_82571 ||
2076 sc->hw.mac.type == e1000_82572)) {
2077 uint16_t phy_tmp = 0;
2078
2079 /* Speed up time to link by disabling smart power down. */
2080 e1000_read_phy_reg(&sc->hw,
2081 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
2082 phy_tmp &= ~IGP02E1000_PM_SPD;
2083 e1000_write_phy_reg(&sc->hw,
2084 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
2085 }
2086
2087 /*
2088 * Packet Buffer Allocation (PBA)
2089 * Writing PBA sets the receive portion of the buffer;
2090 * the remainder is used for the transmit buffer.
2091 */
2092 switch (sc->hw.mac.type) {
2093 /* Total Packet Buffer on these is 48K */
2094 case e1000_82571:
2095 case e1000_82572:
2096 case e1000_80003es2lan:
2097 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
2098 break;
2099
2100 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
2101 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
2102 break;
2103
2104 case e1000_82574:
2105 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
2106 break;
2107
2108 case e1000_pch_lpt:
2109 case e1000_pch_spt:
2110 case e1000_pch_cnp:
2111 pba = E1000_PBA_26K;
2112 break;
2113
2114 default:
2115 /* Devices before 82547 had a Packet Buffer of 64K. */
2116 if (sc->hw.mac.max_frame_size > 8192)
2117 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
2118 else
2119 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
2120 }
2121 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);
2122
2123 /*
2124 * These parameters control the automatic generation (Tx) and
2125 * response (Rx) to Ethernet PAUSE frames.
2126 * - High water mark should allow for at least two frames to be
2127 * received after sending an XOFF.
2128 * - Low water mark works best when it is very near the high water mark.
2129 * This allows the receiver to restart by sending XON when it has
2130 * drained a bit. Here we use an arbitrary value of 1500 which will
2131 * restart after one full frame is pulled from the buffer. There
2132 * could be several smaller frames in the buffer and if so they will
2133 * not trigger the XON until their total number reduces the buffer
2134 * by 1500.
2135 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2136 */
2137 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;
2138
2139 sc->hw.fc.high_water = rx_buffer_size -
2140 roundup2(sc->hw.mac.max_frame_size, 1024);
2141 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;
2142
2143 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
2144 sc->hw.fc.send_xon = TRUE;
2145 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);
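/*
 * A worked example with hypothetical numbers: a 48K RX packet buffer
 * (PBA reads back 48, i.e. rx_buffer_size = 48 << 10 = 49152) and a
 * 1522 byte max frame give high_water = 49152 - roundup2(1522, 1024)
 * = 47104 and low_water = 45604; an XOFF goes out when less than one
 * rounded-up frame of buffer remains, and XON resumes traffic once
 * 1500 bytes have drained.
 */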
2146
2147 /*
2148 * Device specific overrides/settings
2149 */
2150 if (sc->hw.mac.type == e1000_pch_lpt ||
2151 sc->hw.mac.type == e1000_pch_spt ||
2152 sc->hw.mac.type == e1000_pch_cnp) {
2153 sc->hw.fc.high_water = 0x5C20;
2154 sc->hw.fc.low_water = 0x5048;
2155 sc->hw.fc.pause_time = 0x0650;
2156 sc->hw.fc.refresh_time = 0x0400;
2157 /* Jumbos need adjusted PBA */
2158 if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
2159 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
2160 else
2161 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
2162 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2163 sc->hw.fc.pause_time = 0xFFFF;
2164 }
2165
2166 /* I219 needs some special flushing to avoid hangs */
2167 if (sc->hw.mac.type >= e1000_pch_spt)
2168 emx_flush_txrx_ring(sc);
2169
2170 /* Issue a global reset */
2171 e1000_reset_hw(&sc->hw);
2172 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
2173 emx_disable_aspm(sc);
2174
2175 if (e1000_init_hw(&sc->hw) < 0) {
2176 device_printf(dev, "Hardware Initialization Failed\n");
2177 return (EIO);
2178 }
2179
2180 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
2181 e1000_get_phy_info(&sc->hw);
2182 e1000_check_for_link(&sc->hw);
2183
2184 return (0);
2185 }
2186
2187 static void
2188 emx_setup_ifp(struct emx_softc *sc)
2189 {
2190 struct ifnet *ifp = &sc->arpcom.ac_if;
2191 int i;
2192
2193 if_initname(ifp, device_get_name(sc->dev),
2194 device_get_unit(sc->dev));
2195 ifp->if_softc = sc;
2196 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2197 ifp->if_init = emx_init;
2198 ifp->if_ioctl = emx_ioctl;
2199 ifp->if_start = emx_start;
2200 #ifdef IFPOLL_ENABLE
2201 ifp->if_npoll = emx_npoll;
2202 #endif
2203 ifp->if_serialize = emx_serialize;
2204 ifp->if_deserialize = emx_deserialize;
2205 ifp->if_tryserialize = emx_tryserialize;
2206 #ifdef INVARIANTS
2207 ifp->if_serialize_assert = emx_serialize_assert;
2208 #endif
2209
2210 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc;
2211
2212 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
2213 ifq_set_ready(&ifp->if_snd);
2214 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
2215
2216 ifp->if_mapsubq = ifq_mapsubq_modulo;
2217 ifq_set_subq_divisor(&ifp->if_snd, 1);
2218
2219 ether_ifattach(ifp, sc->hw.mac.addr, NULL);
2220
2221 ifp->if_capabilities = IFCAP_HWCSUM |
2222 IFCAP_VLAN_HWTAGGING |
2223 IFCAP_VLAN_MTU |
2224 IFCAP_TSO;
2225 if (sc->rx_ring_cnt > 1)
2226 ifp->if_capabilities |= IFCAP_RSS;
2227 ifp->if_capenable = ifp->if_capabilities;
2228 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO;
2229
2230 /*
2231 * Tell the upper layer(s) we support long frames.
2232 */
2233 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2234
2235 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2236 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
2237 struct emx_txdata *tdata = &sc->tx_data[i];
2238
2239 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res));
2240 ifsq_set_priv(ifsq, tdata);
2241 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize);
2242 tdata->ifsq = ifsq;
2243
2244 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog, 0);
2245 }
2246
2247 /*
2248 * Specify the media types supported by this sc and register
2249 * callbacks to update media and link information
2250 */
2251 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
2252 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
2253 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2254 0, NULL);
2255 } else {
2256 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
2257 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2258 0, NULL);
2259 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2260 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2261 0, NULL);
2262 if (sc->hw.phy.type != e1000_phy_ife) {
2263 ifmedia_add(&sc->media,
2264 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2265 }
2266 }
2267 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2268 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
2269 }
2270
2271 /*
2272 * Workaround for SmartSpeed on 82541 and 82547 controllers
2273 */
2274 static void
2275 emx_smartspeed(struct emx_softc *sc)
2276 {
2277 uint16_t phy_tmp;
2278
2279 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
2280 sc->hw.mac.autoneg == 0 ||
2281 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2282 return;
2283
2284 if (sc->smartspeed == 0) {
2285 /*
2286 * If Master/Slave config fault is asserted twice,
2287 * we assume back-to-back
2288 */
2289 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2290 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2291 return;
2292 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2293 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2294 e1000_read_phy_reg(&sc->hw,
2295 PHY_1000T_CTRL, &phy_tmp);
2296 if (phy_tmp & CR_1000T_MS_ENABLE) {
2297 phy_tmp &= ~CR_1000T_MS_ENABLE;
2298 e1000_write_phy_reg(&sc->hw,
2299 PHY_1000T_CTRL, phy_tmp);
2300 sc->smartspeed++;
2301 if (sc->hw.mac.autoneg &&
2302 !e1000_phy_setup_autoneg(&sc->hw) &&
2303 !e1000_read_phy_reg(&sc->hw,
2304 PHY_CONTROL, &phy_tmp)) {
2305 phy_tmp |= MII_CR_AUTO_NEG_EN |
2306 MII_CR_RESTART_AUTO_NEG;
2307 e1000_write_phy_reg(&sc->hw,
2308 PHY_CONTROL, phy_tmp);
2309 }
2310 }
2311 }
2312 return;
2313 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) {
2314 /* If still no link, perhaps using 2/3 pair cable */
2315 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2316 phy_tmp |= CR_1000T_MS_ENABLE;
2317 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2318 if (sc->hw.mac.autoneg &&
2319 !e1000_phy_setup_autoneg(&sc->hw) &&
2320 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
2321 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2322 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
2323 }
2324 }
2325
2326 /* Restart process after EMX_SMARTSPEED_MAX iterations */
2327 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX)
2328 sc->smartspeed = 0;
2329 }
2330
2331 static int
2332 emx_create_tx_ring(struct emx_txdata *tdata)
2333 {
2334 device_t dev = tdata->sc->dev;
2335 struct emx_txbuf *tx_buffer;
2336 int error, i, tsize, ntxd;
2337
2338 /*
2339 * Validate number of transmit descriptors. It must not exceed
2340 * hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
2341 */
2342 ntxd = device_getenv_int(dev, "txd", emx_txd);
2343 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
2344 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) {
2345 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
2346 EMX_DEFAULT_TXD, ntxd);
2347 tdata->num_tx_desc = EMX_DEFAULT_TXD;
2348 } else {
2349 tdata->num_tx_desc = ntxd;
2350 }
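/*
 * For example, assuming a 16-byte struct e1000_tx_desc and a 128-byte
 * EMX_DBA_ALIGN: ntxd = 512 gives 512 * 16 = 8192, a multiple of 128,
 * and is accepted; any ntxd that is not a multiple of 8 fails the
 * alignment test above and falls back to EMX_DEFAULT_TXD.
 */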
2351
2352 /*
2353 * Allocate Transmit Descriptor ring
2354 */
2355 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc),
2356 EMX_DBA_ALIGN);
2357 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag,
2358 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
2359 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap,
2360 &tdata->tx_desc_paddr);
2361 if (tdata->tx_desc_base == NULL) {
2362 device_printf(dev, "Unable to allocate tx_desc memory\n");
2363 return ENOMEM;
2364 }
2365
2366 tsize = __VM_CACHELINE_ALIGN(
2367 sizeof(struct emx_txbuf) * tdata->num_tx_desc);
2368 tdata->tx_buf = kmalloc(tsize, M_DEVBUF,
2369 M_WAITOK | M_ZERO | M_CACHEALIGN);
2370
2371 /*
2372 * Create DMA tags for tx buffers
2373 */
2374 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */
2375 1, 0, /* alignment, bounds */
2376 BUS_SPACE_MAXADDR, /* lowaddr */
2377 BUS_SPACE_MAXADDR, /* highaddr */
2378 EMX_TSO_SIZE, /* maxsize */
2379 EMX_MAX_SCATTER, /* nsegments */
2380 EMX_MAX_SEGSIZE, /* maxsegsize */
2381 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2382 BUS_DMA_ONEBPAGE, /* flags */
2383 &tdata->txtag);
2384 if (error) {
2385 device_printf(dev, "Unable to allocate TX DMA tag\n");
2386 kfree(tdata->tx_buf, M_DEVBUF);
2387 tdata->tx_buf = NULL;
2388 return error;
2389 }
2390
2391 /*
2392 * Create DMA maps for tx buffers
2393 */
2394 for (i = 0; i < tdata->num_tx_desc; i++) {
2395 tx_buffer = &tdata->tx_buf[i];
2396
2397 error = bus_dmamap_create(tdata->txtag,
2398 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2399 &tx_buffer->map);
2400 if (error) {
2401 device_printf(dev, "Unable to create TX DMA map\n");
2402 emx_destroy_tx_ring(tdata, i);
2403 return error;
2404 }
2405 }
2406
2407 /*
2408 * Setup TX parameters
2409 */
2410 tdata->spare_tx_desc = EMX_TX_SPARE;
2411 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG;
2412
2413 /*
2414 * Keep following relationship between spare_tx_desc, oact_tx_desc
2415 * and tx_intr_nsegs:
2416 * (spare_tx_desc + EMX_TX_RESERVED) <=
2417 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs
2418 */
2419 tdata->oact_tx_desc = tdata->num_tx_desc / 8;
2420 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX)
2421 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX;
2422 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED)
2423 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED;
2424
2425 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16;
2426 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc)
2427 tdata->tx_intr_nsegs = tdata->oact_tx_desc;
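/*
 * A worked example with a hypothetical ring size: num_tx_desc = 512
 * starts oact_tx_desc at 512 / 8 = 64 and tx_intr_nsegs at
 * 512 / 16 = 32; the adjustments above then enforce the relationship
 * so that an RS-triggering interrupt is always requested at or before
 * the point where the queue would be marked oactive.
 */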
2428
2429 /*
2430 * Pull up an extra 4 bytes into the first data segment for TSO, see:
2431 * 82571/82572 specification update errata #7
2432 *
2433 * Same applies to I217 (and maybe I218 and I219).
2434 *
2435 * NOTE:
2436 * 4 bytes instead of the 2 bytes mentioned in the errata are
2437 * pulled, mainly to keep the rest of the data properly aligned.
2438 */
2439 if (tdata->sc->hw.mac.type == e1000_82571 ||
2440 tdata->sc->hw.mac.type == e1000_82572 ||
2441 tdata->sc->hw.mac.type == e1000_pch_lpt ||
2442 tdata->sc->hw.mac.type == e1000_pch_spt ||
2443 tdata->sc->hw.mac.type == e1000_pch_cnp)
2444 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX;
2445
2446 return (0);
2447 }
2448
2449 static void
2450 emx_init_tx_ring(struct emx_txdata *tdata)
2451 {
2452 /* Clear the old ring contents */
2453 bzero(tdata->tx_desc_base,
2454 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc);
2455
2456 /* Reset state */
2457 tdata->next_avail_tx_desc = 0;
2458 tdata->next_tx_to_clean = 0;
2459 tdata->num_tx_desc_avail = tdata->num_tx_desc;
2460 tdata->tx_nmbuf = 0;
2461 tdata->tx_running = 0;
2462
2463 tdata->tx_flags |= EMX_TXFLAG_ENABLED;
2464 if (tdata->sc->tx_ring_inuse > 1) {
2465 tdata->tx_flags |= EMX_TXFLAG_FORCECTX;
2466 if (bootverbose) {
2467 if_printf(&tdata->sc->arpcom.ac_if,
2468 "TX %d force ctx setup\n", tdata->idx);
2469 }
2470 }
2471 }
2472
2473 static void
2474 emx_init_tx_unit(struct emx_softc *sc)
2475 {
2476 uint32_t tctl, tarc, tipg = 0, txdctl;
2477 int i;
2478
2479 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2480 struct emx_txdata *tdata = &sc->tx_data[i];
2481 uint64_t bus_addr;
2482
2483 /* Setup the Base and Length of the Tx Descriptor Ring */
2484 bus_addr = tdata->tx_desc_paddr;
2485 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i),
2486 tdata->num_tx_desc * sizeof(struct e1000_tx_desc));
2487 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i),
2488 (uint32_t)(bus_addr >> 32));
2489 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i),
2490 (uint32_t)bus_addr);
2491 /* Setup the HW Tx Head and Tail descriptor pointers */
2492 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0);
2493 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0);
2494
2495 txdctl = 0x1f; /* PTHRESH */
2496 txdctl |= 1 << 8; /* HTHRESH */
2497 txdctl |= 1 << 16; /* WTHRESH */
2498 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
2499 txdctl |= E1000_TXDCTL_GRAN;
2500 txdctl |= 1 << 25; /* LWTHRESH */
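/*
 * Assuming E1000_TXDCTL_GRAN is bit 24 (thresholds counted in
 * descriptors rather than cache lines), the value composed above
 * works out to 0x0341011f: prefetch when 31 descriptors remain,
 * host and write-back thresholds of 1, and a low watermark of 1.
 */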
2501
2502 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(i), txdctl);
2503 }
2504
2505 /* Set the default values for the Tx Inter Packet Gap timer */
2506 switch (sc->hw.mac.type) {
2507 case e1000_80003es2lan:
2508 tipg = DEFAULT_82543_TIPG_IPGR1;
2509 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2510 E1000_TIPG_IPGR2_SHIFT;
2511 break;
2512
2513 default:
2514 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
2515 sc->hw.phy.media_type == e1000_media_type_internal_serdes)
2516 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2517 else
2518 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2519 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2520 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2521 break;
2522 }
2523
2524 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg);
2525
2526 /* NOTE: 0 is not allowed for TIDV */
2527 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1);
2528 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0);
2529
2530 /*
2531 * Errata workaround (obtained from Linux). This is necessary
2532 * to make multiple TX queues work on 82574.
2533 * XXX can't find it in any published errata though.
2534 */
2535 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0));
2536 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl);
2537
2538 if (sc->hw.mac.type == e1000_82571 ||
2539 sc->hw.mac.type == e1000_82572) {
2540 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2541 tarc |= EMX_TARC_SPEED_MODE;
2542 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2543 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2544 /* errata: program both queues to unweighted RR */
2545 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2546 tarc |= 1;
2547 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2548 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2549 tarc |= 1;
2550 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2551 } else if (sc->hw.mac.type == e1000_82574) {
2552 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2553 tarc |= EMX_TARC_ERRATA;
2554 if (sc->tx_ring_inuse > 1) {
2555 tarc |= (EMX_TARC_COMPENSATION_MODE | EMX_TARC_MQ_FIX);
2556 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2557 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2558 } else {
2559 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2560 }
2561 }
2562
2563 /* Program the Transmit Control Register */
2564 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL);
2565 tctl &= ~E1000_TCTL_CT;
2566 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2567 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2568 tctl |= E1000_TCTL_MULR;
2569
2570 /* This write will effectively turn on the transmit unit. */
2571 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);
2572
2573 if (sc->hw.mac.type == e1000_82571 ||
2574 sc->hw.mac.type == e1000_82572 ||
2575 sc->hw.mac.type == e1000_80003es2lan) {
2576 /* Bit 28 of TARC1 must be cleared when MULR is enabled */
2577 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2578 tarc &= ~(1 << 28);
2579 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2580 } else if (sc->hw.mac.type >= e1000_pch_spt) {
2581 uint32_t reg;
2582
2583 reg = E1000_READ_REG(&sc->hw, E1000_IOSFPC);
2584 reg |= E1000_RCTL_RDMTS_HEX;
2585 E1000_WRITE_REG(&sc->hw, E1000_IOSFPC, reg);
2586 reg = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2587 reg |= E1000_TARC0_CB_MULTIQ_3_REQ;
2588 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), reg);
2589 }
2590
2591 if (sc->tx_ring_inuse > 1) {
2592 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2593 tarc &= ~EMX_TARC_COUNT_MASK;
2594 tarc |= 1;
2595 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2596
2597 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2598 tarc &= ~EMX_TARC_COUNT_MASK;
2599 tarc |= 1;
2600 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2601 }
2602 }
2603
2604 static void
2605 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc)
2606 {
2607 struct emx_txbuf *tx_buffer;
2608 int i;
2609
2610 /* Free Transmit Descriptor ring */
2611 if (tdata->tx_desc_base) {
2612 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap);
2613 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base,
2614 tdata->tx_desc_dmap);
2615 bus_dma_tag_destroy(tdata->tx_desc_dtag);
2616
2617 tdata->tx_desc_base = NULL;
2618 }
2619
2620 if (tdata->tx_buf == NULL)
2621 return;
2622
2623 for (i = 0; i < ndesc; i++) {
2624 tx_buffer = &tdata->tx_buf[i];
2625
2626 KKASSERT(tx_buffer->m_head == NULL);
2627 bus_dmamap_destroy(tdata->txtag, tx_buffer->map);
2628 }
2629 bus_dma_tag_destroy(tdata->txtag);
2630
2631 kfree(tdata->tx_buf, M_DEVBUF);
2632 tdata->tx_buf = NULL;
2633 }
2634
2635 /*
2636 * The offload context needs to be set when we transfer the first
2637 * packet of a particular protocol (TCP/UDP). This routine has been
2638 * enhanced to deal with inserted VLAN headers.
2639 *
2640 * If the new packet's ether header length, ip header length and
2641 * csum offloading type are same as the previous packet, we should
2642 * avoid allocating a new csum context descriptor; mainly to take
2643 * advantage of the pipeline effect of the TX data read request.
2644 *
2645 * This function returns the number of TX descriptors allocated for
2646 * csum context.
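 *
 * For example, for a hypothetical TCP/IPv4 packet without VLAN:
 * csum_lhlen = 14 and csum_iphlen = 20 yield ipcss = 14, ipcse = 33
 * and ipcso = 14 + offsetof(struct ip, ip_sum) = 24; the payload
 * checksum then runs from tucss = 34 to the end of the packet
 * (tucse = 0), with the result stored at tucso = 34 +
 * offsetof(struct tcphdr, th_sum) = 50.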
2647 */
2648 static int
2649 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2650 uint32_t *txd_upper, uint32_t *txd_lower)
2651 {
2652 struct e1000_context_desc *TXD;
2653 int curr_txd, ehdrlen, csum_flags;
2654 uint32_t cmd, hdr_len, ip_hlen;
2655
2656 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2657 ip_hlen = mp->m_pkthdr.csum_iphlen;
2658 ehdrlen = mp->m_pkthdr.csum_lhlen;
2659
2660 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2661 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
2662 tdata->csum_flags == csum_flags) {
2663 /*
2664 * Same csum offload context as the previous packets;
2665 * just return.
2666 */
2667 *txd_upper = tdata->csum_txd_upper;
2668 *txd_lower = tdata->csum_txd_lower;
2669 return 0;
2670 }
2671
2672 /*
2673 * Setup a new csum offload context.
2674 */
2675
2676 curr_txd = tdata->next_avail_tx_desc;
2677 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2678
2679 cmd = 0;
2680
2681 /* Setup of IP header checksum. */
2682 if (csum_flags & CSUM_IP) {
2683 /*
2684 * Start offset for header checksum calculation.
2685 * End offset for header checksum calculation.
2686 * Offset of place to put the checksum.
2687 */
2688 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2689 TXD->lower_setup.ip_fields.ipcse =
2690 htole16(ehdrlen + ip_hlen - 1);
2691 TXD->lower_setup.ip_fields.ipcso =
2692 ehdrlen + offsetof(struct ip, ip_sum);
2693 cmd |= E1000_TXD_CMD_IP;
2694 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2695 }
2696 hdr_len = ehdrlen + ip_hlen;
2697
2698 if (csum_flags & CSUM_TCP) {
2699 /*
2700 * Start offset for payload checksum calculation.
2701 * End offset for payload checksum calculation.
2702 * Offset of place to put the checksum.
2703 */
2704 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2705 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2706 TXD->upper_setup.tcp_fields.tucso =
2707 hdr_len + offsetof(struct tcphdr, th_sum);
2708 cmd |= E1000_TXD_CMD_TCP;
2709 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2710 } else if (csum_flags & CSUM_UDP) {
2711 /*
2712 * Start offset for payload checksum calculation.
2713 * End offset for payload checksum calculation.
2714 * Offset of place to put the checksum.
2715 */
2716 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2717 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2718 TXD->upper_setup.tcp_fields.tucso =
2719 hdr_len + offsetof(struct udphdr, uh_sum);
2720 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2721 }
2722
2723 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2724 E1000_TXD_DTYP_D; /* Data descr */
2725
2726 /* Save the information for this csum offloading context */
2727 tdata->csum_lhlen = ehdrlen;
2728 tdata->csum_iphlen = ip_hlen;
2729 tdata->csum_flags = csum_flags;
2730 tdata->csum_txd_upper = *txd_upper;
2731 tdata->csum_txd_lower = *txd_lower;
2732
2733 TXD->tcp_seg_setup.data = htole32(0);
2734 TXD->cmd_and_length =
2735 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
2736
2737 if (++curr_txd == tdata->num_tx_desc)
2738 curr_txd = 0;
2739
2740 KKASSERT(tdata->num_tx_desc_avail > 0);
2741 tdata->num_tx_desc_avail--;
2742
2743 tdata->next_avail_tx_desc = curr_txd;
2744 return 1;
2745 }
2746
2747 static void
2748 emx_txeof(struct emx_txdata *tdata)
2749 {
2750 struct emx_txbuf *tx_buffer;
2751 int first, num_avail;
2752
2753 if (tdata->tx_dd_head == tdata->tx_dd_tail)
2754 return;
2755
2756 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2757 return;
2758
2759 num_avail = tdata->num_tx_desc_avail;
2760 first = tdata->next_tx_to_clean;
2761
2762 while (tdata->tx_dd_head != tdata->tx_dd_tail) {
2763 int dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2764 struct e1000_tx_desc *tx_desc;
2765
2766 tx_desc = &tdata->tx_desc_base[dd_idx];
2767 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2768 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2769
2770 if (++dd_idx == tdata->num_tx_desc)
2771 dd_idx = 0;
2772
2773 while (first != dd_idx) {
2774 logif(pkt_txclean);
2775
2776 KKASSERT(num_avail < tdata->num_tx_desc);
2777 num_avail++;
2778
2779 tx_buffer = &tdata->tx_buf[first];
2780 if (tx_buffer->m_head)
2781 emx_free_txbuf(tdata, tx_buffer);
2782
2783 if (++first == tdata->num_tx_desc)
2784 first = 0;
2785 }
2786 } else {
2787 break;
2788 }
2789 }
2790 tdata->next_tx_to_clean = first;
2791 tdata->num_tx_desc_avail = num_avail;
2792
2793 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2794 tdata->tx_dd_head = 0;
2795 tdata->tx_dd_tail = 0;
2796 }
2797
2798 if (!EMX_IS_OACTIVE(tdata)) {
2799 ifsq_clr_oactive(tdata->ifsq);
2800
2801 /* All clean, turn off the timer */
2802 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2803 ifsq_watchdog_set_count(&tdata->tx_watchdog, 0);
2804 }
2805 tdata->tx_running = EMX_TX_RUNNING;
2806 }
2807
2808 static void
2809 emx_tx_collect(struct emx_txdata *tdata, boolean_t gc)
2810 {
2811 struct emx_txbuf *tx_buffer;
2812 int tdh, first, num_avail, dd_idx = -1;
2813
2814 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2815 return;
2816
2817 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx));
2818 if (tdh == tdata->next_tx_to_clean) {
2819 if (gc && tdata->tx_nmbuf > 0)
2820 tdata->tx_running = EMX_TX_RUNNING;
2821 return;
2822 }
2823 if (gc)
2824 tdata->tx_gc++;
2825
2826 if (tdata->tx_dd_head != tdata->tx_dd_tail)
2827 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2828
2829 num_avail = tdata->num_tx_desc_avail;
2830 first = tdata->next_tx_to_clean;
2831
2832 while (first != tdh) {
2833 logif(pkt_txclean);
2834
2835 KKASSERT(num_avail < tdata->num_tx_desc);
2836 num_avail++;
2837
2838 tx_buffer = &tdata->tx_buf[first];
2839 if (tx_buffer->m_head)
2840 emx_free_txbuf(tdata, tx_buffer);
2841
2842 if (first == dd_idx) {
2843 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2844 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2845 tdata->tx_dd_head = 0;
2846 tdata->tx_dd_tail = 0;
2847 dd_idx = -1;
2848 } else {
2849 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2850 }
2851 }
2852
2853 if (++first == tdata->num_tx_desc)
2854 first = 0;
2855 }
2856 tdata->next_tx_to_clean = first;
2857 tdata->num_tx_desc_avail = num_avail;
2858
2859 if (!EMX_IS_OACTIVE(tdata)) {
2860 ifsq_clr_oactive(tdata->ifsq);
2861
2862 /* All clean, turn off the timer */
2863 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2864 ifsq_watchdog_set_count(&tdata->tx_watchdog, 0);
2865 }
2866 if (!gc || tdata->tx_nmbuf > 0)
2867 tdata->tx_running = EMX_TX_RUNNING;
2868 }
2869
2870 /*
2871 * When the link is lost there is sometimes work still pending in
2872 * the TX ring, which would result in a watchdog; rather than allow
2873 * that, do an attempted cleanup here and then reinit. Note that
2874 * this has been seen mostly with fiber adapters.
2875 */
2876 static void
2877 emx_tx_purge(struct emx_softc *sc)
2878 {
2879 int i;
2880
2881 if (sc->link_active)
2882 return;
2883
2884 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2885 struct emx_txdata *tdata = &sc->tx_data[i];
2886
2887 if (tdata->tx_watchdog.wd_timer) {
2888 emx_tx_collect(tdata, FALSE);
2889 if (tdata->tx_watchdog.wd_timer) {
2890 if_printf(&sc->arpcom.ac_if,
2891 "Link lost, TX pending, reinit\n");
2892 emx_init(sc);
2893 return;
2894 }
2895 }
2896 }
2897 }
2898
2899 static int
2900 emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2901 {
2902 struct mbuf *m;
2903 bus_dma_segment_t seg;
2904 bus_dmamap_t map;
2905 struct emx_rxbuf *rx_buffer;
2906 int error, nseg;
2907
2908 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
2909 if (m == NULL) {
2910 if (init) {
2911 if_printf(&rdata->sc->arpcom.ac_if,
2912 "Unable to allocate RX mbuf\n");
2913 }
2914 return (ENOBUFS);
2915 }
2916 m->m_len = m->m_pkthdr.len = MCLBYTES;
2917
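/*
 * ETHER_ALIGN (2 bytes) offsets the 14-byte Ethernet header so that
 * the IP header following it lands on a 4-byte boundary.
 */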
2918 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN)
2919 m_adj(m, ETHER_ALIGN);
2920
2921 error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
2922 rdata->rx_sparemap, m,
2923 &seg, 1, &nseg, BUS_DMA_NOWAIT);
2924 if (error) {
2925 m_freem(m);
2926 if (init) {
2927 if_printf(&rdata->sc->arpcom.ac_if,
2928 "Unable to load RX mbuf\n");
2929 }
2930 return (error);
2931 }
2932
2933 rx_buffer = &rdata->rx_buf[i];
2934 if (rx_buffer->m_head != NULL)
2935 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2936
2937 map = rx_buffer->map;
2938 rx_buffer->map = rdata->rx_sparemap;
2939 rdata->rx_sparemap = map;
2940
2941 rx_buffer->m_head = m;
2942 rx_buffer->paddr = seg.ds_addr;
2943
2944 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
2945 return (0);
2946 }
2947
2948 static int
2949 emx_create_rx_ring(struct emx_rxdata *rdata)
2950 {
2951 device_t dev = rdata->sc->dev;
2952 struct emx_rxbuf *rx_buffer;
2953 int i, error, rsize, nrxd;
2954
2955 /*
2956 * Validate number of receive descriptors. It must not exceed
2957 * hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
2958 */
2959 nrxd = device_getenv_int(dev, "rxd", emx_rxd);
2960 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 ||
2961 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) {
2962 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
2963 EMX_DEFAULT_RXD, nrxd);
2964 rdata->num_rx_desc = EMX_DEFAULT_RXD;
2965 } else {
2966 rdata->num_rx_desc = nrxd;
2967 }
2968
2969 /*
2970 * Allocate Receive Descriptor ring
2971 */
2972 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t),
2973 EMX_DBA_ALIGN);
2974 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag,
2975 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
2976 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap,
2977 &rdata->rx_desc_paddr);
2978 if (rdata->rx_desc == NULL) {
2979 device_printf(dev, "Unable to allocate rx_desc memory\n");
2980 return ENOMEM;
2981 }
2982
2983 rsize = __VM_CACHELINE_ALIGN(
2984 sizeof(struct emx_rxbuf) * rdata->num_rx_desc);
2985 rdata->rx_buf = kmalloc(rsize, M_DEVBUF,
2986 M_WAITOK | M_ZERO | M_CACHEALIGN);
2987
2988 /*
2989 * Create DMA tag for rx buffers
2990 */
2991 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */
2992 1, 0, /* alignment, bounds */
2993 BUS_SPACE_MAXADDR, /* lowaddr */
2994 BUS_SPACE_MAXADDR, /* highaddr */
2995 MCLBYTES, /* maxsize */
2996 1, /* nsegments */
2997 MCLBYTES, /* maxsegsize */
2998 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
2999 &rdata->rxtag);
3000 if (error) {
3001 device_printf(dev, "Unable to allocate RX DMA tag\n");
3002 kfree(rdata->rx_buf, M_DEVBUF);
3003 rdata->rx_buf = NULL;
3004 return error;
3005 }
3006
3007 /*
3008 * Create spare DMA map for rx buffers
3009 */
3010 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
3011 &rdata->rx_sparemap);
3012 if (error) {
3013 device_printf(dev, "Unable to create spare RX DMA map\n");
3014 bus_dma_tag_destroy(rdata->rxtag);
3015 kfree(rdata->rx_buf, M_DEVBUF);
3016 rdata->rx_buf = NULL;
3017 return error;
3018 }
3019
3020 /*
3021 * Create DMA maps for rx buffers
3022 */
3023 for (i = 0; i < rdata->num_rx_desc; i++) {
3024 rx_buffer = &rdata->rx_buf[i];
3025
3026 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
3027 &rx_buffer->map);
3028 if (error) {
3029 device_printf(dev, "Unable to create RX DMA map\n");
3030 emx_destroy_rx_ring(rdata, i);
3031 return error;
3032 }
3033 }
3034 return (0);
3035 }
3036
3037 static void
3038 emx_free_rx_ring(struct emx_rxdata *rdata)
3039 {
3040 int i;
3041
3042 for (i = 0; i < rdata->num_rx_desc; i++) {
3043 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];
3044
3045 if (rx_buffer->m_head != NULL) {
3046 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
3047 m_freem(rx_buffer->m_head);
3048 rx_buffer->m_head = NULL;
3049 }
3050 }
3051
3052 if (rdata->fmp != NULL)
3053 m_freem(rdata->fmp);
3054 rdata->fmp = NULL;
3055 rdata->lmp = NULL;
3056 }
3057
3058 static void
3059 emx_free_tx_ring(struct emx_txdata *tdata)
3060 {
3061 int i;
3062
3063 for (i = 0; i < tdata->num_tx_desc; i++) {
3064 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i];
3065
3066 if (tx_buffer->m_head != NULL)
3067 emx_free_txbuf(tdata, tx_buffer);
3068 }
3069
3070 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX;
3071
3072 tdata->csum_flags = 0;
3073 tdata->csum_lhlen = 0;
3074 tdata->csum_iphlen = 0;
3075 tdata->csum_thlen = 0;
3076 tdata->csum_mss = 0;
3077 tdata->csum_pktlen = 0;
3078
3079 tdata->tx_dd_head = 0;
3080 tdata->tx_dd_tail = 0;
3081 tdata->tx_nsegs = 0;
3082 }
3083
3084 static int
3085 emx_init_rx_ring(struct emx_rxdata *rdata)
3086 {
3087 int i, error;
3088
3089 /* Reset descriptor ring */
3090 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
3091
3092 /* Allocate new ones. */
3093 for (i = 0; i < rdata->num_rx_desc; i++) {
3094 error = emx_newbuf(rdata, i, 1);
3095 if (error)
3096 return (error);
3097 }
3098
3099 /* Setup our descriptor pointers */
3100 rdata->next_rx_desc_to_check = 0;
3101
3102 return (0);
3103 }
3104
3105 static void
3106 emx_init_rx_unit(struct emx_softc *sc)
3107 {
3108 struct ifnet *ifp = &sc->arpcom.ac_if;
3109 uint64_t bus_addr;
3110 uint32_t rctl, itr, rfctl, rxcsum;
3111 int i;
3112
3113 /*
3114 * Make sure receives are disabled while setting
3115 * up the descriptor ring
3116 */
3117 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
3118 /* On 82574, never disable receives once they have been enabled */
3119 if (sc->hw.mac.type != e1000_82574)
3120 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3121
3122 /*
3123 * Set the interrupt throttling rate. Value is calculated
3124 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3125 */
3126 if (sc->int_throttle_ceil)
3127 itr = 1000000000 / 256 / sc->int_throttle_ceil;
3128 else
3129 itr = 0;
3130 emx_set_itr(sc, itr);
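/*
 * For example, a hypothetical int_throttle_ceil of 6000 interrupts/s
 * gives itr = 1000000000 / 256 / 6000 = 651 (in 256ns units), while
 * itr = 0 disables interrupt throttling entirely.
 */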
3131
3132 /* Use extended RX descriptor */
3133 rfctl = E1000_READ_REG(&sc->hw, E1000_RFCTL);
3134 rfctl |= E1000_RFCTL_EXTEN;
3135 /* Disable accelerated acknowledgement */
3136 if (sc->hw.mac.type == e1000_82574)
3137 rfctl |= E1000_RFCTL_ACK_DIS;
3138 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
3139
3140 /*
3141 * Receive Checksum Offload for TCP and UDP
3142 *
3143 * Checksum offloading is also enabled if multiple receive
3144 * queues are to be supported, since we need it to figure out
3145 * the packet type.
3146 */
3147 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
3148 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
3149 sc->rx_ring_cnt > 1) {
3150 /*
3151 * NOTE:
3152 * PCSD must be enabled to enable multiple
3153 * receive queues.
3154 */
3155 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3156 E1000_RXCSUM_PCSD;
3157 } else {
3158 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3159 E1000_RXCSUM_PCSD);
3160 }
3161 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
3162
3163 /*
3164 * Configure multiple receive queues (RSS)
3165 */
3166 if (sc->rx_ring_cnt > 1) {
3167 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
3168 int r, j;
3169
3170 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
3171 ("invalid number of RX ring (%d)", sc->rx_ring_cnt));
3172
3173 /*
3174 * NOTE:
3175 * When we reach here, RSS has already been disabled
3176 * in emx_stop(), so we can safely configure the RSS key
3177 * and redirect table.
3178 */
3179
3180 /*
3181 * Configure RSS key
3182 */
3183 toeplitz_get_key(key, sizeof(key));
3184 for (i = 0; i < EMX_NRSSRK; ++i) {
3185 uint32_t rssrk;
3186
3187 rssrk = EMX_RSSRK_VAL(key, i);
3188 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
3189
3190 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
3191 }
3192
3193 /*
3194 * Configure RSS redirect table.
3195 */
3196 if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table,
3197 EMX_RDRTABLE_SIZE);
3198
3199 r = 0;
3200 for (j = 0; j < EMX_NRETA; ++j) {
3201 uint32_t reta = 0;
3202
3203 for (i = 0; i < EMX_RETA_SIZE; ++i) {
3204 uint32_t q;
3205
3206 q = sc->rdr_table[r] << EMX_RETA_RINGIDX_SHIFT;
3207 reta |= q << (8 * i);
3208 ++r;
3209 }
3210 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
3211 E1000_WRITE_REG(&sc->hw, E1000_RETA(j), reta);
3212 }
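/*
 * A sketch of the packing above, assuming EMX_RETA_SIZE is 4 and
 * EMX_RETA_RINGIDX_SHIFT is 7: each 32-bit RETA register holds 4
 * consecutive one-byte redirect entries, so a rdr_table prefix of
 * { 0, 1, 0, 1 } becomes RETA(0) = 0x80008000, alternating RX
 * ring 0 and ring 1 for consecutive hash values.
 */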
3213
3214 /*
3215 * Enable multiple receive queues.
3216 * Enable IPv4 RSS standard hash functions.
3217 * Disable RSS interrupt.
3218 */
3219 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
3220 E1000_MRQC_ENABLE_RSS_2Q |
3221 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3222 E1000_MRQC_RSS_FIELD_IPV4);
3223 }
3224
3225 /*
3226 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3227 * long latencies are observed, like Lenovo X60. This
3228 * change eliminates the problem, but since having positive
3229 * values in RDTR is a known source of problems on other
3230 * platforms another solution is being sought.
3231 */
3232 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
3233 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
3234 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
3235 }
3236
3237 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3238 struct emx_rxdata *rdata = &sc->rx_data[i];
3239
3240 /*
3241 * Setup the Base and Length of the Rx Descriptor Ring
3242 */
3243 bus_addr = rdata->rx_desc_paddr;
3244 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
3245 rdata->num_rx_desc * sizeof(emx_rxdesc_t));
3246 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
3247 (uint32_t)(bus_addr >> 32));
3248 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
3249 (uint32_t)bus_addr);
3250
3251 /*
3252 * Setup the HW Rx Head and Tail Descriptor Pointers
3253 */
3254 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
3255 E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
3256 sc->rx_data[i].num_rx_desc - 1);
3257 }
3258
3259 /* Set PTHRESH for improved jumbo performance */
3260 if (ifp->if_mtu > ETHERMTU && sc->hw.mac.type == e1000_82574) {
3261 uint32_t rxdctl;
3262
3263 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3264 rxdctl = E1000_READ_REG(&sc->hw, E1000_RXDCTL(i));
3265 rxdctl |= 0x20; /* PTHRESH */
3266 rxdctl |= 4 << 8; /* HTHRESH */
3267 rxdctl |= 4 << 16; /* WTHRESH */
3268 rxdctl |= 1 << 24; /* Switch to granularity */
3269 E1000_WRITE_REG(&sc->hw, E1000_RXDCTL(i), rxdctl);
3270 }
3271 }
3272
3273 if (sc->hw.mac.type >= e1000_pch2lan) {
3274 if (ifp->if_mtu > ETHERMTU)
3275 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
3276 else
3277 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
3278 }
3279
3280 /* Setup the Receive Control Register */
3281 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3282 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3283 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
3284 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3285
3286 /* Make sure VLAN Filters are off */
3287 rctl &= ~E1000_RCTL_VFE;
3288
3289 /* Don't store bad packets */
3290 rctl &= ~E1000_RCTL_SBP;
3291
3292 /* MCLBYTES */
3293 rctl |= E1000_RCTL_SZ_2048;
3294
3295 if (ifp->if_mtu > ETHERMTU)
3296 rctl |= E1000_RCTL_LPE;
3297 else
3298 rctl &= ~E1000_RCTL_LPE;
3299
3300 /* Enable Receives */
3301 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
3302 }
3303
3304 static void
3305 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
3306 {
3307 struct emx_rxbuf *rx_buffer;
3308 int i;
3309
3310 /* Free Receive Descriptor ring */
3311 if (rdata->rx_desc) {
3312 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
3313 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
3314 rdata->rx_desc_dmap);
3315 bus_dma_tag_destroy(rdata->rx_desc_dtag);
3316
3317 rdata->rx_desc = NULL;
3318 }
3319
3320 if (rdata->rx_buf == NULL)
3321 return;
3322
3323 for (i = 0; i < ndesc; i++) {
3324 rx_buffer = &rdata->rx_buf[i];
3325
3326 KKASSERT(rx_buffer->m_head == NULL);
3327 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
3328 }
3329 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
3330 bus_dma_tag_destroy(rdata->rxtag);
3331
3332 kfree(rdata->rx_buf, M_DEVBUF);
3333 rdata->rx_buf = NULL;
3334 }
3335
3336 static void
3337 emx_rxeof(struct emx_rxdata *rdata, int count)
3338 {
3339 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
3340 uint32_t staterr;
3341 emx_rxdesc_t *current_desc;
3342 struct mbuf *mp;
3343 int i, cpuid = mycpuid;
3344
3345 i = rdata->next_rx_desc_to_check;
3346 current_desc = &rdata->rx_desc[i];
3347 staterr = le32toh(current_desc->rxd_staterr);
3348
3349 if (!(staterr & E1000_RXD_STAT_DD))
3350 return;
3351
3352 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
3353 struct pktinfo *pi = NULL, pi0;
3354 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
3355 struct mbuf *m = NULL;
3356 int eop, len;
3357
3358 logif(pkt_receive);
3359
3360 mp = rx_buf->m_head;
3361
3362 /*
3363 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3364 * needs to access the last received byte in the mbuf.
3365 */
3366 bus_dmamap_sync(rdata->rxtag, rx_buf->map,
3367 BUS_DMASYNC_POSTREAD);
3368
3369 len = le16toh(current_desc->rxd_length);
3370 if (staterr & E1000_RXD_STAT_EOP) {
3371 count--;
3372 eop = 1;
3373 } else {
3374 eop = 0;
3375 }
3376
3377 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3378 uint16_t vlan = 0;
3379 uint32_t mrq, rss_hash;
3380
3381 /*
3382 * Save the necessary information before
3383 * emx_newbuf() destroys it.
3384 */
3385 if ((staterr & E1000_RXD_STAT_VP) && eop)
3386 vlan = le16toh(current_desc->rxd_vlan);
3387
3388 mrq = le32toh(current_desc->rxd_mrq);
3389 rss_hash = le32toh(current_desc->rxd_rss);
3390
3391 EMX_RSS_DPRINTF(rdata->sc, 10,
3392 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
3393 rdata->idx, mrq, rss_hash);
3394
3395 if (emx_newbuf(rdata, i, 0) != 0) {
3396 IFNET_STAT_INC(ifp, iqdrops, 1);
3397 goto discard;
3398 }
3399
3400 /* Assign correct length to the current fragment */
3401 mp->m_len = len;
3402
3403 if (rdata->fmp == NULL) {
3404 mp->m_pkthdr.len = len;
3405 rdata->fmp = mp; /* Store the first mbuf */
3406 rdata->lmp = mp;
3407 } else {
3408 /*
3409 * Chain mbuf's together
3410 */
3411 rdata->lmp->m_next = mp;
3412 rdata->lmp = rdata->lmp->m_next;
3413 rdata->fmp->m_pkthdr.len += len;
3414 }
3415
3416 if (eop) {
3417 rdata->fmp->m_pkthdr.rcvif = ifp;
3418 IFNET_STAT_INC(ifp, ipackets, 1);
3419
3420 if (ifp->if_capenable & IFCAP_RXCSUM)
3421 emx_rxcsum(staterr, rdata->fmp);
3422
3423 if (staterr & E1000_RXD_STAT_VP) {
3424 rdata->fmp->m_pkthdr.ether_vlantag =
3425 vlan;
3426 rdata->fmp->m_flags |= M_VLANTAG;
3427 }
3428 m = rdata->fmp;
3429 rdata->fmp = NULL;
3430 rdata->lmp = NULL;
3431
3432 if (ifp->if_capenable & IFCAP_RSS) {
3433 pi = emx_rssinfo(m, &pi0, mrq,
3434 rss_hash, staterr);
3435 }
3436 #ifdef EMX_RSS_DEBUG
3437 rdata->rx_pkts++;
3438 #endif
3439 }
3440 } else {
3441 IFNET_STAT_INC(ifp, ierrors, 1);
3442 discard:
3443 emx_setup_rxdesc(current_desc, rx_buf);
3444 if (rdata->fmp != NULL) {
3445 m_freem(rdata->fmp);
3446 rdata->fmp = NULL;
3447 rdata->lmp = NULL;
3448 }
3449 m = NULL;
3450 }
3451
3452 if (m != NULL)
3453 ifp->if_input(ifp, m, pi, cpuid);
3454
3455 /* Advance our pointers to the next descriptor. */
3456 if (++i == rdata->num_rx_desc)
3457 i = 0;
3458
3459 current_desc = &rdata->rx_desc[i];
3460 staterr = le32toh(current_desc->rxd_staterr);
3461 }
3462 rdata->next_rx_desc_to_check = i;
3463
3464 /* Advance the E1000's Receive Queue "Tail Pointer". */
3465 if (--i < 0)
3466 i = rdata->num_rx_desc - 1;
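/*
 * RDT is left pointing at the last refilled descriptor, one behind
 * next_rx_desc_to_check, so the hardware never writes a descriptor
 * the driver has not processed yet.
 */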
3467 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i);
3468 }
3469
3470 static void
3471 emx_enable_intr(struct emx_softc *sc)
3472 {
3473 uint32_t ims_mask = IMS_ENABLE_MASK;
3474
3475 lwkt_serialize_handler_enable(&sc->main_serialize);
3476
3477 #if 0
3478 if (sc->hw.mac.type == e1000_82574) {
3479 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK);
3480 ims_mask |= EM_MSIX_MASK;
3481 }
3482 #endif
3483 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask);
3484 }
3485
3486 static void
3487 emx_disable_intr(struct emx_softc *sc)
3488 {
3489 if (sc->hw.mac.type == e1000_82574)
3490 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0);
3491 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
3492
3493 lwkt_serialize_handler_disable(&sc->main_serialize);
3494 }
3495
3496 /*
3497 * Bit of a misnomer: what this really means is to enable OS
3498 * management of the system, i.e. to disable the special hardware
3499 * management features.
3500 */
3501 static void
3502 emx_get_mgmt(struct emx_softc *sc)
3503 {
3504 /* A shared code workaround */
3505 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3506 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
3507 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3508
3509 /* disable hardware interception of ARP */
3510 manc &= ~(E1000_MANC_ARP_EN);
3511
3512 /* enable receiving management packets to the host */
3513 manc |= E1000_MANC_EN_MNG2HOST;
3514 #define E1000_MNG2HOST_PORT_623 (1 << 5)
3515 #define E1000_MNG2HOST_PORT_664 (1 << 6)
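/* 623 and 664 are the RMCP and secure RMCP ports used by ASF/IPMI */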
3516 manc2h |= E1000_MNG2HOST_PORT_623;
3517 manc2h |= E1000_MNG2HOST_PORT_664;
3518 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
3519
3520 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3521 }
3522 }
3523
3524 /*
3525 * Give control back to hardware management
3526 * controller if there is one.
3527 */
3528 static void
3529 emx_rel_mgmt(struct emx_softc *sc)
3530 {
3531 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3532 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3533
3534 /* re-enable hardware interception of ARP */
3535 manc |= E1000_MANC_ARP_EN;
3536 manc &= ~E1000_MANC_EN_MNG2HOST;
3537
3538 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3539 }
3540 }
3541
3542 /*
3543 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3544 * For ASF and Pass Through versions of f/w this means that
3545 * the driver is loaded. For AMT version (only with 82573)
3546 * of the f/w this means that the network i/f is open.
3547 */
3548 static void
3549 emx_get_hw_control(struct emx_softc *sc)
3550 {
3551 /* Let firmware know the driver has taken over */
3552 if (sc->hw.mac.type == e1000_82573) {
3553 uint32_t swsm;
3554
3555 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3556 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3557 swsm | E1000_SWSM_DRV_LOAD);
3558 } else {
3559 uint32_t ctrl_ext;
3560
3561 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3562 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3563 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3564 }
3565 sc->flags |= EMX_FLAG_HW_CTRL;
3566 }
3567
3568 /*
3569 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3570 * For ASF and Pass Through versions of f/w this means that the
3571 * driver is no longer loaded. For AMT version (only with 82573)
3572 * of the f/w this means that the network i/f is closed.
3573 */
3574 static void
3575 emx_rel_hw_control(struct emx_softc *sc)
3576 {
3577 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
3578 return;
3579 sc->flags &= ~EMX_FLAG_HW_CTRL;
3580
3581 /* Let firmware take over control of h/w */
3582 if (sc->hw.mac.type == e1000_82573) {
3583 uint32_t swsm;
3584
3585 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3586 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3587 swsm & ~E1000_SWSM_DRV_LOAD);
3588 } else {
3589 uint32_t ctrl_ext;
3590
3591 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3592 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3593 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3594 }
3595 }
3596
3597 static int
3598 emx_is_valid_eaddr(const uint8_t *addr)
3599 {
3600 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3601
3602 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3603 return (FALSE);
3604
3605 return (TRUE);
3606 }
3607
3608 /*
3609 * Enable PCI Wake On LAN capability
3610 */
3611 static void
3612 emx_enable_wol(device_t dev)
3613 {
3614 uint16_t cap, status;
3615 uint8_t id;
3616
3617 /* First find the capabilities pointer */
3618 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3619
3620 /* Read the PM Capabilities */
3621 id = pci_read_config(dev, cap, 1);
3622 if (id != PCIY_PMG) /* Something wrong */
3623 return;
3624
3625 /*
3626 * OK, we have the power capabilities,
3627 * so now get the status register
3628 */
3629 cap += PCIR_POWER_STATUS;
3630 status = pci_read_config(dev, cap, 2);
3631 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3632 pci_write_config(dev, cap, status, 2);
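/*
 * Offsets used above follow the PCI PM spec: PCIR_CAP_PTR (0x34)
 * holds the first capability pointer, PCIY_PMG (0x01) identifies the
 * Power Management capability, and its status register sits
 * PCIR_POWER_STATUS (4) bytes in; setting PME status and PME enable
 * arms wake-on-LAN.
 */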
3633 }
3634
3635 static void
3636 emx_update_stats(struct emx_softc *sc)
3637 {
3638 struct ifnet *ifp = &sc->arpcom.ac_if;
3639
3640 if (sc->hw.phy.media_type == e1000_media_type_copper ||
3641 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3642 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
3643 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
3644 }
3645 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
3646 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
3647 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
3648 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
3649
3650 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
3651 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
3652 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
3653 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
3654 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
3655 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
3656 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
3657 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
3658 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
3659 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
3660 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
3661 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
3662 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
3663 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
3664 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
3665 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
3666 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
3667 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
3668 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
3669 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
3670
3671 /* For the 64-bit byte counters the low dword must be read first. */
3672 /* Both registers clear on the read of the high dword */
3673
3674 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH);
3675 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH);
3676
3677 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
3678 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
3679 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
3680 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
3681 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
3682
3683 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
3684 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
3685
3686 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
3687 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
3688 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
3689 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
3690 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
3691 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
3692 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
3693 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
3694 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
3695 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
3696
3697 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
3698 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC);
3699 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS);
3700 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR);
3701 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC);
3702 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC);
3703
3704 IFNET_STAT_SET(ifp, collisions, sc->stats.colc);
3705
3706 /* Rx Errors */
3707 IFNET_STAT_SET(ifp, ierrors,
3708 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc +
3709 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr);
3710
3711 /* Tx Errors */
3712 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol);
3713 }
3714
3715 static void
3716 emx_print_debug_info(struct emx_softc *sc)
3717 {
3718 device_t dev = sc->dev;
3719 uint8_t *hw_addr = sc->hw.hw_addr;
3720 int i;
3721
3722 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3723 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
3724 E1000_READ_REG(&sc->hw, E1000_CTRL),
3725 E1000_READ_REG(&sc->hw, E1000_RCTL));
3726 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
3727 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\
3728 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) );
3729 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3730 sc->hw.fc.high_water, sc->hw.fc.low_water);
3731 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3732 E1000_READ_REG(&sc->hw, E1000_TIDV),
3733 E1000_READ_REG(&sc->hw, E1000_TADV));
3734 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3735 E1000_READ_REG(&sc->hw, E1000_RDTR),
3736 E1000_READ_REG(&sc->hw, E1000_RADV));
3737
3738 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3739 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i,
3740 E1000_READ_REG(&sc->hw, E1000_TDH(i)),
3741 E1000_READ_REG(&sc->hw, E1000_TDT(i)));
3742 }
3743 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3744 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i,
3745 E1000_READ_REG(&sc->hw, E1000_RDH(i)),
3746 E1000_READ_REG(&sc->hw, E1000_RDT(i)));
3747 }
3748
3749 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3750 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i,
3751 sc->tx_data[i].num_tx_desc_avail);
3752 device_printf(dev, "TX %d TSO segments = %lu\n", i,
3753 sc->tx_data[i].tso_segments);
3754 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i,
3755 sc->tx_data[i].tso_ctx_reused);
3756 }
3757 }

static void
emx_print_hw_stats(struct emx_softc *sc)
{
	device_t dev = sc->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)sc->stats.ecol);
#if (DEBUG_HW > 0)	/* Don't output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)sc->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)sc->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)sc->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)sc->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)sc->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    (long long)sc->stats.roc + (long long)sc->stats.ruc);
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)sc->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)sc->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)sc->stats.algnerrc);
	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
	    (long long)sc->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns);
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)sc->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)sc->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)sc->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)sc->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)sc->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)sc->stats.gptc);
}

static void
emx_print_nvm_info(struct emx_softc *sc)
{
	uint16_t eeprom_data;
	int i, j, row = 0;

	/* It's a bit crude, but it gets the job done */
	kprintf("\nInterface EEPROM Dump:\n");
	kprintf("Offset\n0x0000 ");
	for (i = 0, j = 0; i < 32; i++, j++) {
		if (j == 8) {	/* Start a new offset row */
			j = 0;
			++row;
			kprintf("\n0x00%x0 ", row);
		}
		e1000_read_nvm(&sc->hw, i, 1, &eeprom_data);
		kprintf("%04x ", eeprom_data);
	}
	kprintf("\n");
}

static int
emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc;
	struct ifnet *ifp;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	sc = (struct emx_softc *)arg1;
	ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	if (result == 1)
		emx_print_debug_info(sc);

	/*
	 * This value will cause a hex dump of the
	 * first 32 16-bit words of the EEPROM to
	 * the screen.
	 */
	if (result == 2)
		emx_print_nvm_info(sc);

	ifnet_deserialize_all(ifp);

	return (error);
}
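
/*
 * Example usage (assuming the device attached as emx0 and the usual
 * dev.<driver>.<unit> sysctl tree naming):
 *
 *   sysctl dev.emx.0.debug=1	# dump register and ring state
 *   sysctl dev.emx.0.debug=2	# hex dump the first 32 EEPROM words
 *   sysctl dev.emx.0.stats=1	# print the hardware statistics
 */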

static int
emx_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		struct emx_softc *sc = (struct emx_softc *)arg1;
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		emx_print_hw_stats(sc);
		ifnet_deserialize_all(ifp);
	}
	return (error);
}

static void
emx_add_sysctl(struct emx_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char pkt_desc[32];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0,
	    "# of TX descs");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_int_throttle, "I", "interrupt throttling rate");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    emx_sysctl_tx_wreg_nsegs, "I",
	    "# segments sent before write to hardware register");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0,
	    "# of RX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0,
	    "# of TX rings");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "TX polling CPU map");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
	    sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
	    "RX polling CPU map");
#endif

#ifdef EMX_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug,
	    0, "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts,
		    "RXed packets");
	}
#endif
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
#ifdef EMX_TSS_DEBUG
		ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts,
		    "TXed packets");
#endif

		ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_nmbuf", i);
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RD, &sc->tx_data[i].tx_nmbuf, 0,
		    "# of pending TX mbufs");
		ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_gc", i);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_gc,
		    "# of TX desc GC");
	}
}

static int
emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, throttle;

	throttle = sc->int_throttle_ceil;
	error = sysctl_handle_int(oidp, &throttle, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (throttle < 0 || throttle > 1000000000 / 256)
		return EINVAL;

	if (throttle) {
		/*
		 * Convert the requested interrupt rate into the hardware
		 * interval, which counts in 256ns increments.  The ceiling
		 * is recalculated from this interval below, so the sysctl
		 * reports the exact rate the hardware will deliver.
		 */
		throttle = 1000000000 / 256 / throttle;

		/* The upper 16 bits of ITR are reserved and must be zero */
		if (throttle & 0xffff0000)
			return EINVAL;
	}
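
	/*
	 * Illustrative arithmetic: writing 10000 to this sysctl yields
	 * throttle = 1000000000 / 256 / 10000 = 390 (in 256ns units);
	 * the ceiling stored below then becomes 1000000000 / 256 / 390
	 * = 10016, the exact interrupt rate the hardware will use.
	 */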

	ifnet_serialize_all(ifp);

	if (throttle)
		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	else
		sc->int_throttle_ceil = 0;

	if (ifp->if_flags & IFF_RUNNING)
		emx_set_itr(sc, throttle);

	ifnet_deserialize_all(ifp);

	if (bootverbose) {
		if_printf(ifp, "Interrupt moderation set to %d/sec\n",
		    sc->int_throttle_ceil);
	}
	return 0;
}

static int
emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct emx_txdata *tdata = &sc->tx_data[0];
	int error, segs;

	segs = tdata->tx_intr_nsegs;
	error = sysctl_handle_int(oidp, &segs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (segs <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	/*
	 * Don't allow tx_intr_nsegs to become:
	 * o  Less than oact_tx_desc.
	 * o  So large that no TX descriptor usage would generate a TX
	 *    interrupt (OACTIVE would never recover).
	 * o  So small that it would overflow tx_dd[].
	 */
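	/*
	 * Illustration with hypothetical values: with num_tx_desc = 512,
	 * oact_tx_desc = 64 and EMX_TXDD_SAFE = 8, the accepted range
	 * would be 64 <= segs < 448 (and segs >= 512 / 8 = 64).
	 */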
	if (segs < tdata->oact_tx_desc ||
	    segs >= tdata->num_tx_desc - tdata->oact_tx_desc ||
	    segs < tdata->num_tx_desc / EMX_TXDD_SAFE) {
		error = EINVAL;
	} else {
		int i;

		error = 0;
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			sc->tx_data[i].tx_intr_nsegs = segs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}

static int
emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct emx_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, nsegs, i;

	nsegs = sc->tx_data[0].tx_wreg_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;

	ifnet_serialize_all(ifp);
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->tx_data[i].tx_wreg_nsegs = nsegs;
	ifnet_deserialize_all(ifp);

	return 0;
}

static int
emx_dma_alloc(struct emx_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
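	/*
	 * Argument sketch, following DragonFly's bus_dma_tag_create()
	 * parameter order (parent, alignment, boundary, lowaddr, highaddr,
	 * maxsize, nsegments, maxsegsz, flags, tag): no parent tag, byte
	 * alignment, no boundary, the full address range, a 32-bit maximum
	 * size and no segment count restriction; the per-ring tags are
	 * derived from this one.
	 */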
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_dtag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate transmit descriptor rings and buffers
	 */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		error = emx_create_tx_ring(&sc->tx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup transmit structures\n");
			return error;
		}
	}

	/*
	 * Allocate receive descriptor rings and buffers
	 */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		error = emx_create_rx_ring(&sc->rx_data[i]);
		if (error) {
			device_printf(sc->dev,
			    "Could not setup receive structures\n");
			return error;
		}
	}
	return 0;
}

static void
emx_dma_free(struct emx_softc *sc)
{
	int i;

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		emx_destroy_tx_ring(&sc->tx_data[i],
		    sc->tx_data[i].num_tx_desc);
	}

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		emx_destroy_rx_ring(&sc->rx_data[i],
		    sc->rx_data[i].num_rx_desc);
	}

	/* Free top level busdma tag */
	if (sc->parent_dtag != NULL)
		bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct emx_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}
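
/*
 * Note on the "skipmain" variants above: the trailing argument of the
 * lwkt_serialize_array_* calls is the start index, so they enter/exit
 * sc->serializes[1] onward, i.e. every serializer except the main one
 * at index 0, which the caller is expected to hold already.
 */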

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct emx_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */

#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
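	/*
	 * The ICR read above should be clear-on-read on this hardware
	 * family, so the link-change causes tested below are consumed
	 * by the read itself.
	 */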
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);
		callout_reset(&sc->timer, hz, emx_timer, sc);
	}
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
	struct emx_txdata *tdata = arg;

	ASSERT_SERIALIZED(&tdata->tx_serialize);

	emx_tx_intr(tdata);
	emx_try_txgc(tdata, 1);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
	struct emx_rxdata *rdata = arg;

	ASSERT_SERIALIZED(&rdata->rx_serialize);

	emx_rxeof(rdata, cycle);
}

static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct emx_softc *sc = ifp->if_softc;
	int i, txr_cnt;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = emx_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		txr_cnt = emx_get_txring_inuse(sc, TRUE);
		for (i = 0; i < txr_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			cpu = if_ringmap_cpumap(sc->tx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_tx[cpu].poll_func = emx_npoll_tx;
			info->ifpi_tx[cpu].arg = tdata;
			info->ifpi_tx[cpu].serializer = &tdata->tx_serialize;
			ifsq_set_cpuid(tdata->ifsq, cpu);
		}

		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct emx_rxdata *rdata = &sc->rx_data[i];

			cpu = if_ringmap_cpumap(sc->rx_rmap, i);
			KKASSERT(cpu < netisr_ncpus);
			info->ifpi_rx[cpu].poll_func = emx_npoll_rx;
			info->ifpi_rx[cpu].arg = rdata;
			info->ifpi_rx[cpu].serializer = &rdata->rx_serialize;
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct emx_txdata *tdata = &sc->tx_data[i];

			ifsq_set_cpuid(tdata->ifsq,
			    rman_get_cpuid(sc->intr_res));
		}
	}
	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);
}

#endif	/* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
	E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
	if (sc->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSIX interrupts we need to
		 * throttle using the EITR register
		 */
		for (i = 0; i < 4; ++i)
			E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
	}
}

/*
 * Disable ASPM L0s (and on some MACs also L1); see 82574L errata #20.
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = sc->dev;

	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8  disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
		/*
		 * 82574 specification update errata #20
		 *
		 * There is no need to disable L1.
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose)
		if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}

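/*
 * Ensure the Ethernet/IP/TCP header (plus a few extra bytes when the
 * hardware requires part of the payload to be resident as well) is
 * contiguous in the first mbuf, pulling up the chain if necessary.
 * ip_len is zeroed; the hardware recomputes it per TSO segment.
 */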
static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
	int iphlen, hoff, thoff, ex = 0;
	struct mbuf *m;
	struct ip *ip;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
		ex = 4;

	if (m->m_len < hoff + iphlen + thoff + ex) {
		m = m_pullup(m, hoff + iphlen + thoff + ex);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	ip->ip_len = 0;

	return 0;
}

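/*
 * Set up a TSO context descriptor, unless the previously programmed
 * context already matches this packet's header layout.  Returns 1 if
 * a new context descriptor was consumed, 0 if the cached context was
 * reused and no descriptor was used.
 */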
static int
emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int hoff, iphlen, thoff, hlen;
	int mss, pktlen, curr_txd;

#ifdef EMX_TSO_DEBUG
	tdata->tso_segments++;
#endif

	iphlen = mp->m_pkthdr.csum_iphlen;
	thoff = mp->m_pkthdr.csum_thlen;
	hoff = mp->m_pkthdr.csum_lhlen;
	mss = mp->m_pkthdr.tso_segsz;
	pktlen = mp->m_pkthdr.len;

	if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
	    tdata->csum_flags == CSUM_TSO &&
	    tdata->csum_iphlen == iphlen &&
	    tdata->csum_lhlen == hoff &&
	    tdata->csum_thlen == thoff &&
	    tdata->csum_mss == mss &&
	    tdata->csum_pktlen == pktlen) {
		*txd_upper = tdata->csum_txd_upper;
		*txd_lower = tdata->csum_txd_lower;
#ifdef EMX_TSO_DEBUG
		tdata->tso_ctx_reused++;
#endif
		return 0;
	}
	hlen = hoff + iphlen + thoff;

	/*
	 * Setup a new TSO context.
	 */

	curr_txd = tdata->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
		     E1000_TXD_DTYP_D |		/* Data descr type */
		     E1000_TXD_CMD_TSE;		/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

	/*
	 * Start offset for header checksum calculation.
	 * End offset for header checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->lower_setup.ip_fields.ipcss = hoff;
	TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
	TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);

	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of the place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    hoff + iphlen + offsetof(struct tcphdr, th_sum);

	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mss);
	TXD->tcp_seg_setup.fields.hdr_len = hlen;
	TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
	    E1000_TXD_CMD_DEXT |	/* Extended descr */
	    E1000_TXD_CMD_TSE |		/* TSE context */
	    E1000_TXD_CMD_IP |		/* Do IP csum */
	    E1000_TXD_CMD_TCP |		/* Do TCP checksum */
	    (pktlen - hlen));		/* Total len */

	/* Save the information for this TSO context */
	tdata->csum_flags = CSUM_TSO;
	tdata->csum_lhlen = hoff;
	tdata->csum_iphlen = iphlen;
	tdata->csum_thlen = thoff;
	tdata->csum_mss = mss;
	tdata->csum_pktlen = pktlen;
	tdata->csum_txd_upper = *txd_upper;
	tdata->csum_txd_lower = *txd_lower;

	if (++curr_txd == tdata->num_tx_desc)
		curr_txd = 0;

	KKASSERT(tdata->num_tx_desc_avail > 0);
	tdata->num_tx_desc_avail--;

	tdata->next_avail_tx_desc = curr_txd;
	return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
	if (polling)
		return sc->tx_ring_cnt;
	else
		return 1;
}

/*
 * Remove all descriptors from the TX ring.
 *
 * We want to clear all pending descriptors from the TX ring; the
 * clearing happens when the hardware fetches them.  One dummy
 * descriptor is queued per ring, using the ring itself as its buffer
 * address, since the data is never used: the hardware is about to be
 * reset anyway.
 */
static void
emx_flush_tx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);

	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];
		struct e1000_tx_desc *txd;

		if (E1000_READ_REG(hw, E1000_TDLEN(i)) == 0)
			continue;

		txd = &tdata->tx_desc_base[tdata->next_avail_tx_desc++];
		if (tdata->next_avail_tx_desc == tdata->num_tx_desc)
			tdata->next_avail_tx_desc = 0;

		/* Just use the ring itself as a dummy buffer address */
		txd->buffer_addr = tdata->tx_desc_paddr;
		txd->lower.data = htole32(E1000_TXD_CMD_IFCS | 512);
		txd->upper.data = 0;

		E1000_WRITE_REG(hw, E1000_TDT(i), tdata->next_avail_tx_desc);
		usec_delay(250);
	}
}

/*
 * Remove all descriptors from the RX rings.
 *
 * Mark all descriptors in the RX rings as consumed and disable the RX rings.
 */
static void
emx_flush_rx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl;
	int i;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	usec_delay(150);

	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		uint32_t rxdctl;

		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		/* Zero the lower 14 bits (prefetch and host thresholds) */
		rxdctl &= 0xffffc000;
		/*
		 * Update thresholds: prefetch threshold to 31, host threshold
		 * to 1 and make sure the granularity is "descriptors" and not
		 * "cache lines".
		 */
		rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/* Momentarily enable the RX rings for the changes to take effect */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	usec_delay(150);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
}

/*
 * Remove all descriptors from the descriptor rings.
 *
 * On the I219, the descriptor rings must be emptied before resetting
 * the hardware or before changing the device state to D3 during
 * runtime (runtime PM).
 *
 * Failure to do this will cause the hardware to enter a unit hang
 * state which can only be released by a PCI reset of the device.
 */
static void
emx_flush_txrx_ring(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	uint16_t hang_state;
	uint32_t fext_nvm11, tdlen;
	int i;

	/*
	 * First, disable the MULR fix in FEXTNVM11.
	 */
	fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
	fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
	E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);

	/*
	 * Do nothing if we're not in a faulty state, or if the TX queues
	 * are empty.
	 */
	tdlen = 0;
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		tdlen += E1000_READ_REG(hw, E1000_TDLEN(i));
	hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
	if ((hang_state & EMX_FLUSH_DESC_REQUIRED) && tdlen)
		emx_flush_tx_ring(sc);

	/*
	 * Recheck; the fault may instead be caused by the RX ring.
	 */
	hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
	if (hang_state & EMX_FLUSH_DESC_REQUIRED)
		emx_flush_rx_ring(sc);
}