/*	$OpenBSD: if_ngbe.c,v 1.6 2024/09/20 02:15:53 jsg Exp $	*/

/*
 * Copyright (c) 2015-2017 Beijing WangXun Technology Co., Ltd.
 * Copyright (c) 2023 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/intrmap.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/toeplitz.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_ngbereg.h>

const struct pci_matchid ngbe_devices[] = {
	{ PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860A2 },
	{ PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860AL1 }
};

int	ngbe_match(struct device *, void *, void *);
void	ngbe_attach(struct device *, struct device *, void *);
int	ngbe_detach(struct device *, int);
void	ngbe_init(void *);
int	ngbe_ioctl(struct ifnet *, u_long, caddr_t);
int	ngbe_media_change(struct ifnet *);
void	ngbe_media_status(struct ifnet *, struct ifmediareq *);
int	ngbe_rxfill(struct rx_ring *);
int	ngbe_rxrinfo(struct ngbe_softc *, struct if_rxrinfo *);
void	ngbe_start(struct ifqueue *);
void	ngbe_stop(struct ngbe_softc *);
void	ngbe_update_link_status(struct ngbe_softc *);
void	ngbe_watchdog(struct ifnet *);
int	ngbe_allocate_pci_resources(struct ngbe_softc *);
void	ngbe_free_pci_resources(struct ngbe_softc *);
int	ngbe_allocate_msix(struct ngbe_softc *);
void	ngbe_setup_interface(struct ngbe_softc *);
int	ngbe_setup_msix(struct ngbe_softc *);
int	ngbe_dma_malloc(struct ngbe_softc *, bus_size_t,
	    struct ngbe_dma_alloc *);
void	ngbe_dma_free(struct ngbe_softc *,
	    struct ngbe_dma_alloc *);
int	ngbe_allocate_isb(struct ngbe_softc *);
void	ngbe_free_isb(struct ngbe_softc *);
int	ngbe_allocate_queues(struct ngbe_softc *);
void	ngbe_free_receive_structures(struct ngbe_softc *);
void	ngbe_free_receive_buffers(struct rx_ring *);
void	ngbe_free_transmit_structures(struct ngbe_softc *);
void	ngbe_free_transmit_buffers(struct tx_ring *);
int	ngbe_allocate_receive_buffers(struct rx_ring *);
int	ngbe_allocate_transmit_buffers(struct tx_ring *);
int	ngbe_setup_receive_ring(struct rx_ring *);
int	ngbe_setup_transmit_ring(struct tx_ring *);
int	ngbe_setup_receive_structures(struct ngbe_softc *);
int	ngbe_setup_transmit_structures(struct ngbe_softc *);
uint8_t	*ngbe_addr_list_itr(struct ngbe_hw *, uint8_t **,
	    uint32_t *);
void	ngbe_iff(struct ngbe_softc *);
int	ngbe_initialize_receive_unit(struct ngbe_softc *);
void	ngbe_initialize_rss_mapping(struct ngbe_softc *);
int	ngbe_initialize_transmit_unit(struct ngbe_softc *);
int	ngbe_intr_link(void *);
int	ngbe_intr_queue(void *);
void	ngbe_init_eeprom_params(struct ngbe_hw *);
int	ngbe_init_hw(struct ngbe_softc *);
void	ngbe_init_ops(struct ngbe_hw *);
void	ngbe_init_rx_addrs(struct ngbe_softc *);
void	ngbe_init_shared_code(struct ngbe_softc *);
void	ngbe_init_thermal_sensor_thresh(struct ngbe_hw *);
void	ngbe_init_uta_tables(struct ngbe_hw *);
void	ngbe_fc_autoneg(struct ngbe_softc *);
int	ngbe_fc_autoneg_copper(struct ngbe_softc *);
int	ngbe_fc_enable(struct ngbe_softc *);
int	ngbe_fmgr_cmd_op(struct ngbe_hw *, uint32_t, uint32_t);
uint32_t ngbe_flash_read_dword(struct ngbe_hw *, uint32_t);
uint8_t	ngbe_calculate_checksum(uint8_t *, uint32_t);
int	ngbe_check_flash_load(struct ngbe_softc *, uint32_t);
int	ngbe_check_internal_phy_id(struct ngbe_softc *);
int	ngbe_check_mac_link(struct ngbe_hw *, uint32_t *, int *,
	    int);
int	ngbe_check_mng_access(struct ngbe_hw *);
int	ngbe_check_reset_blocked(struct ngbe_softc *);
void	ngbe_clear_hw_cntrs(struct ngbe_hw *);
void	ngbe_clear_vfta(struct ngbe_hw *);
void	ngbe_configure_ivars(struct ngbe_softc *);
void	ngbe_configure_pb(struct ngbe_softc *);
void	ngbe_disable_intr(struct ngbe_softc *);
int	ngbe_disable_pcie_master(struct ngbe_softc *);
void	ngbe_disable_queue(struct ngbe_softc *, uint32_t);
void	ngbe_disable_rx(struct ngbe_hw *);
void	ngbe_disable_sec_rx_path(struct ngbe_hw *);
int	ngbe_eepromcheck_cap(struct ngbe_softc *, uint16_t,
	    uint32_t *);
void	ngbe_enable_intr(struct ngbe_softc *);
void	ngbe_enable_queue(struct ngbe_softc *, uint32_t);
void	ngbe_enable_rx(struct ngbe_hw *);
void	ngbe_enable_rx_dma(struct ngbe_hw *, uint32_t);
void	ngbe_enable_sec_rx_path(struct ngbe_hw *);
int	ngbe_encap(struct tx_ring *, struct mbuf *);
int	ngbe_get_buf(struct rx_ring *, int);
void	ngbe_get_bus_info(struct ngbe_softc *);
void	ngbe_get_copper_link_capabilities(struct ngbe_hw *,
	    uint32_t *, int *);
int	ngbe_get_eeprom_semaphore(struct ngbe_softc *);
void	ngbe_get_hw_control(struct ngbe_hw *);
void	ngbe_release_hw_control(struct ngbe_softc *);
void	ngbe_get_mac_addr(struct ngbe_hw *, uint8_t *);
enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *);
void	ngbe_gphy_dis_eee(struct ngbe_hw *);
void	ngbe_gphy_efuse_calibration(struct ngbe_softc *);
void	ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *);
void	ngbe_handle_phy_event(struct ngbe_softc *);
int	ngbe_host_interface_command(struct ngbe_softc *,
	    uint32_t *, uint32_t, uint32_t, int);
int	ngbe_hpbthresh(struct ngbe_softc *);
int	ngbe_lpbthresh(struct ngbe_softc *);
int	ngbe_mng_present(struct ngbe_hw *);
int	ngbe_mta_vector(struct ngbe_hw *, uint8_t *);
int	ngbe_negotiate_fc(struct ngbe_softc *, uint32_t,
	    uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
int	ngbe_non_sfp_link_config(struct ngbe_softc *);
void	ngbe_pbthresh_setup(struct ngbe_softc *);
void	ngbe_phy_check_event(struct ngbe_softc *);
int	ngbe_phy_check_overtemp(struct ngbe_hw *);
void	ngbe_phy_get_advertised_pause(struct ngbe_hw *,
	    uint8_t *);
void	ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *,
	    uint8_t *);
int	ngbe_phy_identify(struct ngbe_softc *);
int	ngbe_phy_init(struct ngbe_softc *);
void	ngbe_phy_led_ctrl(struct ngbe_softc *);
int	ngbe_phy_led_oem_chk(struct ngbe_softc *, uint32_t *);
int	ngbe_phy_read_reg(struct ngbe_hw *, uint32_t, uint32_t,
	    uint16_t *);
int	ngbe_phy_write_reg(struct ngbe_hw *, uint32_t, uint32_t,
	    uint16_t);
int	ngbe_phy_reset(struct ngbe_softc *);
int	ngbe_phy_set_pause_advertisement(struct ngbe_hw *,
	    uint16_t);
int	ngbe_phy_setup(struct ngbe_softc *);
int	ngbe_phy_setup_link(struct ngbe_softc *, uint32_t, int);
uint16_t ngbe_read_pci_cfg_word(struct ngbe_softc *, uint32_t);
void	ngbe_release_eeprom_semaphore(struct ngbe_hw *);
int	ngbe_acquire_swfw_sync(struct ngbe_softc *, uint32_t);
void	ngbe_release_swfw_sync(struct ngbe_softc *, uint32_t);
void	ngbe_reset(struct ngbe_softc *);
int	ngbe_reset_hw(struct ngbe_softc *);
void	ngbe_reset_misc(struct ngbe_hw *);
int	ngbe_set_fw_drv_ver(struct ngbe_softc *, uint8_t,
	    uint8_t, uint8_t, uint8_t);
void	ngbe_set_ivar(struct ngbe_softc *, uint16_t, uint16_t,
	    int8_t);
void	ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *);
void	ngbe_set_mta(struct ngbe_hw *, uint8_t *);
void	ngbe_set_pci_config_data(struct ngbe_hw *, uint16_t);
int	ngbe_set_rar(struct ngbe_softc *, uint32_t, uint8_t *,
	    uint64_t, uint32_t);
void	ngbe_set_rx_drop_en(struct ngbe_softc *);
void	ngbe_set_rxpba(struct ngbe_hw *, int, uint32_t, int);
int	ngbe_setup_copper_link(struct ngbe_softc *, uint32_t,
	    int);
int	ngbe_setup_fc(struct ngbe_softc *);
void	ngbe_setup_gpie(struct ngbe_hw *);
void	ngbe_setup_isb(struct ngbe_softc *);
void	ngbe_setup_psrtype(struct ngbe_hw *);
void	ngbe_setup_vlan_hw_support(struct ngbe_softc *);
int	ngbe_start_hw(struct ngbe_softc *);
int	ngbe_stop_adapter(struct ngbe_softc *);
void	ngbe_rx_checksum(uint32_t, struct mbuf *);
void	ngbe_rxeof(struct rx_ring *);
void	ngbe_rxrefill(void *);
int	ngbe_tx_ctx_setup(struct tx_ring *, struct mbuf *,
	    uint32_t *, uint32_t *);
void	ngbe_txeof(struct tx_ring *);
void	ngbe_update_mc_addr_list(struct ngbe_hw *, uint8_t *,
	    uint32_t, ngbe_mc_addr_itr, int);
int	ngbe_validate_mac_addr(uint8_t *);

struct cfdriver ngbe_cd = {
	NULL, "ngbe", DV_IFNET
};

const struct cfattach ngbe_ca = {
	sizeof(struct ngbe_softc), ngbe_match, ngbe_attach, ngbe_detach
};

int
ngbe_match(struct device *parent, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, ngbe_devices,
	    nitems(ngbe_devices));
}

void
ngbe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct ngbe_softc *sc = (struct ngbe_softc *)self;
	struct ngbe_hw *hw = &sc->hw;
	uint32_t eeprom_cksum_devcap, devcap, led_conf;
	int error;

	sc->osdep.os_sc = sc;
	sc->osdep.os_pa = *pa;

	/* Setup PCI resources. */
	if (ngbe_allocate_pci_resources(sc))
		goto fail1;

	sc->num_tx_desc = NGBE_DEFAULT_TXD;
	sc->num_rx_desc = NGBE_DEFAULT_RXD;

	/* Allocate Tx/Rx queues. */
	if (ngbe_allocate_queues(sc))
		goto fail1;

	/* Allocate multicast array memory. */
	sc->mta = mallocarray(ETHER_ADDR_LEN, NGBE_SP_RAR_ENTRIES, M_DEVBUF,
	    M_NOWAIT);
	if (sc->mta == NULL) {
		printf(": can not allocate multicast setup array\n");
		goto fail1;
	}

	/* Allocate interrupt status resources. */
	if (ngbe_allocate_isb(sc))
		goto fail2;

	hw->mac.autoneg = 1;
	hw->phy.autoneg_advertised = NGBE_LINK_SPEED_AUTONEG;
	hw->phy.force_speed = NGBE_LINK_SPEED_UNKNOWN;

	/* Initialize the shared code. */
	ngbe_init_shared_code(sc);

	sc->hw.mac.ops.set_lan_id(&sc->hw);

	/* Check if flash load is done after hw power up. */
	error = ngbe_check_flash_load(sc, NGBE_SPI_ILDR_STATUS_PERST);
	if (error)
		goto fail3;
	error = ngbe_check_flash_load(sc, NGBE_SPI_ILDR_STATUS_PWRRST);
	if (error)
		goto fail3;

	hw->phy.reset_if_overtemp = 1;
	error = sc->hw.mac.ops.reset_hw(sc);
	hw->phy.reset_if_overtemp = 0;
	if (error) {
		printf(": HW reset failed\n");
		goto fail3;
	}

	eeprom_cksum_devcap = devcap = 0;
	if (hw->bus.lan_id == 0) {
		NGBE_WRITE_REG(hw, NGBE_CALSUM_CAP_STATUS, 0);
		NGBE_WRITE_REG(hw, NGBE_EEPROM_VERSION_STORE_REG, 0);
	} else
		eeprom_cksum_devcap = NGBE_READ_REG(hw, NGBE_CALSUM_CAP_STATUS);

	hw->eeprom.ops.init_params(hw);
	hw->mac.ops.release_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB);
	if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) {
		/* Make sure the EEPROM is good */
		if (hw->eeprom.ops.eeprom_chksum_cap_st(sc, NGBE_CALSUM_COMMAND,
		    &devcap)) {
			printf(": eeprom checksum is not valid\n");
			goto fail3;
		}
	}

	led_conf = 0;
	if (hw->eeprom.ops.phy_led_oem_chk(sc, &led_conf))
		sc->led_conf = -1;
	else
		sc->led_conf = led_conf;

	memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	error = ngbe_allocate_msix(sc);
	if (error)
		goto fail3;

	ngbe_setup_interface(sc);

	/* Reset the hardware with the new settings */
	error = hw->mac.ops.start_hw(sc);
	if (error) {
		printf(": HW init failed\n");
		goto fail3;
	}

	/* Pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(sc);

	hw->mac.ops.set_fw_drv_ver(sc, 0xff, 0xff, 0xff, 0xff);

	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
	return;

fail3:
	ngbe_free_isb(sc);
fail2:
	ngbe_free_transmit_structures(sc);
	ngbe_free_receive_structures(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);
fail1:
	ngbe_free_pci_resources(sc);
}

int
ngbe_detach(struct device *self, int flags)
{
	struct ngbe_softc *sc = (struct ngbe_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ngbe_stop(sc);
	ngbe_release_hw_control(sc);

	ether_ifdetach(ifp);
	if_detach(ifp);

	ngbe_free_pci_resources(sc);

	ngbe_free_transmit_structures(sc);
	ngbe_free_receive_structures(sc);
	ngbe_free_isb(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);

	return 0;
}

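/*
 * Masked register access helpers: reads hand the NGBE_FAILED_READ_REG
 * sentinel back unchanged so callers can notice a dead device; writes
 * do a read-modify-write of only the bits covered by `mask'.
 */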
static inline uint32_t
NGBE_READ_REG_MASK(struct ngbe_hw *hw, uint32_t reg, uint32_t mask)
{
	uint32_t val;

	val = NGBE_READ_REG(hw, reg);
	if (val == NGBE_FAILED_READ_REG)
		return val;
	return val & mask;
}

static inline void
NGBE_WRITE_REG_MASK(struct ngbe_hw *hw, uint32_t reg, uint32_t mask,
    uint32_t field)
{
	uint32_t val;

	val = NGBE_READ_REG(hw, reg);
	if (val == NGBE_FAILED_READ_REG)
		return;
	val = ((val & ~mask) | (field & mask));
	NGBE_WRITE_REG(hw, reg, val);
}

static inline uint32_t
ngbe_misc_isb(struct ngbe_softc *sc, enum ngbe_isb_idx idx)
{
	return htole32(sc->isb_base[idx]);
}

void
ngbe_init(void *arg)
{
	struct ngbe_softc *sc = (struct ngbe_softc *)arg;
	struct ngbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i, s;

	s = splnet();

	ngbe_stop(sc);

	ngbe_setup_isb(sc);

	/* Setup the receive address. */
	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, NGBE_PSR_MAC_SWC_AD_H_AV);

	/* Get the latest mac address, user can use a LAA. */
	bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, 1);

	ngbe_configure_pb(sc);

	/* Program promiscuous mode and multicast filters. */
	ngbe_iff(sc);

	ngbe_setup_vlan_hw_support(sc);

	/* Prepare transmit descriptors and buffers. */
	if (ngbe_setup_transmit_structures(sc)) {
		printf("%s: could not setup transmit structures\n",
		    DEVNAME(sc));
		ngbe_stop(sc);
		splx(s);
		return;
	}
	if (ngbe_initialize_transmit_unit(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	/* Prepare receive descriptors and buffers. */
	if (ngbe_setup_receive_structures(sc)) {
		printf("%s: could not setup receive structures\n",
		    DEVNAME(sc));
		ngbe_stop(sc);
		splx(s);
		return;
	}
	if (ngbe_initialize_receive_unit(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	ngbe_get_hw_control(hw);
	ngbe_setup_gpie(hw);
	ngbe_configure_ivars(sc);

	if (ngbe_non_sfp_link_config(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	/* Select GMII */
	NGBE_WRITE_REG(hw, NGBE_MAC_TX_CFG,
	    (NGBE_READ_REG(hw, NGBE_MAC_TX_CFG) & ~NGBE_MAC_TX_CFG_SPEED_MASK) |
	    NGBE_MAC_TX_CFG_SPEED_1G);

	/* Clear any pending interrupts, may auto mask */
	NGBE_READ_REG(hw, NGBE_PX_IC);
	NGBE_READ_REG(hw, NGBE_PX_MISC_IC);
	ngbe_enable_intr(sc);

	switch (hw->bus.lan_id) {
	case 0:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN0_UP, NGBE_MIS_PRB_CTL_LAN0_UP);
		break;
	case 1:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN1_UP, NGBE_MIS_PRB_CTL_LAN1_UP);
		break;
	case 2:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN2_UP, NGBE_MIS_PRB_CTL_LAN2_UP);
		break;
	case 3:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN3_UP, NGBE_MIS_PRB_CTL_LAN3_UP);
		break;
	}

	NGBE_WRITE_REG_MASK(hw, NGBE_CFG_PORT_CTL, NGBE_CFG_PORT_CTL_PFRSTD,
	    NGBE_CFG_PORT_CTL_PFRSTD);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->sc_nqueues; i++)
		ifq_clr_oactive(ifp->if_ifqs[i]);
	splx(s);
}

int
ngbe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ngbe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ngbe_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				ngbe_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ngbe_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = ngbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			ngbe_disable_intr(sc);
			ngbe_iff(sc);
			ngbe_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return error;
}

int
ngbe_media_change(struct ifnet *ifp)
{
	struct ngbe_softc *sc = ifp->if_softc;
	struct ngbe_hw *hw = &sc->hw;
	struct ifmedia *ifm = &sc->sc_media;
	uint32_t advertised = 0;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_1000_T:
		advertised |= NGBE_LINK_SPEED_AUTONEG;
		break;
	case IFM_100_TX:
		advertised |= NGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		advertised |= NGBE_LINK_SPEED_10_FULL;
		break;
	default:
		return EINVAL;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(sc, advertised, 1);

	return 0;
}

void
ngbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ngbe_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ngbe_update_link_status(sc);

	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case NGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case NGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case NGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	switch (sc->hw.fc.current_mode) {
	case ngbe_fc_tx_pause:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
		break;
	case ngbe_fc_rx_pause:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		break;
	case ngbe_fc_full:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
		    IFM_ETH_TXPAUSE;
		break;
	default:
		ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
		    IFM_ETH_TXPAUSE);
		break;
	}
}

int
ngbe_rxfill(struct rx_ring *rxr)
{
	struct ngbe_softc *sc = rxr->sc;
	int i, post = 0;
	u_int slots;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	i = rxr->last_desc_filled;
	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
	    slots--) {
		if (++i == sc->num_rx_desc)
			i = 0;

		if (ngbe_get_buf(rxr, i) != 0)
			break;

		rxr->last_desc_filled = i;
		post = 1;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	if_rxr_put(&rxr->rx_ring, slots);

	return post;
}

int
ngbe_rxrinfo(struct ngbe_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr;
	struct rx_ring *rxr;
	int error, i, n = 0;

	if ((ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
	    M_WAITOK | M_CANFAIL | M_ZERO)) == NULL)
		return ENOMEM;

	for (i = 0; i < sc->sc_nqueues; i++) {
		rxr = &sc->rx_rings[i];
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
		ifr[n].ifr_info = rxr->rx_ring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
	free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr));

	return error;
}

void
ngbe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct ngbe_softc *sc = ifp->if_softc;
	struct tx_ring *txr = ifq->ifq_softc;
	struct mbuf *m;
	unsigned int prod, free, used;
	int post = 0;

	if (!sc->link_up)
		return;

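	/*
	 * Compute the number of free descriptors: the ring is circular,
	 * so add the ring size back in when the consumer index sits at
	 * or behind the producer.
	 */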
	prod = txr->next_avail_desc;
	free = txr->next_to_clean;
	if (free <= prod)
		free += sc->num_tx_desc;
	free -= prod;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	for (;;) {
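		/* Stop once a maximally scattered packet plus slack no
		 * longer fits in the ring. */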
		if (free <= NGBE_MAX_SCATTER + 2) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = ngbe_encap(txr, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		txr->watchdog_timer = NGBE_TX_TIMEOUT;
		ifp->if_timer = NGBE_TX_TIMEOUT;

		post = 1;
	}

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	if (post)
		NGBE_WRITE_REG(&sc->hw, NGBE_PX_TR_WP(txr->me),
		    txr->next_avail_desc);
}

void
ngbe_stop(struct ngbe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ngbe_hw *hw = &sc->hw;
	uint32_t rxdctl;
	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;

	/* Tell the stack that the interface is no longer active. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	ngbe_disable_pcie_master(sc);
	/* Disable receives */
	hw->mac.ops.disable_rx(hw);

	for (i = 0; i < sc->sc_nqueues; i++) {
		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    NGBE_PX_RR_CFG_RR_EN, 0);
		do {
			DELAY(10);
			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
		} while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN));
		if (!wait_loop) {
			printf("%s: Rx queue %d not cleared within "
			    "the polling period\n", DEVNAME(sc), i);
			return;
		}
	}

	ngbe_disable_intr(sc);

	switch (hw->bus.lan_id) {
	case 0:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN0_UP, 0);
		break;
	case 1:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN1_UP, 0);
		break;
	case 2:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN2_UP, 0);
		break;
	case 3:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN3_UP, 0);
		break;
	}

	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE, 0);
	for (i = 0; i < sc->sc_nqueues; i++)
		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), NGBE_PX_TR_CFG_SWFLSH);
	NGBE_WRITE_REG_MASK(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0);

	ngbe_reset(sc);

	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, NGBE_PSR_MAC_SWC_AD_H_AV);

	intr_barrier(sc->tag);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifqueue *ifq = ifp->if_ifqs[i];
		ifq_barrier(ifq);
		ifq_clr_oactive(ifq);

		if (sc->queues[i].tag != NULL)
			intr_barrier(sc->queues[i].tag);
		timeout_del(&sc->rx_rings[i].rx_refill);
	}

	ngbe_free_transmit_structures(sc);
	ngbe_free_receive_structures(sc);

	ngbe_update_link_status(sc);
}

void
ngbe_update_link_status(struct ngbe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ngbe_hw *hw = &sc->hw;
	uint32_t reg, speed = 0;
	int link_state = LINK_STATE_DOWN;

	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up, 0);

	ifp->if_baudrate = 0;
	if (sc->link_up) {
		link_state = LINK_STATE_FULL_DUPLEX;

		switch (sc->link_speed) {
		case NGBE_LINK_SPEED_UNKNOWN:
			ifp->if_baudrate = 0;
			break;
		case NGBE_LINK_SPEED_1GB_FULL:
			ifp->if_baudrate = IF_Gbps(1);
			speed = 2;
			break;
		case NGBE_LINK_SPEED_100_FULL:
			ifp->if_baudrate = IF_Mbps(100);
			speed = 1;
			break;
		case NGBE_LINK_SPEED_10_FULL:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		}
		NGBE_WRITE_REG_MASK(hw, NGBE_CFG_LAN_SPEED, 0x3, speed);

		/* Update any flow control changes */
		hw->mac.ops.fc_enable(sc);

		ngbe_set_rx_drop_en(sc);

		if (sc->link_speed & (NGBE_LINK_SPEED_1GB_FULL |
		    NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) {
			NGBE_WRITE_REG(hw, NGBE_MAC_TX_CFG,
			    (NGBE_READ_REG(hw, NGBE_MAC_TX_CFG) &
			    ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE |
			    NGBE_MAC_TX_CFG_SPEED_1G);
		}

		reg = NGBE_READ_REG(hw, NGBE_MAC_RX_CFG);
		NGBE_WRITE_REG(hw, NGBE_MAC_RX_CFG, reg);
		NGBE_WRITE_REG(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR);
		reg = NGBE_READ_REG(hw, NGBE_MAC_WDG_TIMEOUT);
		NGBE_WRITE_REG(hw, NGBE_MAC_WDG_TIMEOUT, reg);
	}

	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
ngbe_watchdog(struct ifnet *ifp)
{
	struct ngbe_softc *sc = ifp->if_softc;
	struct tx_ring *txr = sc->tx_rings;
	int i, tx_hang = 0;

	/*
	 * The timer is set to NGBE_TX_TIMEOUT every time ngbe_start()
	 * queues a packet.  Anytime all descriptors are clean the timer
	 * is set to 0.
	 */
	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
			continue;
		else {
			tx_hang = 1;
			break;
		}
	}
	if (!tx_hang)
		return;

	printf("%s: watchdog timeout\n", DEVNAME(sc));
	ifp->if_oerrors++;

	ifp->if_flags &= ~IFF_RUNNING;
	ngbe_init(sc);
}

int
ngbe_allocate_pci_resources(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	pcireg_t memtype;

	memtype = PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT;
	if (pci_mapreg_map(pa, NGBE_PCIREG, memtype, 0, &os->os_memt,
	    &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
		printf(": unable to map registers\n");
		return ENXIO;
	}
	sc->hw.back = os;

	if (ngbe_setup_msix(sc))
		return EINVAL;

	return 0;
}

void
ngbe_free_pci_resources(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;

	if (sc->tag)
		pci_intr_disestablish(pa->pa_pc, sc->tag);
	sc->tag = NULL;
	if (os->os_membase)
		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
	os->os_membase = 0;
}

int
ngbe_allocate_msix(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	struct ngbe_queue *nq;
	pci_intr_handle_t ih;
	int i, error = 0;

	for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++) {
		if (pci_intr_map_msix(pa, i, &ih)) {
			printf(": unable to map msi-x vector %d", i);
			error = ENXIO;
			goto fail;
		}

		nq->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
		    ngbe_intr_queue, nq, nq->name);
		if (nq->tag == NULL) {
			printf(": unable to establish interrupt %d\n", i);
			error = ENXIO;
			goto fail;
		}

		nq->msix = i;
	}

	/* Now the link status/control last MSI-X vector */
	if (pci_intr_map_msix(pa, i, &ih)) {
		printf(": unable to map link vector\n");
		error = ENXIO;
		goto fail;
	}

	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
	    ngbe_intr_link, sc, sc->sc_dev.dv_xname);
	if (sc->tag == NULL) {
		printf(": unable to establish link interrupt\n");
		error = ENXIO;
		goto fail;
	}

	sc->linkvec = i;
	printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih), i,
	    (i > 1) ? "s" : "");

	return 0;
fail:
	for (nq = sc->queues; i > 0; i--, nq++) {
		if (nq->tag == NULL)
			continue;
		pci_intr_disestablish(pa->pa_pc, nq->tag);
		nq->tag = NULL;
	}

	return error;
}

void
ngbe_setup_interface(struct ngbe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = ngbe_ioctl;
	ifp->if_qstart = ngbe_start;
	ifp->if_watchdog = ngbe_watchdog;
	ifp->if_hardmtu = NGBE_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN -
	    ETHER_CRC_LEN;
	ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, ngbe_media_change,
	    ngbe_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);

	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);

	if_attach_queues(ifp, sc->sc_nqueues);
	if_attach_iqueues(ifp, sc->sc_nqueues);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifqueue *ifq = ifp->if_ifqs[i];
		struct ifiqueue *ifiq = ifp->if_iqs[i];
		struct tx_ring *txr = &sc->tx_rings[i];
		struct rx_ring *rxr = &sc->rx_rings[i];

		ifq->ifq_softc = txr;
		txr->ifq = ifq;

		ifiq->ifiq_softc = rxr;
		rxr->ifiq = ifiq;
	}
}

int
ngbe_setup_msix(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	int nmsix;

	nmsix = pci_intr_msix_count(pa);
	if (nmsix <= 1) {
		printf(": not enough msi-x vectors\n");
		return EINVAL;
	}

	/* Give one vector to events. */
	nmsix--;

	sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, NGBE_MAX_VECTORS,
	    INTRMAP_POWEROF2);
	sc->sc_nqueues = intrmap_count(sc->sc_intrmap);

	return 0;
}

int
ngbe_dma_malloc(struct ngbe_softc *sc, bus_size_t size,
    struct ngbe_dma_alloc *dma)
{
	struct ngbe_osdep *os = &sc->osdep;

	dma->dma_tag = os->os_pa.pa_dmat;

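	/*
	 * Standard single-segment bus_dma(9) setup: create a map,
	 * allocate DMA-safe memory, map it into kernel virtual address
	 * space, then load it into the map.  Each error path unwinds
	 * the steps taken so far.
	 */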
	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->dma_map))
		return 1;
	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT))
		goto destroy;
	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
		goto free;
	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
	    NULL, BUS_DMA_NOWAIT))
		goto unmap;

	dma->dma_size = size;

	return 0;
unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
free:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return 1;
}

void
ngbe_dma_free(struct ngbe_softc *sc, struct ngbe_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
		dma->dma_map = NULL;
	}
}

int
ngbe_allocate_isb(struct ngbe_softc *sc)
{
	int isize;

	isize = sizeof(uint32_t) * NGBE_ISB_MAX;
	if (ngbe_dma_malloc(sc, isize, &sc->isbdma)) {
		printf("%s: unable to allocate interrupt status resources\n",
		    DEVNAME(sc));
		return ENOMEM;
	}
	sc->isb_base = (uint32_t *)sc->isbdma.dma_vaddr;
	bzero((void *)sc->isb_base, isize);

	return 0;
}

void
ngbe_free_isb(struct ngbe_softc *sc)
{
	ngbe_dma_free(sc, &sc->isbdma);
}

int
ngbe_allocate_queues(struct ngbe_softc *sc)
{
	struct ngbe_queue *nq;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	int i, rsize, rxconf, tsize, txconf;

	/* Allocate the top level queue structs. */
	sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct ngbe_queue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		printf("%s: unable to allocate queue\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the Tx ring. */
	sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->tx_rings == NULL) {
		printf("%s: unable to allocate Tx ring\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the Rx ring. */
	sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->rx_rings == NULL) {
		printf("%s: unable to allocate Rx ring\n", DEVNAME(sc));
		goto rx_fail;
	}

	txconf = rxconf = 0;

	/* Set up the Tx queues. */
	tsize = roundup2(sc->num_tx_desc * sizeof(union ngbe_tx_desc),
	    PAGE_SIZE);
	for (i = 0; i < sc->sc_nqueues; i++, txconf++) {
		txr = &sc->tx_rings[i];
		txr->sc = sc;
		txr->me = i;

		if (ngbe_dma_malloc(sc, tsize, &txr->txdma)) {
			printf("%s: unable to allocate Tx descriptor\n",
			    DEVNAME(sc));
			goto err_tx_desc;
		}
		txr->tx_base = (union ngbe_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);
	}

	/* Set up the Rx queues. */
	rsize = roundup2(sc->num_rx_desc * sizeof(union ngbe_rx_desc),
	    PAGE_SIZE);
	for (i = 0; i < sc->sc_nqueues; i++, rxconf++) {
		rxr = &sc->rx_rings[i];
		rxr->sc = sc;
		rxr->me = i;
		timeout_set(&rxr->rx_refill, ngbe_rxrefill, rxr);

		if (ngbe_dma_malloc(sc, rsize, &rxr->rxdma)) {
			printf("%s: unable to allocate Rx descriptor\n",
			    DEVNAME(sc));
			goto err_rx_desc;
		}
		rxr->rx_base = (union ngbe_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);
	}

	/* Set up the queue holding structs. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		nq = &sc->queues[i];
		nq->sc = sc;
		nq->txr = &sc->tx_rings[i];
		nq->rxr = &sc->rx_rings[i];
		snprintf(nq->name, sizeof(nq->name), "%s:%d", DEVNAME(sc), i);
	}

	return 0;

err_rx_desc:
	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
		ngbe_dma_free(sc, &rxr->rxdma);
err_tx_desc:
	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
		ngbe_dma_free(sc, &txr->txdma);
	free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring));
	sc->rx_rings = NULL;
rx_fail:
	free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring));
	sc->tx_rings = NULL;
fail:
	return ENOMEM;
}

void
ngbe_free_receive_structures(struct ngbe_softc *sc)
{
	struct rx_ring *rxr;
	int i;

	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
		if_rxr_init(&rxr->rx_ring, 0, 0);

	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
		ngbe_free_receive_buffers(rxr);
}

void
ngbe_free_receive_buffers(struct rx_ring *rxr)
{
	struct ngbe_softc *sc;
	struct ngbe_rx_buf *rxbuf;
	int i;

	sc = rxr->sc;
	if (rxr->rx_buffers != NULL) {
		for (i = 0; i < sc->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->buf != NULL) {
				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
				    0, rxbuf->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->rxdma.dma_tag,
				    rxbuf->map);
				m_freem(rxbuf->buf);
				rxbuf->buf = NULL;
			}
			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
			rxbuf->map = NULL;
		}
		free(rxr->rx_buffers, M_DEVBUF,
		    sc->num_rx_desc * sizeof(struct ngbe_rx_buf));
		rxr->rx_buffers = NULL;
	}
}

void
ngbe_free_transmit_structures(struct ngbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, txr++)
		ngbe_free_transmit_buffers(txr);
}

void
ngbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct ngbe_softc *sc = txr->sc;
	struct ngbe_tx_buf *tx_buffer;
	int i;

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
			    0, tx_buffer->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
		}
		if (tx_buffer->m_head != NULL) {
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (tx_buffer->map != NULL) {
			bus_dmamap_destroy(txr->txdma.dma_tag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}

	if (txr->tx_buffers != NULL)
		free(txr->tx_buffers, M_DEVBUF,
		    sc->num_tx_desc * sizeof(struct ngbe_tx_buf));
	txr->tx_buffers = NULL;
	txr->txtag = NULL;
}

int
ngbe_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct ngbe_softc *sc = rxr->sc;
	struct ngbe_rx_buf *rxbuf;
	int i, error;

	rxr->rx_buffers = mallocarray(sc->num_rx_desc,
	    sizeof(struct ngbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rxr->rx_buffers == NULL) {
		printf("%s: unable to allocate rx_buffer memory\n",
		    DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	rxbuf = rxr->rx_buffers;
	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
		error = bus_dmamap_create(rxr->rxdma.dma_tag,
		    NGBE_MAX_JUMBO_FRAME_SIZE, 1, NGBE_MAX_JUMBO_FRAME_SIZE, 0,
		    BUS_DMA_NOWAIT, &rxbuf->map);
		if (error) {
			printf("%s: unable to create RX DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	return error;
}

int
ngbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct ngbe_softc *sc = txr->sc;
	struct ngbe_tx_buf *txbuf;
	int error, i;

	txr->tx_buffers = mallocarray(sc->num_tx_desc,
	    sizeof(struct ngbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->tx_buffers == NULL) {
		printf("%s: unable to allocate tx_buffer memory\n",
		    DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}
	txr->txtag = txr->txdma.dma_tag;

	/* Create the descriptor buffer dma maps. */
	for (i = 0; i < sc->num_tx_desc; i++) {
		txbuf = &txr->tx_buffers[i];
		error = bus_dmamap_create(txr->txdma.dma_tag, NGBE_TSO_SIZE,
		    NGBE_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT,
		    &txbuf->map);
		if (error != 0) {
			printf("%s: unable to create TX DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	return 0;
fail:
	return error;
}

int
ngbe_setup_receive_ring(struct rx_ring *rxr)
{
	struct ngbe_softc *sc = rxr->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rsize;

	rsize = roundup2(sc->num_rx_desc * sizeof(union ngbe_rx_desc),
	    PAGE_SIZE);

	/* Clear the ring contents. */
	bzero((void *)rxr->rx_base, rsize);

	if (ngbe_allocate_receive_buffers(rxr))
		return ENOMEM;

	/* Setup our descriptor indices. */
	rxr->next_to_check = 0;
	rxr->last_desc_filled = sc->num_rx_desc - 1;

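	/* Low water mark: enough clusters for two maximum-sized frames. */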
	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
	    sc->num_rx_desc - 1);

	ngbe_rxfill(rxr);
	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
		printf("%s: unable to fill any rx descriptors\n", DEVNAME(sc));
		return ENOBUFS;
	}

	return 0;
}

int
ngbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct ngbe_softc *sc = txr->sc;

	/* Now allocate transmit buffers for the ring. */
	if (ngbe_allocate_transmit_buffers(txr))
		return ENOMEM;

	/* Clear the old ring contents */
	bzero((void *)txr->tx_base,
	    (sizeof(union ngbe_tx_desc)) * sc->num_tx_desc);

	/* Reset indices. */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

int
ngbe_setup_receive_structures(struct ngbe_softc *sc)
{
	struct rx_ring *rxr = sc->rx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
		if (ngbe_setup_receive_ring(rxr))
			goto fail;
	}

	return 0;
fail:
	ngbe_free_receive_structures(sc);
	return ENOBUFS;
}

int
ngbe_setup_transmit_structures(struct ngbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
		if (ngbe_setup_transmit_ring(txr))
			goto fail;
	}

	return 0;
fail:
	ngbe_free_transmit_structures(sc);
	return ENOBUFS;
}

uint8_t *
ngbe_addr_list_itr(struct ngbe_hw *hw, uint8_t **mc_addr_ptr, uint32_t *vmdq)
{
	uint8_t *addr = *mc_addr_ptr;
	uint8_t *newptr;
	*vmdq = 0;

	newptr = addr + ETHER_ADDR_LEN;
	*mc_addr_ptr = newptr;
	return addr;
}

void
ngbe_iff(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t fctrl, vlanctrl;
	uint8_t *mta, *update_ptr;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);

	fctrl = NGBE_READ_REG_MASK(hw, NGBE_PSR_CTL,
	    ~(NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE));
	vlanctrl = NGBE_READ_REG_MASK(hw, NGBE_PSR_VLAN_CTL,
	    ~(NGBE_PSR_VLAN_CTL_VFE | NGBE_PSR_VLAN_CTL_CFIEN));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Set all bits that we expect to always be set */
	fctrl |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_MFE;
	vlanctrl |= NGBE_PSR_VLAN_CTL_VFE;

	hw->addr_ctrl.user_set_promisc = 0;
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > NGBE_SP_RAR_ENTRIES) {
		ifp->if_flags |= IFF_ALLMULTI;
		fctrl |= NGBE_PSR_CTL_MPE;
		if (ifp->if_flags & IFF_PROMISC) {
			fctrl |= NGBE_PSR_CTL_UPE;
			vlanctrl &= ~NGBE_PSR_VLAN_CTL_VFE;
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, &mta[mcnt * ETHER_ADDR_LEN],
			    ETHER_ADDR_LEN);
			mcnt++;

			ETHER_NEXT_MULTI(step, enm);
		}

		update_ptr = mta;
		hw->mac.ops.update_mc_addr_list(hw, update_ptr, mcnt,
		    ngbe_addr_list_itr, 1);
	}

	NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_CTL, vlanctrl);
	NGBE_WRITE_REG(hw, NGBE_PSR_CTL, fctrl);
}

int
ngbe_initialize_receive_unit(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct rx_ring *rxr = sc->rx_rings;
	uint32_t bufsz, mhadd, rxctrl, rxdctl, srrctl;
	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;
	int error = 0;

	/* Disable receives while setting up the descriptors */
	hw->mac.ops.disable_rx(hw);

	ngbe_setup_psrtype(hw);

	/* Enable hw crc stripping */
	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_CRC_STRIP,
	    NGBE_RSEC_CTL_CRC_STRIP);

	if (sc->sc_nqueues > 1) {
		NGBE_WRITE_REG_MASK(hw, NGBE_PSR_CTL, NGBE_PSR_CTL_PCSD,
		    NGBE_PSR_CTL_PCSD);
		ngbe_initialize_rss_mapping(sc);
	}

	mhadd = NGBE_READ_REG(hw, NGBE_PSR_MAX_SZ);
	if (mhadd != NGBE_MAX_JUMBO_FRAME_SIZE)
		NGBE_WRITE_REG(hw, NGBE_PSR_MAX_SZ, NGBE_MAX_JUMBO_FRAME_SIZE);

	bufsz = MCLBYTES >> NGBE_PX_RR_CFG_BSIZEPKT_SHIFT;

	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;

		/* Disable queue to avoid issues while updating state */
		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    NGBE_PX_RR_CFG_RR_EN, 0);

		/* Hardware may take up to 100us to actually disable Rx queue */
		do {
			DELAY(10);
			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
		} while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN));
		if (!wait_loop) {
			printf("%s: Rx queue %d not cleared within "
			    "the polling period\n", DEVNAME(sc), i);
			error = ETIMEDOUT;
			goto out;
		}

		NGBE_WRITE_REG(hw, NGBE_PX_RR_BAL(i),
		    (rdba & 0x00000000ffffffffULL));
		NGBE_WRITE_REG(hw, NGBE_PX_RR_BAH(i), (rdba >> 32));

		rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
		rxdctl |=
		    (sc->num_rx_desc / 128) << NGBE_PX_RR_CFG_RR_SIZE_SHIFT;
		rxdctl |= 0x1 << NGBE_PX_RR_CFG_RR_THER_SHIFT;
		NGBE_WRITE_REG(hw, NGBE_PX_RR_CFG(i), rxdctl);

		/* Reset head and tail pointers */
		NGBE_WRITE_REG(hw, NGBE_PX_RR_RP(i), 0);
		NGBE_WRITE_REG(hw, NGBE_PX_RR_WP(i), 0);

		/* Set up the SRRCTL register */
		srrctl = NGBE_READ_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    ~(NGBE_PX_RR_CFG_RR_HDR_SZ | NGBE_PX_RR_CFG_RR_BUF_SZ |
		    NGBE_PX_RR_CFG_SPLIT_MODE));
		srrctl |= bufsz;
		NGBE_WRITE_REG(hw, NGBE_PX_RR_CFG(i), srrctl);

		/* Enable receive descriptor ring */
		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    NGBE_PX_RR_CFG_RR_EN, NGBE_PX_RR_CFG_RR_EN);

		do {
			msec_delay(1);
			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
		} while (--wait_loop && !(rxdctl & NGBE_PX_RR_CFG_RR_EN));
		if (!wait_loop) {
			printf("%s: Rx queue %d not set within "
			    "the polling period\n", DEVNAME(sc), i);
			error = ETIMEDOUT;
			goto out;
		}
		NGBE_WRITE_REG(hw, NGBE_PX_RR_WP(i), rxr->last_desc_filled);
	}

	/* Enable all receives */
	rxctrl = NGBE_READ_REG(hw, NGBE_RDB_PB_CTL);
	rxctrl |= NGBE_RDB_PB_CTL_PBEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
out:
	return error;
}

void
ngbe_initialize_rss_mapping(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t reta = 0, rss_field, rss_key[10];
	int i, j, queue_id;

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == sc->sc_nqueues)
			j = 0;
		queue_id = j;
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			NGBE_WRITE_REG(hw, NGBE_RDB_RSSTBL(i >> 2), reta);
			reta = 0;
		}
	}

	/* Set up random bits */
	stoeplitz_to_key(&rss_key, sizeof(rss_key));

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		NGBE_WRITE_REG(hw, NGBE_RDB_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	rss_field = NGBE_RDB_RA_CTL_RSS_EN | NGBE_RDB_RA_CTL_RSS_IPV4 |
	    NGBE_RDB_RA_CTL_RSS_IPV4_TCP | NGBE_RDB_RA_CTL_RSS_IPV6 |
	    NGBE_RDB_RA_CTL_RSS_IPV6_TCP;

	NGBE_WRITE_REG(hw, NGBE_RDB_RA_CTL, rss_field);
}

int
ngbe_initialize_transmit_unit(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct tx_ring *txr;
	uint64_t tdba;
	uint32_t txdctl;
	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;
	int error = 0;

	/* TDM_CTL.TE must be set before Tx queues are enabled */
	NGBE_WRITE_REG_MASK(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE,
	    NGBE_TDM_CTL_TE);

	/* Setup the base and length of the Tx descriptor ring. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		txr = &sc->tx_rings[i];
		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;

		/* Disable queue to avoid issues while updating state */
		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), NGBE_PX_TR_CFG_SWFLSH);
		NGBE_WRITE_FLUSH(hw);

		NGBE_WRITE_REG(hw, NGBE_PX_TR_BAL(i),
		    (tdba & 0x00000000ffffffffULL));
		NGBE_WRITE_REG(hw, NGBE_PX_TR_BAH(i), (tdba >> 32));

		/* Reset head and tail pointers */
		NGBE_WRITE_REG(hw, NGBE_PX_TR_RP(i), 0);
		NGBE_WRITE_REG(hw, NGBE_PX_TR_WP(i), 0);

		txr->watchdog_timer = 0;

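		/* TR_SIZE is apparently encoded in units of 128
		 * descriptors, matching the Rx ring size encoding. */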
		txdctl = NGBE_PX_TR_CFG_ENABLE;
		txdctl |= 4 << NGBE_PX_TR_CFG_TR_SIZE_SHIFT;
		txdctl |= 0x20 << NGBE_PX_TR_CFG_WTHRESH_SHIFT;

		/* Enable queue */
		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), txdctl);

		/* Poll to verify queue is enabled */
		do {
			msec_delay(1);
			txdctl = NGBE_READ_REG(hw, NGBE_PX_TR_CFG(i));
		} while (--wait_loop && !(txdctl & NGBE_PX_TR_CFG_ENABLE));
		if (!wait_loop) {
			printf("%s: Tx queue %d not set within "
			    "the polling period\n", DEVNAME(sc), i);
			error = ETIMEDOUT;
			goto out;
		}
	}

	ifp->if_timer = 0;

	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_BUF_AE, 0x3ff, 0x10);
	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_CTL, 0x2, 0);
	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_CTL, 0x1, 1);

	/* Enable mac transmitter */
	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE,
	    NGBE_MAC_TX_CFG_TE);
out:
	return error;
}

int
ngbe_intr_link(void *arg)
{
	struct ngbe_softc *sc = (struct ngbe_softc *)arg;
	uint32_t eicr;

	eicr = ngbe_misc_isb(sc, NGBE_ISB_MISC);
	if (eicr & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) {
		KERNEL_LOCK();
		ngbe_handle_phy_event(sc);
		ngbe_update_link_status(sc);
		KERNEL_UNLOCK();
	}
	ngbe_enable_queue(sc, sc->linkvec);
	return 1;
}

int
ngbe_intr_queue(void *arg)
{
	struct ngbe_queue *nq = arg;
	struct ngbe_softc *sc = nq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct rx_ring *rxr = nq->rxr;
	struct tx_ring *txr = nq->txr;

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		ngbe_rxeof(rxr);
		ngbe_txeof(txr);
		ngbe_rxrefill(rxr);
	}

	ngbe_enable_queue(sc, nq->msix);

	return 1;
}

void
ngbe_init_eeprom_params(struct ngbe_hw *hw)
{
	struct ngbe_eeprom_info *eeprom = &hw->eeprom;

	if (eeprom->type == ngbe_eeprom_uninitialized) {
		eeprom->type = ngbe_eeprom_none;

		if (!(NGBE_READ_REG(hw, NGBE_SPI_STATUS) &
		    NGBE_SPI_STATUS_FLASH_BYPASS))
			eeprom->type = ngbe_flash;
	}

	eeprom->sw_region_offset = 0x80;
}

int
ngbe_init_hw(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	int status;

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(sc);

	if (!status)
		status = hw->mac.ops.start_hw(sc);

	return status;
}

void
ngbe_init_ops(struct ngbe_hw *hw)
{
	struct ngbe_mac_info *mac = &hw->mac;
	struct ngbe_phy_info *phy = &hw->phy;
	struct ngbe_eeprom_info *eeprom = &hw->eeprom;

	phy->ops.reset = ngbe_phy_reset;
	phy->ops.read_reg = ngbe_phy_read_reg;
	phy->ops.write_reg = ngbe_phy_write_reg;
	phy->ops.setup_link = ngbe_phy_setup_link;
	phy->ops.phy_led_ctrl = ngbe_phy_led_ctrl;
	phy->ops.check_overtemp = ngbe_phy_check_overtemp;
	phy->ops.identify = ngbe_phy_identify;
	phy->ops.init = ngbe_phy_init;
	phy->ops.check_event = ngbe_phy_check_event;
	phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause;
	phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause;
	phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement;
	phy->ops.setup_once = ngbe_phy_setup;

	/* MAC */
	mac->ops.init_hw = ngbe_init_hw;
	mac->ops.clear_hw_cntrs = ngbe_clear_hw_cntrs;
	mac->ops.get_mac_addr = ngbe_get_mac_addr;
	mac->ops.stop_adapter = ngbe_stop_adapter;
	mac->ops.get_bus_info = ngbe_get_bus_info;
	mac->ops.set_lan_id = ngbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ngbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ngbe_release_swfw_sync;
	mac->ops.reset_hw = ngbe_reset_hw;
	mac->ops.get_media_type = ngbe_get_media_type;
	mac->ops.disable_sec_rx_path = ngbe_disable_sec_rx_path;
	mac->ops.enable_sec_rx_path = ngbe_enable_sec_rx_path;
	mac->ops.enable_rx_dma = ngbe_enable_rx_dma;
	mac->ops.start_hw = ngbe_start_hw;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ngbe_set_rar;
	mac->ops.init_rx_addrs = ngbe_init_rx_addrs;
	mac->ops.update_mc_addr_list = ngbe_update_mc_addr_list;
	mac->ops.enable_rx = ngbe_enable_rx;
	mac->ops.disable_rx = ngbe_disable_rx;
	mac->ops.clear_vfta = ngbe_clear_vfta;
	mac->ops.init_uta_tables = ngbe_init_uta_tables;

	/* Flow Control */
	mac->ops.fc_enable = ngbe_fc_enable;
	mac->ops.setup_fc = ngbe_setup_fc;

	/* Link */
	mac->ops.check_link = ngbe_check_mac_link;
	mac->ops.setup_rxpba = ngbe_set_rxpba;

	mac->mcft_size = NGBE_SP_MC_TBL_SIZE;
	mac->vft_size = NGBE_SP_VFT_TBL_SIZE;
	mac->num_rar_entries = NGBE_SP_RAR_ENTRIES;
	mac->rx_pb_size = NGBE_SP_RX_PB_SIZE;
	mac->max_rx_queues = NGBE_SP_MAX_RX_QUEUES;
	mac->max_tx_queues = NGBE_SP_MAX_TX_QUEUES;

	/* EEPROM */
	eeprom->ops.init_params = ngbe_init_eeprom_params;
	eeprom->ops.eeprom_chksum_cap_st = ngbe_eepromcheck_cap;
	eeprom->ops.phy_led_oem_chk = ngbe_phy_led_oem_chk;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ngbe_set_fw_drv_ver;
	mac->ops.init_thermal_sensor_thresh = ngbe_init_thermal_sensor_thresh;
}

1908 void
1909 ngbe_init_rx_addrs(struct ngbe_softc *sc)
1910 {
1911 struct ngbe_hw *hw = &sc->hw;
1912 uint32_t rar_entries = hw->mac.num_rar_entries;
1913 uint32_t i, psrctl;
1914
1915 /*
1916 * If the current mac address is valid, assume it is a software
1917 * override to the permanent address.
1918 * Otherwise, use the permanent address from the eeprom.
1919 */
1920 if (ngbe_validate_mac_addr(hw->mac.addr)) {
1921 /* Get the MAC address from the RAR0 for later reference */
1922 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1923 }
1924 hw->addr_ctrl.overflow_promisc = 0;
1925 hw->addr_ctrl.rar_used_count = 1;
1926
1927 /* Zero out the other receive addresses. */
1928 for (i = 1; i < rar_entries; i++) {
1929 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, i);
1930 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_L, 0);
1931 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_H, 0);
1932 }
1933
1934 /* Clear the MTA */
1935 hw->addr_ctrl.mta_in_use = 0;
1936 psrctl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
1937 psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
1938 psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT;
1939 NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctl);
1940
1941 for (i = 0; i < hw->mac.mcft_size; i++)
1942 NGBE_WRITE_REG(hw, NGBE_PSR_MC_TBL(i), 0);
1943
1944 hw->mac.ops.init_uta_tables(hw);
1945 }
1946
1947 void
1948 ngbe_init_shared_code(struct ngbe_softc *sc)
1949 {
1950 struct ngbe_osdep *os = &sc->osdep;
1951 struct pci_attach_args *pa = &os->os_pa;
1952 struct ngbe_hw *hw = &sc->hw;
1953
1954 hw->subsystem_device_id = PCI_PRODUCT(pci_conf_read(pa->pa_pc,
1955 pa->pa_tag, PCI_SUBSYS_ID_REG));
1956
1957 hw->phy.type = ngbe_phy_internal;
1958
1959 NGBE_WRITE_REG(hw, NGBE_MDIO_CLAUSE_SELECT, 0xf);
1960
1961 ngbe_init_ops(hw);
1962
1963 /* Default flow control settings. */
1964 hw->fc.requested_mode = ngbe_fc_full;
1965 hw->fc.current_mode = ngbe_fc_full;
1966
1967 hw->fc.pause_time = NGBE_DEFAULT_FCPAUSE;
1968 hw->fc.disable_fc_autoneg = 0;
1969 }
1970
1971 void
1972 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw)
1973 {
1974 /* Only support thermal sensors attached to SP physical port 0 */
1975 if (hw->bus.lan_id)
1976 return;
1977
1978 NGBE_WRITE_REG(hw, NGBE_TS_INT_EN, NGBE_TS_INT_EN_DALARM_INT_EN |
1979 NGBE_TS_INT_EN_ALARM_INT_EN);
1980 NGBE_WRITE_REG(hw, NGBE_TS_EN, NGBE_TS_EN_ENA);
1981
1982 NGBE_WRITE_REG(hw, NGBE_TS_ALARM_THRE, 0x344);
1983 NGBE_WRITE_REG(hw, NGBE_TS_DALARM_THRE, 0x330);
1984 }
1985
1986 void
1987 ngbe_init_uta_tables(struct ngbe_hw *hw)
1988 {
1989 int i;
1990
1991 for (i = 0; i < 128; i++)
1992 NGBE_WRITE_REG(hw, NGBE_PSR_UC_TBL(i), 0);
1993 }
1994
1995 void
1996 ngbe_fc_autoneg(struct ngbe_softc *sc)
1997 {
1998 struct ngbe_hw *hw = &sc->hw;
1999 uint32_t speed;
2000 int link_up;
2001 int error = EINVAL;
2002
2003 /*
2004 * AN should have completed when the cable was plugged in.
2005 * Look for reasons to bail out. Bail out if:
2006 * - FC autoneg is disabled, or if
2007 * - link is not up.
2008 */
2009 if (hw->fc.disable_fc_autoneg) {
2010 printf("%s: flow control autoneg is disabled\n", DEVNAME(sc));
2011 goto out;
2012 }
2013
2014 hw->mac.ops.check_link(hw, &speed, &link_up, 0);
2015 if (!link_up)
2016 goto out;
2017
2018 switch (hw->phy.media_type) {
2019 /* Autoneg flow control on fiber adapters */
2020 case ngbe_media_type_fiber:
2021 break;
2022
2023 /* Autoneg flow control on copper adapters */
2024 case ngbe_media_type_copper:
2025 error = ngbe_fc_autoneg_copper(sc);
2026 break;
2027 default:
2028 break;
2029 }
2030 out:
2031 if (error) {
2032 hw->fc.fc_was_autonegged = 0;
2033 hw->fc.current_mode = hw->fc.requested_mode;
2034 } else
2035 hw->fc.fc_was_autonegged = 1;
2036 }
2037
2038 int
2039 ngbe_fc_autoneg_copper(struct ngbe_softc *sc)
2040 {
2041 struct ngbe_hw *hw = &sc->hw;
2042 uint8_t technology_ability_reg, lp_technology_ability_reg;
2043
2044 technology_ability_reg = lp_technology_ability_reg = 0;
2045 if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
2046 hw->phy.ops.get_adv_pause(hw, &technology_ability_reg);
2047 hw->phy.ops.get_lp_adv_pause(hw, &lp_technology_ability_reg);
2048 }
2049
2050 return ngbe_negotiate_fc(sc, (uint32_t)technology_ability_reg,
2051 (uint32_t)lp_technology_ability_reg, NGBE_TAF_SYM_PAUSE,
2052 NGBE_TAF_ASM_PAUSE, NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE);
2053 }
2054
2055 int
2056 ngbe_fc_enable(struct ngbe_softc *sc)
2057 {
2058 struct ngbe_hw *hw = &sc->hw;
2059 uint32_t mflcn, fccfg;
2060 uint32_t fcrtl, fcrth;
2061 uint32_t reg;
2062 int error = 0;
2063
2064 /* Validate the water mark configuration */
2065 if (!hw->fc.pause_time) {
2066 error = EINVAL;
2067 goto out;
2068 }
2069
2070 /* Low water mark of zero causes XOFF floods */
2071 if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
2072 if (!hw->fc.low_water ||
2073 hw->fc.low_water >= hw->fc.high_water) {
2074 printf("%s: invalid water mark configuration\n",
2075 DEVNAME(sc));
2076 error = EINVAL;
2077 goto out;
2078 }
2079 }
2080
2081 /* Negotiate the fc mode to use */
2082 ngbe_fc_autoneg(sc);
2083
2084 /* Disable any previous flow control settings */
2085 mflcn = NGBE_READ_REG(hw, NGBE_MAC_RX_FLOW_CTRL);
2086 mflcn &= ~NGBE_MAC_RX_FLOW_CTRL_RFE;
2087
2088 fccfg = NGBE_READ_REG(hw, NGBE_RDB_RFCC);
2089 fccfg &= ~NGBE_RDB_RFCC_RFCE_802_3X;
2090
2091 /*
2092 * The possible values of fc.current_mode are:
2093 * 0: Flow control is completely disabled
2094 * 1: Rx flow control is enabled (we can receive pause frames,
2095 * but not send pause frames).
2096 * 2: Tx flow control is enabled (we can send pause frames but
2097 * we do not support receiving pause frames).
2098 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2099 * other: Invalid.
2100 */
2101 switch (hw->fc.current_mode) {
2102 case ngbe_fc_none:
2103 /*
2104 * Flow control is disabled by software override or autoneg.
2105 * The code below will actually disable it in the HW.
2106 */
2107 break;
2108 case ngbe_fc_rx_pause:
2109 /*
2110 * Rx Flow control is enabled and Tx Flow control is
2111 * disabled by software override. Since there really
2112 * isn't a way to advertise that we are capable of RX
2113 * Pause ONLY, we will advertise that we support both
2114 * symmetric and asymmetric Rx PAUSE. Later, we will
2115 * disable the adapter's ability to send PAUSE frames.
2116 */
2117 mflcn |= NGBE_MAC_RX_FLOW_CTRL_RFE;
2118 break;
2119 case ngbe_fc_tx_pause:
2120 /*
2121 * Tx Flow control is enabled, and Rx Flow control is
2122 * disabled by software override.
2123 */
2124 fccfg |= NGBE_RDB_RFCC_RFCE_802_3X;
2125 break;
2126 case ngbe_fc_full:
2127 /* Flow control (both Rx and Tx) is enabled by SW override. */
2128 mflcn |= NGBE_MAC_RX_FLOW_CTRL_RFE;
2129 fccfg |= NGBE_RDB_RFCC_RFCE_802_3X;
2130 break;
2131 default:
2132 printf("%s: flow control param set incorrectly\n", DEVNAME(sc));
2133 error = EINVAL;
2134 goto out;
2135 }
2136
2137 /* Set 802.3x based flow control settings. */
2138 NGBE_WRITE_REG(hw, NGBE_MAC_RX_FLOW_CTRL, mflcn);
2139 NGBE_WRITE_REG(hw, NGBE_RDB_RFCC, fccfg);
2140
2141 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2142 if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
2143 		/* 32-byte granularity */
2144 fcrtl = (hw->fc.low_water << 10) | NGBE_RDB_RFCL_XONE;
2145 NGBE_WRITE_REG(hw, NGBE_RDB_RFCL, fcrtl);
2146 fcrth = (hw->fc.high_water << 10) | NGBE_RDB_RFCH_XOFFE;
2147 } else {
2148 NGBE_WRITE_REG(hw, NGBE_RDB_RFCL, 0);
2149 /*
2150 * In order to prevent Tx hangs when the internal Tx
2151 * switch is enabled we must set the high water mark
2152 * to the Rx packet buffer size - 24KB. This allows
2153 * the Tx switch to function even under heavy Rx
2154 * workloads.
2155 */
2156 fcrth = NGBE_READ_REG(hw, NGBE_RDB_PB_SZ) - 24576;
2157 }
2158
2159 NGBE_WRITE_REG(hw, NGBE_RDB_RFCH, fcrth);
2160
2161 /* Configure pause time (2 TCs per register) */
2162 reg = hw->fc.pause_time * 0x00010000;
2163 NGBE_WRITE_REG(hw, NGBE_RDB_RFCV, reg);
2164
2165 /* Configure flow control refresh threshold value */
2166 NGBE_WRITE_REG(hw, NGBE_RDB_RFCRT, hw->fc.pause_time / 2);
2167 out:
2168 return error;
2169 }
2170
2171 int
2172 ngbe_fmgr_cmd_op(struct ngbe_hw *hw, uint32_t cmd, uint32_t cmd_addr)
2173 {
2174 uint32_t val;
2175 int timeout = 0;
2176
2177 val = (cmd << SPI_CLK_CMD_OFFSET) | cmd_addr |
2178 (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET);
2179 NGBE_WRITE_REG(hw, NGBE_SPI_CMD, val);
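	/* Poll bit 0 of the SPI status register until the command completes. */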
2180 for (;;) {
2181 if (NGBE_READ_REG(hw, NGBE_SPI_STATUS) & 0x1)
2182 break;
2183 if (timeout == SPI_TIME_OUT_VALUE)
2184 return ETIMEDOUT;
2185
2186 timeout++;
2187 DELAY(10);
2188 }
2189
2190 return 0;
2191 }
2192
2193 uint32_t
2194 ngbe_flash_read_dword(struct ngbe_hw *hw, uint32_t addr)
2195 {
2196 int status = ngbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr);
2197 if (status)
2198 return status;
2199
2200 return NGBE_READ_REG(hw, NGBE_SPI_DATA);
2201 }
2202
2203 uint8_t
2204 ngbe_calculate_checksum(uint8_t *buffer, uint32_t length)
2205 {
2206 uint32_t i;
2207 uint8_t sum = 0;
2208
2209 if (!buffer)
2210 return 0;
2211
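	/*
	 * Two's-complement checksum: the bytes plus the returned value
	 * sum to zero modulo 256.
	 */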
2212 for (i = 0; i < length; i++)
2213 sum += buffer[i];
2214 return (uint8_t)(0 - sum);
2215 }
2216
2217 int
2218 ngbe_check_flash_load(struct ngbe_softc *sc, uint32_t check_bit)
2219 {
2220 struct ngbe_hw *hw = &sc->hw;
2221 uint32_t reg = 0;
2222 int i, error = 0;
2223
2224 	/* If flash is present */
2225 if (!(NGBE_READ_REG(hw, NGBE_SPI_STATUS) &
2226 NGBE_SPI_STATUS_FLASH_BYPASS)) {
2227 		/* Wait for hw to finish loading the flash */
2228 for (i = 0; i < NGBE_MAX_FLASH_LOAD_POLL_TIME; i++) {
2229 reg = NGBE_READ_REG(hw, NGBE_SPI_ILDR_STATUS);
2230 if (!(reg & check_bit))
2231 break;
2232 msec_delay(200);
2233 }
2234 if (i == NGBE_MAX_FLASH_LOAD_POLL_TIME) {
2235 error = ETIMEDOUT;
2236 printf("%s: hardware loading flash failed\n",
2237 DEVNAME(sc));
2238 }
2239 }
2240 return error;
2241 }
2242
2243 int
2244 ngbe_check_internal_phy_id(struct ngbe_softc *sc)
2245 {
2246 struct ngbe_hw *hw = &sc->hw;
2247 uint16_t phy_id, phy_id_high, phy_id_low;
2248
2249 ngbe_gphy_wait_mdio_access_on(hw);
2250
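	/*
	 * The internal PHY id is assembled from selected bits of the two
	 * MDI PHY ID registers.
	 */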
2251 ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high);
2252 phy_id = phy_id_high << 6;
2253 ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low);
2254 phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10;
2255
2256 if (NGBE_INTERNAL_PHY_ID != phy_id) {
2257 printf("%s: internal phy id 0x%x not supported\n",
2258 DEVNAME(sc), phy_id);
2259 return ENOTSUP;
2260 } else
2261 hw->phy.id = (uint32_t)phy_id;
2262
2263 return 0;
2264 }
2265
2266 int
2267 ngbe_check_mac_link(struct ngbe_hw *hw, uint32_t *speed, int *link_up,
2268 int link_up_wait_to_complete)
2269 {
2270 uint32_t status = 0;
2271 uint16_t speed_sta, value = 0;
2272 int i;
2273
2274 if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) {
2275 *link_up = 1;
2276 *speed = NGBE_LINK_SPEED_1GB_FULL;
2277 return status;
2278 }
2279
2280 if (link_up_wait_to_complete) {
2281 for (i = 0; i < NGBE_LINK_UP_TIME; i++) {
2282 status = hw->phy.ops.read_reg(hw,
2283 NGBE_MDIO_AUTO_NEG_STATUS,
2284 NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
2285 if (!status && (value & 0x4)) {
2286 *link_up = 1;
2287 break;
2288 } else
2289 *link_up = 0;
2290 msec_delay(100);
2291 }
2292 } else {
2293 status = hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_STATUS,
2294 NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
2295 if (!status && (value & 0x4))
2296 *link_up = 1;
2297 else
2298 *link_up = 0;
2299 }
2300
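	/*
	 * The negotiated speed is encoded in bits 5:3 (mask 0x38) of the
	 * AN status value read above.
	 */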
2301 speed_sta = value & 0x38;
2302 if (*link_up) {
2303 if (speed_sta == 0x28)
2304 *speed = NGBE_LINK_SPEED_1GB_FULL;
2305 else if (speed_sta == 0x18)
2306 *speed = NGBE_LINK_SPEED_100_FULL;
2307 else if (speed_sta == 0x8)
2308 *speed = NGBE_LINK_SPEED_10_FULL;
2309 } else
2310 *speed = NGBE_LINK_SPEED_UNKNOWN;
2311
2312 return status;
2313 }
2314
2315 int
2316 ngbe_check_mng_access(struct ngbe_hw *hw)
2317 {
2318 if (!ngbe_mng_present(hw))
2319 return 0;
2320 return 1;
2321 }
2322
2323 int
2324 ngbe_check_reset_blocked(struct ngbe_softc *sc)
2325 {
2326 uint32_t mmngc;
2327
2328 mmngc = NGBE_READ_REG(&sc->hw, NGBE_MIS_ST);
2329 if (mmngc & NGBE_MIS_ST_MNG_VETO) {
2330 printf("%s: MNG_VETO bit detected\n", DEVNAME(sc));
2331 return 1;
2332 }
2333
2334 return 0;
2335 }
2336
2337 void
2338 ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
2339 {
2340 uint16_t i;
2341
2342 NGBE_READ_REG(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW);
2343 NGBE_READ_REG(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW);
2344 NGBE_READ_REG(hw, NGBE_RDB_LXONTXC);
2345 NGBE_READ_REG(hw, NGBE_RDB_LXOFFTXC);
2346 NGBE_READ_REG(hw, NGBE_MAC_LXOFFRXC);
2347
2348 for (i = 0; i < 8; i++) {
2349 NGBE_WRITE_REG_MASK(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_UP,
2350 i << 16);
2351 NGBE_READ_REG(hw, NGBE_MAC_PXOFFRXC);
2352 }
2353
2354 NGBE_READ_REG(hw, NGBE_PX_GPRC);
2355 NGBE_READ_REG(hw, NGBE_PX_GPTC);
2356 NGBE_READ_REG(hw, NGBE_PX_GORC_MSB);
2357 NGBE_READ_REG(hw, NGBE_PX_GOTC_MSB);
2358
2359 NGBE_READ_REG(hw, NGBE_RX_BC_FRAMES_GOOD_LOW);
2360 NGBE_READ_REG(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD);
2361 NGBE_READ_REG(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD);
2362 NGBE_READ_REG(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);
2363 NGBE_READ_REG(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW);
2364 NGBE_READ_REG(hw, NGBE_TX_MC_FRAMES_GOOD_LOW);
2365 NGBE_READ_REG(hw, NGBE_TX_BC_FRAMES_GOOD_LOW);
2366 NGBE_READ_REG(hw, NGBE_RDM_DRP_PKT);
2367 }
2368
2369 void
2370 ngbe_clear_vfta(struct ngbe_hw *hw)
2371 {
2372 uint32_t offset;
2373
2374 for (offset = 0; offset < hw->mac.vft_size; offset++) {
2375 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_TBL(offset), 0);
2376 /* Errata 5 */
2377 hw->mac.vft_shadow[offset] = 0;
2378 }
2379
2380 for (offset = 0; offset < NGBE_PSR_VLAN_SWC_ENTRIES; offset++) {
2381 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC_IDX, offset);
2382 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC, 0);
2383 NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC_VM_L, 0);
2384 }
2385 }
2386
2387 void
2388 ngbe_configure_ivars(struct ngbe_softc *sc)
2389 {
2390 struct ngbe_queue *nq = sc->queues;
2391 uint32_t newitr;
2392 int i;
2393
2394 /* Populate MSIX to EITR select */
2395 NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITRSEL, 0);
2396
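	/*
	 * Derive the default interrupt throttling interval from the
	 * maximum interrupt rate, keeping the counter write-disable bit.
	 */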
2397 newitr = (4000000 / NGBE_MAX_INTS_PER_SEC) & NGBE_MAX_EITR;
2398 newitr |= NGBE_PX_ITR_CNT_WDIS;
2399
2400 for (i = 0; i < sc->sc_nqueues; i++, nq++) {
2401 /* Rx queue entry */
2402 ngbe_set_ivar(sc, i, nq->msix, 0);
2403 /* Tx queue entry */
2404 ngbe_set_ivar(sc, i, nq->msix, 1);
2405 NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITR(nq->msix), newitr);
2406 }
2407
2408 /* For the Link interrupt */
2409 ngbe_set_ivar(sc, 0, sc->linkvec, -1);
2410 NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITR(sc->linkvec), 1950);
2411 }
2412
2413 void
2414 ngbe_configure_pb(struct ngbe_softc *sc)
2415 {
2416 struct ngbe_hw *hw = &sc->hw;
2417
2418 hw->mac.ops.setup_rxpba(hw, 0, 0, PBA_STRATEGY_EQUAL);
2419 ngbe_pbthresh_setup(sc);
2420 }
2421
2422 void
2423 ngbe_disable_intr(struct ngbe_softc *sc)
2424 {
2425 struct ngbe_queue *nq;
2426 int i;
2427
2428 NGBE_WRITE_REG(&sc->hw, NGBE_PX_MISC_IEN, 0);
2429 for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++)
2430 ngbe_disable_queue(sc, nq->msix);
2431 NGBE_WRITE_FLUSH(&sc->hw);
2432 }
2433
2434 int
2435 ngbe_disable_pcie_master(struct ngbe_softc *sc)
2436 {
2437 int i, error = 0;
2438
2439 /* Exit if master requests are blocked */
2440 if (!(NGBE_READ_REG(&sc->hw, NGBE_PX_TRANSACTION_PENDING)))
2441 goto out;
2442
2443 /* Poll for master request bit to clear */
2444 for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2445 DELAY(100);
2446 if (!(NGBE_READ_REG(&sc->hw, NGBE_PX_TRANSACTION_PENDING)))
2447 goto out;
2448 }
2449 printf("%s: PCIe transaction pending bit did not clear\n",
2450 DEVNAME(sc));
2451 error = ETIMEDOUT;
2452 out:
2453 return error;
2454 }
2455
2456 void
2457 ngbe_disable_queue(struct ngbe_softc *sc, uint32_t vector)
2458 {
2459 uint64_t queue = 1ULL << vector;
2460 uint32_t mask;
2461
2462 mask = (queue & 0xffffffff);
2463 if (mask)
2464 NGBE_WRITE_REG(&sc->hw, NGBE_PX_IMS, mask);
2465 }
2466
2467 void
2468 ngbe_disable_rx(struct ngbe_hw *hw)
2469 {
2470 uint32_t rxctrl, psrctrl;
2471
2472 rxctrl = NGBE_READ_REG(hw, NGBE_RDB_PB_CTL);
2473 if (rxctrl & NGBE_RDB_PB_CTL_PBEN) {
2474 psrctrl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
2475 if (psrctrl & NGBE_PSR_CTL_SW_EN) {
2476 psrctrl &= ~NGBE_PSR_CTL_SW_EN;
2477 NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctrl);
2478 hw->mac.set_lben = 1;
2479 } else
2480 hw->mac.set_lben = 0;
2481 rxctrl &= ~NGBE_RDB_PB_CTL_PBEN;
2482 NGBE_WRITE_REG(hw, NGBE_RDB_PB_CTL, rxctrl);
2483
2484 NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE,
2485 0);
2486 }
2487 }
2488
2489 void
2490 ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
2491 {
2492 uint32_t secrxreg;
2493 int i;
2494
2495 NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_RX_DIS,
2496 NGBE_RSEC_CTL_RX_DIS);
2497 for (i = 0; i < 40; i++) {
2498 secrxreg = NGBE_READ_REG(hw, NGBE_RSEC_ST);
2499 if (secrxreg & NGBE_RSEC_ST_RSEC_RDY)
2500 break;
2501 else
2502 DELAY(1000);
2503 }
2504 }
2505
2506 int
2507 ngbe_eepromcheck_cap(struct ngbe_softc *sc, uint16_t offset, uint32_t *data)
2508 {
2509 struct ngbe_hw *hw = &sc->hw;
2510 struct ngbe_hic_read_shadow_ram buffer;
2511 uint32_t tmp;
2512 int status;
2513
2514 buffer.hdr.req.cmd = FW_EEPROM_CHECK_STATUS;
2515 buffer.hdr.req.buf_lenh = 0;
2516 buffer.hdr.req.buf_lenl = 0;
2517 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
2518
2519 /* Convert offset from words to bytes */
2520 buffer.address = 0;
2521 /* one word */
2522 buffer.length = 0;
2523
2524 status = ngbe_host_interface_command(sc, (uint32_t *)&buffer,
2525 sizeof(buffer), NGBE_HI_COMMAND_TIMEOUT, 0);
2526 if (status)
2527 return status;
2528
2529 if (ngbe_check_mng_access(hw)) {
2530 tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 1);
2531 if (tmp == NGBE_CHECKSUM_CAP_ST_PASS)
2532 status = 0;
2533 else
2534 status = EINVAL;
2535 } else
2536 status = EINVAL;
2537
2538 return status;
2539 }
2540
2541 void
2542 ngbe_enable_intr(struct ngbe_softc *sc)
2543 {
2544 struct ngbe_hw *hw = &sc->hw;
2545 struct ngbe_queue *nq;
2546 uint32_t mask;
2547 int i;
2548
2549 /* Enable misc interrupt */
2550 mask = NGBE_PX_MISC_IEN_MASK;
2551
2552 mask |= NGBE_PX_MISC_IEN_OVER_HEAT;
2553 NGBE_WRITE_REG(hw, NGBE_GPIO_DDR, 0x1);
2554 NGBE_WRITE_REG(hw, NGBE_GPIO_INTEN, 0x3);
2555 NGBE_WRITE_REG(hw, NGBE_GPIO_INTTYPE_LEVEL, 0x0);
2556
2557 NGBE_WRITE_REG(hw, NGBE_GPIO_POLARITY, 0x3);
2558
2559 NGBE_WRITE_REG(hw, NGBE_PX_MISC_IEN, mask);
2560
2561 /* Enable all queues */
2562 for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++)
2563 ngbe_enable_queue(sc, nq->msix);
2564 NGBE_WRITE_FLUSH(hw);
2565
2566 ngbe_enable_queue(sc, sc->linkvec);
2567 }
2568
2569 void
2570 ngbe_enable_queue(struct ngbe_softc *sc, uint32_t vector)
2571 {
2572 uint64_t queue = 1ULL << vector;
2573 uint32_t mask;
2574
2575 mask = (queue & 0xffffffff);
2576 if (mask)
2577 NGBE_WRITE_REG(&sc->hw, NGBE_PX_IMC, mask);
2578 }
2579
2580 void
2581 ngbe_enable_rx(struct ngbe_hw *hw)
2582 {
2583 uint32_t val;
2584
2585 /* Enable mac receiver */
2586 NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE,
2587 NGBE_MAC_RX_CFG_RE);
2588
2589 NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, 0x2, 0);
2590
2591 NGBE_WRITE_REG_MASK(hw, NGBE_RDB_PB_CTL, NGBE_RDB_PB_CTL_PBEN,
2592 NGBE_RDB_PB_CTL_PBEN);
2593
2594 if (hw->mac.set_lben) {
2595 val = NGBE_READ_REG(hw, NGBE_PSR_CTL);
2596 val |= NGBE_PSR_CTL_SW_EN;
2597 NGBE_WRITE_REG(hw, NGBE_PSR_CTL, val);
2598 hw->mac.set_lben = 0;
2599 }
2600 }
2601
2602 void
2603 ngbe_enable_rx_dma(struct ngbe_hw *hw, uint32_t reg)
2604 {
2605 /*
2606 * Workaround for emerald silicon errata when enabling the Rx datapath.
2607 * If traffic is incoming before we enable the Rx unit, it could hang
2608 * the Rx DMA unit. Therefore, make sure the security engine is
2609 * completely disabled prior to enabling the Rx unit.
2610 */
2611 hw->mac.ops.disable_sec_rx_path(hw);
2612
2613 if (reg & NGBE_RDB_PB_CTL_PBEN)
2614 hw->mac.ops.enable_rx(hw);
2615 else
2616 hw->mac.ops.disable_rx(hw);
2617
2618 hw->mac.ops.enable_sec_rx_path(hw);
2619 }
2620
2621 void
2622 ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
2623 {
2624 NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_RX_DIS, 0);
2625 NGBE_WRITE_FLUSH(hw);
2626 }
2627
2628 int
2629 ngbe_encap(struct tx_ring *txr, struct mbuf *m)
2630 {
2631 struct ngbe_softc *sc = txr->sc;
2632 uint32_t olinfo_status = 0, cmd_type_len;
2633 int i, j, ntxc;
2634 int first, last = 0;
2635 bus_dmamap_t map;
2636 struct ngbe_tx_buf *txbuf;
2637 union ngbe_tx_desc *txd = NULL;
2638
2639 /* Basic descriptor defines */
2640 cmd_type_len = NGBE_TXD_DTYP_DATA | NGBE_TXD_IFCS;
2641
2642 /*
2643 	 * It is important to capture the first descriptor
2644 	 * used because it will contain the index of
2645 	 * the one we tell the hardware to report back.
2646 */
2647 first = txr->next_avail_desc;
2648 txbuf = &txr->tx_buffers[first];
2649 map = txbuf->map;
2650
2651 /*
2652 	 * Set the appropriate offload context;
2653 	 * this will become the first descriptor.
2654 */
2655 ntxc = ngbe_tx_ctx_setup(txr, m, &cmd_type_len, &olinfo_status);
2656 if (ntxc == -1)
2657 goto fail;
2658
2659 /*
2660 * Map the packet for DMA.
2661 */
2662 switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m,
2663 BUS_DMA_NOWAIT)) {
2664 case 0:
2665 break;
2666 case EFBIG:
2667 if (m_defrag(m, M_NOWAIT) == 0 &&
2668 bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m,
2669 BUS_DMA_NOWAIT) == 0)
2670 break;
2671 /* FALLTHROUGH */
2672 default:
2673 return 0;
2674 }
2675
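	/* Step past the offload context descriptor(s), wrapping the ring. */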
2676 i = txr->next_avail_desc + ntxc;
2677 if (i >= sc->num_tx_desc)
2678 i -= sc->num_tx_desc;
2679
2680 for (j = 0; j < map->dm_nsegs; j++) {
2681 txd = &txr->tx_base[i];
2682
2683 txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
2684 txd->read.cmd_type_len =
2685 htole32(cmd_type_len | map->dm_segs[j].ds_len);
2686 txd->read.olinfo_status = htole32(olinfo_status);
2687 last = i;
2688
2689 if (++i == sc->num_tx_desc)
2690 i = 0;
2691 }
2692
2693 txd->read.cmd_type_len |= htole32(NGBE_TXD_EOP | NGBE_TXD_RS);
2694
2695 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
2696 BUS_DMASYNC_PREWRITE);
2697
2698 /* Set the index of the descriptor that will be marked done */
2699 txbuf->m_head = m;
2700 txbuf->eop_index = last;
2701
2702 txr->next_avail_desc = i;
2703
2704 return ntxc + j;
2705
2706 fail:
2707 bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
2708 return 0;
2709 }
2710
2711 int
2712 ngbe_get_buf(struct rx_ring *rxr, int i)
2713 {
2714 struct ngbe_softc *sc = rxr->sc;
2715 struct ngbe_rx_buf *rxbuf;
2716 struct mbuf *m;
2717 union ngbe_rx_desc *rxdesc;
2718 int error;
2719
2720 rxbuf = &rxr->rx_buffers[i];
2721 rxdesc = &rxr->rx_base[i];
2722 if (rxbuf->buf) {
2723 printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i);
2724 return ENOBUFS;
2725 }
2726
2727 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
2728 if (!m)
2729 return ENOBUFS;
2730
2731 m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
2732 m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2733
2734 error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m,
2735 BUS_DMA_NOWAIT);
2736 if (error) {
2737 m_freem(m);
2738 return error;
2739 }
2740
2741 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2742 rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2743 rxbuf->buf = m;
2744
2745 rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2746
2747 return 0;
2748 }
2749
2750 void
2751 ngbe_get_bus_info(struct ngbe_softc *sc)
2752 {
2753 struct ngbe_hw *hw = &sc->hw;
2754 uint16_t link_status;
2755
2756 /* Get the negotiated link width and speed from PCI config space */
2757 link_status = ngbe_read_pci_cfg_word(sc, NGBE_PCI_LINK_STATUS);
2758
2759 ngbe_set_pci_config_data(hw, link_status);
2760 }
2761
2762 void
2763 ngbe_get_copper_link_capabilities(struct ngbe_hw *hw, uint32_t *speed,
2764 int *autoneg)
2765 {
2766 *speed = 0;
2767
2768 if (hw->mac.autoneg)
2769 *autoneg = 1;
2770 else
2771 *autoneg = 0;
2772
2773 *speed = NGBE_LINK_SPEED_10_FULL | NGBE_LINK_SPEED_100_FULL |
2774 NGBE_LINK_SPEED_1GB_FULL;
2775 }
2776
2777 int
2778 ngbe_get_eeprom_semaphore(struct ngbe_softc *sc)
2779 {
2780 struct ngbe_hw *hw = &sc->hw;
2781 uint32_t swsm;
2782 int i, timeout = 2000;
2783 int status = ETIMEDOUT;
2784
2785 /* Get SMBI software semaphore between device drivers first */
2786 for (i = 0; i < timeout; i++) {
2787 /*
2788 * If the SMBI bit is 0 when we read it, then the bit will be
2789 * set and we have the semaphore.
2790 */
2791 swsm = NGBE_READ_REG(hw, NGBE_MIS_SWSM);
2792 if (!(swsm & NGBE_MIS_SWSM_SMBI)) {
2793 status = 0;
2794 break;
2795 }
2796 DELAY(50);
2797 }
2798
2799 if (i == timeout) {
2800 printf("%s: cannot access the eeprom - SMBI semaphore not "
2801 "granted\n", DEVNAME(sc));
2802 /*
2803 * this release is particularly important because our attempts
2804 * above to get the semaphore may have succeeded, and if there
2805 * was a timeout, we should unconditionally clear the semaphore
2806 * bits to free the driver to make progress.
2807 */
2808 ngbe_release_eeprom_semaphore(hw);
2809 DELAY(50);
2810
2811 /*
2812 		 * One last try: if the SMBI bit is 0 when we read it,
2813 * then the bit will be set and we have the semaphore.
2814 */
2815 swsm = NGBE_READ_REG(hw, NGBE_MIS_SWSM);
2816 if (!(swsm & NGBE_MIS_SWSM_SMBI))
2817 status = 0;
2818 }
2819
2820 return status;
2821 }
2822
2823 void
2824 ngbe_get_hw_control(struct ngbe_hw *hw)
2825 {
2826 /* Let firmware know the driver has taken over */
2827 NGBE_WRITE_REG_MASK(hw, NGBE_CFG_PORT_CTL,
2828 NGBE_CFG_PORT_CTL_DRV_LOAD, NGBE_CFG_PORT_CTL_DRV_LOAD);
2829 }
2830
2831 void
2832 ngbe_release_hw_control(struct ngbe_softc *sc)
2833 {
2834 /* Let firmware take over control of hw. */
2835 NGBE_WRITE_REG_MASK(&sc->hw, NGBE_CFG_PORT_CTL,
2836 NGBE_CFG_PORT_CTL_DRV_LOAD, 0);
2837 }
2838
2839 void
2840 ngbe_get_mac_addr(struct ngbe_hw *hw, uint8_t *mac_addr)
2841 {
2842 uint32_t rar_high, rar_low;
2843 int i;
2844
2845 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, 0);
2846 rar_high = NGBE_READ_REG(hw, NGBE_PSR_MAC_SWC_AD_H);
2847 rar_low = NGBE_READ_REG(hw, NGBE_PSR_MAC_SWC_AD_L);
2848
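	/*
	 * The two most significant bytes of the MAC address are held in
	 * the high register, the remaining four in the low register.
	 */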
2849 for (i = 0; i < 2; i++)
2850 mac_addr[i] = (uint8_t)(rar_high >> (1 - i) * 8);
2851
2852 for (i = 0; i < 4; i++)
2853 mac_addr[i + 2] = (uint8_t)(rar_low >> (3 - i) * 8);
2854 }
2855
2856 enum ngbe_media_type
2857 ngbe_get_media_type(struct ngbe_hw *hw)
2858 {
2859 enum ngbe_media_type media_type = ngbe_media_type_copper;
2860
2861 return media_type;
2862 }
2863
2864 void
2865 ngbe_gphy_dis_eee(struct ngbe_hw *hw)
2866 {
2867 uint16_t val = 0;
2868
2869 hw->phy.ops.write_reg(hw, 0x11, 0xa4b, 0x1110);
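	/*
	 * Clear the EEE advertisement word (MMD 7, register 0x3c) via the
	 * indirect MMDACR/MMDAADR access sequence.
	 */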
2870 hw->phy.ops.write_reg(hw, MII_MMDACR, 0x0, MMDACR_FN_ADDRESS | 0x07);
2871 hw->phy.ops.write_reg(hw, MII_MMDAADR, 0x0, 0x003c);
2872 hw->phy.ops.write_reg(hw, MII_MMDACR, 0x0, MMDACR_FN_DATANPI | 0x07);
2873 hw->phy.ops.write_reg(hw, MII_MMDAADR, 0x0, 0);
2874
2875 /* Disable 10/100M Half Duplex */
2876 msec_delay(100);
2877 hw->phy.ops.read_reg(hw, MII_ANAR, 0, &val);
2878 val &= ~(ANAR_TX | ANAR_10);
2879 hw->phy.ops.write_reg(hw, MII_ANAR, 0x0, val);
2880 }
2881
2882 void
2883 ngbe_gphy_efuse_calibration(struct ngbe_softc *sc)
2884 {
2885 struct ngbe_hw *hw = &sc->hw;
2886 uint32_t efuse[2];
2887
2888 ngbe_gphy_wait_mdio_access_on(hw);
2889
2890 efuse[0] = sc->gphy_efuse[0];
2891 efuse[1] = sc->gphy_efuse[1];
2892
2893 if (!efuse[0] && !efuse[1])
2894 efuse[0] = efuse[1] = 0xffffffff;
2895
2896 /* Calibration */
2897 efuse[0] |= 0xf0000100;
2898 efuse[1] |= 0xff807fff;
2899
2900 /* EODR, Efuse Output Data Register */
2901 ngbe_phy_write_reg(hw, 16, 0xa46, (efuse[0] >> 0) & 0xffff);
2902 ngbe_phy_write_reg(hw, 17, 0xa46, (efuse[0] >> 16) & 0xffff);
2903 ngbe_phy_write_reg(hw, 18, 0xa46, (efuse[1] >> 0) & 0xffff);
2904 ngbe_phy_write_reg(hw, 19, 0xa46, (efuse[1] >> 16) & 0xffff);
2905
2906 /* Set efuse ready */
2907 ngbe_phy_write_reg(hw, 20, 0xa46, 0x01);
2908 ngbe_gphy_wait_mdio_access_on(hw);
2909 ngbe_phy_write_reg(hw, 27, NGBE_INTERNAL_PHY_PAGE_OFFSET, 0x8011);
2910 ngbe_phy_write_reg(hw, 28, NGBE_INTERNAL_PHY_PAGE_OFFSET, 0x5737);
2911 ngbe_gphy_dis_eee(hw);
2912 }
2913
2914 void
2915 ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw)
2916 {
2917 uint16_t val = 0;
2918 int i;
2919
2920 for (i = 0; i < 100; i++) {
2921 ngbe_phy_read_reg(hw, 29, NGBE_INTERNAL_PHY_PAGE_OFFSET, &val);
2922 if (val & 0x20)
2923 break;
2924 DELAY(1000);
2925 }
2926 }
2927
2928 void
2929 ngbe_handle_phy_event(struct ngbe_softc *sc)
2930 {
2931 struct ngbe_hw *hw = &sc->hw;
2932 uint32_t reg;
2933
2934 reg = NGBE_READ_REG(hw, NGBE_GPIO_INTSTATUS);
2935 NGBE_WRITE_REG(hw, NGBE_GPIO_EOI, reg);
2936 if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA))
2937 hw->phy.ops.check_event(sc);
2938 }
2939
2940 int
2941 ngbe_host_interface_command(struct ngbe_softc *sc, uint32_t *buffer,
2942 uint32_t length, uint32_t timeout, int return_data)
2943 {
2944 struct ngbe_hw *hw = &sc->hw;
2945 uint32_t hicr, i, bi, dword_len;
2946 uint32_t hdr_size = sizeof(struct ngbe_hic_hdr);
2947 uint32_t buf[64] = {};
2948 uint16_t buf_len;
2949 int status = 0;
2950
2951 if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
2952 printf("%s: buffer length failure\n", DEVNAME(sc));
2953 return EINVAL;
2954 }
2955
2956 if (hw->mac.ops.acquire_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB))
2957 return EINVAL;
2958
2959 	/* Calculate length in DWORDs; it must be a multiple of a DWORD */
2960 if ((length % (sizeof(uint32_t))) != 0) {
2961 printf("%s: buffer length failure, not aligned to dword\n",
2962 DEVNAME(sc));
2963 status = EINVAL;
2964 goto rel_out;
2965 }
2966
2967 if (ngbe_check_mng_access(hw)) {
2968 hicr = NGBE_READ_REG(hw, NGBE_MNG_MBOX_CTL);
2969 if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY))
2970 printf("%s: fwrdy is set before command\n",
2971 DEVNAME(sc));
2972 }
2973
2974 dword_len = length >> 2;
2975
2976 /*
2977 * The device driver writes the relevant command block
2978 * into the ram area.
2979 */
2980 for (i = 0; i < dword_len; i++) {
2981 if (ngbe_check_mng_access(hw)) {
2982 NGBE_WRITE_REG_ARRAY(hw, NGBE_MNG_MBOX, i,
2983 htole32(buffer[i]));
2984 } else {
2985 status = EINVAL;
2986 goto rel_out;
2987 }
2988 }
2989
2990 /* Setting this bit tells the ARC that a new command is pending. */
2991 if (ngbe_check_mng_access(hw)) {
2992 NGBE_WRITE_REG_MASK(hw, NGBE_MNG_MBOX_CTL,
2993 NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY);
2994 } else {
2995 status = EINVAL;
2996 goto rel_out;
2997 }
2998
2999 for (i = 0; i < timeout; i++) {
3000 if (ngbe_check_mng_access(hw)) {
3001 hicr = NGBE_READ_REG(hw, NGBE_MNG_MBOX_CTL);
3002 if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY))
3003 break;
3004 }
3005 msec_delay(1);
3006 }
3007
3008 buf[0] = NGBE_READ_REG(hw, NGBE_MNG_MBOX);
3009 /* Check command completion */
3010 if (timeout != 0 && i == timeout) {
3011 printf("%s: command has failed with no status valid\n",
3012 DEVNAME(sc));
3013 if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
3014 status = EINVAL;
3015 goto rel_out;
3016 }
3017 }
3018
3019 if (!return_data)
3020 goto rel_out;
3021
3022 /* Calculate length in DWORDs */
3023 dword_len = hdr_size >> 2;
3024
3025 /* First pull in the header so we know the buffer length */
3026 for (bi = 0; bi < dword_len; bi++) {
3027 if (ngbe_check_mng_access(hw)) {
3028 buffer[bi] = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, bi);
3029 le32_to_cpus(&buffer[bi]);
3030 } else {
3031 status = EINVAL;
3032 goto rel_out;
3033 }
3034 }
3035
3036 	/* If there is anything in the data position, pull it in */
3037 buf_len = ((struct ngbe_hic_hdr *)buffer)->buf_len;
3038 if (buf_len == 0)
3039 goto rel_out;
3040
3041 if (length < buf_len + hdr_size) {
3042 printf("%s: buffer not large enough for reply message\n",
3043 DEVNAME(sc));
3044 status = EINVAL;
3045 goto rel_out;
3046 }
3047
3048 	/* Calculate length in DWORDs, adding 3 to round odd lengths up */
3049 dword_len = (buf_len + 3) >> 2;
3050
3051 /* Pull in the rest of the buffer (bi is where we left off) */
3052 for (; bi <= dword_len; bi++) {
3053 if (ngbe_check_mng_access(hw)) {
3054 buffer[bi] = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, bi);
3055 le32_to_cpus(&buffer[bi]);
3056 } else {
3057 status = EINVAL;
3058 goto rel_out;
3059 }
3060 }
3061
3062 rel_out:
3063 hw->mac.ops.release_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB);
3064 return status;
3065 }
3066
3067 int
3068 ngbe_hpbthresh(struct ngbe_softc *sc)
3069 {
3070 uint32_t dv_id, rx_pba;
3071 int kb, link, marker, tc;
3072
3073 /* Calculate max LAN frame size */
3074 tc = link = sc->sc_ac.ac_if.if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3075 NGBE_ETH_FRAMING;
3076
3077 /* Calculate delay value for device */
3078 dv_id = NGBE_DV(link, tc);
3079
3080 	/* Delay value is calculated in bit times; convert to KB */
3081 kb = NGBE_BT2KB(dv_id);
3082 rx_pba = NGBE_READ_REG(&sc->hw, NGBE_RDB_PB_SZ) >> NGBE_RDB_PB_SZ_SHIFT;
3083
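	/* High water mark: packet buffer size less the required headroom. */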
3084 marker = rx_pba - kb;
3085
3086 return marker;
3087 }
3088
3089 int
3090 ngbe_lpbthresh(struct ngbe_softc *sc)
3091 {
3092 uint32_t dv_id;
3093 int tc;
3094
3095 /* Calculate max LAN frame size */
3096 tc = sc->sc_ac.ac_if.if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3097
3098 /* Calculate delay value for device */
3099 dv_id = NGBE_LOW_DV(tc);
3100
3101 	/* Delay value is calculated in bit times; convert to KB */
3102 return NGBE_BT2KB(dv_id);
3103 }
3104
3105 int
3106 ngbe_mng_present(struct ngbe_hw *hw)
3107 {
3108 uint32_t fwsm;
3109
3110 fwsm = NGBE_READ_REG(hw, NGBE_MIS_ST);
3111
3112 return fwsm & NGBE_MIS_ST_MNG_INIT_DN;
3113 }
3114
3115 int
3116 ngbe_mta_vector(struct ngbe_hw *hw, uint8_t *mc_addr)
3117 {
3118 uint32_t vector = 0;
3119 int rshift;
3120
3121 /* pick bits [47:32] of the address. */
3122 vector = mc_addr[4] | (((uint16_t)mc_addr[5]) << 8);
3123 switch (hw->mac.mc_filter_type) {
3124 case 0: /* bits 47:36 */
3125 case 1: /* bits 46:35 */
3126 case 2: /* bits 45:34 */
3127 rshift = 4 - hw->mac.mc_filter_type;
3128 break;
3129 case 3: /* bits 43:32 */
3130 rshift = 0;
3131 break;
3132 default: /* Invalid mc_filter_type */
3133 vector = rshift = 0;
3134 break;
3135 }
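	/* Reduce to the 12-bit table index selected above. */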
3136 vector = (vector >> rshift) & 0x0fff;
3137
3138 return vector;
3139 }
3140
3141 int
3142 ngbe_negotiate_fc(struct ngbe_softc *sc, uint32_t adv_reg, uint32_t lp_reg,
3143 uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm)
3144 {
3145 struct ngbe_hw *hw = &sc->hw;
3146
3147 if ((!(adv_reg)) || (!(lp_reg)))
3148 return EINVAL;
3149
3150 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
3151 /*
3152 		 * Now we need to check if the user selected Rx-only
3153 		 * pause frames. In this case, we had to advertise
3154 * FULL flow control because we could not advertise RX
3155 * ONLY. Hence, we must now check to see if we need to
3156 * turn OFF the TRANSMISSION of PAUSE frames.
3157 */
3158 if (hw->fc.requested_mode == ngbe_fc_full)
3159 hw->fc.current_mode = ngbe_fc_full;
3160 else
3161 hw->fc.current_mode = ngbe_fc_rx_pause;
3162
3163 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3164 (lp_reg & lp_sym) && (lp_reg & lp_asm))
3165 hw->fc.current_mode = ngbe_fc_tx_pause;
3166 else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3167 !(lp_reg & lp_sym) && (lp_reg & lp_asm))
3168 hw->fc.current_mode = ngbe_fc_rx_pause;
3169 else
3170 hw->fc.current_mode = ngbe_fc_none;
3171
3172 return 0;
3173 }
3174
3175 int
3176 ngbe_non_sfp_link_config(struct ngbe_softc *sc)
3177 {
3178 struct ngbe_hw *hw = &sc->hw;
3179 uint32_t speed;
3180 int error;
3181
3182 if (hw->mac.autoneg)
3183 speed = hw->phy.autoneg_advertised;
3184 else
3185 speed = hw->phy.force_speed;
3186
3187 msec_delay(50);
3188 if (hw->phy.type == ngbe_phy_internal) {
3189 error = hw->phy.ops.setup_once(sc);
3190 if (error)
3191 return error;
3192 }
3193
3194 error = hw->mac.ops.setup_link(sc, speed, 0);
3195 return error;
3196 }
3197
3198 void
3199 ngbe_pbthresh_setup(struct ngbe_softc *sc)
3200 {
3201 struct ngbe_hw *hw = &sc->hw;
3202
3203 hw->fc.high_water = ngbe_hpbthresh(sc);
3204 hw->fc.low_water = ngbe_lpbthresh(sc);
3205
3206 /* Low water marks must not be larger than high water marks */
3207 if (hw->fc.low_water > hw->fc.high_water)
3208 hw->fc.low_water = 0;
3209 }
3210
3211 void
3212 ngbe_phy_check_event(struct ngbe_softc *sc)
3213 {
3214 struct ngbe_hw *hw = &sc->hw;
3215 uint16_t value = 0;
3216
3217 hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_LSC,
3218 NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
3219 }
3220
3221 int
3222 ngbe_phy_check_overtemp(struct ngbe_hw *hw)
3223 {
3224 uint32_t ts_state;
3225 int status = 0;
3226
3227 /* Check that the LASI temp alarm status was triggered */
3228 ts_state = NGBE_READ_REG(hw, NGBE_TS_ALARM_ST);
3229
3230 if (ts_state & NGBE_TS_ALARM_ST_ALARM)
3231 status = 1;
3232
3233 return status;
3234 }
3235
3236 void
3237 ngbe_phy_get_advertised_pause(struct ngbe_hw *hw, uint8_t *pause_bit)
3238 {
3239 uint16_t value;
3240
3241 hw->phy.ops.read_reg(hw, 4, 0, &value);
3242 *pause_bit = (uint8_t)((value >> 10) & 0x3);
3243 }
3244
3245 void
3246 ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *hw, uint8_t *pause_bit)
3247 {
3248 uint16_t value;
3249
3250 hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_LSC,
3251 NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
3252 hw->phy.ops.read_reg(hw, MII_BMSR, 0, &value);
3253 value = (value & BMSR_ACOMP) ? 1 : 0;
3254
3255 /* If AN complete then check lp adv pause */
3256 hw->phy.ops.read_reg(hw, MII_ANLPAR, 0, &value);
3257 *pause_bit = (uint8_t)((value >> 10) & 0x3);
3258 }
3259
3260 int
3261 ngbe_phy_identify(struct ngbe_softc *sc)
3262 {
3263 struct ngbe_hw *hw = &sc->hw;
3264 int error;
3265
3266 switch(hw->phy.type) {
3267 case ngbe_phy_internal:
3268 error = ngbe_check_internal_phy_id(sc);
3269 break;
3270 default:
3271 error = ENOTSUP;
3272 }
3273
3274 return error;
3275 }
3276
3277 int
3278 ngbe_phy_init(struct ngbe_softc *sc)
3279 {
3280 struct ngbe_hw *hw = &sc->hw;
3281 uint16_t value;
3282 uint8_t lan_id = hw->bus.lan_id;
3283 int error;
3284
3285 /* Set fwsw semaphore mask for phy first */
3286 if (!hw->phy.phy_semaphore_mask)
3287 hw->phy.phy_semaphore_mask = NGBE_MNG_SWFW_SYNC_SW_PHY;
3288
3289 /* Init phy.addr according to HW design */
3290 hw->phy.addr = 0;
3291
3292 /* Identify the PHY or SFP module */
3293 error = hw->phy.ops.identify(sc);
3294 if (error == ENOTSUP)
3295 return error;
3296
3297 	/* Enable interrupts; only link status change and AN done are allowed */
3298 if (hw->phy.type == ngbe_phy_internal) {
3299 value = NGBE_INTPHY_INT_LSC | NGBE_INTPHY_INT_ANC;
3300 hw->phy.ops.write_reg(hw, 0x12, 0xa42, value);
3301 sc->gphy_efuse[0] =
3302 ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8);
3303 sc->gphy_efuse[1] =
3304 ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8 + 4);
3305 }
3306
3307 return error;
3308 }
3309
3310 void
3311 ngbe_phy_led_ctrl(struct ngbe_softc *sc)
3312 {
3313 struct ngbe_hw *hw = &sc->hw;
3314 uint16_t value;
3315
3316 if (sc->led_conf != -1)
3317 value = sc->led_conf & 0xffff;
3318 else
3319 value = 0x205b;
3320 hw->phy.ops.write_reg(hw, 16, 0xd04, value);
3321 hw->phy.ops.write_reg(hw, 17, 0xd04, 0);
3322
3323 hw->phy.ops.read_reg(hw, 18, 0xd04, &value);
3324 if (sc->led_conf != -1) {
3325 value &= ~0x73;
3326 value |= sc->led_conf >> 16;
3327 } else {
3328 value &= 0xfffc;
3329 /* Act led blinking mode set to 60ms */
3330 value |= 0x2;
3331 }
3332 hw->phy.ops.write_reg(hw, 18, 0xd04, value);
3333 }
3334
3335 int
3336 ngbe_phy_led_oem_chk(struct ngbe_softc *sc, uint32_t *data)
3337 {
3338 struct ngbe_hw *hw = &sc->hw;
3339 struct ngbe_hic_read_shadow_ram buffer;
3340 uint32_t tmp;
3341 int status;
3342
3343 buffer.hdr.req.cmd = FW_PHY_LED_CONF;
3344 buffer.hdr.req.buf_lenh = 0;
3345 buffer.hdr.req.buf_lenl = 0;
3346 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3347
3348 /* Convert offset from words to bytes */
3349 buffer.address = 0;
3350 /* One word */
3351 buffer.length = 0;
3352
3353 status = ngbe_host_interface_command(sc, (uint32_t *)&buffer,
3354 sizeof(buffer), NGBE_HI_COMMAND_TIMEOUT, 0);
3355 if (status)
3356 return status;
3357
3358 if (ngbe_check_mng_access(hw)) {
3359 tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 1);
3360 if (tmp == NGBE_CHECKSUM_CAP_ST_PASS) {
3361 tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 2);
3362 *data = tmp;
3363 status = 0;
3364 } else if (tmp == NGBE_CHECKSUM_CAP_ST_FAIL) {
3365 *data = tmp;
3366 status = EINVAL;
3367 } else
3368 status = EINVAL;
3369 } else {
3370 status = EINVAL;
3371 return status;
3372 }
3373
3374 return status;
3375 }
3376
3377 int
3378 ngbe_phy_read_reg(struct ngbe_hw *hw, uint32_t off, uint32_t page,
3379 uint16_t *data)
3380 {
3381 *data = 0;
3382
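	/*
	 * Select the PHY register page first; the AN status and LSC
	 * registers on the internal PHY page are accessed directly.
	 */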
3383 if (!((page == NGBE_INTERNAL_PHY_PAGE_OFFSET) &&
3384 ((off == NGBE_MDIO_AUTO_NEG_STATUS) ||
3385 (off == NGBE_MDIO_AUTO_NEG_LSC)))) {
3386 NGBE_WRITE_REG(hw,
3387 NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET),
3388 page);
3389 }
3390 *data = NGBE_READ_REG(hw, NGBE_PHY_CONFIG(off)) & 0xffff;
3391
3392 return 0;
3393 }
3394
3395 int
3396 ngbe_phy_write_reg(struct ngbe_hw *hw, uint32_t off, uint32_t page,
3397 uint16_t data)
3398 {
3399 if (!((page == NGBE_INTERNAL_PHY_PAGE_OFFSET) &&
3400 ((off == NGBE_MDIO_AUTO_NEG_STATUS) ||
3401 (off == NGBE_MDIO_AUTO_NEG_LSC)))) {
3402 NGBE_WRITE_REG(hw,
3403 NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET),
3404 page);
3405 }
3406 NGBE_WRITE_REG(hw, NGBE_PHY_CONFIG(off), data);
3407
3408 return 0;
3409 }
3410
3411 int
3412 ngbe_phy_reset(struct ngbe_softc *sc)
3413 {
3414 struct ngbe_hw *hw = &sc->hw;
3415 uint16_t value;
3416 int i, status;
3417
3418 /* only support internal phy */
3419 if (hw->phy.type != ngbe_phy_internal) {
3420 printf("%s: operation not supported\n", DEVNAME(sc));
3421 return EINVAL;
3422 }
3423
3424 /* Don't reset PHY if it's shut down due to overtemp. */
3425 if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw) != 0) {
3426 printf("%s: overtemp! skip phy reset\n", DEVNAME(sc));
3427 return EINVAL;
3428 }
3429
3430 /* Blocked by MNG FW so bail */
3431 status = ngbe_check_reset_blocked(sc);
3432 if (status)
3433 return status;
3434
3435 value = NGBE_MDI_PHY_RESET;
3436 status = hw->phy.ops.write_reg(hw, 0, 0, value);
3437 for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) {
3438 status = hw->phy.ops.read_reg(hw, 0, 0, &value);
3439 if (!(value & NGBE_MDI_PHY_RESET))
3440 break;
3441 msec_delay(1);
3442 }
3443
3444 if (i == NGBE_PHY_RST_WAIT_PERIOD) {
3445 printf("%s: phy mode reset did not complete\n", DEVNAME(sc));
3446 return ETIMEDOUT;
3447 }
3448
3449 return status;
3450 }
3451
3452 int
3453 ngbe_phy_set_pause_advertisement(struct ngbe_hw *hw, uint16_t pause_bit)
3454 {
3455 uint16_t value;
3456 int status;
3457
3458 status = hw->phy.ops.read_reg(hw, MII_ANAR, 0, &value);
3459 value &= ~0xc00;
3460 value |= pause_bit;
3461 status = hw->phy.ops.write_reg(hw, MII_ANAR, 0, value);
3462 return status;
3463 }
3464
3465 int
3466 ngbe_phy_setup(struct ngbe_softc *sc)
3467 {
3468 struct ngbe_hw *hw = &sc->hw;
3469 uint16_t value = 0;
3470 int i;
3471
3472 for (i = 0; i < 15; i++) {
3473 if (!NGBE_READ_REG_MASK(hw, NGBE_MIS_ST,
3474 NGBE_MIS_ST_GPHY_IN_RST(hw->bus.lan_id)))
3475 break;
3476 msec_delay(1);
3477 }
3478 if (i == 15) {
3479 printf("%s: gphy reset exceeds maximum time\n", DEVNAME(sc));
3480 return ETIMEDOUT;
3481 }
3482
3483 ngbe_gphy_efuse_calibration(sc);
3484 hw->phy.ops.write_reg(hw, 20, 0xa46, 2);
3485 ngbe_gphy_wait_mdio_access_on(hw);
3486
3487 for (i = 0; i < 100; i++) {
3488 hw->phy.ops.read_reg(hw, 16, 0xa42, &value);
3489 if ((value & 0x7) == 3)
3490 break;
3491 DELAY(1000);
3492 }
3493 if (i == 100) {
3494 printf("%s: phy reset exceeds maximum time\n", DEVNAME(sc));
3495 return ETIMEDOUT;
3496 }
3497
3498 return 0;
3499 }
3500
3501 int
3502 ngbe_phy_setup_link(struct ngbe_softc *sc, uint32_t speed, int need_restart)
3503 {
3504 struct ngbe_hw *hw = &sc->hw;
3505 uint16_t value = 0;
3506 int status;
3507
3508 if (!hw->mac.autoneg) {
3509 status = hw->phy.ops.reset(sc);
3510 if (status) {
3511 printf("%s: phy reset failed\n", DEVNAME(sc));
3512 return status;
3513 }
3514
3515 switch (speed) {
3516 case NGBE_LINK_SPEED_1GB_FULL:
3517 value = NGBE_MDI_PHY_SPEED_SELECT1;
3518 break;
3519 case NGBE_LINK_SPEED_100_FULL:
3520 value = NGBE_MDI_PHY_SPEED_SELECT0;
3521 break;
3522 case NGBE_LINK_SPEED_10_FULL:
3523 value = 0;
3524 break;
3525 default:
3526 value = NGBE_MDI_PHY_SPEED_SELECT0 |
3527 NGBE_MDI_PHY_SPEED_SELECT1;
3528 printf("%s: unknown speed = 0x%x\n",
3529 DEVNAME(sc), speed);
3530 break;
3531 }
3532 		/* full duplex */
3533 value |= NGBE_MDI_PHY_DUPLEX;
3534 hw->phy.ops.write_reg(hw, 0, 0, value);
3535
3536 goto skip_an;
3537 }
3538
3539 /* Disable 10/100M Half Duplex */
3540 hw->phy.ops.read_reg(hw, 4, 0, &value);
3541 value &= 0xff5f;
3542 hw->phy.ops.write_reg(hw, 4, 0, value);
3543
3544 /* Set advertise enable according to input speed */
3545 hw->phy.ops.read_reg(hw, 9, 0, &value);
3546 if (!(speed & NGBE_LINK_SPEED_1GB_FULL))
3547 value &= 0xfdff;
3548 else
3549 value |= 0x200;
3550 hw->phy.ops.write_reg(hw, 9, 0, value);
3551
3552 hw->phy.ops.read_reg(hw, 4, 0, &value);
3553 if (!(speed & NGBE_LINK_SPEED_100_FULL))
3554 value &= 0xfeff;
3555 else
3556 value |= 0x100;
3557 hw->phy.ops.write_reg(hw, 4, 0, value);
3558
3559 hw->phy.ops.read_reg(hw, 4, 0, &value);
3560 if (!(speed & NGBE_LINK_SPEED_10_FULL))
3561 value &= 0xffbf;
3562 else
3563 value |= 0x40;
3564 hw->phy.ops.write_reg(hw, 4, 0, value);
3565
3566 	/* Restart AN and wait for the AN done interrupt */
3567 value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE;
3568 hw->phy.ops.write_reg(hw, 0, 0, value);
3569
3570 skip_an:
3571 hw->phy.ops.phy_led_ctrl(sc);
3572 hw->phy.ops.check_event(sc);
3573
3574 return 0;
3575 }
3576
3577 uint16_t
3578 ngbe_read_pci_cfg_word(struct ngbe_softc *sc, uint32_t reg)
3579 {
3580 struct ngbe_osdep *os = &sc->osdep;
3581 struct pci_attach_args *pa = &os->os_pa;
3582 uint32_t value;
3583 int high = 0;
3584
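	/*
	 * pci_conf_read() returns a 32-bit dword; pick out the 16-bit
	 * half that the register offset refers to.
	 */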
3585 if (reg & 0x2) {
3586 high = 1;
3587 reg &= ~0x2;
3588 }
3589 value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3590
3591 if (high)
3592 value >>= 16;
3593
3594 return (value & 0xffff);
3595 }
3596
3597 void
3598 ngbe_release_eeprom_semaphore(struct ngbe_hw *hw)
3599 {
3600 if (ngbe_check_mng_access(hw)) {
3601 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_SWSM, NGBE_MIS_SWSM_SMBI, 0);
3602 NGBE_WRITE_FLUSH(hw);
3603 }
3604 }
3605
3606 int
3607 ngbe_acquire_swfw_sync(struct ngbe_softc *sc, uint32_t mask)
3608 {
3609 struct ngbe_hw *hw = &sc->hw;
3610 uint32_t gssr = 0;
3611 uint32_t swmask = mask;
3612 uint32_t fwmask = mask << 16;
3613 int i, timeout = 200;
3614
3615 for (i = 0; i < timeout; i++) {
3616 /*
3617 * SW NVM semaphore bit is used for access to all
3618 * SW_FW_SYNC bits (not just NVM)
3619 */
3620 if (ngbe_get_eeprom_semaphore(sc))
3621 return 1;
3622 if (ngbe_check_mng_access(hw)) {
3623 gssr = NGBE_READ_REG(hw, NGBE_MNG_SWFW_SYNC);
3624 if (!(gssr & (fwmask | swmask))) {
3625 gssr |= swmask;
3626 NGBE_WRITE_REG(hw, NGBE_MNG_SWFW_SYNC, gssr);
3627 ngbe_release_eeprom_semaphore(hw);
3628 return 0;
3629 } else {
3630 /* Resource is currently in use by FW or SW */
3631 ngbe_release_eeprom_semaphore(hw);
3632 msec_delay(5);
3633 }
3634 }
3635 }
3636
3637 printf("%s: semaphore failed\n", DEVNAME(sc));
3638
3639 /* If time expired clear the bits holding the lock and retry */
3640 if (gssr & (fwmask | swmask))
3641 ngbe_release_swfw_sync(sc, gssr & (fwmask | swmask));
3642
3643 msec_delay(5);
3644 return 1;
3645 }
3646
3647 void
3648 ngbe_release_swfw_sync(struct ngbe_softc *sc, uint32_t mask)
3649 {
3650 struct ngbe_hw *hw = &sc->hw;
3651
3652 ngbe_get_eeprom_semaphore(sc);
3653 if (ngbe_check_mng_access(hw))
3654 NGBE_WRITE_REG_MASK(hw, NGBE_MNG_SWFW_SYNC, mask, 0);
3655
3656 ngbe_release_eeprom_semaphore(hw);
3657 }
3658
3659 void
3660 ngbe_reset(struct ngbe_softc *sc)
3661 {
3662 struct ngbe_hw *hw = &sc->hw;
3663 int error;
3664
3665 error = hw->mac.ops.init_hw(sc);
3666 switch (error) {
3667 case 0:
3668 break;
3669 default:
3670 printf("%s: hardware error\n", DEVNAME(sc));
3671 break;
3672 }
3673 }
3674
3675 int
3676 ngbe_reset_hw(struct ngbe_softc *sc)
3677 {
3678 struct ngbe_hw *hw = &sc->hw;
3679 struct ngbe_mac_info *mac = &hw->mac;
3680 uint32_t i, reset_status, rst_delay;
3681 uint32_t reset = 0;
3682 int status = 0;
3683
3684 status = hw->mac.ops.stop_adapter(sc);
3685 if (status)
3686 goto reset_hw_out;
3687
3688 /* Identify PHY and related function pointers */
3689 if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
3690 status = hw->phy.ops.init(sc);
3691 if (status)
3692 goto reset_hw_out;
3693 }
3694
3695 if (ngbe_get_media_type(hw) == ngbe_media_type_copper) {
3696 mac->ops.setup_link = ngbe_setup_copper_link;
3697 mac->ops.get_link_capabilities =
3698 ngbe_get_copper_link_capabilities;
3699 }
3700
3701 /*
3702 * Issue global reset to the MAC. Needs to be SW reset if link is up.
3703 * If link reset is used when link is up, it might reset the PHY when
3704 * mng is using it. If link is down or the flag to force full link
3705 * reset is set, then perform link reset.
3706 */
3707 if (hw->force_full_reset) {
3708 rst_delay = (NGBE_READ_REG(hw, NGBE_MIS_RST_ST) &
3709 NGBE_MIS_RST_ST_RST_INIT) >> NGBE_MIS_RST_ST_RST_INI_SHIFT;
3710 if (hw->reset_type == NGBE_SW_RESET) {
3711 for (i = 0; i < rst_delay + 20; i++) {
3712 reset_status =
3713 NGBE_READ_REG(hw, NGBE_MIS_RST_ST);
3714 if (!(reset_status &
3715 NGBE_MIS_RST_ST_DEV_RST_ST_MASK))
3716 break;
3717 msec_delay(100);
3718 }
3719
3720 if (reset_status & NGBE_MIS_RST_ST_DEV_RST_ST_MASK) {
3721 status = ETIMEDOUT;
3722 printf("%s: software reset polling failed to "
3723 "complete\n", DEVNAME(sc));
3724 goto reset_hw_out;
3725 }
3726 status = ngbe_check_flash_load(sc,
3727 NGBE_SPI_ILDR_STATUS_SW_RESET);
3728 if (status)
3729 goto reset_hw_out;
3730 } else if (hw->reset_type == NGBE_GLOBAL_RESET) {
3731 msec_delay(100 * rst_delay + 2000);
3732 }
3733 } else {
3734 if (hw->bus.lan_id == 0)
3735 reset = NGBE_MIS_RST_LAN0_RST;
3736 else if (hw->bus.lan_id == 1)
3737 reset = NGBE_MIS_RST_LAN1_RST;
3738 else if (hw->bus.lan_id == 2)
3739 reset = NGBE_MIS_RST_LAN2_RST;
3740 else if (hw->bus.lan_id == 3)
3741 reset = NGBE_MIS_RST_LAN3_RST;
3742
3743 NGBE_WRITE_REG(hw, NGBE_MIS_RST,
3744 reset | NGBE_READ_REG(hw, NGBE_MIS_RST));
3745 NGBE_WRITE_FLUSH(hw);
3746 msec_delay(15);
3747 }
3748
3749 ngbe_reset_misc(hw);
3750
3751 /* Store the permanent mac address */
3752 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
3753
3754 /*
3755 * Store MAC address from RAR0, clear receive address registers, and
3756 * clear the multicast table. Also reset num_rar_entries to 32,
3757 * since we modify this value when programming the SAN MAC address.
3758 */
3759 hw->mac.num_rar_entries = NGBE_SP_RAR_ENTRIES;
3760 hw->mac.ops.init_rx_addrs(sc);
3761
3762 reset_hw_out:
3763 return status;
3764 }
3765
3766 void
3767 ngbe_reset_misc(struct ngbe_hw *hw)
3768 {
3769 int i;
3770
3771 /* Receive packets of size > 2048 */
3772 NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_JE,
3773 NGBE_MAC_RX_CFG_JE);
3774
3775 /* Clear counters on read */
3776 NGBE_WRITE_REG_MASK(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_RSTONRD,
3777 NGBE_MMC_CONTROL_RSTONRD);
3778
3779 NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_FLOW_CTRL,
3780 NGBE_MAC_RX_FLOW_CTRL_RFE, NGBE_MAC_RX_FLOW_CTRL_RFE);
3781
3782 NGBE_WRITE_REG(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR);
3783
3784 NGBE_WRITE_REG_MASK(hw, NGBE_MIS_RST_ST, NGBE_MIS_RST_ST_RST_INIT,
3785 0x1e00);
3786
3787 /* errata 4: initialize mng flex tbl and wakeup flex tbl */
3788 NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_SEL, 0);
3789 for (i = 0; i < 16; i++) {
3790 NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_DW_L(i), 0);
3791 NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_DW_H(i), 0);
3792 NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_MSK(i), 0);
3793 }
3794 NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_SEL, 0);
3795 for (i = 0; i < 16; i++) {
3796 NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_DW_L(i), 0);
3797 NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_DW_H(i), 0);
3798 NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_MSK(i), 0);
3799 }
3800
3801 /* Set pause frame dst mac addr */
3802 NGBE_WRITE_REG(hw, NGBE_RDB_PFCMACDAL, 0xc2000001);
3803 NGBE_WRITE_REG(hw, NGBE_RDB_PFCMACDAH, 0x0180);
3804
3805 NGBE_WRITE_REG(hw, NGBE_MDIO_CLAUSE_SELECT, 0xf);
3806
3807 ngbe_init_thermal_sensor_thresh(hw);
3808 }
3809
3810 int
3811 ngbe_set_fw_drv_ver(struct ngbe_softc *sc, uint8_t maj, uint8_t min,
3812 uint8_t build, uint8_t sub)
3813 {
3814 struct ngbe_hw *hw = &sc->hw;
3815 struct ngbe_hic_drv_info fw_cmd;
3816 int i, error = 0;
3817
3818 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3819 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
3820 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
3821 fw_cmd.port_num = (uint8_t)hw->bus.lan_id;
3822 fw_cmd.ver_maj = maj;
3823 fw_cmd.ver_min = min;
3824 fw_cmd.ver_build = build;
3825 fw_cmd.ver_sub = sub;
3826 fw_cmd.hdr.checksum = 0;
3827 fw_cmd.hdr.checksum = ngbe_calculate_checksum((uint8_t *)&fw_cmd,
3828 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
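	/*
	 * The checksum byte is zeroed before the call above, so the sum
	 * covers FW_CEM_HDR_LEN + buf_len bytes with the checksum field
	 * itself counted as zero.
	 */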
3829 fw_cmd.pad = 0;
3830 fw_cmd.pad2 = 0;
3831
3832 DELAY(5000);
3833 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
3834 error = ngbe_host_interface_command(sc, (uint32_t *)&fw_cmd,
3835 sizeof(fw_cmd), NGBE_HI_COMMAND_TIMEOUT, 1);
3836 if (error)
3837 continue;
3838
3839 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
3840 FW_CEM_RESP_STATUS_SUCCESS)
3841 error = 0;
3842 else
3843 error = EINVAL;
3844 break;
3845 }
3846
3847 return error;
3848 }
3849
3850 void
3851 ngbe_set_ivar(struct ngbe_softc *sc, uint16_t entry, uint16_t vector, int8_t
3852 type)
3853 {
3854 struct ngbe_hw *hw = &sc->hw;
3855 uint32_t ivar, index;
3856
3857 vector |= NGBE_PX_IVAR_ALLOC_VAL;
3858
3859 if (type == -1) {
3860 /* other causes */
3861 index = 0;
3862 ivar = NGBE_READ_REG(hw, NGBE_PX_MISC_IVAR);
3863 ivar &= ~((uint32_t)0xff << index);
3864 ivar |= ((uint32_t)vector << index);
3865 NGBE_WRITE_REG(hw, NGBE_PX_MISC_IVAR, ivar);
3866 } else {
3867 /* Tx or Rx causes */
3868 index = ((16 * (entry & 1)) + (8 * type));
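		/*
		 * Judging from the shift arithmetic, each PX_IVAR register
		 * packs four 8-bit entries (Rx and Tx causes for two
		 * queues); e.g. entry 3, type 1 gives index
		 * 16 * (3 & 1) + 8 * 1 = 24, i.e. bits 31:24 of PX_IVAR(1).
		 */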
3869 ivar = NGBE_READ_REG(hw, NGBE_PX_IVAR(entry >> 1));
3870 ivar &= ~((uint32_t)0xff << index);
3871 ivar |= ((uint32_t)vector << index);
3872 NGBE_WRITE_REG(hw, NGBE_PX_IVAR(entry >> 1), ivar);
3873 }
3874 }
3875
3876 void
3877 ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw)
3878 {
3879 struct ngbe_bus_info *bus = &hw->bus;
3880 uint32_t reg = 0;
3881
3882 reg = NGBE_READ_REG(hw, NGBE_CFG_PORT_ST);
3883 bus->lan_id = NGBE_CFG_PORT_ST_LAN_ID(reg);
3884 }
3885
3886 void
3887 ngbe_set_mta(struct ngbe_hw *hw, uint8_t *mc_addr)
3888 {
3889 uint32_t vector, vector_bit, vector_reg;
3890
3891 hw->addr_ctrl.mta_in_use++;
3892
3893 vector = ngbe_mta_vector(hw, mc_addr);
3894
3895 /*
3896 * The MTA is a register array of 128 32-bit registers. It is treated
3897 * like an array of 4096 bits. We want to set bit
3898 * BitArray[vector_value]. So we figure out what register the bit is
3899 * in, read it, OR in the new bit, then write back the new value. The
3900 * register is determined by the upper 7 bits of the vector value and
3901	 * the bit within that register is determined by the lower 5 bits of
3902 * the value.
3903 */
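	/*
	 * Worked example: vector 0x97f selects register (0x97f >> 5) &
	 * 0x7f = 75 and bit 0x97f & 0x1f = 31 of mta_shadow[].
	 */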
3904 vector_reg = (vector >> 5) & 0x7f;
3905 vector_bit = vector & 0x1f;
3906 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
3907 }
3908
3909 void
3910 ngbe_set_pci_config_data(struct ngbe_hw *hw, uint16_t link_status)
3911 {
3912 if (hw->bus.type == ngbe_bus_type_unknown)
3913 hw->bus.type = ngbe_bus_type_pci_express;
3914
3915 switch (link_status & NGBE_PCI_LINK_WIDTH) {
3916 case NGBE_PCI_LINK_WIDTH_1:
3917 hw->bus.width = ngbe_bus_width_pcie_x1;
3918 break;
3919 case NGBE_PCI_LINK_WIDTH_2:
3920 hw->bus.width = ngbe_bus_width_pcie_x2;
3921 break;
3922 case NGBE_PCI_LINK_WIDTH_4:
3923 hw->bus.width = ngbe_bus_width_pcie_x4;
3924 break;
3925 case NGBE_PCI_LINK_WIDTH_8:
3926 hw->bus.width = ngbe_bus_width_pcie_x8;
3927 break;
3928 default:
3929 hw->bus.width = ngbe_bus_width_unknown;
3930 break;
3931 }
3932
3933 switch (link_status & NGBE_PCI_LINK_SPEED) {
3934 case NGBE_PCI_LINK_SPEED_2500:
3935 hw->bus.speed = ngbe_bus_speed_2500;
3936 break;
3937 case NGBE_PCI_LINK_SPEED_5000:
3938 hw->bus.speed = ngbe_bus_speed_5000;
3939 break;
3940 case NGBE_PCI_LINK_SPEED_8000:
3941 hw->bus.speed = ngbe_bus_speed_8000;
3942 break;
3943 default:
3944 hw->bus.speed = ngbe_bus_speed_unknown;
3945 break;
3946 }
3947 }
3948
3949 int
3950 ngbe_set_rar(struct ngbe_softc *sc, uint32_t index, uint8_t *addr,
3951 uint64_t pools, uint32_t enable_addr)
3952 {
3953 struct ngbe_hw *hw = &sc->hw;
3954 uint32_t rar_entries = hw->mac.num_rar_entries;
3955 uint32_t rar_low, rar_high;
3956
3957 /* Make sure we are using a valid rar index range */
3958 if (index >= rar_entries) {
3959 printf("%s: RAR index %d is out of range\n",
3960 DEVNAME(sc), index);
3961 return EINVAL;
3962 }
3963
3964 /* Select the MAC address */
3965 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, index);
3966
3967 /* Setup VMDq pool mapping */
3968 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_VM, pools & 0xffffffff);
3969
3970 /*
3971 * HW expects these in little endian so we reverse the byte
3972 * order from network order (big endian) to little endian
3973 *
3974 * Some parts put the VMDq setting in the extra RAH bits,
3975 * so save everything except the lower 16 bits that hold part
3976 * of the address and the address valid bit.
3977 */
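	/*
	 * Worked example: 00:a0:c9:12:34:56 yields rar_low 0xc9123456
	 * and rar_high 0x00a0 (before the AV bit is ORed in).
	 */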
3978 rar_low = ((uint32_t)addr[5] | ((uint32_t)addr[4] << 8) |
3979 ((uint32_t)addr[3] << 16) | ((uint32_t)addr[2] << 24));
3980 rar_high = ((uint32_t)addr[1] | ((uint32_t)addr[0] << 8));
3981 if (enable_addr != 0)
3982 rar_high |= NGBE_PSR_MAC_SWC_AD_H_AV;
3983
3984 NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_L, rar_low);
3985 NGBE_WRITE_REG_MASK(hw, NGBE_PSR_MAC_SWC_AD_H,
3986 (NGBE_PSR_MAC_SWC_AD_H_AD(~0) | NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
3987 NGBE_PSR_MAC_SWC_AD_H_AV), rar_high);
3988
3989 return 0;
3990 }
3991
3992 void
3993 ngbe_set_rx_drop_en(struct ngbe_softc *sc)
3994 {
3995 uint32_t srrctl;
3996 int i;
3997
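	/*
	 * Presumably: with several queues sharing the Rx packet buffer
	 * and no Tx pause, per-ring drop keeps one full ring from
	 * starving the rest; otherwise leave drops off and let flow
	 * control handle congestion.
	 */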
3998 if ((sc->sc_nqueues > 1) &&
3999 !(sc->hw.fc.current_mode & ngbe_fc_tx_pause)) {
4000 for (i = 0; i < sc->sc_nqueues; i++) {
4001 srrctl = NGBE_READ_REG(&sc->hw, NGBE_PX_RR_CFG(i));
4002 srrctl |= NGBE_PX_RR_CFG_DROP_EN;
4003 NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_CFG(i), srrctl);
4004 }
4005
4006 } else {
4007 for (i = 0; i < sc->sc_nqueues; i++) {
4008 srrctl = NGBE_READ_REG(&sc->hw, NGBE_PX_RR_CFG(i));
4009 srrctl &= ~NGBE_PX_RR_CFG_DROP_EN;
4010 NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_CFG(i), srrctl);
4011 }
4012 }
4013 }
4014
4015 void
4016 ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, uint32_t headroom, int strategy)
4017 {
4018 uint32_t pbsize = hw->mac.rx_pb_size;
4019 uint32_t txpktsize, txpbthresh, rxpktsize = 0;
4020
4021 /* Reserve headroom */
4022 pbsize -= headroom;
4023
4024 if (!num_pb)
4025 num_pb = 1;
4026
4027 /*
4028 * Divide remaining packet buffer space amongst the number of packet
4029 * buffers requested using supplied strategy.
4030 */
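	/*
	 * E.g. under PBA_STRATEGY_EQUAL, a hypothetical 32 KB pbsize
	 * with num_pb 2 gives each buffer 16 KB, encoded via
	 * NGBE_RDB_PB_SZ_SHIFT below.
	 */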
4031 switch (strategy) {
4032 case PBA_STRATEGY_EQUAL:
4033 rxpktsize = (pbsize / num_pb) << NGBE_RDB_PB_SZ_SHIFT;
4034 NGBE_WRITE_REG(hw, NGBE_RDB_PB_SZ, rxpktsize);
4035 break;
4036 default:
4037 break;
4038 }
4039
4040 /* Only support an equally distributed Tx packet buffer strategy. */
4041 txpktsize = NGBE_TDB_PB_SZ_MAX / num_pb;
4042 txpbthresh = (txpktsize / 1024) - NGBE_TXPKT_SIZE_MAX;
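	/*
	 * txpbthresh appears to be in KB: the per-buffer size in KB
	 * minus NGBE_TXPKT_SIZE_MAX, the largest packet the hardware
	 * should still accept into the buffer.
	 */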
4043
4044 NGBE_WRITE_REG(hw, NGBE_TDB_PB_SZ, txpktsize);
4045 NGBE_WRITE_REG(hw, NGBE_TDM_PB_THRE, txpbthresh);
4046 }
4047
4048 int
4049 ngbe_setup_copper_link(struct ngbe_softc *sc, uint32_t speed, int need_restart)
4050 {
4051 struct ngbe_hw *hw = &sc->hw;
4052 int status = 0;
4053
4054 /* Setup the PHY according to input speed */
4055 if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA))
4056 status = hw->phy.ops.setup_link(sc, speed, need_restart);
4057
4058 return status;
4059 }
4060
4061 int
4062 ngbe_setup_fc(struct ngbe_softc *sc)
4063 {
4064 struct ngbe_hw *hw = &sc->hw;
4065 uint16_t pcap_backplane = 0;
4066 int error = 0;
4067
4068 /* Validate the requested mode */
4069 if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) {
4070 printf("%s: ngbe_fc_rx_pause not valid in strict IEEE mode\n",
4071 DEVNAME(sc));
4072 error = EINVAL;
4073 goto out;
4074 }
4075
4076 /*
4077 * Gig parts do not have a word in the EEPROM to determine the
4078 * default flow control setting, so we explicitly set it to full.
4079 */
4080 if (hw->fc.requested_mode == ngbe_fc_default)
4081 hw->fc.requested_mode = ngbe_fc_full;
4082
4083 /*
4084 * The possible values of fc.requested_mode are:
4085 * 0: Flow control is completely disabled
4086 * 1: Rx flow control is enabled (we can receive pause frames,
4087 * but not send pause frames).
4088 * 2: Tx flow control is enabled (we can send pause frames but
4089 * we do not support receiving pause frames).
4090 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4091 * other: Invalid.
4092 */
4093 switch (hw->fc.requested_mode) {
4094 case ngbe_fc_none:
4095 /* Flow control completely disabled by software override. */
4096 break;
4097 case ngbe_fc_tx_pause:
4098 /*
4099 * Tx Flow control is enabled, and Rx Flow control is
4100 * disabled by software override.
4101 */
4102 pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM;
4103 break;
4104 case ngbe_fc_rx_pause:
4105 /*
4106 * Rx Flow control is enabled and Tx Flow control is
4107 * disabled by software override. Since there really
4108 * isn't a way to advertise that we are capable of RX
4109 * Pause ONLY, we will advertise that we support both
4110 * symmetric and asymmetric Rx PAUSE, as such we fall
4111 * through to the fc_full statement. Later, we will
4112 * disable the adapter's ability to send PAUSE frames.
4113 */
4114 case ngbe_fc_full:
4115 /* Flow control (both Rx and Tx) is enabled by SW override. */
4116 pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM |
4117 NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM;
4118 break;
4119 default:
4120 printf("%s: flow control param set incorrectly\n", DEVNAME(sc));
4121 error = EINVAL;
4122 goto out;
4123 }
4124
4125 /* AUTOC restart handles negotiation of 1G on backplane and copper. */
4126 if ((hw->phy.media_type == ngbe_media_type_copper) &&
4127 !((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA))
4128 error = hw->phy.ops.set_adv_pause(hw, pcap_backplane);
4129 out:
4130 return error;
4131 }
4132
4133 void
4134 ngbe_setup_gpie(struct ngbe_hw *hw)
4135 {
4136 uint32_t gpie;
4137
4138 gpie = NGBE_PX_GPIE_MODEL;
4139
4140 /*
4141	 * Use EIAM to auto-mask when an MSI-X interrupt is asserted;
4142	 * this saves a register write for every interrupt.
4143 */
4144 NGBE_WRITE_REG(hw, NGBE_PX_GPIE, gpie);
4145 }
4146
4147 void
4148 ngbe_setup_isb(struct ngbe_softc *sc)
4149 {
4150 uint64_t idba = sc->isbdma.dma_map->dm_segs[0].ds_addr;
4151
4152 /* Set ISB address */
4153 NGBE_WRITE_REG(&sc->hw, NGBE_PX_ISB_ADDR_L,
4154 (idba & 0x00000000ffffffffULL));
4155 NGBE_WRITE_REG(&sc->hw, NGBE_PX_ISB_ADDR_H, (idba >> 32));
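	/*
	 * E.g. an ISB base of 0x0000000123456789 is written as ADDR_L
	 * 0x23456789 and ADDR_H 0x00000001.
	 */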
4156 }
4157
4158 void
4159 ngbe_setup_psrtype(struct ngbe_hw *hw)
4160 {
4161 uint32_t psrtype;
4162
4163 /* PSRTYPE must be initialized in adapters */
4164 psrtype = NGBE_RDB_PL_CFG_L4HDR | NGBE_RDB_PL_CFG_L3HDR |
4165 NGBE_RDB_PL_CFG_L2HDR | NGBE_RDB_PL_CFG_TUN_TUNHDR |
4166 NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR;
4167
4168 NGBE_WRITE_REG(hw, NGBE_RDB_PL_CFG(0), psrtype);
4169 }
4170
4171 void
4172 ngbe_setup_vlan_hw_support(struct ngbe_softc *sc)
4173 {
4174 struct ngbe_hw *hw = &sc->hw;
4175 int i;
4176
4177 for (i = 0; i < sc->sc_nqueues; i++) {
4178 NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
4179 NGBE_PX_RR_CFG_VLAN, NGBE_PX_RR_CFG_VLAN);
4180 }
4181 }
4182
4183 int
4184 ngbe_start_hw(struct ngbe_softc *sc)
4185 {
4186 struct ngbe_hw *hw = &sc->hw;
4187 int error;
4188
4189 /* Set the media type */
4190 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
4191
4192 /* Clear the VLAN filter table */
4193 hw->mac.ops.clear_vfta(hw);
4194
4195 /* Clear statistics registers */
4196 hw->mac.ops.clear_hw_cntrs(hw);
4197
4198 NGBE_WRITE_FLUSH(hw);
4199
4200 /* Setup flow control */
4201 error = hw->mac.ops.setup_fc(sc);
4202
4203 /* Clear adapter stopped flag */
4204 hw->adapter_stopped = 0;
4205
4206 /* We need to run link autotry after the driver loads */
4207 hw->mac.autotry_restart = 1;
4208
4209 return error;
4210 }
4211
4212 int
4213 ngbe_stop_adapter(struct ngbe_softc *sc)
4214 {
4215 struct ngbe_hw *hw = &sc->hw;
4216 int i;
4217
4218 /*
4219 * Set the adapter_stopped flag so other driver functions stop touching
4220 * the hardware.
4221 */
4222 hw->adapter_stopped = 1;
4223
4224 /* Disable the receive unit. */
4225 hw->mac.ops.disable_rx(hw);
4226
4227 /* Clear any pending interrupts, flush previous writes. */
4228 NGBE_WRITE_REG(hw, NGBE_PX_MISC_IC, 0xffffffff);
4229
4230 NGBE_WRITE_REG(hw, NGBE_BME_CTL, 0x3);
4231
4232 /* Disable the transmit unit. Each queue must be disabled. */
4233 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4234 NGBE_WRITE_REG_MASK(hw, NGBE_PX_TR_CFG(i),
4235 NGBE_PX_TR_CFG_SWFLSH | NGBE_PX_TR_CFG_ENABLE,
4236 NGBE_PX_TR_CFG_SWFLSH);
4237 }
4238
4239 /* Disable the receive unit by stopping each queue */
4240 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4241 NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
4242 NGBE_PX_RR_CFG_RR_EN, 0);
4243 }
4244
4245	/* Flush all queue disables. */
4246 NGBE_WRITE_FLUSH(hw);
4247 msec_delay(2);
4248
4249 return ngbe_disable_pcie_master(sc);
4250 }
4251
4252 void
4253 ngbe_rx_checksum(uint32_t staterr, struct mbuf *m)
4254 {
4255 if (staterr & NGBE_RXD_STAT_IPCS) {
4256 if (!(staterr & NGBE_RXD_ERR_IPE))
4257 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
4258 else
4259 m->m_pkthdr.csum_flags = 0;
4260 }
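	/*
	 * The L4CS status below does not say whether the packet was TCP
	 * or UDP, so a good checksum is reported as both; the stack
	 * checks the flag that matches the protocol.
	 */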
4261 if (staterr & NGBE_RXD_STAT_L4CS) {
4262 if (!(staterr & NGBE_RXD_ERR_TCPE))
4263 m->m_pkthdr.csum_flags |=
4264 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
4265 }
4266 }
4267
4268 void
4269 ngbe_rxeof(struct rx_ring *rxr)
4270 {
4271 struct ngbe_softc *sc = rxr->sc;
4272 struct ifnet *ifp = &sc->sc_ac.ac_if;
4273 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
4274 struct mbuf *mp, *m;
4275 struct ngbe_rx_buf *rxbuf, *nxbuf;
4276 union ngbe_rx_desc *rxdesc;
4277 uint32_t staterr = 0;
4278 uint16_t len, vtag;
4279 uint8_t eop = 0;
4280 int i, nextp;
4281
4282 if (!ISSET(ifp->if_flags, IFF_RUNNING))
4283 return;
4284
4285 i = rxr->next_to_check;
4286 while (if_rxr_inuse(&rxr->rx_ring) > 0) {
4287 uint32_t hash;
4288 uint16_t hashtype;
4289
4290 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4291 i * sizeof(union ngbe_rx_desc), sizeof(union ngbe_rx_desc),
4292 BUS_DMASYNC_POSTREAD);
4293
4294 rxdesc = &rxr->rx_base[i];
4295 staterr = letoh32(rxdesc->wb.upper.status_error);
4296 if (!ISSET(staterr, NGBE_RXD_STAT_DD)) {
4297 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4298 i * sizeof(union ngbe_rx_desc),
4299 sizeof(union ngbe_rx_desc), BUS_DMASYNC_PREREAD);
4300 break;
4301 }
4302
4303		/* Zero out the receive descriptor's status. */
4304 rxdesc->wb.upper.status_error = 0;
4305 rxbuf = &rxr->rx_buffers[i];
4306
4307 /* Pull the mbuf off the ring. */
4308 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
4309 rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
4310 bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
4311
4312 mp = rxbuf->buf;
4313 len = letoh16(rxdesc->wb.upper.length);
4314 vtag = letoh16(rxdesc->wb.upper.vlan);
4315 eop = ((staterr & NGBE_RXD_STAT_EOP) != 0);
4316 hash = letoh32(rxdesc->wb.lower.hi_dword.rss);
4317 hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
4318 NGBE_RXD_RSSTYPE_MASK;
4319
4320 if (staterr & NGBE_RXD_ERR_RXE) {
4321 if (rxbuf->fmp) {
4322 m_freem(rxbuf->fmp);
4323 rxbuf->fmp = NULL;
4324 }
4325
4326 m_freem(mp);
4327 rxbuf->buf = NULL;
4328 goto next_desc;
4329 }
4330
4331 if (mp == NULL) {
4332 panic("%s: ngbe_rxeof: NULL mbuf in slot %d "
4333 "(nrx %d, filled %d)", DEVNAME(sc), i,
4334 if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled);
4335 }
4336
4337 if (!eop) {
4338 /*
4339 * Figure out the next descriptor of this frame.
4340 */
4341 nextp = i + 1;
4342 if (nextp == sc->num_rx_desc)
4343 nextp = 0;
4344 nxbuf = &rxr->rx_buffers[nextp];
4345 /* prefetch(nxbuf); */
4346 }
4347
4348 mp->m_len = len;
4349
4350 m = rxbuf->fmp;
4351 rxbuf->buf = rxbuf->fmp = NULL;
4352
4353 if (m != NULL)
4354 m->m_pkthdr.len += mp->m_len;
4355 else {
4356 m = mp;
4357 m->m_pkthdr.len = mp->m_len;
4358 #if NVLAN > 0
4359 if (staterr & NGBE_RXD_STAT_VP) {
4360 m->m_pkthdr.ether_vtag = vtag;
4361 m->m_flags |= M_VLANTAG;
4362 }
4363 #endif
4364 }
4365
4366 /* Pass the head pointer on */
4367 if (eop == 0) {
4368 nxbuf->fmp = m;
4369 m = NULL;
4370 mp->m_next = nxbuf->buf;
4371 } else {
4372 ngbe_rx_checksum(staterr, m);
4373
4374 if (hashtype != NGBE_RXD_RSSTYPE_NONE) {
4375 m->m_pkthdr.ph_flowid = hash;
4376 SET(m->m_pkthdr.csum_flags, M_FLOWID);
4377 }
4378
4379 ml_enqueue(&ml, m);
4380 }
4381 next_desc:
4382 if_rxr_put(&rxr->rx_ring, 1);
4383 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4384 i * sizeof(union ngbe_rx_desc), sizeof(union ngbe_rx_desc),
4385 BUS_DMASYNC_PREREAD);
4386
4387 /* Advance our pointers to the next descriptor. */
4388 if (++i == sc->num_rx_desc)
4389 i = 0;
4390 }
4391 rxr->next_to_check = i;
4392
4393 if (ifiq_input(rxr->ifiq, &ml))
4394 if_rxr_livelocked(&rxr->rx_ring);
4395
4396 if (!(staterr & NGBE_RXD_STAT_DD))
4397 return;
4398 }
4399
4400 void
4401 ngbe_rxrefill(void *xrxr)
4402 {
4403 struct rx_ring *rxr = xrxr;
4404 struct ngbe_softc *sc = rxr->sc;
4405
4406 if (ngbe_rxfill(rxr))
4407 NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_WP(rxr->me),
4408 rxr->last_desc_filled);
4409 else if (if_rxr_inuse(&rxr->rx_ring) == 0)
4410 timeout_add(&rxr->rx_refill, 1);
4411 }
4412
4413 int
4414 ngbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *m, uint32_t *cmd_type_len,
4415 uint32_t *olinfo_status)
4416 {
4417 struct ngbe_tx_context_desc *txd;
4418 struct ngbe_tx_buf *tx_buffer;
4419 uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
4420 int ctxd = txr->next_avail_desc;
4421 int offload = 0;
4422
4423 /* Indicate the whole packet as payload when not doing TSO */
4424 *olinfo_status |= m->m_pkthdr.len << NGBE_TXD_PAYLEN_SHIFT;
4425
4426 #if NVLAN > 0
4427 if (ISSET(m->m_flags, M_VLANTAG)) {
4428 uint32_t vtag = m->m_pkthdr.ether_vtag;
4429 vlan_macip_lens |= (vtag << NGBE_TXD_VLAN_SHIFT);
4430 *cmd_type_len |= NGBE_TXD_VLE;
4431 offload |= 1;
4432 }
4433 #endif
4434
4435 if (!offload)
4436 return 0;
4437
4438 txd = (struct ngbe_tx_context_desc *)&txr->tx_base[ctxd];
4439 tx_buffer = &txr->tx_buffers[ctxd];
4440
4441 type_tucmd_mlhl |= NGBE_TXD_DTYP_CTXT;
4442
4443 /* Now copy bits into descriptor */
4444 txd->vlan_macip_lens = htole32(vlan_macip_lens);
4445 txd->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
4446 txd->seqnum_seed = htole32(0);
4447 txd->mss_l4len_idx = htole32(0);
4448
4449 tx_buffer->m_head = NULL;
4450 tx_buffer->eop_index = -1;
4451
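	/*
	 * Return 1 so the caller can account for the context descriptor
	 * consumed above; 0 (earlier) means no descriptor was used.
	 */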
4452 return 1;
4453 }
4454
4455 void
4456 ngbe_txeof(struct tx_ring *txr)
4457 {
4458 struct ngbe_softc *sc = txr->sc;
4459 struct ifqueue *ifq = txr->ifq;
4460 struct ifnet *ifp = &sc->sc_ac.ac_if;
4461 struct ngbe_tx_buf *tx_buffer;
4462 union ngbe_tx_desc *tx_desc;
4463 unsigned int prod, cons, last;
4464
4465 if (!ISSET(ifp->if_flags, IFF_RUNNING))
4466 return;
4467
4468 prod = txr->next_avail_desc;
4469 cons = txr->next_to_clean;
4470
4471 if (prod == cons)
4472 return;
4473
4474 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
4475 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
4476
4477 for (;;) {
4478 tx_buffer = &txr->tx_buffers[cons];
4479 last = tx_buffer->eop_index;
4480 tx_desc = (union ngbe_tx_desc *)&txr->tx_base[last];
4481
4482 if (!ISSET(tx_desc->wb.status, NGBE_TXD_STAT_DD))
4483 break;
4484
4485 bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
4486 0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4487 bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
4488 m_freem(tx_buffer->m_head);
4489
4490 tx_buffer->m_head = NULL;
4491 tx_buffer->eop_index = -1;
4492
4493 cons = last + 1;
4494 if (cons == sc->num_tx_desc)
4495 cons = 0;
4496 if (prod == cons) {
4497 /* All clean, turn off the timer */
4498 ifp->if_timer = 0;
4499 break;
4500 }
4501 }
4502
4503 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
4504 0, txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
4505
4506 txr->next_to_clean = cons;
4507
4508 if (ifq_is_oactive(ifq))
4509 ifq_restart(ifq);
4510 }
4511
4512 void
4513 ngbe_update_mc_addr_list(struct ngbe_hw *hw, uint8_t *mc_addr_list,
4514 uint32_t mc_addr_count, ngbe_mc_addr_itr next, int clear)
4515 {
4516 uint32_t i, psrctl, vmdq;
4517
4518 /*
4519 * Set the new number of MC addresses that we are being requested to
4520 * use.
4521 */
4522 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
4523 hw->addr_ctrl.mta_in_use = 0;
4524
4525 /* Clear mta_shadow */
4526 if (clear)
4527 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
4528
4529 /* Update mta_shadow */
4530 for (i = 0; i < mc_addr_count; i++)
4531 ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
4532
4533 /* Enable mta */
4534 for (i = 0; i < hw->mac.mcft_size; i++)
4535 NGBE_WRITE_REG_ARRAY(hw, NGBE_PSR_MC_TBL(0), i,
4536 hw->mac.mta_shadow[i]);
4537
4538 if (hw->addr_ctrl.mta_in_use > 0) {
4539 psrctl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
4540 psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
4541 psrctl |= NGBE_PSR_CTL_MFE |
4542 (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT);
4543 NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctl);
4544 }
4545 }
4546
4547 int
4548 ngbe_validate_mac_addr(uint8_t *mac_addr)
4549 {
4550 uint32_t status = 0;
4551
4552 /* Make sure it is not a multicast address */
4553 if (NGBE_IS_MULTICAST(mac_addr))
4554 status = EINVAL;
4555 /* Not a broadcast address */
4556 else if (NGBE_IS_BROADCAST(mac_addr))
4557 status = EINVAL;
4558 /* Reject the zero address */
4559 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
4560 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
4561 status = EINVAL;
4562
4563 return status;
4564 }
4565