/*	$OpenBSD: if_ngbe.c,v 1.2 2023/11/10 15:51:20 bluhm Exp $	*/

/*
 * Copyright (c) 2015-2017 Beijing WangXun Technology Co., Ltd.
 * Copyright (c) 2023 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/intrmap.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/toeplitz.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_ngbereg.h>

const struct pci_matchid ngbe_devices[] = {
	{ PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860A2 },
	{ PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860AL1 }
};

int			ngbe_match(struct device *, void *, void *);
void			ngbe_attach(struct device *, struct device *, void *);
int			ngbe_detach(struct device *, int);
void			ngbe_init(void *);
int			ngbe_ioctl(struct ifnet *, u_long, caddr_t);
int			ngbe_media_change(struct ifnet *);
void			ngbe_media_status(struct ifnet *, struct ifmediareq *);
int			ngbe_rxfill(struct rx_ring *);
int			ngbe_rxrinfo(struct ngbe_softc *, struct if_rxrinfo *);
void			ngbe_start(struct ifqueue *);
void			ngbe_stop(struct ngbe_softc *);
void			ngbe_update_link_status(struct ngbe_softc *);
void			ngbe_watchdog(struct ifnet *);
int			ngbe_allocate_pci_resources(struct ngbe_softc *);
void			ngbe_free_pci_resources(struct ngbe_softc *);
int			ngbe_allocate_msix(struct ngbe_softc *);
void			ngbe_setup_interface(struct ngbe_softc *);
int			ngbe_setup_msix(struct ngbe_softc *);
int			ngbe_dma_malloc(struct ngbe_softc *, bus_size_t,
			    struct ngbe_dma_alloc *);
void			ngbe_dma_free(struct ngbe_softc *,
			    struct ngbe_dma_alloc *);
int			ngbe_allocate_isb(struct ngbe_softc *);
void			ngbe_free_isb(struct ngbe_softc *);
int			ngbe_allocate_queues(struct ngbe_softc *);
void			ngbe_free_receive_structures(struct ngbe_softc *);
void			ngbe_free_receive_buffers(struct rx_ring *);
void			ngbe_free_transmit_structures(struct ngbe_softc *);
void			ngbe_free_transmit_buffers(struct tx_ring *);
int			ngbe_allocate_receive_buffers(struct rx_ring *);
int			ngbe_allocate_transmit_buffers(struct tx_ring *);
int			ngbe_setup_receive_ring(struct rx_ring *);
int			ngbe_setup_transmit_ring(struct tx_ring *);
int			ngbe_setup_receive_structures(struct ngbe_softc *);
int			ngbe_setup_transmit_structures(struct ngbe_softc *);
uint8_t *		ngbe_addr_list_itr(struct ngbe_hw *, uint8_t **,
			    uint32_t *);
void			ngbe_iff(struct ngbe_softc *);
int			ngbe_initialize_receive_unit(struct ngbe_softc *);
void			ngbe_initialize_rss_mapping(struct ngbe_softc *);
int			ngbe_initialize_transmit_unit(struct ngbe_softc *);
int			ngbe_intr_link(void *);
int			ngbe_intr_queue(void *);
void			ngbe_init_eeprom_params(struct ngbe_hw *);
int			ngbe_init_hw(struct ngbe_softc *);
void			ngbe_init_ops(struct ngbe_hw *);
void			ngbe_init_rx_addrs(struct ngbe_softc *);
void			ngbe_init_shared_code(struct ngbe_softc *);
void			ngbe_init_thermal_sensor_thresh(struct ngbe_hw *);
void			ngbe_init_uta_tables(struct ngbe_hw *);
void			ngbe_fc_autoneg(struct ngbe_softc *);
int			ngbe_fc_autoneg_copper(struct ngbe_softc *);
int			ngbe_fc_enable(struct ngbe_softc *);
int			ngbe_fmgr_cmd_op(struct ngbe_hw *, uint32_t, uint32_t);
uint32_t		ngbe_flash_read_dword(struct ngbe_hw *, uint32_t);
uint8_t			ngbe_calculate_checksum(uint8_t *, uint32_t);
int			ngbe_check_flash_load(struct ngbe_softc *, uint32_t);
int			ngbe_check_internal_phy_id(struct ngbe_softc *);
int			ngbe_check_mac_link(struct ngbe_hw *, uint32_t *, int *,
			    int);
int			ngbe_check_mng_access(struct ngbe_hw *);
int			ngbe_check_reset_blocked(struct ngbe_softc *);
void			ngbe_clear_hw_cntrs(struct ngbe_hw *);
void			ngbe_clear_vfta(struct ngbe_hw *);
void			ngbe_configure_ivars(struct ngbe_softc *);
void			ngbe_configure_pb(struct ngbe_softc *);
void			ngbe_disable_intr(struct ngbe_softc *);
int			ngbe_disable_pcie_master(struct ngbe_softc *);
void			ngbe_disable_queue(struct ngbe_softc *, uint32_t);
void			ngbe_disable_rx(struct ngbe_hw *);
void			ngbe_disable_sec_rx_path(struct ngbe_hw *);
int			ngbe_eepromcheck_cap(struct ngbe_softc *, uint16_t,
			    uint32_t *);
void			ngbe_enable_intr(struct ngbe_softc *);
void			ngbe_enable_queue(struct ngbe_softc *, uint32_t);
void			ngbe_enable_rx(struct ngbe_hw *);
void			ngbe_enable_rx_dma(struct ngbe_hw *, uint32_t);
void			ngbe_enable_sec_rx_path(struct ngbe_hw *);
int			ngbe_encap(struct tx_ring *, struct mbuf *);
int			ngbe_get_buf(struct rx_ring *, int);
void			ngbe_get_bus_info(struct ngbe_softc *);
void			ngbe_get_copper_link_capabilities(struct ngbe_hw *,
			    uint32_t *, int *);
int			ngbe_get_eeprom_semaphore(struct ngbe_softc *);
void			ngbe_get_hw_control(struct ngbe_hw *);
void			ngbe_release_hw_control(struct ngbe_softc *);
void			ngbe_get_mac_addr(struct ngbe_hw *, uint8_t *);
enum ngbe_media_type	ngbe_get_media_type(struct ngbe_hw *);
void			ngbe_gphy_dis_eee(struct ngbe_hw *);
void			ngbe_gphy_efuse_calibration(struct ngbe_softc *);
void			ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *);
void			ngbe_handle_phy_event(struct ngbe_softc *);
int			ngbe_host_interface_command(struct ngbe_softc *,
			    uint32_t *, uint32_t, uint32_t, int);
int			ngbe_hpbthresh(struct ngbe_softc *);
int			ngbe_lpbthresh(struct ngbe_softc *);
int			ngbe_mng_present(struct ngbe_hw *);
int			ngbe_mta_vector(struct ngbe_hw *, uint8_t *);
int			ngbe_negotiate_fc(struct ngbe_softc *, uint32_t,
			    uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
int			ngbe_non_sfp_link_config(struct ngbe_softc *);
void			ngbe_pbthresh_setup(struct ngbe_softc *);
void			ngbe_phy_check_event(struct ngbe_softc *);
int			ngbe_phy_check_overtemp(struct ngbe_hw *);
void			ngbe_phy_get_advertised_pause(struct ngbe_hw *,
			    uint8_t *);
void			ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *,
			    uint8_t *);
int			ngbe_phy_identify(struct ngbe_softc *);
int			ngbe_phy_init(struct ngbe_softc *);
void			ngbe_phy_led_ctrl(struct ngbe_softc *);
int			ngbe_phy_led_oem_chk(struct ngbe_softc *, uint32_t *);
int			ngbe_phy_read_reg(struct ngbe_hw *, uint32_t, uint32_t,
			    uint16_t *);
int			ngbe_phy_write_reg(struct ngbe_hw *, uint32_t, uint32_t,
			    uint16_t);
int			ngbe_phy_reset(struct ngbe_softc *);
int			ngbe_phy_set_pause_advertisement(struct ngbe_hw *,
			    uint16_t);
int			ngbe_phy_setup(struct ngbe_softc *);
int			ngbe_phy_setup_link(struct ngbe_softc *, uint32_t, int);
uint16_t		ngbe_read_pci_cfg_word(struct ngbe_softc *, uint32_t);
void			ngbe_release_eeprom_semaphore(struct ngbe_hw *);
int			ngbe_acquire_swfw_sync(struct ngbe_softc *, uint32_t);
void			ngbe_release_swfw_sync(struct ngbe_softc *, uint32_t);
void			ngbe_reset(struct ngbe_softc *);
int			ngbe_reset_hw(struct ngbe_softc *);
void			ngbe_reset_misc(struct ngbe_hw *);
int			ngbe_set_fw_drv_ver(struct ngbe_softc *, uint8_t,
			    uint8_t, uint8_t, uint8_t);
void			ngbe_set_ivar(struct ngbe_softc *, uint16_t, uint16_t,
			    int8_t);
void			ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *);
void			ngbe_set_mta(struct ngbe_hw *, uint8_t *);
void			ngbe_set_pci_config_data(struct ngbe_hw *, uint16_t);
int			ngbe_set_rar(struct ngbe_softc *, uint32_t, uint8_t *,
			    uint64_t, uint32_t);
void			ngbe_set_rx_drop_en(struct ngbe_softc *);
void			ngbe_set_rxpba(struct ngbe_hw *, int, uint32_t, int);
int			ngbe_setup_copper_link(struct ngbe_softc *, uint32_t,
			    int);
int			ngbe_setup_fc(struct ngbe_softc *);
void			ngbe_setup_gpie(struct ngbe_hw *);
void			ngbe_setup_isb(struct ngbe_softc *);
void			ngbe_setup_psrtype(struct ngbe_hw *);
void			ngbe_setup_vlan_hw_support(struct ngbe_softc *);
int			ngbe_start_hw(struct ngbe_softc *);
int			ngbe_stop_adapter(struct ngbe_softc *);
void			ngbe_rx_checksum(uint32_t, struct mbuf *);
void			ngbe_rxeof(struct rx_ring *);
void			ngbe_rxrefill(void *);
int			ngbe_tx_ctx_setup(struct tx_ring *, struct mbuf *,
			    uint32_t *, uint32_t *);
void			ngbe_txeof(struct tx_ring *);
void			ngbe_update_mc_addr_list(struct ngbe_hw *, uint8_t *,
			    uint32_t, ngbe_mc_addr_itr, int);
int			ngbe_validate_mac_addr(uint8_t *);

struct cfdriver ngbe_cd = {
	NULL, "ngbe", DV_IFNET
};

const struct cfattach ngbe_ca = {
	sizeof(struct ngbe_softc), ngbe_match, ngbe_attach, ngbe_detach
};

int
ngbe_match(struct device *parent, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, ngbe_devices,
	    nitems(ngbe_devices));
}

void
ngbe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct ngbe_softc *sc = (struct ngbe_softc *)self;
	struct ngbe_hw *hw = &sc->hw;
	uint32_t eeprom_cksum_devcap, devcap, led_conf;
	int error;

	sc->osdep.os_sc = sc;
	sc->osdep.os_pa = *pa;

	/* Setup PCI resources. */
	if (ngbe_allocate_pci_resources(sc))
		goto fail1;

	sc->num_tx_desc = NGBE_DEFAULT_TXD;
	sc->num_rx_desc = NGBE_DEFAULT_RXD;

	/* Allocate Tx/Rx queues. */
	if (ngbe_allocate_queues(sc))
		goto fail1;

	/* Allocate multicast array memory. */
	sc->mta = mallocarray(ETHER_ADDR_LEN, NGBE_SP_RAR_ENTRIES, M_DEVBUF,
	    M_NOWAIT);
	if (sc->mta == NULL) {
		printf(": can not allocate multicast setup array\n");
		goto fail1;
	}

	/* Allocate interrupt status resources. */
	if (ngbe_allocate_isb(sc))
		goto fail2;

	hw->mac.autoneg = 1;
	hw->phy.autoneg_advertised = NGBE_LINK_SPEED_AUTONEG;
	hw->phy.force_speed = NGBE_LINK_SPEED_UNKNOWN;

	/* Initialize the shared code. */
	ngbe_init_shared_code(sc);

	sc->hw.mac.ops.set_lan_id(&sc->hw);

	/* Check if flash load is done after hw power up. */
	error = ngbe_check_flash_load(sc, NGBE_SPI_ILDR_STATUS_PERST);
	if (error)
		goto fail3;
	error = ngbe_check_flash_load(sc, NGBE_SPI_ILDR_STATUS_PWRRST);
	if (error)
		goto fail3;

	hw->phy.reset_if_overtemp = 1;
	error = sc->hw.mac.ops.reset_hw(sc);
	hw->phy.reset_if_overtemp = 0;
	if (error) {
		printf(": HW reset failed\n");
		goto fail3;
	}

	eeprom_cksum_devcap = devcap = 0;
	if (hw->bus.lan_id == 0) {
		NGBE_WRITE_REG(hw, NGBE_CALSUM_CAP_STATUS, 0);
		NGBE_WRITE_REG(hw, NGBE_EEPROM_VERSION_STORE_REG, 0);
	} else
		eeprom_cksum_devcap = NGBE_READ_REG(hw, NGBE_CALSUM_CAP_STATUS);

	hw->eeprom.ops.init_params(hw);
	hw->mac.ops.release_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB);
	if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) {
		/* Make sure the EEPROM is good */
		if (hw->eeprom.ops.eeprom_chksum_cap_st(sc, NGBE_CALSUM_COMMAND,
		    &devcap)) {
			printf(": eeprom checksum is not valid\n");
			goto fail3;
		}
	}

	led_conf = 0;
	if (hw->eeprom.ops.phy_led_oem_chk(sc, &led_conf))
		sc->led_conf = -1;
	else
		sc->led_conf = led_conf;

	memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	error = ngbe_allocate_msix(sc);
	if (error)
		goto fail3;

	ngbe_setup_interface(sc);

	/* Reset the hardware with the new settings */
	error = hw->mac.ops.start_hw(sc);
	if (error) {
		printf(": HW init failed\n");
		goto fail3;
	}

	/* Pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(sc);

	hw->mac.ops.set_fw_drv_ver(sc, 0xff, 0xff, 0xff, 0xff);

	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
	return;

fail3:
	ngbe_free_isb(sc);
fail2:
	ngbe_free_transmit_structures(sc);
	ngbe_free_receive_structures(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);
fail1:
	ngbe_free_pci_resources(sc);
}

int
ngbe_detach(struct device *self, int flags)
{
	struct ngbe_softc *sc = (struct ngbe_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ngbe_stop(sc);
	ngbe_release_hw_control(sc);

	ether_ifdetach(ifp);
	if_detach(ifp);

	ngbe_free_pci_resources(sc);

	ngbe_free_transmit_structures(sc);
	ngbe_free_receive_structures(sc);
	ngbe_free_isb(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);

	return 0;
}

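/*
 * Read-modify-write helpers for device registers.  A read that comes
 * back as NGBE_FAILED_READ_REG presumably means the device has fallen
 * off the bus, so both helpers bail out instead of acting on (or
 * writing back) a bogus value.
 */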
static inline uint32_t
NGBE_READ_REG_MASK(struct ngbe_hw *hw, uint32_t reg, uint32_t mask)
{
	uint32_t val;

	val = NGBE_READ_REG(hw, reg);
	if (val == NGBE_FAILED_READ_REG)
		return val;
	return val & mask;
}

static inline void
NGBE_WRITE_REG_MASK(struct ngbe_hw *hw, uint32_t reg, uint32_t mask,
    uint32_t field)
{
	uint32_t val;

	val = NGBE_READ_REG(hw, reg);
	if (val == NGBE_FAILED_READ_REG)
		return;
	val = ((val & ~mask) | (field & mask));
	NGBE_WRITE_REG(hw, reg, val);
}

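/*
 * Fetch one word of the interrupt status block that the hardware DMAs
 * into host memory.  The block is stored little-endian; a 32-bit byte
 * swap is its own inverse, so htole32() also converts the value to
 * host order on big-endian machines.
 */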
static inline uint32_t
ngbe_misc_isb(struct ngbe_softc *sc, enum ngbe_isb_idx idx)
{
	return htole32(sc->isb_base[idx]);
}

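/*
 * Bring the interface up: program the station address, set up the Tx
 * and Rx rings, program the interrupt vectors and start link setup.
 * Runs at splnet() and always goes through ngbe_stop() first so the
 * hardware is reinitialized from a known state.
 */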
void
ngbe_init(void *arg)
{
	struct ngbe_softc *sc = (struct ngbe_softc *)arg;
	struct ngbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i, s;

	s = splnet();

	ngbe_stop(sc);

	ngbe_setup_isb(sc);

	/* Setup the receive address. */
	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, NGBE_PSR_MAC_SWC_AD_H_AV);

	/* Get the latest mac address, user can use a LAA. */
	bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, 1);

	ngbe_configure_pb(sc);

	/* Program promiscuous mode and multicast filters. */
	ngbe_iff(sc);

	ngbe_setup_vlan_hw_support(sc);

	/* Prepare transmit descriptors and buffers. */
	if (ngbe_setup_transmit_structures(sc)) {
		printf("%s: could not setup transmit structures\n",
		    DEVNAME(sc));
		ngbe_stop(sc);
		splx(s);
		return;
	}
	if (ngbe_initialize_transmit_unit(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	/* Prepare receive descriptors and buffers. */
	if (ngbe_setup_receive_structures(sc)) {
		printf("%s: could not setup receive structures\n",
		    DEVNAME(sc));
		ngbe_stop(sc);
		splx(s);
		return;
	}
	if (ngbe_initialize_receive_unit(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	ngbe_get_hw_control(hw);
	ngbe_setup_gpie(hw);
	ngbe_configure_ivars(sc);

	if (ngbe_non_sfp_link_config(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	/* Select GMII */
	NGBE_WRITE_REG(hw, NGBE_MAC_TX_CFG,
	    (NGBE_READ_REG(hw, NGBE_MAC_TX_CFG) & ~NGBE_MAC_TX_CFG_SPEED_MASK) |
	    NGBE_MAC_TX_CFG_SPEED_1G);

	/* Clear any pending interrupts, may auto mask */
	NGBE_READ_REG(hw, NGBE_PX_IC);
	NGBE_READ_REG(hw, NGBE_PX_MISC_IC);
	ngbe_enable_intr(sc);

	switch (hw->bus.lan_id) {
	case 0:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN0_UP, NGBE_MIS_PRB_CTL_LAN0_UP);
		break;
	case 1:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN1_UP, NGBE_MIS_PRB_CTL_LAN1_UP);
		break;
	case 2:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN2_UP, NGBE_MIS_PRB_CTL_LAN2_UP);
		break;
	case 3:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN3_UP, NGBE_MIS_PRB_CTL_LAN3_UP);
		break;
	}

	NGBE_WRITE_REG_MASK(hw, NGBE_CFG_PORT_CTL, NGBE_CFG_PORT_CTL_PFRSTD,
	    NGBE_CFG_PORT_CTL_PFRSTD);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->sc_nqueues; i++)
		ifq_clr_oactive(ifp->if_ifqs[i]);
	splx(s);
}

int
ngbe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ngbe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ngbe_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				ngbe_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ngbe_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = ngbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			ngbe_disable_intr(sc);
			ngbe_iff(sc);
			ngbe_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return error;
}

int
ngbe_media_change(struct ifnet *ifp)
{
	struct ngbe_softc *sc = ifp->if_softc;
	struct ngbe_hw *hw = &sc->hw;
	struct ifmedia *ifm = &sc->sc_media;
	uint32_t advertised = 0;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_1000_T:
		advertised |= NGBE_LINK_SPEED_AUTONEG;
		break;
	case IFM_100_TX:
		advertised |= NGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		advertised |= NGBE_LINK_SPEED_10_FULL;
		break;
	default:
		return EINVAL;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(sc, advertised, 1);

	return 0;
}

void
ngbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ngbe_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ngbe_update_link_status(sc);

	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case NGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case NGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case NGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	switch (sc->hw.fc.current_mode) {
	case ngbe_fc_tx_pause:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
		break;
	case ngbe_fc_rx_pause:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		break;
	case ngbe_fc_full:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
		    IFM_ETH_TXPAUSE;
		break;
	default:
		ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
		    IFM_ETH_TXPAUSE);
		break;
	}
}

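/*
 * Post fresh buffers to the Rx ring.  if_rxr_get() hands out at most
 * "slots" descriptors; whatever ngbe_get_buf() could not fill is
 * returned through if_rxr_put() so the ring accounting stays exact.
 * Returns nonzero when at least one descriptor was posted.
 */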
int
ngbe_rxfill(struct rx_ring *rxr)
{
	struct ngbe_softc *sc = rxr->sc;
	int i, post = 0;
	u_int slots;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	i = rxr->last_desc_filled;
	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
	    slots--) {
		if (++i == sc->num_rx_desc)
			i = 0;

		if (ngbe_get_buf(rxr, i) != 0)
			break;

		rxr->last_desc_filled = i;
		post = 1;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	if_rxr_put(&rxr->rx_ring, slots);

	return post;
}

int
ngbe_rxrinfo(struct ngbe_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr;
	struct rx_ring *rxr;
	int error, i, n = 0;

	if ((ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
	    M_WAITOK | M_CANFAIL | M_ZERO)) == NULL)
		return ENOMEM;

	for (i = 0; i < sc->sc_nqueues; i++) {
		rxr = &sc->rx_rings[i];
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
		ifr[n].ifr_info = rxr->rx_ring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
	free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr));

	return error;
}

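/*
 * Transmit start routine, one instance per ifqueue.  The free
 * descriptor count is the distance from next_avail_desc up to
 * next_to_clean, modulo the ring size; a margin of NGBE_MAX_SCATTER + 2
 * slots is kept so a maximally fragmented packet still fits.
 */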
void
ngbe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct ngbe_softc *sc = ifp->if_softc;
	struct tx_ring *txr = ifq->ifq_softc;
	struct mbuf *m;
	unsigned int prod, free, used;
	int post = 0;

	if (!sc->link_up)
		return;

	prod = txr->next_avail_desc;
	free = txr->next_to_clean;
	if (free <= prod)
		free += sc->num_tx_desc;
	free -= prod;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	for (;;) {
		if (free <= NGBE_MAX_SCATTER + 2) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = ngbe_encap(txr, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		txr->watchdog_timer = NGBE_TX_TIMEOUT;
		ifp->if_timer = NGBE_TX_TIMEOUT;

		post = 1;
	}

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	if (post)
		NGBE_WRITE_REG(&sc->hw, NGBE_PX_TR_WP(txr->me),
		    txr->next_avail_desc);
}

void
ngbe_stop(struct ngbe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ngbe_hw *hw = &sc->hw;
	uint32_t rxdctl;
	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;

	/* Tell the stack that the interface is no longer active. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	ngbe_disable_pcie_master(sc);
	/* Disable receives */
	hw->mac.ops.disable_rx(hw);

	for (i = 0; i < sc->sc_nqueues; i++) {
		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    NGBE_PX_RR_CFG_RR_EN, 0);
		do {
			DELAY(10);
			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
		} while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN));
		if (!wait_loop) {
			printf("%s: Rx queue %d not cleared within "
			    "the polling period\n", DEVNAME(sc), i);
			return;
		}
	}

	ngbe_disable_intr(sc);

	switch (hw->bus.lan_id) {
	case 0:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN0_UP, 0);
		break;
	case 1:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN1_UP, 0);
		break;
	case 2:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN2_UP, 0);
		break;
	case 3:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN3_UP, 0);
		break;
	}

	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE, 0);
	for (i = 0; i < sc->sc_nqueues; i++)
		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), NGBE_PX_TR_CFG_SWFLSH);
	NGBE_WRITE_REG_MASK(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0);

	ngbe_reset(sc);

	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, NGBE_PSR_MAC_SWC_AD_H_AV);

	intr_barrier(sc->tag);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifqueue *ifq = ifp->if_ifqs[i];
		ifq_barrier(ifq);
		ifq_clr_oactive(ifq);

		if (sc->queues[i].tag != NULL)
			intr_barrier(sc->queues[i].tag);
		timeout_del(&sc->rx_rings[i].rx_refill);
	}

	ngbe_free_transmit_structures(sc);
	ngbe_free_receive_structures(sc);

	ngbe_update_link_status(sc);
}

void
ngbe_update_link_status(struct ngbe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ngbe_hw *hw = &sc->hw;
	uint32_t reg, speed = 0;
	int link_state = LINK_STATE_DOWN;

	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up, 0);

	ifp->if_baudrate = 0;
	if (sc->link_up) {
		link_state = LINK_STATE_FULL_DUPLEX;

		switch (sc->link_speed) {
		case NGBE_LINK_SPEED_UNKNOWN:
			ifp->if_baudrate = 0;
			break;
		case NGBE_LINK_SPEED_1GB_FULL:
			ifp->if_baudrate = IF_Gbps(1);
			speed = 2;
			break;
		case NGBE_LINK_SPEED_100_FULL:
			ifp->if_baudrate = IF_Mbps(100);
			speed = 1;
			break;
		case NGBE_LINK_SPEED_10_FULL:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		}
		NGBE_WRITE_REG_MASK(hw, NGBE_CFG_LAN_SPEED, 0x3, speed);

		/* Update any flow control changes */
		hw->mac.ops.fc_enable(sc);

		ngbe_set_rx_drop_en(sc);

		if (sc->link_speed & (NGBE_LINK_SPEED_1GB_FULL |
		    NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) {
			NGBE_WRITE_REG(hw, NGBE_MAC_TX_CFG,
			    (NGBE_READ_REG(hw, NGBE_MAC_TX_CFG) &
			    ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE |
			    NGBE_MAC_TX_CFG_SPEED_1G);
		}

		reg = NGBE_READ_REG(hw, NGBE_MAC_RX_CFG);
		NGBE_WRITE_REG(hw, NGBE_MAC_RX_CFG, reg);
		NGBE_WRITE_REG(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR);
		reg = NGBE_READ_REG(hw, NGBE_MAC_WDG_TIMEOUT);
		NGBE_WRITE_REG(hw, NGBE_MAC_WDG_TIMEOUT, reg);
	}

	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
ngbe_watchdog(struct ifnet *ifp)
{
	struct ngbe_softc *sc = ifp->if_softc;
	struct tx_ring *txr = sc->tx_rings;
	int i, tx_hang = 0;

	/*
	 * The timer is set to NGBE_TX_TIMEOUT every time ngbe_start()
	 * queues a packet.  Anytime all descriptors are clean the timer
	 * is set to 0.
	 */
	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
			continue;
		else {
			tx_hang = 1;
			break;
		}
	}
	if (!tx_hang)
		return;

	printf("%s: watchdog timeout\n", DEVNAME(sc));
	ifp->if_oerrors++;

	ifp->if_flags &= ~IFF_RUNNING;
	ngbe_init(sc);
}

int
ngbe_allocate_pci_resources(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	pcireg_t memtype;

	memtype = PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT;
	if (pci_mapreg_map(pa, NGBE_PCIREG, memtype, 0, &os->os_memt,
	    &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
		printf(": unable to map registers\n");
		return ENXIO;
	}
	sc->hw.back = os;

	if (ngbe_setup_msix(sc))
		return EINVAL;

	return 0;
}

void
ngbe_free_pci_resources(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;

	if (sc->tag)
		pci_intr_disestablish(pa->pa_pc, sc->tag);
	sc->tag = NULL;
	if (os->os_membase)
		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
	os->os_membase = 0;
}

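/*
 * Establish the MSI-X handlers: one vector per Rx/Tx queue pair, each
 * steered to its own CPU via the intrmap, plus a final vector for link
 * and other miscellaneous events.
 */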
int
ngbe_allocate_msix(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	struct ngbe_queue *nq;
	pci_intr_handle_t ih;
	int i, error = 0;

	for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++) {
		if (pci_intr_map_msix(pa, i, &ih)) {
			printf(": unable to map msi-x vector %d\n", i);
			error = ENXIO;
			goto fail;
		}

		nq->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
		    ngbe_intr_queue, nq, nq->name);
		if (nq->tag == NULL) {
			printf(": unable to establish interrupt %d\n", i);
			error = ENXIO;
			goto fail;
		}

		nq->msix = i;
	}

	/* Now the link status/control last MSI-X vector */
	if (pci_intr_map_msix(pa, i, &ih)) {
		printf(": unable to map link vector\n");
		error = ENXIO;
		goto fail;
	}

	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
	    ngbe_intr_link, sc, sc->sc_dev.dv_xname);
	if (sc->tag == NULL) {
		printf(": unable to establish link interrupt\n");
		error = ENXIO;
		goto fail;
	}

	sc->linkvec = i;
	printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih), i,
	    (i > 1) ? "s" : "");

	return 0;
fail:
	for (nq = sc->queues; i > 0; i--, nq++) {
		if (nq->tag == NULL)
			continue;
		pci_intr_disestablish(pa->pa_pc, nq->tag);
		nq->tag = NULL;
	}

	return error;
}

void
ngbe_setup_interface(struct ngbe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = ngbe_ioctl;
	ifp->if_qstart = ngbe_start;
	ifp->if_watchdog = ngbe_watchdog;
	ifp->if_hardmtu = NGBE_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN -
	    ETHER_CRC_LEN;
	ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, ngbe_media_change,
	    ngbe_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);

	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);

	if_attach_queues(ifp, sc->sc_nqueues);
	if_attach_iqueues(ifp, sc->sc_nqueues);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifqueue *ifq = ifp->if_ifqs[i];
		struct ifiqueue *ifiq = ifp->if_iqs[i];
		struct tx_ring *txr = &sc->tx_rings[i];
		struct rx_ring *rxr = &sc->rx_rings[i];

		ifq->ifq_softc = txr;
		txr->ifq = ifq;

		ifiq->ifiq_softc = rxr;
		rxr->ifiq = ifiq;
	}
}

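/*
 * Size the MSI-X setup.  One vector is reserved for link events up
 * front; the rest go to intrmap_create(), which limits the queue count
 * to NGBE_MAX_VECTORS and, with INTRMAP_POWEROF2, keeps it a power of
 * two, presumably so RSS spreads flows evenly across the rings.
 */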
int
ngbe_setup_msix(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	int nmsix;

	nmsix = pci_intr_msix_count(pa);
	if (nmsix <= 1) {
		printf(": not enough msi-x vectors\n");
		return EINVAL;
	}

	/* Give one vector to events. */
	nmsix--;

	sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, NGBE_MAX_VECTORS,
	    INTRMAP_POWEROF2);
	sc->sc_nqueues = intrmap_count(sc->sc_intrmap);

	return 0;
}

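/*
 * Single-segment DMA allocator used for the descriptor rings and the
 * interrupt status block.  The usual four bus_dma(9) steps are undone
 * in reverse order on failure:
 *
 *	bus_dmamap_create() -> bus_dmamem_alloc() ->
 *	bus_dmamem_map() -> bus_dmamap_load()
 */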
int
ngbe_dma_malloc(struct ngbe_softc *sc, bus_size_t size,
    struct ngbe_dma_alloc *dma)
{
	struct ngbe_osdep *os = &sc->osdep;

	dma->dma_tag = os->os_pa.pa_dmat;

	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->dma_map))
		return 1;
	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT))
		goto destroy;
	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
		goto free;
	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
	    NULL, BUS_DMA_NOWAIT))
		goto unmap;

	dma->dma_size = size;

	return 0;
unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
free:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return 1;
}

void
ngbe_dma_free(struct ngbe_softc *sc, struct ngbe_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
		dma->dma_map = NULL;
	}
}

int
ngbe_allocate_isb(struct ngbe_softc *sc)
{
	int isize;

	isize = sizeof(uint32_t) * NGBE_ISB_MAX;
	if (ngbe_dma_malloc(sc, isize, &sc->isbdma)) {
		printf("%s: unable to allocate interrupt status resources\n",
		    DEVNAME(sc));
		return ENOMEM;
	}
	sc->isb_base = (uint32_t *)sc->isbdma.dma_vaddr;
	bzero((void *)sc->isb_base, isize);

	return 0;
}

void
ngbe_free_isb(struct ngbe_softc *sc)
{
	ngbe_dma_free(sc, &sc->isbdma);
}

int
ngbe_allocate_queues(struct ngbe_softc *sc)
{
	struct ngbe_queue *nq;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	int i, rsize, rxconf, tsize, txconf;

	/* Allocate the top level queue structs. */
	sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct ngbe_queue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		printf("%s: unable to allocate queue\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the Tx ring. */
	sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->tx_rings == NULL) {
		printf("%s: unable to allocate Tx ring\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the Rx ring. */
	sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->rx_rings == NULL) {
		printf("%s: unable to allocate Rx ring\n", DEVNAME(sc));
		goto rx_fail;
	}

	txconf = rxconf = 0;

	/* Set up the Tx queues. */
	tsize = roundup2(sc->num_tx_desc * sizeof(union ngbe_tx_desc),
	    PAGE_SIZE);
	for (i = 0; i < sc->sc_nqueues; i++, txconf++) {
		txr = &sc->tx_rings[i];
		txr->sc = sc;
		txr->me = i;

		if (ngbe_dma_malloc(sc, tsize, &txr->txdma)) {
			printf("%s: unable to allocate Tx descriptor\n",
			    DEVNAME(sc));
			goto err_tx_desc;
		}
		txr->tx_base = (union ngbe_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);
	}

	/* Set up the Rx queues. */
	rsize = roundup2(sc->num_rx_desc * sizeof(union ngbe_rx_desc),
	    PAGE_SIZE);
	for (i = 0; i < sc->sc_nqueues; i++, rxconf++) {
		rxr = &sc->rx_rings[i];
		rxr->sc = sc;
		rxr->me = i;
		timeout_set(&rxr->rx_refill, ngbe_rxrefill, rxr);

		if (ngbe_dma_malloc(sc, rsize, &rxr->rxdma)) {
			printf("%s: unable to allocate Rx descriptor\n",
			    DEVNAME(sc));
			goto err_rx_desc;
		}
		rxr->rx_base = (union ngbe_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);
	}

	/* Set up the queue holding structs. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		nq = &sc->queues[i];
		nq->sc = sc;
		nq->txr = &sc->tx_rings[i];
		nq->rxr = &sc->rx_rings[i];
		snprintf(nq->name, sizeof(nq->name), "%s:%d", DEVNAME(sc), i);
	}

	return 0;

err_rx_desc:
	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
		ngbe_dma_free(sc, &rxr->rxdma);
err_tx_desc:
	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
		ngbe_dma_free(sc, &txr->txdma);
	free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring));
	sc->rx_rings = NULL;
rx_fail:
	free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring));
	sc->tx_rings = NULL;
fail:
	return ENOMEM;
}

void
ngbe_free_receive_structures(struct ngbe_softc *sc)
{
	struct rx_ring *rxr;
	int i;

	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
		if_rxr_init(&rxr->rx_ring, 0, 0);

	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
		ngbe_free_receive_buffers(rxr);
}

void
ngbe_free_receive_buffers(struct rx_ring *rxr)
{
	struct ngbe_softc *sc;
	struct ngbe_rx_buf *rxbuf;
	int i;

	sc = rxr->sc;
	if (rxr->rx_buffers != NULL) {
		for (i = 0; i < sc->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->buf != NULL) {
				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
				    0, rxbuf->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->rxdma.dma_tag,
				    rxbuf->map);
				m_freem(rxbuf->buf);
				rxbuf->buf = NULL;
			}
			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
			rxbuf->map = NULL;
		}
		free(rxr->rx_buffers, M_DEVBUF,
		    sc->num_rx_desc * sizeof(struct ngbe_rx_buf));
		rxr->rx_buffers = NULL;
	}
}

void
ngbe_free_transmit_structures(struct ngbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, txr++)
		ngbe_free_transmit_buffers(txr);
}

void
ngbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct ngbe_softc *sc = txr->sc;
	struct ngbe_tx_buf *tx_buffer;
	int i;

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
			    0, tx_buffer->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
		}
		if (tx_buffer->m_head != NULL) {
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (tx_buffer->map != NULL) {
			bus_dmamap_destroy(txr->txdma.dma_tag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}

	if (txr->tx_buffers != NULL)
		free(txr->tx_buffers, M_DEVBUF,
		    sc->num_tx_desc * sizeof(struct ngbe_tx_buf));
	txr->tx_buffers = NULL;
	txr->txtag = NULL;
}

int
ngbe_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct ngbe_softc *sc = rxr->sc;
	struct ngbe_rx_buf *rxbuf;
	int i, error;

	rxr->rx_buffers = mallocarray(sc->num_rx_desc,
	    sizeof(struct ngbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rxr->rx_buffers == NULL) {
		printf("%s: unable to allocate rx_buffer memory\n",
		    DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	rxbuf = rxr->rx_buffers;
	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
		error = bus_dmamap_create(rxr->rxdma.dma_tag,
		    NGBE_MAX_JUMBO_FRAME_SIZE, 1, NGBE_MAX_JUMBO_FRAME_SIZE, 0,
		    BUS_DMA_NOWAIT, &rxbuf->map);
		if (error) {
			printf("%s: unable to create RX DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	return error;
}

int
ngbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct ngbe_softc *sc = txr->sc;
	struct ngbe_tx_buf *txbuf;
	int error, i;

	txr->tx_buffers = mallocarray(sc->num_tx_desc,
	    sizeof(struct ngbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->tx_buffers == NULL) {
		printf("%s: unable to allocate tx_buffer memory\n",
		    DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}
	txr->txtag = txr->txdma.dma_tag;

	/* Create the descriptor buffer dma maps. */
	for (i = 0; i < sc->num_tx_desc; i++) {
		txbuf = &txr->tx_buffers[i];
		error = bus_dmamap_create(txr->txdma.dma_tag, NGBE_TSO_SIZE,
		    NGBE_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT,
		    &txbuf->map);
		if (error != 0) {
			printf("%s: unable to create TX DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	return 0;
fail:
	return error;
}

int
ngbe_setup_receive_ring(struct rx_ring *rxr)
{
	struct ngbe_softc *sc = rxr->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rsize;

	rsize = roundup2(sc->num_rx_desc * sizeof(union ngbe_rx_desc),
	    PAGE_SIZE);

	/* Clear the ring contents. */
	bzero((void *)rxr->rx_base, rsize);

	if (ngbe_allocate_receive_buffers(rxr))
		return ENOMEM;

	/* Setup our descriptor indices. */
	rxr->next_to_check = 0;
	rxr->last_desc_filled = sc->num_rx_desc - 1;

	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
	    sc->num_rx_desc - 1);

	ngbe_rxfill(rxr);
	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
		printf("%s: unable to fill any rx descriptors\n", DEVNAME(sc));
		return ENOBUFS;
	}

	return 0;
}

int
ngbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct ngbe_softc *sc = txr->sc;

	/* Now allocate transmit buffers for the ring. */
	if (ngbe_allocate_transmit_buffers(txr))
		return ENOMEM;

	/* Clear the old ring contents */
	bzero((void *)txr->tx_base,
	    (sizeof(union ngbe_tx_desc)) * sc->num_tx_desc);

	/* Reset indices. */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

int
ngbe_setup_receive_structures(struct ngbe_softc *sc)
{
	struct rx_ring *rxr = sc->rx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
		if (ngbe_setup_receive_ring(rxr))
			goto fail;
	}

	return 0;
fail:
	ngbe_free_receive_structures(sc);
	return ENOBUFS;
}

int
ngbe_setup_transmit_structures(struct ngbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
		if (ngbe_setup_transmit_ring(txr))
			goto fail;
	}

	return 0;
fail:
	ngbe_free_transmit_structures(sc);
	return ENOBUFS;
}

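/*
 * Iterator callback handed to update_mc_addr_list(): it returns the
 * current multicast address and advances the caller's pointer by
 * ETHER_ADDR_LEN.  VMDq is not used here, so *vmdq is always 0.
 */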
uint8_t *
ngbe_addr_list_itr(struct ngbe_hw *hw, uint8_t **mc_addr_ptr, uint32_t *vmdq)
{
	uint8_t *addr = *mc_addr_ptr;
	uint8_t *newptr;
	*vmdq = 0;

	newptr = addr + ETHER_ADDR_LEN;
	*mc_addr_ptr = newptr;
	return addr;
}

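/*
 * Program the hardware receive filters.  If the interface is
 * promiscuous, has multicast ranges, or joins more groups than there
 * are NGBE_SP_RAR_ENTRIES, fall back to IFF_ALLMULTI; otherwise copy
 * the multicast list into sc->mta and let the hardware hash it.
 */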
void
ngbe_iff(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t fctrl, vlanctrl;
	uint8_t *mta, *update_ptr;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);

	fctrl = NGBE_READ_REG_MASK(hw, NGBE_PSR_CTL,
	    ~(NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE));
	vlanctrl = NGBE_READ_REG_MASK(hw, NGBE_PSR_VLAN_CTL,
	    ~(NGBE_PSR_VLAN_CTL_VFE | NGBE_PSR_VLAN_CTL_CFIEN));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Set all bits that we expect to always be set */
	fctrl |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_MFE;
	vlanctrl |= NGBE_PSR_VLAN_CTL_VFE;

	hw->addr_ctrl.user_set_promisc = 0;
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > NGBE_SP_RAR_ENTRIES) {
		ifp->if_flags |= IFF_ALLMULTI;
		fctrl |= NGBE_PSR_CTL_MPE;
		if (ifp->if_flags & IFF_PROMISC) {
			fctrl |= NGBE_PSR_CTL_UPE;
			vlanctrl &= ~NGBE_PSR_VLAN_CTL_VFE;
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, &mta[mcnt * ETHER_ADDR_LEN],
			    ETHER_ADDR_LEN);
			mcnt++;

			ETHER_NEXT_MULTI(step, enm);
		}

		update_ptr = mta;
		hw->mac.ops.update_mc_addr_list(hw, update_ptr, mcnt,
		    ngbe_addr_list_itr, 1);
	}

	NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_CTL, vlanctrl);
	NGBE_WRITE_REG(hw, NGBE_PSR_CTL, fctrl);
}

int
ngbe_initialize_receive_unit(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct rx_ring *rxr = sc->rx_rings;
	uint32_t bufsz, mhadd, rxctrl, rxdctl, srrctl;
	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;
	int error = 0;

	/* Disable receives while setting up the descriptors */
	hw->mac.ops.disable_rx(hw);

	ngbe_setup_psrtype(hw);

	/* Enable hw crc stripping */
	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_CRC_STRIP,
	    NGBE_RSEC_CTL_CRC_STRIP);

	if (sc->sc_nqueues > 1) {
		NGBE_WRITE_REG_MASK(hw, NGBE_PSR_CTL, NGBE_PSR_CTL_PCSD,
		    NGBE_PSR_CTL_PCSD);
		ngbe_initialize_rss_mapping(sc);
	}

	mhadd = NGBE_READ_REG(hw, NGBE_PSR_MAX_SZ);
	if (mhadd != NGBE_MAX_JUMBO_FRAME_SIZE)
		NGBE_WRITE_REG(hw, NGBE_PSR_MAX_SZ, NGBE_MAX_JUMBO_FRAME_SIZE);

	bufsz = MCLBYTES >> NGBE_PX_RR_CFG_BSIZEPKT_SHIFT;

	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;

		/* Disable queue to avoid issues while updating state */
		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    NGBE_PX_RR_CFG_RR_EN, 0);

		/* Hardware may take up to 100us to actually disable Rx queue */
		do {
			DELAY(10);
			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
		} while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN));
		if (!wait_loop) {
			printf("%s: Rx queue %d not cleared within "
			    "the polling period\n", DEVNAME(sc), i);
			error = ETIMEDOUT;
			goto out;
		}

		NGBE_WRITE_REG(hw, NGBE_PX_RR_BAL(i),
		    (rdba & 0x00000000ffffffffULL));
		NGBE_WRITE_REG(hw, NGBE_PX_RR_BAH(i), (rdba >> 32));

		rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
		rxdctl |=
		    (sc->num_rx_desc / 128) << NGBE_PX_RR_CFG_RR_SIZE_SHIFT;
		rxdctl |= 0x1 << NGBE_PX_RR_CFG_RR_THER_SHIFT;
		NGBE_WRITE_REG(hw, NGBE_PX_RR_CFG(i), rxdctl);

		/* Reset head and tail pointers */
		NGBE_WRITE_REG(hw, NGBE_PX_RR_RP(i), 0);
		NGBE_WRITE_REG(hw, NGBE_PX_RR_WP(i), 0);

		/* Set up the SRRCTL register */
		srrctl = NGBE_READ_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    ~(NGBE_PX_RR_CFG_RR_HDR_SZ | NGBE_PX_RR_CFG_RR_BUF_SZ |
		    NGBE_PX_RR_CFG_SPLIT_MODE));
		srrctl |= bufsz;
		NGBE_WRITE_REG(hw, NGBE_PX_RR_CFG(i), srrctl);

		/* Enable receive descriptor ring */
		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    NGBE_PX_RR_CFG_RR_EN, NGBE_PX_RR_CFG_RR_EN);

		do {
			msec_delay(1);
			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
		} while (--wait_loop && !(rxdctl & NGBE_PX_RR_CFG_RR_EN));
		if (!wait_loop) {
			printf("%s: Rx queue %d not set within "
			    "the polling period\n", DEVNAME(sc), i);
			error = ETIMEDOUT;
			goto out;
		}
		NGBE_WRITE_REG(hw, NGBE_PX_RR_WP(i), rxr->last_desc_filled);
	}

	/* Enable all receives */
	rxctrl = NGBE_READ_REG(hw, NGBE_RDB_PB_CTL);
	rxctrl |= NGBE_RDB_PB_CTL_PBEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
out:
	return error;
}

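/*
 * Spread received flows across the rings.  The 128-entry redirection
 * table is filled round-robin with queue ids, four 8-bit entries per
 * 32-bit NGBE_RDB_RSSTBL register, and the hash key comes from the
 * system-wide Toeplitz seed so hardware and stack hash alike.
 */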
void
ngbe_initialize_rss_mapping(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t reta = 0, rss_field, rss_key[10];
	int i, j, queue_id;

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == sc->sc_nqueues)
			j = 0;
		queue_id = j;
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			NGBE_WRITE_REG(hw, NGBE_RDB_RSSTBL(i >> 2), reta);
			reta = 0;
		}
	}

	/* Set up random bits */
	stoeplitz_to_key(&rss_key, sizeof(rss_key));

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		NGBE_WRITE_REG(hw, NGBE_RDB_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	rss_field = NGBE_RDB_RA_CTL_RSS_EN | NGBE_RDB_RA_CTL_RSS_IPV4 |
	    NGBE_RDB_RA_CTL_RSS_IPV4_TCP | NGBE_RDB_RA_CTL_RSS_IPV6 |
	    NGBE_RDB_RA_CTL_RSS_IPV6_TCP;

	NGBE_WRITE_REG(hw, NGBE_RDB_RA_CTL, rss_field);
}

int
ngbe_initialize_transmit_unit(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct tx_ring *txr;
	uint64_t tdba;
	uint32_t txdctl;
	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;
	int error = 0;

	/* TDM_CTL.TE must be before Tx queues are enabled */
	NGBE_WRITE_REG_MASK(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE,
	    NGBE_TDM_CTL_TE);

	/* Setup the base and length of the Tx descriptor ring. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		txr = &sc->tx_rings[i];
		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;

		/* Disable queue to avoid issues while updating state */
		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), NGBE_PX_TR_CFG_SWFLSH);
		NGBE_WRITE_FLUSH(hw);

		NGBE_WRITE_REG(hw, NGBE_PX_TR_BAL(i),
		    (tdba & 0x00000000ffffffffULL));
		NGBE_WRITE_REG(hw, NGBE_PX_TR_BAH(i), (tdba >> 32));

		/* Reset head and tail pointers */
		NGBE_WRITE_REG(hw, NGBE_PX_TR_RP(i), 0);
		NGBE_WRITE_REG(hw, NGBE_PX_TR_WP(i), 0);

		txr->watchdog_timer = 0;

		txdctl = NGBE_PX_TR_CFG_ENABLE;
		txdctl |= 4 << NGBE_PX_TR_CFG_TR_SIZE_SHIFT;
		txdctl |= 0x20 << NGBE_PX_TR_CFG_WTHRESH_SHIFT;

		/* Enable queue */
		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), txdctl);

		/* Poll to verify queue is enabled */
		do {
			msec_delay(1);
			txdctl = NGBE_READ_REG(hw, NGBE_PX_TR_CFG(i));
		} while (--wait_loop && !(txdctl & NGBE_PX_TR_CFG_ENABLE));
		if (!wait_loop) {
			printf("%s: Tx queue %d not set within "
			    "the polling period\n", DEVNAME(sc), i);
			error = ETIMEDOUT;
			goto out;
		}
	}

	ifp->if_timer = 0;

	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_BUF_AE, 0x3ff, 0x10);
	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_CTL, 0x2, 0);
	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_CTL, 0x1, 1);

	/* Enable mac transmitter */
	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE,
	    NGBE_MAC_TX_CFG_TE);
out:
	return error;
}

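/*
 * Handler for the last, miscellaneous/link MSI-X vector.  The cause
 * bits come from the DMA'd interrupt status block rather than a
 * register read; PHY and GPIO events trigger a link state re-check
 * under the kernel lock before the vector is re-enabled.
 */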
1774 ngbe_intr_link(void *arg)
1775 {
1776 	struct ngbe_softc *sc = (struct ngbe_softc *)arg;
1777 	uint32_t eicr;
1778 
1779 	eicr = ngbe_misc_isb(sc, NGBE_ISB_MISC);
1780 	if (eicr & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) {
1781 		KERNEL_LOCK();
1782 		ngbe_handle_phy_event(sc);
1783 		ngbe_update_link_status(sc);
1784 		KERNEL_UNLOCK();
1785 	}
1786 	ngbe_enable_queue(sc, sc->linkvec);
1787 	return 1;
1788 }
1789 
1790 int
1791 ngbe_intr_queue(void *arg)
1792 {
1793 	struct ngbe_queue *nq = arg;
1794 	struct ngbe_softc *sc = nq->sc;
1795 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1796 	struct rx_ring *rxr = nq->rxr;
1797 	struct tx_ring *txr = nq->txr;
1798 
1799 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1800 		ngbe_rxeof(rxr);
1801 		ngbe_txeof(txr);
1802 		ngbe_rxrefill(rxr);
1803 	}
1804 
1805 	ngbe_enable_queue(sc, nq->msix);
1806 
1807 	return 1;
1808 }
1809 
1810 void
1811 ngbe_init_eeprom_params(struct ngbe_hw *hw)
1812 {
1813 	struct ngbe_eeprom_info *eeprom = &hw->eeprom;
1814 
1815 	if (eeprom->type == ngbe_eeprom_uninitialized) {
1816 		eeprom->type = ngbe_eeprom_none;
1817 
1818 		if (!(NGBE_READ_REG(hw, NGBE_SPI_STATUS) &
1819 		    NGBE_SPI_STATUS_FLASH_BYPASS))
1820 			eeprom->type = ngbe_flash;
1821 	}
1822 
1823 	eeprom->sw_region_offset = 0x80;
1824 }
1825 
1826 int
1827 ngbe_init_hw(struct ngbe_softc *sc)
1828 {
1829 	struct ngbe_hw *hw = &sc->hw;
1830 	int status;
1831 
1832 	/* Reset the hardware */
1833 	status = hw->mac.ops.reset_hw(sc);
1834 
1835 	if (!status)
1836 		status = hw->mac.ops.start_hw(sc);
1837 
1838 	return status;
1839 }
1840 
1841 void
1842 ngbe_init_ops(struct ngbe_hw *hw)
1843 {
1844 	struct ngbe_mac_info *mac = &hw->mac;
1845 	struct ngbe_phy_info *phy = &hw->phy;
1846 	struct ngbe_eeprom_info *eeprom = &hw->eeprom;
1847 
1848 	phy->ops.reset = ngbe_phy_reset;
1849 	phy->ops.read_reg = ngbe_phy_read_reg;
1850 	phy->ops.write_reg = ngbe_phy_write_reg;
1851 	phy->ops.setup_link = ngbe_phy_setup_link;
1852 	phy->ops.phy_led_ctrl = ngbe_phy_led_ctrl;
1853 	phy->ops.check_overtemp = ngbe_phy_check_overtemp;
1854 	phy->ops.identify = ngbe_phy_identify;
1855 	phy->ops.init = ngbe_phy_init;
1856 	phy->ops.check_event = ngbe_phy_check_event;
1857 	phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause;
1858 	phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause;
1859 	phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement;
1860 	phy->ops.setup_once = ngbe_phy_setup;
1861 
1862 	/* MAC */
1863 	mac->ops.init_hw = ngbe_init_hw;
1864 	mac->ops.clear_hw_cntrs = ngbe_clear_hw_cntrs;
1865 	mac->ops.get_mac_addr = ngbe_get_mac_addr;
1866 	mac->ops.stop_adapter = ngbe_stop_adapter;
1867 	mac->ops.get_bus_info = ngbe_get_bus_info;
1868 	mac->ops.set_lan_id = ngbe_set_lan_id_multi_port_pcie;
1869 	mac->ops.acquire_swfw_sync = ngbe_acquire_swfw_sync;
1870 	mac->ops.release_swfw_sync = ngbe_release_swfw_sync;
1871 	mac->ops.reset_hw = ngbe_reset_hw;
1872 	mac->ops.get_media_type = ngbe_get_media_type;
1873 	mac->ops.disable_sec_rx_path = ngbe_disable_sec_rx_path;
1874 	mac->ops.enable_sec_rx_path = ngbe_enable_sec_rx_path;
1875 	mac->ops.enable_rx_dma = ngbe_enable_rx_dma;
1876 	mac->ops.start_hw = ngbe_start_hw;
1877 
1878 	/* RAR, Multicast, VLAN */
1879 	mac->ops.set_rar = ngbe_set_rar;
1880 	mac->ops.init_rx_addrs = ngbe_init_rx_addrs;
1881 	mac->ops.update_mc_addr_list = ngbe_update_mc_addr_list;
1882 	mac->ops.enable_rx = ngbe_enable_rx;
1883 	mac->ops.disable_rx = ngbe_disable_rx;
1884 	mac->ops.clear_vfta = ngbe_clear_vfta;
1885 	mac->ops.init_uta_tables = ngbe_init_uta_tables;
1886 
1887 	/* Flow Control */
1888 	mac->ops.fc_enable = ngbe_fc_enable;
1889 	mac->ops.setup_fc = ngbe_setup_fc;
1890 
1891 	/* Link */
1892 	mac->ops.check_link = ngbe_check_mac_link;
1893 	mac->ops.setup_rxpba = ngbe_set_rxpba;
1894 
1895 	mac->mcft_size = NGBE_SP_MC_TBL_SIZE;
1896 	mac->vft_size = NGBE_SP_VFT_TBL_SIZE;
1897 	mac->num_rar_entries = NGBE_SP_RAR_ENTRIES;
1898 	mac->rx_pb_size = NGBE_SP_RX_PB_SIZE;
1899 	mac->max_rx_queues = NGBE_SP_MAX_RX_QUEUES;
1900 	mac->max_tx_queues = NGBE_SP_MAX_TX_QUEUES;
1901 
1902 	/* EEPROM */
1903 	eeprom->ops.init_params = ngbe_init_eeprom_params;
1904 	eeprom->ops.eeprom_chksum_cap_st = ngbe_eepromcheck_cap;
1905 	eeprom->ops.phy_led_oem_chk = ngbe_phy_led_oem_chk;
1906 
1907 	/* Manageability interface */
1908 	mac->ops.set_fw_drv_ver = ngbe_set_fw_drv_ver;
1909 	mac->ops.init_thermal_sensor_thresh = ngbe_init_thermal_sensor_thresh;
1910 }
1911 
1912 void
1913 ngbe_init_rx_addrs(struct ngbe_softc *sc)
1914 {
1915 	struct ngbe_hw *hw = &sc->hw;
1916 	uint32_t rar_entries = hw->mac.num_rar_entries;
1917 	uint32_t i, psrctl;
1918 
1919 	/*
1920 	 * If the current mac address is valid, assume it is a software
1921 	 * override to the permanent address.
1922 	 * Otherwise, use the permanent address from the eeprom.
1923 	 */
1924 	if (ngbe_validate_mac_addr(hw->mac.addr)) {
1925 		/* Get the MAC address from the RAR0 for later reference */
1926 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1927 	}
1928 	hw->addr_ctrl.overflow_promisc = 0;
1929 	hw->addr_ctrl.rar_used_count = 1;
1930 
1931 	/* Zero out the other receive addresses. */
1932 	for (i = 1; i < rar_entries; i++) {
1933 		NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, i);
1934 		NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_L, 0);
1935 		NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_H, 0);
1936 	}
1937 
1938 	/* Clear the MTA */
1939 	hw->addr_ctrl.mta_in_use = 0;
1940 	psrctl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
1941 	psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
1942 	psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT;
1943 	NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctl);
1944 
1945 	for (i = 0; i < hw->mac.mcft_size; i++)
1946 		NGBE_WRITE_REG(hw, NGBE_PSR_MC_TBL(i), 0);
1947 
1948 	hw->mac.ops.init_uta_tables(hw);
1949 }
1950 
1951 void
1952 ngbe_init_shared_code(struct ngbe_softc *sc)
1953 {
1954 	struct ngbe_osdep *os = &sc->osdep;
1955 	struct pci_attach_args *pa = &os->os_pa;
1956 	struct ngbe_hw *hw = &sc->hw;
1957 
1958 	hw->subsystem_device_id = PCI_PRODUCT(pci_conf_read(pa->pa_pc,
1959 	    pa->pa_tag, PCI_SUBSYS_ID_REG));
1960 
1961 	hw->phy.type = ngbe_phy_internal;
1962 
1963 	NGBE_WRITE_REG(hw, NGBE_MDIO_CLAUSE_SELECT, 0xf);
1964 
1965 	ngbe_init_ops(hw);
1966 
1967 	/* Default flow control settings. */
1968 	hw->fc.requested_mode = ngbe_fc_full;
1969 	hw->fc.current_mode = ngbe_fc_full;
1970 
1971 	hw->fc.pause_time = NGBE_DEFAULT_FCPAUSE;
1972 	hw->fc.disable_fc_autoneg = 0;
1973 }
1974 
1975 void
1976 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw)
1977 {
1978 	/* Only support thermal sensors attached to SP physical port 0 */
1979 	if (hw->bus.lan_id)
1980 		return;
1981 
1982 	NGBE_WRITE_REG(hw, NGBE_TS_INT_EN, NGBE_TS_INT_EN_DALARM_INT_EN |
1983 	    NGBE_TS_INT_EN_ALARM_INT_EN);
1984 	NGBE_WRITE_REG(hw, NGBE_TS_EN, NGBE_TS_EN_ENA);
1985 
1986 	NGBE_WRITE_REG(hw, NGBE_TS_ALARM_THRE, 0x344);
1987 	NGBE_WRITE_REG(hw, NGBE_TS_DALARM_THRE, 0x330);
1988 }
1989 
1990 void
1991 ngbe_init_uta_tables(struct ngbe_hw *hw)
1992 {
1993 	int i;
1994 
1995 	for (i = 0; i < 128; i++)
1996 		NGBE_WRITE_REG(hw, NGBE_PSR_UC_TBL(i), 0);
1997 }
1998 
1999 void
2000 ngbe_fc_autoneg(struct ngbe_softc *sc)
2001 {
2002 	struct ngbe_hw *hw = &sc->hw;
2003 	uint32_t speed;
2004 	int link_up;
2005 	int error = EINVAL;
2006 
2007 	/*
2008 	 * AN should have completed when the cable was plugged in.
2009 	 * Look for reasons to bail out.  Bail out if:
2010 	 * - FC autoneg is disabled, or if
2011 	 * - link is not up.
2012 	 */
2013 	if (hw->fc.disable_fc_autoneg) {
2014 		printf("%s: flow control autoneg is disabled\n", DEVNAME(sc));
2015 		goto out;
2016 	}
2017 
2018 	hw->mac.ops.check_link(hw, &speed, &link_up, 0);
2019 	if (!link_up)
2020 		goto out;
2021 
2022 	switch (hw->phy.media_type) {
2023 	/* Autoneg flow control on fiber adapters */
2024 	case ngbe_media_type_fiber:
2025 		break;
2026 
2027 	/* Autoneg flow control on copper adapters */
2028 	case ngbe_media_type_copper:
2029 		error = ngbe_fc_autoneg_copper(sc);
2030 		break;
2031 	default:
2032 		break;
2033 	}
2034 out:
2035 	if (error) {
2036 		hw->fc.fc_was_autonegged = 0;
2037 		hw->fc.current_mode = hw->fc.requested_mode;
2038 	} else
2039 		hw->fc.fc_was_autonegged = 1;
2040 }
2041 
2042 int
2043 ngbe_fc_autoneg_copper(struct ngbe_softc *sc)
2044 {
2045 	struct ngbe_hw *hw = &sc->hw;
2046 	uint8_t technology_ability_reg, lp_technology_ability_reg;
2047 
2048 	technology_ability_reg = lp_technology_ability_reg = 0;
2049 	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
2050 		hw->phy.ops.get_adv_pause(hw, &technology_ability_reg);
2051 		hw->phy.ops.get_lp_adv_pause(hw, &lp_technology_ability_reg);
2052 	}
2053 
2054 	return ngbe_negotiate_fc(sc, (uint32_t)technology_ability_reg,
2055 	    (uint32_t)lp_technology_ability_reg, NGBE_TAF_SYM_PAUSE,
2056 	    NGBE_TAF_ASM_PAUSE, NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE);
2057 }
2058 
2059 int
2060 ngbe_fc_enable(struct ngbe_softc *sc)
2061 {
2062 	struct ngbe_hw *hw = &sc->hw;
2063 	uint32_t mflcn, fccfg;
2064 	uint32_t fcrtl, fcrth;
2065 	uint32_t reg;
2066 	int error = 0;
2067 
2068 	/* Validate the water mark configuration */
2069 	if (!hw->fc.pause_time) {
2070 		error = EINVAL;
2071 		goto out;
2072 	}
2073 
2074 	/* Low water mark of zero causes XOFF floods */
2075 	if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
2076 		if (!hw->fc.low_water ||
2077 		    hw->fc.low_water >= hw->fc.high_water) {
2078 			printf("%s: invalid water mark configuration\n",
2079 			    DEVNAME(sc));
2080 			error = EINVAL;
2081 			goto out;
2082 		}
2083 	}
2084 
2085 	/* Negotiate the fc mode to use */
2086 	ngbe_fc_autoneg(sc);
2087 
2088 	/* Disable any previous flow control settings */
2089 	mflcn = NGBE_READ_REG(hw, NGBE_MAC_RX_FLOW_CTRL);
2090 	mflcn &= ~NGBE_MAC_RX_FLOW_CTRL_RFE;
2091 
2092 	fccfg = NGBE_READ_REG(hw, NGBE_RDB_RFCC);
2093 	fccfg &= ~NGBE_RDB_RFCC_RFCE_802_3X;
2094 
2095 	/*
2096 	 * The possible values of fc.current_mode are:
2097 	 * 0: Flow control is completely disabled
2098 	 * 1: Rx flow control is enabled (we can receive pause frames,
2099 	 *    but not send pause frames).
2100 	 * 2: Tx flow control is enabled (we can send pause frames but
2101 	 *    we do not support receiving pause frames).
2102 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2103 	 * other: Invalid.
2104 	 */
2105 	switch (hw->fc.current_mode) {
2106 	case ngbe_fc_none:
2107 		/*
2108 		 * Flow control is disabled by software override or autoneg.
2109 		 * The code below will actually disable it in the HW.
2110 		 */
2111 		break;
2112 	case ngbe_fc_rx_pause:
2113 		/*
2114 		 * Rx Flow control is enabled and Tx Flow control is
2115 		 * disabled by software override. Since there really
2116 		 * isn't a way to advertise that we are capable of RX
2117 		 * Pause ONLY, we will advertise that we support both
2118 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2119 		 * disable the adapter's ability to send PAUSE frames.
2120 		 */
2121 		mflcn |= NGBE_MAC_RX_FLOW_CTRL_RFE;
2122 		break;
2123 	case ngbe_fc_tx_pause:
2124 		/*
2125 		 * Tx Flow control is enabled, and Rx Flow control is
2126 		 * disabled by software override.
2127 		 */
2128 		fccfg |= NGBE_RDB_RFCC_RFCE_802_3X;
2129 		break;
2130 	case ngbe_fc_full:
2131 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2132 		mflcn |= NGBE_MAC_RX_FLOW_CTRL_RFE;
2133 		fccfg |= NGBE_RDB_RFCC_RFCE_802_3X;
2134 		break;
2135 	default:
2136 		printf("%s: flow control param set incorrectly\n", DEVNAME(sc));
2137 		error = EINVAL;
2138 		goto out;
2139 	}
2140 
2141 	/* Set 802.3x based flow control settings. */
2142 	NGBE_WRITE_REG(hw, NGBE_MAC_RX_FLOW_CTRL, mflcn);
2143 	NGBE_WRITE_REG(hw, NGBE_RDB_RFCC, fccfg);
2144 
2145 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2146 	if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
2147 		/* 32-byte granularity */
2148 		fcrtl = (hw->fc.low_water << 10) | NGBE_RDB_RFCL_XONE;
2149 		NGBE_WRITE_REG(hw, NGBE_RDB_RFCL, fcrtl);
2150 		fcrth = (hw->fc.high_water << 10) | NGBE_RDB_RFCH_XOFFE;
2151 	} else {
2152 		NGBE_WRITE_REG(hw, NGBE_RDB_RFCL, 0);
2153 		/*
2154 		 * In order to prevent Tx hangs when the internal Tx
2155 		 * switch is enabled we must set the high water mark
2156 		 * to the Rx packet buffer size - 24KB.  This allows
2157 		 * the Tx switch to function even under heavy Rx
2158 		 * workloads.
2159 		 */
2160 		fcrth = NGBE_READ_REG(hw, NGBE_RDB_PB_SZ) - 24576;
2161 	}
2162 
2163 	NGBE_WRITE_REG(hw, NGBE_RDB_RFCH, fcrth);
2164 
2165 	/* Configure pause time (2 TCs per register) */
2166 	reg = hw->fc.pause_time * 0x00010000;
2167 	NGBE_WRITE_REG(hw, NGBE_RDB_RFCV, reg);
2168 
2169 	/* Configure flow control refresh threshold value */
2170 	NGBE_WRITE_REG(hw, NGBE_RDB_RFCRT, hw->fc.pause_time / 2);
2171 out:
2172 	return error;
2173 }
2174 
2175 int
2176 ngbe_fmgr_cmd_op(struct ngbe_hw *hw, uint32_t cmd, uint32_t cmd_addr)
2177 {
2178 	uint32_t val;
2179 	int timeout = 0;
2180 
2181 	val = (cmd << SPI_CLK_CMD_OFFSET) | cmd_addr |
2182 	    (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET);
2183 	NGBE_WRITE_REG(hw, NGBE_SPI_CMD, val);
2184 	for (;;) {
2185 		if (NGBE_READ_REG(hw, NGBE_SPI_STATUS) & 0x1)
2186 			break;
2187 		if (timeout == SPI_TIME_OUT_VALUE)
2188 			return ETIMEDOUT;
2189 
2190 		timeout++;
2191 		DELAY(10);
2192 	}
2193 
2194 	return 0;
2195 }
2196 
2197 uint32_t
2198 ngbe_flash_read_dword(struct ngbe_hw *hw, uint32_t addr)
2199 {
2200 	int status = ngbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr);
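	/*
	 * Note: on SPI command timeout the errno from ngbe_fmgr_cmd_op()
	 * is returned in place of flash data, so callers cannot tell a
	 * failed read from data that happens to equal that errno value.
	 */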
2201 	if (status)
2202 		return status;
2203 
2204 	return NGBE_READ_REG(hw, NGBE_SPI_DATA);
2205 }
2206 
2207 uint8_t
2208 ngbe_calculate_checksum(uint8_t *buffer, uint32_t length)
2209 {
2210 	uint32_t i;
2211 	uint8_t sum = 0;
2212 
2213 	if (!buffer)
2214 		return 0;
2215 
2216 	for (i = 0; i < length; i++)
2217 		sum += buffer[i];
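	/*
	 * Return the two's complement of the byte sum so that the buffer
	 * plus its checksum sums to zero mod 256; e.g. for bytes
	 * { 0x10, 0x20 } the sum is 0x30 and 0xd0 is returned.
	 */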
2218 	return (uint8_t)(0 - sum);
2219 }
2220 
2221 int
2222 ngbe_check_flash_load(struct ngbe_softc *sc, uint32_t check_bit)
2223 {
2224 	struct ngbe_hw *hw = &sc->hw;
2225 	uint32_t reg = 0;
2226 	int i, error = 0;
2227 
2228 	/* Proceed only if flash is present */
2229 	if (!(NGBE_READ_REG(hw, NGBE_SPI_STATUS) &
2230 	    NGBE_SPI_STATUS_FLASH_BYPASS)) {
2231 		/* Wait for hardware to finish loading flash */
2232 		for (i = 0; i < NGBE_MAX_FLASH_LOAD_POLL_TIME; i++) {
2233 			reg = NGBE_READ_REG(hw, NGBE_SPI_ILDR_STATUS);
2234 			if (!(reg & check_bit))
2235 				break;
2236 			msec_delay(200);
2237 		}
2238 		if (i == NGBE_MAX_FLASH_LOAD_POLL_TIME) {
2239 			error = ETIMEDOUT;
2240 			printf("%s: hardware loading flash failed\n",
2241 			    DEVNAME(sc));
2242 		}
2243 	}
2244 	return error;
2245 }
2246 
2247 int
2248 ngbe_check_internal_phy_id(struct ngbe_softc *sc)
2249 {
2250 	struct ngbe_hw *hw = &sc->hw;
2251 	uint16_t phy_id, phy_id_high, phy_id_low;
2252 
2253 	ngbe_gphy_wait_mdio_access_on(hw);
2254 
2255 	ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high);
2256 	phy_id = phy_id_high << 6;
2257 	ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low);
2258 	phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10;
2259 
2260 	if (NGBE_INTERNAL_PHY_ID != phy_id) {
2261 		printf("%s: internal phy id 0x%x not supported\n",
2262 		    DEVNAME(sc), phy_id);
2263 		return ENOTSUP;
2264 	} else
2265 		hw->phy.id = (uint32_t)phy_id;
2266 
2267 	return 0;
2268 }
2269 
2270 int
2271 ngbe_check_mac_link(struct ngbe_hw *hw, uint32_t *speed, int *link_up,
2272     int link_up_wait_to_complete)
2273 {
2274 	uint32_t status = 0;
2275 	uint16_t speed_sta, value = 0;
2276 	int i;
2277 
2278 	if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) {
2279 		*link_up = 1;
2280 		*speed = NGBE_LINK_SPEED_1GB_FULL;
2281 		return status;
2282 	}
2283 
2284 	if (link_up_wait_to_complete) {
2285 		for (i = 0; i < NGBE_LINK_UP_TIME; i++) {
2286 			status = hw->phy.ops.read_reg(hw,
2287 			    NGBE_MDIO_AUTO_NEG_STATUS,
2288 			    NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
2289 			if (!status && (value & 0x4)) {
2290 				*link_up = 1;
2291 				break;
2292 			} else
2293 				*link_up = 0;
2294 			msec_delay(100);
2295 		}
2296 	} else {
2297 		status = hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_STATUS,
2298 		    NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
2299 		if (!status && (value & 0x4))
2300 			*link_up = 1;
2301 		else
2302 			*link_up = 0;
2303 	}
2304 
2305 	speed_sta = value & 0x38;
2306 	if (*link_up) {
2307 		if (speed_sta == 0x28)
2308 			*speed = NGBE_LINK_SPEED_1GB_FULL;
2309 		else if (speed_sta == 0x18)
2310 			*speed = NGBE_LINK_SPEED_100_FULL;
2311 		else if (speed_sta == 0x8)
2312 			*speed = NGBE_LINK_SPEED_10_FULL;
2313 	} else
2314 		*speed = NGBE_LINK_SPEED_UNKNOWN;
2315 
2316 	return status;
2317 }
2318 
2319 int
2320 ngbe_check_mng_access(struct ngbe_hw *hw)
2321 {
2322 	if (!ngbe_mng_present(hw))
2323 		return 0;
2324 	return 1;
2325 }
2326 
2327 int
2328 ngbe_check_reset_blocked(struct ngbe_softc *sc)
2329 {
2330 	uint32_t mmngc;
2331 
2332 	mmngc = NGBE_READ_REG(&sc->hw, NGBE_MIS_ST);
2333 	if (mmngc & NGBE_MIS_ST_MNG_VETO) {
2334 		printf("%s: MNG_VETO bit detected\n", DEVNAME(sc));
2335 		return 1;
2336 	}
2337 
2338 	return 0;
2339 }
2340 
2341 void
2342 ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
2343 {
2344 	uint16_t i;
2345 
2346 	NGBE_READ_REG(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW);
2347 	NGBE_READ_REG(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW);
2348 	NGBE_READ_REG(hw, NGBE_RDB_LXONTXC);
2349 	NGBE_READ_REG(hw, NGBE_RDB_LXOFFTXC);
2350 	NGBE_READ_REG(hw, NGBE_MAC_LXOFFRXC);
2351 
2352 	for (i = 0; i < 8; i++) {
2353 		NGBE_WRITE_REG_MASK(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_UP,
2354 		    i << 16);
2355 		NGBE_READ_REG(hw, NGBE_MAC_PXOFFRXC);
2356 	}
2357 
2358 	NGBE_READ_REG(hw, NGBE_PX_GPRC);
2359 	NGBE_READ_REG(hw, NGBE_PX_GPTC);
2360 	NGBE_READ_REG(hw, NGBE_PX_GORC_MSB);
2361 	NGBE_READ_REG(hw, NGBE_PX_GOTC_MSB);
2362 
2363 	NGBE_READ_REG(hw, NGBE_RX_BC_FRAMES_GOOD_LOW);
2364 	NGBE_READ_REG(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD);
2365 	NGBE_READ_REG(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD);
2366 	NGBE_READ_REG(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);
2367 	NGBE_READ_REG(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW);
2368 	NGBE_READ_REG(hw, NGBE_TX_MC_FRAMES_GOOD_LOW);
2369 	NGBE_READ_REG(hw, NGBE_TX_BC_FRAMES_GOOD_LOW);
2370 	NGBE_READ_REG(hw, NGBE_RDM_DRP_PKT);
2371 }
2372 
2373 void
2374 ngbe_clear_vfta(struct ngbe_hw *hw)
2375 {
2376 	uint32_t offset;
2377 
2378 	for (offset = 0; offset < hw->mac.vft_size; offset++) {
2379 		NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_TBL(offset), 0);
2380 		/* Errata 5 */
2381 		hw->mac.vft_shadow[offset] = 0;
2382 	}
2383 
2384 	for (offset = 0; offset < NGBE_PSR_VLAN_SWC_ENTRIES; offset++) {
2385 		NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC_IDX, offset);
2386 		NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC, 0);
2387 		NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC_VM_L, 0);
2388 	}
2389 }
2390 
2391 void
2392 ngbe_configure_ivars(struct ngbe_softc *sc)
2393 {
2394 	struct ngbe_queue *nq = sc->queues;
2395 	uint32_t newitr;
2396 	int i;
2397 
2398 	/* Populate MSIX to EITR select */
2399 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITRSEL, 0);
2400 
2401 	newitr = (4000000 / NGBE_MAX_INTS_PER_SEC) & NGBE_MAX_EITR;
2402 	newitr |= NGBE_PX_ITR_CNT_WDIS;
2403 
2404 	for (i = 0; i < sc->sc_nqueues; i++, nq++) {
2405 		/* Rx queue entry */
2406 		ngbe_set_ivar(sc, i, nq->msix, 0);
2407 		/* Tx queue entry */
2408 		ngbe_set_ivar(sc, i, nq->msix, 1);
2409 		NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITR(nq->msix), newitr);
2410 	}
2411 
2412 	/* For the Link interrupt */
2413 	ngbe_set_ivar(sc, 0, sc->linkvec, -1);
2414 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITR(sc->linkvec), 1950);
2415 }
2416 
2417 void
2418 ngbe_configure_pb(struct ngbe_softc *sc)
2419 {
2420 	struct ngbe_hw *hw = &sc->hw;
2421 
2422 	hw->mac.ops.setup_rxpba(hw, 0, 0, PBA_STRATEGY_EQUAL);
2423 	ngbe_pbthresh_setup(sc);
2424 }
2425 
2426 void
2427 ngbe_disable_intr(struct ngbe_softc *sc)
2428 {
2429 	struct ngbe_queue *nq;
2430 	int i;
2431 
2432 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_MISC_IEN, 0);
2433 	for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++)
2434 		ngbe_disable_queue(sc, nq->msix);
2435 	NGBE_WRITE_FLUSH(&sc->hw);
2436 }
2437 
2438 int
2439 ngbe_disable_pcie_master(struct ngbe_softc *sc)
2440 {
2441 	int i, error = 0;
2442 
2443 	/* Exit if no master requests are pending */
2444 	if (!(NGBE_READ_REG(&sc->hw, NGBE_PX_TRANSACTION_PENDING)))
2445 		goto out;
2446 
2447 	/* Poll for master request bit to clear */
2448 	for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2449 		DELAY(100);
2450 		if (!(NGBE_READ_REG(&sc->hw, NGBE_PX_TRANSACTION_PENDING)))
2451 			goto out;
2452 	}
2453 	printf("%s: PCIe transaction pending bit did not clear\n",
2454 	    DEVNAME(sc));
2455 	error = ETIMEDOUT;
2456 out:
2457 	return error;
2458 }
2459 
2460 void
2461 ngbe_disable_queue(struct ngbe_softc *sc, uint32_t vector)
2462 {
2463 	uint64_t queue = 1ULL << vector;
2464 	uint32_t mask;
2465 
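	/*
	 * Only the low 32 bits of the vector bitmap are used; PX_IMS is
	 * a single 32-bit register, so a vector >= 32 would produce a
	 * zero mask and no write.
	 */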
2466 	mask = (queue & 0xffffffff);
2467 	if (mask)
2468 		NGBE_WRITE_REG(&sc->hw, NGBE_PX_IMS, mask);
2469 }
2470 
2471 void
2472 ngbe_disable_rx(struct ngbe_hw *hw)
2473 {
2474 	uint32_t rxctrl, psrctrl;
2475 
2476 	rxctrl = NGBE_READ_REG(hw, NGBE_RDB_PB_CTL);
2477 	if (rxctrl & NGBE_RDB_PB_CTL_PBEN) {
2478 		psrctrl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
2479 		if (psrctrl & NGBE_PSR_CTL_SW_EN) {
2480 			psrctrl &= ~NGBE_PSR_CTL_SW_EN;
2481 			NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctrl);
2482 			hw->mac.set_lben = 1;
2483 		} else
2484 			hw->mac.set_lben = 0;
2485 		rxctrl &= ~NGBE_RDB_PB_CTL_PBEN;
2486 		NGBE_WRITE_REG(hw, NGBE_RDB_PB_CTL, rxctrl);
2487 
2488 		NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE,
2489 		    0);
2490 	}
2491 }
2492 
2493 void
2494 ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
2495 {
2496 	uint32_t secrxreg;
2497 	int i;
2498 
2499 	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_RX_DIS,
2500 	    NGBE_RSEC_CTL_RX_DIS);
2501 	for (i = 0; i < 40; i++) {
2502 		secrxreg = NGBE_READ_REG(hw, NGBE_RSEC_ST);
2503 		if (secrxreg & NGBE_RSEC_ST_RSEC_RDY)
2504 			break;
2505 		else
2506 			DELAY(1000);
2507 	}
2508 }
2509 
2510 int
2511 ngbe_eepromcheck_cap(struct ngbe_softc *sc, uint16_t offset, uint32_t *data)
2512 {
2513 	struct ngbe_hw *hw = &sc->hw;
2514 	struct ngbe_hic_read_shadow_ram buffer;
2515 	uint32_t tmp;
2516 	int status;
2517 
2518 	buffer.hdr.req.cmd = FW_EEPROM_CHECK_STATUS;
2519 	buffer.hdr.req.buf_lenh = 0;
2520 	buffer.hdr.req.buf_lenl = 0;
2521 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
2522 
2523 	/* Convert offset from words to bytes */
2524 	buffer.address = 0;
2525 	/* one word */
2526 	buffer.length = 0;
2527 
2528 	status = ngbe_host_interface_command(sc, (uint32_t *)&buffer,
2529 	    sizeof(buffer), NGBE_HI_COMMAND_TIMEOUT, 0);
2530 	if (status)
2531 		return status;
2532 
2533 	if (ngbe_check_mng_access(hw)) {
2534 		tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 1);
2535 		if (tmp == NGBE_CHECKSUM_CAP_ST_PASS)
2536 			status = 0;
2537 		else
2538 			status = EINVAL;
2539 	} else
2540 		status = EINVAL;
2541 
2542 	return status;
2543 }
2544 
2545 void
2546 ngbe_enable_intr(struct ngbe_softc *sc)
2547 {
2548 	struct ngbe_hw *hw = &sc->hw;
2549 	struct ngbe_queue *nq;
2550 	uint32_t mask;
2551 	int i;
2552 
2553 	/* Enable misc interrupt */
2554 	mask = NGBE_PX_MISC_IEN_MASK;
2555 
2556 	mask |= NGBE_PX_MISC_IEN_OVER_HEAT;
2557 	NGBE_WRITE_REG(hw, NGBE_GPIO_DDR, 0x1);
2558 	NGBE_WRITE_REG(hw, NGBE_GPIO_INTEN, 0x3);
2559 	NGBE_WRITE_REG(hw, NGBE_GPIO_INTTYPE_LEVEL, 0x0);
2560 
2561 	NGBE_WRITE_REG(hw, NGBE_GPIO_POLARITY, 0x3);
2562 
2563 	NGBE_WRITE_REG(hw, NGBE_PX_MISC_IEN, mask);
2564 
2565 	/* Enable all queues */
2566 	for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++)
2567 		ngbe_enable_queue(sc, nq->msix);
2568 	NGBE_WRITE_FLUSH(hw);
2569 
2570 	ngbe_enable_queue(sc, sc->linkvec);
2571 }
2572 
2573 void
2574 ngbe_enable_queue(struct ngbe_softc *sc, uint32_t vector)
2575 {
2576 	uint64_t queue = 1ULL << vector;
2577 	uint32_t mask;
2578 
2579 	mask = (queue & 0xffffffff);
2580 	if (mask)
2581 		NGBE_WRITE_REG(&sc->hw, NGBE_PX_IMC, mask);
2582 }
2583 
2584 void
2585 ngbe_enable_rx(struct ngbe_hw *hw)
2586 {
2587 	uint32_t val;
2588 
2589 	/* Enable mac receiver */
2590 	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE,
2591 	    NGBE_MAC_RX_CFG_RE);
2592 
2593 	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, 0x2, 0);
2594 
2595 	NGBE_WRITE_REG_MASK(hw, NGBE_RDB_PB_CTL, NGBE_RDB_PB_CTL_PBEN,
2596 	    NGBE_RDB_PB_CTL_PBEN);
2597 
2598 	if (hw->mac.set_lben) {
2599 		val = NGBE_READ_REG(hw, NGBE_PSR_CTL);
2600 		val |= NGBE_PSR_CTL_SW_EN;
2601 		NGBE_WRITE_REG(hw, NGBE_PSR_CTL, val);
2602 		hw->mac.set_lben = 0;
2603 	}
2604 }
2605 
2606 void
2607 ngbe_enable_rx_dma(struct ngbe_hw *hw, uint32_t reg)
2608 {
2609 	/*
2610 	 * Workaround for emerald silicon errata when enabling the Rx datapath.
2611 	 * If traffic is incoming before we enable the Rx unit, it could hang
2612 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2613 	 * completely disabled prior to enabling the Rx unit.
2614 	 */
2615 	hw->mac.ops.disable_sec_rx_path(hw);
2616 
2617 	if (reg & NGBE_RDB_PB_CTL_PBEN)
2618 		hw->mac.ops.enable_rx(hw);
2619 	else
2620 		hw->mac.ops.disable_rx(hw);
2621 
2622 	hw->mac.ops.enable_sec_rx_path(hw);
2623 }
2624 
2625 void
2626 ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
2627 {
2628 	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_RX_DIS, 0);
2629 	NGBE_WRITE_FLUSH(hw);
2630 }
2631 
2632 int
2633 ngbe_encap(struct tx_ring *txr, struct mbuf *m)
2634 {
2635 	struct ngbe_softc *sc = txr->sc;
2636 	uint32_t olinfo_status = 0, cmd_type_len;
2637 	int i, j, ntxc;
2638 	int first, last = 0;
2639 	bus_dmamap_t map;
2640 	struct ngbe_tx_buf *txbuf;
2641 	union ngbe_tx_desc *txd = NULL;
2642 
2643 	/* Basic descriptor defines */
2644 	cmd_type_len = NGBE_TXD_DTYP_DATA | NGBE_TXD_IFCS;
2645 
2646 	/*
2647 	 * Important to capture the first descriptor
2648 	 * used because it will contain the index of
2649 	 * the one we tell the hardware to report back
2650 	 */
2651 	first = txr->next_avail_desc;
2652 	txbuf = &txr->tx_buffers[first];
2653 	map = txbuf->map;
2654 
2655 	/*
2656 	 * Set the appropriate offload context;
2657 	 * this will become the first descriptor.
2658 	 */
2659 	ntxc = ngbe_tx_ctx_setup(txr, m, &cmd_type_len, &olinfo_status);
2660 	if (ntxc == -1)
2661 		goto fail;
2662 
2663 	/*
2664 	 * Map the packet for DMA.
2665 	 */
2666 	switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m,
2667 	    BUS_DMA_NOWAIT)) {
2668 	case 0:
2669 		break;
2670 	case EFBIG:
2671 		if (m_defrag(m, M_NOWAIT) == 0 &&
2672 		    bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m,
2673 		    BUS_DMA_NOWAIT) == 0)
2674 			break;
2675 		/* FALLTHROUGH */
2676 	default:
2677 		return 0;
2678 	}
2679 
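	/*
	 * Data descriptors start after any context descriptor(s) set up
	 * by ngbe_tx_ctx_setup(); wrap the ring index by hand.
	 */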
2680 	i = txr->next_avail_desc + ntxc;
2681 	if (i >= sc->num_tx_desc)
2682 		i -= sc->num_tx_desc;
2683 
2684 	for (j = 0; j < map->dm_nsegs; j++) {
2685 		txd = &txr->tx_base[i];
2686 
2687 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
2688 		txd->read.cmd_type_len =
2689 		    htole32(cmd_type_len | map->dm_segs[j].ds_len);
2690 		txd->read.olinfo_status = htole32(olinfo_status);
2691 		last = i;
2692 
2693 		if (++i == sc->num_tx_desc)
2694 			i = 0;
2695 	}
2696 
2697 	txd->read.cmd_type_len |= htole32(NGBE_TXD_EOP | NGBE_TXD_RS);
2698 
2699 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
2700 	    BUS_DMASYNC_PREWRITE);
2701 
2702 	/* Set the index of the descriptor that will be marked done */
2703 	txbuf->m_head = m;
2704 	txbuf->eop_index = last;
2705 
2706 	txr->next_avail_desc = i;
2707 
2708 	return ntxc + j;
2709 
2710 fail:
2711 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
2712 	return 0;
2713 }
2714 
2715 int
2716 ngbe_get_buf(struct rx_ring *rxr, int i)
2717 {
2718 	struct ngbe_softc *sc = rxr->sc;
2719 	struct ngbe_rx_buf *rxbuf;
2720 	struct mbuf *m;
2721 	union ngbe_rx_desc *rxdesc;
2722 	int error;
2723 
2724 	rxbuf = &rxr->rx_buffers[i];
2725 	rxdesc = &rxr->rx_base[i];
2726 	if (rxbuf->buf) {
2727 		printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i);
2728 		return ENOBUFS;
2729 	}
2730 
2731 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
2732 	if (!m)
2733 		return ENOBUFS;
2734 
2735 	m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
2736 	m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2737 
2738 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m,
2739 	    BUS_DMA_NOWAIT);
2740 	if (error) {
2741 		m_freem(m);
2742 		return error;
2743 	}
2744 
2745 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2746 	    rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2747 	rxbuf->buf = m;
2748 
2749 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2750 
2751 	return 0;
2752 }
2753 
2754 void
2755 ngbe_get_bus_info(struct ngbe_softc *sc)
2756 {
2757 	struct ngbe_hw *hw = &sc->hw;
2758 	uint16_t link_status;
2759 
2760 	/* Get the negotiated link width and speed from PCI config space */
2761 	link_status = ngbe_read_pci_cfg_word(sc, NGBE_PCI_LINK_STATUS);
2762 
2763 	ngbe_set_pci_config_data(hw, link_status);
2764 }
2765 
2766 void
2767 ngbe_get_copper_link_capabilities(struct ngbe_hw *hw, uint32_t *speed,
2768     int *autoneg)
2769 {
2770 	*speed = 0;
2771 
2772 	if (hw->mac.autoneg)
2773 		*autoneg = 1;
2774 	else
2775 		*autoneg = 0;
2776 
2777 	*speed = NGBE_LINK_SPEED_10_FULL | NGBE_LINK_SPEED_100_FULL |
2778 	    NGBE_LINK_SPEED_1GB_FULL;
2779 }
2780 
2781 int
2782 ngbe_get_eeprom_semaphore(struct ngbe_softc *sc)
2783 {
2784 	struct ngbe_hw *hw = &sc->hw;
2785 	uint32_t swsm;
2786 	int i, timeout = 2000;
2787 	int status = ETIMEDOUT;
2788 
2789 	/* Get SMBI software semaphore between device drivers first */
2790 	for (i = 0; i < timeout; i++) {
2791 		/*
2792 		 * If the SMBI bit is 0 when we read it, then the bit will be
2793 		 * set and we have the semaphore.
2794 		 */
2795 		swsm = NGBE_READ_REG(hw, NGBE_MIS_SWSM);
2796 		if (!(swsm & NGBE_MIS_SWSM_SMBI)) {
2797 			status = 0;
2798 			break;
2799 		}
2800 		DELAY(50);
2801 	}
2802 
2803 	if (i == timeout) {
2804 		printf("%s: cannot access the eeprom - SMBI semaphore not "
2805 		    "granted\n", DEVNAME(sc));
2806 		/*
2807 		 * this release is particularly important because our attempts
2808 		 * above to get the semaphore may have succeeded, and if there
2809 		 * was a timeout, we should unconditionally clear the semaphore
2810 		 * bits to free the driver to make progress.
2811 		 */
2812 		ngbe_release_eeprom_semaphore(hw);
2813 		DELAY(50);
2814 
2815 		/*
2816 		 * One last try if the SMBI bit is 0 when we read it,
2817 		 * then the bit will be set and we have the semaphore.
2818 		 */
2819 		swsm = NGBE_READ_REG(hw, NGBE_MIS_SWSM);
2820 		if (!(swsm & NGBE_MIS_SWSM_SMBI))
2821 			status = 0;
2822 	}
2823 
2824 	return status;
2825 }
2826 
2827 void
2828 ngbe_get_hw_control(struct ngbe_hw *hw)
2829 {
2830 	/* Let firmware know the driver has taken over */
2831 	NGBE_WRITE_REG_MASK(hw, NGBE_CFG_PORT_CTL,
2832 	    NGBE_CFG_PORT_CTL_DRV_LOAD, NGBE_CFG_PORT_CTL_DRV_LOAD);
2833 }
2834 
2835 void
2836 ngbe_release_hw_control(struct ngbe_softc *sc)
2837 {
2838 	/* Let firmware take over control of hw. */
2839 	NGBE_WRITE_REG_MASK(&sc->hw, NGBE_CFG_PORT_CTL,
2840 	    NGBE_CFG_PORT_CTL_DRV_LOAD, 0);
2841 }
2842 
2843 void
2844 ngbe_get_mac_addr(struct ngbe_hw *hw, uint8_t *mac_addr)
2845 {
2846 	uint32_t rar_high, rar_low;
2847 	int i;
2848 
2849 	NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, 0);
2850 	rar_high = NGBE_READ_REG(hw, NGBE_PSR_MAC_SWC_AD_H);
2851 	rar_low = NGBE_READ_REG(hw, NGBE_PSR_MAC_SWC_AD_L);
2852 
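	/*
	 * The address is stored big-endian across the two registers;
	 * e.g. rar_high 0x0000a1b2 and rar_low 0xc3d4e5f6 yield the
	 * MAC address a1:b2:c3:d4:e5:f6.
	 */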
2853 	for (i = 0; i < 2; i++)
2854 		mac_addr[i] = (uint8_t)(rar_high >> (1 - i) * 8);
2855 
2856 	for (i = 0; i < 4; i++)
2857 		mac_addr[i + 2] = (uint8_t)(rar_low >> (3 - i) * 8);
2858 }
2859 
2860 enum ngbe_media_type
2861 ngbe_get_media_type(struct ngbe_hw *hw)
2862 {
2863 	enum ngbe_media_type media_type = ngbe_media_type_copper;
2864 
2865 	return media_type;
2866 }
2867 
2868 void
2869 ngbe_gphy_dis_eee(struct ngbe_hw *hw)
2870 {
2871 	uint16_t val = 0;
2872 
2873 	hw->phy.ops.write_reg(hw, 0x11, 0xa4b, 0x1110);
2874 	hw->phy.ops.write_reg(hw, MII_MMDACR, 0x0, MMDACR_FN_ADDRESS | 0x07);
2875 	hw->phy.ops.write_reg(hw, MII_MMDAADR, 0x0, 0x003c);
2876 	hw->phy.ops.write_reg(hw, MII_MMDACR, 0x0, MMDACR_FN_DATANPI | 0x07);
2877 	hw->phy.ops.write_reg(hw, MII_MMDAADR, 0x0, 0);
2878 
2879 	/* Disable 10/100M Half Duplex */
2880 	msec_delay(100);
2881 	hw->phy.ops.read_reg(hw, MII_ANAR, 0, &val);
2882 	val &= ~(ANAR_TX | ANAR_10);
2883 	hw->phy.ops.write_reg(hw, MII_ANAR, 0x0, val);
2884 }
2885 
2886 void
2887 ngbe_gphy_efuse_calibration(struct ngbe_softc *sc)
2888 {
2889 	struct ngbe_hw *hw = &sc->hw;
2890 	uint32_t efuse[2];
2891 
2892 	ngbe_gphy_wait_mdio_access_on(hw);
2893 
2894 	efuse[0] = sc->gphy_efuse[0];
2895 	efuse[1] = sc->gphy_efuse[1];
2896 
2897 	if (!efuse[0] && !efuse[1])
2898 		efuse[0] = efuse[1] = 0xffffffff;
2899 
2900 	/* Calibration */
2901 	efuse[0] |= 0xf0000100;
2902 	efuse[1] |= 0xff807fff;
2903 
2904 	/* EODR, Efuse Output Data Register */
2905 	ngbe_phy_write_reg(hw, 16, 0xa46, (efuse[0] >> 0) & 0xffff);
2906 	ngbe_phy_write_reg(hw, 17, 0xa46, (efuse[0] >> 16) & 0xffff);
2907 	ngbe_phy_write_reg(hw, 18, 0xa46, (efuse[1] >> 0) & 0xffff);
2908 	ngbe_phy_write_reg(hw, 19, 0xa46, (efuse[1] >> 16) & 0xffff);
2909 
2910 	/* Set efuse ready */
2911 	ngbe_phy_write_reg(hw, 20, 0xa46, 0x01);
2912 	ngbe_gphy_wait_mdio_access_on(hw);
2913 	ngbe_phy_write_reg(hw, 27, NGBE_INTERNAL_PHY_PAGE_OFFSET, 0x8011);
2914 	ngbe_phy_write_reg(hw, 28, NGBE_INTERNAL_PHY_PAGE_OFFSET, 0x5737);
2915 	ngbe_gphy_dis_eee(hw);
2916 }
2917 
2918 void
2919 ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw)
2920 {
2921 	uint16_t val = 0;
2922 	int i;
2923 
2924 	for (i = 0; i < 100; i++) {
2925 		ngbe_phy_read_reg(hw, 29, NGBE_INTERNAL_PHY_PAGE_OFFSET, &val);
2926 		if (val & 0x20)
2927 			break;
2928 		DELAY(1000);
2929 	}
2930 }
2931 
2932 void
2933 ngbe_handle_phy_event(struct ngbe_softc *sc)
2934 {
2935 	struct ngbe_hw *hw = &sc->hw;
2936 	uint32_t reg;
2937 
2938 	reg = NGBE_READ_REG(hw, NGBE_GPIO_INTSTATUS);
2939 	NGBE_WRITE_REG(hw, NGBE_GPIO_EOI, reg);
2940 	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA))
2941 		hw->phy.ops.check_event(sc);
2942 }
2943 
2944 int
2945 ngbe_host_interface_command(struct ngbe_softc *sc, uint32_t *buffer,
2946     uint32_t length, uint32_t timeout, int return_data)
2947 {
2948 	struct ngbe_hw *hw = &sc->hw;
2949 	uint32_t hicr, i, bi, dword_len;
2950 	uint32_t hdr_size = sizeof(struct ngbe_hic_hdr);
2951 	uint32_t buf[64] = {};
2952 	uint16_t buf_len;
2953 	int status = 0;
2954 
2955 	if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
2956 		printf("%s: buffer length failure\n", DEVNAME(sc));
2957 		return EINVAL;
2958 	}
2959 
2960 	if (hw->mac.ops.acquire_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB))
2961 		return EINVAL;
2962 
2963 	/* Calculate length in DWORDs; the length must be a multiple of a DWORD */
2964 	if ((length % (sizeof(uint32_t))) != 0) {
2965 		printf("%s: buffer length failure, not aligned to dword\n",
2966 		    DEVNAME(sc));
2967 		status = EINVAL;
2968 		goto rel_out;
2969 	}
2970 
2971 	if (ngbe_check_mng_access(hw)) {
2972 		hicr = NGBE_READ_REG(hw, NGBE_MNG_MBOX_CTL);
2973 		if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY))
2974 			printf("%s: fwrdy is set before command\n",
2975 			    DEVNAME(sc));
2976 	}
2977 
2978 	dword_len = length >> 2;
2979 
2980 	/*
2981 	 * The device driver writes the relevant command block
2982 	 * into the ram area.
2983 	 */
2984 	for (i = 0; i < dword_len; i++) {
2985 		if (ngbe_check_mng_access(hw)) {
2986 			NGBE_WRITE_REG_ARRAY(hw, NGBE_MNG_MBOX, i,
2987 			    htole32(buffer[i]));
2988 		} else {
2989 			status = EINVAL;
2990 			goto rel_out;
2991 		}
2992 	}
2993 
2994 	/* Setting this bit tells the ARC that a new command is pending. */
2995 	if (ngbe_check_mng_access(hw)) {
2996 		NGBE_WRITE_REG_MASK(hw, NGBE_MNG_MBOX_CTL,
2997 		    NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY);
2998 	} else {
2999 		status = EINVAL;
3000 		goto rel_out;
3001 	}
3002 
3003 	for (i = 0; i < timeout; i++) {
3004 		if (ngbe_check_mng_access(hw)) {
3005 			hicr = NGBE_READ_REG(hw, NGBE_MNG_MBOX_CTL);
3006 			if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY))
3007 				break;
3008 		}
3009 		msec_delay(1);
3010 	}
3011 
3012 	buf[0] = NGBE_READ_REG(hw, NGBE_MNG_MBOX);
3013 	/* Check command completion */
3014 	if (timeout != 0 && i == timeout) {
3015 		printf("%s: command failed with no valid status\n",
3016 		    DEVNAME(sc));
3017 		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
3018 			status = EINVAL;
3019 			goto rel_out;
3020 		}
3021 	}
3022 
3023 	if (!return_data)
3024 		goto rel_out;
3025 
3026 	/* Calculate length in DWORDs */
3027 	dword_len = hdr_size >> 2;
3028 
3029 	/* First pull in the header so we know the buffer length */
3030 	for (bi = 0; bi < dword_len; bi++) {
3031 		if (ngbe_check_mng_access(hw)) {
3032 			buffer[bi] = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, bi);
3033 			le32_to_cpus(&buffer[bi]);
3034 		} else {
3035 			status = EINVAL;
3036 			goto rel_out;
3037 		}
3038 	}
3039 
3040 	/* If there is anything in the data position, pull it in */
3041 	buf_len = ((struct ngbe_hic_hdr *)buffer)->buf_len;
3042 	if (buf_len == 0)
3043 		goto rel_out;
3044 
3045 	if (length < buf_len + hdr_size) {
3046 		printf("%s: buffer not large enough for reply message\n",
3047 		    DEVNAME(sc));
3048 		status = EINVAL;
3049 		goto rel_out;
3050 	}
3051 
3052 	/* Calculate length in DWORDs, add 3 for odd lengths */
3053 	dword_len = (buf_len + 3) >> 2;
3054 
3055 	/* Pull in the rest of the buffer (bi is where we left off) */
3056 	for (; bi <= dword_len; bi++) {
3057 		if (ngbe_check_mng_access(hw)) {
3058 			buffer[bi] = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, bi);
3059 			le32_to_cpus(&buffer[bi]);
3060 		} else {
3061 			status = EINVAL;
3062 			goto rel_out;
3063 		}
3064 	}
3065 
3066 rel_out:
3067 	hw->mac.ops.release_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB);
3068 	return status;
3069 }
3070 
3071 int
3072 ngbe_hpbthresh(struct ngbe_softc *sc)
3073 {
3074 	uint32_t dv_id, rx_pba;
3075 	int kb, link, marker, tc;
3076 
3077 	/* Calculate max LAN frame size */
3078 	tc = link = sc->sc_ac.ac_if.if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3079 	    NGBE_ETH_FRAMING;
3080 
3081 	/* Calculate delay value for device */
3082 	dv_id = NGBE_DV(link, tc);
3083 
3084 	/* Delay value is calculated in bit times; convert to KB */
3085 	kb = NGBE_BT2KB(dv_id);
3086 	rx_pba = NGBE_READ_REG(&sc->hw, NGBE_RDB_PB_SZ) >> NGBE_RDB_PB_SZ_SHIFT;
3087 
3088 	marker = rx_pba - kb;
3089 
3090 	return marker;
3091 }
3092 
3093 int
3094 ngbe_lpbthresh(struct ngbe_softc *sc)
3095 {
3096 	uint32_t dv_id;
3097 	int tc;
3098 
3099 	/* Calculate max LAN frame size */
3100 	tc = sc->sc_ac.ac_if.if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3101 
3102 	/* Calculate delay value for device */
3103 	dv_id = NGBE_LOW_DV(tc);
3104 
3105 	/* Delay value is calculated in bit times; convert to KB */
3106 	return NGBE_BT2KB(dv_id);
3107 }
3108 
3109 int
3110 ngbe_mng_present(struct ngbe_hw *hw)
3111 {
3112 	uint32_t fwsm;
3113 
3114 	fwsm = NGBE_READ_REG(hw, NGBE_MIS_ST);
3115 
3116 	return fwsm & NGBE_MIS_ST_MNG_INIT_DN;
3117 }
3118 
3119 int
3120 ngbe_mta_vector(struct ngbe_hw *hw, uint8_t *mc_addr)
3121 {
3122 	uint32_t vector = 0;
3123 	int rshift;
3124 
3125 	/* pick bits [47:32] of the address. */
3126 	vector = mc_addr[4] | (((uint16_t)mc_addr[5]) << 8);
3127 	switch (hw->mac.mc_filter_type) {
3128 	case 0:	/* bits 47:36 */
3129 	case 1:	/* bits 46:35 */
3130 	case 2:	/* bits 45:34 */
3131 		rshift = 4 - hw->mac.mc_filter_type;
3132 		break;
3133 	case 3:	/* bits 43:32 */
3134 		rshift = 0;
3135 		break;
3136 	default:	/* Invalid mc_filter_type */
3137 		vector = rshift = 0;
3138 		break;
3139 	}
3140 	vector = (vector >> rshift) & 0x0fff;
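	/*
	 * Worked example for mc_filter_type 0 (bits 47:36): the IPv4
	 * multicast MAC 01:00:5e:00:00:01 has mc_addr[4] = 0x00 and
	 * mc_addr[5] = 0x01, so vector starts as 0x0100, and after the
	 * shift by 4 the resulting filter vector is 0x010.
	 */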
3141 
3142 	return vector;
3143 }
3144 
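/*
 * Pause resolution in the style of IEEE 802.3 Annex 28B, with S the
 * symmetric and A the asymmetric pause bit of the local (adv) and
 * link partner (lp) advertisements:
 *
 *	adv S	adv A	lp S	lp A	resolved mode
 *	  1	  -	  1	  -	full (rx_pause if only rx
 *					pause was requested)
 *	  0	  1	  1	  1	tx_pause
 *	  1	  1	  0	  1	rx_pause
 *	otherwise			none
 */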
3145 int
3146 ngbe_negotiate_fc(struct ngbe_softc *sc, uint32_t adv_reg, uint32_t lp_reg,
3147     uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm)
3148 {
3149 	struct ngbe_hw *hw = &sc->hw;
3150 
3151 	if ((!(adv_reg)) || (!(lp_reg)))
3152 		return EINVAL;
3153 
3154 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
3155 		/*
3156 		 * Now we need to check if the user selected Rx-only
3157 		 * pause frames.  In this case, we had to advertise
3158 		 * FULL flow control because we could not advertise RX
3159 		 * ONLY. Hence, we must now check to see if we need to
3160 		 * turn OFF the TRANSMISSION of PAUSE frames.
3161 		 */
3162 		if (hw->fc.requested_mode == ngbe_fc_full)
3163 			hw->fc.current_mode = ngbe_fc_full;
3164 		else
3165 			hw->fc.current_mode = ngbe_fc_rx_pause;
3166 
3167 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3168 	    (lp_reg & lp_sym) && (lp_reg & lp_asm))
3169 		hw->fc.current_mode = ngbe_fc_tx_pause;
3170 	else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3171 	    !(lp_reg & lp_sym) && (lp_reg & lp_asm))
3172 		hw->fc.current_mode = ngbe_fc_rx_pause;
3173 	else
3174 		hw->fc.current_mode = ngbe_fc_none;
3175 
3176 	return 0;
3177 }
3178 
3179 int
3180 ngbe_non_sfp_link_config(struct ngbe_softc *sc)
3181 {
3182 	struct ngbe_hw *hw = &sc->hw;
3183 	uint32_t speed;
3184 	int error;
3185 
3186 	if (hw->mac.autoneg)
3187 		speed = hw->phy.autoneg_advertised;
3188 	else
3189 		speed = hw->phy.force_speed;
3190 
3191 	msec_delay(50);
3192 	if (hw->phy.type == ngbe_phy_internal) {
3193 		error = hw->phy.ops.setup_once(sc);
3194 		if (error)
3195 			return error;
3196 	}
3197 
3198 	error = hw->mac.ops.setup_link(sc, speed, 0);
3199 	return error;
3200 }
3201 
3202 void
3203 ngbe_pbthresh_setup(struct ngbe_softc *sc)
3204 {
3205 	struct ngbe_hw *hw = &sc->hw;
3206 
3207 	hw->fc.high_water = ngbe_hpbthresh(sc);
3208 	hw->fc.low_water = ngbe_lpbthresh(sc);
3209 
3210 	/* Low water marks must not be larger than high water marks */
3211 	if (hw->fc.low_water > hw->fc.high_water)
3212 		hw->fc.low_water = 0;
3213 }
3214 
3215 void
3216 ngbe_phy_check_event(struct ngbe_softc *sc)
3217 {
3218 	struct ngbe_hw *hw = &sc->hw;
3219 	uint16_t value = 0;
3220 
3221 	hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_LSC,
3222 	    NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
3223 }
3224 
3225 int
3226 ngbe_phy_check_overtemp(struct ngbe_hw *hw)
3227 {
3228 	uint32_t ts_state;
3229 	int status = 0;
3230 
3231 	/* Check that the LASI temp alarm status was triggered */
3232 	ts_state = NGBE_READ_REG(hw, NGBE_TS_ALARM_ST);
3233 
3234 	if (ts_state & NGBE_TS_ALARM_ST_ALARM)
3235 		status = 1;
3236 
3237 	return status;
3238 }
3239 
3240 void
3241 ngbe_phy_get_advertised_pause(struct ngbe_hw *hw, uint8_t *pause_bit)
3242 {
3243 	uint16_t value;
3244 
3245 	hw->phy.ops.read_reg(hw, 4, 0, &value);
3246 	*pause_bit = (uint8_t)((value >> 10) & 0x3);
3247 }
3248 
3249 void
3250 ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *hw, uint8_t *pause_bit)
3251 {
3252 	uint16_t value;
3253 
3254 	hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_LSC,
3255 	    NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
3256 	hw->phy.ops.read_reg(hw, MII_BMSR, 0, &value);
3257 	value = (value & BMSR_ACOMP) ? 1 : 0;
3258 
3259 	/* If AN complete then check lp adv pause */
3260 	hw->phy.ops.read_reg(hw, MII_ANLPAR, 0, &value);
3261 	*pause_bit = (uint8_t)((value >> 10) & 0x3);
3262 }
3263 
3264 int
3265 ngbe_phy_identify(struct ngbe_softc *sc)
3266 {
3267 	struct ngbe_hw *hw = &sc->hw;
3268 	int error;
3269 
3270 	switch(hw->phy.type) {
3271 	case ngbe_phy_internal:
3272 		error = ngbe_check_internal_phy_id(sc);
3273 		break;
3274 	default:
3275 		error = ENOTSUP;
3276 	}
3277 
3278 	return error;
3279 }
3280 
3281 int
3282 ngbe_phy_init(struct ngbe_softc *sc)
3283 {
3284 	struct ngbe_hw *hw = &sc->hw;
3285 	uint16_t value;
3286 	uint8_t lan_id = hw->bus.lan_id;
3287 	int error;
3288 
3289 	/* Set fwsw semaphore mask for phy first */
3290 	if (!hw->phy.phy_semaphore_mask)
3291 		hw->phy.phy_semaphore_mask = NGBE_MNG_SWFW_SYNC_SW_PHY;
3292 
3293 	/* Init phy.addr according to HW design */
3294 	hw->phy.addr = 0;
3295 
3296 	/* Identify the PHY or SFP module */
3297 	error = hw->phy.ops.identify(sc);
3298 	if (error == ENOTSUP)
3299 		return error;
3300 
3301 	/* Enable interrupts; only link status change and AN done are allowed */
3302 	if (hw->phy.type == ngbe_phy_internal) {
3303 		value = NGBE_INTPHY_INT_LSC | NGBE_INTPHY_INT_ANC;
3304 		hw->phy.ops.write_reg(hw, 0x12, 0xa42, value);
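	/*
	 * Bits 5:3 of the PHY status value encode the resolved speed;
	 * the encodings compared below (0x28, 0x18, 0x8) are assumed to
	 * match the internal PHY's datasheet.
	 */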
3305 		sc->gphy_efuse[0] =
3306 		    ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8);
3307 		sc->gphy_efuse[1] =
3308 		    ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8 + 4);
3309 	}
3310 
3311 	return error;
3312 }
3313 
3314 void
3315 ngbe_phy_led_ctrl(struct ngbe_softc *sc)
3316 {
3317 	struct ngbe_hw *hw = &sc->hw;
3318 	uint16_t value;
3319 
3320 	if (sc->led_conf != -1)
3321 		value = sc->led_conf & 0xffff;
3322 	else
3323 		value = 0x205b;
3324 	hw->phy.ops.write_reg(hw, 16, 0xd04, value);
3325 	hw->phy.ops.write_reg(hw, 17, 0xd04, 0);
3326 
3327 	hw->phy.ops.read_reg(hw, 18, 0xd04, &value);
3328 	if (sc->led_conf != -1) {
3329 		value &= ~0x73;
3330 		value |= sc->led_conf >> 16;
3331 	} else {
3332 		value &= 0xfffc;
3333 		/* Set ACT LED blinking mode to 60ms */
3334 		value |= 0x2;
3335 	}
3336 	hw->phy.ops.write_reg(hw, 18, 0xd04, value);
3337 }
3338 
3339 int
3340 ngbe_phy_led_oem_chk(struct ngbe_softc *sc, uint32_t *data)
3341 {
3342 	struct ngbe_hw *hw = &sc->hw;
3343 	struct ngbe_hic_read_shadow_ram buffer;
3344 	uint32_t tmp;
3345 	int status;
3346 
3347 	buffer.hdr.req.cmd = FW_PHY_LED_CONF;
3348 	buffer.hdr.req.buf_lenh = 0;
3349 	buffer.hdr.req.buf_lenl = 0;
3350 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3351 
3352 	/* Convert offset from words to bytes */
3353 	buffer.address = 0;
3354 	/* One word */
3355 	buffer.length = 0;
3356 
3357 	status = ngbe_host_interface_command(sc, (uint32_t *)&buffer,
3358 	    sizeof(buffer), NGBE_HI_COMMAND_TIMEOUT, 0);
3359 	if (status)
3360 		return status;
3361 
3362 	if (ngbe_check_mng_access(hw)) {
3363 		tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 1);
3364 		if (tmp == NGBE_CHECKSUM_CAP_ST_PASS) {
3365 			tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 2);
3366 			*data = tmp;
3367 			status = 0;
3368 		} else if (tmp == NGBE_CHECKSUM_CAP_ST_FAIL) {
3369 			*data = tmp;
3370 			status = EINVAL;
3371 		} else
3372 			status = EINVAL;
3373 	} else {
3374 		status = EINVAL;
3375 		return status;
3376 	}
3377 
3378 	return status;
3379 }
3380 
3381 int
3382 ngbe_phy_read_reg(struct ngbe_hw *hw, uint32_t off, uint32_t page,
3383     uint16_t *data)
3384 {
3385 	*data = 0;
3386 
3387 	if (!((page == NGBE_INTERNAL_PHY_PAGE_OFFSET) &&
3388 	    ((off == NGBE_MDIO_AUTO_NEG_STATUS) ||
3389 	    (off == NGBE_MDIO_AUTO_NEG_LSC)))) {
3390 		NGBE_WRITE_REG(hw,
3391 		    NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET),
3392 		    page);
3393 	}
3394 	*data = NGBE_READ_REG(hw, NGBE_PHY_CONFIG(off)) & 0xffff;
3395 
3396 	return 0;
3397 }
3398 
3399 int
3400 ngbe_phy_write_reg(struct ngbe_hw *hw, uint32_t off, uint32_t page,
3401     uint16_t data)
3402 {
3403 	if (!((page == NGBE_INTERNAL_PHY_PAGE_OFFSET) &&
3404 	    ((off == NGBE_MDIO_AUTO_NEG_STATUS) ||
3405 	    (off == NGBE_MDIO_AUTO_NEG_LSC)))) {
3406 		NGBE_WRITE_REG(hw,
3407 		    NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET),
3408 		    page);
3409 	}
3410 	NGBE_WRITE_REG(hw, NGBE_PHY_CONFIG(off), data);
3411 
3412 	return 0;
3413 }
3414 
3415 int
3416 ngbe_phy_reset(struct ngbe_softc *sc)
3417 {
3418 	struct ngbe_hw *hw = &sc->hw;
3419 	uint16_t value;
3420 	int i, status;
3421 
3422 	/* Only the internal PHY is supported */
3423 	if (hw->phy.type != ngbe_phy_internal) {
3424 		printf("%s: operation not supported\n", DEVNAME(sc));
3425 		return EINVAL;
3426 	}
3427 
3428 	/* Don't reset PHY if it's shut down due to overtemp. */
3429 	if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw) != 0) {
3430 		printf("%s: overtemp! skip phy reset\n", DEVNAME(sc));
3431 		return EINVAL;
3432 	}
3433 
3434 	/* Blocked by MNG FW so bail */
3435 	status = ngbe_check_reset_blocked(sc);
3436 	if (status)
3437 		return status;
3438 
3439 	value = NGBE_MDI_PHY_RESET;
3440 	status = hw->phy.ops.write_reg(hw, 0, 0, value);
3441 	for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) {
3442 		status = hw->phy.ops.read_reg(hw, 0, 0, &value);
3443 		if (!(value & NGBE_MDI_PHY_RESET))
3444 			break;
3445 		msec_delay(1);
3446 	}
3447 
3448 	if (i == NGBE_PHY_RST_WAIT_PERIOD) {
3449 		printf("%s: phy mode reset did not complete\n", DEVNAME(sc));
3450 		return ETIMEDOUT;
3451 	}
3452 
3453 	return status;
3454 }
3455 
3456 int
3457 ngbe_phy_set_pause_advertisement(struct ngbe_hw *hw, uint16_t pause_bit)
3458 {
3459 	uint16_t value;
3460 	int status;
3461 
3462 	status = hw->phy.ops.read_reg(hw, MII_ANAR, 0, &value);
3463 	value &= ~0xc00;
3464 	value |= pause_bit;
3465 	status = hw->phy.ops.write_reg(hw, MII_ANAR, 0, value);
3466 	return status;
3467 }
3468 
3469 int
3470 ngbe_phy_setup(struct ngbe_softc *sc)
3471 {
3472 	struct ngbe_hw *hw = &sc->hw;
3473 	uint16_t value = 0;
3474 	int i;
3475 
3476 	for (i = 0; i < 15; i++) {
3477 		if (!NGBE_READ_REG_MASK(hw, NGBE_MIS_ST,
3478 		    NGBE_MIS_ST_GPHY_IN_RST(hw->bus.lan_id)))
3479 			break;
3480 		msec_delay(1);
3481 	}
3482 	if (i == 15) {
3483 		printf("%s: gphy reset exceeds maximum time\n", DEVNAME(sc));
3484 		return ETIMEDOUT;
3485 	}
3486 
3487 	ngbe_gphy_efuse_calibration(sc);
3488 	hw->phy.ops.write_reg(hw, 20, 0xa46, 2);
3489 	ngbe_gphy_wait_mdio_access_on(hw);
3490 
3491 	for (i = 0; i < 100; i++) {
3492 		hw->phy.ops.read_reg(hw, 16, 0xa42, &value);
3493 		if ((value & 0x7) == 3)
3494 			break;
3495 		DELAY(1000);
3496 	}
3497 	if (i == 100) {
3498 		printf("%s: phy reset exceeds maximum time\n", DEVNAME(sc));
3499 		return ETIMEDOUT;
3500 	}
3501 
3502 	return 0;
3503 }
3504 
3505 int
3506 ngbe_phy_setup_link(struct ngbe_softc *sc, uint32_t speed, int need_restart)
3507 {
3508 	struct ngbe_hw *hw = &sc->hw;
3509 	uint16_t value = 0;
3510 	int status;
3511 
3512 	if (!hw->mac.autoneg) {
3513 		status = hw->phy.ops.reset(sc);
3514 		if (status) {
3515 			printf("%s: phy reset failed\n", DEVNAME(sc));
3516 			return status;
3517 		}
3518 
3519 		switch (speed) {
3520 		case NGBE_LINK_SPEED_1GB_FULL:
3521 			value = NGBE_MDI_PHY_SPEED_SELECT1;
3522 			break;
3523 		case NGBE_LINK_SPEED_100_FULL:
3524 			value = NGBE_MDI_PHY_SPEED_SELECT0;
3525 			break;
3526 		case NGBE_LINK_SPEED_10_FULL:
3527 			value = 0;
3528 			break;
3529 		default:
3530 			value = NGBE_MDI_PHY_SPEED_SELECT0 |
3531 			    NGBE_MDI_PHY_SPEED_SELECT1;
3532 			printf("%s: unknown speed = 0x%x\n",
3533 			    DEVNAME(sc), speed);
3534 			break;
3535 		}
3536 		/* Full duplex */
3537 		value |= NGBE_MDI_PHY_DUPLEX;
3538 		hw->phy.ops.write_reg(hw, 0, 0, value);
3539 
3540 		goto skip_an;
3541 	}
3542 
3543 	/* Disable 10/100M Half Duplex */
3544 	hw->phy.ops.read_reg(hw, 4, 0, &value);
3545 	value &= 0xff5f;
3546 	hw->phy.ops.write_reg(hw, 4, 0, value);
3547 
3548 	/* Set advertisement enables according to the input speed */
3549 	hw->phy.ops.read_reg(hw, 9, 0, &value);
3550 	if (!(speed & NGBE_LINK_SPEED_1GB_FULL))
3551 		value &= 0xfdff;
3552 	else
3553 		value |= 0x200;
3554 	hw->phy.ops.write_reg(hw, 9, 0, value);
3555 
3556 	hw->phy.ops.read_reg(hw, 4, 0, &value);
3557 	if (!(speed & NGBE_LINK_SPEED_100_FULL))
3558 		value &= 0xfeff;
3559 	else
3560 		value |= 0x100;
3561 	hw->phy.ops.write_reg(hw, 4, 0, value);
3562 
3563 	hw->phy.ops.read_reg(hw, 4, 0, &value);
3564 	if (!(speed & NGBE_LINK_SPEED_10_FULL))
3565 		value &= 0xffbf;
3566 	else
3567 		value |= 0x40;
3568 	hw->phy.ops.write_reg(hw, 4, 0, value);
3569 
3570 	/* Restart AN and wait for the AN done interrupt */
3571 	value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE;
3572 	hw->phy.ops.write_reg(hw, 0, 0, value);
3573 
3574 skip_an:
3575 	hw->phy.ops.phy_led_ctrl(sc);
3576 	hw->phy.ops.check_event(sc);
3577 
3578 	return 0;
3579 }
3580 
3581 uint16_t
3582 ngbe_read_pci_cfg_word(struct ngbe_softc *sc, uint32_t reg)
3583 {
3584 	struct ngbe_osdep *os = &sc->osdep;
3585 	struct pci_attach_args *pa = &os->os_pa;
3586 	uint32_t value;
3587 	int high = 0;
3588 
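	/*
	 * Config space is read as aligned 32-bit dwords; a word offset
	 * with bit 1 set selects the upper half, e.g. reading offset
	 * 0x72 fetches the dword at 0x70 and returns bits 31:16.
	 */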
3589 	if (reg & 0x2) {
3590 		high = 1;
3591 		reg &= ~0x2;
3592 	}
3593 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3594 
3595 	if (high)
3596 		value >>= 16;
3597 
3598 	return (value & 0xffff);
3599 }
3600 
3601 void
3602 ngbe_release_eeprom_semaphore(struct ngbe_hw *hw)
3603 {
3604 	if (ngbe_check_mng_access(hw)) {
3605 		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_SWSM, NGBE_MIS_SWSM_SMBI, 0);
3606 		NGBE_WRITE_FLUSH(hw);
3607 	}
3608 }
3609 
3610 int
3611 ngbe_acquire_swfw_sync(struct ngbe_softc *sc, uint32_t mask)
3612 {
3613 	struct ngbe_hw *hw = &sc->hw;
3614 	uint32_t gssr = 0;
3615 	uint32_t swmask = mask;
3616 	uint32_t fwmask = mask << 16;
3617 	int i, timeout = 200;
3618 
3619 	for (i = 0; i < timeout; i++) {
3620 		/*
3621 		 * SW NVM semaphore bit is used for access to all
3622 		 * SW_FW_SYNC bits (not just NVM)
3623 		 */
3624 		if (ngbe_get_eeprom_semaphore(sc))
3625 			return 1;
3626 		if (ngbe_check_mng_access(hw)) {
3627 			gssr = NGBE_READ_REG(hw, NGBE_MNG_SWFW_SYNC);
3628 			if (!(gssr & (fwmask | swmask))) {
3629 				gssr |= swmask;
3630 				NGBE_WRITE_REG(hw, NGBE_MNG_SWFW_SYNC, gssr);
3631 				ngbe_release_eeprom_semaphore(hw);
3632 				return 0;
3633 			} else {
3634 				/* Resource is currently in use by FW or SW */
3635 				ngbe_release_eeprom_semaphore(hw);
3636 				msec_delay(5);
3637 			}
3638 		}
3639 	}
3640 
3641 	printf("%s: semaphore failed\n", DEVNAME(sc));
3642 
3643 	/* If time expired clear the bits holding the lock and retry */
3644 	if (gssr & (fwmask | swmask))
3645 		ngbe_release_swfw_sync(sc, gssr & (fwmask | swmask));
3646 
3647 	msec_delay(5);
3648 	return 1;
3649 }
3650 
3651 void
3652 ngbe_release_swfw_sync(struct ngbe_softc *sc, uint32_t mask)
3653 {
3654 	struct ngbe_hw *hw = &sc->hw;
3655 
3656 	ngbe_get_eeprom_semaphore(sc);
3657 	if (ngbe_check_mng_access(hw))
3658 		NGBE_WRITE_REG_MASK(hw, NGBE_MNG_SWFW_SYNC, mask, 0);
3659 
3660 	ngbe_release_eeprom_semaphore(hw);
3661 }
3662 
3663 void
3664 ngbe_reset(struct ngbe_softc *sc)
3665 {
3666 	struct ngbe_hw *hw = &sc->hw;
3667 	int error;
3668 
3669 	error = hw->mac.ops.init_hw(sc);
3670 	switch (error) {
3671 	case 0:
3672 		break;
3673 	default:
3674 		printf("%s: hardware error\n", DEVNAME(sc));
3675 		break;
3676 	}
3677 }
3678 
3679 int
3680 ngbe_reset_hw(struct ngbe_softc *sc)
3681 {
3682 	struct ngbe_hw *hw = &sc->hw;
3683 	struct ngbe_mac_info *mac = &hw->mac;
3684 	uint32_t i, reset_status, rst_delay;
3685 	uint32_t reset = 0;
3686 	int status = 0;
3687 
3688 	status = hw->mac.ops.stop_adapter(sc);
3689 	if (status)
3690 		goto reset_hw_out;
3691 
3692 	/* Identify PHY and related function pointers */
3693 	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
3694 		status = hw->phy.ops.init(sc);
3695 		if (status)
3696 			goto reset_hw_out;
3697 	}
3698 
3699 	if (ngbe_get_media_type(hw) == ngbe_media_type_copper) {
3700 		mac->ops.setup_link = ngbe_setup_copper_link;
3701 		mac->ops.get_link_capabilities =
3702 		    ngbe_get_copper_link_capabilities;
3703 	}
3704 
3705 	/*
3706 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
3707 	 * If link reset is used when link is up, it might reset the PHY when
3708 	 * mng is using it.  If link is down or the flag to force full link
3709 	 * reset is set, then perform link reset.
3710 	 */
3711 	if (hw->force_full_reset) {
3712 		rst_delay = (NGBE_READ_REG(hw, NGBE_MIS_RST_ST) &
3713 		    NGBE_MIS_RST_ST_RST_INIT) >> NGBE_MIS_RST_ST_RST_INI_SHIFT;
3714 		if (hw->reset_type == NGBE_SW_RESET) {
3715 			for (i = 0; i < rst_delay + 20; i++) {
3716 				reset_status =
3717 				    NGBE_READ_REG(hw, NGBE_MIS_RST_ST);
3718 				if (!(reset_status &
3719 				    NGBE_MIS_RST_ST_DEV_RST_ST_MASK))
3720 					break;
3721 				msec_delay(100);
3722 			}
3723 
3724 			if (reset_status & NGBE_MIS_RST_ST_DEV_RST_ST_MASK) {
3725 				status = ETIMEDOUT;
3726 				printf("%s: software reset polling failed to "
3727 				    "complete\n", DEVNAME(sc));
3728 				goto reset_hw_out;
3729 			}
3730 			status = ngbe_check_flash_load(sc,
3731 			    NGBE_SPI_ILDR_STATUS_SW_RESET);
3732 			if (status)
3733 				goto reset_hw_out;
3734 		} else if (hw->reset_type == NGBE_GLOBAL_RESET) {
3735 			msec_delay(100 * rst_delay + 2000);
3736 		}
3737 	} else {
3738 		if (hw->bus.lan_id == 0)
3739 			reset = NGBE_MIS_RST_LAN0_RST;
3740 		else if (hw->bus.lan_id == 1)
3741 			reset = NGBE_MIS_RST_LAN1_RST;
3742 		else if (hw->bus.lan_id == 2)
3743 			reset = NGBE_MIS_RST_LAN2_RST;
3744 		else if (hw->bus.lan_id == 3)
3745 			reset = NGBE_MIS_RST_LAN3_RST;
3746 
3747 		NGBE_WRITE_REG(hw, NGBE_MIS_RST,
3748 		    reset | NGBE_READ_REG(hw, NGBE_MIS_RST));
3749 		NGBE_WRITE_FLUSH(hw);
3750 		msec_delay(15);
3751 	}
3752 
3753 	ngbe_reset_misc(hw);
3754 
3755 	/* Store the permanent mac address */
3756 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
3757 
3758 	/*
3759 	 * Store MAC address from RAR0, clear receive address registers, and
3760 	 * clear the multicast table.  Also reset num_rar_entries to 32,
3761 	 * since we modify this value when programming the SAN MAC address.
3762 	 */
3763 	hw->mac.num_rar_entries = NGBE_SP_RAR_ENTRIES;
3764 	hw->mac.ops.init_rx_addrs(sc);
3765 
3766 reset_hw_out:
3767 	return status;
3768 }
3769 
3770 void
3771 ngbe_reset_misc(struct ngbe_hw *hw)
3772 {
3773 	int i;
3774 
3775 	/* Receive packets of size > 2048 */
3776 	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_JE,
3777 	    NGBE_MAC_RX_CFG_JE);
3778 
3779 	/* Clear counters on read */
3780 	NGBE_WRITE_REG_MASK(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_RSTONRD,
3781 	    NGBE_MMC_CONTROL_RSTONRD);
3782 
3783 	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_FLOW_CTRL,
3784 	    NGBE_MAC_RX_FLOW_CTRL_RFE, NGBE_MAC_RX_FLOW_CTRL_RFE);
3785 
3786 	NGBE_WRITE_REG(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR);
3787 
3788 	NGBE_WRITE_REG_MASK(hw, NGBE_MIS_RST_ST, NGBE_MIS_RST_ST_RST_INIT,
3789 	    0x1e00);
3790 
3791 	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
3792 	NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_SEL, 0);
3793 	for (i = 0; i < 16; i++) {
3794 		NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_DW_L(i), 0);
3795 		NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_DW_H(i), 0);
3796 		NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_MSK(i), 0);
3797 	}
3798 	NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_SEL, 0);
3799 	for (i = 0; i < 16; i++) {
3800 		NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_DW_L(i), 0);
3801 		NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_DW_H(i), 0);
3802 		NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_MSK(i), 0);
3803 	}
3804 
3805 	/* Set pause frame dst mac addr (01:80:c2:00:00:01) */
3806 	NGBE_WRITE_REG(hw, NGBE_RDB_PFCMACDAL, 0xc2000001);
3807 	NGBE_WRITE_REG(hw, NGBE_RDB_PFCMACDAH, 0x0180);
3808 
3809 	NGBE_WRITE_REG(hw, NGBE_MDIO_CLAUSE_SELECT, 0xf);
3810 
3811 	ngbe_init_thermal_sensor_thresh(hw);
3812 }
3813 
3814 int
3815 ngbe_set_fw_drv_ver(struct ngbe_softc *sc, uint8_t maj, uint8_t min,
3816     uint8_t build, uint8_t sub)
3817 {
3818 	struct ngbe_hw *hw = &sc->hw;
3819 	struct ngbe_hic_drv_info fw_cmd;
3820 	int i, error = 0;
3821 
3822 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3823 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
3824 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
3825 	fw_cmd.port_num = (uint8_t)hw->bus.lan_id;
3826 	fw_cmd.ver_maj = maj;
3827 	fw_cmd.ver_min = min;
3828 	fw_cmd.ver_build = build;
3829 	fw_cmd.ver_sub = sub;
3830 	fw_cmd.hdr.checksum = 0;
3831 	fw_cmd.hdr.checksum = ngbe_calculate_checksum((uint8_t *)&fw_cmd,
3832 	    (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
3833 	fw_cmd.pad = 0;
3834 	fw_cmd.pad2 = 0;
3835 
3836 	DELAY(5000);
3837 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
3838 		error = ngbe_host_interface_command(sc, (uint32_t *)&fw_cmd,
3839 		    sizeof(fw_cmd), NGBE_HI_COMMAND_TIMEOUT, 1);
3840 		if (error)
3841 			continue;
3842 
3843 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
3844 		    FW_CEM_RESP_STATUS_SUCCESS)
3845 			error = 0;
3846 		else
3847 			error = EINVAL;
3848 		break;
3849 	}
3850 
3851 	return error;
3852 }
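
/*
 * Usage sketch (hypothetical values, not taken from this file): at
 * attach time a driver could report itself to the firmware with,
 * e.g., ngbe_set_fw_drv_ver(sc, 1, 0, 0, 0) for a "1.0.0.0" driver
 * version; the loop above already takes care of command retries.
 */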
3853 
3854 void
3855 ngbe_set_ivar(struct ngbe_softc *sc, uint16_t entry, uint16_t vector, int8_t
3856 type)
3857 {
3858 	struct ngbe_hw *hw = &sc->hw;
3859 	uint32_t ivar, index;
3860 
3861 	vector |= NGBE_PX_IVAR_ALLOC_VAL;
3862 
3863 	if (type == -1) {
3864 		/* other causes */
3865 		index = 0;
3866 		ivar = NGBE_READ_REG(hw, NGBE_PX_MISC_IVAR);
3867 		ivar &= ~((uint32_t)0xff << index);
3868 		ivar |= ((uint32_t)vector << index);
3869 		NGBE_WRITE_REG(hw, NGBE_PX_MISC_IVAR, ivar);
3870 	} else {
3871 		/* Tx or Rx causes */
3872 		index = ((16 * (entry & 1)) + (8 * type));
3873 		ivar = NGBE_READ_REG(hw, NGBE_PX_IVAR(entry >> 1));
3874 		ivar &= ~((uint32_t)0xff << index);
3875 		ivar |= ((uint32_t)vector << index);
3876 		NGBE_WRITE_REG(hw, NGBE_PX_IVAR(entry >> 1), ivar);
3877 	}
3878 }
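
/*
 * Illustrative note (inferred from the shifts above, not from a
 * datasheet): each 32-bit PX_IVAR register seems to carry four 8-bit
 * vector fields for an even/odd queue pair, e.g.
 *
 *	entry 4, Rx (type 0): index = 16 * (4 & 1) + 8 * 0 = 0
 *	entry 4, Tx (type 1): index = 16 * (4 & 1) + 8 * 1 = 8
 *	entry 5, Rx (type 0): index = 16 * (5 & 1) + 8 * 0 = 16
 *	entry 5, Tx (type 1): index = 16 * (5 & 1) + 8 * 1 = 24
 *
 * all four of which land in PX_IVAR(2).
 */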
3879 
3880 void
3881 ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw)
3882 {
3883 	struct ngbe_bus_info *bus = &hw->bus;
3884 	uint32_t reg = 0;
3885 
3886 	reg = NGBE_READ_REG(hw, NGBE_CFG_PORT_ST);
3887 	bus->lan_id = NGBE_CFG_PORT_ST_LAN_ID(reg);
3888 }
3889 
3890 void
3891 ngbe_set_mta(struct ngbe_hw *hw, uint8_t *mc_addr)
3892 {
3893 	uint32_t vector, vector_bit, vector_reg;
3894 
3895 	hw->addr_ctrl.mta_in_use++;
3896 
3897 	vector = ngbe_mta_vector(hw, mc_addr);
3898 
3899 	/*
3900 	 * The MTA is a register array of 128 32-bit registers. It is treated
3901 	 * like an array of 4096 bits.  We want to set bit
3902 	 * BitArray[vector_value]. So we figure out what register the bit is
3903 	 * in, read it, OR in the new bit, then write back the new value.  The
3904 	 * register is determined by the upper 7 bits of the vector value and
3905 	 * the bit within that register is determined by the lower 5 bits of
3906 	 * the value.
3907 	 */
3908 	vector_reg = (vector >> 5) & 0x7f;
3909 	vector_bit = vector & 0x1f;
3910 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
3911 }
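
/*
 * Worked example (illustrative only): for a hash vector of 0x523,
 * vector_reg = (0x523 >> 5) & 0x7f = 0x29 and vector_bit =
 * 0x523 & 0x1f = 3, so bit 3 of mta_shadow[0x29] gets set.
 */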
3912 
3913 void
3914 ngbe_set_pci_config_data(struct ngbe_hw *hw, uint16_t link_status)
3915 {
3916 	if (hw->bus.type == ngbe_bus_type_unknown)
3917 		hw->bus.type = ngbe_bus_type_pci_express;
3918 
3919 	switch (link_status & NGBE_PCI_LINK_WIDTH) {
3920 	case NGBE_PCI_LINK_WIDTH_1:
3921 		hw->bus.width = ngbe_bus_width_pcie_x1;
3922 		break;
3923 	case NGBE_PCI_LINK_WIDTH_2:
3924 		hw->bus.width = ngbe_bus_width_pcie_x2;
3925 		break;
3926 	case NGBE_PCI_LINK_WIDTH_4:
3927 		hw->bus.width = ngbe_bus_width_pcie_x4;
3928 		break;
3929 	case NGBE_PCI_LINK_WIDTH_8:
3930 		hw->bus.width = ngbe_bus_width_pcie_x8;
3931 		break;
3932 	default:
3933 		hw->bus.width = ngbe_bus_width_unknown;
3934 		break;
3935 	}
3936 
3937 	switch (link_status & NGBE_PCI_LINK_SPEED) {
3938 	case NGBE_PCI_LINK_SPEED_2500:
3939 		hw->bus.speed = ngbe_bus_speed_2500;
3940 		break;
3941 	case NGBE_PCI_LINK_SPEED_5000:
3942 		hw->bus.speed = ngbe_bus_speed_5000;
3943 		break;
3944 	case NGBE_PCI_LINK_SPEED_8000:
3945 		hw->bus.speed = ngbe_bus_speed_8000;
3946 		break;
3947 	default:
3948 		hw->bus.speed = ngbe_bus_speed_unknown;
3949 		break;
3950 	}
3951 }
3952 
3953 int
3954 ngbe_set_rar(struct ngbe_softc *sc, uint32_t index, uint8_t *addr,
3955     uint64_t pools, uint32_t enable_addr)
3956 {
3957 	struct ngbe_hw *hw = &sc->hw;
3958 	uint32_t rar_entries = hw->mac.num_rar_entries;
3959 	uint32_t rar_low, rar_high;
3960 
3961 	/* Make sure we are using a valid rar index range */
3962 	if (index >= rar_entries) {
3963 		printf("%s: RAR index %d is out of range\n",
3964 		    DEVNAME(sc), index);
3965 		return EINVAL;
3966 	}
3967 
3968 	/* Select the MAC address */
3969 	NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, index);
3970 
3971 	/* Setup VMDq pool mapping */
3972 	NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_VM, pools & 0xffffffff);
3973 
3974 	/*
3975 	 * HW expects these in little endian so we reverse the byte
3976 	 * order from network order (big endian) to little endian
3977 	 *
3978 	 * Some parts put the VMDq setting in the extra RAH bits,
3979 	 * so save everything except the lower 16 bits that hold part
3980 	 * of the address and the address valid bit.
3981 	 */
3982 	rar_low = ((uint32_t)addr[5] | ((uint32_t)addr[4] << 8) |
3983 	    ((uint32_t)addr[3] << 16) | ((uint32_t)addr[2] << 24));
3984 	rar_high = ((uint32_t)addr[1] | ((uint32_t)addr[0] << 8));
3985 	if (enable_addr != 0)
3986 		rar_high |= NGBE_PSR_MAC_SWC_AD_H_AV;
3987 
3988 	NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_L, rar_low);
3989 	NGBE_WRITE_REG_MASK(hw, NGBE_PSR_MAC_SWC_AD_H,
3990 	    (NGBE_PSR_MAC_SWC_AD_H_AD(~0) | NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
3991 	    NGBE_PSR_MAC_SWC_AD_H_AV), rar_high);
3992 
3993 	return 0;
3994 }
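
/*
 * Byte-order sketch (illustrative): with addr[] holding the MAC in
 * network order, 00:01:02:03:04:05 yields rar_low = 0x02030405 and
 * rar_high = 0x00000001 before the AV bit is OR'ed in.
 */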
3995 
3996 void
3997 ngbe_set_rx_drop_en(struct ngbe_softc *sc)
3998 {
3999 	uint32_t srrctl;
4000 	int i;
4001 
4002 	if ((sc->sc_nqueues > 1) &&
4003 	    !(sc->hw.fc.current_mode & ngbe_fc_tx_pause)) {
4004 		for (i = 0; i < sc->sc_nqueues; i++) {
4005 			srrctl = NGBE_READ_REG(&sc->hw, NGBE_PX_RR_CFG(i));
4006 			srrctl |= NGBE_PX_RR_CFG_DROP_EN;
4007 			NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_CFG(i), srrctl);
4008 		}
4010 	} else {
4011 		for (i = 0; i < sc->sc_nqueues; i++) {
4012 			srrctl = NGBE_READ_REG(&sc->hw, NGBE_PX_RR_CFG(i));
4013 			srrctl &= ~NGBE_PX_RR_CFG_DROP_EN;
4014 			NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_CFG(i), srrctl);
4015 		}
4016 	}
4017 }
4018 
4019 void
4020 ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, uint32_t headroom, int strategy)
4021 {
4022 	uint32_t pbsize = hw->mac.rx_pb_size;
4023 	uint32_t txpktsize, txpbthresh, rxpktsize = 0;
4024 
4025 	/* Reserve headroom */
4026 	pbsize -= headroom;
4027 
4028 	if (!num_pb)
4029 		num_pb = 1;
4030 
4031 	/*
4032 	 * Divide remaining packet buffer space amongst the number of packet
4033 	 * buffers requested using supplied strategy.
4034 	 */
4035 	switch (strategy) {
4036 	case PBA_STRATEGY_EQUAL:
4037 		rxpktsize = (pbsize / num_pb) << NGBE_RDB_PB_SZ_SHIFT;
4038 		NGBE_WRITE_REG(hw, NGBE_RDB_PB_SZ, rxpktsize);
4039 		break;
4040 	default:
4041 		break;
4042 	}
4043 
4044 	/* Only support an equally distributed Tx packet buffer strategy. */
4045 	txpktsize = NGBE_TDB_PB_SZ_MAX / num_pb;
4046 	txpbthresh = (txpktsize / 1024) - NGBE_TXPKT_SIZE_MAX;
4047 
4048 	NGBE_WRITE_REG(hw, NGBE_TDB_PB_SZ, txpktsize);
4049 	NGBE_WRITE_REG(hw, NGBE_TDM_PB_THRE, txpbthresh);
4050 }
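
/*
 * Worked example (illustrative; rx_pb_size is assumed to be in KB
 * units): with pbsize = 42, headroom = 0 and num_pb = 1, the whole
 * buffer goes to the single Rx packet buffer, i.e.
 * rxpktsize = (42 / 1) << NGBE_RDB_PB_SZ_SHIFT.
 */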
4051 
4052 int
4053 ngbe_setup_copper_link(struct ngbe_softc *sc, uint32_t speed, int need_restart)
4054 {
4055 	struct ngbe_hw *hw = &sc->hw;
4056 	int status = 0;
4057 
4058 	/* Setup the PHY according to input speed */
4059 	if ((hw->subsystem_device_id & OEM_MASK) != RGMII_FPGA)
4060 		status = hw->phy.ops.setup_link(sc, speed, need_restart);
4061 
4062 	return status;
4063 }
4064 
4065 int
4066 ngbe_setup_fc(struct ngbe_softc *sc)
4067 {
4068 	struct ngbe_hw *hw = &sc->hw;
4069 	uint16_t pcap_backplane = 0;
4070 	int error = 0;
4071 
4072 	/* Validate the requested mode */
4073 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) {
4074 		printf("%s: ngbe_fc_rx_pause not valid in strict IEEE mode\n",
4075 		    DEVNAME(sc));
4076 		error = EINVAL;
4077 		goto out;
4078 	}
4079 
4080 	/*
4081 	 * Gig parts do not have a word in the EEPROM to determine the
4082 	 * default flow control setting, so we explicitly set it to full.
4083 	 */
4084 	if (hw->fc.requested_mode == ngbe_fc_default)
4085 		hw->fc.requested_mode = ngbe_fc_full;
4086 
4087 	/*
4088 	 * The possible values of fc.requested_mode are:
4089 	 * 0: Flow control is completely disabled
4090 	 * 1: Rx flow control is enabled (we can receive pause frames,
4091 	 *    but not send pause frames).
4092 	 * 2: Tx flow control is enabled (we can send pause frames but
4093 	 *    we do not support receiving pause frames).
4094 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4095 	 * other: Invalid.
4096 	 */
4097 	switch (hw->fc.requested_mode) {
4098 	case ngbe_fc_none:
4099 		/* Flow control completely disabled by software override. */
4100 		break;
4101 	case ngbe_fc_tx_pause:
4102 		/*
4103 		 * Tx Flow control is enabled, and Rx Flow control is
4104 		 * disabled by software override.
4105 		 */
4106 		pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM;
4107 		break;
4108 	case ngbe_fc_rx_pause:
4109 		/*
4110 		 * Rx Flow control is enabled and Tx Flow control is
4111 		 * disabled by software override. Since there really
4112 		 * isn't a way to advertise that we are capable of RX
4113 		 * Pause ONLY, we will advertise that we support both
4114 		 * symmetric and asymmetric Rx PAUSE, as such we fall
4115 		 * through to the fc_full statement.  Later, we will
4116 		 * disable the adapter's ability to send PAUSE frames.
4117 		 */
4118 	case ngbe_fc_full:
4119 		/* Flow control (both Rx and Tx) is enabled by SW override. */
4120 		pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM |
4121 		    NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM;
4122 		break;
4123 	default:
4124 		printf("%s: flow control param set incorrectly\n", DEVNAME(sc));
4125 		error = EINVAL;
4126 		goto out;
4127 	}
4128 
4129 	/* AUTOC restart handles negotiation of 1G on backplane and copper. */
4130 	if ((hw->phy.media_type == ngbe_media_type_copper) &&
4131 	    ((hw->subsystem_device_id & OEM_MASK) != RGMII_FPGA))
4132 		error = hw->phy.ops.set_adv_pause(hw, pcap_backplane);
4133 out:
4134 	return error;
4135 }
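
/*
 * Summary of the advertisement bits set above (descriptive):
 *
 *	ngbe_fc_none:			no PAUSE bits
 *	ngbe_fc_tx_pause:		ASM only
 *	ngbe_fc_rx_pause, ngbe_fc_full:	SYM | ASM
 */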
4136 
4137 void
4138 ngbe_setup_gpie(struct ngbe_hw *hw)
4139 {
4140 	uint32_t gpie;
4141 
4142 	gpie = NGBE_PX_GPIE_MODEL;
4143 
4144 	/*
4145 	 * Use EIAM to auto-mask when an MSI-X interrupt is asserted;
4146 	 * this saves a register write for every interrupt.
4147 	 */
4148 	NGBE_WRITE_REG(hw, NGBE_PX_GPIE, gpie);
4149 }
4150 
4151 void
4152 ngbe_setup_isb(struct ngbe_softc *sc)
4153 {
4154 	uint64_t idba = sc->isbdma.dma_map->dm_segs[0].ds_addr;
4155 
4156 	/* Set ISB address */
4157 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ISB_ADDR_L,
4158 	    (idba & 0x00000000ffffffffULL));
4159 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ISB_ADDR_H, (idba >> 32));
4160 }
4161 
4162 void
4163 ngbe_setup_psrtype(struct ngbe_hw *hw)
4164 {
4165 	uint32_t psrtype;
4166 
4167 	/* PSRTYPE must be initialized in adapters */
4168 	psrtype = NGBE_RDB_PL_CFG_L4HDR | NGBE_RDB_PL_CFG_L3HDR |
4169 	    NGBE_RDB_PL_CFG_L2HDR | NGBE_RDB_PL_CFG_TUN_TUNHDR |
4170 	    NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR;
4171 
4172 	NGBE_WRITE_REG(hw, NGBE_RDB_PL_CFG(0), psrtype);
4173 }
4174 
4175 void
4176 ngbe_setup_vlan_hw_support(struct ngbe_softc *sc)
4177 {
4178 	struct ngbe_hw *hw = &sc->hw;
4179 	int i;
4180 
4181 	for (i = 0; i < sc->sc_nqueues; i++) {
4182 		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
4183 		    NGBE_PX_RR_CFG_VLAN, NGBE_PX_RR_CFG_VLAN);
4184 	}
4185 }
4186 
4187 int
4188 ngbe_start_hw(struct ngbe_softc *sc)
4189 {
4190 	struct ngbe_hw *hw = &sc->hw;
4191 	int error;
4192 
4193 	/* Set the media type */
4194 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
4195 
4196 	/* Clear the VLAN filter table */
4197 	hw->mac.ops.clear_vfta(hw);
4198 
4199 	/* Clear statistics registers */
4200 	hw->mac.ops.clear_hw_cntrs(hw);
4201 
4202 	NGBE_WRITE_FLUSH(hw);
4203 
4204 	/* Setup flow control */
4205 	error = hw->mac.ops.setup_fc(sc);
4206 
4207 	/* Clear adapter stopped flag */
4208 	hw->adapter_stopped = 0;
4209 
4210 	/* We need to run link autotry after the driver loads */
4211 	hw->mac.autotry_restart = 1;
4212 
4213 	return error;
4214 }
4215 
4216 int
4217 ngbe_stop_adapter(struct ngbe_softc *sc)
4218 {
4219 	struct ngbe_hw *hw = &sc->hw;
4220 	int i;
4221 
4222 	/*
4223 	 * Set the adapter_stopped flag so other driver functions stop touching
4224 	 * the hardware.
4225 	 */
4226 	hw->adapter_stopped = 1;
4227 
4228 	/* Disable the receive unit. */
4229 	hw->mac.ops.disable_rx(hw);
4230 
4231 	/* Clear any pending interrupts, flush previous writes. */
4232 	NGBE_WRITE_REG(hw, NGBE_PX_MISC_IC, 0xffffffff);
4233 
4234 	NGBE_WRITE_REG(hw, NGBE_BME_CTL, 0x3);
4235 
4236 	/* Disable the transmit unit.  Each queue must be disabled. */
4237 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4238 		NGBE_WRITE_REG_MASK(hw, NGBE_PX_TR_CFG(i),
4239 		    NGBE_PX_TR_CFG_SWFLSH | NGBE_PX_TR_CFG_ENABLE,
4240 		    NGBE_PX_TR_CFG_SWFLSH);
4241 	}
4242 
4243 	/* Disable the receive unit by stopping each queue */
4244 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4245 		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
4246 		    NGBE_PX_RR_CFG_RR_EN, 0);
4247 	}
4248 
4249 	/* Flush all queue disables. */
4250 	NGBE_WRITE_FLUSH(hw);
4251 	msec_delay(2);
4252 
4253 	return ngbe_disable_pcie_master(sc);
4254 }
4255 
4256 void
4257 ngbe_rx_checksum(uint32_t staterr, struct mbuf *m)
4258 {
4259 	if (staterr & NGBE_RXD_STAT_IPCS) {
4260 		if (!(staterr & NGBE_RXD_ERR_IPE))
4261 			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
4262 		else
4263 			m->m_pkthdr.csum_flags = 0;
4264 	}
4265 	if (staterr & NGBE_RXD_STAT_L4CS) {
4266 		if (!(staterr & NGBE_RXD_ERR_TCPE))
4267 			m->m_pkthdr.csum_flags |=
4268 			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
4269 	}
4270 }
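
/*
 * Note (descriptive): the driver does not distinguish TCP from UDP
 * here, so a single good L4 checksum indication sets both
 * M_TCP_CSUM_IN_OK and M_UDP_CSUM_IN_OK on the packet header.
 */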
4271 
4272 void
4273 ngbe_rxeof(struct rx_ring *rxr)
4274 {
4275 	struct ngbe_softc *sc = rxr->sc;
4276 	struct ifnet *ifp = &sc->sc_ac.ac_if;
4277 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
4278 	struct mbuf *mp, *m;
4279 	struct ngbe_rx_buf *rxbuf, *nxbuf;
4280 	union ngbe_rx_desc *rxdesc;
4281 	uint32_t staterr = 0;
4282 	uint16_t len, vtag;
4283 	uint8_t eop = 0;
4284 	int i, nextp;
4285 
4286 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
4287 		return;
4288 
4289 	i = rxr->next_to_check;
4290 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
4291 		uint32_t hash;
4292 		uint16_t hashtype;
4293 
4294 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4295 		    i * sizeof(union ngbe_rx_desc), sizeof(union ngbe_rx_desc),
4296 		    BUS_DMASYNC_POSTREAD);
4297 
4298 		rxdesc = &rxr->rx_base[i];
4299 		staterr = letoh32(rxdesc->wb.upper.status_error);
4300 		if (!ISSET(staterr, NGBE_RXD_STAT_DD)) {
4301 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4302 			    i * sizeof(union ngbe_rx_desc),
4303 			    sizeof(union ngbe_rx_desc), BUS_DMASYNC_PREREAD);
4304 			break;
4305 		}
4306 
4307 		/* Zero out the receive descriptor's status. */
4308 		rxdesc->wb.upper.status_error = 0;
4309 		rxbuf = &rxr->rx_buffers[i];
4310 
4311 		/* Pull the mbuf off the ring. */
4312 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
4313 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
4314 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
4315 
4316 		mp = rxbuf->buf;
4317 		len = letoh16(rxdesc->wb.upper.length);
4318 		vtag = letoh16(rxdesc->wb.upper.vlan);
4319 		eop = ((staterr & NGBE_RXD_STAT_EOP) != 0);
4320 		hash = letoh32(rxdesc->wb.lower.hi_dword.rss);
4321 		hashtype = letoh16(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
4322 		    NGBE_RXD_RSSTYPE_MASK;
4323 
4324 		if (staterr & NGBE_RXD_ERR_RXE) {
4325 			if (rxbuf->fmp) {
4326 				m_freem(rxbuf->fmp);
4327 				rxbuf->fmp = NULL;
4328 			}
4329 
4330 			m_freem(mp);
4331 			rxbuf->buf = NULL;
4332 			goto next_desc;
4333 		}
4334 
4335 		if (mp == NULL) {
4336 			panic("%s: ngbe_rxeof: NULL mbuf in slot %d "
4337 			    "(nrx %d, filled %d)", DEVNAME(sc), i,
4338 			    if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled);
4339 		}
4340 
4341 		if (!eop) {
4342 			/*
4343 			 * Figure out the next descriptor of this frame.
4344 			 */
4345 			nextp = i + 1;
4346 			if (nextp == sc->num_rx_desc)
4347 				nextp = 0;
4348 			nxbuf = &rxr->rx_buffers[nextp];
4349 			/* prefetch(nxbuf); */
4350 		}
4351 
4352 		mp->m_len = len;
4353 
4354 		m = rxbuf->fmp;
4355 		rxbuf->buf = rxbuf->fmp = NULL;
4356 
4357 		if (m != NULL)
4358 			m->m_pkthdr.len += mp->m_len;
4359 		else {
4360 			m = mp;
4361 			m->m_pkthdr.len = mp->m_len;
4362 #if NVLAN > 0
4363 			if (staterr & NGBE_RXD_STAT_VP) {
4364 				m->m_pkthdr.ether_vtag = vtag;
4365 				m->m_flags |= M_VLANTAG;
4366 			}
4367 #endif
4368 		}
4369 
4370 		/* Pass the head pointer on */
4371 		if (eop == 0) {
4372 			nxbuf->fmp = m;
4373 			m = NULL;
4374 			mp->m_next = nxbuf->buf;
4375 		} else {
4376 			ngbe_rx_checksum(staterr, m);
4377 
4378 			if (hashtype != NGBE_RXD_RSSTYPE_NONE) {
4379 				m->m_pkthdr.ph_flowid = hash;
4380 				SET(m->m_pkthdr.csum_flags, M_FLOWID);
4381 			}
4382 
4383 			ml_enqueue(&ml, m);
4384 		}
4385 next_desc:
4386 		if_rxr_put(&rxr->rx_ring, 1);
4387 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4388 		    i * sizeof(union ngbe_rx_desc), sizeof(union ngbe_rx_desc),
4389 		    BUS_DMASYNC_PREREAD);
4390 
4391 		/* Advance our pointers to the next descriptor. */
4392 		if (++i == sc->num_rx_desc)
4393 			i = 0;
4394 	}
4395 	rxr->next_to_check = i;
4396 
4397 	if (ifiq_input(rxr->ifiq, &ml))
4398 		if_rxr_livelocked(&rxr->rx_ring);
4402 }
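
/*
 * Note (descriptive): frames spanning several descriptors are
 * stitched together through rxbuf->fmp; the head mbuf is parked in
 * the next slot's fmp until the EOP descriptor arrives, and only
 * then is the completed chain enqueued on the mbuf list.
 */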
4403 
4404 void
4405 ngbe_rxrefill(void *xrxr)
4406 {
4407 	struct rx_ring *rxr = xrxr;
4408 	struct ngbe_softc *sc = rxr->sc;
4409 
4410 	if (ngbe_rxfill(rxr))
4411 		NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_WP(rxr->me),
4412 		    rxr->last_desc_filled);
4413 	else if (if_rxr_inuse(&rxr->rx_ring) == 0)
4414 		timeout_add(&rxr->rx_refill, 1);
4415 }
4416 
4417 int
4418 ngbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *m, uint32_t *cmd_type_len,
4419     uint32_t *olinfo_status)
4420 {
4421 	struct ngbe_tx_context_desc *txd;
4422 	struct ngbe_tx_buf *tx_buffer;
4423 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
4424 	int ctxd = txr->next_avail_desc;
4425 	int offload = 0;
4426 
4427 	/* Indicate the whole packet as payload when not doing TSO */
4428 	*olinfo_status |= m->m_pkthdr.len << NGBE_TXD_PAYLEN_SHIFT;
4429 
4430 #if NVLAN > 0
4431 	if (ISSET(m->m_flags, M_VLANTAG)) {
4432 		uint32_t vtag = m->m_pkthdr.ether_vtag;
4433 		vlan_macip_lens |= (vtag << NGBE_TXD_VLAN_SHIFT);
4434 		*cmd_type_len |= NGBE_TXD_VLE;
4435 		offload |= 1;
4436 	}
4437 #endif
4438 
4439 	if (!offload)
4440 		return 0;
4441 
4442 	txd = (struct ngbe_tx_context_desc *)&txr->tx_base[ctxd];
4443 	tx_buffer = &txr->tx_buffers[ctxd];
4444 
4445 	type_tucmd_mlhl |= NGBE_TXD_DTYP_CTXT;
4446 
4447 	/* Now copy bits into descriptor */
4448 	txd->vlan_macip_lens = htole32(vlan_macip_lens);
4449 	txd->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
4450 	txd->seqnum_seed = htole32(0);
4451 	txd->mss_l4len_idx = htole32(0);
4452 
4453 	tx_buffer->m_head = NULL;
4454 	tx_buffer->eop_index = -1;
4455 
4456 	return 1;
4457 }
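
/*
 * Note (descriptive): the return value says whether a context
 * descriptor was consumed (1) or not (0), presumably so the caller
 * can advance its producer index past the extra descriptor.
 */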
4458 
4459 void
4460 ngbe_txeof(struct tx_ring *txr)
4461 {
4462 	struct ngbe_softc *sc = txr->sc;
4463 	struct ifqueue *ifq = txr->ifq;
4464 	struct ifnet *ifp = &sc->sc_ac.ac_if;
4465 	struct ngbe_tx_buf *tx_buffer;
4466 	union ngbe_tx_desc *tx_desc;
4467 	unsigned int prod, cons, last;
4468 
4469 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
4470 		return;
4471 
4472 	prod = txr->next_avail_desc;
4473 	cons = txr->next_to_clean;
4474 
4475 	if (prod == cons)
4476 		return;
4477 
4478 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
4479 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
4480 
4481 	for (;;) {
4482 		tx_buffer = &txr->tx_buffers[cons];
4483 		last = tx_buffer->eop_index;
4484 		tx_desc = (union ngbe_tx_desc *)&txr->tx_base[last];
4485 
4486 		if (!ISSET(tx_desc->wb.status, NGBE_TXD_STAT_DD))
4487 			break;
4488 
4489 		bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
4490 		    0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4491 		bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
4492 		m_freem(tx_buffer->m_head);
4493 
4494 		tx_buffer->m_head = NULL;
4495 		tx_buffer->eop_index = -1;
4496 
4497 		cons = last + 1;
4498 		if (cons == sc->num_tx_desc)
4499 			cons = 0;
4500 		if (prod == cons) {
4501 			/* All clean, turn off the timer */
4502 			ifp->if_timer = 0;
4503 			break;
4504 		}
4505 	}
4506 
4507 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
4508 	    0, txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
4509 
4510 	txr->next_to_clean = cons;
4511 
4512 	if (ifq_is_oactive(ifq))
4513 		ifq_restart(ifq);
4514 }
4515 
4516 void
4517 ngbe_update_mc_addr_list(struct ngbe_hw *hw, uint8_t *mc_addr_list,
4518     uint32_t mc_addr_count, ngbe_mc_addr_itr next, int clear)
4519 {
4520 	uint32_t i, psrctl, vmdq;
4521 
4522 	/*
4523 	 * Set the new number of MC addresses that we are being requested to
4524 	 * use.
4525 	 */
4526 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
4527 	hw->addr_ctrl.mta_in_use = 0;
4528 
4529 	/* Clear mta_shadow */
4530 	if (clear)
4531 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
4532 
4533 	/* Update mta_shadow */
4534 	for (i = 0; i < mc_addr_count; i++)
4535 		ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
4536 
4537 	/* Enable mta */
4538 	for (i = 0; i < hw->mac.mcft_size; i++)
4539 		NGBE_WRITE_REG_ARRAY(hw, NGBE_PSR_MC_TBL(0), i,
4540 		    hw->mac.mta_shadow[i]);
4541 
4542 	if (hw->addr_ctrl.mta_in_use > 0) {
4543 		psrctl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
4544 		psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
4545 		psrctl |= NGBE_PSR_CTL_MFE |
4546 		    (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT);
4547 		NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctl);
4548 	}
4549 }
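
/*
 * Note (descriptive): `next' is an iterator callback; each call is
 * expected to return the current multicast address and advance
 * mc_addr_list, so callers can feed addresses from either a flat
 * array or a linked list through the same loop.
 */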
4550 
4551 int
4552 ngbe_validate_mac_addr(uint8_t *mac_addr)
4553 {
4554 	int status = 0;
4555 
4556 	/* Make sure it is not a multicast address */
4557 	if (NGBE_IS_MULTICAST(mac_addr))
4558 		status = EINVAL;
4559 	/* Not a broadcast address */
4560 	else if (NGBE_IS_BROADCAST(mac_addr))
4561 		status = EINVAL;
4562 	/* Reject the zero address */
4563 	else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
4564 	    mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
4565 		status = EINVAL;
4566 
4567 	return status;
4568 }
4569