xref: /openbsd/sys/dev/pci/if_ngbe.c (revision 0f9891f1)
1*0f9891f1Sjsg /*	$OpenBSD: if_ngbe.c,v 1.5 2024/05/24 06:02:56 jsg Exp $	*/
25cd48f1eSkevlo 
35cd48f1eSkevlo /*
45cd48f1eSkevlo  * Copyright (c) 2015-2017 Beijing WangXun Technology Co., Ltd.
55cd48f1eSkevlo  * Copyright (c) 2023 Kevin Lo <kevlo@openbsd.org>
65cd48f1eSkevlo  *
75cd48f1eSkevlo  * Permission to use, copy, modify, and distribute this software for any
85cd48f1eSkevlo  * purpose with or without fee is hereby granted, provided that the above
95cd48f1eSkevlo  * copyright notice and this permission notice appear in all copies.
105cd48f1eSkevlo  *
115cd48f1eSkevlo  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
125cd48f1eSkevlo  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
135cd48f1eSkevlo  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
145cd48f1eSkevlo  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
155cd48f1eSkevlo  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
165cd48f1eSkevlo  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
175cd48f1eSkevlo  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
185cd48f1eSkevlo  */
195cd48f1eSkevlo 
205cd48f1eSkevlo #include "bpfilter.h"
215cd48f1eSkevlo #include "vlan.h"
225cd48f1eSkevlo 
235cd48f1eSkevlo #include <sys/param.h>
245cd48f1eSkevlo #include <sys/systm.h>
255cd48f1eSkevlo #include <sys/sockio.h>
265cd48f1eSkevlo #include <sys/mbuf.h>
275cd48f1eSkevlo #include <sys/malloc.h>
285cd48f1eSkevlo #include <sys/device.h>
295cd48f1eSkevlo #include <sys/endian.h>
305cd48f1eSkevlo #include <sys/intrmap.h>
315cd48f1eSkevlo 
325cd48f1eSkevlo #include <net/if.h>
335cd48f1eSkevlo #include <net/if_media.h>
345cd48f1eSkevlo #include <net/toeplitz.h>
355cd48f1eSkevlo 
365cd48f1eSkevlo #include <netinet/in.h>
375cd48f1eSkevlo #include <netinet/if_ether.h>
385cd48f1eSkevlo 
395cd48f1eSkevlo #if NBPFILTER > 0
405cd48f1eSkevlo #include <net/bpf.h>
415cd48f1eSkevlo #endif
425cd48f1eSkevlo 
435cd48f1eSkevlo #include <machine/bus.h>
445cd48f1eSkevlo #include <machine/intr.h>
455cd48f1eSkevlo 
465cd48f1eSkevlo #include <dev/mii/mii.h>
475cd48f1eSkevlo 
485cd48f1eSkevlo #include <dev/pci/pcivar.h>
495cd48f1eSkevlo #include <dev/pci/pcireg.h>
505cd48f1eSkevlo #include <dev/pci/pcidevs.h>
515cd48f1eSkevlo 
525cd48f1eSkevlo #include <dev/pci/if_ngbereg.h>
535cd48f1eSkevlo 
/* PCI IDs of supported WangXun WX1860-family 1GbE controllers. */
const struct pci_matchid ngbe_devices[] = {
	{ PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860A2 },
	{ PCI_VENDOR_WANGXUN, PCI_PRODUCT_WANGXUN_WX1860AL1 }
};
585cd48f1eSkevlo 
595cd48f1eSkevlo int			ngbe_match(struct device *, void *, void *);
605cd48f1eSkevlo void			ngbe_attach(struct device *, struct device *, void *);
615cd48f1eSkevlo int			ngbe_detach(struct device *, int);
625cd48f1eSkevlo void			ngbe_init(void *);
635cd48f1eSkevlo int			ngbe_ioctl(struct ifnet *, u_long, caddr_t);
645cd48f1eSkevlo int			ngbe_media_change(struct ifnet *);
655cd48f1eSkevlo void			ngbe_media_status(struct ifnet *, struct ifmediareq *);
665cd48f1eSkevlo int			ngbe_rxfill(struct rx_ring *);
675cd48f1eSkevlo int			ngbe_rxrinfo(struct ngbe_softc *, struct if_rxrinfo *);
685cd48f1eSkevlo void			ngbe_start(struct ifqueue *);
695cd48f1eSkevlo void			ngbe_stop(struct ngbe_softc *);
705cd48f1eSkevlo void			ngbe_update_link_status(struct ngbe_softc *);
715cd48f1eSkevlo void			ngbe_watchdog(struct ifnet *);
725cd48f1eSkevlo int			ngbe_allocate_pci_resources(struct ngbe_softc *);
735cd48f1eSkevlo void			ngbe_free_pci_resources(struct ngbe_softc *);
745cd48f1eSkevlo int			ngbe_allocate_msix(struct ngbe_softc *);
755cd48f1eSkevlo void			ngbe_setup_interface(struct ngbe_softc *);
765cd48f1eSkevlo int			ngbe_setup_msix(struct ngbe_softc *);
775cd48f1eSkevlo int			ngbe_dma_malloc(struct ngbe_softc *, bus_size_t,
785cd48f1eSkevlo 			    struct ngbe_dma_alloc *);
795cd48f1eSkevlo void			ngbe_dma_free(struct ngbe_softc *,
805cd48f1eSkevlo 			    struct ngbe_dma_alloc *);
815cd48f1eSkevlo int			ngbe_allocate_isb(struct ngbe_softc *);
825cd48f1eSkevlo void			ngbe_free_isb(struct ngbe_softc *);
835cd48f1eSkevlo int			ngbe_allocate_queues(struct ngbe_softc *);
845cd48f1eSkevlo void			ngbe_free_receive_structures(struct ngbe_softc *);
855cd48f1eSkevlo void			ngbe_free_receive_buffers(struct rx_ring *);
865cd48f1eSkevlo void			ngbe_free_transmit_structures(struct ngbe_softc *);
875cd48f1eSkevlo void			ngbe_free_transmit_buffers(struct tx_ring *);
885cd48f1eSkevlo int			ngbe_allocate_receive_buffers(struct rx_ring *);
895cd48f1eSkevlo int			ngbe_allocate_transmit_buffers(struct tx_ring *);
905cd48f1eSkevlo int			ngbe_setup_receive_ring(struct rx_ring *);
915cd48f1eSkevlo int			ngbe_setup_transmit_ring(struct tx_ring *);
925cd48f1eSkevlo int			ngbe_setup_receive_structures(struct ngbe_softc *);
935cd48f1eSkevlo int			ngbe_setup_transmit_structures(struct ngbe_softc *);
945cd48f1eSkevlo uint8_t *		ngbe_addr_list_itr(struct ngbe_hw *, uint8_t **,
955cd48f1eSkevlo 			    uint32_t *);
965cd48f1eSkevlo void			ngbe_iff(struct ngbe_softc *);
975cd48f1eSkevlo int			ngbe_initialize_receive_unit(struct ngbe_softc *);
985cd48f1eSkevlo void			ngbe_initialize_rss_mapping(struct ngbe_softc *);
995cd48f1eSkevlo int			ngbe_initialize_transmit_unit(struct ngbe_softc *);
1005cd48f1eSkevlo int			ngbe_intr_link(void *);
1015cd48f1eSkevlo int			ngbe_intr_queue(void *);
1025cd48f1eSkevlo void			ngbe_init_eeprom_params(struct ngbe_hw *);
1035cd48f1eSkevlo int			ngbe_init_hw(struct ngbe_softc *);
1045cd48f1eSkevlo void			ngbe_init_ops(struct ngbe_hw *);
1055cd48f1eSkevlo void			ngbe_init_rx_addrs(struct ngbe_softc *);
1065cd48f1eSkevlo void			ngbe_init_shared_code(struct ngbe_softc *);
1075cd48f1eSkevlo void			ngbe_init_thermal_sensor_thresh(struct ngbe_hw *);
1085cd48f1eSkevlo void			ngbe_init_uta_tables(struct ngbe_hw *);
1095cd48f1eSkevlo void			ngbe_fc_autoneg(struct ngbe_softc *);
1105cd48f1eSkevlo int			ngbe_fc_autoneg_copper(struct ngbe_softc *);
1115cd48f1eSkevlo int			ngbe_fc_enable(struct ngbe_softc *);
1125cd48f1eSkevlo int			ngbe_fmgr_cmd_op(struct ngbe_hw *, uint32_t, uint32_t);
1135cd48f1eSkevlo uint32_t		ngbe_flash_read_dword(struct ngbe_hw *, uint32_t);
1145cd48f1eSkevlo uint8_t			ngbe_calculate_checksum(uint8_t *, uint32_t);
1155cd48f1eSkevlo int			ngbe_check_flash_load(struct ngbe_softc *, uint32_t);
1165cd48f1eSkevlo int			ngbe_check_internal_phy_id(struct ngbe_softc *);
1175cd48f1eSkevlo int			ngbe_check_mac_link(struct ngbe_hw *, uint32_t *, int *,
1185cd48f1eSkevlo 			    int);
1195cd48f1eSkevlo int			ngbe_check_mng_access(struct ngbe_hw *);
1205cd48f1eSkevlo int			ngbe_check_reset_blocked(struct ngbe_softc *);
1215cd48f1eSkevlo void			ngbe_clear_hw_cntrs(struct ngbe_hw *);
1225cd48f1eSkevlo void			ngbe_clear_vfta(struct ngbe_hw *);
1235cd48f1eSkevlo void			ngbe_configure_ivars(struct ngbe_softc *);
1245cd48f1eSkevlo void			ngbe_configure_pb(struct ngbe_softc *);
1255cd48f1eSkevlo void			ngbe_disable_intr(struct ngbe_softc *);
1265cd48f1eSkevlo int			ngbe_disable_pcie_master(struct ngbe_softc *);
1275cd48f1eSkevlo void			ngbe_disable_queue(struct ngbe_softc *, uint32_t);
1285cd48f1eSkevlo void			ngbe_disable_rx(struct ngbe_hw *);
1295cd48f1eSkevlo void			ngbe_disable_sec_rx_path(struct ngbe_hw *);
1305cd48f1eSkevlo int			ngbe_eepromcheck_cap(struct ngbe_softc *, uint16_t,
1315cd48f1eSkevlo 			    uint32_t *);
1325cd48f1eSkevlo void			ngbe_enable_intr(struct ngbe_softc *);
1335cd48f1eSkevlo void			ngbe_enable_queue(struct ngbe_softc *, uint32_t);
1345cd48f1eSkevlo void			ngbe_enable_rx(struct ngbe_hw *);
1355cd48f1eSkevlo void			ngbe_enable_rx_dma(struct ngbe_hw *, uint32_t);
1365cd48f1eSkevlo void			ngbe_enable_sec_rx_path(struct ngbe_hw *);
1375cd48f1eSkevlo int			ngbe_encap(struct tx_ring *, struct mbuf *);
1385cd48f1eSkevlo int			ngbe_get_buf(struct rx_ring *, int);
1395cd48f1eSkevlo void			ngbe_get_bus_info(struct ngbe_softc *);
1405cd48f1eSkevlo void			ngbe_get_copper_link_capabilities(struct ngbe_hw *,
1415cd48f1eSkevlo 			    uint32_t *, int *);
1425cd48f1eSkevlo int			ngbe_get_eeprom_semaphore(struct ngbe_softc *);
1435cd48f1eSkevlo void			ngbe_get_hw_control(struct ngbe_hw *);
1445cd48f1eSkevlo void			ngbe_release_hw_control(struct ngbe_softc *);
1455cd48f1eSkevlo void			ngbe_get_mac_addr(struct ngbe_hw *, uint8_t *);
1465cd48f1eSkevlo enum ngbe_media_type	ngbe_get_media_type(struct ngbe_hw *);
1475cd48f1eSkevlo void			ngbe_gphy_dis_eee(struct ngbe_hw *);
1485cd48f1eSkevlo void			ngbe_gphy_efuse_calibration(struct ngbe_softc *);
1495cd48f1eSkevlo void			ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *);
1505cd48f1eSkevlo void			ngbe_handle_phy_event(struct ngbe_softc *);
1515cd48f1eSkevlo int			ngbe_host_interface_command(struct ngbe_softc *,
1525cd48f1eSkevlo 			    uint32_t *, uint32_t, uint32_t, int);
1535cd48f1eSkevlo int			ngbe_hpbthresh(struct ngbe_softc *);
1545cd48f1eSkevlo int			ngbe_lpbthresh(struct ngbe_softc *);
1555cd48f1eSkevlo int			ngbe_mng_present(struct ngbe_hw *);
1565cd48f1eSkevlo int			ngbe_mta_vector(struct ngbe_hw *, uint8_t *);
1575cd48f1eSkevlo int			ngbe_negotiate_fc(struct ngbe_softc *, uint32_t,
1585cd48f1eSkevlo 			    uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
1595cd48f1eSkevlo int			ngbe_non_sfp_link_config(struct ngbe_softc *);
1605cd48f1eSkevlo void			ngbe_pbthresh_setup(struct ngbe_softc *);
1615cd48f1eSkevlo void			ngbe_phy_check_event(struct ngbe_softc *);
1625cd48f1eSkevlo int			ngbe_phy_check_overtemp(struct ngbe_hw *);
1635cd48f1eSkevlo void			ngbe_phy_get_advertised_pause(struct ngbe_hw *,
1645cd48f1eSkevlo 			    uint8_t *);
1655cd48f1eSkevlo void			ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *,
1665cd48f1eSkevlo 			    uint8_t *);
1675cd48f1eSkevlo int			ngbe_phy_identify(struct ngbe_softc *);
1685cd48f1eSkevlo int			ngbe_phy_init(struct ngbe_softc *);
1695cd48f1eSkevlo void			ngbe_phy_led_ctrl(struct ngbe_softc *);
1705cd48f1eSkevlo int			ngbe_phy_led_oem_chk(struct ngbe_softc *, uint32_t *);
1715cd48f1eSkevlo int			ngbe_phy_read_reg(struct ngbe_hw *, uint32_t, uint32_t,
1725cd48f1eSkevlo 			    uint16_t *);
1735cd48f1eSkevlo int			ngbe_phy_write_reg(struct ngbe_hw *, uint32_t, uint32_t,
1745cd48f1eSkevlo 			    uint16_t);
1755cd48f1eSkevlo int			ngbe_phy_reset(struct ngbe_softc *);
1765cd48f1eSkevlo int			ngbe_phy_set_pause_advertisement(struct ngbe_hw *,
1775cd48f1eSkevlo 			    uint16_t);
1785cd48f1eSkevlo int			ngbe_phy_setup(struct ngbe_softc *);
1795cd48f1eSkevlo int			ngbe_phy_setup_link(struct ngbe_softc *, uint32_t, int);
1805cd48f1eSkevlo uint16_t		ngbe_read_pci_cfg_word(struct ngbe_softc *, uint32_t);
1815cd48f1eSkevlo void			ngbe_release_eeprom_semaphore(struct ngbe_hw *);
1825cd48f1eSkevlo int			ngbe_acquire_swfw_sync(struct ngbe_softc *, uint32_t);
1835cd48f1eSkevlo void			ngbe_release_swfw_sync(struct ngbe_softc *, uint32_t);
1845cd48f1eSkevlo void			ngbe_reset(struct ngbe_softc *);
1855cd48f1eSkevlo int			ngbe_reset_hw(struct ngbe_softc *);
1865cd48f1eSkevlo void			ngbe_reset_misc(struct ngbe_hw *);
1875cd48f1eSkevlo int			ngbe_set_fw_drv_ver(struct ngbe_softc *, uint8_t,
1885cd48f1eSkevlo 			    uint8_t, uint8_t, uint8_t);
1895cd48f1eSkevlo void			ngbe_set_ivar(struct ngbe_softc *, uint16_t, uint16_t,
1905cd48f1eSkevlo 			    int8_t);
1915cd48f1eSkevlo void			ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *);
1925cd48f1eSkevlo void			ngbe_set_mta(struct ngbe_hw *, uint8_t *);
1935cd48f1eSkevlo void			ngbe_set_pci_config_data(struct ngbe_hw *, uint16_t);
1945cd48f1eSkevlo int			ngbe_set_rar(struct ngbe_softc *, uint32_t, uint8_t *,
1955cd48f1eSkevlo 			    uint64_t, uint32_t);
1965cd48f1eSkevlo void			ngbe_set_rx_drop_en(struct ngbe_softc *);
1975cd48f1eSkevlo void			ngbe_set_rxpba(struct ngbe_hw *, int, uint32_t, int);
1985cd48f1eSkevlo int			ngbe_setup_copper_link(struct ngbe_softc *, uint32_t,
1995cd48f1eSkevlo 			    int);
2005cd48f1eSkevlo int			ngbe_setup_fc(struct ngbe_softc *);
2015cd48f1eSkevlo void			ngbe_setup_gpie(struct ngbe_hw *);
2025cd48f1eSkevlo void			ngbe_setup_isb(struct ngbe_softc *);
2035cd48f1eSkevlo void			ngbe_setup_psrtype(struct ngbe_hw *);
2045cd48f1eSkevlo void			ngbe_setup_vlan_hw_support(struct ngbe_softc *);
2055cd48f1eSkevlo int			ngbe_start_hw(struct ngbe_softc *);
2065cd48f1eSkevlo int			ngbe_stop_adapter(struct ngbe_softc *);
2075cd48f1eSkevlo void			ngbe_rx_checksum(uint32_t, struct mbuf *);
2085cd48f1eSkevlo void			ngbe_rxeof(struct rx_ring *);
2095cd48f1eSkevlo void			ngbe_rxrefill(void *);
2105cd48f1eSkevlo int			ngbe_tx_ctx_setup(struct tx_ring *, struct mbuf *,
2115cd48f1eSkevlo 			    uint32_t *, uint32_t *);
2125cd48f1eSkevlo void			ngbe_txeof(struct tx_ring *);
2135cd48f1eSkevlo void			ngbe_update_mc_addr_list(struct ngbe_hw *, uint8_t *,
2145cd48f1eSkevlo 			    uint32_t, ngbe_mc_addr_itr, int);
2155cd48f1eSkevlo int			ngbe_validate_mac_addr(uint8_t *);
2165cd48f1eSkevlo 
/* Autoconf glue: driver definition; DV_IFNET marks it a network device. */
struct cfdriver ngbe_cd = {
	NULL, "ngbe", DV_IFNET
};
2205cd48f1eSkevlo 
/* Autoconf glue: softc size and match/attach/detach entry points. */
const struct cfattach ngbe_ca = {
	sizeof(struct ngbe_softc), ngbe_match, ngbe_attach, ngbe_detach
};
2245cd48f1eSkevlo 
2255cd48f1eSkevlo int
ngbe_match(struct device * parent,void * match,void * aux)2265cd48f1eSkevlo ngbe_match(struct device *parent, void *match, void *aux)
2275cd48f1eSkevlo {
2285cd48f1eSkevlo 	return pci_matchbyid((struct pci_attach_args *)aux, ngbe_devices,
2295cd48f1eSkevlo 	    nitems(ngbe_devices));
2305cd48f1eSkevlo }
2315cd48f1eSkevlo 
/*
 * Autoconf attach: map PCI resources, allocate the Tx/Rx queues, the
 * multicast table and the interrupt status block, reset and initialize
 * the hardware, set up MSI-X vectors and create the network interface.
 * On error, everything acquired so far is released through the fail
 * labels in reverse order of acquisition.
 */
void
ngbe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct ngbe_softc *sc = (struct ngbe_softc *)self;
	struct ngbe_hw *hw = &sc->hw;
	uint32_t eeprom_cksum_devcap, devcap, led_conf;
	int error;

	sc->osdep.os_sc = sc;
	sc->osdep.os_pa = *pa;

	/* Setup PCI resources. */
	if (ngbe_allocate_pci_resources(sc))
		goto fail1;

	sc->num_tx_desc = NGBE_DEFAULT_TXD;
	sc->num_rx_desc = NGBE_DEFAULT_RXD;

	/* Allocate Tx/Rx queues. */
	if (ngbe_allocate_queues(sc))
		goto fail1;

	/* Allocate multicast array memory. */
	sc->mta = mallocarray(ETHER_ADDR_LEN, NGBE_SP_RAR_ENTRIES, M_DEVBUF,
	    M_NOWAIT);
	if (sc->mta == NULL) {
		printf(": can not allocate multicast setup array\n");
		goto fail1;
	}

	/* Allocate interrupt status resources. */
	if (ngbe_allocate_isb(sc))
		goto fail2;

	/* Default to autonegotiated link speed. */
	hw->mac.autoneg = 1;
	hw->phy.autoneg_advertised = NGBE_LINK_SPEED_AUTONEG;
	hw->phy.force_speed = NGBE_LINK_SPEED_UNKNOWN;

	/* Initialize the shared code. */
	ngbe_init_shared_code(sc);

	sc->hw.mac.ops.set_lan_id(&sc->hw);

	/* Check if flash load is done after hw power up. */
	error = ngbe_check_flash_load(sc, NGBE_SPI_ILDR_STATUS_PERST);
	if (error)
		goto fail3;
	error = ngbe_check_flash_load(sc, NGBE_SPI_ILDR_STATUS_PWRRST);
	if (error)
		goto fail3;

	/*
	 * Reset the hardware; reset_if_overtemp is only set around this
	 * call (NOTE(review): presumably to let reset_hw handle an
	 * over-temperature condition — confirm in the reset path).
	 */
	hw->phy.reset_if_overtemp = 1;
	error = sc->hw.mac.ops.reset_hw(sc);
	hw->phy.reset_if_overtemp = 0;
	if (error) {
		printf(": HW reset failed\n");
		goto fail3;
	}

	/*
	 * LAN port 0 clears the checksum/EEPROM status registers; other
	 * ports read the value port 0 left behind.
	 */
	eeprom_cksum_devcap = devcap = 0;
	if (hw->bus.lan_id == 0) {
		NGBE_WRITE_REG(hw, NGBE_CALSUM_CAP_STATUS, 0);
		NGBE_WRITE_REG(hw, NGBE_EEPROM_VERSION_STORE_REG, 0);
	} else
		eeprom_cksum_devcap = NGBE_READ_REG(hw, NGBE_CALSUM_CAP_STATUS);

	hw->eeprom.ops.init_params(hw);
	hw->mac.ops.release_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB);
	if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) {
		/* Make sure the EEPROM is good */
		if (hw->eeprom.ops.eeprom_chksum_cap_st(sc, NGBE_CALSUM_COMMAND,
		    &devcap)) {
			printf(": eeprom checksum is not valid\n");
			goto fail3;
		}
	}

	/* Cache the OEM LED configuration; -1 means "not available". */
	led_conf = 0;
	if (hw->eeprom.ops.phy_led_oem_chk(sc, &led_conf))
		sc->led_conf = -1;
	else
		sc->led_conf = led_conf;

	memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	error = ngbe_allocate_msix(sc);
	if (error)
		goto fail3;

	ngbe_setup_interface(sc);

	/* Reset the hardware with the new settings */
	error = hw->mac.ops.start_hw(sc);
	if (error) {
		printf(": HW init failed\n");
		goto fail3;
	}

	/* Pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(sc);

	/* 0xff..ff tells firmware the driver version is "unknown". */
	hw->mac.ops.set_fw_drv_ver(sc, 0xff, 0xff, 0xff, 0xff);

	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
	return;

fail3:
	ngbe_free_isb(sc);
fail2:
	ngbe_free_transmit_structures(sc);
	ngbe_free_receive_structures(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);
fail1:
	ngbe_free_pci_resources(sc);
}
3485cd48f1eSkevlo 
/*
 * Autoconf detach: quiesce the hardware, hand control back to firmware,
 * detach the interface from the network stack and release every resource
 * acquired in ngbe_attach().  Always succeeds (returns 0).
 */
int
ngbe_detach(struct device *self, int flags)
{
	struct ngbe_softc *sc = (struct ngbe_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ngbe_stop(sc);
	ngbe_release_hw_control(sc);

	ether_ifdetach(ifp);
	if_detach(ifp);

	ngbe_free_pci_resources(sc);

	ngbe_free_transmit_structures(sc);
	ngbe_free_receive_structures(sc);
	ngbe_free_isb(sc);
	/* Size must match the mallocarray() in ngbe_attach(). */
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);

	return 0;
}
3705cd48f1eSkevlo 
3715cd48f1eSkevlo static inline uint32_t
NGBE_READ_REG_MASK(struct ngbe_hw * hw,uint32_t reg,uint32_t mask)3725cd48f1eSkevlo NGBE_READ_REG_MASK(struct ngbe_hw *hw, uint32_t reg, uint32_t mask)
3735cd48f1eSkevlo {
3745cd48f1eSkevlo 	uint32_t val;
3755cd48f1eSkevlo 
3765cd48f1eSkevlo 	val = NGBE_READ_REG(hw, reg);
3775cd48f1eSkevlo 	if (val == NGBE_FAILED_READ_REG)
3785cd48f1eSkevlo 		return val;
3795cd48f1eSkevlo 	return val & mask;
3805cd48f1eSkevlo }
3815cd48f1eSkevlo 
3825cd48f1eSkevlo static inline void
NGBE_WRITE_REG_MASK(struct ngbe_hw * hw,uint32_t reg,uint32_t mask,uint32_t field)3835cd48f1eSkevlo NGBE_WRITE_REG_MASK(struct ngbe_hw *hw, uint32_t reg, uint32_t mask,
3845cd48f1eSkevlo     uint32_t field)
3855cd48f1eSkevlo {
3865cd48f1eSkevlo 	uint32_t val;
3875cd48f1eSkevlo 
3885cd48f1eSkevlo 	val = NGBE_READ_REG(hw, reg);
3895cd48f1eSkevlo 	if (val == NGBE_FAILED_READ_REG)
3905cd48f1eSkevlo 		return;
3915cd48f1eSkevlo 	val = ((val & ~mask) | (field & mask));
3925cd48f1eSkevlo 	NGBE_WRITE_REG(hw, reg, val);
3935cd48f1eSkevlo }
3945cd48f1eSkevlo 
/*
 * Return one entry of the interrupt status block in host byte order.
 * NOTE(review): the ISB is presumably written little-endian by the
 * device; htole32() is byte-for-byte the same operation as letoh32(),
 * so the conversion is correct either way — confirm against the
 * hardware documentation.
 */
static inline uint32_t
ngbe_misc_isb(struct ngbe_softc *sc, enum ngbe_isb_idx idx)
{
	return htole32(sc->isb_base[idx]);
}
4005cd48f1eSkevlo 
/*
 * Bring the interface up: (re)program receive addresses and filters,
 * set up the Tx/Rx rings and DMA units, configure interrupts and link,
 * then mark the interface RUNNING.  Any mid-sequence failure tears the
 * adapter back down via ngbe_stop().  Runs at splnet.
 */
void
ngbe_init(void *arg)
{
	struct ngbe_softc *sc = (struct ngbe_softc *)arg;
	struct ngbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i, s;

	s = splnet();

	/* Start from a quiesced adapter. */
	ngbe_stop(sc);

	ngbe_setup_isb(sc);

	/* Setup the receive address. */
	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, NGBE_PSR_MAC_SWC_AD_H_AV);

	/* Get the latest mac address, user can use a LAA. */
	bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, 1);

	ngbe_configure_pb(sc);

	/* Program promiscuous mode and multicast filters. */
	ngbe_iff(sc);

	ngbe_setup_vlan_hw_support(sc);

	/* Prepare transmit descriptors and buffers. */
	if (ngbe_setup_transmit_structures(sc)) {
		printf("%s: could not setup transmit structures\n",
		    DEVNAME(sc));
		ngbe_stop(sc);
		splx(s);
		return;
	}
	if (ngbe_initialize_transmit_unit(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	/* Prepare receive descriptors and buffers. */
	if (ngbe_setup_receive_structures(sc)) {
		printf("%s: could not setup receive structures\n",
		    DEVNAME(sc));
		ngbe_stop(sc);
		splx(s);
		return;
	}
	if (ngbe_initialize_receive_unit(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	ngbe_get_hw_control(hw);
	ngbe_setup_gpie(hw);
	ngbe_configure_ivars(sc);

	if (ngbe_non_sfp_link_config(sc)) {
		ngbe_stop(sc);
		splx(s);
		return;
	}

	/* Select GMII */
	NGBE_WRITE_REG(hw, NGBE_MAC_TX_CFG,
	    (NGBE_READ_REG(hw, NGBE_MAC_TX_CFG) & ~NGBE_MAC_TX_CFG_SPEED_MASK) |
	    NGBE_MAC_TX_CFG_SPEED_1G);

	/* Clear any pending interrupts, may auto mask */
	NGBE_READ_REG(hw, NGBE_PX_IC);
	NGBE_READ_REG(hw, NGBE_PX_MISC_IC);
	ngbe_enable_intr(sc);

	/* Flag this LAN port as up in the per-port probe control. */
	switch (hw->bus.lan_id) {
	case 0:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN0_UP, NGBE_MIS_PRB_CTL_LAN0_UP);
		break;
	case 1:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN1_UP, NGBE_MIS_PRB_CTL_LAN1_UP);
		break;
	case 2:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN2_UP, NGBE_MIS_PRB_CTL_LAN2_UP);
		break;
	case 3:
		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
		    NGBE_MIS_PRB_CTL_LAN3_UP, NGBE_MIS_PRB_CTL_LAN3_UP);
		break;
	}

	NGBE_WRITE_REG_MASK(hw, NGBE_CFG_PORT_CTL, NGBE_CFG_PORT_CTL_PFRSTD,
	    NGBE_CFG_PORT_CTL_PFRSTD);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->sc_nqueues; i++)
		ifq_clr_oactive(ifp->if_ifqs[i]);
	splx(s);
}
5065cd48f1eSkevlo 
/*
 * Interface ioctl handler.  Runs at splnet.  Returns 0 on success or an
 * errno; ENETRESET from a case (or from ether_ioctl()) means only the
 * receive filter needs reprogramming, which is done here without a full
 * reinit.
 */
int
ngbe_ioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
{
	struct ngbe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ngbe_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: just refresh the rx filter below. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				ngbe_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ngbe_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = ngbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		/* Reprogram promiscuous/multicast filters with intrs off. */
		if (ifp->if_flags & IFF_RUNNING) {
			ngbe_disable_intr(sc);
			ngbe_iff(sc);
			ngbe_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return error;
}
5565cd48f1eSkevlo 
5575cd48f1eSkevlo int
ngbe_media_change(struct ifnet * ifp)5585cd48f1eSkevlo ngbe_media_change(struct ifnet *ifp)
5595cd48f1eSkevlo {
5605cd48f1eSkevlo 	struct ngbe_softc *sc = ifp->if_softc;
5615cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
5625cd48f1eSkevlo 	struct ifmedia *ifm = &sc->sc_media;
5635cd48f1eSkevlo 	uint32_t advertised = 0;
5645cd48f1eSkevlo 
5655cd48f1eSkevlo 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5665cd48f1eSkevlo 		return EINVAL;
5675cd48f1eSkevlo 
5685cd48f1eSkevlo 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
5695cd48f1eSkevlo 	case IFM_AUTO:
5705cd48f1eSkevlo 	case IFM_1000_T:
5715cd48f1eSkevlo 		advertised |= NGBE_LINK_SPEED_AUTONEG;
5725cd48f1eSkevlo 		break;
5735cd48f1eSkevlo 	case IFM_100_TX:
5745cd48f1eSkevlo 		advertised |= NGBE_LINK_SPEED_100_FULL;
5755cd48f1eSkevlo 		break;
5765cd48f1eSkevlo 	case IFM_10_T:
5775cd48f1eSkevlo 		advertised |= NGBE_LINK_SPEED_10_FULL;
5785cd48f1eSkevlo 		break;
5795cd48f1eSkevlo 	default:
5805cd48f1eSkevlo 		return EINVAL;
5815cd48f1eSkevlo 	}
5825cd48f1eSkevlo 
5835cd48f1eSkevlo 	hw->mac.autotry_restart = true;
5845cd48f1eSkevlo 	hw->mac.ops.setup_link(sc, advertised, 1);
5855cd48f1eSkevlo 
5865cd48f1eSkevlo 	return 0;
5875cd48f1eSkevlo }
5885cd48f1eSkevlo 
/*
 * ifmedia status callback: refresh the link state and report the
 * current speed (always full duplex here) and flow-control pause
 * settings to the caller's ifmediareq.
 */
void
ngbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ngbe_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ngbe_update_link_status(sc);

	/* No link: leave IFM_ACTIVE clear and report nothing more. */
	if (!LINK_STATE_IS_UP(ifp->if_link_state))
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case NGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case NGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case NGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	/* Translate the negotiated flow-control mode into pause flags. */
	switch (sc->hw.fc.current_mode) {
	case ngbe_fc_tx_pause:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
		break;
	case ngbe_fc_rx_pause:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		break;
	case ngbe_fc_full:
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
		    IFM_ETH_TXPAUSE;
		break;
	default:
		ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
		    IFM_ETH_TXPAUSE);
		break;
	}
}
6335cd48f1eSkevlo 
/*
 * Refill the receive ring with fresh mbuf buffers, starting after the
 * last filled descriptor.  The ring memory is synced POSTWRITE before
 * and PREWRITE after the descriptor updates.  Returns nonzero when at
 * least one descriptor was posted (caller then updates the tail reg).
 */
int
ngbe_rxfill(struct rx_ring *rxr)
{
	struct ngbe_softc *sc = rxr->sc;
	int i, post = 0;
	u_int slots;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	i = rxr->last_desc_filled;
	/* Claim up to num_rx_desc slots from the rxr accounting. */
	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
	    slots--) {
		if (++i == sc->num_rx_desc)
			i = 0;

		/* Stop on mbuf/DMA-load failure; unused slots returned below. */
		if (ngbe_get_buf(rxr, i) != 0)
			break;

		rxr->last_desc_filled = i;
		post = 1;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Return the slots we did not manage to fill. */
	if_rxr_put(&rxr->rx_ring, slots);

	return post;
}
6645cd48f1eSkevlo 
6655cd48f1eSkevlo int
ngbe_rxrinfo(struct ngbe_softc * sc,struct if_rxrinfo * ifri)6665cd48f1eSkevlo ngbe_rxrinfo(struct ngbe_softc *sc, struct if_rxrinfo *ifri)
6675cd48f1eSkevlo {
6685cd48f1eSkevlo 	struct if_rxring_info *ifr;
6695cd48f1eSkevlo 	struct rx_ring *rxr;
6705cd48f1eSkevlo 	int error, i, n = 0;
6715cd48f1eSkevlo 
6725cd48f1eSkevlo 	if ((ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
6735cd48f1eSkevlo 	    M_WAITOK | M_CANFAIL | M_ZERO)) == NULL)
6745cd48f1eSkevlo 		return ENOMEM;
6755cd48f1eSkevlo 
6765cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++) {
6775cd48f1eSkevlo 		rxr = &sc->rx_rings[i];
6785cd48f1eSkevlo 		ifr[n].ifr_size = MCLBYTES;
6795cd48f1eSkevlo 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
6805cd48f1eSkevlo 		ifr[n].ifr_info = rxr->rx_ring;
6815cd48f1eSkevlo 		n++;
6825cd48f1eSkevlo 	}
6835cd48f1eSkevlo 
6845cd48f1eSkevlo 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
6855cd48f1eSkevlo 	free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr));
6865cd48f1eSkevlo 
6875cd48f1eSkevlo 	return error;
6885cd48f1eSkevlo }
6895cd48f1eSkevlo 
/*
 * Transmit start routine for one ifqueue: drain the queue into the Tx
 * descriptor ring and kick the hardware write pointer.  Registered as
 * if_qstart, so it runs without the kernel lock (IFXF_MPSAFE is set in
 * ngbe_setup_interface()).
 */
void
ngbe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct ngbe_softc *sc = ifp->if_softc;
	struct tx_ring *txr = ifq->ifq_softc;
	struct mbuf *m;
	unsigned int prod, free, used;
	int post = 0;

	/* Nothing can be sent until link is established. */
	if (!sc->link_up)
		return;

	/*
	 * Number of free descriptors: distance from the producer index
	 * back around the ring to the cleaner's index.
	 */
	prod = txr->next_avail_desc;
	free = txr->next_to_clean;
	if (free <= prod)
		free += sc->num_tx_desc;
	free -= prod;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/*
		 * Stop while there is still headroom for a maximally
		 * scattered packet (plus two extra slots).
		 */
		if (free <= NGBE_MAX_SCATTER + 2) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		/* ngbe_encap() returns descriptors used, 0 on failure. */
		used = ngbe_encap(txr, m);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		txr->watchdog_timer = NGBE_TX_TIMEOUT;
		ifp->if_timer = NGBE_TX_TIMEOUT;

		post = 1;
	}

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Advance the hardware write pointer only if work was queued. */
	if (post)
		NGBE_WRITE_REG(&sc->hw, NGBE_PX_TR_WP(txr->me),
		    txr->next_avail_desc);
}
7495cd48f1eSkevlo 
7505cd48f1eSkevlo void
ngbe_stop(struct ngbe_softc * sc)7515cd48f1eSkevlo ngbe_stop(struct ngbe_softc *sc)
7525cd48f1eSkevlo {
7535cd48f1eSkevlo 	struct ifnet *ifp = &sc->sc_ac.ac_if;
7545cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
7555cd48f1eSkevlo 	uint32_t rxdctl;
7565cd48f1eSkevlo 	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;
7575cd48f1eSkevlo 
7585cd48f1eSkevlo 	/* Tell the stack that the interface is no longer active. */
7595cd48f1eSkevlo 	ifp->if_flags &= ~IFF_RUNNING;
7605cd48f1eSkevlo 	ifp->if_timer = 0;
7615cd48f1eSkevlo 
7625cd48f1eSkevlo 	ngbe_disable_pcie_master(sc);
7635cd48f1eSkevlo 	/* Disable receives */
7645cd48f1eSkevlo 	hw->mac.ops.disable_rx(hw);
7655cd48f1eSkevlo 
7665cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++) {
7675cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
7685cd48f1eSkevlo 		    NGBE_PX_RR_CFG_RR_EN, 0);
7695cd48f1eSkevlo 		do {
7705cd48f1eSkevlo 			DELAY(10);
7715cd48f1eSkevlo 			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
7725cd48f1eSkevlo 		} while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN));
7735cd48f1eSkevlo 		if (!wait_loop) {
7745cd48f1eSkevlo 			printf("%s: Rx queue %d not cleared within "
7755cd48f1eSkevlo 			    "the polling period\n", DEVNAME(sc), i);
7765cd48f1eSkevlo 			return;
7775cd48f1eSkevlo 		}
7785cd48f1eSkevlo 	}
7795cd48f1eSkevlo 
7805cd48f1eSkevlo 	ngbe_disable_intr(sc);
7815cd48f1eSkevlo 
7825cd48f1eSkevlo 	switch (hw->bus.lan_id) {
7835cd48f1eSkevlo 	case 0:
7845cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
7855cd48f1eSkevlo 		    NGBE_MIS_PRB_CTL_LAN0_UP, 0);
7865cd48f1eSkevlo 		break;
7875cd48f1eSkevlo 	case 1:
7885cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
7895cd48f1eSkevlo 		    NGBE_MIS_PRB_CTL_LAN1_UP, 0);
7905cd48f1eSkevlo 		break;
7915cd48f1eSkevlo 	case 2:
7925cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
7935cd48f1eSkevlo 		    NGBE_MIS_PRB_CTL_LAN2_UP, 0);
7945cd48f1eSkevlo 		break;
7955cd48f1eSkevlo 	case 3:
7965cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_PRB_CTL,
7975cd48f1eSkevlo 		    NGBE_MIS_PRB_CTL_LAN3_UP, 0);
7985cd48f1eSkevlo 		break;
7995cd48f1eSkevlo 	}
8005cd48f1eSkevlo 
8015cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE, 0);
8025cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++)
8035cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), NGBE_PX_TR_CFG_SWFLSH);
8045cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0);
8055cd48f1eSkevlo 
8065cd48f1eSkevlo 	ngbe_reset(sc);
8075cd48f1eSkevlo 
8085cd48f1eSkevlo 	hw->mac.ops.set_rar(sc, 0, hw->mac.addr, 0, NGBE_PSR_MAC_SWC_AD_H_AV);
8095cd48f1eSkevlo 
8105cd48f1eSkevlo 	intr_barrier(sc->tag);
8115cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++) {
8125cd48f1eSkevlo 		struct ifqueue *ifq = ifp->if_ifqs[i];
8135cd48f1eSkevlo 		ifq_barrier(ifq);
8145cd48f1eSkevlo 		ifq_clr_oactive(ifq);
8155cd48f1eSkevlo 
8165cd48f1eSkevlo 		if (sc->queues[i].tag != NULL)
8175cd48f1eSkevlo 			intr_barrier(sc->queues[i].tag);
8185cd48f1eSkevlo 		timeout_del(&sc->rx_rings[i].rx_refill);
8195cd48f1eSkevlo 	}
8205cd48f1eSkevlo 
8215cd48f1eSkevlo 	ngbe_free_transmit_structures(sc);
8225cd48f1eSkevlo 	ngbe_free_receive_structures(sc);
8235cd48f1eSkevlo 
8245cd48f1eSkevlo 	ngbe_update_link_status(sc);
8255cd48f1eSkevlo }
8265cd48f1eSkevlo 
/*
 * Query the PHY/MAC link state and propagate changes to the network
 * stack.  When link is up, also reprograms the speed-dependent MAC
 * settings and flow control.
 */
void
ngbe_update_link_status(struct ngbe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ngbe_hw *hw = &sc->hw;
	uint32_t reg, speed = 0;
	int link_state = LINK_STATE_DOWN;

	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up, 0);

	ifp->if_baudrate = 0;
	if (sc->link_up) {
		link_state = LINK_STATE_FULL_DUPLEX;

		switch (sc->link_speed) {
		case NGBE_LINK_SPEED_UNKNOWN:
			ifp->if_baudrate = 0;
			break;
		case NGBE_LINK_SPEED_1GB_FULL:
			ifp->if_baudrate = IF_Gbps(1);
			speed = 2;
			break;
		case NGBE_LINK_SPEED_100_FULL:
			ifp->if_baudrate = IF_Mbps(100);
			speed = 1;
			break;
		case NGBE_LINK_SPEED_10_FULL:
			ifp->if_baudrate = IF_Mbps(10);
			/* speed encoding stays 0 for 10Mb/s */
			break;
		}
		/* Latch the negotiated speed into the LAN speed field. */
		NGBE_WRITE_REG_MASK(hw, NGBE_CFG_LAN_SPEED, 0x3, speed);

		/* Update any flow control changes */
		hw->mac.ops.fc_enable(sc);

		ngbe_set_rx_drop_en(sc);

		if (sc->link_speed & (NGBE_LINK_SPEED_1GB_FULL |
		    NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) {
			/* Enable MAC transmit using the 1G speed encoding. */
			NGBE_WRITE_REG(hw, NGBE_MAC_TX_CFG,
			    (NGBE_READ_REG(hw, NGBE_MAC_TX_CFG) &
			    ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE |
			    NGBE_MAC_TX_CFG_SPEED_1G);
		}

		/*
		 * NOTE(review): these registers are read and written back
		 * with unchanged values; presumably this re-latches the
		 * configuration in hardware -- confirm against vendor
		 * documentation before changing.
		 */
		reg = NGBE_READ_REG(hw, NGBE_MAC_RX_CFG);
		NGBE_WRITE_REG(hw, NGBE_MAC_RX_CFG, reg);
		NGBE_WRITE_REG(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR);
		reg = NGBE_READ_REG(hw, NGBE_MAC_WDG_TIMEOUT);
		NGBE_WRITE_REG(hw, NGBE_MAC_WDG_TIMEOUT, reg);
	}

	/* Report only actual state transitions to the stack. */
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}
8845cd48f1eSkevlo 
/*
 * Interface watchdog: if any Tx ring's timer expires without its
 * descriptors being cleaned, assume the hardware hung and reinitialize
 * the device.
 */
void
ngbe_watchdog(struct ifnet *ifp)
{
	struct ngbe_softc *sc = ifp->if_softc;
	struct tx_ring *txr = sc->tx_rings;
	int i, tx_hang = 0;

	/*
	 * The timer is set to NGBE_TX_TIMEOUT every time ngbe_start()
	 * queues a packet.  Anytime all descriptors are clean the timer
	 * is set to 0.
	 */
	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
		/* A ring hangs when its timer counts down to zero here. */
		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
			continue;
		else {
			tx_hang = 1;
			break;
		}
	}
	if (!tx_hang)
		return;

	printf("%s: watchdog timeout\n", DEVNAME(sc));
	ifp->if_oerrors++;

	/* Force a full reinitialization of the device. */
	ifp->if_flags &= ~IFF_RUNNING;
	ngbe_init(sc);
}
9135cd48f1eSkevlo 
9145cd48f1eSkevlo int
ngbe_allocate_pci_resources(struct ngbe_softc * sc)9155cd48f1eSkevlo ngbe_allocate_pci_resources(struct ngbe_softc *sc)
9165cd48f1eSkevlo {
9175cd48f1eSkevlo 	struct ngbe_osdep *os = &sc->osdep;
9185cd48f1eSkevlo 	struct pci_attach_args *pa = &os->os_pa;
9195cd48f1eSkevlo 	pcireg_t memtype;
9205cd48f1eSkevlo 
9215cd48f1eSkevlo 	memtype = PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT;
9225cd48f1eSkevlo 	if (pci_mapreg_map(pa, NGBE_PCIREG, memtype, 0, &os->os_memt,
9235cd48f1eSkevlo 	    &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
9245cd48f1eSkevlo 		printf(": unable to map registers\n");
9255cd48f1eSkevlo 		return ENXIO;
9265cd48f1eSkevlo 	}
9275cd48f1eSkevlo 	sc->hw.back = os;
9285cd48f1eSkevlo 
9295cd48f1eSkevlo 	if (ngbe_setup_msix(sc))
9305cd48f1eSkevlo 		return EINVAL;
9315cd48f1eSkevlo 
9325cd48f1eSkevlo 	return 0;
9335cd48f1eSkevlo }
9345cd48f1eSkevlo 
9355cd48f1eSkevlo void
ngbe_free_pci_resources(struct ngbe_softc * sc)9365cd48f1eSkevlo ngbe_free_pci_resources(struct ngbe_softc *sc)
9375cd48f1eSkevlo {
9385cd48f1eSkevlo 	struct ngbe_osdep *os = &sc->osdep;
9395cd48f1eSkevlo 	struct pci_attach_args *pa = &os->os_pa;
9405cd48f1eSkevlo 
9415cd48f1eSkevlo 	if (sc->tag)
9425cd48f1eSkevlo 		pci_intr_disestablish(pa->pa_pc, sc->tag);
9435cd48f1eSkevlo 	sc->tag = NULL;
9445cd48f1eSkevlo 	if (os->os_membase)
9455cd48f1eSkevlo 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
9465cd48f1eSkevlo 	os->os_membase = 0;
9475cd48f1eSkevlo }
9485cd48f1eSkevlo 
/*
 * Establish one MSI-X vector per queue (pinned to a CPU from the
 * intrmap) plus a final vector for link status interrupts.  On
 * failure, every queue vector established so far is torn down.
 */
int
ngbe_allocate_msix(struct ngbe_softc *sc)
{
	struct ngbe_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	struct ngbe_queue *nq;
	pci_intr_handle_t ih;
	int i, error = 0;

	for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++) {
		if (pci_intr_map_msix(pa, i, &ih)) {
			/*
			 * NOTE(review): this message lacks a trailing
			 * newline, unlike the establish failure below --
			 * confirm whether that is intentional.
			 */
			printf(": unable to map msi-x vector %d", i);
			error = ENXIO;
			goto fail;
		}

		/* Spread queue interrupts across CPUs via the intrmap. */
		nq->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
		    ngbe_intr_queue, nq, nq->name);
		if (nq->tag == NULL) {
			printf(": unable to establish interrupt %d\n", i);
			error = ENXIO;
			goto fail;
		}

		nq->msix = i;
	}

	/* Now the link status/control last MSI-X vector */
	if (pci_intr_map_msix(pa, i, &ih)) {
		printf(": unable to map link vector\n");
		error = ENXIO;
		goto fail;
	}

	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
		ngbe_intr_link, sc, sc->sc_dev.dv_xname);
	if (sc->tag == NULL) {
		printf(": unable to establish link interrupt\n");
		error = ENXIO;
		goto fail;
	}

	sc->linkvec = i;
	printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih), i,
	    (i > 1) ? "s" : "");

	return 0;
fail:
	/* Disestablish the i queue vectors set up before the failure. */
	for (nq = sc->queues; i > 0; i--, nq++) {
		if (nq->tag == NULL)
			continue;
		pci_intr_disestablish(pa->pa_pc, nq->tag);
		nq->tag = NULL;
	}

	return error;
}
10075cd48f1eSkevlo 
/*
 * Attach-time setup of the ifnet: driver callbacks, capabilities,
 * media types, and wiring of per-queue ifqueue/ifiqueue pairs to the
 * driver's Tx/Rx rings.
 */
void
ngbe_setup_interface(struct ngbe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = ngbe_ioctl;
	ifp->if_qstart = ngbe_start;
	ifp->if_watchdog = ngbe_watchdog;
	/* Largest MTU the hardware supports, minus Ethernet framing. */
	ifp->if_hardmtu = NGBE_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN -
	    ETHER_CRC_LEN;
	ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, ngbe_media_change,
	    ngbe_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);

	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);

	/* One ifqueue/ifiqueue pair per hardware queue. */
	if_attach_queues(ifp, sc->sc_nqueues);
	if_attach_iqueues(ifp, sc->sc_nqueues);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifqueue *ifq = ifp->if_ifqs[i];
		struct ifiqueue *ifiq = ifp->if_iqs[i];
		struct tx_ring *txr = &sc->tx_rings[i];
		struct rx_ring *rxr = &sc->rx_rings[i];

		/* Cross-link the stack queues with the driver rings. */
		ifq->ifq_softc = txr;
		txr->ifq = ifq;

		ifiq->ifiq_softc = rxr;
		rxr->ifiq = ifiq;
	}
}
10605cd48f1eSkevlo 
10615cd48f1eSkevlo int
ngbe_setup_msix(struct ngbe_softc * sc)10625cd48f1eSkevlo ngbe_setup_msix(struct ngbe_softc *sc)
10635cd48f1eSkevlo {
10645cd48f1eSkevlo 	struct ngbe_osdep *os = &sc->osdep;
10655cd48f1eSkevlo 	struct pci_attach_args *pa = &os->os_pa;
10665cd48f1eSkevlo 	int nmsix;
10675cd48f1eSkevlo 
10685cd48f1eSkevlo 	nmsix = pci_intr_msix_count(pa);
10695cd48f1eSkevlo 	if (nmsix <= 1) {
10705cd48f1eSkevlo 		printf(": not enough msi-x vectors\n");
10715cd48f1eSkevlo 		return EINVAL;
10725cd48f1eSkevlo 	}
10735cd48f1eSkevlo 
10745cd48f1eSkevlo 	/* Give one vector to events. */
10755cd48f1eSkevlo 	nmsix--;
10765cd48f1eSkevlo 
10775cd48f1eSkevlo 	sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, NGBE_MAX_VECTORS,
10785cd48f1eSkevlo 	    INTRMAP_POWEROF2);
10795cd48f1eSkevlo 	sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
10805cd48f1eSkevlo 
10815cd48f1eSkevlo 	return 0;
10825cd48f1eSkevlo }
10835cd48f1eSkevlo 
/*
 * Allocate a single physically contiguous DMA region and map it into
 * kernel virtual address space.
 *
 * Returns 0 on success with dma filled in; returns 1 on failure.
 * Failures after map creation unwind the partially acquired resources
 * in reverse order through the goto chain and reset dma_map/dma_tag
 * to NULL (a dmamap_create failure leaves dma_tag set but dma is
 * unusable either way).
 */
int
ngbe_dma_malloc(struct ngbe_softc *sc, bus_size_t size,
    struct ngbe_dma_alloc *dma)
{
	struct ngbe_osdep *os = &sc->osdep;

	dma->dma_tag = os->os_pa.pa_dmat;

	/* One segment covering the entire region. */
	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->dma_map))
		return 1;
	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT))
		goto destroy;
	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
		goto free;
	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
	    NULL, BUS_DMA_NOWAIT))
		goto unmap;

	dma->dma_size = size;

	return 0;
unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
free:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return 1;
}
11185cd48f1eSkevlo 
11195cd48f1eSkevlo void
ngbe_dma_free(struct ngbe_softc * sc,struct ngbe_dma_alloc * dma)11205cd48f1eSkevlo ngbe_dma_free(struct ngbe_softc *sc, struct ngbe_dma_alloc *dma)
11215cd48f1eSkevlo {
11225cd48f1eSkevlo 	if (dma->dma_tag == NULL)
11235cd48f1eSkevlo 		return;
11245cd48f1eSkevlo 
11255cd48f1eSkevlo 	if (dma->dma_map != NULL) {
11265cd48f1eSkevlo 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
11275cd48f1eSkevlo 		    dma->dma_map->dm_mapsize,
11285cd48f1eSkevlo 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
11295cd48f1eSkevlo 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
11305cd48f1eSkevlo 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
11315cd48f1eSkevlo 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
11325cd48f1eSkevlo 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
11335cd48f1eSkevlo 		dma->dma_map = NULL;
11345cd48f1eSkevlo 	}
11355cd48f1eSkevlo }
11365cd48f1eSkevlo 
11375cd48f1eSkevlo int
ngbe_allocate_isb(struct ngbe_softc * sc)11385cd48f1eSkevlo ngbe_allocate_isb(struct ngbe_softc *sc)
11395cd48f1eSkevlo {
11405cd48f1eSkevlo 	int isize;
11415cd48f1eSkevlo 
11425cd48f1eSkevlo 	isize = sizeof(uint32_t) * NGBE_ISB_MAX;
11435cd48f1eSkevlo 	if (ngbe_dma_malloc(sc, isize, &sc->isbdma)) {
11445cd48f1eSkevlo 		printf("%s: unable to allocate interrupt status resources\n",
11455cd48f1eSkevlo 		    DEVNAME(sc));
11465cd48f1eSkevlo 		return ENOMEM;
11475cd48f1eSkevlo 	}
11485cd48f1eSkevlo 	sc->isb_base = (uint32_t *)sc->isbdma.dma_vaddr;
11495cd48f1eSkevlo 	bzero((void *)sc->isb_base, isize);
11505cd48f1eSkevlo 
11515cd48f1eSkevlo 	return 0;
11525cd48f1eSkevlo }
11535cd48f1eSkevlo 
/* Release the DMA memory backing the interrupt status block. */
void
ngbe_free_isb(struct ngbe_softc *sc)
{
	ngbe_dma_free(sc, &sc->isbdma);
}
11595cd48f1eSkevlo 
/*
 * Allocate the per-queue bookkeeping structures and the DMA-backed
 * Tx/Rx descriptor rings.  On failure the descriptor memory allocated
 * so far is released (tracked by txconf/rxconf) and ENOMEM returned.
 */
int
ngbe_allocate_queues(struct ngbe_softc *sc)
{
	struct ngbe_queue *nq;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	int i, rsize, rxconf, tsize, txconf;

	/* Allocate the top level queue structs. */
	sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct ngbe_queue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		printf("%s: unable to allocate queue\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the Tx ring. */
	sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->tx_rings == NULL) {
		/*
		 * NOTE(review): sc->queues is not freed on this path --
		 * confirm it is released during detach.
		 */
		printf("%s: unable to allocate Tx ring\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the Rx ring. */
	sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->rx_rings == NULL) {
		printf("%s: unable to allocate Rx ring\n", DEVNAME(sc));
		goto rx_fail;
	}

	/* Counters of successfully allocated descriptor areas. */
	txconf = rxconf = 0;

	/* Set up the Tx queues. */
	tsize = roundup2(sc->num_tx_desc * sizeof(union ngbe_tx_desc),
	    PAGE_SIZE);
	for (i = 0; i < sc->sc_nqueues; i++, txconf++) {
		txr = &sc->tx_rings[i];
		txr->sc = sc;
		txr->me = i;

		if (ngbe_dma_malloc(sc, tsize, &txr->txdma)) {
			printf("%s: unable to allocate Tx descriptor\n",
			    DEVNAME(sc));
			goto err_tx_desc;
		}
		txr->tx_base = (union ngbe_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);
	}

	/* Set up the Rx queues. */
	rsize = roundup2(sc->num_rx_desc * sizeof(union ngbe_rx_desc),
	    PAGE_SIZE);
	for (i = 0; i < sc->sc_nqueues; i++, rxconf++) {
		rxr = &sc->rx_rings[i];
		rxr->sc = sc;
		rxr->me = i;
		timeout_set(&rxr->rx_refill, ngbe_rxrefill, rxr);

		if (ngbe_dma_malloc(sc, rsize, &rxr->rxdma)) {
			printf("%s: unable to allocate Rx descriptor\n",
			    DEVNAME(sc));
			goto err_rx_desc;
		}
		rxr->rx_base = (union ngbe_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);
	}

	/* Set up the queue holding structs. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		nq = &sc->queues[i];
		nq->sc = sc;
		nq->txr = &sc->tx_rings[i];
		nq->rxr = &sc->rx_rings[i];
		snprintf(nq->name, sizeof(nq->name), "%s:%d", DEVNAME(sc), i);
	}

	return 0;

err_rx_desc:
	/* Free the rxconf Rx descriptor areas allocated so far. */
	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
		ngbe_dma_free(sc, &rxr->rxdma);
err_tx_desc:
	/* Free the txconf Tx descriptor areas allocated so far. */
	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
		ngbe_dma_free(sc, &txr->txdma);
	free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring));
	sc->rx_rings = NULL;
rx_fail:
	free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring));
	sc->tx_rings = NULL;
fail:
	return ENOMEM;
}
12545cd48f1eSkevlo 
12555cd48f1eSkevlo void
ngbe_free_receive_structures(struct ngbe_softc * sc)12565cd48f1eSkevlo ngbe_free_receive_structures(struct ngbe_softc *sc)
12575cd48f1eSkevlo {
12585cd48f1eSkevlo 	struct rx_ring *rxr;
12595cd48f1eSkevlo 	int i;
12605cd48f1eSkevlo 
12615cd48f1eSkevlo 	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
12625cd48f1eSkevlo 		if_rxr_init(&rxr->rx_ring, 0, 0);
12635cd48f1eSkevlo 
12645cd48f1eSkevlo 	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
12655cd48f1eSkevlo 		ngbe_free_receive_buffers(rxr);
12665cd48f1eSkevlo }
12675cd48f1eSkevlo 
12685cd48f1eSkevlo void
ngbe_free_receive_buffers(struct rx_ring * rxr)12695cd48f1eSkevlo ngbe_free_receive_buffers(struct rx_ring *rxr)
12705cd48f1eSkevlo {
12715cd48f1eSkevlo 	struct ngbe_softc *sc;
12725cd48f1eSkevlo 	struct ngbe_rx_buf *rxbuf;
12735cd48f1eSkevlo 	int i;
12745cd48f1eSkevlo 
12755cd48f1eSkevlo 	sc = rxr->sc;
12765cd48f1eSkevlo 	if (rxr->rx_buffers != NULL) {
12775cd48f1eSkevlo 		for (i = 0; i < sc->num_rx_desc; i++) {
12785cd48f1eSkevlo 			rxbuf = &rxr->rx_buffers[i];
12795cd48f1eSkevlo 			if (rxbuf->buf != NULL) {
12805cd48f1eSkevlo 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
12815cd48f1eSkevlo 				    0, rxbuf->map->dm_mapsize,
12825cd48f1eSkevlo 				    BUS_DMASYNC_POSTREAD);
12835cd48f1eSkevlo 				bus_dmamap_unload(rxr->rxdma.dma_tag,
12845cd48f1eSkevlo 				    rxbuf->map);
12855cd48f1eSkevlo 				m_freem(rxbuf->buf);
12865cd48f1eSkevlo 				rxbuf->buf = NULL;
12875cd48f1eSkevlo 			}
12885cd48f1eSkevlo 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
12895cd48f1eSkevlo 			rxbuf->map = NULL;
12905cd48f1eSkevlo 		}
12915cd48f1eSkevlo 		free(rxr->rx_buffers, M_DEVBUF,
12925cd48f1eSkevlo 		    sc->num_rx_desc * sizeof(struct ngbe_rx_buf));
12935cd48f1eSkevlo 		rxr->rx_buffers = NULL;
12945cd48f1eSkevlo 	}
12955cd48f1eSkevlo }
12965cd48f1eSkevlo 
12975cd48f1eSkevlo void
ngbe_free_transmit_structures(struct ngbe_softc * sc)12985cd48f1eSkevlo ngbe_free_transmit_structures(struct ngbe_softc *sc)
12995cd48f1eSkevlo {
13005cd48f1eSkevlo 	struct tx_ring *txr = sc->tx_rings;
13015cd48f1eSkevlo 	int i;
13025cd48f1eSkevlo 
13035cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++, txr++)
13045cd48f1eSkevlo 		ngbe_free_transmit_buffers(txr);
13055cd48f1eSkevlo }
13065cd48f1eSkevlo 
13075cd48f1eSkevlo void
ngbe_free_transmit_buffers(struct tx_ring * txr)13085cd48f1eSkevlo ngbe_free_transmit_buffers(struct tx_ring *txr)
13095cd48f1eSkevlo {
13105cd48f1eSkevlo 	struct ngbe_softc *sc = txr->sc;
13115cd48f1eSkevlo 	struct ngbe_tx_buf *tx_buffer;
13125cd48f1eSkevlo 	int i;
13135cd48f1eSkevlo 
13145cd48f1eSkevlo 	if (txr->tx_buffers == NULL)
13155cd48f1eSkevlo 		return;
13165cd48f1eSkevlo 
13175cd48f1eSkevlo 	tx_buffer = txr->tx_buffers;
13185cd48f1eSkevlo 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
13195cd48f1eSkevlo 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
13205cd48f1eSkevlo 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
13215cd48f1eSkevlo 			    0, tx_buffer->map->dm_mapsize,
13225cd48f1eSkevlo 			    BUS_DMASYNC_POSTWRITE);
13235cd48f1eSkevlo 			bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
13245cd48f1eSkevlo 		}
13255cd48f1eSkevlo 		if (tx_buffer->m_head != NULL) {
13265cd48f1eSkevlo 			m_freem(tx_buffer->m_head);
13275cd48f1eSkevlo 			tx_buffer->m_head = NULL;
13285cd48f1eSkevlo 		}
13295cd48f1eSkevlo 		if (tx_buffer->map != NULL) {
13305cd48f1eSkevlo 			bus_dmamap_destroy(txr->txdma.dma_tag, tx_buffer->map);
13315cd48f1eSkevlo 			tx_buffer->map = NULL;
13325cd48f1eSkevlo 		}
13335cd48f1eSkevlo 	}
13345cd48f1eSkevlo 
13355cd48f1eSkevlo 	if (txr->tx_buffers != NULL)
13365cd48f1eSkevlo 		free(txr->tx_buffers, M_DEVBUF,
13375cd48f1eSkevlo 		    sc->num_tx_desc * sizeof(struct ngbe_tx_buf));
13385cd48f1eSkevlo 	txr->tx_buffers = NULL;
13395cd48f1eSkevlo 	txr->txtag = NULL;
13405cd48f1eSkevlo }
13415cd48f1eSkevlo 
13425cd48f1eSkevlo int
ngbe_allocate_receive_buffers(struct rx_ring * rxr)13435cd48f1eSkevlo ngbe_allocate_receive_buffers(struct rx_ring *rxr)
13445cd48f1eSkevlo {
13455cd48f1eSkevlo 	struct ngbe_softc *sc = rxr->sc;
13465cd48f1eSkevlo 	struct ngbe_rx_buf *rxbuf;
13475cd48f1eSkevlo 	int i, error;
13485cd48f1eSkevlo 
13495cd48f1eSkevlo 	rxr->rx_buffers = mallocarray(sc->num_rx_desc,
13505cd48f1eSkevlo 	    sizeof(struct ngbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
13515cd48f1eSkevlo 	if (rxr->rx_buffers == NULL) {
13525cd48f1eSkevlo 		printf("%s: unable to allocate rx_buffer memory\n",
13535cd48f1eSkevlo 		    DEVNAME(sc));
13545cd48f1eSkevlo 		error = ENOMEM;
13555cd48f1eSkevlo 		goto fail;
13565cd48f1eSkevlo 	}
13575cd48f1eSkevlo 
13585cd48f1eSkevlo 	rxbuf = rxr->rx_buffers;
13595cd48f1eSkevlo 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
13605cd48f1eSkevlo 		error = bus_dmamap_create(rxr->rxdma.dma_tag,
13615cd48f1eSkevlo 		    NGBE_MAX_JUMBO_FRAME_SIZE, 1, NGBE_MAX_JUMBO_FRAME_SIZE, 0,
13625cd48f1eSkevlo 		    BUS_DMA_NOWAIT, &rxbuf->map);
13635cd48f1eSkevlo 		if (error) {
13645cd48f1eSkevlo 			printf("%s: unable to create RX DMA map\n",
13655cd48f1eSkevlo 			    DEVNAME(sc));
13665cd48f1eSkevlo 			goto fail;
13675cd48f1eSkevlo 		}
13685cd48f1eSkevlo 	}
13695cd48f1eSkevlo 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
13705cd48f1eSkevlo 	    rxr->rxdma.dma_map->dm_mapsize,
13715cd48f1eSkevlo 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
13725cd48f1eSkevlo 
13735cd48f1eSkevlo 	return 0;
13745cd48f1eSkevlo fail:
13755cd48f1eSkevlo 	return error;
13765cd48f1eSkevlo }
13775cd48f1eSkevlo 
13785cd48f1eSkevlo int
ngbe_allocate_transmit_buffers(struct tx_ring * txr)13795cd48f1eSkevlo ngbe_allocate_transmit_buffers(struct tx_ring *txr)
13805cd48f1eSkevlo {
13815cd48f1eSkevlo 	struct ngbe_softc *sc = txr->sc;
13825cd48f1eSkevlo 	struct ngbe_tx_buf *txbuf;
13835cd48f1eSkevlo 	int error, i;
13845cd48f1eSkevlo 
13855cd48f1eSkevlo 	txr->tx_buffers = mallocarray(sc->num_tx_desc,
13865cd48f1eSkevlo 	    sizeof(struct ngbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
13875cd48f1eSkevlo 	if (txr->tx_buffers == NULL) {
13885cd48f1eSkevlo 		printf("%s: unable to allocate tx_buffer memory\n",
13895cd48f1eSkevlo 		    DEVNAME(sc));
13905cd48f1eSkevlo 		error = ENOMEM;
13915cd48f1eSkevlo 		goto fail;
13925cd48f1eSkevlo 	}
13935cd48f1eSkevlo 	txr->txtag = txr->txdma.dma_tag;
13945cd48f1eSkevlo 
13955cd48f1eSkevlo 	/* Create the descriptor buffer dma maps. */
13965cd48f1eSkevlo 	for (i = 0; i < sc->num_tx_desc; i++) {
13975cd48f1eSkevlo 		txbuf = &txr->tx_buffers[i];
13985cd48f1eSkevlo 		error = bus_dmamap_create(txr->txdma.dma_tag, NGBE_TSO_SIZE,
13995cd48f1eSkevlo 		    NGBE_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT,
14005cd48f1eSkevlo 		    &txbuf->map);
14015cd48f1eSkevlo 		if (error != 0) {
14025cd48f1eSkevlo 			printf("%s: unable to create TX DMA map\n",
14035cd48f1eSkevlo 			    DEVNAME(sc));
14045cd48f1eSkevlo 			goto fail;
14055cd48f1eSkevlo 		}
14065cd48f1eSkevlo 	}
14075cd48f1eSkevlo 
14085cd48f1eSkevlo 	return 0;
14095cd48f1eSkevlo fail:
14105cd48f1eSkevlo 	return error;
14115cd48f1eSkevlo }
14125cd48f1eSkevlo 
14135cd48f1eSkevlo int
ngbe_setup_receive_ring(struct rx_ring * rxr)14145cd48f1eSkevlo ngbe_setup_receive_ring(struct rx_ring *rxr)
14155cd48f1eSkevlo {
14165cd48f1eSkevlo 	struct ngbe_softc *sc = rxr->sc;
14175cd48f1eSkevlo 	struct ifnet *ifp = &sc->sc_ac.ac_if;
14185cd48f1eSkevlo 	int rsize;
14195cd48f1eSkevlo 
14205cd48f1eSkevlo 	rsize = roundup2(sc->num_rx_desc * sizeof(union ngbe_rx_desc),
14215cd48f1eSkevlo 	    PAGE_SIZE);
14225cd48f1eSkevlo 
14235cd48f1eSkevlo 	/* Clear the ring contents. */
14245cd48f1eSkevlo 	bzero((void *)rxr->rx_base, rsize);
14255cd48f1eSkevlo 
14265cd48f1eSkevlo 	if (ngbe_allocate_receive_buffers(rxr))
14275cd48f1eSkevlo 		return ENOMEM;
14285cd48f1eSkevlo 
14295cd48f1eSkevlo 	/* Setup our descriptor indices. */
14305cd48f1eSkevlo 	rxr->next_to_check = 0;
14315cd48f1eSkevlo 	rxr->last_desc_filled = sc->num_rx_desc - 1;
14325cd48f1eSkevlo 
14335cd48f1eSkevlo 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
14345cd48f1eSkevlo 	    sc->num_rx_desc - 1);
14355cd48f1eSkevlo 
14365cd48f1eSkevlo 	ngbe_rxfill(rxr);
14375cd48f1eSkevlo 	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
14385cd48f1eSkevlo 		printf("%s: unable to fill any rx descriptors\n", DEVNAME(sc));
14395cd48f1eSkevlo 		return ENOBUFS;
14405cd48f1eSkevlo 	}
14415cd48f1eSkevlo 
14425cd48f1eSkevlo 	return 0;
14435cd48f1eSkevlo }
14445cd48f1eSkevlo 
14455cd48f1eSkevlo int
ngbe_setup_transmit_ring(struct tx_ring * txr)14465cd48f1eSkevlo ngbe_setup_transmit_ring(struct tx_ring *txr)
14475cd48f1eSkevlo {
14485cd48f1eSkevlo 	struct ngbe_softc *sc = txr->sc;
14495cd48f1eSkevlo 
14505cd48f1eSkevlo 	/* Now allocate transmit buffers for the ring. */
14515cd48f1eSkevlo 	if (ngbe_allocate_transmit_buffers(txr))
14525cd48f1eSkevlo 		return ENOMEM;
14535cd48f1eSkevlo 
14545cd48f1eSkevlo 	/* Clear the old ring contents */
14555cd48f1eSkevlo 	bzero((void *)txr->tx_base,
14565cd48f1eSkevlo 	    (sizeof(union ngbe_tx_desc)) * sc->num_tx_desc);
14575cd48f1eSkevlo 
14585cd48f1eSkevlo 	/* Reset indices. */
14595cd48f1eSkevlo 	txr->next_avail_desc = 0;
14605cd48f1eSkevlo 	txr->next_to_clean = 0;
14615cd48f1eSkevlo 
14625cd48f1eSkevlo 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
14635cd48f1eSkevlo 	    txr->txdma.dma_map->dm_mapsize,
14645cd48f1eSkevlo 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
14655cd48f1eSkevlo 
14665cd48f1eSkevlo 	return 0;
14675cd48f1eSkevlo }
14685cd48f1eSkevlo 
14695cd48f1eSkevlo int
ngbe_setup_receive_structures(struct ngbe_softc * sc)14705cd48f1eSkevlo ngbe_setup_receive_structures(struct ngbe_softc *sc)
14715cd48f1eSkevlo {
14725cd48f1eSkevlo 	struct rx_ring *rxr = sc->rx_rings;
14735cd48f1eSkevlo 	int i;
14745cd48f1eSkevlo 
14755cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
14765cd48f1eSkevlo 		if (ngbe_setup_receive_ring(rxr))
14775cd48f1eSkevlo 			goto fail;
14785cd48f1eSkevlo 	}
14795cd48f1eSkevlo 
14805cd48f1eSkevlo 	return 0;
14815cd48f1eSkevlo fail:
14825cd48f1eSkevlo 	ngbe_free_receive_structures(sc);
14835cd48f1eSkevlo 	return ENOBUFS;
14845cd48f1eSkevlo }
14855cd48f1eSkevlo 
14865cd48f1eSkevlo int
ngbe_setup_transmit_structures(struct ngbe_softc * sc)14875cd48f1eSkevlo ngbe_setup_transmit_structures(struct ngbe_softc *sc)
14885cd48f1eSkevlo {
14895cd48f1eSkevlo 	struct tx_ring *txr = sc->tx_rings;
14905cd48f1eSkevlo 	int i;
14915cd48f1eSkevlo 
14925cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
14935cd48f1eSkevlo 		if (ngbe_setup_transmit_ring(txr))
14945cd48f1eSkevlo 			goto fail;
14955cd48f1eSkevlo 	}
14965cd48f1eSkevlo 
14975cd48f1eSkevlo 	return 0;
14985cd48f1eSkevlo fail:
14995cd48f1eSkevlo 	ngbe_free_transmit_structures(sc);
15005cd48f1eSkevlo 	return ENOBUFS;
15015cd48f1eSkevlo }
15025cd48f1eSkevlo 
15035cd48f1eSkevlo uint8_t *
ngbe_addr_list_itr(struct ngbe_hw * hw,uint8_t ** mc_addr_ptr,uint32_t * vmdq)15045cd48f1eSkevlo ngbe_addr_list_itr(struct ngbe_hw *hw, uint8_t **mc_addr_ptr, uint32_t *vmdq)
15055cd48f1eSkevlo {
15065cd48f1eSkevlo 	uint8_t *addr = *mc_addr_ptr;
15075cd48f1eSkevlo 	uint8_t *newptr;
15085cd48f1eSkevlo 	*vmdq = 0;
15095cd48f1eSkevlo 
15105cd48f1eSkevlo 	newptr = addr + ETHER_ADDR_LEN;
15115cd48f1eSkevlo 	*mc_addr_ptr = newptr;
15125cd48f1eSkevlo 	return addr;
15135cd48f1eSkevlo }
15145cd48f1eSkevlo 
/*
 * Program the hardware receive filters (unicast/multicast promiscuous
 * bits, VLAN filtering and the multicast address list) to match the
 * current interface state.
 */
void
ngbe_iff(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t fctrl, vlanctrl;
	uint8_t *mta, *update_ptr;
	int mcnt = 0;

	/* Reset the flat multicast address shadow table. */
	mta = sc->mta;
	bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN * NGBE_SP_RAR_ENTRIES);

	/* Read the current config with the bits we manage masked off. */
	fctrl = NGBE_READ_REG_MASK(hw, NGBE_PSR_CTL,
	    ~(NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE));
	vlanctrl = NGBE_READ_REG_MASK(hw, NGBE_PSR_VLAN_CTL,
	    ~(NGBE_PSR_VLAN_CTL_VFE | NGBE_PSR_VLAN_CTL_CFIEN));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Set all bits that we expect to always be set */
	fctrl |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_MFE;
	vlanctrl |= NGBE_PSR_VLAN_CTL_VFE;

	hw->addr_ctrl.user_set_promisc = 0;
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > NGBE_SP_RAR_ENTRIES) {
		/*
		 * Promiscuous mode requested, or too many (or ranged)
		 * multicast entries to filter exactly: accept all
		 * multicast, and in the promiscuous case also all
		 * unicast with VLAN filtering off.
		 */
		ifp->if_flags |= IFF_ALLMULTI;
		fctrl |= NGBE_PSR_CTL_MPE;
		if (ifp->if_flags & IFF_PROMISC) {
			fctrl |= NGBE_PSR_CTL_UPE;
			vlanctrl &= ~NGBE_PSR_VLAN_CTL_VFE;
		}
	} else {
		/* Copy each multicast address into the shadow table. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, &mta[mcnt * ETHER_ADDR_LEN],
			    ETHER_ADDR_LEN);
			mcnt++;

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Hand the list to hardware via the iterator callback. */
		update_ptr = mta;
		hw->mac.ops.update_mc_addr_list(hw, update_ptr, mcnt,
		    ngbe_addr_list_itr, 1);
	}

	NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_CTL, vlanctrl);
	NGBE_WRITE_REG(hw, NGBE_PSR_CTL, fctrl);
}
15675cd48f1eSkevlo 
15685cd48f1eSkevlo int
ngbe_initialize_receive_unit(struct ngbe_softc * sc)15695cd48f1eSkevlo ngbe_initialize_receive_unit(struct ngbe_softc *sc)
15705cd48f1eSkevlo {
15715cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
15725cd48f1eSkevlo 	struct rx_ring *rxr = sc->rx_rings;
15735cd48f1eSkevlo 	uint32_t bufsz, mhadd, rxctrl, rxdctl, srrctl;
15745cd48f1eSkevlo 	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;
15755cd48f1eSkevlo 	int error = 0;
15765cd48f1eSkevlo 
15775cd48f1eSkevlo 	/* Disable receives while setting up the descriptors */
15785cd48f1eSkevlo 	hw->mac.ops.disable_rx(hw);
15795cd48f1eSkevlo 
15805cd48f1eSkevlo 	ngbe_setup_psrtype(hw);
15815cd48f1eSkevlo 
15825cd48f1eSkevlo 	/* Enable hw crc stripping */
15835cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_CRC_STRIP,
15845cd48f1eSkevlo 	    NGBE_RSEC_CTL_CRC_STRIP);
15855cd48f1eSkevlo 
15865cd48f1eSkevlo 	if (sc->sc_nqueues > 1) {
15875cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_PSR_CTL, NGBE_PSR_CTL_PCSD,
15885cd48f1eSkevlo 		    NGBE_PSR_CTL_PCSD);
15895cd48f1eSkevlo 		ngbe_initialize_rss_mapping(sc);
15905cd48f1eSkevlo 	}
15915cd48f1eSkevlo 
15925cd48f1eSkevlo 	mhadd = NGBE_READ_REG(hw, NGBE_PSR_MAX_SZ);
15935cd48f1eSkevlo 	if (mhadd != NGBE_MAX_JUMBO_FRAME_SIZE)
15945cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_MAX_SZ, NGBE_MAX_JUMBO_FRAME_SIZE);
15955cd48f1eSkevlo 
15965cd48f1eSkevlo 	bufsz = MCLBYTES >> NGBE_PX_RR_CFG_BSIZEPKT_SHIFT;
15975cd48f1eSkevlo 
15985cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
15995cd48f1eSkevlo 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
16005cd48f1eSkevlo 
16015cd48f1eSkevlo 		/* Disable queue to avoid issues while updating state */
16025cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
16035cd48f1eSkevlo 		    NGBE_PX_RR_CFG_RR_EN, 0);
16045cd48f1eSkevlo 
16055cd48f1eSkevlo 		/* Hardware may take up to 100us to actually disable Rx queue */
16065cd48f1eSkevlo 		do {
16075cd48f1eSkevlo 			DELAY(10);
16085cd48f1eSkevlo 			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
16095cd48f1eSkevlo 		} while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN));
16105cd48f1eSkevlo 		if (!wait_loop) {
16115cd48f1eSkevlo 			printf("%s: Rx queue %d not cleared within "
16125cd48f1eSkevlo 			    "the polling period\n", DEVNAME(sc), i);
16135cd48f1eSkevlo 			error = ETIMEDOUT;
16145cd48f1eSkevlo 			goto out;
16155cd48f1eSkevlo 		}
16165cd48f1eSkevlo 
16175cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_RR_BAL(i),
16185cd48f1eSkevlo 		    (rdba & 0x00000000ffffffffULL));
16195cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_RR_BAH(i), (rdba >> 32));
16205cd48f1eSkevlo 
16215cd48f1eSkevlo 		rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
16225cd48f1eSkevlo 		rxdctl |=
16235cd48f1eSkevlo 		    (sc->num_rx_desc / 128) << NGBE_PX_RR_CFG_RR_SIZE_SHIFT;
16245cd48f1eSkevlo 		rxdctl |= 0x1 << NGBE_PX_RR_CFG_RR_THER_SHIFT;
16255cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_RR_CFG(i), rxdctl);
16265cd48f1eSkevlo 
16275cd48f1eSkevlo 		/* Reset head and tail pointers */
16285cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_RR_RP(i), 0);
16295cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_RR_WP(i), 0);
16305cd48f1eSkevlo 
16315cd48f1eSkevlo 		/* Set up the SRRCTL register */
16325cd48f1eSkevlo 		srrctl = NGBE_READ_REG_MASK(hw, NGBE_PX_RR_CFG(i),
16335cd48f1eSkevlo 		    ~(NGBE_PX_RR_CFG_RR_HDR_SZ | NGBE_PX_RR_CFG_RR_BUF_SZ |
16345cd48f1eSkevlo 		    NGBE_PX_RR_CFG_SPLIT_MODE));
16355cd48f1eSkevlo 		srrctl |= bufsz;
16365cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_RR_CFG(i), srrctl);
16375cd48f1eSkevlo 
16385cd48f1eSkevlo 		/* Enable receive descriptor ring */
16395cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
16405cd48f1eSkevlo 		    NGBE_PX_RR_CFG_RR_EN, NGBE_PX_RR_CFG_RR_EN);
16415cd48f1eSkevlo 
16425cd48f1eSkevlo 		do {
16435cd48f1eSkevlo 			msec_delay(1);
16445cd48f1eSkevlo 			rxdctl = NGBE_READ_REG(hw, NGBE_PX_RR_CFG(i));
16455cd48f1eSkevlo 		} while (--wait_loop && !(rxdctl & NGBE_PX_RR_CFG_RR_EN));
16465cd48f1eSkevlo 		if (!wait_loop) {
16475cd48f1eSkevlo 			printf("%s: Rx queue %d not set within "
16485cd48f1eSkevlo 			    "the polling period\n", DEVNAME(sc), i);
16495cd48f1eSkevlo 			error = ETIMEDOUT;
16505cd48f1eSkevlo 			goto out;
16515cd48f1eSkevlo 		}
16525cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_RR_WP(i), rxr->last_desc_filled);
16535cd48f1eSkevlo 	}
16545cd48f1eSkevlo 
16555cd48f1eSkevlo 	/* Enable all receives */
16565cd48f1eSkevlo 	rxctrl = NGBE_READ_REG(hw, NGBE_RDB_PB_CTL);
16575cd48f1eSkevlo 	rxctrl |= NGBE_RDB_PB_CTL_PBEN;
16585cd48f1eSkevlo 	hw->mac.ops.enable_rx_dma(hw, rxctrl);
16595cd48f1eSkevlo out:
16605cd48f1eSkevlo 	return error;
16615cd48f1eSkevlo }
16625cd48f1eSkevlo 
16635cd48f1eSkevlo void
ngbe_initialize_rss_mapping(struct ngbe_softc * sc)16645cd48f1eSkevlo ngbe_initialize_rss_mapping(struct ngbe_softc *sc)
16655cd48f1eSkevlo {
16665cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
16675cd48f1eSkevlo 	uint32_t reta = 0, rss_field, rss_key[10];
16685cd48f1eSkevlo 	int i, j, queue_id;
16695cd48f1eSkevlo 
16705cd48f1eSkevlo 	/* Set up the redirection table */
16715cd48f1eSkevlo 	for (i = 0, j = 0; i < 128; i++, j++) {
16725cd48f1eSkevlo 		if (j == sc->sc_nqueues)
16735cd48f1eSkevlo 			j = 0;
16745cd48f1eSkevlo 		queue_id = j;
16755cd48f1eSkevlo 		/*
16765cd48f1eSkevlo 		 * The low 8 bits are for hash value (n+0);
16775cd48f1eSkevlo 		 * The next 8 bits are for hash value (n+1), etc.
16785cd48f1eSkevlo 		 */
16795cd48f1eSkevlo 		reta = reta >> 8;
16805cd48f1eSkevlo 		reta = reta | (((uint32_t)queue_id) << 24);
16815cd48f1eSkevlo 		if ((i & 3) == 3) {
16825cd48f1eSkevlo 			NGBE_WRITE_REG(hw, NGBE_RDB_RSSTBL(i >> 2), reta);
16835cd48f1eSkevlo 			reta = 0;
16845cd48f1eSkevlo 		}
16855cd48f1eSkevlo 	}
16865cd48f1eSkevlo 
16875cd48f1eSkevlo 	/* Set up random bits */
16885cd48f1eSkevlo 	stoeplitz_to_key(&rss_key, sizeof(rss_key));
16895cd48f1eSkevlo 
16905cd48f1eSkevlo 	/* Fill out hash function seeds */
16915cd48f1eSkevlo 	for (i = 0; i < 10; i++)
16925cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_RDB_RSSRK(i), rss_key[i]);
16935cd48f1eSkevlo 
16945cd48f1eSkevlo 	/* Perform hash on these packet types */
16955cd48f1eSkevlo 	rss_field = NGBE_RDB_RA_CTL_RSS_EN | NGBE_RDB_RA_CTL_RSS_IPV4 |
16965cd48f1eSkevlo 	    NGBE_RDB_RA_CTL_RSS_IPV4_TCP | NGBE_RDB_RA_CTL_RSS_IPV6 |
16975cd48f1eSkevlo 	    NGBE_RDB_RA_CTL_RSS_IPV6_TCP;
16985cd48f1eSkevlo 
16995cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_RDB_RA_CTL, rss_field);
17005cd48f1eSkevlo }
17015cd48f1eSkevlo 
17025cd48f1eSkevlo int
ngbe_initialize_transmit_unit(struct ngbe_softc * sc)17035cd48f1eSkevlo ngbe_initialize_transmit_unit(struct ngbe_softc *sc)
17045cd48f1eSkevlo {
17055cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
17065cd48f1eSkevlo 	struct ifnet *ifp = &sc->sc_ac.ac_if;
17075cd48f1eSkevlo 	struct tx_ring *txr;
17085cd48f1eSkevlo 	uint64_t tdba;
17095cd48f1eSkevlo 	uint32_t txdctl;
17105cd48f1eSkevlo 	int i, wait_loop = NGBE_MAX_RX_DESC_POLL;;
17115cd48f1eSkevlo 	int error = 0;
17125cd48f1eSkevlo 
17135cd48f1eSkevlo 	/* TDM_CTL.TE must be before Tx queues are enabled */
17145cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE,
17155cd48f1eSkevlo 	    NGBE_TDM_CTL_TE);
17165cd48f1eSkevlo 
17175cd48f1eSkevlo 	/* Setup the base and length of the Tx descriptor ring. */
17185cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++) {
17195cd48f1eSkevlo 		txr = &sc->tx_rings[i];
17205cd48f1eSkevlo 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
17215cd48f1eSkevlo 
17225cd48f1eSkevlo 		/* Disable queue to avoid issues while updating state */
17235cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), NGBE_PX_TR_CFG_SWFLSH);
17245cd48f1eSkevlo 		NGBE_WRITE_FLUSH(hw);
17255cd48f1eSkevlo 
17265cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_TR_BAL(i),
17275cd48f1eSkevlo 		    (tdba & 0x00000000ffffffffULL));
17285cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_TR_BAH(i), (tdba >> 32));
17295cd48f1eSkevlo 
17305cd48f1eSkevlo 		/* Reset head and tail pointers */
17315cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_TR_RP(i), 0);
17325cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_TR_WP(i), 0);
17335cd48f1eSkevlo 
17345cd48f1eSkevlo 		txr->watchdog_timer = 0;
17355cd48f1eSkevlo 
17365cd48f1eSkevlo 		txdctl = NGBE_PX_TR_CFG_ENABLE;
17375cd48f1eSkevlo 		txdctl |= 4 << NGBE_PX_TR_CFG_TR_SIZE_SHIFT;
17385cd48f1eSkevlo 		txdctl |= 0x20 << NGBE_PX_TR_CFG_WTHRESH_SHIFT;
17395cd48f1eSkevlo 
17405cd48f1eSkevlo 		/* Enable queue */
17415cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_TR_CFG(i), txdctl);
17425cd48f1eSkevlo 
17435cd48f1eSkevlo 		/* Poll to verify queue is enabled */
17445cd48f1eSkevlo 		do {
17455cd48f1eSkevlo 			msec_delay(1);
17465cd48f1eSkevlo 			txdctl = NGBE_READ_REG(hw, NGBE_PX_TR_CFG(i));
17475cd48f1eSkevlo 		} while (--wait_loop && !(txdctl & NGBE_PX_TR_CFG_ENABLE));
17485cd48f1eSkevlo 		if (!wait_loop) {
17495cd48f1eSkevlo 			printf("%s: Tx queue %d not set within "
17505cd48f1eSkevlo 			    "the polling period\n", DEVNAME(sc), i);
17515cd48f1eSkevlo 			error = ETIMEDOUT;
17525cd48f1eSkevlo 			goto out;
17535cd48f1eSkevlo 		}
17545cd48f1eSkevlo 	}
17555cd48f1eSkevlo 
17565cd48f1eSkevlo 	ifp->if_timer = 0;
17575cd48f1eSkevlo 
17585cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_BUF_AE, 0x3ff, 0x10);
17595cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_CTL, 0x2, 0);
17605cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_TSEC_CTL, 0x1, 1);
17615cd48f1eSkevlo 
17625cd48f1eSkevlo 	/* Enable mac transmitter */
17635cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE,
17645cd48f1eSkevlo 	    NGBE_MAC_TX_CFG_TE);
17655cd48f1eSkevlo out:
17665cd48f1eSkevlo 	return error;
17675cd48f1eSkevlo }
17685cd48f1eSkevlo 
17695cd48f1eSkevlo int
ngbe_intr_link(void * arg)17705cd48f1eSkevlo ngbe_intr_link(void *arg)
17715cd48f1eSkevlo {
17725cd48f1eSkevlo 	struct ngbe_softc *sc = (struct ngbe_softc *)arg;
17735cd48f1eSkevlo 	uint32_t eicr;
17745cd48f1eSkevlo 
17755cd48f1eSkevlo 	eicr = ngbe_misc_isb(sc, NGBE_ISB_MISC);
17765cd48f1eSkevlo 	if (eicr & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) {
17775cd48f1eSkevlo 		KERNEL_LOCK();
17785cd48f1eSkevlo 		ngbe_handle_phy_event(sc);
17795cd48f1eSkevlo 		ngbe_update_link_status(sc);
17805cd48f1eSkevlo 		KERNEL_UNLOCK();
17815cd48f1eSkevlo 	}
17825cd48f1eSkevlo 	ngbe_enable_queue(sc, sc->linkvec);
17835cd48f1eSkevlo 	return 1;
17845cd48f1eSkevlo }
17855cd48f1eSkevlo 
17865cd48f1eSkevlo int
ngbe_intr_queue(void * arg)17875cd48f1eSkevlo ngbe_intr_queue(void *arg)
17885cd48f1eSkevlo {
17895cd48f1eSkevlo 	struct ngbe_queue *nq = arg;
17905cd48f1eSkevlo 	struct ngbe_softc *sc = nq->sc;
17915cd48f1eSkevlo 	struct ifnet *ifp = &sc->sc_ac.ac_if;
17925cd48f1eSkevlo 	struct rx_ring *rxr = nq->rxr;
17935cd48f1eSkevlo 	struct tx_ring *txr = nq->txr;
17945cd48f1eSkevlo 
17955cd48f1eSkevlo 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
17965cd48f1eSkevlo 		ngbe_rxeof(rxr);
17975cd48f1eSkevlo 		ngbe_txeof(txr);
17985cd48f1eSkevlo 		ngbe_rxrefill(rxr);
17995cd48f1eSkevlo 	}
18005cd48f1eSkevlo 
18015cd48f1eSkevlo 	ngbe_enable_queue(sc, nq->msix);
18025cd48f1eSkevlo 
18035cd48f1eSkevlo 	return 1;
18045cd48f1eSkevlo }
18055cd48f1eSkevlo 
18065cd48f1eSkevlo void
ngbe_init_eeprom_params(struct ngbe_hw * hw)18075cd48f1eSkevlo ngbe_init_eeprom_params(struct ngbe_hw *hw)
18085cd48f1eSkevlo {
18095cd48f1eSkevlo 	struct ngbe_eeprom_info *eeprom = &hw->eeprom;
18105cd48f1eSkevlo 
18115cd48f1eSkevlo 	if (eeprom->type == ngbe_eeprom_uninitialized) {
18125cd48f1eSkevlo 		eeprom->type = ngbe_eeprom_none;
18135cd48f1eSkevlo 
18145cd48f1eSkevlo 		if (!(NGBE_READ_REG(hw, NGBE_SPI_STATUS) &
18155cd48f1eSkevlo 		    NGBE_SPI_STATUS_FLASH_BYPASS))
18165cd48f1eSkevlo 			eeprom->type = ngbe_flash;
18175cd48f1eSkevlo 	}
18185cd48f1eSkevlo 
18195cd48f1eSkevlo 	eeprom->sw_region_offset = 0x80;
18205cd48f1eSkevlo }
18215cd48f1eSkevlo 
18225cd48f1eSkevlo int
ngbe_init_hw(struct ngbe_softc * sc)18235cd48f1eSkevlo ngbe_init_hw(struct ngbe_softc *sc)
18245cd48f1eSkevlo {
18255cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
18265cd48f1eSkevlo 	int status;
18275cd48f1eSkevlo 
18285cd48f1eSkevlo 	/* Reset the hardware */
18295cd48f1eSkevlo 	status = hw->mac.ops.reset_hw(sc);
18305cd48f1eSkevlo 
18315cd48f1eSkevlo 	if (!status)
18325cd48f1eSkevlo 		status = hw->mac.ops.start_hw(sc);
18335cd48f1eSkevlo 
18345cd48f1eSkevlo 	return status;
18355cd48f1eSkevlo }
18365cd48f1eSkevlo 
/*
 * Populate the hw-layer function pointer tables (PHY, MAC, EEPROM ops)
 * and the chip capability constants.  Called once from
 * ngbe_init_shared_code() before any of these ops are used.
 */
void
ngbe_init_ops(struct ngbe_hw *hw)
{
	struct ngbe_mac_info *mac = &hw->mac;
	struct ngbe_phy_info *phy = &hw->phy;
	struct ngbe_eeprom_info *eeprom = &hw->eeprom;

	/* PHY */
	phy->ops.reset = ngbe_phy_reset;
	phy->ops.read_reg = ngbe_phy_read_reg;
	phy->ops.write_reg = ngbe_phy_write_reg;
	phy->ops.setup_link = ngbe_phy_setup_link;
	phy->ops.phy_led_ctrl = ngbe_phy_led_ctrl;
	phy->ops.check_overtemp = ngbe_phy_check_overtemp;
	phy->ops.identify = ngbe_phy_identify;
	phy->ops.init = ngbe_phy_init;
	phy->ops.check_event = ngbe_phy_check_event;
	phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause;
	phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause;
	phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement;
	phy->ops.setup_once = ngbe_phy_setup;

	/* MAC */
	mac->ops.init_hw = ngbe_init_hw;
	mac->ops.clear_hw_cntrs = ngbe_clear_hw_cntrs;
	mac->ops.get_mac_addr = ngbe_get_mac_addr;
	mac->ops.stop_adapter = ngbe_stop_adapter;
	mac->ops.get_bus_info = ngbe_get_bus_info;
	mac->ops.set_lan_id = ngbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ngbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ngbe_release_swfw_sync;
	mac->ops.reset_hw = ngbe_reset_hw;
	mac->ops.get_media_type = ngbe_get_media_type;
	mac->ops.disable_sec_rx_path = ngbe_disable_sec_rx_path;
	mac->ops.enable_sec_rx_path = ngbe_enable_sec_rx_path;
	mac->ops.enable_rx_dma = ngbe_enable_rx_dma;
	mac->ops.start_hw = ngbe_start_hw;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ngbe_set_rar;
	mac->ops.init_rx_addrs = ngbe_init_rx_addrs;
	mac->ops.update_mc_addr_list = ngbe_update_mc_addr_list;
	mac->ops.enable_rx = ngbe_enable_rx;
	mac->ops.disable_rx = ngbe_disable_rx;
	mac->ops.clear_vfta = ngbe_clear_vfta;
	mac->ops.init_uta_tables = ngbe_init_uta_tables;

	/* Flow Control */
	mac->ops.fc_enable = ngbe_fc_enable;
	mac->ops.setup_fc = ngbe_setup_fc;

	/* Link */
	mac->ops.check_link = ngbe_check_mac_link;
	mac->ops.setup_rxpba = ngbe_set_rxpba;

	/* Chip capability constants. */
	mac->mcft_size = NGBE_SP_MC_TBL_SIZE;
	mac->vft_size = NGBE_SP_VFT_TBL_SIZE;
	mac->num_rar_entries = NGBE_SP_RAR_ENTRIES;
	mac->rx_pb_size = NGBE_SP_RX_PB_SIZE;
	mac->max_rx_queues = NGBE_SP_MAX_RX_QUEUES;
	mac->max_tx_queues = NGBE_SP_MAX_TX_QUEUES;

	/* EEPROM */
	eeprom->ops.init_params = ngbe_init_eeprom_params;
	eeprom->ops.eeprom_chksum_cap_st = ngbe_eepromcheck_cap;
	eeprom->ops.phy_led_oem_chk = ngbe_phy_led_oem_chk;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ngbe_set_fw_drv_ver;
	mac->ops.init_thermal_sensor_thresh = ngbe_init_thermal_sensor_thresh;
}
19075cd48f1eSkevlo 
19085cd48f1eSkevlo void
ngbe_init_rx_addrs(struct ngbe_softc * sc)19095cd48f1eSkevlo ngbe_init_rx_addrs(struct ngbe_softc *sc)
19105cd48f1eSkevlo {
19115cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
19125cd48f1eSkevlo 	uint32_t rar_entries = hw->mac.num_rar_entries;
19135cd48f1eSkevlo 	uint32_t i, psrctl;
19145cd48f1eSkevlo 
19155cd48f1eSkevlo 	/*
19165cd48f1eSkevlo 	 * If the current mac address is valid, assume it is a software
19175cd48f1eSkevlo 	 * override to the permanent address.
19185cd48f1eSkevlo 	 * Otherwise, use the permanent address from the eeprom.
19195cd48f1eSkevlo 	 */
19205cd48f1eSkevlo 	if (ngbe_validate_mac_addr(hw->mac.addr)) {
19215cd48f1eSkevlo 		/* Get the MAC address from the RAR0 for later reference */
19225cd48f1eSkevlo 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
19235cd48f1eSkevlo 	}
19245cd48f1eSkevlo 	hw->addr_ctrl.overflow_promisc = 0;
19255cd48f1eSkevlo 	hw->addr_ctrl.rar_used_count = 1;
19265cd48f1eSkevlo 
19275cd48f1eSkevlo 	/* Zero out the other receive addresses. */
19285cd48f1eSkevlo 	for (i = 1; i < rar_entries; i++) {
19295cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, i);
19305cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_L, 0);
19315cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_H, 0);
19325cd48f1eSkevlo 	}
19335cd48f1eSkevlo 
19345cd48f1eSkevlo 	/* Clear the MTA */
19355cd48f1eSkevlo 	hw->addr_ctrl.mta_in_use = 0;
19365cd48f1eSkevlo 	psrctl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
19375cd48f1eSkevlo 	psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
19385cd48f1eSkevlo 	psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT;
19395cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctl);
19405cd48f1eSkevlo 
19415cd48f1eSkevlo 	for (i = 0; i < hw->mac.mcft_size; i++)
19425cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_MC_TBL(i), 0);
19435cd48f1eSkevlo 
19445cd48f1eSkevlo 	hw->mac.ops.init_uta_tables(hw);
19455cd48f1eSkevlo }
19465cd48f1eSkevlo 
19475cd48f1eSkevlo void
ngbe_init_shared_code(struct ngbe_softc * sc)19485cd48f1eSkevlo ngbe_init_shared_code(struct ngbe_softc *sc)
19495cd48f1eSkevlo {
19505cd48f1eSkevlo 	struct ngbe_osdep *os = &sc->osdep;
19515cd48f1eSkevlo 	struct pci_attach_args *pa = &os->os_pa;
19525cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
19535cd48f1eSkevlo 
19545cd48f1eSkevlo 	hw->subsystem_device_id = PCI_PRODUCT(pci_conf_read(pa->pa_pc,
19555cd48f1eSkevlo 	    pa->pa_tag, PCI_SUBSYS_ID_REG));
19565cd48f1eSkevlo 
19575cd48f1eSkevlo 	hw->phy.type = ngbe_phy_internal;
19585cd48f1eSkevlo 
19595cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_MDIO_CLAUSE_SELECT, 0xf);
19605cd48f1eSkevlo 
19615cd48f1eSkevlo 	ngbe_init_ops(hw);
19625cd48f1eSkevlo 
19635cd48f1eSkevlo 	/* Default flow control settings. */
19645cd48f1eSkevlo 	hw->fc.requested_mode = ngbe_fc_full;
19655cd48f1eSkevlo 	hw->fc.current_mode = ngbe_fc_full;
19665cd48f1eSkevlo 
19675cd48f1eSkevlo 	hw->fc.pause_time = NGBE_DEFAULT_FCPAUSE;
19685cd48f1eSkevlo 	hw->fc.disable_fc_autoneg = 0;
19695cd48f1eSkevlo }
19705cd48f1eSkevlo 
19715cd48f1eSkevlo void
ngbe_init_thermal_sensor_thresh(struct ngbe_hw * hw)19725cd48f1eSkevlo ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw)
19735cd48f1eSkevlo {
19745cd48f1eSkevlo 	/* Only support thermal sensors attached to SP physical port 0 */
19755cd48f1eSkevlo 	if (hw->bus.lan_id)
19765cd48f1eSkevlo 		return;
19775cd48f1eSkevlo 
19785cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_TS_INT_EN, NGBE_TS_INT_EN_DALARM_INT_EN |
19795cd48f1eSkevlo 	    NGBE_TS_INT_EN_ALARM_INT_EN);
19805cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_TS_EN, NGBE_TS_EN_ENA);
19815cd48f1eSkevlo 
19825cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_TS_ALARM_THRE, 0x344);
19835cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_TS_DALARM_THRE, 0x330);
19845cd48f1eSkevlo }
19855cd48f1eSkevlo 
void
ngbe_init_uta_tables(struct ngbe_hw *hw)
{
	int idx;

	/* Zero all 128 entries of the unicast table array. */
	for (idx = 0; idx < 128; idx++)
		NGBE_WRITE_REG(hw, NGBE_PSR_UC_TBL(idx), 0);
}
19945cd48f1eSkevlo 
/*
 * Resolve the flow control mode after link autonegotiation.  On any
 * failure (autoneg disabled, no link, or negotiation error) the
 * requested mode is applied unchanged and fc_was_autonegged is
 * cleared.
 */
void
ngbe_fc_autoneg(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t speed;
	int link_up;
	int error = EINVAL;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg) {
		printf("%s: flow control autoneg is disabled\n", DEVNAME(sc));
		goto out;
	}

	hw->mac.ops.check_link(hw, &speed, &link_up, 0);
	if (!link_up)
		goto out;

	switch (hw->phy.media_type) {
	/*
	 * Autoneg flow control on fiber adapters: nothing to negotiate
	 * here; error stays EINVAL so the requested mode is kept.
	 */
	case ngbe_media_type_fiber:
		break;

	/* Autoneg flow control on copper adapters */
	case ngbe_media_type_copper:
		error = ngbe_fc_autoneg_copper(sc);
		break;
	default:
		break;
	}
out:
	/* On failure fall back to the mode the user requested. */
	if (error) {
		hw->fc.fc_was_autonegged = 0;
		hw->fc.current_mode = hw->fc.requested_mode;
	} else
		hw->fc.fc_was_autonegged = 1;
}
20375cd48f1eSkevlo 
20385cd48f1eSkevlo int
ngbe_fc_autoneg_copper(struct ngbe_softc * sc)20395cd48f1eSkevlo ngbe_fc_autoneg_copper(struct ngbe_softc *sc)
20405cd48f1eSkevlo {
20415cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
20425cd48f1eSkevlo 	uint8_t technology_ability_reg, lp_technology_ability_reg;
20435cd48f1eSkevlo 
20445cd48f1eSkevlo 	technology_ability_reg = lp_technology_ability_reg = 0;
20455cd48f1eSkevlo 	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
20465cd48f1eSkevlo 		hw->phy.ops.get_adv_pause(hw, &technology_ability_reg);
20475cd48f1eSkevlo 		hw->phy.ops.get_lp_adv_pause(hw, &lp_technology_ability_reg);
20485cd48f1eSkevlo 	}
20495cd48f1eSkevlo 
20505cd48f1eSkevlo 	return ngbe_negotiate_fc(sc, (uint32_t)technology_ability_reg,
20515cd48f1eSkevlo 	    (uint32_t)lp_technology_ability_reg, NGBE_TAF_SYM_PAUSE,
20525cd48f1eSkevlo 	    NGBE_TAF_ASM_PAUSE, NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE);
20535cd48f1eSkevlo }
20545cd48f1eSkevlo 
/*
 * ngbe_fc_enable: apply the negotiated 802.3x flow control settings to
 * the hardware.
 *
 * Validates the pause-time/water-mark configuration, runs flow control
 * autonegotiation, then programs the MAC Rx flow control enable, the
 * RDB 802.3x transmit enable, the XON/XOFF water marks, the pause time
 * and the refresh threshold.
 *
 * Returns 0 on success or EINVAL on an invalid configuration.
 */
int
ngbe_fc_enable(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t mflcn, fccfg;
	uint32_t fcrtl, fcrth;
	uint32_t reg;
	int error = 0;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		error = EINVAL;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
		if (!hw->fc.low_water ||
		    hw->fc.low_water >= hw->fc.high_water) {
			printf("%s: invalid water mark configuration\n",
			    DEVNAME(sc));
			error = EINVAL;
			goto out;
		}
	}

	/* Negotiate the fc mode to use */
	ngbe_fc_autoneg(sc);

	/* Disable any previous flow control settings */
	mflcn = NGBE_READ_REG(hw, NGBE_MAC_RX_FLOW_CTRL);
	mflcn &= ~NGBE_MAC_RX_FLOW_CTRL_RFE;

	fccfg = NGBE_READ_REG(hw, NGBE_RDB_RFCC);
	fccfg &= ~NGBE_RDB_RFCC_RFCE_802_3X;

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ngbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ngbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn |= NGBE_MAC_RX_FLOW_CTRL_RFE;
		break;
	case ngbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg |= NGBE_RDB_RFCC_RFCE_802_3X;
		break;
	case ngbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn |= NGBE_MAC_RX_FLOW_CTRL_RFE;
		fccfg |= NGBE_RDB_RFCC_RFCE_802_3X;
		break;
	default:
		printf("%s: flow control param set incorrectly\n", DEVNAME(sc));
		error = EINVAL;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	NGBE_WRITE_REG(hw, NGBE_MAC_RX_FLOW_CTRL, mflcn);
	NGBE_WRITE_REG(hw, NGBE_RDB_RFCC, fccfg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) {
		/* 32Byte granularity */
		fcrtl = (hw->fc.low_water << 10) | NGBE_RDB_RFCL_XONE;
		NGBE_WRITE_REG(hw, NGBE_RDB_RFCL, fcrtl);
		fcrth = (hw->fc.high_water << 10) | NGBE_RDB_RFCH_XOFFE;
	} else {
		NGBE_WRITE_REG(hw, NGBE_RDB_RFCL, 0);
		/*
		 * In order to prevent Tx hangs when the internal Tx
		 * switch is enabled we must set the high water mark
		 * to the Rx packet buffer size - 24KB.  This allows
		 * the Tx switch to function even under heavy Rx
		 * workloads.
		 */
		fcrth = NGBE_READ_REG(hw, NGBE_RDB_PB_SZ) - 24576;
	}

	NGBE_WRITE_REG(hw, NGBE_RDB_RFCH, fcrth);

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010000;
	NGBE_WRITE_REG(hw, NGBE_RDB_RFCV, reg);

	/* Configure flow control refresh threshold value */
	NGBE_WRITE_REG(hw, NGBE_RDB_RFCRT, hw->fc.pause_time / 2);
out:
	return error;
}
21705cd48f1eSkevlo 
21715cd48f1eSkevlo int
ngbe_fmgr_cmd_op(struct ngbe_hw * hw,uint32_t cmd,uint32_t cmd_addr)21725cd48f1eSkevlo ngbe_fmgr_cmd_op(struct ngbe_hw *hw, uint32_t cmd, uint32_t cmd_addr)
21735cd48f1eSkevlo {
21745cd48f1eSkevlo 	uint32_t val;
21755cd48f1eSkevlo 	int timeout = 0;
21765cd48f1eSkevlo 
21775cd48f1eSkevlo 	val = (cmd << SPI_CLK_CMD_OFFSET) | cmd_addr |
21785cd48f1eSkevlo 	    (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET);
21795cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_SPI_CMD, val);
21805cd48f1eSkevlo 	for (;;) {
21815cd48f1eSkevlo 		if (NGBE_READ_REG(hw, NGBE_SPI_STATUS) & 0x1)
21825cd48f1eSkevlo 			break;
21835cd48f1eSkevlo 		if (timeout == SPI_TIME_OUT_VALUE)
21845cd48f1eSkevlo 			return ETIMEDOUT;
21855cd48f1eSkevlo 
21865cd48f1eSkevlo 		timeout++;
21875cd48f1eSkevlo 		DELAY(10);
21885cd48f1eSkevlo 	}
21895cd48f1eSkevlo 
21905cd48f1eSkevlo 	return 0;
21915cd48f1eSkevlo }
21925cd48f1eSkevlo 
21935cd48f1eSkevlo uint32_t
ngbe_flash_read_dword(struct ngbe_hw * hw,uint32_t addr)21945cd48f1eSkevlo ngbe_flash_read_dword(struct ngbe_hw *hw, uint32_t addr)
21955cd48f1eSkevlo {
21965cd48f1eSkevlo 	int status = ngbe_fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr);
21975cd48f1eSkevlo 	if (status)
21985cd48f1eSkevlo 		return status;
21995cd48f1eSkevlo 
22005cd48f1eSkevlo 	return NGBE_READ_REG(hw, NGBE_SPI_DATA);
22015cd48f1eSkevlo }
22025cd48f1eSkevlo 
/*
 * ngbe_calculate_checksum: compute the 8-bit two's-complement checksum
 * of a buffer, i.e. the byte that makes the sum of all bytes plus the
 * checksum equal zero modulo 256.
 *
 * Returns 0 when buffer is NULL.
 */
uint8_t
ngbe_calculate_checksum(uint8_t *buffer, uint32_t length)
{
	const uint8_t *p = buffer;
	uint8_t total = 0;

	if (p == NULL)
		return 0;

	while (length-- > 0)
		total += *p++;

	return (uint8_t)(-total);
}
22165cd48f1eSkevlo 
22175cd48f1eSkevlo int
ngbe_check_flash_load(struct ngbe_softc * sc,uint32_t check_bit)22185cd48f1eSkevlo ngbe_check_flash_load(struct ngbe_softc *sc, uint32_t check_bit)
22195cd48f1eSkevlo {
22205cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
22215cd48f1eSkevlo 	uint32_t reg = 0;
22225cd48f1eSkevlo 	int i, error = 0;
22235cd48f1eSkevlo 
22245cd48f1eSkevlo 	/* if there's flash existing */
22255cd48f1eSkevlo 	if (!(NGBE_READ_REG(hw, NGBE_SPI_STATUS) &
22265cd48f1eSkevlo 	    NGBE_SPI_STATUS_FLASH_BYPASS)) {
22275cd48f1eSkevlo 		/* wait hw load flash done */
22285cd48f1eSkevlo 		for (i = 0; i < NGBE_MAX_FLASH_LOAD_POLL_TIME; i++) {
22295cd48f1eSkevlo 			reg = NGBE_READ_REG(hw, NGBE_SPI_ILDR_STATUS);
22305cd48f1eSkevlo 			if (!(reg & check_bit))
22315cd48f1eSkevlo 				break;
22325cd48f1eSkevlo 			msec_delay(200);
22335cd48f1eSkevlo 		}
22345cd48f1eSkevlo 		if (i == NGBE_MAX_FLASH_LOAD_POLL_TIME) {
22355cd48f1eSkevlo 			error = ETIMEDOUT;
22365cd48f1eSkevlo 			printf("%s: hardware loading flash failed\n",
22375cd48f1eSkevlo 			    DEVNAME(sc));
22385cd48f1eSkevlo 		}
22395cd48f1eSkevlo 	}
22405cd48f1eSkevlo 	return error;
22415cd48f1eSkevlo }
22425cd48f1eSkevlo 
22435cd48f1eSkevlo int
ngbe_check_internal_phy_id(struct ngbe_softc * sc)22445cd48f1eSkevlo ngbe_check_internal_phy_id(struct ngbe_softc *sc)
22455cd48f1eSkevlo {
22465cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
22475cd48f1eSkevlo 	uint16_t phy_id, phy_id_high, phy_id_low;
22485cd48f1eSkevlo 
22495cd48f1eSkevlo 	ngbe_gphy_wait_mdio_access_on(hw);
22505cd48f1eSkevlo 
22515cd48f1eSkevlo 	ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high);
22525cd48f1eSkevlo 	phy_id = phy_id_high << 6;
22535cd48f1eSkevlo 	ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low);
22545cd48f1eSkevlo 	phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10;
22555cd48f1eSkevlo 
22565cd48f1eSkevlo 	if (NGBE_INTERNAL_PHY_ID != phy_id) {
22575cd48f1eSkevlo 		printf("%s: internal phy id 0x%x not supported\n",
22585cd48f1eSkevlo 		    DEVNAME(sc), phy_id);
22595cd48f1eSkevlo 		return ENOTSUP;
22605cd48f1eSkevlo 	} else
22615cd48f1eSkevlo 		hw->phy.id = (uint32_t)phy_id;
22625cd48f1eSkevlo 
22635cd48f1eSkevlo 	return 0;
22645cd48f1eSkevlo }
22655cd48f1eSkevlo 
22665cd48f1eSkevlo int
ngbe_check_mac_link(struct ngbe_hw * hw,uint32_t * speed,int * link_up,int link_up_wait_to_complete)22675cd48f1eSkevlo ngbe_check_mac_link(struct ngbe_hw *hw, uint32_t *speed, int *link_up,
22685cd48f1eSkevlo     int link_up_wait_to_complete)
22695cd48f1eSkevlo {
22705cd48f1eSkevlo 	uint32_t status = 0;
22715cd48f1eSkevlo 	uint16_t speed_sta, value = 0;
22725cd48f1eSkevlo 	int i;
22735cd48f1eSkevlo 
22745cd48f1eSkevlo 	if ((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA) {
22755cd48f1eSkevlo 		*link_up = 1;
22765cd48f1eSkevlo 		*speed = NGBE_LINK_SPEED_1GB_FULL;
22775cd48f1eSkevlo 		return status;
22785cd48f1eSkevlo 	}
22795cd48f1eSkevlo 
22805cd48f1eSkevlo 	if (link_up_wait_to_complete) {
22815cd48f1eSkevlo 		for (i = 0; i < NGBE_LINK_UP_TIME; i++) {
22825cd48f1eSkevlo 			status = hw->phy.ops.read_reg(hw,
22835cd48f1eSkevlo 			    NGBE_MDIO_AUTO_NEG_STATUS,
22845cd48f1eSkevlo 			    NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
22855cd48f1eSkevlo 			if (!status && (value & 0x4)) {
22865cd48f1eSkevlo 				*link_up = 1;
22875cd48f1eSkevlo 				break;
22885cd48f1eSkevlo 			} else
22895cd48f1eSkevlo 				*link_up = 0;
22905cd48f1eSkevlo 			msec_delay(100);
22915cd48f1eSkevlo 		}
22925cd48f1eSkevlo 	} else {
22935cd48f1eSkevlo 		status = hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_STATUS,
22945cd48f1eSkevlo 		    NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
22955cd48f1eSkevlo 		if (!status && (value & 0x4))
22965cd48f1eSkevlo 			*link_up = 1;
22975cd48f1eSkevlo 		else
22985cd48f1eSkevlo 			*link_up = 0;
22995cd48f1eSkevlo 	}
23005cd48f1eSkevlo 
23015cd48f1eSkevlo 	speed_sta = value & 0x38;
23025cd48f1eSkevlo 	if (*link_up) {
23035cd48f1eSkevlo 		if (speed_sta == 0x28)
23045cd48f1eSkevlo 			*speed = NGBE_LINK_SPEED_1GB_FULL;
23055cd48f1eSkevlo 		else if (speed_sta == 0x18)
23065cd48f1eSkevlo 			*speed = NGBE_LINK_SPEED_100_FULL;
23075cd48f1eSkevlo 		else if (speed_sta == 0x8)
23085cd48f1eSkevlo 			*speed = NGBE_LINK_SPEED_10_FULL;
23095cd48f1eSkevlo 	} else
23105cd48f1eSkevlo 		*speed = NGBE_LINK_SPEED_UNKNOWN;
23115cd48f1eSkevlo 
23125cd48f1eSkevlo 	return status;
23135cd48f1eSkevlo }
23145cd48f1eSkevlo 
/*
 * ngbe_check_mng_access: report whether the manageability block is
 * present and may be accessed (1) or not (0).
 */
int
ngbe_check_mng_access(struct ngbe_hw *hw)
{
	return ngbe_mng_present(hw) ? 1 : 0;
}
23225cd48f1eSkevlo 
23235cd48f1eSkevlo int
ngbe_check_reset_blocked(struct ngbe_softc * sc)23245cd48f1eSkevlo ngbe_check_reset_blocked(struct ngbe_softc *sc)
23255cd48f1eSkevlo {
23265cd48f1eSkevlo 	uint32_t mmngc;
23275cd48f1eSkevlo 
23285cd48f1eSkevlo 	mmngc = NGBE_READ_REG(&sc->hw, NGBE_MIS_ST);
23295cd48f1eSkevlo 	if (mmngc & NGBE_MIS_ST_MNG_VETO) {
23305cd48f1eSkevlo 		printf("%s: MNG_VETO bit detected\n", DEVNAME(sc));
23315cd48f1eSkevlo 		return 1;
23325cd48f1eSkevlo 	}
23335cd48f1eSkevlo 
23345cd48f1eSkevlo 	return 0;
23355cd48f1eSkevlo }
23365cd48f1eSkevlo 
/*
 * ngbe_clear_hw_cntrs: read every hardware statistics counter and
 * discard the value.
 * NOTE(review): this presumably relies on the counters being
 * clear-on-read -- confirm against the chip documentation.
 */
void
ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
{
	uint16_t i;

	NGBE_READ_REG(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW);
	NGBE_READ_REG(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW);
	NGBE_READ_REG(hw, NGBE_RDB_LXONTXC);
	NGBE_READ_REG(hw, NGBE_RDB_LXOFFTXC);
	NGBE_READ_REG(hw, NGBE_MAC_LXOFFRXC);

	/* Select each of the 8 priorities in turn, then read its counter. */
	for (i = 0; i < 8; i++) {
		NGBE_WRITE_REG_MASK(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_UP,
		    i << 16);
		NGBE_READ_REG(hw, NGBE_MAC_PXOFFRXC);
	}

	NGBE_READ_REG(hw, NGBE_PX_GPRC);
	NGBE_READ_REG(hw, NGBE_PX_GPTC);
	NGBE_READ_REG(hw, NGBE_PX_GORC_MSB);
	NGBE_READ_REG(hw, NGBE_PX_GOTC_MSB);

	NGBE_READ_REG(hw, NGBE_RX_BC_FRAMES_GOOD_LOW);
	NGBE_READ_REG(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD);
	NGBE_READ_REG(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD);
	NGBE_READ_REG(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);
	NGBE_READ_REG(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW);
	NGBE_READ_REG(hw, NGBE_TX_MC_FRAMES_GOOD_LOW);
	NGBE_READ_REG(hw, NGBE_TX_BC_FRAMES_GOOD_LOW);
	NGBE_READ_REG(hw, NGBE_RDM_DRP_PKT);
}
23685cd48f1eSkevlo 
23695cd48f1eSkevlo void
ngbe_clear_vfta(struct ngbe_hw * hw)23705cd48f1eSkevlo ngbe_clear_vfta(struct ngbe_hw *hw)
23715cd48f1eSkevlo {
23725cd48f1eSkevlo 	uint32_t offset;
23735cd48f1eSkevlo 
23745cd48f1eSkevlo 	for (offset = 0; offset < hw->mac.vft_size; offset++) {
23755cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_TBL(offset), 0);
23765cd48f1eSkevlo 		/* Errata 5 */
23775cd48f1eSkevlo 		hw->mac.vft_shadow[offset] = 0;
23785cd48f1eSkevlo 	}
23795cd48f1eSkevlo 
23805cd48f1eSkevlo 	for (offset = 0; offset < NGBE_PSR_VLAN_SWC_ENTRIES; offset++) {
23815cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC_IDX, offset);
23825cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC, 0);
23835cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_VLAN_SWC_VM_L, 0);
23845cd48f1eSkevlo 	}
23855cd48f1eSkevlo }
23865cd48f1eSkevlo 
23875cd48f1eSkevlo void
ngbe_configure_ivars(struct ngbe_softc * sc)23885cd48f1eSkevlo ngbe_configure_ivars(struct ngbe_softc *sc)
23895cd48f1eSkevlo {
23905cd48f1eSkevlo 	struct ngbe_queue *nq = sc->queues;
23915cd48f1eSkevlo 	uint32_t newitr;
23925cd48f1eSkevlo 	int i;
23935cd48f1eSkevlo 
23945cd48f1eSkevlo 	/* Populate MSIX to EITR select */
23955cd48f1eSkevlo 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITRSEL, 0);
23965cd48f1eSkevlo 
23975cd48f1eSkevlo 	newitr = (4000000 / NGBE_MAX_INTS_PER_SEC) & NGBE_MAX_EITR;
23985cd48f1eSkevlo 	newitr |= NGBE_PX_ITR_CNT_WDIS;
23995cd48f1eSkevlo 
24005cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++, nq++) {
24015cd48f1eSkevlo 		/* Rx queue entry */
24025cd48f1eSkevlo 		ngbe_set_ivar(sc, i, nq->msix, 0);
24035cd48f1eSkevlo 		/* Tx queue entry */
24045cd48f1eSkevlo 		ngbe_set_ivar(sc, i, nq->msix, 1);
24055cd48f1eSkevlo 		NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITR(nq->msix), newitr);
24065cd48f1eSkevlo 	}
24075cd48f1eSkevlo 
24085cd48f1eSkevlo 	/* For the Link interrupt */
24095cd48f1eSkevlo 	ngbe_set_ivar(sc, 0, sc->linkvec, -1);
24105cd48f1eSkevlo 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ITR(sc->linkvec), 1950);
24115cd48f1eSkevlo }
24125cd48f1eSkevlo 
24135cd48f1eSkevlo void
ngbe_configure_pb(struct ngbe_softc * sc)24145cd48f1eSkevlo ngbe_configure_pb(struct ngbe_softc *sc)
24155cd48f1eSkevlo {
24165cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
24175cd48f1eSkevlo 
24185cd48f1eSkevlo 	hw->mac.ops.setup_rxpba(hw, 0, 0, PBA_STRATEGY_EQUAL);
24195cd48f1eSkevlo 	ngbe_pbthresh_setup(sc);
24205cd48f1eSkevlo }
24215cd48f1eSkevlo 
24225cd48f1eSkevlo void
ngbe_disable_intr(struct ngbe_softc * sc)24235cd48f1eSkevlo ngbe_disable_intr(struct ngbe_softc *sc)
24245cd48f1eSkevlo {
24255cd48f1eSkevlo 	struct ngbe_queue *nq;
24265cd48f1eSkevlo 	int i;
24275cd48f1eSkevlo 
24285cd48f1eSkevlo 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_MISC_IEN, 0);
24295cd48f1eSkevlo 	for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++)
24305cd48f1eSkevlo 		ngbe_disable_queue(sc, nq->msix);
24315cd48f1eSkevlo 	NGBE_WRITE_FLUSH(&sc->hw);
24325cd48f1eSkevlo }
24335cd48f1eSkevlo 
24345cd48f1eSkevlo int
ngbe_disable_pcie_master(struct ngbe_softc * sc)24355cd48f1eSkevlo ngbe_disable_pcie_master(struct ngbe_softc *sc)
24365cd48f1eSkevlo {
24375cd48f1eSkevlo 	int i, error = 0;
24385cd48f1eSkevlo 
24395cd48f1eSkevlo 	/* Exit if master requests are blocked */
24405cd48f1eSkevlo 	if (!(NGBE_READ_REG(&sc->hw, NGBE_PX_TRANSACTION_PENDING)))
24415cd48f1eSkevlo 		goto out;
24425cd48f1eSkevlo 
24435cd48f1eSkevlo 	/* Poll for master request bit to clear */
24445cd48f1eSkevlo 	for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
24455cd48f1eSkevlo 		DELAY(100);
24465cd48f1eSkevlo 		if (!(NGBE_READ_REG(&sc->hw, NGBE_PX_TRANSACTION_PENDING)))
24475cd48f1eSkevlo 			goto out;
24485cd48f1eSkevlo 	}
24495cd48f1eSkevlo 	printf("%s: PCIe transaction pending bit did not clear\n",
24505cd48f1eSkevlo 	    DEVNAME(sc));
24515cd48f1eSkevlo 	error = ETIMEDOUT;
24525cd48f1eSkevlo out:
24535cd48f1eSkevlo 	return error;
24545cd48f1eSkevlo }
24555cd48f1eSkevlo 
24565cd48f1eSkevlo void
ngbe_disable_queue(struct ngbe_softc * sc,uint32_t vector)24575cd48f1eSkevlo ngbe_disable_queue(struct ngbe_softc *sc, uint32_t vector)
24585cd48f1eSkevlo {
24595cd48f1eSkevlo 	uint64_t queue = 1ULL << vector;
24605cd48f1eSkevlo 	uint32_t mask;
24615cd48f1eSkevlo 
24625cd48f1eSkevlo 	mask = (queue & 0xffffffff);
24635cd48f1eSkevlo 	if (mask)
24645cd48f1eSkevlo 		NGBE_WRITE_REG(&sc->hw, NGBE_PX_IMS, mask);
24655cd48f1eSkevlo }
24665cd48f1eSkevlo 
24675cd48f1eSkevlo void
ngbe_disable_rx(struct ngbe_hw * hw)24685cd48f1eSkevlo ngbe_disable_rx(struct ngbe_hw *hw)
24695cd48f1eSkevlo {
24705cd48f1eSkevlo 	uint32_t rxctrl, psrctrl;
24715cd48f1eSkevlo 
24725cd48f1eSkevlo 	rxctrl = NGBE_READ_REG(hw, NGBE_RDB_PB_CTL);
24735cd48f1eSkevlo 	if (rxctrl & NGBE_RDB_PB_CTL_PBEN) {
24745cd48f1eSkevlo 		psrctrl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
24755cd48f1eSkevlo 		if (psrctrl & NGBE_PSR_CTL_SW_EN) {
24765cd48f1eSkevlo 			psrctrl &= ~NGBE_PSR_CTL_SW_EN;
24775cd48f1eSkevlo 			NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctrl);
24785cd48f1eSkevlo 			hw->mac.set_lben = 1;
24795cd48f1eSkevlo 		} else
24805cd48f1eSkevlo 			hw->mac.set_lben = 0;
24815cd48f1eSkevlo 		rxctrl &= ~NGBE_RDB_PB_CTL_PBEN;
24825cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_RDB_PB_CTL, rxctrl);
24835cd48f1eSkevlo 
24845cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE,
24855cd48f1eSkevlo 		    0);
24865cd48f1eSkevlo 	}
24875cd48f1eSkevlo }
24885cd48f1eSkevlo 
24895cd48f1eSkevlo void
ngbe_disable_sec_rx_path(struct ngbe_hw * hw)24905cd48f1eSkevlo ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
24915cd48f1eSkevlo {
24925cd48f1eSkevlo 	uint32_t secrxreg;
24935cd48f1eSkevlo 	int i;
24945cd48f1eSkevlo 
24955cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_RX_DIS,
24965cd48f1eSkevlo 	    NGBE_RSEC_CTL_RX_DIS);
24975cd48f1eSkevlo 	for (i = 0; i < 40; i++) {
24985cd48f1eSkevlo 		secrxreg = NGBE_READ_REG(hw, NGBE_RSEC_ST);
24995cd48f1eSkevlo 		if (secrxreg & NGBE_RSEC_ST_RSEC_RDY)
25005cd48f1eSkevlo 			break;
25015cd48f1eSkevlo 		else
25025cd48f1eSkevlo 			DELAY(1000);
25035cd48f1eSkevlo 	}
25045cd48f1eSkevlo }
25055cd48f1eSkevlo 
25065cd48f1eSkevlo int
ngbe_eepromcheck_cap(struct ngbe_softc * sc,uint16_t offset,uint32_t * data)25075cd48f1eSkevlo ngbe_eepromcheck_cap(struct ngbe_softc *sc, uint16_t offset, uint32_t *data)
25085cd48f1eSkevlo {
25095cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
25105cd48f1eSkevlo 	struct ngbe_hic_read_shadow_ram buffer;
25115cd48f1eSkevlo 	uint32_t tmp;
25125cd48f1eSkevlo 	int status;
25135cd48f1eSkevlo 
25145cd48f1eSkevlo 	buffer.hdr.req.cmd = FW_EEPROM_CHECK_STATUS;
25155cd48f1eSkevlo 	buffer.hdr.req.buf_lenh = 0;
25165cd48f1eSkevlo 	buffer.hdr.req.buf_lenl = 0;
25175cd48f1eSkevlo 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
25185cd48f1eSkevlo 
25195cd48f1eSkevlo 	/* Convert offset from words to bytes */
25205cd48f1eSkevlo 	buffer.address = 0;
25215cd48f1eSkevlo 	/* one word */
25225cd48f1eSkevlo 	buffer.length = 0;
25235cd48f1eSkevlo 
25245cd48f1eSkevlo 	status = ngbe_host_interface_command(sc, (uint32_t *)&buffer,
25255cd48f1eSkevlo 	    sizeof(buffer), NGBE_HI_COMMAND_TIMEOUT, 0);
25265cd48f1eSkevlo 	if (status)
25275cd48f1eSkevlo 		return status;
25285cd48f1eSkevlo 
25295cd48f1eSkevlo 	if (ngbe_check_mng_access(hw)) {
25305cd48f1eSkevlo 		tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 1);
25315cd48f1eSkevlo 		if (tmp == NGBE_CHECKSUM_CAP_ST_PASS)
25325cd48f1eSkevlo 			status = 0;
25335cd48f1eSkevlo 		else
25345cd48f1eSkevlo 			status = EINVAL;
25355cd48f1eSkevlo 	} else
25365cd48f1eSkevlo 		status = EINVAL;
25375cd48f1eSkevlo 
25385cd48f1eSkevlo 	return status;
25395cd48f1eSkevlo }
25405cd48f1eSkevlo 
25415cd48f1eSkevlo void
ngbe_enable_intr(struct ngbe_softc * sc)25425cd48f1eSkevlo ngbe_enable_intr(struct ngbe_softc *sc)
25435cd48f1eSkevlo {
25445cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
25455cd48f1eSkevlo 	struct ngbe_queue *nq;
25465cd48f1eSkevlo 	uint32_t mask;
25475cd48f1eSkevlo 	int i;
25485cd48f1eSkevlo 
25495cd48f1eSkevlo 	/* Enable misc interrupt */
25505cd48f1eSkevlo 	mask = NGBE_PX_MISC_IEN_MASK;
25515cd48f1eSkevlo 
25525cd48f1eSkevlo 	mask |= NGBE_PX_MISC_IEN_OVER_HEAT;
25535cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_GPIO_DDR, 0x1);
25545cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_GPIO_INTEN, 0x3);
25555cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_GPIO_INTTYPE_LEVEL, 0x0);
25565cd48f1eSkevlo 
25575cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_GPIO_POLARITY, 0x3);
25585cd48f1eSkevlo 
25595cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_PX_MISC_IEN, mask);
25605cd48f1eSkevlo 
25615cd48f1eSkevlo 	/* Enable all queues */
25625cd48f1eSkevlo 	for (i = 0, nq = sc->queues; i < sc->sc_nqueues; i++, nq++)
25635cd48f1eSkevlo 		ngbe_enable_queue(sc, nq->msix);
25645cd48f1eSkevlo 	NGBE_WRITE_FLUSH(hw);
25655cd48f1eSkevlo 
25665cd48f1eSkevlo 	ngbe_enable_queue(sc, sc->linkvec);
25675cd48f1eSkevlo }
25685cd48f1eSkevlo 
25695cd48f1eSkevlo void
ngbe_enable_queue(struct ngbe_softc * sc,uint32_t vector)25705cd48f1eSkevlo ngbe_enable_queue(struct ngbe_softc *sc, uint32_t vector)
25715cd48f1eSkevlo {
25725cd48f1eSkevlo 	uint64_t queue = 1ULL << vector;
25735cd48f1eSkevlo 	uint32_t mask;
25745cd48f1eSkevlo 
25755cd48f1eSkevlo 	mask = (queue & 0xffffffff);
25765cd48f1eSkevlo 	if (mask)
25775cd48f1eSkevlo 		NGBE_WRITE_REG(&sc->hw, NGBE_PX_IMC, mask);
25785cd48f1eSkevlo }
25795cd48f1eSkevlo 
25805cd48f1eSkevlo void
ngbe_enable_rx(struct ngbe_hw * hw)25815cd48f1eSkevlo ngbe_enable_rx(struct ngbe_hw *hw)
25825cd48f1eSkevlo {
25835cd48f1eSkevlo 	uint32_t val;
25845cd48f1eSkevlo 
25855cd48f1eSkevlo 	/* Enable mac receiver */
25865cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_RE,
25875cd48f1eSkevlo 	    NGBE_MAC_RX_CFG_RE);
25885cd48f1eSkevlo 
25895cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, 0x2, 0);
25905cd48f1eSkevlo 
25915cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_RDB_PB_CTL, NGBE_RDB_PB_CTL_PBEN,
25925cd48f1eSkevlo 	    NGBE_RDB_PB_CTL_PBEN);
25935cd48f1eSkevlo 
25945cd48f1eSkevlo 	if (hw->mac.set_lben) {
25955cd48f1eSkevlo 		val = NGBE_READ_REG(hw, NGBE_PSR_CTL);
25965cd48f1eSkevlo 		val |= NGBE_PSR_CTL_SW_EN;
25975cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_CTL, val);
25985cd48f1eSkevlo 		hw->mac.set_lben = 0;
25995cd48f1eSkevlo 	}
26005cd48f1eSkevlo }
26015cd48f1eSkevlo 
/*
 * ngbe_enable_rx_dma: toggle the Rx unit according to the PBEN bit in
 * reg, with the security engine held off around the change.
 */
void
ngbe_enable_rx_dma(struct ngbe_hw *hw, uint32_t reg)
{
	/*
	 * Workaround for emerald silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */
	hw->mac.ops.disable_sec_rx_path(hw);

	if (reg & NGBE_RDB_PB_CTL_PBEN)
		hw->mac.ops.enable_rx(hw);
	else
		hw->mac.ops.disable_rx(hw);

	hw->mac.ops.enable_sec_rx_path(hw);
}
26205cd48f1eSkevlo 
/*
 * ngbe_enable_sec_rx_path: re-enable the Rx security block by clearing
 * its RX_DIS bit; counterpart of ngbe_disable_sec_rx_path().
 */
void
ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
{
	NGBE_WRITE_REG_MASK(hw, NGBE_RSEC_CTL, NGBE_RSEC_CTL_RX_DIS, 0);
	NGBE_WRITE_FLUSH(hw);
}
26275cd48f1eSkevlo 
/*
 * ngbe_encap: DMA-map mbuf m and fill transmit descriptors on txr.
 *
 * Returns the total number of descriptors consumed (offload context
 * descriptors plus one per DMA segment), or 0 on failure (context
 * setup error or unmappable mbuf).  On success the mbuf becomes owned
 * by the ring: it is recorded in the first tx_buffer and freed when
 * the EOP descriptor completes.
 */
int
ngbe_encap(struct tx_ring *txr, struct mbuf *m)
{
	struct ngbe_softc *sc = txr->sc;
	uint32_t olinfo_status = 0, cmd_type_len;
	int i, j, ntxc;
	int first, last = 0;
	bus_dmamap_t map;
	struct ngbe_tx_buf *txbuf;
	union ngbe_tx_desc *txd = NULL;

	/* Basic descriptor defines */
	cmd_type_len = NGBE_TXD_DTYP_DATA | NGBE_TXD_IFCS;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Set the appropriate offload context
	 * this will becomes the first descriptor.
	 */
	ntxc = ngbe_tx_ctx_setup(txr, m, &cmd_type_len, &olinfo_status);
	if (ntxc == -1)
		goto fail;

	/*
	 * Map the packet for DMA.
	 */
	switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m,
	    BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		/* Too many segments: compact the chain and retry once. */
		if (m_defrag(m, M_NOWAIT) == 0 &&
		    bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m,
		    BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return 0;
	}

	/* Skip past the context descriptor(s), wrapping if needed. */
	i = txr->next_avail_desc + ntxc;
	if (i >= sc->num_tx_desc)
		i -= sc->num_tx_desc;

	/* One data descriptor per DMA segment. */
	for (j = 0; j < map->dm_nsegs; j++) {
		txd = &txr->tx_base[i];

		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
		txd->read.cmd_type_len =
		    htole32(cmd_type_len | map->dm_segs[j].ds_len);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i;

		if (++i == sc->num_tx_desc)
			i = 0;
	}

	/* Mark end-of-packet and request status report on the last one. */
	txd->read.cmd_type_len |= htole32(NGBE_TXD_EOP | NGBE_TXD_RS);

	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf->m_head = m;
	txbuf->eop_index = last;

	txr->next_avail_desc = i;

	return ntxc + j;

fail:
	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
	return 0;
}
27105cd48f1eSkevlo 
/*
 * ngbe_get_buf: allocate and DMA-map a receive mbuf cluster for ring
 * slot i and program its address into the Rx descriptor.
 *
 * Returns 0 on success, ENOBUFS when the slot is occupied or no
 * cluster is available, or the bus_dmamap_load_mbuf() error.
 */
int
ngbe_get_buf(struct rx_ring *rxr, int i)
{
	struct ngbe_softc *sc = rxr->sc;
	struct ngbe_rx_buf *rxbuf;
	struct mbuf *m;
	union ngbe_rx_desc *rxdesc;
	int error;

	rxbuf = &rxr->rx_buffers[i];
	rxdesc = &rxr->rx_base[i];
	if (rxbuf->buf) {
		printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i);
		return ENOBUFS;
	}

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
	if (!m)
		return ENOBUFS;

	/*
	 * Advance m_data so exactly MCLBYTES + ETHER_ALIGN bytes remain
	 * at the tail of the cluster, and set the length to match.
	 */
	m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
	m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;

	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return error;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
	    rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxbuf->buf = m;

	/* Hand the buffer's physical address to the hardware. */
	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);

	return 0;
}
27495cd48f1eSkevlo 
27505cd48f1eSkevlo void
ngbe_get_bus_info(struct ngbe_softc * sc)27515cd48f1eSkevlo ngbe_get_bus_info(struct ngbe_softc *sc)
27525cd48f1eSkevlo {
27535cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
27545cd48f1eSkevlo 	uint16_t link_status;
27555cd48f1eSkevlo 
27565cd48f1eSkevlo 	/* Get the negotiated link width and speed from PCI config space */
27575cd48f1eSkevlo 	link_status = ngbe_read_pci_cfg_word(sc, NGBE_PCI_LINK_STATUS);
27585cd48f1eSkevlo 
27595cd48f1eSkevlo 	ngbe_set_pci_config_data(hw, link_status);
27605cd48f1eSkevlo }
27615cd48f1eSkevlo 
27625cd48f1eSkevlo void
ngbe_get_copper_link_capabilities(struct ngbe_hw * hw,uint32_t * speed,int * autoneg)27635cd48f1eSkevlo ngbe_get_copper_link_capabilities(struct ngbe_hw *hw, uint32_t *speed,
27645cd48f1eSkevlo     int *autoneg)
27655cd48f1eSkevlo {
27665cd48f1eSkevlo 	*speed = 0;
27675cd48f1eSkevlo 
27685cd48f1eSkevlo 	if (hw->mac.autoneg)
27695cd48f1eSkevlo 		*autoneg = 1;
27705cd48f1eSkevlo 	else
27715cd48f1eSkevlo 		*autoneg = 0;
27725cd48f1eSkevlo 
27735cd48f1eSkevlo 	*speed = NGBE_LINK_SPEED_10_FULL | NGBE_LINK_SPEED_100_FULL |
27745cd48f1eSkevlo 	    NGBE_LINK_SPEED_1GB_FULL;
27755cd48f1eSkevlo }
27765cd48f1eSkevlo 
/*
 * ngbe_get_eeprom_semaphore: acquire the SMBI software semaphore that
 * arbitrates EEPROM access between device drivers.
 *
 * Returns 0 when the semaphore was obtained, ETIMEDOUT otherwise.
 */
int
ngbe_get_eeprom_semaphore(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t swsm;
	int i, timeout = 2000;
	int status = ETIMEDOUT;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore.
		 */
		swsm = NGBE_READ_REG(hw, NGBE_MIS_SWSM);
		if (!(swsm & NGBE_MIS_SWSM_SMBI)) {
			status = 0;
			break;
		}
		DELAY(50);
	}

	if (i == timeout) {
		printf("%s: cannot access the eeprom - SMBI semaphore not "
		    "granted\n", DEVNAME(sc));
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress.
		 */
		ngbe_release_eeprom_semaphore(hw);
		DELAY(50);

		/*
		 * One last try if the SMBI bit is 0 when we read it,
		 * then the bit will be set and we have the semaphore.
		 */
		swsm = NGBE_READ_REG(hw, NGBE_MIS_SWSM);
		if (!(swsm & NGBE_MIS_SWSM_SMBI))
			status = 0;
	}

	return status;
}
28225cd48f1eSkevlo 
28235cd48f1eSkevlo void
ngbe_get_hw_control(struct ngbe_hw * hw)28245cd48f1eSkevlo ngbe_get_hw_control(struct ngbe_hw *hw)
28255cd48f1eSkevlo {
28265cd48f1eSkevlo 	 /* Let firmware know the driver has taken over */
28275cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(hw, NGBE_CFG_PORT_CTL,
28285cd48f1eSkevlo 	    NGBE_CFG_PORT_CTL_DRV_LOAD, NGBE_CFG_PORT_CTL_DRV_LOAD);
28295cd48f1eSkevlo }
28305cd48f1eSkevlo 
28315cd48f1eSkevlo void
ngbe_release_hw_control(struct ngbe_softc * sc)28325cd48f1eSkevlo ngbe_release_hw_control(struct ngbe_softc *sc)
28335cd48f1eSkevlo {
28345cd48f1eSkevlo 	/* Let firmware take over control of hw. */
28355cd48f1eSkevlo 	NGBE_WRITE_REG_MASK(&sc->hw, NGBE_CFG_PORT_CTL,
28365cd48f1eSkevlo 	    NGBE_CFG_PORT_CTL_DRV_LOAD, 0);
28375cd48f1eSkevlo }
28385cd48f1eSkevlo 
28395cd48f1eSkevlo void
ngbe_get_mac_addr(struct ngbe_hw * hw,uint8_t * mac_addr)28405cd48f1eSkevlo ngbe_get_mac_addr(struct ngbe_hw *hw, uint8_t *mac_addr)
28415cd48f1eSkevlo {
28425cd48f1eSkevlo 	uint32_t rar_high, rar_low;
28435cd48f1eSkevlo 	int i;
28445cd48f1eSkevlo 
28455cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, 0);
28465cd48f1eSkevlo 	rar_high = NGBE_READ_REG(hw, NGBE_PSR_MAC_SWC_AD_H);
28475cd48f1eSkevlo 	rar_low = NGBE_READ_REG(hw, NGBE_PSR_MAC_SWC_AD_L);
28485cd48f1eSkevlo 
28495cd48f1eSkevlo 	for (i = 0; i < 2; i++)
28505cd48f1eSkevlo 		mac_addr[i] = (uint8_t)(rar_high >> (1 - i) * 8);
28515cd48f1eSkevlo 
28525cd48f1eSkevlo 	for (i = 0; i < 4; i++)
28535cd48f1eSkevlo 		mac_addr[i + 2] = (uint8_t)(rar_low >> (3 - i) * 8);
28545cd48f1eSkevlo }
28555cd48f1eSkevlo 
28565cd48f1eSkevlo enum ngbe_media_type
ngbe_get_media_type(struct ngbe_hw * hw)28575cd48f1eSkevlo ngbe_get_media_type(struct ngbe_hw *hw)
28585cd48f1eSkevlo {
28595cd48f1eSkevlo 	enum ngbe_media_type media_type = ngbe_media_type_copper;
28605cd48f1eSkevlo 
28615cd48f1eSkevlo 	return media_type;
28625cd48f1eSkevlo }
28635cd48f1eSkevlo 
/*
 * Disable Energy Efficient Ethernet on the internal gphy.
 * The register writes below are order sensitive.
 */
void
ngbe_gphy_dis_eee(struct ngbe_hw *hw)
{
	uint16_t val = 0;

	/* NOTE(review): reg 0x11/page 0xa4b value 0x1110 is undocumented
	 * vendor magic taken from the reference driver. */
	hw->phy.ops.write_reg(hw, 0x11, 0xa4b, 0x1110);
	/*
	 * Indirect MMD access sequence: select device 7 address mode,
	 * point at register 0x3c (presumably the EEE advertisement
	 * register, MMD 7.60 — TODO confirm), switch to data mode and
	 * write 0 to stop advertising EEE.
	 */
	hw->phy.ops.write_reg(hw, MII_MMDACR, 0x0, MMDACR_FN_ADDRESS | 0x07);
	hw->phy.ops.write_reg(hw, MII_MMDAADR, 0x0, 0x003c);
	hw->phy.ops.write_reg(hw, MII_MMDACR, 0x0, MMDACR_FN_DATANPI | 0x07);
	hw->phy.ops.write_reg(hw, MII_MMDAADR, 0x0, 0);

	/* Disable 10/100M Half Duplex */
	msec_delay(100);
	hw->phy.ops.read_reg(hw, MII_ANAR, 0, &val);
	val &= ~(ANAR_TX | ANAR_10);
	hw->phy.ops.write_reg(hw, MII_ANAR, 0x0, val);
}
28815cd48f1eSkevlo 
/*
 * Load the per-port gphy efuse calibration words into the PHY.
 * The register writes below are vendor-specified and order sensitive.
 */
void
ngbe_gphy_efuse_calibration(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t efuse[2];

	/* MDIO must be usable before touching the PHY registers below. */
	ngbe_gphy_wait_mdio_access_on(hw);

	/* Per-port efuse words were cached from flash in ngbe_phy_init(). */
	efuse[0] = sc->gphy_efuse[0];
	efuse[1] = sc->gphy_efuse[1];

	/* Treat an all-zero (unprogrammed) efuse as all-ones. */
	if (!efuse[0] && !efuse[1])
		efuse[0] = efuse[1] = 0xffffffff;

	/* Calibration */
	/* NOTE(review): forced-on bit masks from the vendor driver. */
	efuse[0] |= 0xf0000100;
	efuse[1] |= 0xff807fff;

	/* EODR, Efuse Output Data Register */
	ngbe_phy_write_reg(hw, 16, 0xa46, (efuse[0] >> 0) & 0xffff);
	ngbe_phy_write_reg(hw, 17, 0xa46, (efuse[0] >> 16) & 0xffff);
	ngbe_phy_write_reg(hw, 18, 0xa46, (efuse[1] >> 0) & 0xffff);
	ngbe_phy_write_reg(hw, 19, 0xa46, (efuse[1] >> 16) & 0xffff);

	/* Set efuse ready */
	ngbe_phy_write_reg(hw, 20, 0xa46, 0x01);
	ngbe_gphy_wait_mdio_access_on(hw);
	/* NOTE(review): 0x8011/0x5737 are undocumented vendor magic. */
	ngbe_phy_write_reg(hw, 27, NGBE_INTERNAL_PHY_PAGE_OFFSET, 0x8011);
	ngbe_phy_write_reg(hw, 28, NGBE_INTERNAL_PHY_PAGE_OFFSET, 0x5737);
	ngbe_gphy_dis_eee(hw);
}
29135cd48f1eSkevlo 
29145cd48f1eSkevlo void
ngbe_gphy_wait_mdio_access_on(struct ngbe_hw * hw)29155cd48f1eSkevlo ngbe_gphy_wait_mdio_access_on(struct ngbe_hw *hw)
29165cd48f1eSkevlo {
29175cd48f1eSkevlo 	uint16_t val = 0;
29185cd48f1eSkevlo 	int i;
29195cd48f1eSkevlo 
29205cd48f1eSkevlo 	for (i = 0; i < 100; i++) {
29215cd48f1eSkevlo 		ngbe_phy_read_reg(hw, 29, NGBE_INTERNAL_PHY_PAGE_OFFSET, &val);
29225cd48f1eSkevlo 		if (val & 0x20)
29235cd48f1eSkevlo 			break;
29245cd48f1eSkevlo 		DELAY(1000);
29255cd48f1eSkevlo 	}
29265cd48f1eSkevlo }
29275cd48f1eSkevlo 
29285cd48f1eSkevlo void
ngbe_handle_phy_event(struct ngbe_softc * sc)29295cd48f1eSkevlo ngbe_handle_phy_event(struct ngbe_softc *sc)
29305cd48f1eSkevlo {
29315cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
29325cd48f1eSkevlo 	uint32_t reg;
29335cd48f1eSkevlo 
29345cd48f1eSkevlo 	reg = NGBE_READ_REG(hw, NGBE_GPIO_INTSTATUS);
29355cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_GPIO_EOI, reg);
29365cd48f1eSkevlo 	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA))
29375cd48f1eSkevlo 		hw->phy.ops.check_event(sc);
29385cd48f1eSkevlo }
29395cd48f1eSkevlo 
/*
 * ngbe_host_interface_command - issue a command block to the management
 * firmware through the shared mailbox RAM and optionally read back the
 * reply.
 *
 * buffer:      dword-aligned command block; overwritten with the reply
 *              (header plus payload) when return_data is nonzero
 * length:      command block size in bytes; must be nonzero, a multiple
 *              of four, and at most NGBE_HI_MAX_BLOCK_BYTE_LENGTH
 * timeout:     milliseconds to wait for the firmware-ready bit; 0 skips
 *              the completion check
 * return_data: nonzero to copy the firmware reply back into buffer
 *
 * Access is serialized through the SW_MB software/firmware semaphore.
 * Returns 0 on success, EINVAL on any validation/access/completion
 * failure.
 */
int
ngbe_host_interface_command(struct ngbe_softc *sc, uint32_t *buffer,
    uint32_t length, uint32_t timeout, int return_data)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t hicr, i, bi, dword_len;
	uint32_t hdr_size = sizeof(struct ngbe_hic_hdr);
	uint32_t buf[64] = {};
	uint16_t buf_len;
	int status = 0;

	if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		printf("%s: buffer length failure\n", DEVNAME(sc));
		return EINVAL;
	}

	if (hw->mac.ops.acquire_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB))
		return EINVAL;

	/* Calculate length in DWORDs. We must be multiple of DWORD */
	if ((length % (sizeof(uint32_t))) != 0) {
		printf("%s: buffer length failure, not aligned to dword\n",
		    DEVNAME(sc));
		status = EINVAL;
		goto rel_out;
        }

	/* A set FWRDY here means a previous command was never consumed. */
	if (ngbe_check_mng_access(hw)) {
		hicr = NGBE_READ_REG(hw, NGBE_MNG_MBOX_CTL);
		if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY))
			printf("%s: fwrdy is set before command\n",
			    DEVNAME(sc));
	}

	dword_len = length >> 2;

	/*
	 * The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++) {
		if (ngbe_check_mng_access(hw)) {
			NGBE_WRITE_REG_ARRAY(hw, NGBE_MNG_MBOX, i,
			    htole32(buffer[i]));
		} else {
			status = EINVAL;
			goto rel_out;
		}
	}

	/* Setting this bit tells the ARC that a new command is pending. */
	if (ngbe_check_mng_access(hw)) {
		NGBE_WRITE_REG_MASK(hw, NGBE_MNG_MBOX_CTL,
		    NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY);
	} else {
		status = EINVAL;
		goto rel_out;
	}

	/* Poll for the firmware-ready bit, one millisecond per try. */
	for (i = 0; i < timeout; i++) {
		if (ngbe_check_mng_access(hw)) {
			hicr = NGBE_READ_REG(hw, NGBE_MNG_MBOX_CTL);
			if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY))
				break;
		}
		msec_delay(1);
	}

	buf[0] = NGBE_READ_REG(hw, NGBE_MNG_MBOX);
	/* Check command completion */
	if (timeout != 0 && i == timeout) {
		printf("%s: command has failed with no status valid\n",
		    DEVNAME(sc));
		/*
		 * Only escalate the timeout to an error when the reply
		 * word does not echo the command byte; otherwise fall
		 * through as if the command completed.
		 */
		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
			status = EINVAL;
			goto rel_out;
		}
	}

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* First pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		if (ngbe_check_mng_access(hw)) {
			buffer[bi] = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, bi);
			le32_to_cpus(&buffer[bi]);
		} else {
			status = EINVAL;
			goto rel_out;
		}
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ngbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		printf("%s: buffer not large enough for reply message\n",
		    DEVNAME(sc));
		status = EINVAL;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	/*
	 * NOTE(review): "<=" reads one dword beyond the rounded payload
	 * length, mirroring the vendor driver; the dword-alignment and
	 * size checks above appear to keep the index within buffer, but
	 * confirm against sizeof(struct ngbe_hic_hdr).
	 */
	for (; bi <= dword_len; bi++) {
		if (ngbe_check_mng_access(hw)) {
			buffer[bi] = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, bi);
			le32_to_cpus(&buffer[bi]);
		} else {
			status = EINVAL;
			goto rel_out;
		}
	}

rel_out:
	hw->mac.ops.release_swfw_sync(sc, NGBE_MNG_SWFW_SYNC_SW_MB);
	return status;
}
30665cd48f1eSkevlo 
30675cd48f1eSkevlo int
ngbe_hpbthresh(struct ngbe_softc * sc)30685cd48f1eSkevlo ngbe_hpbthresh(struct ngbe_softc *sc)
30695cd48f1eSkevlo {
30705cd48f1eSkevlo 	uint32_t dv_id, rx_pba;
30715cd48f1eSkevlo 	int kb, link, marker, tc;
30725cd48f1eSkevlo 
30735cd48f1eSkevlo 	/* Calculate max LAN frame size */
30745cd48f1eSkevlo 	tc = link = sc->sc_ac.ac_if.if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
30755cd48f1eSkevlo 	    NGBE_ETH_FRAMING;
30765cd48f1eSkevlo 
30775cd48f1eSkevlo 	/* Calculate delay value for device */
30785cd48f1eSkevlo 	dv_id = NGBE_DV(link, tc);
30795cd48f1eSkevlo 
30805cd48f1eSkevlo 	/* Delay value is calculated in bit times convert to KB */
30815cd48f1eSkevlo 	kb = NGBE_BT2KB(dv_id);
30825cd48f1eSkevlo 	rx_pba = NGBE_READ_REG(&sc->hw, NGBE_RDB_PB_SZ) >> NGBE_RDB_PB_SZ_SHIFT;
30835cd48f1eSkevlo 
30845cd48f1eSkevlo 	marker = rx_pba - kb;
30855cd48f1eSkevlo 
30865cd48f1eSkevlo 	return marker;
30875cd48f1eSkevlo }
30885cd48f1eSkevlo 
30895cd48f1eSkevlo int
ngbe_lpbthresh(struct ngbe_softc * sc)30905cd48f1eSkevlo ngbe_lpbthresh(struct ngbe_softc *sc)
30915cd48f1eSkevlo {
30925cd48f1eSkevlo 	uint32_t dv_id;
30935cd48f1eSkevlo 	int tc;
30945cd48f1eSkevlo 
30955cd48f1eSkevlo 	/* Calculate max LAN frame size */
30965cd48f1eSkevlo 	tc = sc->sc_ac.ac_if.if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
30975cd48f1eSkevlo 
30985cd48f1eSkevlo 	/* Calculate delay value for device */
30995cd48f1eSkevlo 	dv_id = NGBE_LOW_DV(tc);
31005cd48f1eSkevlo 
31015cd48f1eSkevlo 	/* Delay value is calculated in bit times convert to KB */
31025cd48f1eSkevlo 	return NGBE_BT2KB(dv_id);
31035cd48f1eSkevlo }
31045cd48f1eSkevlo 
31055cd48f1eSkevlo int
ngbe_mng_present(struct ngbe_hw * hw)31065cd48f1eSkevlo ngbe_mng_present(struct ngbe_hw *hw)
31075cd48f1eSkevlo {
31085cd48f1eSkevlo 	uint32_t fwsm;
31095cd48f1eSkevlo 
31105cd48f1eSkevlo 	fwsm = NGBE_READ_REG(hw, NGBE_MIS_ST);
31115cd48f1eSkevlo 
31125cd48f1eSkevlo 	return fwsm & NGBE_MIS_ST_MNG_INIT_DN;
31135cd48f1eSkevlo }
31145cd48f1eSkevlo 
31155cd48f1eSkevlo int
ngbe_mta_vector(struct ngbe_hw * hw,uint8_t * mc_addr)31165cd48f1eSkevlo ngbe_mta_vector(struct ngbe_hw *hw, uint8_t *mc_addr)
31175cd48f1eSkevlo {
31185cd48f1eSkevlo 	uint32_t vector = 0;
31195cd48f1eSkevlo 	int rshift;
31205cd48f1eSkevlo 
31215cd48f1eSkevlo 	/* pick bits [47:32] of the address. */
31225cd48f1eSkevlo 	vector = mc_addr[4] | (((uint16_t)mc_addr[5]) << 8);
31235cd48f1eSkevlo 	switch (hw->mac.mc_filter_type) {
31245cd48f1eSkevlo 	case 0:	/* bits 47:36 */
31255cd48f1eSkevlo 	case 1:	/* bits 46:35 */
31265cd48f1eSkevlo 	case 2:	/* bits 45:34 */
31275cd48f1eSkevlo 		rshift = 4 - hw->mac.mc_filter_type;
31285cd48f1eSkevlo 		break;
31295cd48f1eSkevlo 	case 3:	/* bits 43:32 */
31305cd48f1eSkevlo 		rshift = 0;
31315cd48f1eSkevlo 		break;
31325cd48f1eSkevlo 	default:	/* Invalid mc_filter_type */
31335cd48f1eSkevlo 		vector = rshift = 0;
31345cd48f1eSkevlo 		break;
31355cd48f1eSkevlo 	}
31365cd48f1eSkevlo 	vector = (vector >> rshift) & 0x0fff;
31375cd48f1eSkevlo 
31385cd48f1eSkevlo 	return vector;
31395cd48f1eSkevlo }
31405cd48f1eSkevlo 
31415cd48f1eSkevlo int
ngbe_negotiate_fc(struct ngbe_softc * sc,uint32_t adv_reg,uint32_t lp_reg,uint32_t adv_sym,uint32_t adv_asm,uint32_t lp_sym,uint32_t lp_asm)31425cd48f1eSkevlo ngbe_negotiate_fc(struct ngbe_softc *sc, uint32_t adv_reg, uint32_t lp_reg,
31435cd48f1eSkevlo     uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm)
31445cd48f1eSkevlo {
31455cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
31465cd48f1eSkevlo 
31475cd48f1eSkevlo 	if ((!(adv_reg)) || (!(lp_reg)))
31485cd48f1eSkevlo 		return EINVAL;
31495cd48f1eSkevlo 
31505cd48f1eSkevlo 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
31515cd48f1eSkevlo 		/*
31525cd48f1eSkevlo 		 * Now we need to check if the user selected Rx ONLY
31535cd48f1eSkevlo 		 * of pause frames.  In this case, we had to advertise
31545cd48f1eSkevlo 		 * FULL flow control because we could not advertise RX
31555cd48f1eSkevlo 		 * ONLY. Hence, we must now check to see if we need to
31565cd48f1eSkevlo 		 * turn OFF the TRANSMISSION of PAUSE frames.
31575cd48f1eSkevlo 		 */
31585cd48f1eSkevlo 		if (hw->fc.requested_mode == ngbe_fc_full)
31595cd48f1eSkevlo 			hw->fc.current_mode = ngbe_fc_full;
31605cd48f1eSkevlo 		else
31615cd48f1eSkevlo 			hw->fc.current_mode = ngbe_fc_rx_pause;
31625cd48f1eSkevlo 
31635cd48f1eSkevlo 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
31645cd48f1eSkevlo 	    (lp_reg & lp_sym) && (lp_reg & lp_asm))
31655cd48f1eSkevlo 	    	hw->fc.current_mode = ngbe_fc_tx_pause;
31665cd48f1eSkevlo 	else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
31675cd48f1eSkevlo 	    !(lp_reg & lp_sym) && (lp_reg & lp_asm))
31685cd48f1eSkevlo 	    	hw->fc.current_mode = ngbe_fc_rx_pause;
31695cd48f1eSkevlo 	else
31705cd48f1eSkevlo 		hw->fc.current_mode = ngbe_fc_none;
31715cd48f1eSkevlo 
31725cd48f1eSkevlo 	return 0;
31735cd48f1eSkevlo }
31745cd48f1eSkevlo 
31755cd48f1eSkevlo int
ngbe_non_sfp_link_config(struct ngbe_softc * sc)31765cd48f1eSkevlo ngbe_non_sfp_link_config(struct ngbe_softc *sc)
31775cd48f1eSkevlo {
31785cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
31795cd48f1eSkevlo 	uint32_t speed;
31805cd48f1eSkevlo 	int error;
31815cd48f1eSkevlo 
31825cd48f1eSkevlo 	if (hw->mac.autoneg)
31835cd48f1eSkevlo 		speed = hw->phy.autoneg_advertised;
31845cd48f1eSkevlo 	else
31855cd48f1eSkevlo 		speed = hw->phy.force_speed;
31865cd48f1eSkevlo 
31875cd48f1eSkevlo 	msec_delay(50);
31885cd48f1eSkevlo 	if (hw->phy.type == ngbe_phy_internal) {
31895cd48f1eSkevlo 		error = hw->phy.ops.setup_once(sc);
31905cd48f1eSkevlo 		if (error)
31915cd48f1eSkevlo 			return error;
31925cd48f1eSkevlo 	}
31935cd48f1eSkevlo 
31945cd48f1eSkevlo 	error = hw->mac.ops.setup_link(sc, speed, 0);
31955cd48f1eSkevlo 	return error;
31965cd48f1eSkevlo }
31975cd48f1eSkevlo 
31985cd48f1eSkevlo void
ngbe_pbthresh_setup(struct ngbe_softc * sc)31995cd48f1eSkevlo ngbe_pbthresh_setup(struct ngbe_softc *sc)
32005cd48f1eSkevlo {
32015cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
32025cd48f1eSkevlo 
32035cd48f1eSkevlo 	hw->fc.high_water = ngbe_hpbthresh(sc);
32045cd48f1eSkevlo 	hw->fc.low_water = ngbe_lpbthresh(sc);
32055cd48f1eSkevlo 
32065cd48f1eSkevlo 	/* Low water marks must not be larger than high water marks */
32075cd48f1eSkevlo 	if (hw->fc.low_water > hw->fc.high_water)
32085cd48f1eSkevlo 		hw->fc.low_water = 0;
32095cd48f1eSkevlo }
32105cd48f1eSkevlo 
/*
 * Consume a pending PHY link-status-change event.  The read value is
 * discarded; presumably the register is latched and reading it clears
 * the event — TODO(review): confirm read-to-clear semantics.
 */
void
ngbe_phy_check_event(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint16_t value = 0;

	hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_LSC,
	    NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
}
32205cd48f1eSkevlo 
32215cd48f1eSkevlo int
ngbe_phy_check_overtemp(struct ngbe_hw * hw)32225cd48f1eSkevlo ngbe_phy_check_overtemp(struct ngbe_hw *hw)
32235cd48f1eSkevlo {
32245cd48f1eSkevlo 	uint32_t ts_state;
32255cd48f1eSkevlo 	int status = 0;
32265cd48f1eSkevlo 
32275cd48f1eSkevlo 	/* Check that the LASI temp alarm status was triggered */
32285cd48f1eSkevlo 	ts_state = NGBE_READ_REG(hw, NGBE_TS_ALARM_ST);
32295cd48f1eSkevlo 
32305cd48f1eSkevlo 	if (ts_state & NGBE_TS_ALARM_ST_ALARM)
32315cd48f1eSkevlo 		status = 1;
32325cd48f1eSkevlo 
32335cd48f1eSkevlo 	return status;
32345cd48f1eSkevlo }
32355cd48f1eSkevlo 
32365cd48f1eSkevlo void
ngbe_phy_get_advertised_pause(struct ngbe_hw * hw,uint8_t * pause_bit)32375cd48f1eSkevlo ngbe_phy_get_advertised_pause(struct ngbe_hw *hw, uint8_t *pause_bit)
32385cd48f1eSkevlo {
32395cd48f1eSkevlo 	uint16_t value;
32405cd48f1eSkevlo 
32415cd48f1eSkevlo 	hw->phy.ops.read_reg(hw, 4, 0, &value);
32425cd48f1eSkevlo 	*pause_bit = (uint8_t)((value >> 10) & 0x3);
32435cd48f1eSkevlo }
32445cd48f1eSkevlo 
/*
 * ngbe_phy_get_lp_advertised_pause - report the link partner's pause
 * bits (ANLPAR bits 11:10) in the low two bits of *pause_bit.
 */
void
ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *hw, uint8_t *pause_bit)
{
	uint16_t value;

	/* Read the latched link-status-change register (value unused). */
	hw->phy.ops.read_reg(hw, NGBE_MDIO_AUTO_NEG_LSC,
	    NGBE_INTERNAL_PHY_PAGE_OFFSET, &value);
	/*
	 * NOTE(review): the AN-complete flag computed here is immediately
	 * overwritten and never acted on — ANLPAR is read unconditionally.
	 * This mirrors the vendor driver; confirm whether the gating was
	 * intended.
	 */
	hw->phy.ops.read_reg(hw, MII_BMSR, 0, &value);
	value = (value & BMSR_ACOMP) ? 1 : 0;

	/* If AN complete then check lp adv pause */
	hw->phy.ops.read_reg(hw, MII_ANLPAR, 0, &value);
	*pause_bit = (uint8_t)((value >> 10) & 0x3);
}
32595cd48f1eSkevlo 
32605cd48f1eSkevlo int
ngbe_phy_identify(struct ngbe_softc * sc)32615cd48f1eSkevlo ngbe_phy_identify(struct ngbe_softc *sc)
32625cd48f1eSkevlo {
32635cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
32645cd48f1eSkevlo 	int error;
32655cd48f1eSkevlo 
32665cd48f1eSkevlo 	switch(hw->phy.type) {
32675cd48f1eSkevlo 	case ngbe_phy_internal:
32685cd48f1eSkevlo 		error = ngbe_check_internal_phy_id(sc);
32695cd48f1eSkevlo 		break;
32705cd48f1eSkevlo 	default:
32715cd48f1eSkevlo 		error = ENOTSUP;
32725cd48f1eSkevlo 	}
32735cd48f1eSkevlo 
32745cd48f1eSkevlo 	return error;
32755cd48f1eSkevlo }
32765cd48f1eSkevlo 
32775cd48f1eSkevlo int
ngbe_phy_init(struct ngbe_softc * sc)32785cd48f1eSkevlo ngbe_phy_init(struct ngbe_softc *sc)
32795cd48f1eSkevlo {
32805cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
32815cd48f1eSkevlo 	uint16_t value;
32825cd48f1eSkevlo 	uint8_t lan_id = hw->bus.lan_id;
32835cd48f1eSkevlo 	int error;
32845cd48f1eSkevlo 
32855cd48f1eSkevlo 	/* Set fwsw semaphore mask for phy first */
32865cd48f1eSkevlo 	if (!hw->phy.phy_semaphore_mask)
32875cd48f1eSkevlo 		hw->phy.phy_semaphore_mask = NGBE_MNG_SWFW_SYNC_SW_PHY;
32885cd48f1eSkevlo 
32895cd48f1eSkevlo 	/* Init phy.addr according to HW design */
32905cd48f1eSkevlo 	hw->phy.addr = 0;
32915cd48f1eSkevlo 
32925cd48f1eSkevlo 	/* Identify the PHY or SFP module */
32935cd48f1eSkevlo 	error = hw->phy.ops.identify(sc);
32945cd48f1eSkevlo 	if (error == ENOTSUP)
32955cd48f1eSkevlo 		return error;
32965cd48f1eSkevlo 
32975cd48f1eSkevlo 	/* Enable interrupts, only link status change and an done is allowed */
32985cd48f1eSkevlo 	if (hw->phy.type == ngbe_phy_internal) {
32995cd48f1eSkevlo 		value = NGBE_INTPHY_INT_LSC | NGBE_INTPHY_INT_ANC;
33005cd48f1eSkevlo 		hw->phy.ops.write_reg(hw, 0x12, 0xa42, value);
33015cd48f1eSkevlo 		sc->gphy_efuse[0] =
33025cd48f1eSkevlo 		    ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8);
33035cd48f1eSkevlo 		sc->gphy_efuse[1] =
33045cd48f1eSkevlo 		    ngbe_flash_read_dword(hw, 0xfe010 + lan_id * 8 + 4);
33055cd48f1eSkevlo 	}
33065cd48f1eSkevlo 
33075cd48f1eSkevlo 	return error;
33085cd48f1eSkevlo }
33095cd48f1eSkevlo 
33105cd48f1eSkevlo void
ngbe_phy_led_ctrl(struct ngbe_softc * sc)33115cd48f1eSkevlo ngbe_phy_led_ctrl(struct ngbe_softc *sc)
33125cd48f1eSkevlo {
33135cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
33145cd48f1eSkevlo 	uint16_t value;
33155cd48f1eSkevlo 
33165cd48f1eSkevlo 	if (sc->led_conf != -1)
33175cd48f1eSkevlo 		value = sc->led_conf & 0xffff;
33185cd48f1eSkevlo 	else
33195cd48f1eSkevlo 		value = 0x205b;
33205cd48f1eSkevlo 	hw->phy.ops.write_reg(hw, 16, 0xd04, value);
33215cd48f1eSkevlo 	hw->phy.ops.write_reg(hw, 17, 0xd04, 0);
33225cd48f1eSkevlo 
33235cd48f1eSkevlo 	hw->phy.ops.read_reg(hw, 18, 0xd04, &value);
33245cd48f1eSkevlo 	if (sc->led_conf != -1) {
33255cd48f1eSkevlo 		value &= ~0x73;
33265cd48f1eSkevlo 		value |= sc->led_conf >> 16;
33275cd48f1eSkevlo 	} else {
33285cd48f1eSkevlo 		value &= 0xfffc;
33295cd48f1eSkevlo 		/* Act led blinking mode set to 60ms */
33305cd48f1eSkevlo 		value |= 0x2;
33315cd48f1eSkevlo 	}
33325cd48f1eSkevlo 	hw->phy.ops.write_reg(hw, 18, 0xd04, value);
33335cd48f1eSkevlo }
33345cd48f1eSkevlo 
33355cd48f1eSkevlo int
ngbe_phy_led_oem_chk(struct ngbe_softc * sc,uint32_t * data)33365cd48f1eSkevlo ngbe_phy_led_oem_chk(struct ngbe_softc *sc, uint32_t *data)
33375cd48f1eSkevlo {
33385cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
33395cd48f1eSkevlo 	struct ngbe_hic_read_shadow_ram buffer;
33405cd48f1eSkevlo 	uint32_t tmp;
33415cd48f1eSkevlo 	int status;
33425cd48f1eSkevlo 
33435cd48f1eSkevlo 	buffer.hdr.req.cmd = FW_PHY_LED_CONF;
33445cd48f1eSkevlo 	buffer.hdr.req.buf_lenh = 0;
33455cd48f1eSkevlo 	buffer.hdr.req.buf_lenl = 0;
33465cd48f1eSkevlo 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
33475cd48f1eSkevlo 
33485cd48f1eSkevlo 	/* Convert offset from words to bytes */
33495cd48f1eSkevlo 	buffer.address = 0;
33505cd48f1eSkevlo 	/* One word */
33515cd48f1eSkevlo 	buffer.length = 0;
33525cd48f1eSkevlo 
33535cd48f1eSkevlo 	status = ngbe_host_interface_command(sc, (uint32_t *)&buffer,
33545cd48f1eSkevlo 	    sizeof(buffer), NGBE_HI_COMMAND_TIMEOUT, 0);
33555cd48f1eSkevlo 	if (status)
33565cd48f1eSkevlo 		return status;
33575cd48f1eSkevlo 
33585cd48f1eSkevlo 	if (ngbe_check_mng_access(hw)) {
33595cd48f1eSkevlo 		tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 1);
33605cd48f1eSkevlo 		if (tmp == NGBE_CHECKSUM_CAP_ST_PASS) {
33615cd48f1eSkevlo 			tmp = NGBE_READ_REG_ARRAY(hw, NGBE_MNG_MBOX, 2);
33625cd48f1eSkevlo 			*data = tmp;
33635cd48f1eSkevlo 			status = 0;
33645cd48f1eSkevlo 		} else if (tmp == NGBE_CHECKSUM_CAP_ST_FAIL) {
33655cd48f1eSkevlo 			*data = tmp;
33665cd48f1eSkevlo 			status = EINVAL;
33675cd48f1eSkevlo 		} else
33685cd48f1eSkevlo 			status = EINVAL;
33695cd48f1eSkevlo 	} else {
33705cd48f1eSkevlo 		status = EINVAL;
33715cd48f1eSkevlo 		return status;
33725cd48f1eSkevlo 	}
33735cd48f1eSkevlo 
33745cd48f1eSkevlo 	return status;
33755cd48f1eSkevlo }
33765cd48f1eSkevlo 
33775cd48f1eSkevlo int
ngbe_phy_read_reg(struct ngbe_hw * hw,uint32_t off,uint32_t page,uint16_t * data)33785cd48f1eSkevlo ngbe_phy_read_reg(struct ngbe_hw *hw, uint32_t off, uint32_t page,
33795cd48f1eSkevlo     uint16_t *data)
33805cd48f1eSkevlo {
33815cd48f1eSkevlo 	*data = 0;
33825cd48f1eSkevlo 
33835cd48f1eSkevlo 	if (!((page == NGBE_INTERNAL_PHY_PAGE_OFFSET) &&
33845cd48f1eSkevlo 	    ((off == NGBE_MDIO_AUTO_NEG_STATUS) ||
33855cd48f1eSkevlo 	    (off == NGBE_MDIO_AUTO_NEG_LSC)))) {
33865cd48f1eSkevlo 		NGBE_WRITE_REG(hw,
33875cd48f1eSkevlo 		    NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET),
33885cd48f1eSkevlo 		    page);
33895cd48f1eSkevlo 	}
33905cd48f1eSkevlo 	*data = NGBE_READ_REG(hw, NGBE_PHY_CONFIG(off)) & 0xffff;
33915cd48f1eSkevlo 
33925cd48f1eSkevlo 	return 0;
33935cd48f1eSkevlo }
33945cd48f1eSkevlo 
33955cd48f1eSkevlo int
ngbe_phy_write_reg(struct ngbe_hw * hw,uint32_t off,uint32_t page,uint16_t data)33965cd48f1eSkevlo ngbe_phy_write_reg(struct ngbe_hw *hw, uint32_t off, uint32_t page,
33975cd48f1eSkevlo     uint16_t data)
33985cd48f1eSkevlo {
33995cd48f1eSkevlo 	if (!((page == NGBE_INTERNAL_PHY_PAGE_OFFSET) &&
34005cd48f1eSkevlo 	    ((off == NGBE_MDIO_AUTO_NEG_STATUS) ||
34015cd48f1eSkevlo 	    (off == NGBE_MDIO_AUTO_NEG_LSC)))) {
34025cd48f1eSkevlo 		NGBE_WRITE_REG(hw,
34035cd48f1eSkevlo 		    NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET),
34045cd48f1eSkevlo 		    page);
34055cd48f1eSkevlo 	}
34065cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_PHY_CONFIG(off), data);
34075cd48f1eSkevlo 
34085cd48f1eSkevlo 	return 0;
34095cd48f1eSkevlo }
34105cd48f1eSkevlo 
34115cd48f1eSkevlo int
ngbe_phy_reset(struct ngbe_softc * sc)34125cd48f1eSkevlo ngbe_phy_reset(struct ngbe_softc *sc)
34135cd48f1eSkevlo {
34145cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
34155cd48f1eSkevlo 	uint16_t value;
34165cd48f1eSkevlo 	int i, status;
34175cd48f1eSkevlo 
34185cd48f1eSkevlo 	/* only support internal phy */
34195cd48f1eSkevlo 	if (hw->phy.type != ngbe_phy_internal) {
34205cd48f1eSkevlo 		printf("%s: operation not supported\n", DEVNAME(sc));
34215cd48f1eSkevlo 		return EINVAL;
34225cd48f1eSkevlo 	}
34235cd48f1eSkevlo 
34245cd48f1eSkevlo 	/* Don't reset PHY if it's shut down due to overtemp. */
34255cd48f1eSkevlo 	if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw) != 0) {
34265cd48f1eSkevlo 		printf("%s: overtemp! skip phy reset\n", DEVNAME(sc));
34275cd48f1eSkevlo 		return EINVAL;
34285cd48f1eSkevlo 	}
34295cd48f1eSkevlo 
34305cd48f1eSkevlo 	/* Blocked by MNG FW so bail */
34315cd48f1eSkevlo 	status = ngbe_check_reset_blocked(sc);
34325cd48f1eSkevlo 	if (status)
34335cd48f1eSkevlo 		return status;
34345cd48f1eSkevlo 
34355cd48f1eSkevlo 	value = NGBE_MDI_PHY_RESET;
34365cd48f1eSkevlo 	status = hw->phy.ops.write_reg(hw, 0, 0, value);
34375cd48f1eSkevlo 	for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) {
34385cd48f1eSkevlo 		status = hw->phy.ops.read_reg(hw, 0, 0, &value);
34395cd48f1eSkevlo 		if (!(value & NGBE_MDI_PHY_RESET))
34405cd48f1eSkevlo 			break;
34415cd48f1eSkevlo 		msec_delay(1);
34425cd48f1eSkevlo 	}
34435cd48f1eSkevlo 
34445cd48f1eSkevlo 	if (i == NGBE_PHY_RST_WAIT_PERIOD) {
34455cd48f1eSkevlo 		printf("%s: phy mode reset did not complete\n", DEVNAME(sc));
34465cd48f1eSkevlo 		return ETIMEDOUT;
34475cd48f1eSkevlo 	}
34485cd48f1eSkevlo 
34495cd48f1eSkevlo 	return status;
34505cd48f1eSkevlo }
34515cd48f1eSkevlo 
34525cd48f1eSkevlo int
ngbe_phy_set_pause_advertisement(struct ngbe_hw * hw,uint16_t pause_bit)34535cd48f1eSkevlo ngbe_phy_set_pause_advertisement(struct ngbe_hw *hw, uint16_t pause_bit)
34545cd48f1eSkevlo {
34555cd48f1eSkevlo 	uint16_t value;
34565cd48f1eSkevlo 	int status;
34575cd48f1eSkevlo 
34585cd48f1eSkevlo 	status = hw->phy.ops.read_reg(hw, MII_ANAR, 0, &value);
34595cd48f1eSkevlo 	value &= ~0xc00;
34605cd48f1eSkevlo 	value |= pause_bit;
34615cd48f1eSkevlo 	status = hw->phy.ops.write_reg(hw, MII_ANAR, 0, value);
34625cd48f1eSkevlo 	return status;
34635cd48f1eSkevlo }
34645cd48f1eSkevlo 
34655cd48f1eSkevlo int
ngbe_phy_setup(struct ngbe_softc * sc)34665cd48f1eSkevlo ngbe_phy_setup(struct ngbe_softc *sc)
34675cd48f1eSkevlo {
34685cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
34695cd48f1eSkevlo 	uint16_t value = 0;
34705cd48f1eSkevlo 	int i;
34715cd48f1eSkevlo 
34725cd48f1eSkevlo 	for (i = 0; i < 15; i++) {
34735cd48f1eSkevlo 		if (!NGBE_READ_REG_MASK(hw, NGBE_MIS_ST,
34745cd48f1eSkevlo 		    NGBE_MIS_ST_GPHY_IN_RST(hw->bus.lan_id)))
34755cd48f1eSkevlo 			break;
34765cd48f1eSkevlo 		msec_delay(1);
34775cd48f1eSkevlo 	}
34785cd48f1eSkevlo 	if (i == 15) {
34795cd48f1eSkevlo 		printf("%s: gphy reset exceeds maximum time\n", DEVNAME(sc));
34805cd48f1eSkevlo 		return ETIMEDOUT;
34815cd48f1eSkevlo 	}
34825cd48f1eSkevlo 
34835cd48f1eSkevlo 	ngbe_gphy_efuse_calibration(sc);
34845cd48f1eSkevlo 	hw->phy.ops.write_reg(hw, 20, 0xa46, 2);
34855cd48f1eSkevlo 	ngbe_gphy_wait_mdio_access_on(hw);
34865cd48f1eSkevlo 
34875cd48f1eSkevlo 	for (i = 0; i < 100; i++) {
34885cd48f1eSkevlo 		hw->phy.ops.read_reg(hw, 16, 0xa42, &value);
34895cd48f1eSkevlo 		if ((value & 0x7) == 3)
34905cd48f1eSkevlo 			break;
34915cd48f1eSkevlo 		DELAY(1000);
34925cd48f1eSkevlo 	}
34935cd48f1eSkevlo 	if (i == 100) {
34945cd48f1eSkevlo 		printf("%s: phy reset exceeds maximum time\n", DEVNAME(sc));
34955cd48f1eSkevlo 		return ETIMEDOUT;
34965cd48f1eSkevlo 	}
34975cd48f1eSkevlo 
34985cd48f1eSkevlo 	return 0;
34995cd48f1eSkevlo }
35005cd48f1eSkevlo 
/*
 * Configure the internal gigabit PHY for the requested link speed.
 *
 * With autonegotiation disabled the PHY is reset and forced to the
 * single speed in "speed" (full duplex only).  Otherwise the 10/100
 * half-duplex abilities are cleared and the 1000/100/10 full-duplex
 * advertisements are programmed from the "speed" bit mask before
 * autonegotiation is restarted.
 *
 * "need_restart" is accepted for interface compatibility but is not
 * used here.  Returns 0 on success or the error from the PHY reset.
 */
int
ngbe_phy_setup_link(struct ngbe_softc *sc, uint32_t speed, int need_restart)
{
	struct ngbe_hw *hw = &sc->hw;
	uint16_t value = 0;
	int status;

	if (!hw->mac.autoneg) {
		/* Forced speed: reset the PHY so it starts clean. */
		status = hw->phy.ops.reset(sc);
		if (status) {
			printf("%s: phy reset failed\n", DEVNAME(sc));
			return status;
		}

		/* Map the requested speed onto the MDI speed select bits. */
		switch (speed) {
		case NGBE_LINK_SPEED_1GB_FULL:
			value = NGBE_MDI_PHY_SPEED_SELECT1;
			break;
		case NGBE_LINK_SPEED_100_FULL:
			value = NGBE_MDI_PHY_SPEED_SELECT0;
			break;
		case NGBE_LINK_SPEED_10_FULL:
			value = 0;
			break;
		default:
			value = NGBE_MDI_PHY_SPEED_SELECT0 |
			    NGBE_MDI_PHY_SPEED_SELECT1;
			printf("%s: unknown speed = 0x%x\n",
			    DEVNAME(sc), speed);
			break;
		}
		/* duplex full */
		value |= NGBE_MDI_PHY_DUPLEX;
		hw->phy.ops.write_reg(hw, 0, 0, value);

		goto skip_an;
	}

	/* Disable 10/100M Half Duplex */
	hw->phy.ops.read_reg(hw, 4, 0, &value);
	value &= 0xff5f;
	hw->phy.ops.write_reg(hw, 4, 0, value);

	/* Set advertise enable according to input speed */
	/* Register 9, bit 0x200: 1000BASE-T full-duplex advertisement. */
	hw->phy.ops.read_reg(hw, 9, 0, &value);
	if (!(speed & NGBE_LINK_SPEED_1GB_FULL))
		value &= 0xfdff;
	else
		value |= 0x200;
	hw->phy.ops.write_reg(hw, 9, 0, value);

	/* Register 4, bit 0x100: 100BASE-TX full-duplex advertisement. */
	hw->phy.ops.read_reg(hw, 4, 0, &value);
	if (!(speed & NGBE_LINK_SPEED_100_FULL))
		value &= 0xfeff;
	else
		value |= 0x100;
	hw->phy.ops.write_reg(hw, 4, 0, value);

	/* Register 4, bit 0x40: 10BASE-T full-duplex advertisement. */
	hw->phy.ops.read_reg(hw, 4, 0, &value);
	if (!(speed & NGBE_LINK_SPEED_10_FULL))
		value &= 0xffbf;
	else
		value |= 0x40;
	hw->phy.ops.write_reg(hw, 4, 0, value);

	/* Restart AN and wait AN done interrupt */
	value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE;
	hw->phy.ops.write_reg(hw, 0, 0, value);

skip_an:
	hw->phy.ops.phy_led_ctrl(sc);
	hw->phy.ops.check_event(sc);

	return 0;
}
35765cd48f1eSkevlo 
35775cd48f1eSkevlo uint16_t
ngbe_read_pci_cfg_word(struct ngbe_softc * sc,uint32_t reg)35785cd48f1eSkevlo ngbe_read_pci_cfg_word(struct ngbe_softc *sc, uint32_t reg)
35795cd48f1eSkevlo {
35805cd48f1eSkevlo 	struct ngbe_osdep *os = &sc->osdep;
35815cd48f1eSkevlo 	struct pci_attach_args *pa = &os->os_pa;
35825cd48f1eSkevlo 	uint32_t value;
35835cd48f1eSkevlo 	int high = 0;
35845cd48f1eSkevlo 
35855cd48f1eSkevlo 	if (reg & 0x2) {
35865cd48f1eSkevlo 		high = 1;
35875cd48f1eSkevlo 		reg &= ~0x2;
35885cd48f1eSkevlo 	}
35895cd48f1eSkevlo 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
35905cd48f1eSkevlo 
35915cd48f1eSkevlo 	if (high)
35925cd48f1eSkevlo 		value >>= 16;
35935cd48f1eSkevlo 
35945cd48f1eSkevlo 	return (value & 0xffff);
35955cd48f1eSkevlo }
35965cd48f1eSkevlo 
35975cd48f1eSkevlo void
ngbe_release_eeprom_semaphore(struct ngbe_hw * hw)35985cd48f1eSkevlo ngbe_release_eeprom_semaphore(struct ngbe_hw *hw)
35995cd48f1eSkevlo {
36005cd48f1eSkevlo 	if (ngbe_check_mng_access(hw)) {
36015cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_MIS_SWSM, NGBE_MIS_SWSM_SMBI, 0);
36025cd48f1eSkevlo 		NGBE_WRITE_FLUSH(hw);
36035cd48f1eSkevlo 	}
36045cd48f1eSkevlo }
36055cd48f1eSkevlo 
/*
 * Acquire the software/firmware synchronization bits given in "mask"
 * (SW bits; the matching FW bits are mask << 16).
 *
 * The SW NVM (EEPROM) semaphore guards every access to the
 * SW_FW_SYNC register, so it is taken around each read-modify-write.
 * Up to 200 attempts are made, 5 ms apart, while the resource is
 * held by firmware or another software agent.  Returns 0 when the
 * bits were acquired, 1 on failure.
 */
int
ngbe_acquire_swfw_sync(struct ngbe_softc *sc, uint32_t mask)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t gssr = 0;
	uint32_t swmask = mask;
	uint32_t fwmask = mask << 16;
	int i, timeout = 200;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ngbe_get_eeprom_semaphore(sc))
			return 1;
		/*
		 * NOTE(review): when management access is down the EEPROM
		 * semaphore is not explicitly dropped before retrying; the
		 * release path itself requires mng access, so an explicit
		 * release would be a no-op -- confirm against hw docs.
		 */
		if (ngbe_check_mng_access(hw)) {
			gssr = NGBE_READ_REG(hw, NGBE_MNG_SWFW_SYNC);
			if (!(gssr & (fwmask | swmask))) {
				/* Free: claim our SW bits, drop semaphore. */
				gssr |= swmask;
				NGBE_WRITE_REG(hw, NGBE_MNG_SWFW_SYNC, gssr);
				ngbe_release_eeprom_semaphore(hw);
				return 0;
			} else {
				/* Resource is currently in use by FW or SW */
				ngbe_release_eeprom_semaphore(hw);
				msec_delay(5);
			}
		}
	}

	printf("%s: semaphore failed\n", DEVNAME(sc));

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ngbe_release_swfw_sync(sc, gssr & (fwmask | swmask));

	msec_delay(5);
	return 1;
}
36465cd48f1eSkevlo 
/*
 * Release the SW_FW_SYNC bits in "mask".  The EEPROM semaphore is
 * held around the register write because it guards all SW_FW_SYNC
 * accesses (see ngbe_acquire_swfw_sync()).
 */
void
ngbe_release_swfw_sync(struct ngbe_softc *sc, uint32_t mask)
{
	struct ngbe_hw *hw = &sc->hw;

	ngbe_get_eeprom_semaphore(sc);
	if (ngbe_check_mng_access(hw))
		NGBE_WRITE_REG_MASK(hw, NGBE_MNG_SWFW_SYNC, mask, 0);

	ngbe_release_eeprom_semaphore(hw);
}
36585cd48f1eSkevlo 
36595cd48f1eSkevlo void
ngbe_reset(struct ngbe_softc * sc)36605cd48f1eSkevlo ngbe_reset(struct ngbe_softc *sc)
36615cd48f1eSkevlo {
36625cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
36635cd48f1eSkevlo 	int error;
36645cd48f1eSkevlo 
36655cd48f1eSkevlo 	error = hw->mac.ops.init_hw(sc);
36665cd48f1eSkevlo 	switch (error) {
36675cd48f1eSkevlo 	case 0:
36685cd48f1eSkevlo 		break;
36695cd48f1eSkevlo 	default:
36705cd48f1eSkevlo 		printf("%s: hardware error\n", DEVNAME(sc));
36715cd48f1eSkevlo 		break;
36725cd48f1eSkevlo 	}
36735cd48f1eSkevlo }
36745cd48f1eSkevlo 
/*
 * Perform a hardware reset: stop the adapter, (re)initialize the
 * PHY, reset the MAC (software/global or per-LAN reset), then run
 * the misc init and restore the receive address state.
 * Returns 0 on success or an errno-style value.
 */
int
ngbe_reset_hw(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ngbe_mac_info *mac = &hw->mac;
	uint32_t i, reset_status, rst_delay;
	uint32_t reset = 0;
	int status = 0;

	/* Quiesce Rx/Tx and DMA before touching reset registers. */
	status = hw->mac.ops.stop_adapter(sc);
	if (status)
		goto reset_hw_out;

	/* Identify PHY and related function pointers */
	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA)) {
		status = hw->phy.ops.init(sc);
		if (status)
			goto reset_hw_out;
	}

	/* Copper media uses the copper-specific link operations. */
	if (ngbe_get_media_type(hw) == ngbe_media_type_copper) {
		mac->ops.setup_link = ngbe_setup_copper_link;
		mac->ops.get_link_capabilities =
		    ngbe_get_copper_link_capabilities;
	}

	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	if (hw->force_full_reset) {
		/*
		 * rst_delay: init time reported in MIS_RST_ST, apparently
		 * in 100 ms units (see the delays below).
		 */
	 	rst_delay = (NGBE_READ_REG(hw, NGBE_MIS_RST_ST) &
		    NGBE_MIS_RST_ST_RST_INIT) >> NGBE_MIS_RST_ST_RST_INI_SHIFT;
		if (hw->reset_type == NGBE_SW_RESET) {
			/* Poll until the device reset state clears. */
			for (i = 0; i < rst_delay + 20; i++) {
				reset_status =
				    NGBE_READ_REG(hw, NGBE_MIS_RST_ST);
				if (!(reset_status &
				    NGBE_MIS_RST_ST_DEV_RST_ST_MASK))
					break;
				msec_delay(100);
			}

			if (reset_status & NGBE_MIS_RST_ST_DEV_RST_ST_MASK) {
				status = ETIMEDOUT;
				printf("%s: software reset polling failed to "
				    "complete\n", DEVNAME(sc));
				goto reset_hw_out;
			}
			status = ngbe_check_flash_load(sc,
			    NGBE_SPI_ILDR_STATUS_SW_RESET);
			if (status)
				goto reset_hw_out;
		} else if (hw->reset_type == NGBE_GLOBAL_RESET) {
			/* Global reset: just wait out the init time. */
			msec_delay(100 * rst_delay + 2000);
		}
	} else {
		/* Per-port (LAN) reset keyed off our function number. */
		if (hw->bus.lan_id == 0)
			reset = NGBE_MIS_RST_LAN0_RST;
		else if (hw->bus.lan_id == 1)
			reset = NGBE_MIS_RST_LAN1_RST;
		else if (hw->bus.lan_id == 2)
			reset = NGBE_MIS_RST_LAN2_RST;
		else if (hw->bus.lan_id == 3)
			reset = NGBE_MIS_RST_LAN3_RST;

		NGBE_WRITE_REG(hw, NGBE_MIS_RST,
		    reset | NGBE_READ_REG(hw, NGBE_MIS_RST));
		NGBE_WRITE_FLUSH(hw);
		msec_delay(15);
	}

	ngbe_reset_misc(hw);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 32,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = NGBE_SP_RAR_ENTRIES;
	hw->mac.ops.init_rx_addrs(sc);

reset_hw_out:
	return status;
}
37655cd48f1eSkevlo 
/*
 * Post-reset miscellaneous MAC/PSR initialization: jumbo receive,
 * counter behaviour, flow-control receive, packet filter, flex
 * filter tables, pause-frame destination MAC and thermal thresholds.
 */
void
ngbe_reset_misc(struct ngbe_hw *hw)
{
	int i;

	/* Receive packets of size > 2048 */
	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_CFG, NGBE_MAC_RX_CFG_JE,
	    NGBE_MAC_RX_CFG_JE);

	/* Clear counters on read */
	NGBE_WRITE_REG_MASK(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_RSTONRD,
	    NGBE_MMC_CONTROL_RSTONRD);

	/* Enable reception of flow control frames (RFE). */
	NGBE_WRITE_REG_MASK(hw, NGBE_MAC_RX_FLOW_CTRL,
	    NGBE_MAC_RX_FLOW_CTRL_RFE, NGBE_MAC_RX_FLOW_CTRL_RFE);

	/* Set the promiscuous (pass-all) bit in the MAC packet filter. */
	NGBE_WRITE_REG(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR);

	/* NOTE(review): 0x1e00 reset-init value taken from vendor code. */
	NGBE_WRITE_REG_MASK(hw, NGBE_MIS_RST_ST, NGBE_MIS_RST_ST_RST_INIT,
	    0x1e00);

	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
	NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_DW_L(i), 0);
		NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_DW_H(i), 0);
		NGBE_WRITE_REG(hw, NGBE_PSR_MNG_FLEX_MSK(i), 0);
	}
	NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_DW_L(i), 0);
		NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_DW_H(i), 0);
		NGBE_WRITE_REG(hw, NGBE_PSR_LAN_FLEX_MSK(i), 0);
	}

	/* Set pause frame dst mac addr (01:80:c2:00:00:01). */
	NGBE_WRITE_REG(hw, NGBE_RDB_PFCMACDAL, 0xc2000001);
	NGBE_WRITE_REG(hw, NGBE_RDB_PFCMACDAH, 0x0180);

	/* MDIO clause select; 0xf is the vendor-recommended value. */
	NGBE_WRITE_REG(hw, NGBE_MDIO_CLAUSE_SELECT, 0xf);

	ngbe_init_thermal_sensor_thresh(hw);
}
38095cd48f1eSkevlo 
/*
 * Report the driver version (maj.min.build.sub) to management
 * firmware with the DRIVER_INFO host interface command.  The command
 * is retried up to FW_CEM_MAX_RETRIES times; once delivered, the
 * firmware's response status decides the result.  Returns 0 on
 * success, EINVAL if firmware rejected the command, or the host
 * interface error.
 */
int
ngbe_set_fw_drv_ver(struct ngbe_softc *sc, uint8_t maj, uint8_t min,
    uint8_t build, uint8_t sub)
{
	struct ngbe_hw *hw = &sc->hw;
	struct ngbe_hic_drv_info fw_cmd;
	int i, error = 0;

	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = (uint8_t)hw->bus.lan_id;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	/* Checksum over header + payload with the checksum field zeroed. */
	fw_cmd.hdr.checksum = 0;
	fw_cmd.hdr.checksum = ngbe_calculate_checksum((uint8_t *)&fw_cmd,
	    (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	/*
	 * NOTE(review): pad/pad2 are zeroed after the checksum is taken;
	 * this mirrors the vendor/ixgbe pattern and assumes the pads lie
	 * outside the checksummed region -- confirm buf_len excludes them.
	 */
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;

	DELAY(5000);
	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		error = ngbe_host_interface_command(sc, (uint32_t *)&fw_cmd,
		    sizeof(fw_cmd), NGBE_HI_COMMAND_TIMEOUT, 1);
		if (error)
			continue;

		/* Command delivered; firmware status is the final word. */
		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			error = 0;
		else
			error = EINVAL;
		break;
	}

	return error;
}
38495cd48f1eSkevlo 
38505cd48f1eSkevlo void
ngbe_set_ivar(struct ngbe_softc * sc,uint16_t entry,uint16_t vector,int8_t type)38515cd48f1eSkevlo ngbe_set_ivar(struct ngbe_softc *sc, uint16_t entry, uint16_t vector, int8_t
38525cd48f1eSkevlo type)
38535cd48f1eSkevlo {
38545cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
38555cd48f1eSkevlo 	uint32_t ivar, index;
38565cd48f1eSkevlo 
38575cd48f1eSkevlo 	vector |= NGBE_PX_IVAR_ALLOC_VAL;
38585cd48f1eSkevlo 
38595cd48f1eSkevlo 	if (type == -1) {
38605cd48f1eSkevlo 		/* other causes */
38615cd48f1eSkevlo 		index = 0;
38625cd48f1eSkevlo 		ivar = NGBE_READ_REG(hw, NGBE_PX_MISC_IVAR);
38635cd48f1eSkevlo 		ivar &= ~((uint32_t)0xff << index);
38645cd48f1eSkevlo 		ivar |= ((uint32_t)vector << index);
38655cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_MISC_IVAR, ivar);
38665cd48f1eSkevlo 	} else {
38675cd48f1eSkevlo 		/* Tx or Rx causes */
38685cd48f1eSkevlo 		index = ((16 * (entry & 1)) + (8 * type));
38695cd48f1eSkevlo 		ivar = NGBE_READ_REG(hw, NGBE_PX_IVAR(entry >> 1));
38705cd48f1eSkevlo 		ivar &= ~((uint32_t)0xff << index);
38715cd48f1eSkevlo 		ivar |= ((uint32_t)vector << index);
38725cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PX_IVAR(entry >> 1), ivar);
38735cd48f1eSkevlo 	}
38745cd48f1eSkevlo }
38755cd48f1eSkevlo 
38765cd48f1eSkevlo void
ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw * hw)38775cd48f1eSkevlo ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw)
38785cd48f1eSkevlo {
38795cd48f1eSkevlo 	struct ngbe_bus_info *bus = &hw->bus;
38805cd48f1eSkevlo 	uint32_t reg = 0;
38815cd48f1eSkevlo 
38825cd48f1eSkevlo 	reg = NGBE_READ_REG(hw, NGBE_CFG_PORT_ST);
38835cd48f1eSkevlo 	bus->lan_id = NGBE_CFG_PORT_ST_LAN_ID(reg);
38845cd48f1eSkevlo }
38855cd48f1eSkevlo 
38865cd48f1eSkevlo void
ngbe_set_mta(struct ngbe_hw * hw,uint8_t * mc_addr)38875cd48f1eSkevlo ngbe_set_mta(struct ngbe_hw *hw, uint8_t *mc_addr)
38885cd48f1eSkevlo {
38895cd48f1eSkevlo 	uint32_t vector, vector_bit, vector_reg;
38905cd48f1eSkevlo 
38915cd48f1eSkevlo 	hw->addr_ctrl.mta_in_use++;
38925cd48f1eSkevlo 
38935cd48f1eSkevlo 	vector = ngbe_mta_vector(hw, mc_addr);
38945cd48f1eSkevlo 
38955cd48f1eSkevlo 	/*
38965cd48f1eSkevlo 	 * The MTA is a register array of 128 32-bit registers. It is treated
38975cd48f1eSkevlo 	 * like an array of 4096 bits.  We want to set bit
38985cd48f1eSkevlo 	 * BitArray[vector_value]. So we figure out what register the bit is
38995cd48f1eSkevlo 	 * in, read it, OR in the new bit, then write back the new value.  The
39005cd48f1eSkevlo 	 * register is determined by the upper 7 bits of the vector value and
39015cd48f1eSkevlo 	 * the bit within that register are determined by the lower 5 bits of
39025cd48f1eSkevlo 	 * the value.
39035cd48f1eSkevlo 	 */
39045cd48f1eSkevlo 	vector_reg = (vector >> 5) & 0x7f;
39055cd48f1eSkevlo 	vector_bit = vector & 0x1f;
39065cd48f1eSkevlo 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
39075cd48f1eSkevlo }
39085cd48f1eSkevlo 
39095cd48f1eSkevlo void
ngbe_set_pci_config_data(struct ngbe_hw * hw,uint16_t link_status)39105cd48f1eSkevlo ngbe_set_pci_config_data(struct ngbe_hw *hw, uint16_t link_status)
39115cd48f1eSkevlo {
39125cd48f1eSkevlo 	if (hw->bus.type == ngbe_bus_type_unknown)
39135cd48f1eSkevlo 		hw->bus.type = ngbe_bus_type_pci_express;
39145cd48f1eSkevlo 
39155cd48f1eSkevlo 	switch (link_status & NGBE_PCI_LINK_WIDTH) {
39165cd48f1eSkevlo 	case NGBE_PCI_LINK_WIDTH_1:
39175cd48f1eSkevlo 		hw->bus.width = ngbe_bus_width_pcie_x1;
39185cd48f1eSkevlo 		break;
39195cd48f1eSkevlo 	case NGBE_PCI_LINK_WIDTH_2:
39205cd48f1eSkevlo 		hw->bus.width = ngbe_bus_width_pcie_x2;
39215cd48f1eSkevlo 		break;
39225cd48f1eSkevlo 	case NGBE_PCI_LINK_WIDTH_4:
39235cd48f1eSkevlo 		hw->bus.width = ngbe_bus_width_pcie_x4;
39245cd48f1eSkevlo 		break;
39255cd48f1eSkevlo 	case NGBE_PCI_LINK_WIDTH_8:
39265cd48f1eSkevlo 		hw->bus.width = ngbe_bus_width_pcie_x8;
39275cd48f1eSkevlo 		break;
39285cd48f1eSkevlo 	default:
39295cd48f1eSkevlo 		hw->bus.width = ngbe_bus_width_unknown;
39305cd48f1eSkevlo 		break;
39315cd48f1eSkevlo 	}
39325cd48f1eSkevlo 
39335cd48f1eSkevlo 	switch (link_status & NGBE_PCI_LINK_SPEED) {
39345cd48f1eSkevlo 	case NGBE_PCI_LINK_SPEED_2500:
39355cd48f1eSkevlo 		hw->bus.speed = ngbe_bus_speed_2500;
39365cd48f1eSkevlo 		break;
39375cd48f1eSkevlo 	case NGBE_PCI_LINK_SPEED_5000:
39385cd48f1eSkevlo 		hw->bus.speed = ngbe_bus_speed_5000;
39395cd48f1eSkevlo 		break;
39405cd48f1eSkevlo 	case NGBE_PCI_LINK_SPEED_8000:
39415cd48f1eSkevlo 		hw->bus.speed = ngbe_bus_speed_8000;
39425cd48f1eSkevlo 		break;
39435cd48f1eSkevlo 	default:
39445cd48f1eSkevlo 		hw->bus.speed = ngbe_bus_speed_unknown;
39455cd48f1eSkevlo 		break;
39465cd48f1eSkevlo 	}
39475cd48f1eSkevlo }
39485cd48f1eSkevlo 
/*
 * Program receive address register (RAR) "index" with "addr".
 *
 * The SWC_IDX write selects which RAR the subsequent AD_L/AD_H
 * writes land in, so the register write order below is significant.
 * "pools" supplies the VMDq pool bitmap and a nonzero "enable_addr"
 * marks the entry valid.  Returns 0 on success or EINVAL for an
 * out-of-range index.
 */
int
ngbe_set_rar(struct ngbe_softc *sc, uint32_t index, uint8_t *addr,
    uint64_t pools, uint32_t enable_addr)
{
	struct ngbe_hw *hw = &sc->hw;
	uint32_t rar_entries = hw->mac.num_rar_entries;
	uint32_t rar_low, rar_high;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		printf("%s: RAR index %d is out of range\n",
		    DEVNAME(sc), index);
		return EINVAL;
	}

	/* Select the MAC address */
	NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_IDX, index);

	/* Setup VMDq pool mapping */
	NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_VM, pools & 0xffffffff);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 *
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_low = ((uint32_t)addr[5] | ((uint32_t)addr[4] << 8) |
	    ((uint32_t)addr[3] << 16) | ((uint32_t)addr[2] << 24));
	rar_high = ((uint32_t)addr[1] | ((uint32_t)addr[0] << 8));
	if (enable_addr != 0)
		rar_high |= NGBE_PSR_MAC_SWC_AD_H_AV;

	NGBE_WRITE_REG(hw, NGBE_PSR_MAC_SWC_AD_L, rar_low);
	NGBE_WRITE_REG_MASK(hw, NGBE_PSR_MAC_SWC_AD_H,
	    (NGBE_PSR_MAC_SWC_AD_H_AD(~0) | NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
	    NGBE_PSR_MAC_SWC_AD_H_AV), rar_high);

	return 0;
}
39915cd48f1eSkevlo 
39925cd48f1eSkevlo void
ngbe_set_rx_drop_en(struct ngbe_softc * sc)39935cd48f1eSkevlo ngbe_set_rx_drop_en(struct ngbe_softc *sc)
39945cd48f1eSkevlo {
39955cd48f1eSkevlo 	uint32_t srrctl;
39965cd48f1eSkevlo 	int i;
39975cd48f1eSkevlo 
39985cd48f1eSkevlo 	if ((sc->sc_nqueues > 1) &&
39995cd48f1eSkevlo 	    !(sc->hw.fc.current_mode & ngbe_fc_tx_pause)) {
40005cd48f1eSkevlo 		for (i = 0; i < sc->sc_nqueues; i++) {
40015cd48f1eSkevlo 			srrctl = NGBE_READ_REG(&sc->hw, NGBE_PX_RR_CFG(i));
40025cd48f1eSkevlo 			srrctl |= NGBE_PX_RR_CFG_DROP_EN;
40035cd48f1eSkevlo 			NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_CFG(i), srrctl);
40045cd48f1eSkevlo 		}
40055cd48f1eSkevlo 
40065cd48f1eSkevlo 	} else {
40075cd48f1eSkevlo 		for (i = 0; i < sc->sc_nqueues; i++) {
40085cd48f1eSkevlo 			srrctl = NGBE_READ_REG(&sc->hw, NGBE_PX_RR_CFG(i));
40095cd48f1eSkevlo 			srrctl &= ~NGBE_PX_RR_CFG_DROP_EN;
40105cd48f1eSkevlo 			NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_CFG(i), srrctl);
40115cd48f1eSkevlo 		}
40125cd48f1eSkevlo 	}
40135cd48f1eSkevlo }
40145cd48f1eSkevlo 
40155cd48f1eSkevlo void
ngbe_set_rxpba(struct ngbe_hw * hw,int num_pb,uint32_t headroom,int strategy)40165cd48f1eSkevlo ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, uint32_t headroom, int strategy)
40175cd48f1eSkevlo {
40185cd48f1eSkevlo 	uint32_t pbsize = hw->mac.rx_pb_size;
40195cd48f1eSkevlo 	uint32_t txpktsize, txpbthresh, rxpktsize = 0;
40205cd48f1eSkevlo 
40215cd48f1eSkevlo 	/* Reserve headroom */
40225cd48f1eSkevlo 	pbsize -= headroom;
40235cd48f1eSkevlo 
40245cd48f1eSkevlo 	if (!num_pb)
40255cd48f1eSkevlo 		num_pb = 1;
40265cd48f1eSkevlo 
40275cd48f1eSkevlo 	/*
40285cd48f1eSkevlo 	 * Divide remaining packet buffer space amongst the number of packet
40295cd48f1eSkevlo 	 * buffers requested using supplied strategy.
40305cd48f1eSkevlo 	 */
40315cd48f1eSkevlo 	switch (strategy) {
40325cd48f1eSkevlo 	case PBA_STRATEGY_EQUAL:
40335cd48f1eSkevlo 		rxpktsize = (pbsize / num_pb) << NGBE_RDB_PB_SZ_SHIFT;
40345cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_RDB_PB_SZ, rxpktsize);
40355cd48f1eSkevlo 		break;
40365cd48f1eSkevlo 	default:
40375cd48f1eSkevlo 		break;
40385cd48f1eSkevlo 	}
40395cd48f1eSkevlo 
40405cd48f1eSkevlo 	/* Only support an equally distributed Tx packet buffer strategy. */
40415cd48f1eSkevlo 	txpktsize = NGBE_TDB_PB_SZ_MAX / num_pb;
40425cd48f1eSkevlo 	txpbthresh = (txpktsize / 1024) - NGBE_TXPKT_SIZE_MAX;
40435cd48f1eSkevlo 
40445cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_TDB_PB_SZ, txpktsize);
40455cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_TDM_PB_THRE, txpbthresh);
40465cd48f1eSkevlo }
40475cd48f1eSkevlo 
40485cd48f1eSkevlo int
ngbe_setup_copper_link(struct ngbe_softc * sc,uint32_t speed,int need_restart)40495cd48f1eSkevlo ngbe_setup_copper_link(struct ngbe_softc *sc, uint32_t speed, int need_restart)
40505cd48f1eSkevlo {
40515cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
40525cd48f1eSkevlo 	int status = 0;
40535cd48f1eSkevlo 
40545cd48f1eSkevlo 	/* Setup the PHY according to input speed */
40555cd48f1eSkevlo 	if (!((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA))
40565cd48f1eSkevlo 		status = hw->phy.ops.setup_link(sc, speed, need_restart);
40575cd48f1eSkevlo 
40585cd48f1eSkevlo 	return status;
40595cd48f1eSkevlo }
40605cd48f1eSkevlo 
/*
 * Validate the requested flow control mode and program the matching
 * pause advertisement bits into the PHY (copper, non-FPGA parts
 * only).  Returns 0 on success or EINVAL for an invalid request.
 */
int
ngbe_setup_fc(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	uint16_t pcap_backplane = 0;
	int error = 0;

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) {
		printf("%s: ngbe_fc_rx_pause not valid in strict IEEE mode\n",
		    DEVNAME(sc));
		error = EINVAL;
		goto out;
	}

	/*
	 * Gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ngbe_fc_default)
		hw->fc.requested_mode = ngbe_fc_full;

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ngbe_fc_none:
		/* Flow control completely disabled by software override. */
		break;
	case ngbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM;
		break;
	case ngbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		/* FALLTHROUGH */
	case ngbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM |
		    NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM;
		break;
	default:
		printf("%s: flow control param set incorrectly\n", DEVNAME(sc));
		error = EINVAL;
		goto out;
	}

	/* AUTOC restart handles negotiation of 1G on backplane and copper. */
	if ((hw->phy.media_type == ngbe_media_type_copper) &&
	    !((hw->subsystem_device_id & OEM_MASK) == RGMII_FPGA))
		error = hw->phy.ops.set_adv_pause(hw, pcap_backplane);
out:
	return error;
}
41325cd48f1eSkevlo 
41335cd48f1eSkevlo void
ngbe_setup_gpie(struct ngbe_hw * hw)41345cd48f1eSkevlo ngbe_setup_gpie(struct ngbe_hw *hw)
41355cd48f1eSkevlo {
41365cd48f1eSkevlo 	uint32_t gpie;
41375cd48f1eSkevlo 
41385cd48f1eSkevlo 	gpie = NGBE_PX_GPIE_MODEL;
41395cd48f1eSkevlo 
41405cd48f1eSkevlo 	/*
41415cd48f1eSkevlo 	 * use EIAM to auto-mask when MSI-X interrupt is asserted
41425cd48f1eSkevlo 	 * this saves a register write for every interrupt.
41435cd48f1eSkevlo 	 */
41445cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_PX_GPIE, gpie);
41455cd48f1eSkevlo }
41465cd48f1eSkevlo 
41475cd48f1eSkevlo void
ngbe_setup_isb(struct ngbe_softc * sc)41485cd48f1eSkevlo ngbe_setup_isb(struct ngbe_softc *sc)
41495cd48f1eSkevlo {
41505cd48f1eSkevlo 	uint64_t idba = sc->isbdma.dma_map->dm_segs[0].ds_addr;
41515cd48f1eSkevlo 
41525cd48f1eSkevlo 	/* Set ISB address */
41535cd48f1eSkevlo 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ISB_ADDR_L,
41545cd48f1eSkevlo 	    (idba & 0x00000000ffffffffULL));
41555cd48f1eSkevlo 	NGBE_WRITE_REG(&sc->hw, NGBE_PX_ISB_ADDR_H, (idba >> 32));
41565cd48f1eSkevlo }
41575cd48f1eSkevlo 
41585cd48f1eSkevlo void
ngbe_setup_psrtype(struct ngbe_hw * hw)41595cd48f1eSkevlo ngbe_setup_psrtype(struct ngbe_hw *hw)
41605cd48f1eSkevlo {
41615cd48f1eSkevlo 	uint32_t psrtype;
41625cd48f1eSkevlo 
41635cd48f1eSkevlo 	/* PSRTYPE must be initialized in adapters */
41645cd48f1eSkevlo 	psrtype = NGBE_RDB_PL_CFG_L4HDR | NGBE_RDB_PL_CFG_L3HDR |
41655cd48f1eSkevlo 	    NGBE_RDB_PL_CFG_L2HDR | NGBE_RDB_PL_CFG_TUN_TUNHDR |
41665cd48f1eSkevlo 	    NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR;
41675cd48f1eSkevlo 
41685cd48f1eSkevlo 	NGBE_WRITE_REG(hw, NGBE_RDB_PL_CFG(0), psrtype);
41695cd48f1eSkevlo }
41705cd48f1eSkevlo 
41715cd48f1eSkevlo void
ngbe_setup_vlan_hw_support(struct ngbe_softc * sc)41725cd48f1eSkevlo ngbe_setup_vlan_hw_support(struct ngbe_softc *sc)
41735cd48f1eSkevlo {
41745cd48f1eSkevlo 	struct ngbe_hw *hw = &sc->hw;
41755cd48f1eSkevlo 	int i;
41765cd48f1eSkevlo 
41775cd48f1eSkevlo 	for (i = 0; i < sc->sc_nqueues; i++) {
41785cd48f1eSkevlo 		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
41795cd48f1eSkevlo 		    NGBE_PX_RR_CFG_VLAN, NGBE_PX_RR_CFG_VLAN);
41805cd48f1eSkevlo 	}
41815cd48f1eSkevlo }
41825cd48f1eSkevlo 
/*
 * Prepare the hardware for use after reset: determine the media
 * type, clear the VLAN filter table and hardware counters, and set
 * up flow control.  Returns the status of the flow control setup.
 */
int
ngbe_start_hw(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	int error;

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Make sure the clears have reached the device. */
	NGBE_WRITE_FLUSH(hw);

	/* Setup flow control */
	error = hw->mac.ops.setup_fc(sc);

	/* Clear adapter stopped flag */
	hw->adapter_stopped = 0;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = 1;

	return error;
}
42115cd48f1eSkevlo 
/*
 * Stop all hardware activity: disable the receive unit, clear
 * pending interrupts, disable every Tx and Rx queue, then disable
 * PCIe master access.  Returns the result of
 * ngbe_disable_pcie_master().
 */
int
ngbe_stop_adapter(struct ngbe_softc *sc)
{
	struct ngbe_hw *hw = &sc->hw;
	int i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware.
	 */
	hw->adapter_stopped = 1;

	/* Disable the receive unit. */
	hw->mac.ops.disable_rx(hw);

	/* Clear any pending interrupts, flush previous writes. */
	NGBE_WRITE_REG(hw, NGBE_PX_MISC_IC, 0xffffffff);

	/* NOTE(review): BME_CTL value 0x3 from vendor code -- purpose
	 * not documented here; confirm against hardware reference. */
	NGBE_WRITE_REG(hw, NGBE_BME_CTL, 0x3);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		NGBE_WRITE_REG_MASK(hw, NGBE_PX_TR_CFG(i),
		    NGBE_PX_TR_CFG_SWFLSH | NGBE_PX_TR_CFG_ENABLE,
		    NGBE_PX_TR_CFG_SWFLSH);
	}

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		NGBE_WRITE_REG_MASK(hw, NGBE_PX_RR_CFG(i),
		    NGBE_PX_RR_CFG_RR_EN, 0);
	}

	/* Flush all queues disables. */
	NGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	return ngbe_disable_pcie_master(sc);
}
42515cd48f1eSkevlo 
42525cd48f1eSkevlo void
ngbe_rx_checksum(uint32_t staterr,struct mbuf * m)42535cd48f1eSkevlo ngbe_rx_checksum(uint32_t staterr, struct mbuf *m)
42545cd48f1eSkevlo {
42555cd48f1eSkevlo 	if (staterr & NGBE_RXD_STAT_IPCS) {
42565cd48f1eSkevlo 		if (!(staterr & NGBE_RXD_ERR_IPE))
42575cd48f1eSkevlo 			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
42585cd48f1eSkevlo 		else
42595cd48f1eSkevlo 			m->m_pkthdr.csum_flags = 0;
42605cd48f1eSkevlo 	}
42615cd48f1eSkevlo 	if (staterr & NGBE_RXD_STAT_L4CS) {
42625cd48f1eSkevlo 		if (!(staterr & NGBE_RXD_ERR_TCPE))
42635cd48f1eSkevlo 			m->m_pkthdr.csum_flags |=
42645cd48f1eSkevlo 			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
42655cd48f1eSkevlo 	}
42665cd48f1eSkevlo }
42675cd48f1eSkevlo 
42685cd48f1eSkevlo void
ngbe_rxeof(struct rx_ring * rxr)42695cd48f1eSkevlo ngbe_rxeof(struct rx_ring *rxr)
42705cd48f1eSkevlo {
42715cd48f1eSkevlo 	struct ngbe_softc *sc = rxr->sc;
42725cd48f1eSkevlo 	struct ifnet *ifp = &sc->sc_ac.ac_if;
42735cd48f1eSkevlo 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
42745cd48f1eSkevlo 	struct mbuf *mp, *m;
42755cd48f1eSkevlo 	struct ngbe_rx_buf *rxbuf, *nxbuf;
42765cd48f1eSkevlo 	union ngbe_rx_desc *rxdesc;
42775cd48f1eSkevlo 	uint32_t staterr = 0;
42785cd48f1eSkevlo 	uint16_t len, vtag;
42795cd48f1eSkevlo 	uint8_t eop = 0;
42805cd48f1eSkevlo 	int i, nextp;
42815cd48f1eSkevlo 
42825cd48f1eSkevlo 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
42835cd48f1eSkevlo 		return;
42845cd48f1eSkevlo 
42855cd48f1eSkevlo 	i = rxr->next_to_check;
42865cd48f1eSkevlo 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
42875cd48f1eSkevlo 		uint32_t hash;
42885cd48f1eSkevlo 		uint16_t hashtype;
42895cd48f1eSkevlo 
42905cd48f1eSkevlo 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
42915cd48f1eSkevlo 		    i * sizeof(union ngbe_rx_desc), sizeof(union ngbe_rx_desc),
42925cd48f1eSkevlo 		    BUS_DMASYNC_POSTREAD);
42935cd48f1eSkevlo 
42945cd48f1eSkevlo 		rxdesc = &rxr->rx_base[i];
42955cd48f1eSkevlo 		staterr = letoh32(rxdesc->wb.upper.status_error);
42965cd48f1eSkevlo 		if (!ISSET(staterr, NGBE_RXD_STAT_DD)) {
42975cd48f1eSkevlo 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
42985cd48f1eSkevlo 			    i * sizeof(union ngbe_rx_desc),
42995cd48f1eSkevlo 			    sizeof(union ngbe_rx_desc), BUS_DMASYNC_PREREAD);
43005cd48f1eSkevlo 			break;
43015cd48f1eSkevlo 		}
43025cd48f1eSkevlo 
43035cd48f1eSkevlo 		/* Zero out the receive descriptors status. */
43045cd48f1eSkevlo 		rxdesc->wb.upper.status_error = 0;
43055cd48f1eSkevlo 		rxbuf = &rxr->rx_buffers[i];
43065cd48f1eSkevlo 
43075cd48f1eSkevlo 		/* Pull the mbuf off the ring. */
43085cd48f1eSkevlo 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
43095cd48f1eSkevlo 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
43105cd48f1eSkevlo 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
43115cd48f1eSkevlo 
43125cd48f1eSkevlo 		mp = rxbuf->buf;
43135cd48f1eSkevlo 		len = letoh16(rxdesc->wb.upper.length);
43145cd48f1eSkevlo 		vtag = letoh16(rxdesc->wb.upper.vlan);
43155cd48f1eSkevlo 		eop = ((staterr & NGBE_RXD_STAT_EOP) != 0);
43165cd48f1eSkevlo 		hash = letoh32(rxdesc->wb.lower.hi_dword.rss);
43175cd48f1eSkevlo 		hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
43185cd48f1eSkevlo 		    NGBE_RXD_RSSTYPE_MASK;
43195cd48f1eSkevlo 
43205cd48f1eSkevlo 		if (staterr & NGBE_RXD_ERR_RXE) {
43215cd48f1eSkevlo 			if (rxbuf->fmp) {
43225cd48f1eSkevlo 				m_freem(rxbuf->fmp);
43235cd48f1eSkevlo 				rxbuf->fmp = NULL;
43245cd48f1eSkevlo 			}
43255cd48f1eSkevlo 
43265cd48f1eSkevlo 			m_freem(mp);
43275cd48f1eSkevlo 			rxbuf->buf = NULL;
43285cd48f1eSkevlo 			goto next_desc;
43295cd48f1eSkevlo 		}
43305cd48f1eSkevlo 
43315cd48f1eSkevlo 		if (mp == NULL) {
43325cd48f1eSkevlo 			panic("%s: ngbe_rxeof: NULL mbuf in slot %d "
43335cd48f1eSkevlo 			    "(nrx %d, filled %d)", DEVNAME(sc), i,
43345cd48f1eSkevlo 			    if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled);
43355cd48f1eSkevlo 		}
43365cd48f1eSkevlo 
43375cd48f1eSkevlo 		if (!eop) {
43385cd48f1eSkevlo 			/*
43395cd48f1eSkevlo 			 * Figure out the next descriptor of this frame.
43405cd48f1eSkevlo 			 */
43415cd48f1eSkevlo 			nextp = i + 1;
43425cd48f1eSkevlo 			if (nextp == sc->num_rx_desc)
43435cd48f1eSkevlo 				nextp = 0;
43445cd48f1eSkevlo 			nxbuf = &rxr->rx_buffers[nextp];
43455cd48f1eSkevlo 			/* prefetch(nxbuf); */
43465cd48f1eSkevlo 		}
43475cd48f1eSkevlo 
43485cd48f1eSkevlo 		mp->m_len = len;
43495cd48f1eSkevlo 
43505cd48f1eSkevlo 		m = rxbuf->fmp;
43515cd48f1eSkevlo 		rxbuf->buf = rxbuf->fmp = NULL;
43525cd48f1eSkevlo 
43535cd48f1eSkevlo 		if (m != NULL)
43545cd48f1eSkevlo 			m->m_pkthdr.len += mp->m_len;
43555cd48f1eSkevlo 		else {
43565cd48f1eSkevlo 			m = mp;
43575cd48f1eSkevlo 			m->m_pkthdr.len = mp->m_len;
43585cd48f1eSkevlo #if NVLAN > 0
43595cd48f1eSkevlo 			if (staterr & NGBE_RXD_STAT_VP) {
43605cd48f1eSkevlo 				m->m_pkthdr.ether_vtag = vtag;
43615cd48f1eSkevlo 				m->m_flags |= M_VLANTAG;
43625cd48f1eSkevlo 			}
43635cd48f1eSkevlo #endif
43645cd48f1eSkevlo 		}
43655cd48f1eSkevlo 
43665cd48f1eSkevlo 		/* Pass the head pointer on */
43675cd48f1eSkevlo 		if (eop == 0) {
43685cd48f1eSkevlo 			nxbuf->fmp = m;
43695cd48f1eSkevlo 			m = NULL;
43705cd48f1eSkevlo 			mp->m_next = nxbuf->buf;
43715cd48f1eSkevlo 		} else {
43725cd48f1eSkevlo 			ngbe_rx_checksum(staterr, m);
43735cd48f1eSkevlo 
43745cd48f1eSkevlo 			if (hashtype != NGBE_RXD_RSSTYPE_NONE) {
43755cd48f1eSkevlo 				m->m_pkthdr.ph_flowid = hash;
43765cd48f1eSkevlo 				SET(m->m_pkthdr.csum_flags, M_FLOWID);
43775cd48f1eSkevlo 			}
43785cd48f1eSkevlo 
43795cd48f1eSkevlo 			ml_enqueue(&ml, m);
43805cd48f1eSkevlo 		}
43815cd48f1eSkevlo next_desc:
43825cd48f1eSkevlo 		if_rxr_put(&rxr->rx_ring, 1);
43835cd48f1eSkevlo 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
43845cd48f1eSkevlo 		    i * sizeof(union ngbe_rx_desc), sizeof(union ngbe_rx_desc),
43855cd48f1eSkevlo 		    BUS_DMASYNC_PREREAD);
43865cd48f1eSkevlo 
43875cd48f1eSkevlo 		/* Advance our pointers to the next descriptor. */
43885cd48f1eSkevlo 		if (++i == sc->num_rx_desc)
43895cd48f1eSkevlo 			i = 0;
43905cd48f1eSkevlo 	}
43915cd48f1eSkevlo 	rxr->next_to_check = i;
43925cd48f1eSkevlo 
43935cd48f1eSkevlo 	if (ifiq_input(rxr->ifiq, &ml))
43945cd48f1eSkevlo 		if_rxr_livelocked(&rxr->rx_ring);
43955cd48f1eSkevlo 
43965cd48f1eSkevlo 	if (!(staterr & NGBE_RXD_STAT_DD))
43975cd48f1eSkevlo 		return;
43985cd48f1eSkevlo }
43995cd48f1eSkevlo 
44005cd48f1eSkevlo void
ngbe_rxrefill(void * xrxr)44015cd48f1eSkevlo ngbe_rxrefill(void *xrxr)
44025cd48f1eSkevlo {
44035cd48f1eSkevlo 	struct rx_ring *rxr = xrxr;
44045cd48f1eSkevlo 	struct ngbe_softc *sc = rxr->sc;
44055cd48f1eSkevlo 
44065cd48f1eSkevlo 	if (ngbe_rxfill(rxr))
44075cd48f1eSkevlo 		NGBE_WRITE_REG(&sc->hw, NGBE_PX_RR_WP(rxr->me),
44085cd48f1eSkevlo 		    rxr->last_desc_filled);
44095cd48f1eSkevlo 	else if (if_rxr_inuse(&rxr->rx_ring) == 0)
44105cd48f1eSkevlo 		timeout_add(&rxr->rx_refill, 1);
44115cd48f1eSkevlo }
44125cd48f1eSkevlo 
44135cd48f1eSkevlo int
ngbe_tx_ctx_setup(struct tx_ring * txr,struct mbuf * m,uint32_t * cmd_type_len,uint32_t * olinfo_status)44145cd48f1eSkevlo ngbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *m, uint32_t *cmd_type_len,
44155cd48f1eSkevlo     uint32_t *olinfo_status)
44165cd48f1eSkevlo {
44175cd48f1eSkevlo 	struct ngbe_tx_context_desc *txd;
44185cd48f1eSkevlo 	struct ngbe_tx_buf *tx_buffer;
44195cd48f1eSkevlo 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
44205cd48f1eSkevlo 	int ctxd = txr->next_avail_desc;
44215cd48f1eSkevlo 	int offload = 0;
44225cd48f1eSkevlo 
44235cd48f1eSkevlo 	/* Indicate the whole packet as payload when not doing TSO */
44245cd48f1eSkevlo 	*olinfo_status |= m->m_pkthdr.len << NGBE_TXD_PAYLEN_SHIFT;
44255cd48f1eSkevlo 
44265cd48f1eSkevlo #if NVLAN > 0
44275cd48f1eSkevlo 	if (ISSET(m->m_flags, M_VLANTAG)) {
44285cd48f1eSkevlo 		uint32_t vtag = m->m_pkthdr.ether_vtag;
44295cd48f1eSkevlo 		vlan_macip_lens |= (vtag << NGBE_TXD_VLAN_SHIFT);
44305cd48f1eSkevlo 		*cmd_type_len |= NGBE_TXD_VLE;
44315cd48f1eSkevlo 		offload |= 1;
44325cd48f1eSkevlo 	}
44335cd48f1eSkevlo #endif
44345cd48f1eSkevlo 
44355cd48f1eSkevlo 	if (!offload)
44365cd48f1eSkevlo 		return 0;
44375cd48f1eSkevlo 
44385cd48f1eSkevlo 	txd = (struct ngbe_tx_context_desc *)&txr->tx_base[ctxd];
44395cd48f1eSkevlo 	tx_buffer = &txr->tx_buffers[ctxd];
44405cd48f1eSkevlo 
44415cd48f1eSkevlo 	type_tucmd_mlhl |= NGBE_TXD_DTYP_CTXT;
44425cd48f1eSkevlo 
44435cd48f1eSkevlo 	/* Now copy bits into descriptor */
44445cd48f1eSkevlo 	txd->vlan_macip_lens = htole32(vlan_macip_lens);
44455cd48f1eSkevlo 	txd->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
44465cd48f1eSkevlo 	txd->seqnum_seed = htole32(0);
44475cd48f1eSkevlo 	txd->mss_l4len_idx = htole32(0);
44485cd48f1eSkevlo 
44495cd48f1eSkevlo 	tx_buffer->m_head = NULL;
44505cd48f1eSkevlo 	tx_buffer->eop_index = -1;
44515cd48f1eSkevlo 
44525cd48f1eSkevlo 	return 1;
44535cd48f1eSkevlo }
44545cd48f1eSkevlo 
44555cd48f1eSkevlo void
ngbe_txeof(struct tx_ring * txr)44565cd48f1eSkevlo ngbe_txeof(struct tx_ring *txr)
44575cd48f1eSkevlo {
44585cd48f1eSkevlo 	struct ngbe_softc *sc = txr->sc;
44595cd48f1eSkevlo 	struct ifqueue *ifq = txr->ifq;
44605cd48f1eSkevlo 	struct ifnet *ifp = &sc->sc_ac.ac_if;
44615cd48f1eSkevlo 	struct ngbe_tx_buf *tx_buffer;
44625cd48f1eSkevlo 	union ngbe_tx_desc *tx_desc;
44635cd48f1eSkevlo 	unsigned int prod, cons, last;
44645cd48f1eSkevlo 
44655cd48f1eSkevlo 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
44665cd48f1eSkevlo 		return;
44675cd48f1eSkevlo 
44685cd48f1eSkevlo 	prod = txr->next_avail_desc;
44695cd48f1eSkevlo 	cons = txr->next_to_clean;
44705cd48f1eSkevlo 
44715cd48f1eSkevlo 	if (prod == cons)
44725cd48f1eSkevlo 		return;
44735cd48f1eSkevlo 
44745cd48f1eSkevlo 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
44755cd48f1eSkevlo 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
44765cd48f1eSkevlo 
44775cd48f1eSkevlo 	for (;;) {
44785cd48f1eSkevlo 		tx_buffer = &txr->tx_buffers[cons];
44795cd48f1eSkevlo 		last = tx_buffer->eop_index;
44805cd48f1eSkevlo 		tx_desc = (union ngbe_tx_desc *)&txr->tx_base[last];
44815cd48f1eSkevlo 
44825cd48f1eSkevlo 		if (!ISSET(tx_desc->wb.status, NGBE_TXD_STAT_DD))
44835cd48f1eSkevlo 			break;
44845cd48f1eSkevlo 
44855cd48f1eSkevlo 		bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
44865cd48f1eSkevlo 		    0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
44875cd48f1eSkevlo 		bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
44885cd48f1eSkevlo 		m_freem(tx_buffer->m_head);
44895cd48f1eSkevlo 
44905cd48f1eSkevlo 		tx_buffer->m_head = NULL;
44915cd48f1eSkevlo 		tx_buffer->eop_index = -1;
44925cd48f1eSkevlo 
44935cd48f1eSkevlo 		cons = last + 1;
44945cd48f1eSkevlo 		if (cons == sc->num_tx_desc)
44955cd48f1eSkevlo 			cons = 0;
44965cd48f1eSkevlo 		if (prod == cons) {
44975cd48f1eSkevlo 			/* All clean, turn off the timer */
44985cd48f1eSkevlo 			ifp->if_timer = 0;
44995cd48f1eSkevlo 			break;
45005cd48f1eSkevlo 		}
45015cd48f1eSkevlo 	}
45025cd48f1eSkevlo 
45035cd48f1eSkevlo 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
45045cd48f1eSkevlo 	    0, txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
45055cd48f1eSkevlo 
45065cd48f1eSkevlo 	txr->next_to_clean = cons;
45075cd48f1eSkevlo 
45085cd48f1eSkevlo 	if (ifq_is_oactive(ifq))
45095cd48f1eSkevlo 		ifq_restart(ifq);
45105cd48f1eSkevlo }
45115cd48f1eSkevlo 
45125cd48f1eSkevlo void
ngbe_update_mc_addr_list(struct ngbe_hw * hw,uint8_t * mc_addr_list,uint32_t mc_addr_count,ngbe_mc_addr_itr next,int clear)45135cd48f1eSkevlo ngbe_update_mc_addr_list(struct ngbe_hw *hw, uint8_t *mc_addr_list,
45145cd48f1eSkevlo     uint32_t mc_addr_count, ngbe_mc_addr_itr next, int clear)
45155cd48f1eSkevlo {
45165cd48f1eSkevlo 	uint32_t i, psrctl, vmdq;
45175cd48f1eSkevlo 
45185cd48f1eSkevlo 	/*
45195cd48f1eSkevlo 	 * Set the new number of MC addresses that we are being requested to
45205cd48f1eSkevlo 	 * use.
45215cd48f1eSkevlo 	 */
45225cd48f1eSkevlo 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
45235cd48f1eSkevlo 	hw->addr_ctrl.mta_in_use = 0;
45245cd48f1eSkevlo 
45255cd48f1eSkevlo 	/* Clear mta_shadow */
45265cd48f1eSkevlo 	if (clear)
45275cd48f1eSkevlo 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
45285cd48f1eSkevlo 
45295cd48f1eSkevlo 	/* Update mta_shadow */
45305cd48f1eSkevlo 	for (i = 0; i < mc_addr_count; i++)
45315cd48f1eSkevlo 		ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
45325cd48f1eSkevlo 
45335cd48f1eSkevlo 	/* Enable mta */
45345cd48f1eSkevlo 	for (i = 0; i < hw->mac.mcft_size; i++)
45355cd48f1eSkevlo 		NGBE_WRITE_REG_ARRAY(hw, NGBE_PSR_MC_TBL(0), i,
45365cd48f1eSkevlo 		    hw->mac.mta_shadow[i]);
45375cd48f1eSkevlo 
45385cd48f1eSkevlo 	if (hw->addr_ctrl.mta_in_use > 0) {
45395cd48f1eSkevlo 		psrctl = NGBE_READ_REG(hw, NGBE_PSR_CTL);
45405cd48f1eSkevlo 		psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE);
45415cd48f1eSkevlo 		psrctl |= NGBE_PSR_CTL_MFE |
45425cd48f1eSkevlo 		    (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT);
45435cd48f1eSkevlo 		NGBE_WRITE_REG(hw, NGBE_PSR_CTL, psrctl);
45445cd48f1eSkevlo 	}
45455cd48f1eSkevlo }
45465cd48f1eSkevlo 
45475cd48f1eSkevlo int
ngbe_validate_mac_addr(uint8_t * mac_addr)45485cd48f1eSkevlo ngbe_validate_mac_addr(uint8_t *mac_addr)
45495cd48f1eSkevlo {
45505cd48f1eSkevlo 	uint32_t status = 0;
45515cd48f1eSkevlo 
45525cd48f1eSkevlo 	/* Make sure it is not a multicast address */
45535cd48f1eSkevlo 	if (NGBE_IS_MULTICAST(mac_addr))
45545cd48f1eSkevlo 		status = EINVAL;
45555cd48f1eSkevlo 	/* Not a broadcast address */
45565cd48f1eSkevlo 	else if (NGBE_IS_BROADCAST(mac_addr))
45575cd48f1eSkevlo 		status = EINVAL;
45585cd48f1eSkevlo 	/* Reject the zero address */
45595cd48f1eSkevlo 	else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
45605cd48f1eSkevlo 	    mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
45615cd48f1eSkevlo 		status = EINVAL;
45625cd48f1eSkevlo 
45635cd48f1eSkevlo 	return status;
45645cd48f1eSkevlo }
4565