1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "if_em.h"
30 #include <sys/sbuf.h>
31 #include <machine/_inttypes.h>
32
33 #define em_mac_min e1000_82571
34 #define igb_mac_min e1000_82575
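/*
 * MAC types below em_mac_min take the legacy lem(4) code paths; types at or
 * above em_mac_min but below igb_mac_min use the em(4) paths, and igb-class
 * (82575 and newer) adapters use the igb(4) paths with advanced descriptors,
 * as selected in em_if_attach_pre() below.
 */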
35
36 /*********************************************************************
37 * Driver version:
38 *********************************************************************/
39 static const char em_driver_version[] = "7.7.8-fbsd";
40 static const char igb_driver_version[] = "2.5.19-fbsd";
41
42 /*********************************************************************
43 * PCI Device ID Table
44 *
45 * Used by probe to select devices to load on
46 * Last field stores an index into e1000_strings
47 * Last entry must be all 0s
48 *
49 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
50 *********************************************************************/
51
52 static const pci_vendor_info_t em_vendor_info_array[] =
53 {
54 /* Intel(R) - lem-class legacy devices */
55 PVID(0x8086, E1000_DEV_ID_82540EM, "Intel(R) Legacy PRO/1000 MT 82540EM"),
56 PVID(0x8086, E1000_DEV_ID_82540EM_LOM, "Intel(R) Legacy PRO/1000 MT 82540EM (LOM)"),
57 PVID(0x8086, E1000_DEV_ID_82540EP, "Intel(R) Legacy PRO/1000 MT 82540EP"),
58 PVID(0x8086, E1000_DEV_ID_82540EP_LOM, "Intel(R) Legacy PRO/1000 MT 82540EP (LOM)"),
59 PVID(0x8086, E1000_DEV_ID_82540EP_LP, "Intel(R) Legacy PRO/1000 MT 82540EP (Mobile)"),
60
61 PVID(0x8086, E1000_DEV_ID_82541EI, "Intel(R) Legacy PRO/1000 MT 82541EI (Copper)"),
62 PVID(0x8086, E1000_DEV_ID_82541ER, "Intel(R) Legacy PRO/1000 82541ER"),
63 PVID(0x8086, E1000_DEV_ID_82541ER_LOM, "Intel(R) Legacy PRO/1000 MT 82541ER"),
64 PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE, "Intel(R) Legacy PRO/1000 MT 82541EI (Mobile)"),
65 PVID(0x8086, E1000_DEV_ID_82541GI, "Intel(R) Legacy PRO/1000 MT 82541GI"),
66 PVID(0x8086, E1000_DEV_ID_82541GI_LF, "Intel(R) Legacy PRO/1000 GT 82541PI"),
67 PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE, "Intel(R) Legacy PRO/1000 MT 82541GI (Mobile)"),
68
69 PVID(0x8086, E1000_DEV_ID_82542, "Intel(R) Legacy PRO/1000 82542 (Fiber)"),
70
71 PVID(0x8086, E1000_DEV_ID_82543GC_FIBER, "Intel(R) Legacy PRO/1000 F 82543GC (Fiber)"),
72 PVID(0x8086, E1000_DEV_ID_82543GC_COPPER, "Intel(R) Legacy PRO/1000 T 82543GC (Copper)"),
73
74 PVID(0x8086, E1000_DEV_ID_82544EI_COPPER, "Intel(R) Legacy PRO/1000 XT 82544EI (Copper)"),
75 PVID(0x8086, E1000_DEV_ID_82544EI_FIBER, "Intel(R) Legacy PRO/1000 XF 82544EI (Fiber)"),
76 PVID(0x8086, E1000_DEV_ID_82544GC_COPPER, "Intel(R) Legacy PRO/1000 T 82544GC (Copper)"),
77 PVID(0x8086, E1000_DEV_ID_82544GC_LOM, "Intel(R) Legacy PRO/1000 XT 82544GC (LOM)"),
78
79 PVID(0x8086, E1000_DEV_ID_82545EM_COPPER, "Intel(R) Legacy PRO/1000 MT 82545EM (Copper)"),
80 PVID(0x8086, E1000_DEV_ID_82545EM_FIBER, "Intel(R) Legacy PRO/1000 MF 82545EM (Fiber)"),
81 PVID(0x8086, E1000_DEV_ID_82545GM_COPPER, "Intel(R) Legacy PRO/1000 MT 82545GM (Copper)"),
82 PVID(0x8086, E1000_DEV_ID_82545GM_FIBER, "Intel(R) Legacy PRO/1000 MF 82545GM (Fiber)"),
83 PVID(0x8086, E1000_DEV_ID_82545GM_SERDES, "Intel(R) Legacy PRO/1000 MB 82545GM (SERDES)"),
84
85 PVID(0x8086, E1000_DEV_ID_82546EB_COPPER, "Intel(R) Legacy PRO/1000 MT 82546EB (Copper)"),
86 PVID(0x8086, E1000_DEV_ID_82546EB_FIBER, "Intel(R) Legacy PRO/1000 MF 82546EB (Fiber)"),
87 PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, "Intel(R) Legacy PRO/1000 MT 82546EB (Quad Copper)"),
88 PVID(0x8086, E1000_DEV_ID_82546GB_COPPER, "Intel(R) Legacy PRO/1000 MT 82546GB (Copper)"),
89 PVID(0x8086, E1000_DEV_ID_82546GB_FIBER, "Intel(R) Legacy PRO/1000 MF 82546GB (Fiber)"),
90 PVID(0x8086, E1000_DEV_ID_82546GB_SERDES, "Intel(R) Legacy PRO/1000 MB 82546GB (SERDES)"),
91 PVID(0x8086, E1000_DEV_ID_82546GB_PCIE, "Intel(R) Legacy PRO/1000 P 82546GB (PCIe)"),
92 PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, "Intel(R) Legacy PRO/1000 GT 82546GB (Quad Copper)"),
93 PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, "Intel(R) Legacy PRO/1000 GT 82546GB (Quad Copper)"),
94
95 PVID(0x8086, E1000_DEV_ID_82547EI, "Intel(R) Legacy PRO/1000 CT 82547EI"),
96 PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE, "Intel(R) Legacy PRO/1000 CT 82547EI (Mobile)"),
97 PVID(0x8086, E1000_DEV_ID_82547GI, "Intel(R) Legacy PRO/1000 CT 82547GI"),
98
99 /* Intel(R) - em-class devices */
100 PVID(0x8086, E1000_DEV_ID_82571EB_COPPER, "Intel(R) PRO/1000 PT 82571EB/82571GB (Copper)"),
101 PVID(0x8086, E1000_DEV_ID_82571EB_FIBER, "Intel(R) PRO/1000 PF 82571EB/82571GB (Fiber)"),
102 PVID(0x8086, E1000_DEV_ID_82571EB_SERDES, "Intel(R) PRO/1000 PB 82571EB (SERDES)"),
103 PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, "Intel(R) PRO/1000 82571EB (Dual Mezzanine)"),
104 PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, "Intel(R) PRO/1000 82571EB (Quad Mezzanine)"),
105 PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, "Intel(R) PRO/1000 PT 82571EB/82571GB (Quad Copper)"),
106 PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, "Intel(R) PRO/1000 PT 82571EB/82571GB (Quad Copper)"),
107 PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, "Intel(R) PRO/1000 PF 82571EB (Quad Fiber)"),
108 PVID(0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, "Intel(R) PRO/1000 PT 82571PT (Quad Copper)"),
109 PVID(0x8086, E1000_DEV_ID_82572EI, "Intel(R) PRO/1000 PT 82572EI (Copper)"),
110 PVID(0x8086, E1000_DEV_ID_82572EI_COPPER, "Intel(R) PRO/1000 PT 82572EI (Copper)"),
111 PVID(0x8086, E1000_DEV_ID_82572EI_FIBER, "Intel(R) PRO/1000 PF 82572EI (Fiber)"),
112 PVID(0x8086, E1000_DEV_ID_82572EI_SERDES, "Intel(R) PRO/1000 82572EI (SERDES)"),
113 PVID(0x8086, E1000_DEV_ID_82573E, "Intel(R) PRO/1000 82573E (Copper)"),
114 PVID(0x8086, E1000_DEV_ID_82573E_IAMT, "Intel(R) PRO/1000 82573E AMT (Copper)"),
115 PVID(0x8086, E1000_DEV_ID_82573L, "Intel(R) PRO/1000 82573L"),
116 PVID(0x8086, E1000_DEV_ID_82583V, "Intel(R) 82583V"),
117 PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, "Intel(R) 80003ES2LAN (Copper)"),
118 PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, "Intel(R) 80003ES2LAN (SERDES)"),
119 PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, "Intel(R) 80003ES2LAN (Dual Copper)"),
120 PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, "Intel(R) 80003ES2LAN (Dual SERDES)"),
121 PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, "Intel(R) 82566MM ICH8 AMT (Mobile)"),
122 PVID(0x8086, E1000_DEV_ID_ICH8_IGP_AMT, "Intel(R) 82566DM ICH8 AMT"),
123 PVID(0x8086, E1000_DEV_ID_ICH8_IGP_C, "Intel(R) 82566DC ICH8"),
124 PVID(0x8086, E1000_DEV_ID_ICH8_IFE, "Intel(R) 82562V ICH8"),
125 PVID(0x8086, E1000_DEV_ID_ICH8_IFE_GT, "Intel(R) 82562GT ICH8"),
126 PVID(0x8086, E1000_DEV_ID_ICH8_IFE_G, "Intel(R) 82562G ICH8"),
127 PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M, "Intel(R) 82566MC ICH8"),
128 PVID(0x8086, E1000_DEV_ID_ICH8_82567V_3, "Intel(R) 82567V-3 ICH8"),
129 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, "Intel(R) 82567LM ICH9 AMT"),
130 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT, "Intel(R) 82566DM-2 ICH9 AMT"),
131 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_C, "Intel(R) 82566DC-2 ICH9"),
132 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M, "Intel(R) 82567LF ICH9"),
133 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_V, "Intel(R) 82567V ICH9"),
134 PVID(0x8086, E1000_DEV_ID_ICH9_IFE, "Intel(R) 82562V-2 ICH9"),
135 PVID(0x8086, E1000_DEV_ID_ICH9_IFE_GT, "Intel(R) 82562GT-2 ICH9"),
136 PVID(0x8086, E1000_DEV_ID_ICH9_IFE_G, "Intel(R) 82562G-2 ICH9"),
137 PVID(0x8086, E1000_DEV_ID_ICH9_BM, "Intel(R) 82567LM-4 ICH9"),
138 PVID(0x8086, E1000_DEV_ID_82574L, "Intel(R) Gigabit CT 82574L"),
139 PVID(0x8086, E1000_DEV_ID_82574LA, "Intel(R) 82574L-Apple"),
140 PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LM, "Intel(R) 82567LM-2 ICH10"),
141 PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LF, "Intel(R) 82567LF-2 ICH10"),
142 PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_V, "Intel(R) 82567V-2 ICH10"),
143 PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LM, "Intel(R) 82567LM-3 ICH10"),
144 PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LF, "Intel(R) 82567LF-3 ICH10"),
145 PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_V, "Intel(R) 82567V-4 ICH10"),
146 PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LM, "Intel(R) 82577LM"),
147 PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LC, "Intel(R) 82577LC"),
148 PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DM, "Intel(R) 82578DM"),
149 PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DC, "Intel(R) 82578DC"),
150 PVID(0x8086, E1000_DEV_ID_PCH2_LV_LM, "Intel(R) 82579LM"),
151 PVID(0x8086, E1000_DEV_ID_PCH2_LV_V, "Intel(R) 82579V"),
152 PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, "Intel(R) I217-LM LPT"),
153 PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_V, "Intel(R) I217-V LPT"),
154 PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, "Intel(R) I218-LM LPTLP"),
155 PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, "Intel(R) I218-V LPTLP"),
156 PVID(0x8086, E1000_DEV_ID_PCH_I218_LM2, "Intel(R) I218-LM (2)"),
157 PVID(0x8086, E1000_DEV_ID_PCH_I218_V2, "Intel(R) I218-V (2)"),
158 PVID(0x8086, E1000_DEV_ID_PCH_I218_LM3, "Intel(R) I218-LM (3)"),
159 PVID(0x8086, E1000_DEV_ID_PCH_I218_V3, "Intel(R) I218-V (3)"),
160 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, "Intel(R) I219-LM SPT"),
161 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V, "Intel(R) I219-V SPT"),
162 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, "Intel(R) I219-LM SPT-H(2)"),
163 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, "Intel(R) I219-V SPT-H(2)"),
164 PVID(0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, "Intel(R) I219-LM LBG(3)"),
165 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM4, "Intel(R) I219-LM SPT(4)"),
166 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V4, "Intel(R) I219-V SPT(4)"),
167 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM5, "Intel(R) I219-LM SPT(5)"),
168 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V5, "Intel(R) I219-V SPT(5)"),
169 PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM6, "Intel(R) I219-LM CNP(6)"),
170 PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_V6, "Intel(R) I219-V CNP(6)"),
171 PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM7, "Intel(R) I219-LM CNP(7)"),
172 PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_V7, "Intel(R) I219-V CNP(7)"),
173 PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM8, "Intel(R) I219-LM ICP(8)"),
174 PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_V8, "Intel(R) I219-V ICP(8)"),
175 PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM9, "Intel(R) I219-LM ICP(9)"),
176 PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_V9, "Intel(R) I219-V ICP(9)"),
177 PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM10, "Intel(R) I219-LM CMP(10)"),
178 PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V10, "Intel(R) I219-V CMP(10)"),
179 PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM11, "Intel(R) I219-LM CMP(11)"),
180 PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V11, "Intel(R) I219-V CMP(11)"),
181 PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM12, "Intel(R) I219-LM CMP(12)"),
182 PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V12, "Intel(R) I219-V CMP(12)"),
183 PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM13, "Intel(R) I219-LM TGP(13)"),
184 PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V13, "Intel(R) I219-V TGP(13)"),
185 PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM14, "Intel(R) I219-LM TGP(14)"),
186 PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V14, "Intel(R) I219-V TGP(14)"),
187 PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM15, "Intel(R) I219-LM TGP(15)"),
188 PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V15, "Intel(R) I219-V TGP(15)"),
189 PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_LM16, "Intel(R) I219-LM ADL(16)"),
190 PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_V16, "Intel(R) I219-V ADL(16)"),
191 PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_LM17, "Intel(R) I219-LM ADL(17)"),
192 PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_V17, "Intel(R) I219-V ADL(17)"),
193 PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_LM18, "Intel(R) I219-LM MTP(18)"),
194 PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_V18, "Intel(R) I219-V MTP(18)"),
195 PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_LM19, "Intel(R) I219-LM MTP(19)"),
196 PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_V19, "Intel(R) I219-V MTP(19)"),
197 PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_LM20, "Intel(R) I219-LM LNL(20)"),
198 PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_V20, "Intel(R) I219-V LNL(20)"),
199 PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_LM21, "Intel(R) I219-LM LNL(21)"),
200 PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_V21, "Intel(R) I219-V LNL(21)"),
201 PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_LM22, "Intel(R) I219-LM RPL(22)"),
202 PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_V22, "Intel(R) I219-V RPL(22)"),
203 PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_LM23, "Intel(R) I219-LM RPL(23)"),
204 PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_V23, "Intel(R) I219-V RPL(23)"),
205 PVID(0x8086, E1000_DEV_ID_PCH_ARL_I219_LM24, "Intel(R) I219-LM ARL(24)"),
206 PVID(0x8086, E1000_DEV_ID_PCH_ARL_I219_V24, "Intel(R) I219-V ARL(24)"),
207 PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM25, "Intel(R) I219-LM PTP(25)"),
208 PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V25, "Intel(R) I219-V PTP(25)"),
209 PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM26, "Intel(R) I219-LM PTP(26)"),
210 PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V26, "Intel(R) I219-V PTP(26)"),
211 PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM27, "Intel(R) I219-LM PTP(27)"),
212 PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V27, "Intel(R) I219-V PTP(27)"),
213 /* required last entry */
214 PVID_END
215 };
216
217 static const pci_vendor_info_t igb_vendor_info_array[] =
218 {
219 /* Intel(R) - igb-class devices */
220 PVID(0x8086, E1000_DEV_ID_82575EB_COPPER, "Intel(R) PRO/1000 82575EB (Copper)"),
221 PVID(0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, "Intel(R) PRO/1000 82575EB (SERDES)"),
222 PVID(0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, "Intel(R) PRO/1000 VT 82575GB (Quad Copper)"),
223 PVID(0x8086, E1000_DEV_ID_82576, "Intel(R) PRO/1000 82576"),
224 PVID(0x8086, E1000_DEV_ID_82576_NS, "Intel(R) PRO/1000 82576NS"),
225 PVID(0x8086, E1000_DEV_ID_82576_NS_SERDES, "Intel(R) PRO/1000 82576NS (SERDES)"),
226 PVID(0x8086, E1000_DEV_ID_82576_FIBER, "Intel(R) PRO/1000 EF 82576 (Dual Fiber)"),
227 PVID(0x8086, E1000_DEV_ID_82576_SERDES, "Intel(R) PRO/1000 82576 (Dual SERDES)"),
228 PVID(0x8086, E1000_DEV_ID_82576_SERDES_QUAD, "Intel(R) PRO/1000 ET 82576 (Quad SERDES)"),
229 PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER, "Intel(R) PRO/1000 ET 82576 (Quad Copper)"),
230 PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, "Intel(R) PRO/1000 ET(2) 82576 (Quad Copper)"),
231 PVID(0x8086, E1000_DEV_ID_82576_VF, "Intel(R) PRO/1000 82576 Virtual Function"),
232 PVID(0x8086, E1000_DEV_ID_82580_COPPER, "Intel(R) I340 82580 (Copper)"),
233 PVID(0x8086, E1000_DEV_ID_82580_FIBER, "Intel(R) I340 82580 (Fiber)"),
234 PVID(0x8086, E1000_DEV_ID_82580_SERDES, "Intel(R) I340 82580 (SERDES)"),
235 PVID(0x8086, E1000_DEV_ID_82580_SGMII, "Intel(R) I340 82580 (SGMII)"),
236 PVID(0x8086, E1000_DEV_ID_82580_COPPER_DUAL, "Intel(R) I340-T2 82580 (Dual Copper)"),
237 PVID(0x8086, E1000_DEV_ID_82580_QUAD_FIBER, "Intel(R) I340-F4 82580 (Quad Fiber)"),
238 PVID(0x8086, E1000_DEV_ID_DH89XXCC_SERDES, "Intel(R) DH89XXCC (SERDES)"),
239 PVID(0x8086, E1000_DEV_ID_DH89XXCC_SGMII, "Intel(R) I347-AT4 DH89XXCC"),
240 PVID(0x8086, E1000_DEV_ID_DH89XXCC_SFP, "Intel(R) DH89XXCC (SFP)"),
241 PVID(0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, "Intel(R) DH89XXCC (Backplane)"),
242 PVID(0x8086, E1000_DEV_ID_I350_COPPER, "Intel(R) I350 (Copper)"),
243 PVID(0x8086, E1000_DEV_ID_I350_FIBER, "Intel(R) I350 (Fiber)"),
244 PVID(0x8086, E1000_DEV_ID_I350_SERDES, "Intel(R) I350 (SERDES)"),
245 PVID(0x8086, E1000_DEV_ID_I350_SGMII, "Intel(R) I350 (SGMII)"),
246 PVID(0x8086, E1000_DEV_ID_I350_VF, "Intel(R) I350 Virtual Function"),
247 PVID(0x8086, E1000_DEV_ID_I210_COPPER, "Intel(R) I210 (Copper)"),
248 PVID(0x8086, E1000_DEV_ID_I210_COPPER_IT, "Intel(R) I210 IT (Copper)"),
249 PVID(0x8086, E1000_DEV_ID_I210_COPPER_OEM1, "Intel(R) I210 (OEM)"),
250 PVID(0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS, "Intel(R) I210 Flashless (Copper)"),
251 PVID(0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS, "Intel(R) I210 Flashless (SERDES)"),
252 PVID(0x8086, E1000_DEV_ID_I210_SGMII_FLASHLESS, "Intel(R) I210 Flashless (SGMII)"),
253 PVID(0x8086, E1000_DEV_ID_I210_FIBER, "Intel(R) I210 (Fiber)"),
254 PVID(0x8086, E1000_DEV_ID_I210_SERDES, "Intel(R) I210 (SERDES)"),
255 PVID(0x8086, E1000_DEV_ID_I210_SGMII, "Intel(R) I210 (SGMII)"),
256 PVID(0x8086, E1000_DEV_ID_I211_COPPER, "Intel(R) I211 (Copper)"),
257 PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS, "Intel(R) I354 (1.0 GbE Backplane)"),
258 PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, "Intel(R) I354 (2.5 GbE Backplane)"),
259 PVID(0x8086, E1000_DEV_ID_I354_SGMII, "Intel(R) I354 (SGMII)"),
260 /* required last entry */
261 PVID_END
262 };
263
264 /*********************************************************************
265 * Function prototypes
266 *********************************************************************/
267 static void *em_register(device_t);
268 static void *igb_register(device_t);
269 static int em_if_attach_pre(if_ctx_t);
270 static int em_if_attach_post(if_ctx_t);
271 static int em_if_detach(if_ctx_t);
272 static int em_if_shutdown(if_ctx_t);
273 static int em_if_suspend(if_ctx_t);
274 static int em_if_resume(if_ctx_t);
275
276 static int em_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
277 static int em_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
278 static void em_if_queues_free(if_ctx_t);
279
280 static uint64_t em_if_get_counter(if_ctx_t, ift_counter);
281 static void em_if_init(if_ctx_t);
282 static void em_if_stop(if_ctx_t);
283 static void em_if_media_status(if_ctx_t, struct ifmediareq *);
284 static int em_if_media_change(if_ctx_t);
285 static int em_if_mtu_set(if_ctx_t, uint32_t);
286 static void em_if_timer(if_ctx_t, uint16_t);
287 static void em_if_vlan_register(if_ctx_t, u16);
288 static void em_if_vlan_unregister(if_ctx_t, u16);
289 static void em_if_watchdog_reset(if_ctx_t);
290 static bool em_if_needs_restart(if_ctx_t, enum iflib_restart_event);
291
292 static void em_identify_hardware(if_ctx_t);
293 static int em_allocate_pci_resources(if_ctx_t);
294 static void em_free_pci_resources(if_ctx_t);
295 static void em_reset(if_ctx_t);
296 static int em_setup_interface(if_ctx_t);
297 static int em_setup_msix(if_ctx_t);
298
299 static void em_initialize_transmit_unit(if_ctx_t);
300 static void em_initialize_receive_unit(if_ctx_t);
301
302 static void em_if_intr_enable(if_ctx_t);
303 static void em_if_intr_disable(if_ctx_t);
304 static void igb_if_intr_enable(if_ctx_t);
305 static void igb_if_intr_disable(if_ctx_t);
306 static int em_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
307 static int em_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
308 static int igb_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
309 static int igb_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
310 static void em_if_multi_set(if_ctx_t);
311 static void em_if_update_admin_status(if_ctx_t);
312 static void em_if_debug(if_ctx_t);
313 static void em_update_stats_counters(struct e1000_softc *);
314 static void em_add_hw_stats(struct e1000_softc *);
315 static int em_if_set_promisc(if_ctx_t, int);
316 static bool em_if_vlan_filter_capable(if_ctx_t);
317 static bool em_if_vlan_filter_used(if_ctx_t);
318 static void em_if_vlan_filter_enable(struct e1000_softc *);
319 static void em_if_vlan_filter_disable(struct e1000_softc *);
320 static void em_if_vlan_filter_write(struct e1000_softc *);
321 static void em_setup_vlan_hw_support(if_ctx_t ctx);
322 static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
323 static void em_print_nvm_info(struct e1000_softc *);
324 static void em_fw_version_locked(if_ctx_t);
325 static void em_sbuf_fw_version(struct e1000_fw_version *, struct sbuf *);
326 static void em_print_fw_version(struct e1000_softc *);
327 static int em_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
328 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
329 static int em_get_rs(SYSCTL_HANDLER_ARGS);
330 static void em_print_debug_info(struct e1000_softc *);
331 static int em_is_valid_ether_addr(u8 *);
332 static bool em_automask_tso(if_ctx_t);
333 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
334 static void em_add_int_delay_sysctl(struct e1000_softc *, const char *,
335 const char *, struct em_int_delay_info *, int, int);
336 /* Management and WOL Support */
337 static void em_init_manageability(struct e1000_softc *);
338 static void em_release_manageability(struct e1000_softc *);
339 static void em_get_hw_control(struct e1000_softc *);
340 static void em_release_hw_control(struct e1000_softc *);
341 static void em_get_wakeup(if_ctx_t);
342 static void em_enable_wakeup(if_ctx_t);
343 static int em_enable_phy_wakeup(struct e1000_softc *);
344 static void em_disable_aspm(struct e1000_softc *);
345
346 int em_intr(void *);
347
348 /* MSI-X handlers */
349 static int em_if_msix_intr_assign(if_ctx_t, int);
350 static int em_msix_link(void *);
351 static void em_handle_link(void *);
352
353 static void em_enable_vectors_82574(if_ctx_t);
354
355 static int em_set_flowcntl(SYSCTL_HANDLER_ARGS);
356 static int em_sysctl_eee(SYSCTL_HANDLER_ARGS);
357 static void em_if_led_func(if_ctx_t, int);
358
359 static int em_get_regs(SYSCTL_HANDLER_ARGS);
360
361 static void lem_smartspeed(struct e1000_softc *);
362 static void igb_configure_queues(struct e1000_softc *);
363 static void em_flush_desc_rings(struct e1000_softc *);
364
365
366 /*********************************************************************
367 * FreeBSD Device Interface Entry Points
368 *********************************************************************/
369 static device_method_t em_methods[] = {
370 /* Device interface */
371 DEVMETHOD(device_register, em_register),
372 DEVMETHOD(device_probe, iflib_device_probe),
373 DEVMETHOD(device_attach, iflib_device_attach),
374 DEVMETHOD(device_detach, iflib_device_detach),
375 DEVMETHOD(device_shutdown, iflib_device_shutdown),
376 DEVMETHOD(device_suspend, iflib_device_suspend),
377 DEVMETHOD(device_resume, iflib_device_resume),
378 DEVMETHOD_END
379 };
380
381 static device_method_t igb_methods[] = {
382 /* Device interface */
383 DEVMETHOD(device_register, igb_register),
384 DEVMETHOD(device_probe, iflib_device_probe),
385 DEVMETHOD(device_attach, iflib_device_attach),
386 DEVMETHOD(device_detach, iflib_device_detach),
387 DEVMETHOD(device_shutdown, iflib_device_shutdown),
388 DEVMETHOD(device_suspend, iflib_device_suspend),
389 DEVMETHOD(device_resume, iflib_device_resume),
390 DEVMETHOD_END
391 };
392
393
394 static driver_t em_driver = {
395 "em", em_methods, sizeof(struct e1000_softc),
396 };
397
398 DRIVER_MODULE(em, pci, em_driver, 0, 0);
399
400 MODULE_DEPEND(em, pci, 1, 1, 1);
401 MODULE_DEPEND(em, ether, 1, 1, 1);
402 MODULE_DEPEND(em, iflib, 1, 1, 1);
403
404 IFLIB_PNP_INFO(pci, em, em_vendor_info_array);
405
406 static driver_t igb_driver = {
407 "igb", igb_methods, sizeof(struct e1000_softc),
408 };
409
410 DRIVER_MODULE(igb, pci, igb_driver, 0, 0);
411
412 MODULE_DEPEND(igb, pci, 1, 1, 1);
413 MODULE_DEPEND(igb, ether, 1, 1, 1);
414 MODULE_DEPEND(igb, iflib, 1, 1, 1);
415
416 IFLIB_PNP_INFO(pci, igb, igb_vendor_info_array);
417
418 static device_method_t em_if_methods[] = {
419 DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
420 DEVMETHOD(ifdi_attach_post, em_if_attach_post),
421 DEVMETHOD(ifdi_detach, em_if_detach),
422 DEVMETHOD(ifdi_shutdown, em_if_shutdown),
423 DEVMETHOD(ifdi_suspend, em_if_suspend),
424 DEVMETHOD(ifdi_resume, em_if_resume),
425 DEVMETHOD(ifdi_init, em_if_init),
426 DEVMETHOD(ifdi_stop, em_if_stop),
427 DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign),
428 DEVMETHOD(ifdi_intr_enable, em_if_intr_enable),
429 DEVMETHOD(ifdi_intr_disable, em_if_intr_disable),
430 DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
431 DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
432 DEVMETHOD(ifdi_queues_free, em_if_queues_free),
433 DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
434 DEVMETHOD(ifdi_multi_set, em_if_multi_set),
435 DEVMETHOD(ifdi_media_status, em_if_media_status),
436 DEVMETHOD(ifdi_media_change, em_if_media_change),
437 DEVMETHOD(ifdi_mtu_set, em_if_mtu_set),
438 DEVMETHOD(ifdi_promisc_set, em_if_set_promisc),
439 DEVMETHOD(ifdi_timer, em_if_timer),
440 DEVMETHOD(ifdi_watchdog_reset, em_if_watchdog_reset),
441 DEVMETHOD(ifdi_vlan_register, em_if_vlan_register),
442 DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister),
443 DEVMETHOD(ifdi_get_counter, em_if_get_counter),
444 DEVMETHOD(ifdi_led_func, em_if_led_func),
445 DEVMETHOD(ifdi_rx_queue_intr_enable, em_if_rx_queue_intr_enable),
446 DEVMETHOD(ifdi_tx_queue_intr_enable, em_if_tx_queue_intr_enable),
447 DEVMETHOD(ifdi_debug, em_if_debug),
448 DEVMETHOD(ifdi_needs_restart, em_if_needs_restart),
449 DEVMETHOD_END
450 };
451
452 static driver_t em_if_driver = {
453 "em_if", em_if_methods, sizeof(struct e1000_softc)
454 };
455
456 static device_method_t igb_if_methods[] = {
457 DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
458 DEVMETHOD(ifdi_attach_post, em_if_attach_post),
459 DEVMETHOD(ifdi_detach, em_if_detach),
460 DEVMETHOD(ifdi_shutdown, em_if_shutdown),
461 DEVMETHOD(ifdi_suspend, em_if_suspend),
462 DEVMETHOD(ifdi_resume, em_if_resume),
463 DEVMETHOD(ifdi_init, em_if_init),
464 DEVMETHOD(ifdi_stop, em_if_stop),
465 DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign),
466 DEVMETHOD(ifdi_intr_enable, igb_if_intr_enable),
467 DEVMETHOD(ifdi_intr_disable, igb_if_intr_disable),
468 DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
469 DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
470 DEVMETHOD(ifdi_queues_free, em_if_queues_free),
471 DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
472 DEVMETHOD(ifdi_multi_set, em_if_multi_set),
473 DEVMETHOD(ifdi_media_status, em_if_media_status),
474 DEVMETHOD(ifdi_media_change, em_if_media_change),
475 DEVMETHOD(ifdi_mtu_set, em_if_mtu_set),
476 DEVMETHOD(ifdi_promisc_set, em_if_set_promisc),
477 DEVMETHOD(ifdi_timer, em_if_timer),
478 DEVMETHOD(ifdi_watchdog_reset, em_if_watchdog_reset),
479 DEVMETHOD(ifdi_vlan_register, em_if_vlan_register),
480 DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister),
481 DEVMETHOD(ifdi_get_counter, em_if_get_counter),
482 DEVMETHOD(ifdi_led_func, em_if_led_func),
483 DEVMETHOD(ifdi_rx_queue_intr_enable, igb_if_rx_queue_intr_enable),
484 DEVMETHOD(ifdi_tx_queue_intr_enable, igb_if_tx_queue_intr_enable),
485 DEVMETHOD(ifdi_debug, em_if_debug),
486 DEVMETHOD(ifdi_needs_restart, em_if_needs_restart),
487 DEVMETHOD_END
488 };
489
490 static driver_t igb_if_driver = {
491 "igb_if", igb_if_methods, sizeof(struct e1000_softc)
492 };
493
494 /*********************************************************************
495 * Tunable default values.
496 *********************************************************************/
497
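/*
 * The adapter's interrupt delay registers count in units of 1.024 usec
 * (1024 ns); the macros below convert between register ticks and
 * microseconds, rounding to the nearest value.
 */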
498 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
499 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
500
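/*
 * The ITR register interval is programmed in 256 ns increments, so the
 * default below works out to roughly 1e9 / (8000 * 256) ~= 488, i.e. an
 * interrupt throttle of about 8000 interrupts per second.
 */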
501 #define MAX_INTS_PER_SEC 8000
502 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
503
504 /* Allow common code without TSO */
505 #ifndef CSUM_TSO
506 #define CSUM_TSO 0
507 #endif
508
509 static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
510 "EM driver parameters");
511
512 static int em_disable_crc_stripping = 0;
513 SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
514 &em_disable_crc_stripping, 0, "Disable CRC Stripping");
515
516 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
517 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
518 SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt,
519 0, "Default transmit interrupt delay in usecs");
520 SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
521 0, "Default receive interrupt delay in usecs");
522
523 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
524 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
525 SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
526 &em_tx_abs_int_delay_dflt, 0,
527 "Default transmit interrupt delay limit in usecs");
528 SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
529 &em_rx_abs_int_delay_dflt, 0,
530 "Default receive interrupt delay limit in usecs");
531
532 static int em_smart_pwr_down = false;
533 SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
534 0, "Set to true to leave smart power down enabled on newer adapters");
535
536 static bool em_unsupported_tso = false;
537 SYSCTL_BOOL(_hw_em, OID_AUTO, unsupported_tso, CTLFLAG_RDTUN,
538 &em_unsupported_tso, 0, "Allow unsupported em(4) TSO configurations");
539
540 /* Controls whether promiscuous mode also shows bad packets */
541 static int em_debug_sbp = false;
542 SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0,
543 "Show bad packets in promiscuous mode");
544
545 /* Energy efficient ethernet - default to OFF */
546 static int eee_setting = 1;
547 SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
548 "Enable Energy Efficient Ethernet");
549
550 /*
551 ** Tuneable Interrupt rate
552 */
553 static int em_max_interrupt_rate = 8000;
554 SYSCTL_INT(_hw_em, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
555 &em_max_interrupt_rate, 0, "Maximum interrupts per second");
556
557 /* Global used in WOL setup with multiport cards */
558 static int global_quad_port_a = 0;
559
560 extern struct if_txrx igb_txrx;
561 extern struct if_txrx em_txrx;
562 extern struct if_txrx lem_txrx;
563
564 static struct if_shared_ctx em_sctx_init = {
565 .isc_magic = IFLIB_MAGIC,
566 .isc_q_align = PAGE_SIZE,
567 .isc_tx_maxsize = EM_TSO_SIZE + sizeof(struct ether_vlan_header),
568 .isc_tx_maxsegsize = PAGE_SIZE,
569 .isc_tso_maxsize = EM_TSO_SIZE + sizeof(struct ether_vlan_header),
570 .isc_tso_maxsegsize = EM_TSO_SEG_SIZE,
571 .isc_rx_maxsize = MJUM9BYTES,
572 .isc_rx_nsegments = 1,
573 .isc_rx_maxsegsize = MJUM9BYTES,
574 .isc_nfl = 1,
575 .isc_nrxqs = 1,
576 .isc_ntxqs = 1,
577 .isc_admin_intrcnt = 1,
578 .isc_vendor_info = em_vendor_info_array,
579 .isc_driver_version = em_driver_version,
580 .isc_driver = &em_if_driver,
581 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
582
583 .isc_nrxd_min = {EM_MIN_RXD},
584 .isc_ntxd_min = {EM_MIN_TXD},
585 .isc_nrxd_max = {EM_MAX_RXD},
586 .isc_ntxd_max = {EM_MAX_TXD},
587 .isc_nrxd_default = {EM_DEFAULT_RXD},
588 .isc_ntxd_default = {EM_DEFAULT_TXD},
589 };
590
591 static struct if_shared_ctx igb_sctx_init = {
592 .isc_magic = IFLIB_MAGIC,
593 .isc_q_align = PAGE_SIZE,
594 .isc_tx_maxsize = EM_TSO_SIZE + sizeof(struct ether_vlan_header),
595 .isc_tx_maxsegsize = PAGE_SIZE,
596 .isc_tso_maxsize = EM_TSO_SIZE + sizeof(struct ether_vlan_header),
597 .isc_tso_maxsegsize = EM_TSO_SEG_SIZE,
598 .isc_rx_maxsize = MJUM9BYTES,
599 .isc_rx_nsegments = 1,
600 .isc_rx_maxsegsize = MJUM9BYTES,
601 .isc_nfl = 1,
602 .isc_nrxqs = 1,
603 .isc_ntxqs = 1,
604 .isc_admin_intrcnt = 1,
605 .isc_vendor_info = igb_vendor_info_array,
606 .isc_driver_version = igb_driver_version,
607 .isc_driver = &igb_if_driver,
608 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
609
610 .isc_nrxd_min = {EM_MIN_RXD},
611 .isc_ntxd_min = {EM_MIN_TXD},
612 .isc_nrxd_max = {IGB_MAX_RXD},
613 .isc_ntxd_max = {IGB_MAX_TXD},
614 .isc_nrxd_default = {EM_DEFAULT_RXD},
615 .isc_ntxd_default = {EM_DEFAULT_TXD},
616 };
617
618 /*****************************************************************
619 *
620 * Dump Registers
621 *
622 ****************************************************************/
623 #define IGB_REGS_LEN 739
624
625 static int em_get_regs(SYSCTL_HANDLER_ARGS)
626 {
627 struct e1000_softc *sc = (struct e1000_softc *)arg1;
628 struct e1000_hw *hw = &sc->hw;
629 struct sbuf *sb;
630 u32 *regs_buff;
631 int rc;
632
633 regs_buff = malloc(sizeof(u32) * IGB_REGS_LEN, M_DEVBUF, M_WAITOK);
634 memset(regs_buff, 0, IGB_REGS_LEN * sizeof(u32));
635
636 rc = sysctl_wire_old_buffer(req, 0);
637 MPASS(rc == 0);
638 if (rc != 0) {
639 free(regs_buff, M_DEVBUF);
640 return (rc);
641 }
642
643 sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
644 MPASS(sb != NULL);
645 if (sb == NULL) {
646 free(regs_buff, M_DEVBUF);
647 return (ENOMEM);
648 }
649
650 /* General Registers */
651 regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
652 regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
653 regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
654 regs_buff[3] = E1000_READ_REG(hw, E1000_ICR);
655 regs_buff[4] = E1000_READ_REG(hw, E1000_RCTL);
656 regs_buff[5] = E1000_READ_REG(hw, E1000_RDLEN(0));
657 regs_buff[6] = E1000_READ_REG(hw, E1000_RDH(0));
658 regs_buff[7] = E1000_READ_REG(hw, E1000_RDT(0));
659 regs_buff[8] = E1000_READ_REG(hw, E1000_RXDCTL(0));
660 regs_buff[9] = E1000_READ_REG(hw, E1000_RDBAL(0));
661 regs_buff[10] = E1000_READ_REG(hw, E1000_RDBAH(0));
662 regs_buff[11] = E1000_READ_REG(hw, E1000_TCTL);
663 regs_buff[12] = E1000_READ_REG(hw, E1000_TDBAL(0));
664 regs_buff[13] = E1000_READ_REG(hw, E1000_TDBAH(0));
665 regs_buff[14] = E1000_READ_REG(hw, E1000_TDLEN(0));
666 regs_buff[15] = E1000_READ_REG(hw, E1000_TDH(0));
667 regs_buff[16] = E1000_READ_REG(hw, E1000_TDT(0));
668 regs_buff[17] = E1000_READ_REG(hw, E1000_TXDCTL(0));
669 regs_buff[18] = E1000_READ_REG(hw, E1000_TDFH);
670 regs_buff[19] = E1000_READ_REG(hw, E1000_TDFT);
671 regs_buff[20] = E1000_READ_REG(hw, E1000_TDFHS);
672 regs_buff[21] = E1000_READ_REG(hw, E1000_TDFPC);
673
674 sbuf_printf(sb, "General Registers\n");
675 sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
676 sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
677 sbuf_printf(sb, "\tCTRL_EXT\t %08x\n\n", regs_buff[2]);
678
679 sbuf_printf(sb, "Interrupt Registers\n");
680 sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);
681
682 sbuf_printf(sb, "RX Registers\n");
683 sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
684 sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
685 sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
686 sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
687 sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
688 sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
689 sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);
690
691 sbuf_printf(sb, "TX Registers\n");
692 sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
693 sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
694 sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
695 sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
696 sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
697 sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
698 sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
699 sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
700 sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
701 sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
702 sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);
703
704 free(regs_buff, M_DEVBUF);
705
706 #ifdef DUMP_DESCS
707 {
708 if_softc_ctx_t scctx = sc->shared;
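/*
 * NB: this debug-only block is normally compiled out; rx_que, tx_que and
 * buf are assumed to already point at the queue and buffer being dumped.
 */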
709 struct rx_ring *rxr = &rx_que->rxr;
710 struct tx_ring *txr = &tx_que->txr;
711 int ntxd = scctx->isc_ntxd[0];
712 int nrxd = scctx->isc_nrxd[0];
713 int j;
714
715 for (j = 0; j < nrxd; j++) {
716 u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
717 u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
718 sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length);
719 }
720
721 for (j = 0; j < min(ntxd, 256); j++) {
722 unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
723
724 sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n",
725 j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
726 buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & E1000_TXD_STAT_DD : 0);
727
728 }
729 }
730 #endif
731
732 rc = sbuf_finish(sb);
733 sbuf_delete(sb);
734 return(rc);
735 }
736
737 static void *
738 em_register(device_t dev)
739 {
740 return (&em_sctx_init);
741 }
742
743 static void *
744 igb_register(device_t dev)
745 {
746 return (&igb_sctx_init);
747 }
748
749 static int
750 em_set_num_queues(if_ctx_t ctx)
751 {
752 struct e1000_softc *sc = iflib_get_softc(ctx);
753 int maxqueues;
754
755 /* Sanity check based on HW */
756 switch (sc->hw.mac.type) {
757 case e1000_82576:
758 case e1000_82580:
759 case e1000_i350:
760 case e1000_i354:
761 maxqueues = 8;
762 break;
763 case e1000_i210:
764 case e1000_82575:
765 maxqueues = 4;
766 break;
767 case e1000_i211:
768 case e1000_82574:
769 maxqueues = 2;
770 break;
771 default:
772 maxqueues = 1;
773 break;
774 }
775
776 return (maxqueues);
777 }
778
779 #define LEM_CAPS \
780 IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
781 IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_VLAN_HWFILTER | IFCAP_TSO4 | \
782 IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6
783
784 #define EM_CAPS \
785 IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
786 IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_VLAN_HWFILTER | IFCAP_TSO4 | \
787 IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | \
788 IFCAP_TSO6
789
790 #define IGB_CAPS \
791 IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
792 IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_VLAN_HWFILTER | IFCAP_TSO4 | \
793 IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | \
794 IFCAP_TSO6
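/*
 * LEM_CAPS differs from EM_CAPS/IGB_CAPS only in omitting IFCAP_TSO6; the
 * lem-class attach path below can still advertise TSO6 when the
 * hw.em.unsupported_tso tunable is set.
 */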
795
796 /*********************************************************************
797 * Device initialization routine
798 *
799 * The attach entry point is called when the driver is being loaded.
800 * This routine identifies the type of hardware, allocates all resources
801 * and initializes the hardware.
802 *
803 * return 0 on success, positive on failure
804 *********************************************************************/
805 static int
806 em_if_attach_pre(if_ctx_t ctx)
807 {
808 struct e1000_softc *sc;
809 if_softc_ctx_t scctx;
810 device_t dev;
811 struct e1000_hw *hw;
812 struct sysctl_oid_list *child;
813 struct sysctl_ctx_list *ctx_list;
814 int error = 0;
815
816 INIT_DEBUGOUT("em_if_attach_pre: begin");
817 dev = iflib_get_dev(ctx);
818 sc = iflib_get_softc(ctx);
819
820 sc->ctx = sc->osdep.ctx = ctx;
821 sc->dev = sc->osdep.dev = dev;
822 scctx = sc->shared = iflib_get_softc_ctx(ctx);
823 sc->media = iflib_get_media(ctx);
824 hw = &sc->hw;
825
826 /* Determine hardware and mac info */
827 em_identify_hardware(ctx);
828
829 /* SYSCTL stuff */
830 ctx_list = device_get_sysctl_ctx(dev);
831 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
832
833 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "nvm",
834 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
835 em_sysctl_nvm_info, "I", "NVM Information");
836
837 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
838 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
839 em_sysctl_print_fw_version, "A",
840 "Prints FW/NVM Versions");
841
842 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "debug",
843 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
844 em_sysctl_debug_info, "I", "Debug Information");
845
846 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
847 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
848 em_set_flowcntl, "I", "Flow Control");
849
850 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "reg_dump",
851 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
852 em_get_regs, "A", "Dump Registers");
853
854 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "rs_dump",
855 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
856 em_get_rs, "I", "Dump RS indexes");
857
858 scctx->isc_tx_nsegments = EM_MAX_SCATTER;
859 scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = em_set_num_queues(ctx);
860 if (bootverbose)
861 device_printf(dev, "attach_pre capping queues at %d\n",
862 scctx->isc_ntxqsets_max);
863
864 if (hw->mac.type >= igb_mac_min) {
865 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union e1000_adv_tx_desc), EM_DBA_ALIGN);
866 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_adv_rx_desc), EM_DBA_ALIGN);
867 scctx->isc_txd_size[0] = sizeof(union e1000_adv_tx_desc);
868 scctx->isc_rxd_size[0] = sizeof(union e1000_adv_rx_desc);
869 scctx->isc_txrx = &igb_txrx;
870 scctx->isc_tx_tso_segments_max = EM_MAX_SCATTER;
871 scctx->isc_tx_tso_size_max = EM_TSO_SIZE;
872 scctx->isc_tx_tso_segsize_max = EM_TSO_SEG_SIZE;
873 scctx->isc_capabilities = scctx->isc_capenable = IGB_CAPS;
874 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |
875 CSUM_IP6_TCP | CSUM_IP6_UDP;
876 if (hw->mac.type != e1000_82575)
877 scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
878 /*
879 ** Some new devices, as with ixgbe, now may
880 ** use a different BAR, so we need to keep
881 ** track of which is used.
882 */
883 scctx->isc_msix_bar = pci_msix_table_bar(dev);
884 } else if (hw->mac.type >= em_mac_min) {
885 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]* sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
886 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
887 scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
888 scctx->isc_rxd_size[0] = sizeof(union e1000_rx_desc_extended);
889 scctx->isc_txrx = &em_txrx;
890 scctx->isc_tx_tso_segments_max = EM_MAX_SCATTER;
891 scctx->isc_tx_tso_size_max = EM_TSO_SIZE;
892 scctx->isc_tx_tso_segsize_max = EM_TSO_SEG_SIZE;
893 scctx->isc_capabilities = scctx->isc_capenable = EM_CAPS;
894 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO |
895 CSUM_IP6_TCP | CSUM_IP6_UDP;
896
897 /* Disable TSO on all em(4) until ring stalls can be debugged */
898 scctx->isc_capenable &= ~IFCAP_TSO;
899
900 /*
901 * Disable TSO on SPT due to errata that downclocks DMA performance
902 * i218-i219 Specification Update 1.5.4.5
903 */
904 if (hw->mac.type == e1000_pch_spt)
905 scctx->isc_capenable &= ~IFCAP_TSO;
906
907 /*
908 * We support MSI-X with 82574 only, but indicate to iflib(4)
909 * that it shall give MSI at least a try with other devices.
910 */
911 if (hw->mac.type == e1000_82574) {
912 scctx->isc_msix_bar = pci_msix_table_bar(dev);
913 } else {
914 scctx->isc_msix_bar = -1;
915 scctx->isc_disable_msix = 1;
916 }
917 } else {
918 scctx->isc_txqsizes[0] = roundup2((scctx->isc_ntxd[0] + 1) * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
919 scctx->isc_rxqsizes[0] = roundup2((scctx->isc_nrxd[0] + 1) * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
920 scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
921 scctx->isc_rxd_size[0] = sizeof(struct e1000_rx_desc);
922 scctx->isc_txrx = &lem_txrx;
923 scctx->isc_tx_tso_segments_max = EM_MAX_SCATTER;
924 scctx->isc_tx_tso_size_max = EM_TSO_SIZE;
925 scctx->isc_tx_tso_segsize_max = EM_TSO_SEG_SIZE;
926 scctx->isc_capabilities = scctx->isc_capenable = LEM_CAPS;
927 if (em_unsupported_tso)
928 scctx->isc_capabilities |= IFCAP_TSO6;
929 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO |
930 CSUM_IP6_TCP | CSUM_IP6_UDP;
931
932 /* Disable TSO on all lem(4) until ring stalls can be debugged */
933 scctx->isc_capenable &= ~IFCAP_TSO;
934
935 /* 82541ER doesn't do HW tagging */
936 if (hw->device_id == E1000_DEV_ID_82541ER ||
937 hw->device_id == E1000_DEV_ID_82541ER_LOM) {
938 scctx->isc_capabilities &= ~IFCAP_VLAN_HWTAGGING;
939 scctx->isc_capenable = scctx->isc_capabilities;
940 }
941 /* This is the first e1000 chip and it does not do offloads */
942 if (hw->mac.type == e1000_82542) {
943 scctx->isc_capabilities &= ~(IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
944 IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWTAGGING |
945 IFCAP_VLAN_HWFILTER | IFCAP_TSO | IFCAP_VLAN_HWTSO);
946 scctx->isc_capenable = scctx->isc_capabilities;
947 }
948 /* These can't do TSO for various reasons */
949 if (hw->mac.type < e1000_82544 || hw->mac.type == e1000_82547 ||
950 hw->mac.type == e1000_82547_rev_2) {
951 scctx->isc_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
952 scctx->isc_capenable = scctx->isc_capabilities;
953 }
954 /* XXXKB: No IPv6 before this? */
955 if (hw->mac.type < e1000_82545){
956 scctx->isc_capabilities &= ~IFCAP_HWCSUM_IPV6;
957 scctx->isc_capenable = scctx->isc_capabilities;
958 }
959 /* "PCI/PCI-X SDM 4.0" page 33 (b) - FDX requirement on these chips */
960 if (hw->mac.type == e1000_82547 || hw->mac.type == e1000_82547_rev_2)
961 scctx->isc_capenable &= ~(IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
962 IFCAP_HWCSUM_IPV6);
963
964 /* INTx only */
965 scctx->isc_msix_bar = 0;
966 }
967
968 /* Setup PCI resources */
969 if (em_allocate_pci_resources(ctx)) {
970 device_printf(dev, "Allocation of PCI resources failed\n");
971 error = ENXIO;
972 goto err_pci;
973 }
974
975 /*
976 ** For ICH8 and family we need to
977 ** map the flash memory, and this
978 ** must happen after the MAC is
979 ** identified
980 */
981 if ((hw->mac.type == e1000_ich8lan) ||
982 (hw->mac.type == e1000_ich9lan) ||
983 (hw->mac.type == e1000_ich10lan) ||
984 (hw->mac.type == e1000_pchlan) ||
985 (hw->mac.type == e1000_pch2lan) ||
986 (hw->mac.type == e1000_pch_lpt)) {
987 int rid = EM_BAR_TYPE_FLASH;
988 sc->flash = bus_alloc_resource_any(dev,
989 SYS_RES_MEMORY, &rid, RF_ACTIVE);
990 if (sc->flash == NULL) {
991 device_printf(dev, "Mapping of Flash failed\n");
992 error = ENXIO;
993 goto err_pci;
994 }
995 /* This is used in the shared code */
996 hw->flash_address = (u8 *)sc->flash;
997 sc->osdep.flash_bus_space_tag =
998 rman_get_bustag(sc->flash);
999 sc->osdep.flash_bus_space_handle =
1000 rman_get_bushandle(sc->flash);
1001 }
1002 /*
1003 ** In the new SPT device flash is not a
1004 ** separate BAR, rather it is also in BAR0,
1005 ** so use the same tag and an offset handle for the
1006 ** FLASH read/write macros in the shared code.
1007 */
1008 else if (hw->mac.type >= e1000_pch_spt) {
1009 sc->osdep.flash_bus_space_tag =
1010 sc->osdep.mem_bus_space_tag;
1011 sc->osdep.flash_bus_space_handle =
1012 sc->osdep.mem_bus_space_handle
1013 + E1000_FLASH_BASE_ADDR;
1014 }
1015
1016 /* Do Shared Code initialization */
1017 error = e1000_setup_init_funcs(hw, true);
1018 if (error) {
1019 device_printf(dev, "Setup of Shared code failed, error %d\n",
1020 error);
1021 error = ENXIO;
1022 goto err_pci;
1023 }
1024
1025 em_setup_msix(ctx);
1026 e1000_get_bus_info(hw);
1027
1028 /* Set up some sysctls for the tunable interrupt delays */
1029 em_add_int_delay_sysctl(sc, "rx_int_delay",
1030 "receive interrupt delay in usecs", &sc->rx_int_delay,
1031 E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
1032 em_add_int_delay_sysctl(sc, "tx_int_delay",
1033 "transmit interrupt delay in usecs", &sc->tx_int_delay,
1034 E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt);
1035 em_add_int_delay_sysctl(sc, "rx_abs_int_delay",
1036 "receive interrupt delay limit in usecs",
1037 &sc->rx_abs_int_delay,
1038 E1000_REGISTER(hw, E1000_RADV),
1039 em_rx_abs_int_delay_dflt);
1040 em_add_int_delay_sysctl(sc, "tx_abs_int_delay",
1041 "transmit interrupt delay limit in usecs",
1042 &sc->tx_abs_int_delay,
1043 E1000_REGISTER(hw, E1000_TADV),
1044 em_tx_abs_int_delay_dflt);
1045 em_add_int_delay_sysctl(sc, "itr",
1046 "interrupt delay limit in usecs/4",
1047 &sc->tx_itr,
1048 E1000_REGISTER(hw, E1000_ITR),
1049 DEFAULT_ITR);
1050
1051 hw->mac.autoneg = DO_AUTO_NEG;
1052 hw->phy.autoneg_wait_to_complete = false;
1053 hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1054
1055 if (hw->mac.type < em_mac_min) {
1056 e1000_init_script_state_82541(hw, true);
1057 e1000_set_tbi_compatibility_82543(hw, true);
1058 }
1059 /* Copper options */
1060 if (hw->phy.media_type == e1000_media_type_copper) {
1061 hw->phy.mdix = AUTO_ALL_MODES;
1062 hw->phy.disable_polarity_correction = false;
1063 hw->phy.ms_type = EM_MASTER_SLAVE;
1064 }
1065
1066 /*
1067 * Set the frame limits assuming
1068 * standard ethernet sized frames.
1069 */
1070 scctx->isc_max_frame_size = hw->mac.max_frame_size =
1071 ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
1072
1073 /*
1074 * This controls when hardware reports transmit completion
1075 * status.
1076 */
1077 hw->mac.report_tx_early = 1;
1078
1079 /* Allocate multicast array memory. */
1080 sc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
1081 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
1082 if (sc->mta == NULL) {
1083 device_printf(dev, "Can not allocate multicast setup array\n");
1084 error = ENOMEM;
1085 goto err_late;
1086 }
1087
1088 /* Clear the IFCAP_TSO auto mask */
1089 sc->tso_automasked = 0;
1090
1091 /* Check SOL/IDER usage */
1092 if (e1000_check_reset_block(hw))
1093 device_printf(dev, "PHY reset is blocked"
1094 " due to SOL/IDER session.\n");
1095
1096 /* Sysctl for setting Energy Efficient Ethernet */
1097 hw->dev_spec.ich8lan.eee_disable = eee_setting;
1098 SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_control",
1099 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
1100 em_sysctl_eee, "I", "Disable Energy Efficient Ethernet");
1101
1102 /*
1103 ** Start from a known state; this is
1104 ** important for reading the NVM and
1105 ** MAC address from the hardware.
1106 */
1107 e1000_reset_hw(hw);
1108
1109 /* Make sure we have a good EEPROM before we read from it */
1110 if (e1000_validate_nvm_checksum(hw) < 0) {
1111 /*
1112 ** Some PCI-E parts fail the first check due to
1113 ** the link being in sleep state; call it again and,
1114 ** if it fails a second time, it's a real issue.
1115 */
1116 if (e1000_validate_nvm_checksum(hw) < 0) {
1117 device_printf(dev,
1118 "The EEPROM Checksum Is Not Valid\n");
1119 error = EIO;
1120 goto err_late;
1121 }
1122 }
1123
1124 /* Copy the permanent MAC address out of the EEPROM */
1125 if (e1000_read_mac_addr(hw) < 0) {
1126 device_printf(dev, "EEPROM read error while reading MAC"
1127 " address\n");
1128 error = EIO;
1129 goto err_late;
1130 }
1131
1132 if (!em_is_valid_ether_addr(hw->mac.addr)) {
1133 if (sc->vf_ifp) {
1134 ether_gen_addr(iflib_get_ifp(ctx),
1135 (struct ether_addr *)hw->mac.addr);
1136 } else {
1137 device_printf(dev, "Invalid MAC address\n");
1138 error = EIO;
1139 goto err_late;
1140 }
1141 }
1142
1143 /* Save the EEPROM/NVM versions, must be done under IFLIB_CTX_LOCK */
1144 em_fw_version_locked(ctx);
1145
1146 em_print_fw_version(sc);
1147
1148 /*
1149 * Get Wake-on-Lan and Management info for later use
1150 */
1151 em_get_wakeup(ctx);
1152
1153 /* Enable only WOL MAGIC by default */
1154 scctx->isc_capenable &= ~IFCAP_WOL;
1155 if (sc->wol != 0)
1156 scctx->isc_capenable |= IFCAP_WOL_MAGIC;
1157
1158 iflib_set_mac(ctx, hw->mac.addr);
1159
1160 return (0);
1161
1162 err_late:
1163 em_release_hw_control(sc);
1164 err_pci:
1165 em_free_pci_resources(ctx);
1166 free(sc->mta, M_DEVBUF);
1167
1168 return (error);
1169 }
1170
1171 static int
1172 em_if_attach_post(if_ctx_t ctx)
1173 {
1174 struct e1000_softc *sc = iflib_get_softc(ctx);
1175 struct e1000_hw *hw = &sc->hw;
1176 int error = 0;
1177
1178 /* Setup OS specific network interface */
1179 error = em_setup_interface(ctx);
1180 if (error != 0) {
1181 device_printf(sc->dev, "Interface setup failed: %d\n", error);
1182 goto err_late;
1183 }
1184
1185 em_reset(ctx);
1186
1187 /* Initialize statistics */
1188 em_update_stats_counters(sc);
1189 hw->mac.get_link_status = 1;
1190 em_if_update_admin_status(ctx);
1191 em_add_hw_stats(sc);
1192
1193 /* Non-AMT based hardware can now take control from firmware */
1194 if (sc->has_manage && !sc->has_amt)
1195 em_get_hw_control(sc);
1196
1197 INIT_DEBUGOUT("em_if_attach_post: end");
1198
1199 return (0);
1200
1201 err_late:
1202 /* upon attach_post() error, iflib calls _if_detach() to free resources. */
1203 return (error);
1204 }
1205
1206 /*********************************************************************
1207 * Device removal routine
1208 *
1209 * The detach entry point is called when the driver is being removed.
1210 * This routine stops the adapter and deallocates all the resources
1211 * that were allocated for driver operation.
1212 *
1213 * return 0 on success, positive on failure
1214 *********************************************************************/
1215 static int
1216 em_if_detach(if_ctx_t ctx)
1217 {
1218 struct e1000_softc *sc = iflib_get_softc(ctx);
1219
1220 INIT_DEBUGOUT("em_if_detach: begin");
1221
1222 e1000_phy_hw_reset(&sc->hw);
1223
1224 em_release_manageability(sc);
1225 em_release_hw_control(sc);
1226 em_free_pci_resources(ctx);
1227 free(sc->mta, M_DEVBUF);
1228 sc->mta = NULL;
1229
1230 return (0);
1231 }
1232
1233 /*********************************************************************
1234 *
1235 * Shutdown entry point
1236 *
1237 **********************************************************************/
1238
1239 static int
1240 em_if_shutdown(if_ctx_t ctx)
1241 {
1242 return em_if_suspend(ctx);
1243 }
1244
1245 /*
1246 * Suspend/resume device methods.
1247 */
1248 static int
1249 em_if_suspend(if_ctx_t ctx)
1250 {
1251 struct e1000_softc *sc = iflib_get_softc(ctx);
1252
1253 em_release_manageability(sc);
1254 em_release_hw_control(sc);
1255 em_enable_wakeup(ctx);
1256 return (0);
1257 }
1258
1259 static int
1260 em_if_resume(if_ctx_t ctx)
1261 {
1262 struct e1000_softc *sc = iflib_get_softc(ctx);
1263
1264 if (sc->hw.mac.type == e1000_pch2lan)
1265 e1000_resume_workarounds_pchlan(&sc->hw);
1266 em_if_init(ctx);
1267 em_init_manageability(sc);
1268
1269 return(0);
1270 }
1271
1272 static int
1273 em_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1274 {
1275 int max_frame_size;
1276 struct e1000_softc *sc = iflib_get_softc(ctx);
1277 if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
1278
1279 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1280
1281 switch (sc->hw.mac.type) {
1282 case e1000_82571:
1283 case e1000_82572:
1284 case e1000_ich9lan:
1285 case e1000_ich10lan:
1286 case e1000_pch2lan:
1287 case e1000_pch_lpt:
1288 case e1000_pch_spt:
1289 case e1000_pch_cnp:
1290 case e1000_pch_tgp:
1291 case e1000_pch_adp:
1292 case e1000_pch_mtp:
1293 case e1000_pch_ptp:
1294 case e1000_82574:
1295 case e1000_82583:
1296 case e1000_80003es2lan:
1297 /* 9K Jumbo Frame size */
1298 max_frame_size = 9234;
1299 break;
1300 case e1000_pchlan:
1301 max_frame_size = 4096;
1302 break;
1303 case e1000_82542:
1304 case e1000_ich8lan:
1305 /* Adapters that do not support jumbo frames */
1306 max_frame_size = ETHER_MAX_LEN;
1307 break;
1308 default:
1309 if (sc->hw.mac.type >= igb_mac_min)
1310 max_frame_size = 9234;
1311 else /* lem */
1312 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1313 }
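/*
 * The MTU excludes the Ethernet header and CRC, e.g. with a 9234-byte
 * maximum frame the largest acceptable MTU is 9234 - 14 - 4 = 9216 bytes.
 */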
1314 if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
1315 return (EINVAL);
1316 }
1317
1318 scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
1319 mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1320 return (0);
1321 }
1322
1323 /*********************************************************************
1324 * Init entry point
1325 *
1326 * This routine is used in two ways. It is used by the stack as
1327 * init entry point in network interface structure. It is also used
1328 * by the driver as a hw/sw initialization routine to get to a
1329 * consistent state.
1330 *
1331 **********************************************************************/
1332 static void
1333 em_if_init(if_ctx_t ctx)
1334 {
1335 struct e1000_softc *sc = iflib_get_softc(ctx);
1336 if_softc_ctx_t scctx = sc->shared;
1337 if_t ifp = iflib_get_ifp(ctx);
1338 struct em_tx_queue *tx_que;
1339 int i;
1340
1341 INIT_DEBUGOUT("em_if_init: begin");
1342
1343 /* Get the latest mac address, User can use a LAA */
1344 bcopy(if_getlladdr(ifp), sc->hw.mac.addr,
1345 ETHER_ADDR_LEN);
1346
1347 /* Put the address into the Receive Address Array */
1348 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);
1349
1350 /*
1351 * With the 82571 adapter, RAR[0] may be overwritten
1352 * when the other port is reset. We keep a duplicate
1353 * in RAR[14] for that eventuality, which ensures that
1354 * the interface continues to function.
1355 */
1356 if (sc->hw.mac.type == e1000_82571) {
1357 e1000_set_laa_state_82571(&sc->hw, true);
1358 e1000_rar_set(&sc->hw, sc->hw.mac.addr,
1359 E1000_RAR_ENTRIES - 1);
1360 }
1361
1362 /* Initialize the hardware */
1363 em_reset(ctx);
1364 em_if_update_admin_status(ctx);
1365
1366 for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues; i++, tx_que++) {
1367 struct tx_ring *txr = &tx_que->txr;
1368
1369 txr->tx_rs_cidx = txr->tx_rs_pidx;
1370
1371 /* Initialize the last processed descriptor to be the end of
1372 * the ring, rather than the start, so that we avoid an
1373 * off-by-one error when calculating how many descriptors are
1374 * done in the credits_update function.
1375 */
1376 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1377 }
1378
1379 /* Setup VLAN support, basic and offload if available */
1380 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
1381
1382 /* Clear bad data from Rx FIFOs */
1383 if (sc->hw.mac.type >= igb_mac_min)
1384 e1000_rx_fifo_flush_base(&sc->hw);
1385
1386 /* Configure for OS presence */
1387 em_init_manageability(sc);
1388
1389 /* Prepare transmit descriptors and buffers */
1390 em_initialize_transmit_unit(ctx);
1391
1392 /* Setup Multicast table */
1393 em_if_multi_set(ctx);
1394
1395 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
1396 em_initialize_receive_unit(ctx);
1397
1398 /* Set up VLAN support and filter */
1399 em_setup_vlan_hw_support(ctx);
1400
1401 /* Don't lose promiscuous settings */
1402 em_if_set_promisc(ctx, if_getflags(ifp));
1403 e1000_clear_hw_cntrs_base_generic(&sc->hw);
1404
1405 /* MSI-X configuration for 82574 */
1406 if (sc->hw.mac.type == e1000_82574) {
1407 int tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
1408
1409 tmp |= E1000_CTRL_EXT_PBA_CLR;
1410 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
1411 /* Set the IVAR - interrupt vector routing. */
1412 E1000_WRITE_REG(&sc->hw, E1000_IVAR, sc->ivars);
1413 } else if (sc->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
1414 igb_configure_queues(sc);
1415
1416 /* this clears any pending interrupts */
1417 E1000_READ_REG(&sc->hw, E1000_ICR);
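/* Raise a link-status-change cause so the link state is evaluated right away. */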
1418 E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
1419
1420 /* AMT based hardware can now take control from firmware */
1421 if (sc->has_manage && sc->has_amt)
1422 em_get_hw_control(sc);
1423
1424 /* Set Energy Efficient Ethernet */
1425 if (sc->hw.mac.type >= igb_mac_min &&
1426 sc->hw.phy.media_type == e1000_media_type_copper) {
1427 if (sc->hw.mac.type == e1000_i354)
1428 e1000_set_eee_i354(&sc->hw, true, true);
1429 else
1430 e1000_set_eee_i350(&sc->hw, true, true);
1431 }
1432 }
1433
1434 /*********************************************************************
1435 *
1436 * Fast Legacy/MSI Combined Interrupt Service routine
1437 *
1438 *********************************************************************/
1439 int
1440 em_intr(void *arg)
1441 {
1442 struct e1000_softc *sc = arg;
1443 if_ctx_t ctx = sc->ctx;
1444 u32 reg_icr;
1445
1446 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
1447
1448 /* Hot eject? */
1449 if (reg_icr == 0xffffffff)
1450 return FILTER_STRAY;
1451
1452 /* Definitely not our interrupt. */
1453 if (reg_icr == 0x0)
1454 return FILTER_STRAY;
1455
1456 /*
1457 * Starting with the 82571 chip, bit 31 should be used to
1458 * determine whether the interrupt belongs to us.
1459 */
1460 if (sc->hw.mac.type >= e1000_82571 &&
1461 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1462 return FILTER_STRAY;
1463
1464 /*
1465 * Only MSI-X interrupts have one-shot behavior by taking advantage
1466 * of the EIAC register. Thus, explicitly disable interrupts. This
1467 * also works around the MSI message reordering errata on certain
1468 * systems.
1469 */
1470 IFDI_INTR_DISABLE(ctx);
1471
1472 /* Link status change */
1473 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
1474 em_handle_link(ctx);
1475
1476 if (reg_icr & E1000_ICR_RXO)
1477 sc->rx_overruns++;
1478
1479 return (FILTER_SCHEDULE_THREAD);
1480 }
1481
1482 static int
1483 em_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1484 {
1485 struct e1000_softc *sc = iflib_get_softc(ctx);
1486 struct em_rx_queue *rxq = &sc->rx_queues[rxqid];
1487
1488 E1000_WRITE_REG(&sc->hw, E1000_IMS, rxq->eims);
1489 return (0);
1490 }
1491
1492 static int
1493 em_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1494 {
1495 struct e1000_softc *sc = iflib_get_softc(ctx);
1496 struct em_tx_queue *txq = &sc->tx_queues[txqid];
1497
1498 E1000_WRITE_REG(&sc->hw, E1000_IMS, txq->eims);
1499 return (0);
1500 }
1501
1502 static int
1503 igb_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1504 {
1505 struct e1000_softc *sc = iflib_get_softc(ctx);
1506 struct em_rx_queue *rxq = &sc->rx_queues[rxqid];
1507
1508 E1000_WRITE_REG(&sc->hw, E1000_EIMS, rxq->eims);
1509 return (0);
1510 }
1511
1512 static int
1513 igb_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1514 {
1515 struct e1000_softc *sc = iflib_get_softc(ctx);
1516 struct em_tx_queue *txq = &sc->tx_queues[txqid];
1517
1518 E1000_WRITE_REG(&sc->hw, E1000_EIMS, txq->eims);
1519 return (0);
1520 }
1521
1522 /*********************************************************************
1523 *
1524 * MSI-X RX Interrupt Service routine
1525 *
1526 **********************************************************************/
1527 static int
1528 em_msix_que(void *arg)
1529 {
1530 struct em_rx_queue *que = arg;
1531
1532 ++que->irqs;
1533
1534 return (FILTER_SCHEDULE_THREAD);
1535 }
1536
1537 /*********************************************************************
1538 *
1539 * MSI-X Link Fast Interrupt Service routine
1540 *
1541 **********************************************************************/
1542 static int
1543 em_msix_link(void *arg)
1544 {
1545 struct e1000_softc *sc = arg;
1546 u32 reg_icr;
1547
1548 ++sc->link_irq;
1549 MPASS(sc->hw.back != NULL);
1550 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
1551
1552 if (reg_icr & E1000_ICR_RXO)
1553 sc->rx_overruns++;
1554
1555 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
1556 em_handle_link(sc->ctx);
1557
1558 /* Re-arm unconditionally */
1559 if (sc->hw.mac.type >= igb_mac_min) {
1560 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
1561 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->link_mask);
1562 } else if (sc->hw.mac.type == e1000_82574) {
1563 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC |
1564 E1000_IMS_OTHER);
1565 /*
1566 * Because we must read the ICR for this interrupt it may
1567 * clear other causes using autoclear, for this reason we
1568 * simply create a soft interrupt for all these vectors.
1569 */
1570 if (reg_icr)
1571 E1000_WRITE_REG(&sc->hw, E1000_ICS, sc->ims);
1572 } else
1573 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
1574
1575 return (FILTER_HANDLED);
1576 }
1577
1578 static void
1579 em_handle_link(void *context)
1580 {
1581 if_ctx_t ctx = context;
1582 struct e1000_softc *sc = iflib_get_softc(ctx);
1583
1584 sc->hw.mac.get_link_status = 1;
1585 iflib_admin_intr_deferred(ctx);
1586 }
1587
1588 /*********************************************************************
1589 *
1590 * Media Ioctl callback
1591 *
1592 * This routine is called whenever the user queries the status of
1593 * the interface using ifconfig.
1594 *
1595 **********************************************************************/
1596 static void
1597 em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1598 {
1599 struct e1000_softc *sc = iflib_get_softc(ctx);
1600 u_char fiber_type = IFM_1000_SX;
1601
1602 INIT_DEBUGOUT("em_if_media_status: begin");
1603
1604 iflib_admin_intr_deferred(ctx);
1605
1606 ifmr->ifm_status = IFM_AVALID;
1607 ifmr->ifm_active = IFM_ETHER;
1608
1609 if (!sc->link_active) {
1610 return;
1611 }
1612
1613 ifmr->ifm_status |= IFM_ACTIVE;
1614
1615 if ((sc->hw.phy.media_type == e1000_media_type_fiber) ||
1616 (sc->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1617 if (sc->hw.mac.type == e1000_82545)
1618 fiber_type = IFM_1000_LX;
1619 ifmr->ifm_active |= fiber_type | IFM_FDX;
1620 } else {
1621 switch (sc->link_speed) {
1622 case 10:
1623 ifmr->ifm_active |= IFM_10_T;
1624 break;
1625 case 100:
1626 ifmr->ifm_active |= IFM_100_TX;
1627 break;
1628 case 1000:
1629 ifmr->ifm_active |= IFM_1000_T;
1630 break;
1631 }
1632 if (sc->link_duplex == FULL_DUPLEX)
1633 ifmr->ifm_active |= IFM_FDX;
1634 else
1635 ifmr->ifm_active |= IFM_HDX;
1636 }
1637 }
1638
1639 /*********************************************************************
1640 *
1641 * Media Ioctl callback
1642 *
1643 * This routine is called when the user changes speed/duplex using
1644 * media/mediaopt options with ifconfig.
1645 *
1646 **********************************************************************/
1647 static int
1648 em_if_media_change(if_ctx_t ctx)
1649 {
1650 struct e1000_softc *sc = iflib_get_softc(ctx);
1651 struct ifmedia *ifm = iflib_get_media(ctx);
1652
1653 INIT_DEBUGOUT("em_if_media_change: begin");
1654
1655 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1656 return (EINVAL);
1657
1658 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1659 case IFM_AUTO:
1660 sc->hw.mac.autoneg = DO_AUTO_NEG;
1661 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1662 break;
1663 case IFM_1000_LX:
1664 case IFM_1000_SX:
1665 case IFM_1000_T:
1666 sc->hw.mac.autoneg = DO_AUTO_NEG;
1667 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1668 break;
1669 case IFM_100_TX:
1670 sc->hw.mac.autoneg = false;
1671 sc->hw.phy.autoneg_advertised = 0;
1672 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1673 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1674 else
1675 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1676 break;
1677 case IFM_10_T:
1678 sc->hw.mac.autoneg = false;
1679 sc->hw.phy.autoneg_advertised = 0;
1680 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1681 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1682 else
1683 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1684 break;
1685 default:
1686 device_printf(sc->dev, "Unsupported media type\n");
1687 }
1688
1689 em_if_init(ctx);
1690
1691 return (0);
1692 }
1693
1694 static int
1695 em_if_set_promisc(if_ctx_t ctx, int flags)
1696 {
1697 struct e1000_softc *sc = iflib_get_softc(ctx);
1698 if_t ifp = iflib_get_ifp(ctx);
1699 u32 reg_rctl;
1700 int mcnt = 0;
1701
1702 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1703 reg_rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_UPE);
1704 if (flags & IFF_ALLMULTI)
1705 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1706 else
1707 mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
1708
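/* Multicast promiscuous mode is only needed when the filter table cannot hold every address. */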
1709 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1710 reg_rctl &= (~E1000_RCTL_MPE);
1711 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1712
1713 if (flags & IFF_PROMISC) {
1714 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1715 em_if_vlan_filter_disable(sc);
1716 /* Turn this on if you want to see bad packets */
1717 if (em_debug_sbp)
1718 reg_rctl |= E1000_RCTL_SBP;
1719 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1720 } else {
1721 if (flags & IFF_ALLMULTI) {
1722 reg_rctl |= E1000_RCTL_MPE;
1723 reg_rctl &= ~E1000_RCTL_UPE;
1724 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1725 }
1726 if (em_if_vlan_filter_used(ctx))
1727 em_if_vlan_filter_enable(sc);
1728 }
1729 return (0);
1730 }
1731
1732 static u_int
1733 em_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
1734 {
1735 u8 *mta = arg;
1736
1737 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
1738 return (0);
1739
1740 bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1741
1742 return (1);
1743 }
1744
1745 /*********************************************************************
1746 * Multicast Update
1747 *
1748 * This routine is called whenever multicast address list is updated.
1749 *
1750 **********************************************************************/
1751 static void
1752 em_if_multi_set(if_ctx_t ctx)
1753 {
1754 struct e1000_softc *sc = iflib_get_softc(ctx);
1755 if_t ifp = iflib_get_ifp(ctx);
1756 u8 *mta; /* Multicast array memory */
1757 u32 reg_rctl = 0;
1758 int mcnt = 0;
1759
1760 IOCTL_DEBUGOUT("em_set_multi: begin");
1761
1762 mta = sc->mta;
1763 bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
1764
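/* The 82542 rev 2.0 requires the receiver to be held in reset (with MWI disabled) while the multicast table is rewritten. */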
1765 if (sc->hw.mac.type == e1000_82542 &&
1766 sc->hw.revision_id == E1000_REVISION_2) {
1767 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1768 if (sc->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1769 e1000_pci_clear_mwi(&sc->hw);
1770 reg_rctl |= E1000_RCTL_RST;
1771 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1772 msec_delay(5);
1773 }
1774
1775 mcnt = if_foreach_llmaddr(ifp, em_copy_maddr, mta);
1776
1777 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1778 e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
1779
1780 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1781
1782 if (if_getflags(ifp) & IFF_PROMISC)
1783 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1784 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1785 if_getflags(ifp) & IFF_ALLMULTI) {
1786 reg_rctl |= E1000_RCTL_MPE;
1787 reg_rctl &= ~E1000_RCTL_UPE;
1788 } else
1789 reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
1790
1791 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1792
1793 if (sc->hw.mac.type == e1000_82542 &&
1794 sc->hw.revision_id == E1000_REVISION_2) {
1795 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1796 reg_rctl &= ~E1000_RCTL_RST;
1797 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1798 msec_delay(5);
1799 if (sc->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1800 e1000_pci_set_mwi(&sc->hw);
1801 }
1802 }
1803
1804 /*********************************************************************
1805 * Timer routine
1806 *
1807 * This routine schedules em_if_update_admin_status() to check for
1808 * link status and to gather statistics as well as to perform some
1809 * controller-specific hardware patting.
1810 *
1811 **********************************************************************/
1812 static void
1813 em_if_timer(if_ctx_t ctx, uint16_t qid)
1814 {
1815
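/* Only queue 0's timer drives the admin/link task; the other queues need no periodic work here. */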
1816 if (qid != 0)
1817 return;
1818
1819 iflib_admin_intr_deferred(ctx);
1820 }
1821
1822 static void
1823 em_if_update_admin_status(if_ctx_t ctx)
1824 {
1825 struct e1000_softc *sc = iflib_get_softc(ctx);
1826 struct e1000_hw *hw = &sc->hw;
1827 device_t dev = iflib_get_dev(ctx);
1828 u32 link_check, thstat, ctrl;
1829 bool automasked = false;
1830
1831 link_check = thstat = ctrl = 0;
1832 /* Get the cached link value or read phy for real */
1833 switch (hw->phy.media_type) {
1834 case e1000_media_type_copper:
1835 if (hw->mac.get_link_status) {
1836 if (hw->mac.type == e1000_pch_spt)
1837 msec_delay(50);
1838 /* Do the work to read phy */
1839 e1000_check_for_link(hw);
1840 link_check = !hw->mac.get_link_status;
1841 if (link_check) /* ESB2 fix */
1842 e1000_cfg_on_link_up(hw);
1843 } else {
1844 link_check = true;
1845 }
1846 break;
1847 case e1000_media_type_fiber:
1848 e1000_check_for_link(hw);
1849 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1850 E1000_STATUS_LU);
1851 break;
1852 case e1000_media_type_internal_serdes:
1853 e1000_check_for_link(hw);
1854 link_check = hw->mac.serdes_has_link;
1855 break;
1856 /* VF device is type_unknown */
1857 case e1000_media_type_unknown:
1858 e1000_check_for_link(hw);
1859 link_check = !hw->mac.get_link_status;
1860 /* FALLTHROUGH */
1861 default:
1862 break;
1863 }
1864
1865 /* Check for thermal downshift or shutdown */
1866 if (hw->mac.type == e1000_i350) {
1867 thstat = E1000_READ_REG(hw, E1000_THSTAT);
1868 ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
1869 }
1870
1871 /* Now check for a transition */
1872 if (link_check && (sc->link_active == 0)) {
1873 e1000_get_speed_and_duplex(hw, &sc->link_speed,
1874 &sc->link_duplex);
1875 /* Check if we must disable SPEED_MODE bit on PCI-E */
1876 if ((sc->link_speed != SPEED_1000) &&
1877 ((hw->mac.type == e1000_82571) ||
1878 (hw->mac.type == e1000_82572))) {
1879 int tarc0;
1880 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
1881 tarc0 &= ~TARC_SPEED_MODE_BIT;
1882 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
1883 }
1884 if (bootverbose)
1885 device_printf(dev, "Link is up %d Mbps %s\n",
1886 sc->link_speed,
1887 ((sc->link_duplex == FULL_DUPLEX) ?
1888 "Full Duplex" : "Half Duplex"));
1889 sc->link_active = 1;
1890 sc->smartspeed = 0;
1891 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_MASK) ==
1892 E1000_CTRL_EXT_LINK_MODE_GMII &&
1893 (thstat & E1000_THSTAT_LINK_THROTTLE))
1894 device_printf(dev, "Link: thermal downshift\n");
1895 /* Delay Link Up for Phy update */
1896 if (((hw->mac.type == e1000_i210) ||
1897 (hw->mac.type == e1000_i211)) &&
1898 (hw->phy.id == I210_I_PHY_ID))
1899 msec_delay(I210_LINK_DELAY);
1900 /* Reset if the media type changed. */
1901 if (hw->dev_spec._82575.media_changed &&
1902 hw->mac.type >= igb_mac_min) {
1903 hw->dev_spec._82575.media_changed = false;
1904 sc->flags |= IGB_MEDIA_RESET;
1905 em_reset(ctx);
1906 }
1907 /* Only do TSO on gigabit Ethernet for older chips due to errata */
1908 if (hw->mac.type < igb_mac_min)
1909 automasked = em_automask_tso(ctx);
1910
1911 /* Automasking resets the interface, so don't mark it up yet */
1912 if (!automasked)
1913 iflib_link_state_change(ctx, LINK_STATE_UP,
1914 IF_Mbps(sc->link_speed));
1915 } else if (!link_check && (sc->link_active == 1)) {
1916 sc->link_speed = 0;
1917 sc->link_duplex = 0;
1918 sc->link_active = 0;
1919 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
1920 }
1921 em_update_stats_counters(sc);
1922
1923 /* Reset LAA into RAR[0] on 82571 */
1924 if (hw->mac.type == e1000_82571 && e1000_get_laa_state_82571(hw))
1925 e1000_rar_set(hw, hw->mac.addr, 0);
1926
1927 if (hw->mac.type < em_mac_min)
1928 lem_smartspeed(sc);
1929 }
1930
1931 static void
1932 em_if_watchdog_reset(if_ctx_t ctx)
1933 {
1934 struct e1000_softc *sc = iflib_get_softc(ctx);
1935
1936 /*
1937 * Just count the event; iflib(4) will already trigger a
1938 * sufficient reset of the controller.
1939 */
1940 sc->watchdog_events++;
1941 }
1942
1943 /*********************************************************************
1944 *
1945 * This routine disables all traffic on the adapter by issuing a
1946 * global reset on the MAC.
1947 *
1948 **********************************************************************/
1949 static void
1950 em_if_stop(if_ctx_t ctx)
1951 {
1952 struct e1000_softc *sc = iflib_get_softc(ctx);
1953
1954 INIT_DEBUGOUT("em_if_stop: begin");
1955
1956 /* I219 needs special flushing to avoid hangs */
1957 if (sc->hw.mac.type >= e1000_pch_spt && sc->hw.mac.type < igb_mac_min)
1958 em_flush_desc_rings(sc);
1959
1960 e1000_reset_hw(&sc->hw);
1961 if (sc->hw.mac.type >= e1000_82544)
1962 E1000_WRITE_REG(&sc->hw, E1000_WUFC, 0);
1963
1964 e1000_led_off(&sc->hw);
1965 e1000_cleanup_led(&sc->hw);
1966 }
1967
1968 /*********************************************************************
1969 *
1970 * Determine hardware revision.
1971 *
1972 **********************************************************************/
1973 static void
1974 em_identify_hardware(if_ctx_t ctx)
1975 {
1976 device_t dev = iflib_get_dev(ctx);
1977 struct e1000_softc *sc = iflib_get_softc(ctx);
1978
1979 /* Make sure our PCI config space has the necessary stuff set */
1980 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1981
1982 /* Save off the information about this board */
1983 sc->hw.vendor_id = pci_get_vendor(dev);
1984 sc->hw.device_id = pci_get_device(dev);
1985 sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1986 sc->hw.subsystem_vendor_id =
1987 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1988 sc->hw.subsystem_device_id =
1989 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1990
1991 /* Do Shared Code Init and Setup */
1992 if (e1000_set_mac_type(&sc->hw)) {
1993 device_printf(dev, "Setup init failure\n");
1994 return;
1995 }
1996
1997 /* Are we a VF device? */
1998 if ((sc->hw.mac.type == e1000_vfadapt) ||
1999 (sc->hw.mac.type == e1000_vfadapt_i350))
2000 sc->vf_ifp = 1;
2001 else
2002 sc->vf_ifp = 0;
2003 }
2004
2005 static int
2006 em_allocate_pci_resources(if_ctx_t ctx)
2007 {
2008 struct e1000_softc *sc = iflib_get_softc(ctx);
2009 device_t dev = iflib_get_dev(ctx);
2010 int rid, val;
2011
2012 rid = PCIR_BAR(0);
2013 sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2014 &rid, RF_ACTIVE);
2015 if (sc->memory == NULL) {
2016 device_printf(dev, "Unable to allocate bus resource: memory\n");
2017 return (ENXIO);
2018 }
2019 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
2020 sc->osdep.mem_bus_space_handle =
2021 rman_get_bushandle(sc->memory);
2022 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
2023
2024 /* Only older adapters use IO mapping */
2025 if (sc->hw.mac.type < em_mac_min && sc->hw.mac.type > e1000_82543) {
2026 /* Figure out where our IO BAR is */
2027 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2028 val = pci_read_config(dev, rid, 4);
2029 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2030 break;
2031 }
2032 rid += 4;
2033 /* check for 64bit BAR */
2034 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2035 rid += 4;
2036 }
2037 if (rid >= PCIR_CIS) {
2038 device_printf(dev, "Unable to locate IO BAR\n");
2039 return (ENXIO);
2040 }
2041 sc->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
2042 &rid, RF_ACTIVE);
2043 if (sc->ioport == NULL) {
2044 device_printf(dev, "Unable to allocate bus resource: "
2045 "ioport\n");
2046 return (ENXIO);
2047 }
2048 sc->hw.io_base = 0;
2049 sc->osdep.io_bus_space_tag =
2050 rman_get_bustag(sc->ioport);
2051 sc->osdep.io_bus_space_handle =
2052 rman_get_bushandle(sc->ioport);
2053 }
2054
2055 sc->hw.back = &sc->osdep;
2056
2057 return (0);
2058 }
2059
2060 /*********************************************************************
2061 *
2062 * Set up the MSI-X Interrupt handlers
2063 *
2064 **********************************************************************/
2065 static int
2066 em_if_msix_intr_assign(if_ctx_t ctx, int msix)
2067 {
2068 struct e1000_softc *sc = iflib_get_softc(ctx);
2069 struct em_rx_queue *rx_que = sc->rx_queues;
2070 struct em_tx_queue *tx_que = sc->tx_queues;
2071 int error, rid, i, vector = 0, rx_vectors;
2072 char buf[16];
2073
2074 /* First set up ring resources */
2075 for (i = 0; i < sc->rx_num_queues; i++, rx_que++, vector++) {
2076 rid = vector + 1;
2077 snprintf(buf, sizeof(buf), "rxq%d", i);
2078 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, em_msix_que, rx_que, rx_que->me, buf);
2079 if (error) {
2080 device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
2081 sc->rx_num_queues = i + 1;
2082 goto fail;
2083 }
2084
2085 rx_que->msix = vector;
2086
2087 /*
2088 * Set the bit to enable interrupt
2089 * in E1000_IMS -- bits 20 and 21
2090 * are for RX0 and RX1, note this has
2091 * NOTHING to do with the MSI-X vector
2092 */
2093 if (sc->hw.mac.type == e1000_82574) {
2094 rx_que->eims = 1 << (20 + i);
2095 sc->ims |= rx_que->eims;
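/* Each 4-bit IVAR field holds the 3-bit vector number; the 0x8 bit marks the entry valid. */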
2096 sc->ivars |= (8 | rx_que->msix) << (i * 4);
2097 } else if (sc->hw.mac.type == e1000_82575)
2098 rx_que->eims = E1000_EICR_TX_QUEUE0 << vector;
2099 else
2100 rx_que->eims = 1 << vector;
2101 }
2102 rx_vectors = vector;
2103
2104 vector = 0;
2105 for (i = 0; i < sc->tx_num_queues; i++, tx_que++, vector++) {
2106 snprintf(buf, sizeof(buf), "txq%d", i);
2107 tx_que = &sc->tx_queues[i];
2108 iflib_softirq_alloc_generic(ctx,
2109 &sc->rx_queues[i % sc->rx_num_queues].que_irq,
2110 IFLIB_INTR_TX, tx_que, tx_que->me, buf);
2111
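/* TX completions are serviced on the RX queue vectors, so map each TX queue onto an RX MSI-X vector. */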
2112 tx_que->msix = (vector % sc->rx_num_queues);
2113
2114 /*
2115 * Set the bit to enable interrupt
2116 * in E1000_IMS -- bits 22 and 23
2117 * are for TX0 and TX1, note this has
2118 * NOTHING to do with the MSI-X vector
2119 */
2120 if (sc->hw.mac.type == e1000_82574) {
2121 tx_que->eims = 1 << (22 + i);
2122 sc->ims |= tx_que->eims;
2123 sc->ivars |= (8 | tx_que->msix) << (8 + (i * 4));
2124 } else if (sc->hw.mac.type == e1000_82575) {
2125 tx_que->eims = E1000_EICR_TX_QUEUE0 << i;
2126 } else {
2127 tx_que->eims = 1 << i;
2128 }
2129 }
2130
2131 /* Link interrupt */
2132 rid = rx_vectors + 1;
2133 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, sc, 0, "aq");
2134
2135 if (error) {
2136 device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
2137 goto fail;
2138 }
2139 sc->linkvec = rx_vectors;
2140 if (sc->hw.mac.type < igb_mac_min) {
2141 sc->ivars |= (8 | rx_vectors) << 16;
2142 sc->ivars |= 0x80000000;
2143 /* Enable the "Other" interrupt type for link status change */
2144 sc->ims |= E1000_IMS_OTHER;
2145 }
2146
2147 return (0);
2148 fail:
2149 iflib_irq_free(ctx, &sc->irq);
2150 rx_que = sc->rx_queues;
2151 for (int i = 0; i < sc->rx_num_queues; i++, rx_que++)
2152 iflib_irq_free(ctx, &rx_que->que_irq);
2153 return (error);
2154 }
2155
2156 static void
2157 igb_configure_queues(struct e1000_softc *sc)
2158 {
2159 struct e1000_hw *hw = &sc->hw;
2160 struct em_rx_queue *rx_que;
2161 struct em_tx_queue *tx_que;
2162 u32 tmp, ivar = 0, newitr = 0;
2163
2164 /* First turn on RSS capability */
2165 if (hw->mac.type != e1000_82575)
2166 E1000_WRITE_REG(hw, E1000_GPIE,
2167 E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
2168 E1000_GPIE_PBA | E1000_GPIE_NSICR);
2169
2170 /* Turn on MSI-X */
2171 switch (hw->mac.type) {
2172 case e1000_82580:
2173 case e1000_i350:
2174 case e1000_i354:
2175 case e1000_i210:
2176 case e1000_i211:
2177 case e1000_vfadapt:
2178 case e1000_vfadapt_i350:
2179 /* RX entries */
2180 for (int i = 0; i < sc->rx_num_queues; i++) {
2181 u32 index = i >> 1;
2182 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2183 rx_que = &sc->rx_queues[i];
2184 if (i & 1) {
2185 ivar &= 0xFF00FFFF;
2186 ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
2187 } else {
2188 ivar &= 0xFFFFFF00;
2189 ivar |= rx_que->msix | E1000_IVAR_VALID;
2190 }
2191 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2192 }
2193 /* TX entries */
2194 for (int i = 0; i < sc->tx_num_queues; i++) {
2195 u32 index = i >> 1;
2196 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2197 tx_que = &sc->tx_queues[i];
2198 if (i & 1) {
2199 ivar &= 0x00FFFFFF;
2200 ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
2201 } else {
2202 ivar &= 0xFFFF00FF;
2203 ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
2204 }
2205 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2206 sc->que_mask |= tx_que->eims;
2207 }
2208
2209 /* And for the link interrupt */
2210 ivar = (sc->linkvec | E1000_IVAR_VALID) << 8;
2211 sc->link_mask = 1 << sc->linkvec;
2212 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
2213 break;
2214 case e1000_82576:
2215 /* RX entries */
2216 for (int i = 0; i < sc->rx_num_queues; i++) {
2217 u32 index = i & 0x7; /* Each IVAR has two entries */
2218 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2219 rx_que = &sc->rx_queues[i];
2220 if (i < 8) {
2221 ivar &= 0xFFFFFF00;
2222 ivar |= rx_que->msix | E1000_IVAR_VALID;
2223 } else {
2224 ivar &= 0xFF00FFFF;
2225 ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
2226 }
2227 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2228 sc->que_mask |= rx_que->eims;
2229 }
2230 /* TX entries */
2231 for (int i = 0; i < sc->tx_num_queues; i++) {
2232 u32 index = i & 0x7; /* Each IVAR has two entries */
2233 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2234 tx_que = &sc->tx_queues[i];
2235 if (i < 8) {
2236 ivar &= 0xFFFF00FF;
2237 ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
2238 } else {
2239 ivar &= 0x00FFFFFF;
2240 ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
2241 }
2242 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2243 sc->que_mask |= tx_que->eims;
2244 }
2245
2246 /* And for the link interrupt */
2247 ivar = (sc->linkvec | E1000_IVAR_VALID) << 8;
2248 sc->link_mask = 1 << sc->linkvec;
2249 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
2250 break;
2251
2252 case e1000_82575:
2253 /* enable MSI-X support*/
2254 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
2255 tmp |= E1000_CTRL_EXT_PBA_CLR;
2256 /* Auto-Mask interrupts upon ICR read. */
2257 tmp |= E1000_CTRL_EXT_EIAME;
2258 tmp |= E1000_CTRL_EXT_IRCA;
2259 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
2260
2261 /* Queues */
2262 for (int i = 0; i < sc->rx_num_queues; i++) {
2263 rx_que = &sc->rx_queues[i];
2264 tmp = E1000_EICR_RX_QUEUE0 << i;
2265 tmp |= E1000_EICR_TX_QUEUE0 << i;
2266 rx_que->eims = tmp;
2267 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
2268 i, rx_que->eims);
2269 sc->que_mask |= rx_que->eims;
2270 }
2271
2272 /* Link */
2273 E1000_WRITE_REG(hw, E1000_MSIXBM(sc->linkvec),
2274 E1000_EIMS_OTHER);
2275 sc->link_mask |= E1000_EIMS_OTHER;
2276 default:
2277 break;
2278 }
2279
2280 /* Set the starting interrupt rate */
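/* Convert the interrupts/sec tunable into the register's interval encoding and clear the low bits the interval field does not use. */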
2281 if (em_max_interrupt_rate > 0)
2282 newitr = (4000000 / em_max_interrupt_rate) & 0x7FFC;
2283
2284 if (hw->mac.type == e1000_82575)
2285 newitr |= newitr << 16;
2286 else
2287 newitr |= E1000_EITR_CNT_IGNR;
2288
2289 for (int i = 0; i < sc->rx_num_queues; i++) {
2290 rx_que = &sc->rx_queues[i];
2291 E1000_WRITE_REG(hw, E1000_EITR(rx_que->msix), newitr);
2292 }
2293
2294 return;
2295 }
2296
2297 static void
2298 em_free_pci_resources(if_ctx_t ctx)
2299 {
2300 struct e1000_softc *sc = iflib_get_softc(ctx);
2301 struct em_rx_queue *que = sc->rx_queues;
2302 device_t dev = iflib_get_dev(ctx);
2303
2304 /* Release all MSI-X queue resources */
2305 if (sc->intr_type == IFLIB_INTR_MSIX)
2306 iflib_irq_free(ctx, &sc->irq);
2307
2308 if (que != NULL) {
2309 for (int i = 0; i < sc->rx_num_queues; i++, que++) {
2310 iflib_irq_free(ctx, &que->que_irq);
2311 }
2312 }
2313
2314 if (sc->memory != NULL) {
2315 bus_release_resource(dev, SYS_RES_MEMORY,
2316 rman_get_rid(sc->memory), sc->memory);
2317 sc->memory = NULL;
2318 }
2319
2320 if (sc->flash != NULL) {
2321 bus_release_resource(dev, SYS_RES_MEMORY,
2322 rman_get_rid(sc->flash), sc->flash);
2323 sc->flash = NULL;
2324 }
2325
2326 if (sc->ioport != NULL) {
2327 bus_release_resource(dev, SYS_RES_IOPORT,
2328 rman_get_rid(sc->ioport), sc->ioport);
2329 sc->ioport = NULL;
2330 }
2331 }
2332
2333 /* Set up MSI or MSI-X */
2334 static int
2335 em_setup_msix(if_ctx_t ctx)
2336 {
2337 struct e1000_softc *sc = iflib_get_softc(ctx);
2338
2339 if (sc->hw.mac.type == e1000_82574) {
2340 em_enable_vectors_82574(ctx);
2341 }
2342 return (0);
2343 }
2344
2345 /*********************************************************************
2346 *
2347 * Workaround for SmartSpeed on 82541 and 82547 controllers
2348 *
2349 **********************************************************************/
2350 static void
2351 lem_smartspeed(struct e1000_softc *sc)
2352 {
2353 u16 phy_tmp;
2354
2355 if (sc->link_active || (sc->hw.phy.type != e1000_phy_igp) ||
2356 sc->hw.mac.autoneg == 0 ||
2357 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2358 return;
2359
2360 if (sc->smartspeed == 0) {
2361 /* If Master/Slave config fault is asserted twice,
2362 * we assume back-to-back */
2363 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2364 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2365 return;
2366 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2367 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2368 e1000_read_phy_reg(&sc->hw,
2369 PHY_1000T_CTRL, &phy_tmp);
2370 if(phy_tmp & CR_1000T_MS_ENABLE) {
2371 phy_tmp &= ~CR_1000T_MS_ENABLE;
2372 e1000_write_phy_reg(&sc->hw,
2373 PHY_1000T_CTRL, phy_tmp);
2374 sc->smartspeed++;
2375 if(sc->hw.mac.autoneg &&
2376 !e1000_copper_link_autoneg(&sc->hw) &&
2377 !e1000_read_phy_reg(&sc->hw,
2378 PHY_CONTROL, &phy_tmp)) {
2379 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2380 MII_CR_RESTART_AUTO_NEG);
2381 e1000_write_phy_reg(&sc->hw,
2382 PHY_CONTROL, phy_tmp);
2383 }
2384 }
2385 }
2386 return;
2387 } else if(sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2388 /* If still no link, perhaps using 2/3 pair cable */
2389 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2390 phy_tmp |= CR_1000T_MS_ENABLE;
2391 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2392 if(sc->hw.mac.autoneg &&
2393 !e1000_copper_link_autoneg(&sc->hw) &&
2394 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
2395 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2396 MII_CR_RESTART_AUTO_NEG);
2397 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
2398 }
2399 }
2400 /* Restart process after EM_SMARTSPEED_MAX iterations */
2401 if(sc->smartspeed++ == EM_SMARTSPEED_MAX)
2402 sc->smartspeed = 0;
2403 }
2404
2405 /*********************************************************************
2406 *
2407 * Initialize the DMA Coalescing feature
2408 *
2409 **********************************************************************/
2410 static void
2411 igb_init_dmac(struct e1000_softc *sc, u32 pba)
2412 {
2413 device_t dev = sc->dev;
2414 struct e1000_hw *hw = &sc->hw;
2415 u32 dmac, reg = ~E1000_DMACR_DMAC_EN;
2416 u16 hwm;
2417 u16 max_frame_size;
2418
2419 if (hw->mac.type == e1000_i211)
2420 return;
2421
2422 max_frame_size = sc->shared->isc_max_frame_size;
2423 if (hw->mac.type > e1000_82580) {
2424
2425 if (sc->dmac == 0) { /* Disabling it */
2426 E1000_WRITE_REG(hw, E1000_DMACR, reg);
2427 return;
2428 } else
2429 device_printf(dev, "DMA Coalescing enabled\n");
2430
2431 /* Set starting threshold */
2432 E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
2433
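/* High-water mark in 16-byte units (64 per KB of PBA): the full Rx buffer minus one max frame, but never more than 6 KB below the top of the buffer. */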
2434 hwm = 64 * pba - max_frame_size / 16;
2435 if (hwm < 64 * (pba - 6))
2436 hwm = 64 * (pba - 6);
2437 reg = E1000_READ_REG(hw, E1000_FCRTC);
2438 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
2439 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
2440 & E1000_FCRTC_RTH_COAL_MASK);
2441 E1000_WRITE_REG(hw, E1000_FCRTC, reg);
2442
2443
2444 dmac = pba - max_frame_size / 512;
2445 if (dmac < pba - 10)
2446 dmac = pba - 10;
2447 reg = E1000_READ_REG(hw, E1000_DMACR);
2448 reg &= ~E1000_DMACR_DMACTHR_MASK;
2449 reg |= ((dmac << E1000_DMACR_DMACTHR_SHIFT)
2450 & E1000_DMACR_DMACTHR_MASK);
2451
2452 /* Allow transition to L0s or L1 if available. */
2453 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
2454
2455 /* Check whether this is a 2.5Gb backplane connection
2456 * before configuring the watchdog timer: the timer value
2457 * (in msec) is programmed in 12.8 usec intervals on a
2458 * 2.5Gb link and in 32 usec intervals on any other
2459 * connection.
2460 */
2461 if (hw->mac.type == e1000_i354) {
2462 int status = E1000_READ_REG(hw, E1000_STATUS);
2463 if ((status & E1000_STATUS_2P5_SKU) &&
2464 (!(status & E1000_STATUS_2P5_SKU_OVER)))
2465 reg |= ((sc->dmac * 5) >> 6);
2466 else
2467 reg |= (sc->dmac >> 5);
2468 } else {
2469 reg |= (sc->dmac >> 5);
2470 }
2471
2472 E1000_WRITE_REG(hw, E1000_DMACR, reg);
2473
2474 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
2475
2476 /* Set the interval before transition */
2477 reg = E1000_READ_REG(hw, E1000_DMCTLX);
2478 if (hw->mac.type == e1000_i350)
2479 reg |= IGB_DMCTLX_DCFLUSH_DIS;
2480 /*
2481 ** On a 2.5Gb connection the TTLX unit is 0.4 usec, so the
2482 ** same 4 usec delay needs a value of 0xA instead of 0x4.
2483 */
2484 if (hw->mac.type == e1000_i354) {
2485 int status = E1000_READ_REG(hw, E1000_STATUS);
2486 if ((status & E1000_STATUS_2P5_SKU) &&
2487 (!(status & E1000_STATUS_2P5_SKU_OVER)))
2488 reg |= 0xA;
2489 else
2490 reg |= 0x4;
2491 } else {
2492 reg |= 0x4;
2493 }
2494
2495 E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
2496
2497 /* free space in tx packet buffer to wake from DMA coal */
2498 E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_TXPBSIZE -
2499 (2 * max_frame_size)) >> 6);
2500
2501 /* make low power state decision controlled by DMA coal */
2502 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
2503 reg &= ~E1000_PCIEMISC_LX_DECISION;
2504 E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
2505
2506 } else if (hw->mac.type == e1000_82580) {
2507 u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
2508 E1000_WRITE_REG(hw, E1000_PCIEMISC,
2509 reg & ~E1000_PCIEMISC_LX_DECISION);
2510 E1000_WRITE_REG(hw, E1000_DMACR, 0);
2511 }
2512 }
2513 /*********************************************************************
2514 * The 3 following flush routines are used as a workaround in the
2515 * I219 client parts and only for them.
2516 *
2517 * em_flush_tx_ring - remove all descriptors from the tx_ring
2518 *
2519 * We want to clear all pending descriptors from the TX ring.
2520 * Zeroing happens when the HW reads the regs. We assign the ring itself as
2521 * the data of the next descriptor. We don't care about the data since we are
2522 * about to reset the HW anyway.
2523 **********************************************************************/
2524 static void
2525 em_flush_tx_ring(struct e1000_softc *sc)
2526 {
2527 struct e1000_hw *hw = &sc->hw;
2528 struct tx_ring *txr = &sc->tx_queues->txr;
2529 struct e1000_tx_desc *txd;
2530 u32 tctl, txd_lower = E1000_TXD_CMD_IFCS;
2531 u16 size = 512;
2532
2533 tctl = E1000_READ_REG(hw, E1000_TCTL);
2534 E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
2535
2536 txd = &txr->tx_base[txr->tx_cidx_processed];
2537
2538 /* Just use the ring as a dummy buffer addr */
2539 txd->buffer_addr = txr->tx_paddr;
2540 txd->lower.data = htole32(txd_lower | size);
2541 txd->upper.data = 0;
2542
2543 /* flush descriptors to memory before notifying the HW */
2544 wmb();
2545
2546 E1000_WRITE_REG(hw, E1000_TDT(0), txr->tx_cidx_processed);
2547 mb();
2548 usec_delay(250);
2549 }
2550
2551 /*********************************************************************
2552 * em_flush_rx_ring - remove all descriptors from the rx_ring
2553 *
2554 * Mark all descriptors in the RX ring as consumed and disable the rx ring
2555 **********************************************************************/
2556 static void
2557 em_flush_rx_ring(struct e1000_softc *sc)
2558 {
2559 struct e1000_hw *hw = &sc->hw;
2560 u32 rctl, rxdctl;
2561
2562 rctl = E1000_READ_REG(hw, E1000_RCTL);
2563 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2564 E1000_WRITE_FLUSH(hw);
2565 usec_delay(150);
2566
2567 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
2568 /* zero the lower 14 bits (prefetch and host thresholds) */
2569 rxdctl &= 0xffffc000;
2570 /*
2571 * update thresholds: prefetch threshold to 31, host threshold to 1
2572 * and make sure the granularity is "descriptors" and not "cache lines"
2573 */
2574 rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
2575 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl);
2576
2577 /* momentarily enable the RX ring for the changes to take effect */
2578 E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
2579 E1000_WRITE_FLUSH(hw);
2580 usec_delay(150);
2581 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2582 }
2583
2584 /*********************************************************************
2585 * em_flush_desc_rings - remove all descriptors from the descriptor rings
2586 *
2587 * In I219, the descriptor rings must be emptied before resetting the HW
2588 * or before changing the device state to D3 during runtime (runtime PM).
2589 *
2590 * Failure to do this will cause the HW to enter a unit hang state which can
2591 * only be released by PCI reset on the device
2592 *
2593 **********************************************************************/
2594 static void
2595 em_flush_desc_rings(struct e1000_softc *sc)
2596 {
2597 struct e1000_hw *hw = &sc->hw;
2598 device_t dev = sc->dev;
2599 u16 hang_state;
2600 u32 fext_nvm11, tdlen;
2601
2602 /* First, disable MULR fix in FEXTNVM11 */
2603 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
2604 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
2605 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
2606
2607 /* do nothing if we're not in faulty state, or if the queue is empty */
2608 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
2609 hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2);
2610 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
2611 return;
2612 em_flush_tx_ring(sc);
2613
2614 /* recheck, maybe the fault is caused by the rx ring */
2615 hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2);
2616 if (hang_state & FLUSH_DESC_REQUIRED)
2617 em_flush_rx_ring(sc);
2618 }
2619
2620
2621 /*********************************************************************
2622 *
2623 * Initialize the hardware to a configuration as specified by the
2624 * sc structure.
2625 *
2626 **********************************************************************/
2627 static void
2628 em_reset(if_ctx_t ctx)
2629 {
2630 device_t dev = iflib_get_dev(ctx);
2631 struct e1000_softc *sc = iflib_get_softc(ctx);
2632 if_t ifp = iflib_get_ifp(ctx);
2633 struct e1000_hw *hw = &sc->hw;
2634 u32 rx_buffer_size;
2635 u32 pba;
2636
2637 INIT_DEBUGOUT("em_reset: begin");
2638 /* Let the firmware know the OS is in control */
2639 em_get_hw_control(sc);
2640
2641 /* Set up smart power down as default off on newer adapters. */
2642 if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 ||
2643 hw->mac.type == e1000_82572)) {
2644 u16 phy_tmp = 0;
2645
2646 /* Speed up time to link by disabling smart power down. */
2647 e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
2648 phy_tmp &= ~IGP02E1000_PM_SPD;
2649 e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
2650 }
2651
2652 /*
2653 * Packet Buffer Allocation (PBA)
2654 * Writing PBA sets the receive portion of the buffer;
2655 * the remainder is used for the transmit buffer.
2656 */
2657 switch (hw->mac.type) {
2658 /* 82547: Total Packet Buffer is 40K */
2659 case e1000_82547:
2660 case e1000_82547_rev_2:
2661 if (hw->mac.max_frame_size > 8192)
2662 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
2663 else
2664 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
2665 break;
2666 /* 82571/82572/80003es2lan: Total Packet Buffer is 48K */
2667 case e1000_82571:
2668 case e1000_82572:
2669 case e1000_80003es2lan:
2670 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
2671 break;
2672 /* 82573: Total Packet Buffer is 32K */
2673 case e1000_82573:
2674 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
2675 break;
2676 case e1000_82574:
2677 case e1000_82583:
2678 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
2679 break;
2680 case e1000_ich8lan:
2681 pba = E1000_PBA_8K;
2682 break;
2683 case e1000_ich9lan:
2684 case e1000_ich10lan:
2685 /* Boost Receive side for jumbo frames */
2686 if (hw->mac.max_frame_size > 4096)
2687 pba = E1000_PBA_14K;
2688 else
2689 pba = E1000_PBA_10K;
2690 break;
2691 case e1000_pchlan:
2692 case e1000_pch2lan:
2693 case e1000_pch_lpt:
2694 case e1000_pch_spt:
2695 case e1000_pch_cnp:
2696 case e1000_pch_tgp:
2697 case e1000_pch_adp:
2698 case e1000_pch_mtp:
2699 case e1000_pch_ptp:
2700 pba = E1000_PBA_26K;
2701 break;
2702 case e1000_82575:
2703 pba = E1000_PBA_32K;
2704 break;
2705 case e1000_82576:
2706 case e1000_vfadapt:
2707 pba = E1000_READ_REG(hw, E1000_RXPBS);
2708 pba &= E1000_RXPBS_SIZE_MASK_82576;
2709 break;
2710 case e1000_82580:
2711 case e1000_i350:
2712 case e1000_i354:
2713 case e1000_vfadapt_i350:
2714 pba = E1000_READ_REG(hw, E1000_RXPBS);
2715 pba = e1000_rxpbs_adjust_82580(pba);
2716 break;
2717 case e1000_i210:
2718 case e1000_i211:
2719 pba = E1000_PBA_34K;
2720 break;
2721 default:
2722 /* Remaining devices assumed to have a Packet Buffer of 64K. */
2723 if (hw->mac.max_frame_size > 8192)
2724 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
2725 else
2726 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
2727 }
2728
2729 /* Special needs in case of Jumbo frames */
2730 if ((hw->mac.type == e1000_82575) && (if_getmtu(ifp) > ETHERMTU)) {
2731 u32 tx_space, min_tx, min_rx;
2732 pba = E1000_READ_REG(hw, E1000_PBA);
2733 tx_space = pba >> 16;
2734 pba &= 0xffff;
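/* Tx needs room for two (max frame + descriptor - FCS) blocks, rounded up and expressed in KB; Rx needs at least one max frame. */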
2735 min_tx = (hw->mac.max_frame_size +
2736 sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
2737 min_tx = roundup2(min_tx, 1024);
2738 min_tx >>= 10;
2739 min_rx = hw->mac.max_frame_size;
2740 min_rx = roundup2(min_rx, 1024);
2741 min_rx >>= 10;
2742 if (tx_space < min_tx &&
2743 ((min_tx - tx_space) < pba)) {
2744 pba = pba - (min_tx - tx_space);
2745 /*
2746 * if short on rx space, rx wins
2747 * and must trump tx adjustment
2748 */
2749 if (pba < min_rx)
2750 pba = min_rx;
2751 }
2752 E1000_WRITE_REG(hw, E1000_PBA, pba);
2753 }
2754
2755 if (hw->mac.type < igb_mac_min)
2756 E1000_WRITE_REG(hw, E1000_PBA, pba);
2757
2758 INIT_DEBUGOUT1("em_reset: pba=%dK",pba);
2759
2760 /*
2761 * These parameters control the automatic generation (Tx) and
2762 * response (Rx) to Ethernet PAUSE frames.
2763 * - High water mark should allow for at least two frames to be
2764 * received after sending an XOFF.
2765 * - Low water mark works best when it is very near the high water mark.
2766 * This allows the receiver to restart by sending XON when it has
2767 * drained a bit. Here we use an arbitrary value of 1500 which will
2768 * restart after one full frame is pulled from the buffer. There
2769 * could be several smaller frames in the buffer and if so they will
2770 * not trigger the XON until their total number reduces the buffer
2771 * by 1500.
2772 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2773 */
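/* The low 16 bits of PBA give the Rx allocation in KB; shift by 10 to convert to bytes. */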
2774 rx_buffer_size = (pba & 0xffff) << 10;
2775 hw->fc.high_water = rx_buffer_size -
2776 roundup2(hw->mac.max_frame_size, 1024);
2777 hw->fc.low_water = hw->fc.high_water - 1500;
2778
2779 if (sc->fc) /* locally set flow control value? */
2780 hw->fc.requested_mode = sc->fc;
2781 else
2782 hw->fc.requested_mode = e1000_fc_full;
2783
2784 if (hw->mac.type == e1000_80003es2lan)
2785 hw->fc.pause_time = 0xFFFF;
2786 else
2787 hw->fc.pause_time = EM_FC_PAUSE_TIME;
2788
2789 hw->fc.send_xon = true;
2790
2791 /* Device specific overrides/settings */
2792 switch (hw->mac.type) {
2793 case e1000_pchlan:
2794 /* Workaround: no TX flow ctrl for PCH */
2795 hw->fc.requested_mode = e1000_fc_rx_pause;
2796 hw->fc.pause_time = 0xFFFF; /* override */
2797 if (if_getmtu(ifp) > ETHERMTU) {
2798 hw->fc.high_water = 0x3500;
2799 hw->fc.low_water = 0x1500;
2800 } else {
2801 hw->fc.high_water = 0x5000;
2802 hw->fc.low_water = 0x3000;
2803 }
2804 hw->fc.refresh_time = 0x1000;
2805 break;
2806 case e1000_pch2lan:
2807 case e1000_pch_lpt:
2808 case e1000_pch_spt:
2809 case e1000_pch_cnp:
2810 case e1000_pch_tgp:
2811 case e1000_pch_adp:
2812 case e1000_pch_mtp:
2813 case e1000_pch_ptp:
2814 hw->fc.high_water = 0x5C20;
2815 hw->fc.low_water = 0x5048;
2816 hw->fc.pause_time = 0x0650;
2817 hw->fc.refresh_time = 0x0400;
2818 /* Jumbos need adjusted PBA */
2819 if (if_getmtu(ifp) > ETHERMTU)
2820 E1000_WRITE_REG(hw, E1000_PBA, 12);
2821 else
2822 E1000_WRITE_REG(hw, E1000_PBA, 26);
2823 break;
2824 case e1000_82575:
2825 case e1000_82576:
2826 /* 8-byte granularity */
2827 hw->fc.low_water = hw->fc.high_water - 8;
2828 break;
2829 case e1000_82580:
2830 case e1000_i350:
2831 case e1000_i354:
2832 case e1000_i210:
2833 case e1000_i211:
2834 case e1000_vfadapt:
2835 case e1000_vfadapt_i350:
2836 /* 16-byte granularity */
2837 hw->fc.low_water = hw->fc.high_water - 16;
2838 break;
2839 case e1000_ich9lan:
2840 case e1000_ich10lan:
2841 if (if_getmtu(ifp) > ETHERMTU) {
2842 hw->fc.high_water = 0x2800;
2843 hw->fc.low_water = hw->fc.high_water - 8;
2844 break;
2845 }
2846 /* FALLTHROUGH */
2847 default:
2848 if (hw->mac.type == e1000_80003es2lan)
2849 hw->fc.pause_time = 0xFFFF;
2850 break;
2851 }
2852
2853 /* I219 needs some special flushing to avoid hangs */
2854 if (sc->hw.mac.type >= e1000_pch_spt && sc->hw.mac.type < igb_mac_min)
2855 em_flush_desc_rings(sc);
2856
2857 /* Issue a global reset */
2858 e1000_reset_hw(hw);
2859 if (hw->mac.type >= igb_mac_min) {
2860 E1000_WRITE_REG(hw, E1000_WUC, 0);
2861 } else {
2862 E1000_WRITE_REG(hw, E1000_WUFC, 0);
2863 em_disable_aspm(sc);
2864 }
2865 if (sc->flags & IGB_MEDIA_RESET) {
2866 e1000_setup_init_funcs(hw, true);
2867 e1000_get_bus_info(hw);
2868 sc->flags &= ~IGB_MEDIA_RESET;
2869 }
2870 /* and a re-init */
2871 if (e1000_init_hw(hw) < 0) {
2872 device_printf(dev, "Hardware Initialization Failed\n");
2873 return;
2874 }
2875 if (hw->mac.type >= igb_mac_min)
2876 igb_init_dmac(sc, pba);
2877
2878 E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN);
2879 e1000_get_phy_info(hw);
2880 e1000_check_for_link(hw);
2881 }
2882
2883 /*
2884 * Initialise the RSS mapping for NICs that support multiple transmit/
2885 * receive rings.
2886 */
2887
2888 #define RSSKEYLEN 10
2889 static void
2890 em_initialize_rss_mapping(struct e1000_softc *sc)
2891 {
2892 uint8_t rss_key[4 * RSSKEYLEN];
2893 uint32_t reta = 0;
2894 struct e1000_hw *hw = &sc->hw;
2895 int i;
2896
2897 /*
2898 * Configure RSS key
2899 */
2900 arc4rand(rss_key, sizeof(rss_key), 0);
2901 for (i = 0; i < RSSKEYLEN; ++i) {
2902 uint32_t rssrk = 0;
2903
2904 rssrk = EM_RSSRK_VAL(rss_key, i);
2905 E1000_WRITE_REG(hw,E1000_RSSRK(i), rssrk);
2906 }
2907
2908 /*
2909 * Configure RSS redirect table in following fashion:
2910 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2911 */
2912 for (i = 0; i < sizeof(reta); ++i) {
2913 uint32_t q;
2914
2915 q = (i % sc->rx_num_queues) << 7;
2916 reta |= q << (8 * i);
2917 }
2918
2919 for (i = 0; i < 32; ++i)
2920 E1000_WRITE_REG(hw, E1000_RETA(i), reta);
2921
2922 E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q |
2923 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2924 E1000_MRQC_RSS_FIELD_IPV4 |
2925 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX |
2926 E1000_MRQC_RSS_FIELD_IPV6_EX |
2927 E1000_MRQC_RSS_FIELD_IPV6);
2928 }
2929
2930 static void
2931 igb_initialize_rss_mapping(struct e1000_softc *sc)
2932 {
2933 struct e1000_hw *hw = &sc->hw;
2934 int i;
2935 int queue_id;
2936 u32 reta;
2937 u32 rss_key[10], mrqc, shift = 0;
2938
2939 /* XXX? */
2940 if (hw->mac.type == e1000_82575)
2941 shift = 6;
2942
2943 /*
2944 * The redirection table controls which destination
2945 * queue each bucket redirects traffic to.
2946 * Each DWORD represents four queues, with the LSB
2947 * being the first queue in the DWORD.
2948 *
2949 * This just allocates buckets to queues using round-robin
2950 * allocation.
2951 *
2952 * NOTE: It Just Happens to line up with the default
2953 * RSS allocation method.
2954 */
2955
2956 /* Warning FM follows */
2957 reta = 0;
2958 for (i = 0; i < 128; i++) {
2959 #ifdef RSS
2960 queue_id = rss_get_indirection_to_bucket(i);
2961 /*
2962 * If we have more queues than buckets, we'll
2963 * end up mapping buckets to a subset of the
2964 * queues.
2965 *
2966 * If we have more buckets than queues, we'll
2967 * end up instead assigning multiple buckets
2968 * to queues.
2969 *
2970 * Both are suboptimal, but we need to handle
2971 * the case so we don't go out of bounds
2972 * indexing arrays and such.
2973 */
2974 queue_id = queue_id % sc->rx_num_queues;
2975 #else
2976 queue_id = (i % sc->rx_num_queues);
2977 #endif
2978 /* Adjust if required */
2979 queue_id = queue_id << shift;
2980
2981 /*
2982 * The low 8 bits are for hash value (n+0);
2983 * The next 8 bits are for hash value (n+1), etc.
2984 */
2985 reta = reta >> 8;
2986 reta = reta | ( ((uint32_t) queue_id) << 24);
2987 if ((i & 3) == 3) {
2988 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
2989 reta = 0;
2990 }
2991 }
2992
2993 /* Now fill in hash table */
2994
2995 /*
2996 * MRQC: Multiple Receive Queues Command
2997 * Set queuing to RSS control, number depends on the device.
2998 */
2999 mrqc = E1000_MRQC_ENABLE_RSS_MQ;
3000
3001 #ifdef RSS
3002 /* XXX ew typecasting */
3003 rss_getkey((uint8_t *) &rss_key);
3004 #else
3005 arc4rand(&rss_key, sizeof(rss_key), 0);
3006 #endif
3007 for (i = 0; i < 10; i++)
3008 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key[i]);
3009
3010 /*
3011 * Configure the RSS fields to hash upon.
3012 */
3013 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
3014 E1000_MRQC_RSS_FIELD_IPV4_TCP);
3015 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
3016 E1000_MRQC_RSS_FIELD_IPV6_TCP);
3017 mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP |
3018 E1000_MRQC_RSS_FIELD_IPV6_UDP);
3019 mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
3020 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3021
3022 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
3023 }
3024
3025 /*********************************************************************
3026 *
3027 * Setup networking device structure and register interface media.
3028 *
3029 **********************************************************************/
3030 static int
3031 em_setup_interface(if_ctx_t ctx)
3032 {
3033 if_t ifp = iflib_get_ifp(ctx);
3034 struct e1000_softc *sc = iflib_get_softc(ctx);
3035 if_softc_ctx_t scctx = sc->shared;
3036
3037 INIT_DEBUGOUT("em_setup_interface: begin");
3038
3039 /* Single Queue */
3040 if (sc->tx_num_queues == 1) {
3041 if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
3042 if_setsendqready(ifp);
3043 }
3044
3045 /*
3046 * Specify the media types supported by this adapter and register
3047 * callbacks to update media and link information
3048 */
3049 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
3050 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
3051 u_char fiber_type = IFM_1000_SX; /* default type */
3052
3053 if (sc->hw.mac.type == e1000_82545)
3054 fiber_type = IFM_1000_LX;
3055 ifmedia_add(sc->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL);
3056 ifmedia_add(sc->media, IFM_ETHER | fiber_type, 0, NULL);
3057 } else {
3058 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
3059 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
3060 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
3061 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
3062 if (sc->hw.phy.type != e1000_phy_ife) {
3063 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3064 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
3065 }
3066 }
3067 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3068 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
3069 return (0);
3070 }
3071
3072 static int
3073 em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
3074 {
3075 struct e1000_softc *sc = iflib_get_softc(ctx);
3076 if_softc_ctx_t scctx = sc->shared;
3077 int error = E1000_SUCCESS;
3078 struct em_tx_queue *que;
3079 int i, j;
3080
3081 MPASS(sc->tx_num_queues > 0);
3082 MPASS(sc->tx_num_queues == ntxqsets);
3083
3084 /* First allocate the top level queue structs */
3085 if (!(sc->tx_queues =
3086 (struct em_tx_queue *) malloc(sizeof(struct em_tx_queue) *
3087 sc->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3088 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
3089 return(ENOMEM);
3090 }
3091
3092 for (i = 0, que = sc->tx_queues; i < sc->tx_num_queues; i++, que++) {
3093 /* Set up some basics */
3094
3095 struct tx_ring *txr = &que->txr;
3096 txr->sc = que->sc = sc;
3097 que->me = txr->me = i;
3098
3099 /* Allocate report status array */
3100 if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
3101 device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n");
3102 error = ENOMEM;
3103 goto fail;
3104 }
3105 for (j = 0; j < scctx->isc_ntxd[0]; j++)
3106 txr->tx_rsq[j] = QIDX_INVALID;
3107 /* get the virtual and physical address of the hardware queues */
3108 txr->tx_base = (struct e1000_tx_desc *)vaddrs[i*ntxqs];
3109 txr->tx_paddr = paddrs[i*ntxqs];
3110 }
3111
3112 if (bootverbose)
3113 device_printf(iflib_get_dev(ctx),
3114 "allocated for %d tx_queues\n", sc->tx_num_queues);
3115 return (0);
3116 fail:
3117 em_if_queues_free(ctx);
3118 return (error);
3119 }
3120
3121 static int
3122 em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
3123 {
3124 struct e1000_softc *sc = iflib_get_softc(ctx);
3125 int error = E1000_SUCCESS;
3126 struct em_rx_queue *que;
3127 int i;
3128
3129 MPASS(sc->rx_num_queues > 0);
3130 MPASS(sc->rx_num_queues == nrxqsets);
3131
3132 /* First allocate the top level queue structs */
3133 if (!(sc->rx_queues =
3134 (struct em_rx_queue *) malloc(sizeof(struct em_rx_queue) *
3135 sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3136 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
3137 error = ENOMEM;
3138 goto fail;
3139 }
3140
3141 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
3142 /* Set up some basics */
3143 struct rx_ring *rxr = &que->rxr;
3144 rxr->sc = que->sc = sc;
3145 rxr->que = que;
3146 que->me = rxr->me = i;
3147
3148 /* get the virtual and physical address of the hardware queues */
3149 rxr->rx_base = (union e1000_rx_desc_extended *)vaddrs[i*nrxqs];
3150 rxr->rx_paddr = paddrs[i*nrxqs];
3151 }
3152
3153 if (bootverbose)
3154 device_printf(iflib_get_dev(ctx),
3155 "allocated for %d rx_queues\n", sc->rx_num_queues);
3156
3157 return (0);
3158 fail:
3159 em_if_queues_free(ctx);
3160 return (error);
3161 }
3162
3163 static void
3164 em_if_queues_free(if_ctx_t ctx)
3165 {
3166 struct e1000_softc *sc = iflib_get_softc(ctx);
3167 struct em_tx_queue *tx_que = sc->tx_queues;
3168 struct em_rx_queue *rx_que = sc->rx_queues;
3169
3170 if (tx_que != NULL) {
3171 for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
3172 struct tx_ring *txr = &tx_que->txr;
3173 if (txr->tx_rsq == NULL)
3174 break;
3175
3176 free(txr->tx_rsq, M_DEVBUF);
3177 txr->tx_rsq = NULL;
3178 }
3179 free(sc->tx_queues, M_DEVBUF);
3180 sc->tx_queues = NULL;
3181 }
3182
3183 if (rx_que != NULL) {
3184 free(sc->rx_queues, M_DEVBUF);
3185 sc->rx_queues = NULL;
3186 }
3187 }
3188
3189 /*********************************************************************
3190 *
3191 * Enable transmit unit.
3192 *
3193 **********************************************************************/
3194 static void
3195 em_initialize_transmit_unit(if_ctx_t ctx)
3196 {
3197 struct e1000_softc *sc = iflib_get_softc(ctx);
3198 if_softc_ctx_t scctx = sc->shared;
3199 struct em_tx_queue *que;
3200 struct tx_ring *txr;
3201 struct e1000_hw *hw = &sc->hw;
3202 u32 tctl, txdctl = 0, tarc, tipg = 0;
3203
3204 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3205
3206 for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
3207 u64 bus_addr;
3208 caddr_t offp, endp;
3209
3210 que = &sc->tx_queues[i];
3211 txr = &que->txr;
3212 bus_addr = txr->tx_paddr;
3213
3214 /* Clear checksum offload context. */
3215 offp = (caddr_t)&txr->csum_flags;
3216 endp = (caddr_t)(txr + 1);
3217 bzero(offp, endp - offp);
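/*
 * i.e. zero everything from csum_flags through the end of the
 * tx_ring structure, so no stale offload context survives a reinit.
 */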
3218
3219 /* Base and Len of TX Ring */
3220 E1000_WRITE_REG(hw, E1000_TDLEN(i),
3221 scctx->isc_ntxd[0] * sizeof(struct e1000_tx_desc));
3222 E1000_WRITE_REG(hw, E1000_TDBAH(i),
3223 (u32)(bus_addr >> 32));
3224 E1000_WRITE_REG(hw, E1000_TDBAL(i),
3225 (u32)bus_addr);
3226 /* Init the HEAD/TAIL indices */
3227 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
3228 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
3229
3230 HW_DEBUGOUT2("Base = %x, Length = %x\n",
3231 E1000_READ_REG(hw, E1000_TDBAL(i)),
3232 E1000_READ_REG(hw, E1000_TDLEN(i)));
3233
3234 txdctl = 0; /* clear txdctl */
3235 txdctl |= 0x1f; /* PTHRESH */
3236 txdctl |= 1 << 8; /* HTHRESH */
3237 txdctl |= 1 << 16;/* WTHRESH */
3238 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
3239 txdctl |= E1000_TXDCTL_GRAN;
3240 txdctl |= 1 << 25; /* LWTHRESH */
3241
3242 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
3243 }
3244
3245 /* Set the default values for the Tx Inter Packet Gap timer */
3246 switch (hw->mac.type) {
3247 case e1000_80003es2lan:
3248 tipg = DEFAULT_82543_TIPG_IPGR1;
3249 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3250 E1000_TIPG_IPGR2_SHIFT;
3251 break;
3252 case e1000_82542:
3253 tipg = DEFAULT_82542_TIPG_IPGT;
3254 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3255 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3256 break;
3257 default:
3258 if (hw->phy.media_type == e1000_media_type_fiber ||
3259 hw->phy.media_type == e1000_media_type_internal_serdes)
3260 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3261 else
3262 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3263 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3264 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3265 }
3266
3267 E1000_WRITE_REG(hw, E1000_TIPG, tipg);
3268 E1000_WRITE_REG(hw, E1000_TIDV, sc->tx_int_delay.value);
3269
3270 if(hw->mac.type >= e1000_82540)
3271 E1000_WRITE_REG(hw, E1000_TADV,
3272 sc->tx_abs_int_delay.value);
3273
3274 if (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572) {
3275 tarc = E1000_READ_REG(hw, E1000_TARC(0));
3276 tarc |= TARC_SPEED_MODE_BIT;
3277 E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
3278 } else if (hw->mac.type == e1000_80003es2lan) {
3279 /* errata: program both queues to unweighted RR */
3280 tarc = E1000_READ_REG(hw, E1000_TARC(0));
3281 tarc |= 1;
3282 E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
3283 tarc = E1000_READ_REG(hw, E1000_TARC(1));
3284 tarc |= 1;
3285 E1000_WRITE_REG(hw, E1000_TARC(1), tarc);
3286 } else if (hw->mac.type == e1000_82574) {
3287 tarc = E1000_READ_REG(hw, E1000_TARC(0));
3288 tarc |= TARC_ERRATA_BIT;
3289 if ( sc->tx_num_queues > 1) {
3290 tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX);
3291 E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
3292 E1000_WRITE_REG(hw, E1000_TARC(1), tarc);
3293 } else
3294 E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
3295 }
3296
3297 if (sc->tx_int_delay.value > 0)
3298 sc->txd_cmd |= E1000_TXD_CMD_IDE;
3299
3300 /* Program the Transmit Control Register */
3301 tctl = E1000_READ_REG(hw, E1000_TCTL);
3302 tctl &= ~E1000_TCTL_CT;
3303 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3304 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3305
3306 if (hw->mac.type >= e1000_82571)
3307 tctl |= E1000_TCTL_MULR;
3308
3309 /* This write will effectively turn on the transmit unit. */
3310 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
3311
3312 /* SPT and KBL errata workarounds */
3313 if (hw->mac.type == e1000_pch_spt) {
3314 u32 reg;
3315 reg = E1000_READ_REG(hw, E1000_IOSFPC);
3316 reg |= E1000_RCTL_RDMTS_HEX;
3317 E1000_WRITE_REG(hw, E1000_IOSFPC, reg);
3318 /* i218-i219 Specification Update 1.5.4.5 */
3319 reg = E1000_READ_REG(hw, E1000_TARC(0));
3320 reg &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
3321 reg |= E1000_TARC0_CB_MULTIQ_2_REQ;
3322 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3323 }
3324 }
3325
3326 /*********************************************************************
3327 *
3328 * Enable receive unit.
3329 *
3330 **********************************************************************/
3331 #define BSIZEPKT_ROUNDUP ((1<<E1000_SRRCTL_BSIZEPKT_SHIFT)-1)
3332
3333 static void
3334 em_initialize_receive_unit(if_ctx_t ctx)
3335 {
3336 struct e1000_softc *sc = iflib_get_softc(ctx);
3337 if_softc_ctx_t scctx = sc->shared;
3338 if_t ifp = iflib_get_ifp(ctx);
3339 struct e1000_hw *hw = &sc->hw;
3340 struct em_rx_queue *que;
3341 int i;
3342 uint32_t rctl, rxcsum;
3343
3344 INIT_DEBUGOUT("em_initialize_receive_units: begin");
3345
3346 /*
3347 * Make sure receives are disabled while setting
3348 * up the descriptor ring
3349 */
3350 rctl = E1000_READ_REG(hw, E1000_RCTL);
3351 /* Do not disable if ever enabled on this hardware */
3352 if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
3353 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3354
3355 /* Setup the Receive Control Register */
3356 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3357 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3358 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3359 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3360
3361 /* Do not store bad packets */
3362 rctl &= ~E1000_RCTL_SBP;
3363
3364 /* Enable Long Packet receive */
3365 if (if_getmtu(ifp) > ETHERMTU)
3366 rctl |= E1000_RCTL_LPE;
3367 else
3368 rctl &= ~E1000_RCTL_LPE;
3369
3370 /* Strip the CRC */
3371 if (!em_disable_crc_stripping)
3372 rctl |= E1000_RCTL_SECRC;
3373
3374 if (hw->mac.type >= e1000_82540) {
3375 E1000_WRITE_REG(hw, E1000_RADV,
3376 sc->rx_abs_int_delay.value);
3377
3378 /*
3379 * Set the interrupt throttling rate. Value is calculated
3380 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3381 */
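/*
 * Worked example (assuming MAX_INTS_PER_SEC is 8000 as defined in
 * if_em.h): ITR holds the minimum gap between interrupts in 256ns
 * units, so DEFAULT_ITR = (1s / 8000) / 256ns ~= 488, i.e. at most
 * one interrupt roughly every 125us.
 */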
3382 E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
3383 }
3384 E1000_WRITE_REG(hw, E1000_RDTR, sc->rx_int_delay.value);
3385
3386 if (hw->mac.type >= em_mac_min) {
3387 uint32_t rfctl;
3388 /* Use extended rx descriptor formats */
3389 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3390 rfctl |= E1000_RFCTL_EXTEN;
3391
3392 /*
3393 * When using MSI-X interrupts we need to throttle
3394 * using the EITR register (82574 only)
3395 */
3396 if (hw->mac.type == e1000_82574) {
3397 for (int i = 0; i < 4; i++)
3398 E1000_WRITE_REG(hw, E1000_EITR_82574(i),
3399 DEFAULT_ITR);
3400 /* Disable accelerated acknowledge */
3401 rfctl |= E1000_RFCTL_ACK_DIS;
3402 }
3403 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
3404 }
3405
3406 /* Set up L3 and L4 csum Rx descriptor offloads */
3407 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
3408 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3409 rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL;
3410 if (hw->mac.type > e1000_82575)
3411 rxcsum |= E1000_RXCSUM_CRCOFL;
3412 else if (hw->mac.type < em_mac_min &&
3413 if_getcapenable(ifp) & IFCAP_HWCSUM_IPV6)
3414 rxcsum |= E1000_RXCSUM_IPV6OFL;
3415 } else {
3416 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3417 if (hw->mac.type > e1000_82575)
3418 rxcsum &= ~E1000_RXCSUM_CRCOFL;
3419 else if (hw->mac.type < em_mac_min)
3420 rxcsum &= ~E1000_RXCSUM_IPV6OFL;
3421 }
3422
3423 if (sc->rx_num_queues > 1) {
3424 /* RSS hash needed in the Rx descriptor */
3425 rxcsum |= E1000_RXCSUM_PCSD;
3426
3427 if (hw->mac.type >= igb_mac_min)
3428 igb_initialize_rss_mapping(sc);
3429 else
3430 em_initialize_rss_mapping(sc);
3431 }
3432 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
3433
3434 /*
3435 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3436 * long latencies are observed, like Lenovo X60. This
3437 * change eliminates the problem, but since having positive
3438 * values in RDTR is a known source of problems on other
3439 * platforms another solution is being sought.
3440 */
3441 if (hw->mac.type == e1000_82573)
3442 E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
3443
3444 for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
3445 struct rx_ring *rxr = &que->rxr;
3446 /* Setup the Base and Length of the Rx Descriptor Ring */
3447 u64 bus_addr = rxr->rx_paddr;
3448 #if 0
3449 u32 rdt = sc->rx_num_queues -1; /* default */
3450 #endif
3451
3452 E1000_WRITE_REG(hw, E1000_RDLEN(i),
3453 scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended));
3454 E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
3455 E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
3456 /* Setup the Head and Tail Descriptor Pointers */
3457 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
3458 E1000_WRITE_REG(hw, E1000_RDT(i), 0);
3459 }
3460
3461 /*
3462 * Set PTHRESH for improved jumbo performance
3463 * According to 10.2.5.11 of Intel 82574 Datasheet,
3464 * RXDCTL(1) is written whenever RXDCTL(0) is written.
3465 * Only write to RXDCTL(1) if there is a need for different
3466 * settings.
3467 */
3468 if ((hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_pch2lan ||
3469 hw->mac.type == e1000_ich10lan) && if_getmtu(ifp) > ETHERMTU) {
3470 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
3471 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
3472 } else if (hw->mac.type == e1000_82574) {
3473 for (int i = 0; i < sc->rx_num_queues; i++) {
3474 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
3475 rxdctl |= 0x20; /* PTHRESH */
3476 rxdctl |= 4 << 8; /* HTHRESH */
3477 rxdctl |= 4 << 16;/* WTHRESH */
3478 rxdctl |= 1 << 24; /* Switch to granularity */
3479 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
3480 }
3481 } else if (hw->mac.type >= igb_mac_min) {
3482 u32 psize, srrctl = 0;
3483
3484 if (if_getmtu(ifp) > ETHERMTU) {
3485 psize = scctx->isc_max_frame_size;
3486 /* are we on a vlan? */
3487 if (if_vlantrunkinuse(ifp))
3488 psize += VLAN_TAG_SIZE;
3489
3490 if (sc->vf_ifp)
3491 e1000_rlpml_set_vf(hw, psize);
3492 else
3493 E1000_WRITE_REG(hw, E1000_RLPML, psize);
3494 }
3495
3496 /* Set maximum packet buffer len */
3497 srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
3498 E1000_SRRCTL_BSIZEPKT_SHIFT;
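/*
 * e.g. with the usual 1KB BSIZEPKT granularity a 2048-byte mbuf
 * yields (2048 + 1023) >> 10 = 2, i.e. 2KB packet buffers.
 */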
3499
3500 /*
3501 * If TX flow control is disabled and there's >1 queue defined,
3502 * enable DROP.
3503 *
3504 * This drops frames rather than hanging the RX MAC for all queues.
3505 */
3506 if ((sc->rx_num_queues > 1) &&
3507 (sc->fc == e1000_fc_none ||
3508 sc->fc == e1000_fc_rx_pause)) {
3509 srrctl |= E1000_SRRCTL_DROP_EN;
3510 }
3511 /* Setup the Base and Length of the Rx Descriptor Rings */
3512 for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
3513 struct rx_ring *rxr = &que->rxr;
3514 u64 bus_addr = rxr->rx_paddr;
3515 u32 rxdctl;
3516
3517 #ifdef notyet
3518 /* Configure for header split? -- ignore for now */
3519 rxr->hdr_split = igb_header_split;
3520 #else
3521 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3522 #endif
3523
3524 E1000_WRITE_REG(hw, E1000_RDLEN(i),
3525 scctx->isc_nrxd[0] * sizeof(struct e1000_rx_desc));
3526 E1000_WRITE_REG(hw, E1000_RDBAH(i),
3527 (uint32_t)(bus_addr >> 32));
3528 E1000_WRITE_REG(hw, E1000_RDBAL(i),
3529 (uint32_t)bus_addr);
3530 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
3531 /* Enable this Queue */
3532 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
3533 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3534 rxdctl &= 0xFFF00000;
3535 rxdctl |= IGB_RX_PTHRESH;
3536 rxdctl |= IGB_RX_HTHRESH << 8;
3537 rxdctl |= IGB_RX_WTHRESH << 16;
3538 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
3539 }
3540 } else if (hw->mac.type >= e1000_pch2lan) {
3541 if (if_getmtu(ifp) > ETHERMTU)
3542 e1000_lv_jumbo_workaround_ich8lan(hw, true);
3543 else
3544 e1000_lv_jumbo_workaround_ich8lan(hw, false);
3545 }
3546
3547 /* Make sure VLAN Filters are off */
3548 rctl &= ~E1000_RCTL_VFE;
3549
3550 /* Set up packet buffer size, overridden by per queue srrctl on igb */
3551 if (hw->mac.type < igb_mac_min) {
3552 if (sc->rx_mbuf_sz > 2048 && sc->rx_mbuf_sz <= 4096)
3553 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
3554 else if (sc->rx_mbuf_sz > 4096 && sc->rx_mbuf_sz <= 8192)
3555 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
3556 else if (sc->rx_mbuf_sz > 8192)
3557 rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX;
3558 else {
3559 rctl |= E1000_RCTL_SZ_2048;
3560 rctl &= ~E1000_RCTL_BSEX;
3561 }
3562 } else
3563 rctl |= E1000_RCTL_SZ_2048;
3564
3565 /*
3566 * rctl bits 11:10 are as follows
3567 * lem: reserved
3568 * em: DTYPE
3569 * igb: reserved
3570 * and should be 00 on all of the above
3571 */
3572 rctl &= ~0x00000C00;
3573
3574 /* Write out the settings */
3575 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3576
3577 return;
3578 }
3579
3580 static void
3581 em_if_vlan_register(if_ctx_t ctx, u16 vtag)
3582 {
3583 struct e1000_softc *sc = iflib_get_softc(ctx);
3584 u32 index, bit;
3585
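/*
 * The 4096 possible VLAN IDs map onto 128 32-bit words of the VLAN
 * Filter Table Array: the upper seven bits of the tag pick the word,
 * the low five bits pick the bit.  For example, VLAN 100 sets bit 4
 * (100 & 0x1F) of word 3 (100 >> 5).
 */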
3586 index = (vtag >> 5) & 0x7F;
3587 bit = vtag & 0x1F;
3588 sc->shadow_vfta[index] |= (1 << bit);
3589 ++sc->num_vlans;
3590 em_if_vlan_filter_write(sc);
3591 }
3592
3593 static void
3594 em_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
3595 {
3596 struct e1000_softc *sc = iflib_get_softc(ctx);
3597 u32 index, bit;
3598
3599 index = (vtag >> 5) & 0x7F;
3600 bit = vtag & 0x1F;
3601 sc->shadow_vfta[index] &= ~(1 << bit);
3602 --sc->num_vlans;
3603 em_if_vlan_filter_write(sc);
3604 }
3605
3606 static bool
3607 em_if_vlan_filter_capable(if_ctx_t ctx)
3608 {
3609 if_t ifp = iflib_get_ifp(ctx);
3610
3611 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) &&
3612 !em_disable_crc_stripping)
3613 return (true);
3614
3615 return (false);
3616 }
3617
3618 static bool
3619 em_if_vlan_filter_used(if_ctx_t ctx)
3620 {
3621 struct e1000_softc *sc = iflib_get_softc(ctx);
3622
3623 if (!em_if_vlan_filter_capable(ctx))
3624 return (false);
3625
3626 for (int i = 0; i < EM_VFTA_SIZE; i++)
3627 if (sc->shadow_vfta[i] != 0)
3628 return (true);
3629
3630 return (false);
3631 }
3632
3633 static void
3634 em_if_vlan_filter_enable(struct e1000_softc *sc)
3635 {
3636 struct e1000_hw *hw = &sc->hw;
3637 u32 reg;
3638
3639 reg = E1000_READ_REG(hw, E1000_RCTL);
3640 reg &= ~E1000_RCTL_CFIEN;
3641 reg |= E1000_RCTL_VFE;
3642 E1000_WRITE_REG(hw, E1000_RCTL, reg);
3643 }
3644
3645 static void
3646 em_if_vlan_filter_disable(struct e1000_softc *sc)
3647 {
3648 struct e1000_hw *hw = &sc->hw;
3649 u32 reg;
3650
3651 reg = E1000_READ_REG(hw, E1000_RCTL);
3652 reg &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
3653 E1000_WRITE_REG(hw, E1000_RCTL, reg);
3654 }
3655
3656 static void
3657 em_if_vlan_filter_write(struct e1000_softc *sc)
3658 {
3659 struct e1000_hw *hw = &sc->hw;
3660
3661 if (sc->vf_ifp)
3662 return;
3663
3664 /* Disable interrupts for lem-class devices during the filter change */
3665 if (hw->mac.type < em_mac_min)
3666 em_if_intr_disable(sc->ctx);
3667
3668 for (int i = 0; i < EM_VFTA_SIZE; i++)
3669 if (sc->shadow_vfta[i] != 0) {
3670 /* XXXKB: incomplete VF support, we return early above */
3671 if (sc->vf_ifp)
3672 e1000_vfta_set_vf(hw, sc->shadow_vfta[i], true);
3673 else
3674 e1000_write_vfta(hw, i, sc->shadow_vfta[i]);
3675 }
3676
3677 /* Re-enable interrupts for lem-class devices */
3678 if (hw->mac.type < em_mac_min)
3679 em_if_intr_enable(sc->ctx);
3680 }
3681
3682 static void
3683 em_setup_vlan_hw_support(if_ctx_t ctx)
3684 {
3685 struct e1000_softc *sc = iflib_get_softc(ctx);
3686 struct e1000_hw *hw = &sc->hw;
3687 if_t ifp = iflib_get_ifp(ctx);
3688 u32 reg;
3689
3690 /* XXXKB: Return early if we are a VF until VF decap and filter management
3691 * is ready and tested.
3692 */
3693 if (sc->vf_ifp)
3694 return;
3695
3696 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
3697 !em_disable_crc_stripping) {
3698 reg = E1000_READ_REG(hw, E1000_CTRL);
3699 reg |= E1000_CTRL_VME;
3700 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3701 } else {
3702 reg = E1000_READ_REG(hw, E1000_CTRL);
3703 reg &= ~E1000_CTRL_VME;
3704 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3705 }
3706
3707 /* If we aren't doing HW filtering, we're done */
3708 if (!em_if_vlan_filter_capable(ctx)) {
3709 em_if_vlan_filter_disable(sc);
3710 return;
3711 }
3712
3713 /*
3714 * A soft reset zero's out the VFTA, so
3715 * we need to repopulate it now.
3716 * We also insert VLAN 0 in the filter list, so we pass VLAN 0 tagged
3717 * traffic through. This will write the entire table.
3718 */
3719 em_if_vlan_register(ctx, 0);
3720
3721 /* Enable the Filter Table */
3722 em_if_vlan_filter_enable(sc);
3723 }
3724
3725 static void
3726 em_if_intr_enable(if_ctx_t ctx)
3727 {
3728 struct e1000_softc *sc = iflib_get_softc(ctx);
3729 struct e1000_hw *hw = &sc->hw;
3730 u32 ims_mask = IMS_ENABLE_MASK;
3731
3732 if (sc->intr_type == IFLIB_INTR_MSIX) {
3733 E1000_WRITE_REG(hw, EM_EIAC, sc->ims);
3734 ims_mask |= sc->ims;
3735 }
3736 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3737 E1000_WRITE_FLUSH(hw);
3738 }
3739
3740 static void
3741 em_if_intr_disable(if_ctx_t ctx)
3742 {
3743 struct e1000_softc *sc = iflib_get_softc(ctx);
3744 struct e1000_hw *hw = &sc->hw;
3745
3746 if (sc->intr_type == IFLIB_INTR_MSIX)
3747 E1000_WRITE_REG(hw, EM_EIAC, 0);
3748 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3749 E1000_WRITE_FLUSH(hw);
3750 }
3751
3752 static void
3753 igb_if_intr_enable(if_ctx_t ctx)
3754 {
3755 struct e1000_softc *sc = iflib_get_softc(ctx);
3756 struct e1000_hw *hw = &sc->hw;
3757 u32 mask;
3758
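/*
 * MSI-X path: program the extended interrupt registers so the queue
 * and link vectors auto-clear (EIAC) and auto-mask (EIAM) when they
 * fire, then unmask them via EIMS; link-state changes still arrive
 * through the legacy LSC cause.
 */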
3759 if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
3760 mask = (sc->que_mask | sc->link_mask);
3761 E1000_WRITE_REG(hw, E1000_EIAC, mask);
3762 E1000_WRITE_REG(hw, E1000_EIAM, mask);
3763 E1000_WRITE_REG(hw, E1000_EIMS, mask);
3764 E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC);
3765 } else
3766 E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
3767 E1000_WRITE_FLUSH(hw);
3768 }
3769
3770 static void
3771 igb_if_intr_disable(if_ctx_t ctx)
3772 {
3773 struct e1000_softc *sc = iflib_get_softc(ctx);
3774 struct e1000_hw *hw = &sc->hw;
3775
3776 if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
3777 E1000_WRITE_REG(hw, E1000_EIMC, 0xffffffff);
3778 E1000_WRITE_REG(hw, E1000_EIAC, 0);
3779 }
3780 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3781 E1000_WRITE_FLUSH(hw);
3782 }
3783
3784 /*
3785 * Bit of a misnomer, what this really means is
3786 * to enable OS management of the system... aka
3787 * to disable special hardware management features
3788 */
3789 static void
3790 em_init_manageability(struct e1000_softc *sc)
3791 {
3792 /* A shared code workaround */
3793 #define E1000_82542_MANC2H E1000_MANC2H
3794 if (sc->has_manage) {
3795 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
3796 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3797
3798 /* disable hardware interception of ARP */
3799 manc &= ~(E1000_MANC_ARP_EN);
3800
3801 /* enable receiving management packets to the host */
3802 manc |= E1000_MANC_EN_MNG2HOST;
3803 #define E1000_MNG2HOST_PORT_623 (1 << 5)
3804 #define E1000_MNG2HOST_PORT_664 (1 << 6)
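/*
 * 623 and 664 are the RMCP and secure-RMCP management ports; passing
 * them up means ASF/IPMI-style traffic is visible to the host as
 * well as to the management controller.
 */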
3805 manc2h |= E1000_MNG2HOST_PORT_623;
3806 manc2h |= E1000_MNG2HOST_PORT_664;
3807 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
3808 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3809 }
3810 }
3811
3812 /*
3813 * Give control back to hardware management
3814 * controller if there is one.
3815 */
3816 static void
3817 em_release_manageability(struct e1000_softc *sc)
3818 {
3819 if (sc->has_manage) {
3820 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3821
3822 /* re-enable hardware interception of ARP */
3823 manc |= E1000_MANC_ARP_EN;
3824 manc &= ~E1000_MANC_EN_MNG2HOST;
3825
3826 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3827 }
3828 }
3829
3830 /*
3831 * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3832 * For ASF and Pass Through versions of f/w this means
3833 * that the driver is loaded. For AMT version type f/w
3834 * this means that the network i/f is open.
3835 */
3836 static void
3837 em_get_hw_control(struct e1000_softc *sc)
3838 {
3839 u32 ctrl_ext, swsm;
3840
3841 if (sc->vf_ifp)
3842 return;
3843
3844 if (sc->hw.mac.type == e1000_82573) {
3845 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3846 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3847 swsm | E1000_SWSM_DRV_LOAD);
3848 return;
3849 }
3850 /* else */
3851 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3852 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3853 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3854 }
3855
3856 /*
3857 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3858 * For ASF and Pass Through versions of f/w this means that
3859 * the driver is no longer loaded. For AMT versions of the
3860 * f/w this means that the network i/f is closed.
3861 */
3862 static void
3863 em_release_hw_control(struct e1000_softc *sc)
3864 {
3865 u32 ctrl_ext, swsm;
3866
3867 if (!sc->has_manage)
3868 return;
3869
3870 if (sc->hw.mac.type == e1000_82573) {
3871 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3872 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3873 swsm & ~E1000_SWSM_DRV_LOAD);
3874 return;
3875 }
3876 /* else */
3877 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3878 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3879 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3880 return;
3881 }
3882
3883 static int
3884 em_is_valid_ether_addr(u8 *addr)
3885 {
3886 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3887
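/*
 * Reject group addresses (I/G bit set in the first octet) and the
 * all-zeroes address; anything else is acceptable as a station
 * address.
 */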
3888 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3889 return (false);
3890 }
3891
3892 return (true);
3893 }
3894
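/*
 * Automatically mask TSO off while the link is running at 10/100
 * (where these MACs do not handle TSO well) and restore the saved
 * capability bits once the link is back at 1Gb/s.  Returns true when
 * the capability set changed and an interface reset was requested.
 */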
3895 static bool
3896 em_automask_tso(if_ctx_t ctx)
3897 {
3898 struct e1000_softc *sc = iflib_get_softc(ctx);
3899 if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
3900 if_t ifp = iflib_get_ifp(ctx);
3901
3902 if (!em_unsupported_tso && sc->link_speed &&
3903 sc->link_speed != SPEED_1000 && scctx->isc_capenable & IFCAP_TSO) {
3904 device_printf(sc->dev, "Disabling TSO for 10/100 Ethernet.\n");
3905 sc->tso_automasked = scctx->isc_capenable & IFCAP_TSO;
3906 scctx->isc_capenable &= ~IFCAP_TSO;
3907 if_setcapenablebit(ifp, 0, IFCAP_TSO);
3908 /* iflib_init_locked handles ifnet hwassistbits */
3909 iflib_request_reset(ctx);
3910 return true;
3911 } else if (sc->link_speed == SPEED_1000 && sc->tso_automasked) {
3912 device_printf(sc->dev, "Re-enabling TSO for GbE.\n");
3913 scctx->isc_capenable |= sc->tso_automasked;
3914 if_setcapenablebit(ifp, sc->tso_automasked, 0);
3915 sc->tso_automasked = 0;
3916 /* iflib_init_locked handles ifnet hwassistbits */
3917 iflib_request_reset(ctx);
3918 return true;
3919 }
3920
3921 return false;
3922 }
3923
3924 /*
3925 ** Parse the interface capabilities with regard
3926 ** to both system management and wake-on-lan for
3927 ** later use.
3928 */
3929 static void
3930 em_get_wakeup(if_ctx_t ctx)
3931 {
3932 struct e1000_softc *sc = iflib_get_softc(ctx);
3933 device_t dev = iflib_get_dev(ctx);
3934 u16 eeprom_data = 0, device_id, apme_mask;
3935
3936 sc->has_manage = e1000_enable_mng_pass_thru(&sc->hw);
3937 apme_mask = EM_EEPROM_APME;
3938
3939 switch (sc->hw.mac.type) {
3940 case e1000_82542:
3941 case e1000_82543:
3942 break;
3943 case e1000_82544:
3944 e1000_read_nvm(&sc->hw,
3945 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3946 apme_mask = EM_82544_APME;
3947 break;
3948 case e1000_82546:
3949 case e1000_82546_rev_3:
3950 if (sc->hw.bus.func == 1) {
3951 e1000_read_nvm(&sc->hw,
3952 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3953 break;
3954 } else
3955 e1000_read_nvm(&sc->hw,
3956 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3957 break;
3958 case e1000_82573:
3959 case e1000_82583:
3960 sc->has_amt = true;
3961 /* FALLTHROUGH */
3962 case e1000_82571:
3963 case e1000_82572:
3964 case e1000_80003es2lan:
3965 if (sc->hw.bus.func == 1) {
3966 e1000_read_nvm(&sc->hw,
3967 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3968 break;
3969 } else
3970 e1000_read_nvm(&sc->hw,
3971 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3972 break;
3973 case e1000_ich8lan:
3974 case e1000_ich9lan:
3975 case e1000_ich10lan:
3976 case e1000_pchlan:
3977 case e1000_pch2lan:
3978 case e1000_pch_lpt:
3979 case e1000_pch_spt:
3980 case e1000_82575: /* listing all igb devices */
3981 case e1000_82576:
3982 case e1000_82580:
3983 case e1000_i350:
3984 case e1000_i354:
3985 case e1000_i210:
3986 case e1000_i211:
3987 case e1000_vfadapt:
3988 case e1000_vfadapt_i350:
3989 apme_mask = E1000_WUC_APME;
3990 sc->has_amt = true;
3991 eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC);
3992 break;
3993 default:
3994 e1000_read_nvm(&sc->hw,
3995 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3996 break;
3997 }
3998 if (eeprom_data & apme_mask)
3999 sc->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4000 /*
4001 * We have the eeprom settings, now apply the special cases
4002 * where the eeprom may be wrong or the board won't support
4003 * wake on lan on a particular port
4004 */
4005 device_id = pci_get_device(dev);
4006 switch (device_id) {
4007 case E1000_DEV_ID_82546GB_PCIE:
4008 sc->wol = 0;
4009 break;
4010 case E1000_DEV_ID_82546EB_FIBER:
4011 case E1000_DEV_ID_82546GB_FIBER:
4012 /* Wake events only supported on port A for dual fiber
4013 * regardless of eeprom setting */
4014 if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
4015 E1000_STATUS_FUNC_1)
4016 sc->wol = 0;
4017 break;
4018 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4019 /* if quad port adapter, disable WoL on all but port A */
4020 if (global_quad_port_a != 0)
4021 sc->wol = 0;
4022 /* Reset for multiple quad port adapters */
4023 if (++global_quad_port_a == 4)
4024 global_quad_port_a = 0;
4025 break;
4026 case E1000_DEV_ID_82571EB_FIBER:
4027 /* Wake events only supported on port A for dual fiber
4028 * regardless of eeprom setting */
4029 if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
4030 E1000_STATUS_FUNC_1)
4031 sc->wol = 0;
4032 break;
4033 case E1000_DEV_ID_82571EB_QUAD_COPPER:
4034 case E1000_DEV_ID_82571EB_QUAD_FIBER:
4035 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
4036 /* if quad port adapter, disable WoL on all but port A */
4037 if (global_quad_port_a != 0)
4038 sc->wol = 0;
4039 /* Reset for multiple quad port adapters */
4040 if (++global_quad_port_a == 4)
4041 global_quad_port_a = 0;
4042 break;
4043 }
4044 return;
4045 }
4046
4047
4048 /*
4049 * Enable PCI Wake On Lan capability
4050 */
4051 static void
4052 em_enable_wakeup(if_ctx_t ctx)
4053 {
4054 struct e1000_softc *sc = iflib_get_softc(ctx);
4055 device_t dev = iflib_get_dev(ctx);
4056 if_t ifp = iflib_get_ifp(ctx);
4057 int error = 0;
4058 u32 pmc, ctrl, ctrl_ext, rctl;
4059 u16 status;
4060
4061 if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
4062 return;
4063
4064 /*
4065 * Determine type of Wakeup: note that wol
4066 * is set with all bits on by default.
4067 */
4068 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
4069 sc->wol &= ~E1000_WUFC_MAG;
4070
4071 if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0)
4072 sc->wol &= ~E1000_WUFC_EX;
4073
4074 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
4075 sc->wol &= ~E1000_WUFC_MC;
4076 else {
4077 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
4078 rctl |= E1000_RCTL_MPE;
4079 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
4080 }
4081
4082 if (!(sc->wol & (E1000_WUFC_EX | E1000_WUFC_MAG | E1000_WUFC_MC)))
4083 goto pme;
4084
4085 /* Advertise the wakeup capability */
4086 ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
4087 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4088 E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
4089
4090 /* Keep the laser running on Fiber adapters */
4091 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
4092 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
4093 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
4094 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4095 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, ctrl_ext);
4096 }
4097
4098 if ((sc->hw.mac.type == e1000_ich8lan) ||
4099 (sc->hw.mac.type == e1000_pchlan) ||
4100 (sc->hw.mac.type == e1000_ich9lan) ||
4101 (sc->hw.mac.type == e1000_ich10lan))
4102 e1000_suspend_workarounds_ich8lan(&sc->hw);
4103
4104 if ( sc->hw.mac.type >= e1000_pchlan) {
4105 error = em_enable_phy_wakeup(sc);
4106 if (error)
4107 goto pme;
4108 } else {
4109 /* Enable wakeup by the MAC */
4110 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
4111 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
4112 }
4113
4114 if (sc->hw.phy.type == e1000_phy_igp_3)
4115 e1000_igp3_phy_powerdown_workaround_ich8lan(&sc->hw);
4116
4117 pme:
4118 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4119 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4120 if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
4121 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4122 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4123
4124 return;
4125 }
4126
4127 /*
4128 * WOL in the newer chipset interfaces (pchlan)
4129 * requires things to be copied into the PHY
4130 */
4131 static int
4132 em_enable_phy_wakeup(struct e1000_softc *sc)
4133 {
4134 struct e1000_hw *hw = &sc->hw;
4135 u32 mreg, ret = 0;
4136 u16 preg;
4137
4138 /* copy MAC RARs to PHY RARs */
4139 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
4140
4141 /* copy MAC MTA to PHY MTA */
4142 for (int i = 0; i < hw->mac.mta_reg_count; i++) {
4143 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4144 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4145 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4146 (u16)((mreg >> 16) & 0xFFFF));
4147 }
4148
4149 /* configure PHY Rx Control register */
4150 e1000_read_phy_reg(hw, BM_RCTL, &preg);
4151 mreg = E1000_READ_REG(hw, E1000_RCTL);
4152 if (mreg & E1000_RCTL_UPE)
4153 preg |= BM_RCTL_UPE;
4154 if (mreg & E1000_RCTL_MPE)
4155 preg |= BM_RCTL_MPE;
4156 preg &= ~(BM_RCTL_MO_MASK);
4157 if (mreg & E1000_RCTL_MO_3)
4158 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4159 << BM_RCTL_MO_SHIFT);
4160 if (mreg & E1000_RCTL_BAM)
4161 preg |= BM_RCTL_BAM;
4162 if (mreg & E1000_RCTL_PMCF)
4163 preg |= BM_RCTL_PMCF;
4164 mreg = E1000_READ_REG(hw, E1000_CTRL);
4165 if (mreg & E1000_CTRL_RFCE)
4166 preg |= BM_RCTL_RFCE;
4167 e1000_write_phy_reg(hw, BM_RCTL, preg);
4168
4169 /* enable PHY wakeup in MAC register */
4170 E1000_WRITE_REG(hw, E1000_WUC,
4171 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN | E1000_WUC_APME);
4172 E1000_WRITE_REG(hw, E1000_WUFC, sc->wol);
4173
4174 /* configure and enable PHY wakeup in PHY registers */
4175 e1000_write_phy_reg(hw, BM_WUFC, sc->wol);
4176 e1000_write_phy_reg(hw, BM_WUC, E1000_WUC_PME_EN);
4177
4178 /* activate PHY wakeup */
4179 ret = hw->phy.ops.acquire(hw);
4180 if (ret) {
4181 printf("Could not acquire PHY\n");
4182 return ret;
4183 }
4184 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4185 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4186 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4187 if (ret) {
4188 printf("Could not read PHY page 769\n");
4189 goto out;
4190 }
4191 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4192 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4193 if (ret)
4194 printf("Could not set PHY Host Wakeup bit\n");
4195 out:
4196 hw->phy.ops.release(hw);
4197
4198 return ret;
4199 }
4200
4201 static void
4202 em_if_led_func(if_ctx_t ctx, int onoff)
4203 {
4204 struct e1000_softc *sc = iflib_get_softc(ctx);
4205
4206 if (onoff) {
4207 e1000_setup_led(&sc->hw);
4208 e1000_led_on(&sc->hw);
4209 } else {
4210 e1000_led_off(&sc->hw);
4211 e1000_cleanup_led(&sc->hw);
4212 }
4213 }
4214
4215 /*
4216 * Disable the L0S and L1 LINK states
4217 */
4218 static void
4219 em_disable_aspm(struct e1000_softc *sc)
4220 {
4221 int base, reg;
4222 u16 link_cap,link_ctrl;
4223 device_t dev = sc->dev;
4224
4225 switch (sc->hw.mac.type) {
4226 case e1000_82573:
4227 case e1000_82574:
4228 case e1000_82583:
4229 break;
4230 default:
4231 return;
4232 }
4233 if (pci_find_cap(dev, PCIY_EXPRESS, &base) != 0)
4234 return;
4235 reg = base + PCIER_LINK_CAP;
4236 link_cap = pci_read_config(dev, reg, 2);
4237 if ((link_cap & PCIEM_LINK_CAP_ASPM) == 0)
4238 return;
4239 reg = base + PCIER_LINK_CTL;
4240 link_ctrl = pci_read_config(dev, reg, 2);
4241 link_ctrl &= ~PCIEM_LINK_CTL_ASPMC;
4242 pci_write_config(dev, reg, link_ctrl, 2);
4243 return;
4244 }
4245
4246 /**********************************************************************
4247 *
4248 * Update the board statistics counters.
4249 *
4250 **********************************************************************/
4251 static void
4252 em_update_stats_counters(struct e1000_softc *sc)
4253 {
4254 u64 prev_xoffrxc = sc->stats.xoffrxc;
4255
4256 if(sc->hw.phy.media_type == e1000_media_type_copper ||
4257 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4258 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
4259 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
4260 }
4261 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
4262 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
4263 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
4264 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
4265
4266 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
4267 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
4268 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
4269 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
4270 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
4271 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
4272 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
4273 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
4274 /*
4275 ** For watchdog management we need to know if we have been
4276 ** paused during the last interval, so capture that here.
4277 */
4278 if (sc->stats.xoffrxc != prev_xoffrxc)
4279 sc->shared->isc_pause_frames = 1;
4280 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
4281 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
4282 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
4283 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
4284 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
4285 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
4286 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
4287 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
4288 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
4289 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
4290 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
4291 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
4292
4293 /* For the 64-bit byte counters the low dword must be read first. */
4294 /* Both registers clear on the read of the high dword */
4295
4296 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCL) +
4297 ((u64)E1000_READ_REG(&sc->hw, E1000_GORCH) << 32);
4298 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCL) +
4299 ((u64)E1000_READ_REG(&sc->hw, E1000_GOTCH) << 32);
4300
4301 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
4302 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
4303 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
4304 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
4305 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
4306
4307 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
4308 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
4309
4310 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
4311 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
4312 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
4313 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
4314 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
4315 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
4316 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
4317 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
4318 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
4319 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
4320
4321 /* Interrupt Counts */
4322
4323 sc->stats.iac += E1000_READ_REG(&sc->hw, E1000_IAC);
4324 sc->stats.icrxptc += E1000_READ_REG(&sc->hw, E1000_ICRXPTC);
4325 sc->stats.icrxatc += E1000_READ_REG(&sc->hw, E1000_ICRXATC);
4326 sc->stats.ictxptc += E1000_READ_REG(&sc->hw, E1000_ICTXPTC);
4327 sc->stats.ictxatc += E1000_READ_REG(&sc->hw, E1000_ICTXATC);
4328 sc->stats.ictxqec += E1000_READ_REG(&sc->hw, E1000_ICTXQEC);
4329 sc->stats.ictxqmtc += E1000_READ_REG(&sc->hw, E1000_ICTXQMTC);
4330 sc->stats.icrxdmtc += E1000_READ_REG(&sc->hw, E1000_ICRXDMTC);
4331 sc->stats.icrxoc += E1000_READ_REG(&sc->hw, E1000_ICRXOC);
4332
4333 if (sc->hw.mac.type >= e1000_82543) {
4334 sc->stats.algnerrc +=
4335 E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
4336 sc->stats.rxerrc +=
4337 E1000_READ_REG(&sc->hw, E1000_RXERRC);
4338 sc->stats.tncrs +=
4339 E1000_READ_REG(&sc->hw, E1000_TNCRS);
4340 sc->stats.cexterr +=
4341 E1000_READ_REG(&sc->hw, E1000_CEXTERR);
4342 sc->stats.tsctc +=
4343 E1000_READ_REG(&sc->hw, E1000_TSCTC);
4344 sc->stats.tsctfc +=
4345 E1000_READ_REG(&sc->hw, E1000_TSCTFC);
4346 }
4347 }
4348
4349 static uint64_t
4350 em_if_get_counter(if_ctx_t ctx, ift_counter cnt)
4351 {
4352 struct e1000_softc *sc = iflib_get_softc(ctx);
4353 if_t ifp = iflib_get_ifp(ctx);
4354
4355 switch (cnt) {
4356 case IFCOUNTER_COLLISIONS:
4357 return (sc->stats.colc);
4358 case IFCOUNTER_IERRORS:
4359 return (sc->dropped_pkts + sc->stats.rxerrc +
4360 sc->stats.crcerrs + sc->stats.algnerrc +
4361 sc->stats.ruc + sc->stats.roc +
4362 sc->stats.mpc + sc->stats.cexterr);
4363 case IFCOUNTER_OERRORS:
4364 return (sc->stats.ecol + sc->stats.latecol +
4365 sc->watchdog_events);
4366 default:
4367 return (if_get_counter_default(ifp, cnt));
4368 }
4369 }
4370
4371 /* em_if_needs_restart - Tell iflib when the driver needs to be reinitialized
4372 * @ctx: iflib context
4373 * @event: event code to check
4374 *
4375 * Defaults to returning false for unknown events.
4376 *
4377 * @returns true if iflib needs to reinit the interface
4378 */
4379 static bool
4380 em_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
4381 {
4382 switch (event) {
4383 case IFLIB_RESTART_VLAN_CONFIG:
4384 default:
4385 return (false);
4386 }
4387 }
4388
4389 /* Export a single 32-bit register via a read-only sysctl. */
4390 static int
4391 em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4392 {
4393 struct e1000_softc *sc;
4394 u_int val;
4395
4396 sc = oidp->oid_arg1;
4397 val = E1000_READ_REG(&sc->hw, oidp->oid_arg2);
4398 return (sysctl_handle_int(oidp, &val, 0, req));
4399 }
4400
4401 /*
4402 * Add sysctl variables, one per statistic, to the system.
4403 */
4404 static void
4405 em_add_hw_stats(struct e1000_softc *sc)
4406 {
4407 device_t dev = iflib_get_dev(sc->ctx);
4408 struct em_tx_queue *tx_que = sc->tx_queues;
4409 struct em_rx_queue *rx_que = sc->rx_queues;
4410
4411 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4412 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4413 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4414 struct e1000_hw_stats *stats = &sc->stats;
4415
4416 struct sysctl_oid *stat_node, *queue_node, *int_node;
4417 struct sysctl_oid_list *stat_list, *queue_list, *int_list;
4418
4419 #define QUEUE_NAME_LEN 32
4420 char namebuf[QUEUE_NAME_LEN];
4421
4422 /* Driver Statistics */
4423 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4424 CTLFLAG_RD, &sc->dropped_pkts,
4425 "Driver dropped packets");
4426 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4427 CTLFLAG_RD, &sc->link_irq,
4428 "Link MSI-X IRQ Handled");
4429 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4430 CTLFLAG_RD, &sc->rx_overruns,
4431 "RX overruns");
4432 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4433 CTLFLAG_RD, &sc->watchdog_events,
4434 "Watchdog timeouts");
4435 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4436 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
4437 sc, E1000_CTRL, em_sysctl_reg_handler, "IU",
4438 "Device Control Register");
4439 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4440 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
4441 sc, E1000_RCTL, em_sysctl_reg_handler, "IU",
4442 "Receiver Control Register");
4443 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4444 CTLFLAG_RD, &sc->hw.fc.high_water, 0,
4445 "Flow Control High Watermark");
4446 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4447 CTLFLAG_RD, &sc->hw.fc.low_water, 0,
4448 "Flow Control Low Watermark");
4449
4450 for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
4451 struct tx_ring *txr = &tx_que->txr;
4452 snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
4453 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4454 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name");
4455 queue_list = SYSCTL_CHILDREN(queue_node);
4456
4457 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4458 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
4459 E1000_TDH(txr->me), em_sysctl_reg_handler, "IU",
4460 "Transmit Descriptor Head");
4461 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4462 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
4463 E1000_TDT(txr->me), em_sysctl_reg_handler, "IU",
4464 "Transmit Descriptor Tail");
4465 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
4466 CTLFLAG_RD, &txr->tx_irq,
4467 "Queue MSI-X Transmit Interrupts");
4468 }
4469
4470 for (int j = 0; j < sc->rx_num_queues; j++, rx_que++) {
4471 struct rx_ring *rxr = &rx_que->rxr;
4472 snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j);
4473 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4474 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name");
4475 queue_list = SYSCTL_CHILDREN(queue_node);
4476
4477 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4478 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
4479 E1000_RDH(rxr->me), em_sysctl_reg_handler, "IU",
4480 "Receive Descriptor Head");
4481 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4482 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
4483 E1000_RDT(rxr->me), em_sysctl_reg_handler, "IU",
4484 "Receive Descriptor Tail");
4485 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
4486 CTLFLAG_RD, &rxr->rx_irq,
4487 "Queue MSI-X Receive Interrupts");
4488 }
4489
4490 /* MAC stats get their own sub node */
4491
4492 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4493 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
4494 stat_list = SYSCTL_CHILDREN(stat_node);
4495
4496 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4497 CTLFLAG_RD, &stats->ecol,
4498 "Excessive collisions");
4499 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4500 CTLFLAG_RD, &stats->scc,
4501 "Single collisions");
4502 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4503 CTLFLAG_RD, &stats->mcc,
4504 "Multiple collisions");
4505 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4506 CTLFLAG_RD, &stats->latecol,
4507 "Late collisions");
4508 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4509 CTLFLAG_RD, &stats->colc,
4510 "Collision Count");
4511 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4512 CTLFLAG_RD, &sc->stats.symerrs,
4513 "Symbol Errors");
4514 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4515 CTLFLAG_RD, &sc->stats.sec,
4516 "Sequence Errors");
4517 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4518 CTLFLAG_RD, &sc->stats.dc,
4519 "Defer Count");
4520 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4521 CTLFLAG_RD, &sc->stats.mpc,
4522 "Missed Packets");
4523 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4524 CTLFLAG_RD, &sc->stats.rnbc,
4525 "Receive No Buffers");
4526 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4527 CTLFLAG_RD, &sc->stats.ruc,
4528 "Receive Undersize");
4529 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4530 CTLFLAG_RD, &sc->stats.rfc,
4531 "Fragmented Packets Received ");
4532 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4533 CTLFLAG_RD, &sc->stats.roc,
4534 "Oversized Packets Received");
4535 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4536 CTLFLAG_RD, &sc->stats.rjc,
4537 "Recevied Jabber");
4538 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4539 CTLFLAG_RD, &sc->stats.rxerrc,
4540 "Receive Errors");
4541 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4542 CTLFLAG_RD, &sc->stats.crcerrs,
4543 "CRC errors");
4544 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4545 CTLFLAG_RD, &sc->stats.algnerrc,
4546 "Alignment Errors");
4547 /* On 82575 these are collision counts */
4548 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4549 CTLFLAG_RD, &sc->stats.cexterr,
4550 "Collision/Carrier extension errors");
4551 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4552 CTLFLAG_RD, &sc->stats.xonrxc,
4553 "XON Received");
4554 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4555 CTLFLAG_RD, &sc->stats.xontxc,
4556 "XON Transmitted");
4557 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4558 CTLFLAG_RD, &sc->stats.xoffrxc,
4559 "XOFF Received");
4560 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4561 CTLFLAG_RD, &sc->stats.xofftxc,
4562 "XOFF Transmitted");
4563
4564 /* Packet Reception Stats */
4565 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4566 CTLFLAG_RD, &sc->stats.tpr,
4567 "Total Packets Received ");
4568 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4569 CTLFLAG_RD, &sc->stats.gprc,
4570 "Good Packets Received");
4571 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4572 CTLFLAG_RD, &sc->stats.bprc,
4573 "Broadcast Packets Received");
4574 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4575 CTLFLAG_RD, &sc->stats.mprc,
4576 "Multicast Packets Received");
4577 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4578 CTLFLAG_RD, &sc->stats.prc64,
4579 "64 byte frames received ");
4580 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4581 CTLFLAG_RD, &sc->stats.prc127,
4582 "65-127 byte frames received");
4583 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4584 CTLFLAG_RD, &sc->stats.prc255,
4585 "128-255 byte frames received");
4586 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4587 CTLFLAG_RD, &sc->stats.prc511,
4588 "256-511 byte frames received");
4589 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4590 CTLFLAG_RD, &sc->stats.prc1023,
4591 "512-1023 byte frames received");
4592 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4593 CTLFLAG_RD, &sc->stats.prc1522,
4594 "1023-1522 byte frames received");
4595 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4596 CTLFLAG_RD, &sc->stats.gorc,
4597 "Good Octets Received");
4598
4599 /* Packet Transmission Stats */
4600 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4601 CTLFLAG_RD, &sc->stats.gotc,
4602 "Good Octets Transmitted");
4603 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4604 CTLFLAG_RD, &sc->stats.tpt,
4605 "Total Packets Transmitted");
4606 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4607 CTLFLAG_RD, &sc->stats.gptc,
4608 "Good Packets Transmitted");
4609 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4610 CTLFLAG_RD, &sc->stats.bptc,
4611 "Broadcast Packets Transmitted");
4612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4613 CTLFLAG_RD, &sc->stats.mptc,
4614 "Multicast Packets Transmitted");
4615 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4616 CTLFLAG_RD, &sc->stats.ptc64,
4617 "64 byte frames transmitted ");
4618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4619 CTLFLAG_RD, &sc->stats.ptc127,
4620 "65-127 byte frames transmitted");
4621 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4622 CTLFLAG_RD, &sc->stats.ptc255,
4623 "128-255 byte frames transmitted");
4624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4625 CTLFLAG_RD, &sc->stats.ptc511,
4626 "256-511 byte frames transmitted");
4627 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4628 CTLFLAG_RD, &sc->stats.ptc1023,
4629 "512-1023 byte frames transmitted");
4630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4631 CTLFLAG_RD, &sc->stats.ptc1522,
4632 "1024-1522 byte frames transmitted");
4633 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4634 CTLFLAG_RD, &sc->stats.tsctc,
4635 "TSO Contexts Transmitted");
4636 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4637 CTLFLAG_RD, &sc->stats.tsctfc,
4638 "TSO Contexts Failed");
4639
4640
4641 /* Interrupt Stats */
4642
4643 int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
4644 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics");
4645 int_list = SYSCTL_CHILDREN(int_node);
4646
4647 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
4648 CTLFLAG_RD, &sc->stats.iac,
4649 "Interrupt Assertion Count");
4650
4651 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
4652 CTLFLAG_RD, &sc->stats.icrxptc,
4653 "Interrupt Cause Rx Pkt Timer Expire Count");
4654
4655 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
4656 CTLFLAG_RD, &sc->stats.icrxatc,
4657 "Interrupt Cause Rx Abs Timer Expire Count");
4658
4659 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
4660 CTLFLAG_RD, &sc->stats.ictxptc,
4661 "Interrupt Cause Tx Pkt Timer Expire Count");
4662
4663 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
4664 CTLFLAG_RD, &sc->stats.ictxatc,
4665 "Interrupt Cause Tx Abs Timer Expire Count");
4666
4667 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
4668 CTLFLAG_RD, &sc->stats.ictxqec,
4669 "Interrupt Cause Tx Queue Empty Count");
4670
4671 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
4672 CTLFLAG_RD, &sc->stats.ictxqmtc,
4673 "Interrupt Cause Tx Queue Min Thresh Count");
4674
4675 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
4676 CTLFLAG_RD, &sc->stats.icrxdmtc,
4677 "Interrupt Cause Rx Desc Min Thresh Count");
4678
4679 SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
4680 CTLFLAG_RD, &sc->stats.icrxoc,
4681 "Interrupt Cause Receiver Overrun Count");
4682 }
4683
4684 static void
4685 em_fw_version_locked(if_ctx_t ctx)
4686 {
4687 struct e1000_softc *sc = iflib_get_softc(ctx);
4688 struct e1000_hw *hw = &sc->hw;
4689 struct e1000_fw_version *fw_ver = &sc->fw_ver;
4690 uint16_t eep = 0;
4691
4692 /*
4693 * em_fw_version_locked() must run under the IFLIB_CTX_LOCK to meet the
4694 * NVM locking model, so we do it in em_if_attach_pre() and store the
4695 * info in the softc
4696 */
4697 ASSERT_CTX_LOCK_HELD(hw);
4698
4699 *fw_ver = (struct e1000_fw_version){0};
4700
4701 if (hw->mac.type >= igb_mac_min) {
4702 /*
4703 * Use the Shared Code for igb(4)
4704 */
4705 e1000_get_fw_version(hw, fw_ver);
4706 } else {
4707 /*
4708 * Otherwise, EEPROM version should be present on (almost?) all
4709 * devices here
4710 */
4711 if(e1000_read_nvm(hw, NVM_VERSION, 1, &eep)) {
4712 INIT_DEBUGOUT("can't get EEPROM version");
4713 return;
4714 }
4715
4716 fw_ver->eep_major = (eep & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
4717 fw_ver->eep_minor = (eep & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
4718 fw_ver->eep_build = (eep & NVM_IMAGE_ID_MASK);
4719 }
4720 }
4721
4722 static void
4723 em_sbuf_fw_version(struct e1000_fw_version *fw_ver, struct sbuf *buf)
4724 {
4725 const char *space = "";
4726
4727 if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) {
4728 sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major,
4729 fw_ver->eep_minor, fw_ver->eep_build);
4730 space = " ";
4731 }
4732
4733 if (fw_ver->invm_major || fw_ver->invm_minor || fw_ver->invm_img_type) {
4734 sbuf_printf(buf, "%sNVM V%d.%d imgtype%d",
4735 space, fw_ver->invm_major, fw_ver->invm_minor,
4736 fw_ver->invm_img_type);
4737 space = " ";
4738 }
4739
4740 if (fw_ver->or_valid) {
4741 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
4742 space, fw_ver->or_major, fw_ver->or_build,
4743 fw_ver->or_patch);
4744 space = " ";
4745 }
4746
4747 if (fw_ver->etrack_id)
4748 sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id);
4749 }
4750
4751 static void
4752 em_print_fw_version(struct e1000_softc *sc)
4753 {
4754 device_t dev = sc->dev;
4755 struct sbuf *buf;
4756 int error = 0;
4757
4758 buf = sbuf_new_auto();
4759 if (!buf) {
4760 device_printf(dev, "Could not allocate sbuf for output.\n");
4761 return;
4762 }
4763
4764 em_sbuf_fw_version(&sc->fw_ver, buf);
4765
4766 error = sbuf_finish(buf);
4767 if (error)
4768 device_printf(dev, "Error finishing sbuf: %d\n", error);
4769 else if (sbuf_len(buf))
4770 device_printf(dev, "%s\n", sbuf_data(buf));
4771
4772 sbuf_delete(buf);
4773 }
4774
4775 static int
4776 em_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
4777 {
4778 struct e1000_softc *sc = (struct e1000_softc *)arg1;
4779 device_t dev = sc->dev;
4780 struct sbuf *buf;
4781 int error = 0;
4782
4783 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4784 if (!buf) {
4785 device_printf(dev, "Could not allocate sbuf for output.\n");
4786 return (ENOMEM);
4787 }
4788
4789 em_sbuf_fw_version(&sc->fw_ver, buf);
4790
4791 error = sbuf_finish(buf);
4792 if (error)
4793 device_printf(dev, "Error finishing sbuf: %d\n", error);
4794
4795 sbuf_delete(buf);
4796
4797 return (0);
4798 }
4799
4800 /**********************************************************************
4801 *
4802  * This routine provides a way to dump out the adapter EEPROM, which is
4803  * often a useful debug/service tool. Only the first 32 words are dumped;
4804  * the data that matters lies within that range.
4805 *
4806 **********************************************************************/
4807 static int
4808 em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4809 {
4810 struct e1000_softc *sc = (struct e1000_softc *)arg1;
4811 int error;
4812 int result;
4813
4814 result = -1;
4815 error = sysctl_handle_int(oidp, &result, 0, req);
4816
4817 if (error || !req->newptr)
4818 return (error);
4819
4820 /*
4821 * This value will cause a hex dump of the
4822 * first 32 16-bit words of the EEPROM to
4823 * the screen.
4824 */
4825 if (result == 1)
4826 em_print_nvm_info(sc);
4827
4828 return (error);
4829 }
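
/*
 * Illustrative usage, assuming the handler above is attached as an "nvm"
 * node under the device's sysctl tree (the OID name is an assumption, not
 * taken from this file):
 *
 *   # sysctl dev.em.0.nvm=1
 *
 * Writing 1 calls em_print_nvm_info() and dumps the first 32 EEPROM words
 * to the console; any other value is ignored.
 */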
4830
4831 static void
4832 em_print_nvm_info(struct e1000_softc *sc)
4833 {
4834 struct e1000_hw *hw = &sc->hw;
4835 struct sx *iflib_ctx_lock = iflib_ctx_lock_get(sc->ctx);
4836 u16 eeprom_data;
4837 int i, j, row = 0;
4838
4839 	/* It's a bit crude, but it gets the job done. */
4840 printf("\nInterface EEPROM Dump:\n");
4841 printf("Offset\n0x0000 ");
4842
4843 	/* We rely on the IFLIB_CTX_LOCK as part of the NVM locking model */
4844 sx_xlock(iflib_ctx_lock);
4845 ASSERT_CTX_LOCK_HELD(hw);
4846 for (i = 0, j = 0; i < 32; i++, j++) {
4847 if (j == 8) { /* Make the offset block */
4848 j = 0; ++row;
4849 			printf("\n0x00%x0 ", row);
4850 }
4851 e1000_read_nvm(hw, i, 1, &eeprom_data);
4852 printf("%04x ", eeprom_data);
4853 }
4854 sx_xunlock(iflib_ctx_lock);
4855 printf("\n");
4856 }
4857
4858 static int
4859 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4860 {
4861 struct em_int_delay_info *info;
4862 struct e1000_softc *sc;
4863 u32 regval;
4864 int error, usecs, ticks;
4865
4866 info = (struct em_int_delay_info *) arg1;
4867 usecs = info->value;
4868 error = sysctl_handle_int(oidp, &usecs, 0, req);
4869 if (error != 0 || req->newptr == NULL)
4870 return (error);
4871 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4872 return (EINVAL);
4873 info->value = usecs;
4874 ticks = EM_USECS_TO_TICKS(usecs);
4875 if (info->offset == E1000_ITR) /* units are 256ns here */
4876 ticks *= 4;
4877
4878 sc = info->sc;
4879
4880 regval = E1000_READ_OFFSET(&sc->hw, info->offset);
4881 regval = (regval & ~0xffff) | (ticks & 0xffff);
4882 /* Handle a few special cases. */
4883 switch (info->offset) {
4884 case E1000_RDTR:
4885 break;
4886 case E1000_TIDV:
4887 if (ticks == 0) {
4888 sc->txd_cmd &= ~E1000_TXD_CMD_IDE;
4889 /* Don't write 0 into the TIDV register. */
4890 regval++;
4891 } else
4892 sc->txd_cmd |= E1000_TXD_CMD_IDE;
4893 break;
4894 }
4895 E1000_WRITE_OFFSET(&sc->hw, info->offset, regval);
4896 return (0);
4897 }
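
/*
 * Worked example of the conversion above (a sketch): setting a delay
 * tunable to 100 usecs stores EM_USECS_TO_TICKS(100) in the low 16 bits
 * of the selected register.  E1000_ITR counts in 256 ns units rather
 * than the usual register tick, so its tick value is first multiplied
 * by 4.
 */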
4898
4899 static void
4900 em_add_int_delay_sysctl(struct e1000_softc *sc, const char *name,
4901 const char *description, struct em_int_delay_info *info,
4902 int offset, int value)
4903 {
4904 info->sc = sc;
4905 info->offset = offset;
4906 info->value = value;
4907 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
4908 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
4909 OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
4910 info, 0, em_sysctl_int_delay, "I", description);
4911 }
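
/*
 * Illustrative registration (a sketch; the OID name, the softc field and
 * the default value shown here are assumptions, and the real calls live
 * where the driver builds its sysctl tree):
 *
 *	em_add_int_delay_sysctl(sc, "rx_int_delay",
 *	    "receive interrupt delay in usecs", &sc->rx_int_delay,
 *	    E1000_RDTR, em_rx_int_delay_dflt);
 *
 * Each tunable then appears as a read/write integer node serviced by
 * em_sysctl_int_delay() above.
 */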
4912
4913 /*
4914 * Set flow control using sysctl:
4915 * Flow control values:
4916 * 0 - off
4917 * 1 - rx pause
4918 * 2 - tx pause
4919 * 3 - full
4920 */
4921 static int
4922 em_set_flowcntl(SYSCTL_HANDLER_ARGS)
4923 {
4924 int error;
4925 static int input = 3; /* default is full */
4926 struct e1000_softc *sc = (struct e1000_softc *) arg1;
4927
4928 error = sysctl_handle_int(oidp, &input, 0, req);
4929
4930 if ((error) || (req->newptr == NULL))
4931 return (error);
4932
4933 if (input == sc->fc) /* no change? */
4934 return (error);
4935
4936 switch (input) {
4937 case e1000_fc_rx_pause:
4938 case e1000_fc_tx_pause:
4939 case e1000_fc_full:
4940 case e1000_fc_none:
4941 sc->hw.fc.requested_mode = input;
4942 sc->fc = input;
4943 break;
4944 default:
4945 /* Do nothing */
4946 return (error);
4947 }
4948
4949 sc->hw.fc.current_mode = sc->hw.fc.requested_mode;
4950 e1000_force_mac_fc(&sc->hw);
4951 return (error);
4952 }
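
/*
 * Illustrative usage, assuming the handler above is attached as an "fc"
 * node under the device's sysctl tree (the OID name is an assumption,
 * not taken from this file):
 *
 *   # sysctl dev.em.0.fc=3
 *
 * selects full flow control; the requested mode is forced onto the MAC
 * immediately via e1000_force_mac_fc().
 */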
4953
4954 /*
4955 * Manage Energy Efficient Ethernet:
4956 * Control values:
4957  *   0 - EEE enabled, 1 - EEE disabled
4958 */
4959 static int
4960 em_sysctl_eee(SYSCTL_HANDLER_ARGS)
4961 {
4962 struct e1000_softc *sc = (struct e1000_softc *) arg1;
4963 int error, value;
4964
4965 value = sc->hw.dev_spec.ich8lan.eee_disable;
4966 error = sysctl_handle_int(oidp, &value, 0, req);
4967 if (error || req->newptr == NULL)
4968 return (error);
4969 sc->hw.dev_spec.ich8lan.eee_disable = (value != 0);
4970 em_if_init(sc->ctx);
4971
4972 return (0);
4973 }
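
/*
 * Illustrative usage, assuming the handler above is attached as an
 * "eee_control" node under the device's sysctl tree (the OID name is an
 * assumption, not taken from this file):
 *
 *   # sysctl dev.em.0.eee_control=0
 *
 * clears eee_disable, re-enabling EEE, and reinitializes the interface so
 * the new setting takes effect.
 */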
4974
4975 static int
4976 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4977 {
4978 struct e1000_softc *sc;
4979 int error;
4980 int result;
4981
4982 result = -1;
4983 error = sysctl_handle_int(oidp, &result, 0, req);
4984
4985 if (error || !req->newptr)
4986 return (error);
4987
4988 if (result == 1) {
4989 sc = (struct e1000_softc *) arg1;
4990 em_print_debug_info(sc);
4991 }
4992
4993 return (error);
4994 }
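
/*
 * Illustrative usage, assuming the handler above is attached as a "debug"
 * node under the device's sysctl tree (the OID name is an assumption, not
 * taken from this file):
 *
 *   # sysctl dev.em.0.debug=1
 *
 * Writing 1 triggers em_print_debug_info(), which dumps the per-queue
 * head/tail registers; any other value is ignored.
 */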
4995
4996 static int
4997 em_get_rs(SYSCTL_HANDLER_ARGS)
4998 {
4999 struct e1000_softc *sc = (struct e1000_softc *) arg1;
5000 int error;
5001 int result;
5002
5003 result = 0;
5004 error = sysctl_handle_int(oidp, &result, 0, req);
5005
5006 if (error || !req->newptr || result != 1)
5007 return (error);
5008 em_dump_rs(sc);
5009
5010 return (error);
5011 }
5012
5013 static void
5014 em_if_debug(if_ctx_t ctx)
5015 {
5016 em_dump_rs(iflib_get_softc(ctx));
5017 }
5018
5019 /*
5020  * This routine is meant to be fluid; add whatever is
5021  * needed for debugging a problem.  -jfv
5022 */
5023 static void
5024 em_print_debug_info(struct e1000_softc *sc)
5025 {
5026 device_t dev = iflib_get_dev(sc->ctx);
5027 if_t ifp = iflib_get_ifp(sc->ctx);
5028 struct tx_ring *txr = &sc->tx_queues->txr;
5029 struct rx_ring *rxr = &sc->rx_queues->rxr;
5030
5031 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
5032 printf("Interface is RUNNING ");
5033 else
5034 printf("Interface is NOT RUNNING\n");
5035
5036 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
5037 printf("and INACTIVE\n");
5038 else
5039 printf("and ACTIVE\n");
5040
5041 for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
5042 device_printf(dev, "TX Queue %d ------\n", i);
5043 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5044 E1000_READ_REG(&sc->hw, E1000_TDH(i)),
5045 E1000_READ_REG(&sc->hw, E1000_TDT(i)));
5046
5047 }
5048 	for (int j = 0; j < sc->rx_num_queues; j++, rxr++) {
5049 device_printf(dev, "RX Queue %d ------\n", j);
5050 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5051 E1000_READ_REG(&sc->hw, E1000_RDH(j)),
5052 E1000_READ_REG(&sc->hw, E1000_RDT(j)));
5053 }
5054 }
5055
5056 /*
5057 * 82574 only:
5058 * Write a new value to the EEPROM increasing the number of MSI-X
5059 * vectors from 3 to 5, for proper multiqueue support.
5060 */
5061 static void
5062 em_enable_vectors_82574(if_ctx_t ctx)
5063 {
5064 struct e1000_softc *sc = iflib_get_softc(ctx);
5065 struct e1000_hw *hw = &sc->hw;
5066 device_t dev = iflib_get_dev(ctx);
5067 u16 edata;
5068
5069 e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
5070 if (bootverbose)
5071 device_printf(dev, "EM_NVM_PCIE_CTRL = %#06x\n", edata);
5072 if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) {
5073 device_printf(dev, "Writing to eeprom: increasing "
5074 "reported MSI-X vectors from 3 to 5...\n");
5075 edata &= ~(EM_NVM_MSIX_N_MASK);
5076 edata |= 4 << EM_NVM_MSIX_N_SHIFT;
5077 e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
5078 e1000_update_nvm_checksum(hw);
5079 device_printf(dev, "Writing to eeprom: done\n");
5080 }
5081 }
5082